Update Kafka controller(s) via SOC UI

Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
This commit is contained in:
reyesj2
2024-06-10 11:08:54 -04:00
parent c4723263a4
commit 284c1be85f
7 changed files with 30 additions and 10 deletions

View File

@@ -7,8 +7,6 @@
{% set KAFKA_NODES_PILLAR = salt['pillar.get']('kafka:nodes') %} {% set KAFKA_NODES_PILLAR = salt['pillar.get']('kafka:nodes') %}
{% set KAFKA_CONTROLLERS_PILLAR = salt['pillar.get']('kafka:kafka_controllers', default=None) %}
{# Create list of KRaft controllers #} {# Create list of KRaft controllers #}
{% set controllers = [] %} {% set controllers = [] %}

View File

@@ -66,6 +66,12 @@ kafka_kraft_{{sc}}_properties:
- show_changes: False - show_changes: False
{% endfor %} {% endfor %}
reset_quorum_on_changes:
cmd.run:
- name: rm -f /nsm/kafka/data/__cluster_metadata-0/quorum-state
- watch:
- file: /opt/so/conf/kafka/server.properties
{% else %} {% else %}
{{sls}}_state_not_allowed: {{sls}}_state_not_allowed:

View File

@@ -2,7 +2,7 @@ kafka:
enabled: False enabled: False
cluster_id: cluster_id:
kafka_pass: kafka_pass:
kafka_controllers: [] kafka_controllers:
config: config:
broker: broker:
advertised_x_listeners: advertised_x_listeners:

View File

@@ -53,9 +53,7 @@ so-kafka:
- /nsm/kafka/data/:/nsm/kafka/data/:rw - /nsm/kafka/data/:/nsm/kafka/data/:rw
- /opt/so/log/kafka:/opt/kafka/logs/:rw - /opt/so/log/kafka:/opt/kafka/logs/:rw
- /opt/so/conf/kafka/server.properties:/opt/kafka/config/kraft/server.properties:ro - /opt/so/conf/kafka/server.properties:/opt/kafka/config/kraft/server.properties:ro
{% if GLOBALS.is_manager %}
- /opt/so/conf/kafka/client.properties:/opt/kafka/config/kraft/client.properties - /opt/so/conf/kafka/client.properties:/opt/kafka/config/kraft/client.properties
{% endif %}
- watch: - watch:
{% for sc in ['server', 'client'] %} {% for sc in ['server', 'client'] %}
- file: kafka_kraft_{{sc}}_properties - file: kafka_kraft_{{sc}}_properties

View File

@@ -68,14 +68,15 @@
{# Update the process_x_roles value for any host in the kafka_controllers_pillar configured from SOC UI #} {# Update the process_x_roles value for any host in the kafka_controllers_pillar configured from SOC UI #}
{% set ns = namespace(has_controller=false) %} {% set ns = namespace(has_controller=false) %}
{% if KAFKA_CONTROLLERS_PILLAR != none %} {% if KAFKA_CONTROLLERS_PILLAR != none %}
{% for hostname in KAFKA_CONTROLLERS_PILLAR %} {% set KAFKA_CONTROLLERS_PILLAR_LIST = KAFKA_CONTROLLERS_PILLAR.split(',') %}
{% for hostname in KAFKA_CONTROLLERS_PILLAR_LIST %}
{% if hostname in COMBINED_KAFKANODES %} {% if hostname in COMBINED_KAFKANODES %}
{% do COMBINED_KAFKANODES[hostname].update({'role': 'controller'}) %} {% do COMBINED_KAFKANODES[hostname].update({'role': 'controller'}) %}
{% set ns.has_controller = true %} {% set ns.has_controller = true %}
{% endif %} {% endif %}
{% endfor %} {% endfor %}
{% for hostname in COMBINED_KAFKANODES %} {% for hostname in COMBINED_KAFKANODES %}
{% if hostname not in KAFKA_CONTROLLERS_PILLAR %} {% if hostname not in KAFKA_CONTROLLERS_PILLAR_LIST %}
{% do COMBINED_KAFKANODES[hostname].update({'role': 'broker'}) %} {% do COMBINED_KAFKANODES[hostname].update({'role': 'broker'}) %}
{% endif %} {% endif %}
{% endfor %} {% endfor %}

View File

@@ -13,9 +13,8 @@ kafka:
sensitive: True sensitive: True
helpLink: kafka.html helpLink: kafka.html
kafka_controllers: kafka_controllers:
description: A list of Security Onion grid members that should act as controllers for this Kafka cluster. By default, the grid manager will use a 'combined' role where it will act as both a broker and controller. Keep total Kafka controllers to an odd number and ensure you do not assign ALL your Kafka nodes as controllers or this Kafka cluster will not start. description: A comma-separated list of Security Onion grid members that should act as controllers for this Kafka cluster. By default, the grid manager will use a 'combined' role where it will act as both a broker and controller. Keep total Kafka controllers to an odd number and ensure you do not assign ALL your Kafka nodes as controllers or this Kafka cluster will not start.
forcedType: "[]string" forcedType: "string"
multiline: True
helpLink: kafka.html helpLink: kafka.html
config: config:
broker: broker:

View File

@@ -57,4 +57,22 @@ engines:
cmd: salt -C 'G@role:so-standalone or G@role:so-manager or G@role:so-managersearch or G@role:so-receiver' saltutil.kill_all_jobs cmd: salt -C 'G@role:so-standalone or G@role:so-manager or G@role:so-managersearch or G@role:so-receiver' saltutil.kill_all_jobs
- cmd.run: - cmd.run:
cmd: salt -C 'G@role:so-standalone or G@role:so-manager or G@role:so-managersearch or G@role:so-receiver' state.highstate cmd: salt -C 'G@role:so-standalone or G@role:so-manager or G@role:so-managersearch or G@role:so-receiver' state.highstate
- files:
- /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls
- /opt/so/saltstack/local/pillar/kafka/adv_kafka.sls
pillar: kafka.kafka_controllers
default: ''
actions:
from:
'*':
to:
'*':
- cmd.run:
cmd: salt -C 'G@role:so-standalone or G@role:so-manager or G@role:so-managersearch or G@role:so-receiver' saltutil.kill_all_jobs
- cmd.run:
cmd: salt-call state.apply kafka.nodes
- cmd.run:
cmd: salt -C 'G@role:so-standalone or G@role:so-manager or G@role:so-managersearch or G@role:so-receiver' state.apply kafka
- cmd.run:
cmd: salt-call state.apply elasticfleet
interval: 10 interval: 10