move kafka_cluster_id to kafka:cluster_id

This commit is contained in:
m0duspwnens
2024-04-12 11:19:20 -04:00
parent f514e5e9bb
commit a54a72c269
6 changed files with 32 additions and 28 deletions

View File

@@ -61,7 +61,7 @@ base:
- backup.adv_backup
- minions.{{ grains.id }}
- minions.adv_{{ grains.id }}
- kafka.nodes
- kafka.*
- stig.soc_stig
'*_sensor':
@@ -177,6 +177,7 @@ base:
- minions.{{ grains.id }}
- minions.adv_{{ grains.id }}
- stig.soc_stig
- kafka.*
'*_heavynode':
- elasticsearch.auth
@@ -233,6 +234,7 @@ base:
- redis.adv_redis
- minions.{{ grains.id }}
- minions.adv_{{ grains.id }}
- kafka.*
'*_kafkanode':
- logstash.nodes

View File

@@ -123,7 +123,8 @@
'utility',
'schedule',
'docker_clean',
'stig'
'stig',
'kafka'
],
'so-searchnode': [
'ssl',
@@ -157,7 +158,8 @@
'schedule',
'tcpreplay',
'docker_clean',
'stig'
'stig',
'kafka'
],
'so-sensor': [
'ssl',
@@ -188,16 +190,8 @@
'telegraf',
'firewall',
'schedule',
'docker_clean'
],
'so-kafkanode': [
'kafka',
'logstash',
'ssl',
'telegraf',
'firewall',
'schedule',
'docker_clean'
'docker_clean',
'kafka'
],
'so-desktop': [
'ssl',
@@ -214,7 +208,7 @@
{% do allowed_states.append('strelka') %}
{% endif %}
{% if grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-searchnode', 'so-managersearch', 'so-heavynode', 'so-import', 'so-kafkanode'] %}
{% if grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-searchnode', 'so-managersearch', 'so-heavynode', 'so-import'] %}
{% do allowed_states.append('elasticsearch') %}
{% endif %}

View File

@@ -2,6 +2,12 @@ kafka:
enabled:
description: Enable or disable Kafka.
helpLink: kafka.html
cluster_id:
description: The ID of the Kafka cluster.
readonly: True
advanced: True
sensitive: True
helpLink: kafka.html
config:
server:
advertised_x_listeners:

View File

@@ -6,17 +6,18 @@
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% set kafka_cluster_id = salt['pillar.get']('secrets:kafka_cluster_id', default=None) %}
{% set kafka_cluster_id = salt['pillar.get']('kafka:cluster_id', default=None) %}
{% if GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone'] %}
{% if kafka_cluster_id is none %}
generate_kafka_cluster_id:
cmd.run:
- name: /usr/sbin/so-kafka-clusterid
{% endif %}
{% endif %}
{# Initialize kafka storage if it doesn't already exist. Just looking for meta.properties in /nsm/kafka/data #}
{% if salt['file.file_exists']('/nsm/kafka/data/meta.properties') %}
{% else %}
{% if not salt['file.file_exists']('/nsm/kafka/data/meta.properties') %}
kafka_storage_init:
cmd.run:
- name: |

View File

@@ -13,10 +13,11 @@ else
source $(dirname $0)/../../../common/tools/sbin/so-common
fi
if ! grep -q "^ kafka_cluster_id:" $local_salt_dir/pillar/secrets.sls; then
if ! grep -q "^ cluster_id:" $local_salt_dir/pillar/kafka/soc_kafka.sls; then
kafka_cluster_id=$(get_random_value 22)
echo ' kafka_cluster_id: '$kafka_cluster_id >> $local_salt_dir/pillar/secrets.sls
echo 'kafka: ' > $local_salt_dir/pillar/kafka/soc_kafka.sls
echo ' cluster_id: '$kafka_cluster_id >> $local_salt_dir/pillar/kafka/soc_kafka.sls
else
echo 'kafka_cluster_id exists'
salt-call pillar.get secrets
echo 'kafka:cluster_id pillar exists'
salt-call pillar.get kafka:cluster_id
fi

View File

@@ -666,7 +666,7 @@ elastickeyperms:
# Roles will need to be modified. Below is just for testing encrypted kafka pipelines
# Remove so-manager. Just in place for testing
{% if grains['role'] in ['so-manager', 'so-kafkanode', 'so-searchnode'] %}
{% if grains['role'] in ['so-manager', 'so-receiver', 'so-searchnode'] %}
# Create a cert for Redis encryption
kafka_key:
x509.private_key_managed:
@@ -770,7 +770,7 @@ kafka_logstash_crt:
- onchanges:
- x509: /etc/pki/kafka-logstash.key
{% if grains['role'] in ['so-manager'] %}
{% if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-receiver'] %}
kafka_client_key:
x509.private_key_managed:
- name: /etc/pki/kafka-client.key