From d791b23838c5edd4f407256eccd04db70dc60428 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Wed, 10 Jul 2024 11:29:09 -0400 Subject: [PATCH 01/10] Generate new Kafka truststore Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- pillar/top.sls | 2 + salt/allowed_states.map.jinja | 4 +- salt/kafka/ca.sls | 37 +++++++++++++++++++ salt/kafka/config.map.jinja | 16 +++----- salt/kafka/config.sls | 22 +++++++++-- salt/kafka/defaults.yaml | 21 ++++++----- salt/kafka/enabled.sls | 10 +++-- salt/kafka/soc_kafka.yaml | 24 +++++++++--- salt/kafka/tools/sbin_jinja/so-kafka-trust | 13 +++++++ .../config/so/0800_input_kafka.conf.jinja | 5 ++- salt/manager/tools/sbin/soup | 5 ++- setup/so-functions | 4 +- 12 files changed, 125 insertions(+), 38 deletions(-) create mode 100644 salt/kafka/ca.sls create mode 100644 salt/kafka/tools/sbin_jinja/so-kafka-trust diff --git a/pillar/top.sls b/pillar/top.sls index 14229162f..76d1a14e1 100644 --- a/pillar/top.sls +++ b/pillar/top.sls @@ -233,6 +233,8 @@ base: - stig.soc_stig - soc.license - kafka.nodes + - kafka.soc_kafka + - kafka.adv_kafka '*_receiver': - logstash.nodes diff --git a/salt/allowed_states.map.jinja b/salt/allowed_states.map.jinja index eb73e6e29..29ee968aa 100644 --- a/salt/allowed_states.map.jinja +++ b/salt/allowed_states.map.jinja @@ -136,7 +136,9 @@ 'firewall', 'schedule', 'docker_clean', - 'stig' + 'stig', + 'kafka.ca', + 'kafka.ssl' ], 'so-standalone': [ 'salt.master', diff --git a/salt/kafka/ca.sls b/salt/kafka/ca.sls new file mode 100644 index 000000000..f5e78ee2c --- /dev/null +++ b/salt/kafka/ca.sls @@ -0,0 +1,37 @@ +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. 
+ +{% from 'allowed_states.map.jinja' import allowed_states %} +{% if sls.split('.')[0] in allowed_states or sls in allowed_states %} +{% from 'vars/globals.map.jinja' import GLOBALS %} +{% set KAFKATRUST = salt['pillar.get']('kafka:truststore') %} + +kafkaconfdir: + file.directory: + - name: /opt/so/conf/kafka + - user: 960 + - group: 960 + - makedirs: True + +{% if GLOBALS.is_manager %} +# Manager runs so-kafka-trust to create truststore for Kafka ssl communication +kafka_truststore: + cmd.script: + - source: salt://kafka/tools/sbin_jinja/so-kafka-trust + - template: jinja + - cwd: /opt/so + - defaults: + GLOBALS: {{ GLOBALS }} + KAFKATRUST: {{ KAFKATRUST }} +{% endif %} + +kafkacertz: + file.managed: + - name: /opt/so/conf/kafka/kafka-truststore.jks + - source: salt://kafka/files/kafka-truststore + - user: 960 + - group: 931 + +{% endif %} \ No newline at end of file diff --git a/salt/kafka/config.map.jinja b/salt/kafka/config.map.jinja index 4c408a1e7..b054e0656 100644 --- a/salt/kafka/config.map.jinja +++ b/salt/kafka/config.map.jinja @@ -7,6 +7,7 @@ {% set KAFKA_NODES_PILLAR = salt['pillar.get']('kafka:nodes') %} {% set KAFKA_PASSWORD = salt['pillar.get']('kafka:password') %} +{% set KAFKA_TRUSTPASS = salt['pillar.get']('kafka:trustpass') %} {# Create list of KRaft controllers #} {% set controllers = [] %} @@ -67,19 +68,12 @@ {% endif %} -{# If a password other than PLACEHOLDER isn't set remove it from the server.properties #} -{% if KAFKAMERGED.config.broker.ssl_x_truststore_x_password == 'PLACEHOLDER' %} -{% do KAFKAMERGED.config.broker.pop('ssl_x_truststore_x_password') %} -{% endif %} - -{% if KAFKAMERGED.config.controller.ssl_x_truststore_x_password == 'PLACEHOLDER' %} -{% do KAFKAMERGED.config.controller.pop('ssl_x_truststore_x_password') %} -{% endif %} +{# Truststore config #} +{% do KAFKAMERGED.config.broker.update({'ssl_x_truststore_x_password': KAFKA_TRUSTPASS }) %} +{% do KAFKAMERGED.config.controller.update({'ssl_x_truststore_x_password': KAFKA_TRUSTPASS }) %} +{% do KAFKAMERGED.config.client.update({'ssl_x_truststore_x_password': KAFKA_TRUSTPASS }) %} {# Client properties stuff #} -{% if KAFKAMERGED.config.client.ssl_x_truststore_x_password == 'PLACEHOLDER' %} -{% do KAFKAMERGED.config.client.pop('ssl_x_truststore_x_password') %} -{% endif %} {% do KAFKAMERGED.config.client.update({'ssl_x_keystore_x_password': KAFKA_PASSWORD }) %} {% if 'broker' in node_type %} diff --git a/salt/kafka/config.sls b/salt/kafka/config.sls index 6293ee697..1cfd1d3eb 100644 --- a/salt/kafka/config.sls +++ b/salt/kafka/config.sls @@ -7,18 +7,21 @@ {% if sls.split('.')[0] in allowed_states %} {% from 'vars/globals.map.jinja' import GLOBALS %} -include: - - ssl - kafka_group: group.present: - name: kafka - gid: 960 -kafka: +kafka_user: user.present: - uid: 960 - gid: 960 + - home: /opt/so/conf/kafka + - createhome: False + +kafka_home_dir: + file.absent: + - name: /home/kafka kafka_sbin_tools: file.recurse: @@ -28,6 +31,17 @@ kafka_sbin_tools: - group: 960 - file_mode: 755 +kafka_sbin_jinja_tools: + file.recurse: + - name: /usr/sbin + - source: salt://kafka/tools/sbin_jinja + - user: 960 + - group: 960 + - file_mode: 755 + - template: jinja + - defaults: + GLOBALS: {{ GLOBALS }} + kafka_log_dir: file.directory: - name: /opt/so/log/kafka diff --git a/salt/kafka/defaults.yaml b/salt/kafka/defaults.yaml index ad626458f..c20d8552c 100644 --- a/salt/kafka/defaults.yaml +++ b/salt/kafka/defaults.yaml @@ -1,10 +1,11 @@ kafka: enabled: False cluster_id: - password: controllers: reset: config: + 
password: + trustpass: broker: advertised_x_listeners: auto_x_create_x_topics_x_enable: true @@ -30,16 +31,16 @@ kafka: ssl_x_keystore_x_location: /etc/pki/kafka.p12 ssl_x_keystore_x_type: PKCS12 ssl_x_keystore_x_password: - ssl_x_truststore_x_location: /etc/pki/java/sos/cacerts - ssl_x_truststore_x_password: PLACEHOLDER - ssl_x_truststore_x_type: PEM + ssl_x_truststore_x_location: /etc/pki/kafka-truststore.jks + ssl_x_truststore_x_type: JKS + ssl_x_truststore_x_password: transaction_x_state_x_log_x_min_x_isr: 1 transaction_x_state_x_log_x_replication_x_factor: 1 client: security_x_protocol: SSL - ssl_x_truststore_x_location: /etc/pki/java/sos/cacerts - ssl_x_truststore_x_password: PLACEHOLDER - ssl_x_truststore_x_type: PEM + ssl_x_truststore_x_location: /etc/pki/kafka-truststore.jks + ssl_x_truststore_x_type: JKS + ssl_x_truststore_x_password: ssl_x_keystore_x_location: /etc/pki/kafka.p12 ssl_x_keystore_x_type: PKCS12 ssl_x_keystore_x_password: @@ -57,6 +58,6 @@ kafka: ssl_x_keystore_x_location: /etc/pki/kafka.p12 ssl_x_keystore_x_type: PKCS12 ssl_x_keystore_x_password: - ssl_x_truststore_x_location: /etc/pki/java/sos/cacerts - ssl_x_truststore_x_password: PLACEHOLDER - ssl_x_truststore_x_type: PEM \ No newline at end of file + ssl_x_truststore_x_location: /etc/pki/kafka-truststore.jks + ssl_x_truststore_x_type: JKS + ssl_x_truststore_x_password: \ No newline at end of file diff --git a/salt/kafka/enabled.sls b/salt/kafka/enabled.sls index 0837b5af6..362f7fde3 100644 --- a/salt/kafka/enabled.sls +++ b/salt/kafka/enabled.sls @@ -17,10 +17,11 @@ {% if 'gmd' in salt['pillar.get']('features', []) %} include: - - elasticsearch.ca - - kafka.sostatus + - kafka.ca - kafka.config + - kafka.ssl - kafka.storage + - kafka.sostatus so-kafka: docker_container.running: @@ -49,7 +50,7 @@ so-kafka: {% endfor %} - binds: - /etc/pki/kafka.p12:/etc/pki/kafka.p12:ro - - /etc/pki/tls/certs/intca.crt:/etc/pki/java/sos/cacerts:ro + - /opt/so/conf/kafka/kafka-truststore.jks:/etc/pki/kafka-truststore.jks:ro - /nsm/kafka/data/:/nsm/kafka/data/:rw - /opt/so/log/kafka:/opt/kafka/logs/:rw - /opt/so/conf/kafka/server.properties:/opt/kafka/config/kraft/server.properties:ro @@ -58,6 +59,9 @@ so-kafka: {% for sc in ['server', 'client'] %} - file: kafka_kraft_{{sc}}_properties {% endfor %} + - file: kafkacertz + - require: + - file: kafkacertz delete_so-kafka_so-status.disabled: file.uncomment: diff --git a/salt/kafka/soc_kafka.yaml b/salt/kafka/soc_kafka.yaml index 0c9c8a57e..872bf51f2 100644 --- a/salt/kafka/soc_kafka.yaml +++ b/salt/kafka/soc_kafka.yaml @@ -8,19 +8,25 @@ kafka: advanced: True sensitive: True helpLink: kafka.html - password: - description: The password to use for the Kafka certificates. - sensitive: True - helpLink: kafka.html controllers: description: A comma-separated list of hostnames that will act as Kafka controllers. These hosts will be responsible for managing the Kafka cluster. Note that only manager and receiver nodes are eligible to run Kafka. This configuration needs to be set before enabling Kafka. Failure to do so may result in Kafka topics becoming unavailable requiring manual intervention to restore functionality or reset Kafka, either of which can result in data loss. - forcedType: "string" + forcedType: string helpLink: kafka.html reset: description: Disable and reset the Kafka cluster. This will remove all Kafka data including logs that may have not yet been ingested into Elasticsearch and reverts the grid to using REDIS as the global pipeline. 
This is useful when testing different Kafka configurations such as rearranging Kafka brokers / controllers allowing you to reset the cluster rather than manually fixing any issues arising from attempting to reassign a Kafka broker into a controller. Enter 'YES_RESET_KAFKA' and submit to disable and reset Kafka. Make any configuration changes required and re-enable Kafka when ready. This action CANNOT be reversed.
     advanced: True
     helpLink: kafka.html
   config:
+    password:
+      description: The password used for the Kafka certificates.
+      readonly: True
+      sensitive: True
+      helpLink: kafka.html
+    trustpass:
+      description: The password used for the Kafka truststore.
+      readonly: True
+      sensitive: True
+      helpLink: kafka.html
     broker:
       advertised_x_listeners:
         description: Specify the list of listeners (hostname and port) that Kafka brokers provide to clients for communication.
@@ -128,6 +134,10 @@ kafka:
         description: The trust store file location within the Docker container.
         title: ssl.truststore.location
         helpLink: kafka.html
+      ssl_x_truststore_x_type:
+        description: The trust store file format.
+        title: ssl.truststore.type
+        helpLink: kafka.html
       ssl_x_truststore_x_password:
         description: The trust store file password. If null, the trust store file is still used, but integrity checking is disabled. Invalid for PEM format.
         title: ssl.truststore.password
         helpLink: kafka.html
@@ -167,6 +177,10 @@ kafka:
         description: The trust store file location within the Docker container.
         title: ssl.truststore.location
         helpLink: kafka.html
+      ssl_x_truststore_x_type:
+        description: The trust store file format.
+        title: ssl.truststore.type
+        helpLink: kafka.html
       ssl_x_truststore_x_password:
         description: The trust store file password. If null, the trust store file is still used, but integrity checking is disabled. Invalid for PEM format.
         title: ssl.truststore.password
diff --git a/salt/kafka/tools/sbin_jinja/so-kafka-trust b/salt/kafka/tools/sbin_jinja/so-kafka-trust
new file mode 100644
index 000000000..8d404cb9a
--- /dev/null
+++ b/salt/kafka/tools/sbin_jinja/so-kafka-trust
@@ -0,0 +1,13 @@
+#!/bin/bash
+#
+# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
+# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
+# https://securityonion.net/license; you may not use this file except in compliance with the
+# Elastic License 2.0.
+{% set TRUSTPASS = salt['pillar.get']('kafka:trustpass') %}
+
+if [ ! -f /opt/so/saltstack/local/salt/kafka/files/kafka-truststore ]; then
+  docker run -v /etc/pki/ca.crt:/etc/pki/ca.crt --name so-kafkatrust --user root --entrypoint /opt/java/openjdk/bin/keytool {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-kafka:{{ GLOBALS.so_version }} -import -file /etc/pki/ca.crt -alias SOS -keystore /etc/pki/kafka-truststore -storepass {{ TRUSTPASS }} -storetype jks -noprompt
+  docker cp so-kafkatrust:/etc/pki/kafka-truststore /opt/so/saltstack/local/salt/kafka/files/kafka-truststore
+  docker rm so-kafkatrust
+fi
\ No newline at end of file
diff --git a/salt/logstash/pipelines/config/so/0800_input_kafka.conf.jinja b/salt/logstash/pipelines/config/so/0800_input_kafka.conf.jinja
index dfb246210..00dd6d530 100644
--- a/salt/logstash/pipelines/config/so/0800_input_kafka.conf.jinja
+++ b/salt/logstash/pipelines/config/so/0800_input_kafka.conf.jinja
@@ -1,4 +1,5 @@
 {%- set kafka_password = salt['pillar.get']('kafka:password') %}
+{%- set kafka_trustpass = salt['pillar.get']('kafka:trustpass') %}
 {%- set kafka_brokers = salt['pillar.get']('kafka:nodes', {}) %}
 
 {%- set brokers = [] %}
@@ -22,8 +23,8 @@ input {
     ssl_keystore_location => '/usr/share/logstash/kafka-logstash.p12'
     ssl_keystore_password => '{{ kafka_password }}'
     ssl_keystore_type => 'PKCS12'
-    ssl_truststore_location => '/etc/pki/ca-trust/extracted/java/cacerts'
-    ssl_truststore_password => 'changeit'
+    ssl_truststore_location => '/etc/pki/kafka-truststore.jks'
+    ssl_truststore_password => '{{ kafka_trustpass }}'
     decorate_events => true
     tags => [ "elastic-agent", "input-{{ GLOBALS.hostname}}", "kafka" ]
   }
diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup
index c76fe311e..019f29ebb 100755
--- a/salt/manager/tools/sbin/soup
+++ b/salt/manager/tools/sbin/soup
@@ -673,7 +673,10 @@ up_to_2.4.80() {
 }
 
 up_to_2.4.90() {
-  echo "Nothing to apply"
+  kafkatrust=$(get_random_value)
+  echo '  trustpass: '$kafkatrust >> /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls
+
+  INSTALLEDVERSION=2.4.90
 }
 
diff --git a/setup/so-functions b/setup/so-functions
index b1469b7eb..aa9eb1909 100755
--- a/setup/so-functions
+++ b/setup/so-functions
@@ -1180,13 +1180,15 @@ kibana_pillar() {
 kafka_pillar() {
   KAFKACLUSTERID=$(get_random_value 22)
   KAFKAPASS=$(get_random_value)
+  KAFKATRUST=$(get_random_value)
   logCmd "mkdir -p $local_salt_dir/pillar/kafka"
   logCmd "touch $adv_kafka_pillar_file"
   logCmd "touch $kafka_pillar_file"
   printf '%s\n'\
   "kafka:"\
   "  cluster_id: $KAFKACLUSTERID"\
-  "  password: $KAFKAPASS" > $kafka_pillar_file
+  "  password: $KAFKAPASS"\
+  "  trustpass: $KAFKATRUST" > $kafka_pillar_file
 }
 
 logrotate_pillar() {

From 8e1edd1d91e21aea68e6092f743917295d228786 Mon Sep 17 00:00:00 2001
From: reyesj2 <94730068+reyesj2@users.noreply.github.com>
Date: Wed, 10 Jul 2024 11:32:43 -0400
Subject: [PATCH 02/10] split Kafka ssl from ssl/init. Certs won't be generated until Kafka is enabled.
 Also runs some cleanup for old Kafka certs

Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
---
 salt/kafka/disabled.sls   |  11 ++-
 salt/kafka/ssl.sls        | 196 ++++++++++++++++++++++++++++++++++++++
 salt/logstash/enabled.sls |  14 ++-
 salt/ssl/init.sls         | 190 +-----------------------------------
 4 files changed, 221 insertions(+), 190 deletions(-)
 create mode 100644 salt/kafka/ssl.sls

diff --git a/salt/kafka/disabled.sls b/salt/kafka/disabled.sls
index 707e953a4..79fd0c261 100644
--- a/salt/kafka/disabled.sls
+++ b/salt/kafka/disabled.sls
@@ -22,4 +22,13 @@ ensure_default_pipeline:
     - name: |
         /usr/sbin/so-yaml.py replace /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.enabled False;
         /usr/sbin/so-yaml.py replace /opt/so/saltstack/local/pillar/global/soc_global.sls global.pipeline REDIS
-{% endif %}
\ No newline at end of file
+{% endif %}
+
+{# If Kafka has never been manually enabled, the 'kafka' user (uid 960) does not exist. Any Kafka certs left over from an earlier install are then owned by an unknown uid, so remove them. #}
+{% for cert in ['kafka-client.crt','kafka-client.key','kafka.crt','kafka.key','kafka-logstash.crt','kafka-logstash.key','kafka-logstash.p12','kafka.p12','elasticfleet-kafka.p8'] %}
+check_kafka_cert_{{cert}}:
+  file.absent:
+    - name: /etc/pki/{{cert}}
+    - onlyif: stat -c %U /etc/pki/{{cert}} | grep -q UNKNOWN
+    - show_changes: False
+{% endfor %}
\ No newline at end of file
diff --git a/salt/kafka/ssl.sls b/salt/kafka/ssl.sls
new file mode 100644
index 000000000..c4e46ac8a
--- /dev/null
+++ b/salt/kafka/ssl.sls
@@ -0,0 +1,196 @@
+# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
+# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
+# https://securityonion.net/license; you may not use this file except in compliance with the
+# Elastic License 2.0.
+ +{% from 'allowed_states.map.jinja' import allowed_states %} +{% if sls in allowed_states %} +{% from 'vars/globals.map.jinja' import GLOBALS %} + +{% set kafka_password = salt['pillar.get']('kafka:password') %} + +include: + - ca.dirs + {% set global_ca_server = [] %} + {% set x509dict = salt['mine.get'](GLOBALS.manager | lower~'*', 'x509.get_pem_entries') %} + {% for host in x509dict %} + {% if 'manager' in host.split('_')|last or host.split('_')|last == 'standalone' %} + {% do global_ca_server.append(host) %} + {% endif %} + {% endfor %} + {% set ca_server = global_ca_server[0] %} + + +{% if GLOBALS.pipeline == "KAFKA" %} + +{% if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone'] %} +kafka_client_key: + x509.private_key_managed: + - name: /etc/pki/kafka-client.key + - keysize: 4096 + - backup: True + - new: True + {% if salt['file.file_exists']('/etc/pki/kafka-client.key') -%} + - prereq: + - x509: /etc/pki/kafka-client.crt + {%- endif %} + - retry: + attempts: 5 + interval: 30 + +kafka_client_crt: + x509.certificate_managed: + - name: /etc/pki/kafka-client.crt + - ca_server: {{ ca_server }} + - subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }} + - signing_policy: kafka + - private_key: /etc/pki/kafka-client.key + - CN: {{ GLOBALS.hostname }} + - days_remaining: 0 + - days_valid: 820 + - backup: True + - timeout: 30 + - retry: + attempts: 5 + interval: 30 + +kafka_client_key_perms: + file.managed: + - replace: False + - name: /etc/pki/kafka-client.key + - mode: 640 + - user: 960 + - group: 939 + +kafka_client_crt_perms: + file.managed: + - replace: False + - name: /etc/pki/kafka-client.crt + - mode: 640 + - user: 960 + - group: 939 +{% endif %} + +{% if grains['role'] in ['so-manager', 'so-managersearch','so-receiver', 'so-standalone'] %} +kafka_key: + x509.private_key_managed: + - name: /etc/pki/kafka.key + - keysize: 4096 + - backup: True + - new: True + {% if salt['file.file_exists']('/etc/pki/kafka.key') -%} + - prereq: + - x509: /etc/pki/kafka.crt + {%- endif %} + - retry: + attempts: 5 + interval: 30 + +kafka_crt: + x509.certificate_managed: + - name: /etc/pki/kafka.crt + - ca_server: {{ ca_server }} + - subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }} + - signing_policy: kafka + - private_key: /etc/pki/kafka.key + - CN: {{ GLOBALS.hostname }} + - days_remaining: 0 + - days_valid: 820 + - backup: True + - timeout: 30 + - retry: + attempts: 5 + interval: 30 + cmd.run: + - name: "/usr/bin/openssl pkcs12 -inkey /etc/pki/kafka.key -in /etc/pki/kafka.crt -export -out /etc/pki/kafka.p12 -nodes -passout pass:{{ kafka_password }}" + - onchanges: + - x509: /etc/pki/kafka.key +kafka_key_perms: + file.managed: + - replace: False + - name: /etc/pki/kafka.key + - mode: 640 + - user: 960 + - group: 939 + +kafka_crt_perms: + file.managed: + - replace: False + - name: /etc/pki/kafka.crt + - mode: 640 + - user: 960 + - group: 939 + +kafka_pkcs12_perms: + file.managed: + - replace: False + - name: /etc/pki/kafka.p12 + - mode: 640 + - user: 960 + - group: 939 +{% endif %} + +# Standalone needs kafka-logstash for automated testing. Searchnode/manager search need it for logstash to consume from Kafka. +# Manager will have cert, but be unused until a pipeline is created and logstash enabled. 
+{% if grains['role'] in ['so-standalone', 'so-managersearch', 'so-searchnode', 'so-manager'] %} +kafka_logstash_key: + x509.private_key_managed: + - name: /etc/pki/kafka-logstash.key + - keysize: 4096 + - backup: True + - new: True + {% if salt['file.file_exists']('/etc/pki/kafka-logstash.key') -%} + - prereq: + - x509: /etc/pki/kafka-logstash.crt + {%- endif %} + - retry: + attempts: 5 + interval: 30 + +kafka_logstash_crt: + x509.certificate_managed: + - name: /etc/pki/kafka-logstash.crt + - ca_server: {{ ca_server }} + - subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }} + - signing_policy: kafka + - private_key: /etc/pki/kafka-logstash.key + - CN: {{ GLOBALS.hostname }} + - days_remaining: 0 + - days_valid: 820 + - backup: True + - timeout: 30 + - retry: + attempts: 5 + interval: 30 + cmd.run: + - name: "/usr/bin/openssl pkcs12 -inkey /etc/pki/kafka-logstash.key -in /etc/pki/kafka-logstash.crt -export -out /etc/pki/kafka-logstash.p12 -nodes -passout pass:{{ kafka_password }}" + - onchanges: + - x509: /etc/pki/kafka-logstash.key + +kafka_logstash_key_perms: + file.managed: + - replace: False + - name: /etc/pki/kafka-logstash.key + - mode: 640 + - user: 931 + - group: 939 + +kafka_logstash_crt_perms: + file.managed: + - replace: False + - name: /etc/pki/kafka-logstash.crt + - mode: 640 + - user: 931 + - group: 939 + +kafka_logstash_pkcs12_perms: + file.managed: + - replace: False + - name: /etc/pki/kafka-logstash.p12 + - mode: 640 + - user: 931 + - group: 939 + +{% endif %} + +{% endif %} \ No newline at end of file diff --git a/salt/logstash/enabled.sls b/salt/logstash/enabled.sls index f95a76f13..0f44a3767 100644 --- a/salt/logstash/enabled.sls +++ b/salt/logstash/enabled.sls @@ -14,6 +14,11 @@ include: {% if GLOBALS.role not in ['so-receiver','so-fleet'] %} - elasticsearch.ca +{% endif %} +{# Kafka ca runs on nodes that can run logstash for Kafka input / output. 
Only when Kafka is global pipeline #} +{% if GLOBALS.role in ['so-searchnode', 'so-manager', 'so-managersearch', 'so-receiver', 'so-standalone'] and GLOBALS.pipeline == 'KAFKA' %} + - kafka.ca + - kafka.ssl {% endif %} - logstash.config - logstash.sostatus @@ -79,8 +84,9 @@ so-logstash: - /opt/so/conf/ca/cacerts:/etc/pki/ca-trust/extracted/java/cacerts:ro - /opt/so/conf/ca/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro {% endif %} - {% if GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone', 'so-searchnode'] %} + {% if GLOBALS.pipeline == "KAFKA" and GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone', 'so-searchnode'] %} - /etc/pki/kafka-logstash.p12:/usr/share/logstash/kafka-logstash.p12:ro + - /opt/so/conf/kafka/kafka-truststore.jks:/etc/pki/kafka-truststore.jks:ro {% endif %} {% if GLOBALS.role == 'so-eval' %} - /nsm/zeek:/nsm/zeek:ro @@ -105,6 +111,9 @@ so-logstash: - file: ls_pipeline_{{assigned_pipeline}}_{{CONFIGFILE.split('.')[0] | replace("/","_") }} {% endfor %} {% endfor %} + {% if GLOBALS.pipeline == 'KAFKA' and GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone', 'so-searchnode'] %} + - file: kafkacertz + {% endif %} - require: {% if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-receiver'] %} - x509: etc_filebeat_crt @@ -118,6 +127,9 @@ so-logstash: - file: cacertz - file: capemz {% endif %} + {% if GLOBALS.pipeline == 'KAFKA' and GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone', 'so-searchnode'] %} + - file: kafkacertz + {% endif %} delete_so-logstash_so-status.disabled: file.uncomment: diff --git a/salt/ssl/init.sls b/salt/ssl/init.sls index abcb1a559..f5be34c40 100644 --- a/salt/ssl/init.sls +++ b/salt/ssl/init.sls @@ -17,8 +17,6 @@ {% set COMMONNAME = GLOBALS.manager %} {% endif %} -{% set kafka_password = salt['pillar.get']('kafka:password') %} - {% if grains.id.split('_')|last in ['manager', 'managersearch', 'eval', 'standalone', 'import'] %} include: - ca @@ -666,7 +664,6 @@ elastickeyperms: {%- endif %} {% if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone'] %} - elasticfleet_kafka_key: x509.private_key_managed: - name: /etc/pki/elasticfleet-kafka.key @@ -696,17 +693,13 @@ elasticfleet_kafka_crt: - retry: attempts: 5 interval: 30 - cmd.run: - - name: "/usr/bin/openssl pkcs8 -in /etc/pki/elasticfleet-kafka.key -topk8 -out /etc/pki/elasticfleet-kafka.p8 -nocrypt" - - onchanges: - - x509: elasticfleet_kafka_key elasticfleet_kafka_cert_perms: file.managed: - replace: False - name: /etc/pki/elasticfleet-kafka.crt - mode: 640 - - user: 960 + - user: 947 - group: 939 elasticfleet_kafka_key_perms: @@ -714,187 +707,8 @@ elasticfleet_kafka_key_perms: - replace: False - name: /etc/pki/elasticfleet-kafka.key - mode: 640 - - user: 960 + - user: 947 - group: 939 - -elasticfleet_kafka_pkcs8_perms: - file.managed: - - replace: False - - name: /etc/pki/elasticfleet-kafka.p8 - - mode: 640 - - user: 960 - - group: 939 - -kafka_client_key: - x509.private_key_managed: - - name: /etc/pki/kafka-client.key - - keysize: 4096 - - backup: True - - new: True - {% if salt['file.file_exists']('/etc/pki/kafka-client.key') -%} - - prereq: - - x509: /etc/pki/kafka-client.crt - {%- endif %} - - retry: - attempts: 5 - interval: 30 - -kafka_client_crt: - x509.certificate_managed: - - name: /etc/pki/kafka-client.crt - - ca_server: {{ ca_server }} - - subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }} - - signing_policy: kafka 
- - private_key: /etc/pki/kafka-client.key - - CN: {{ GLOBALS.hostname }} - - days_remaining: 0 - - days_valid: 820 - - backup: True - - timeout: 30 - - retry: - attempts: 5 - interval: 30 - -kafka_client_key_perms: - file.managed: - - replace: False - - name: /etc/pki/kafka-client.key - - mode: 640 - - user: 960 - - group: 939 - -kafka_client_crt_perms: - file.managed: - - replace: False - - name: /etc/pki/kafka-client.crt - - mode: 640 - - user: 960 - - group: 939 - -{% endif %} - -{% if grains['role'] in ['so-manager', 'so-managersearch','so-receiver', 'so-standalone'] %} - -kafka_key: - x509.private_key_managed: - - name: /etc/pki/kafka.key - - keysize: 4096 - - backup: True - - new: True - {% if salt['file.file_exists']('/etc/pki/kafka.key') -%} - - prereq: - - x509: /etc/pki/kafka.crt - {%- endif %} - - retry: - attempts: 5 - interval: 30 - -kafka_crt: - x509.certificate_managed: - - name: /etc/pki/kafka.crt - - ca_server: {{ ca_server }} - - subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }} - - signing_policy: kafka - - private_key: /etc/pki/kafka.key - - CN: {{ GLOBALS.hostname }} - - days_remaining: 0 - - days_valid: 820 - - backup: True - - timeout: 30 - - retry: - attempts: 5 - interval: 30 - cmd.run: - - name: "/usr/bin/openssl pkcs12 -inkey /etc/pki/kafka.key -in /etc/pki/kafka.crt -export -out /etc/pki/kafka.p12 -nodes -passout pass:{{ kafka_password }}" - - onchanges: - - x509: /etc/pki/kafka.key -kafka_key_perms: - file.managed: - - replace: False - - name: /etc/pki/kafka.key - - mode: 640 - - user: 960 - - group: 939 - -kafka_crt_perms: - file.managed: - - replace: False - - name: /etc/pki/kafka.crt - - mode: 640 - - user: 960 - - group: 939 - -kafka_pkcs12_perms: - file.managed: - - replace: False - - name: /etc/pki/kafka.p12 - - mode: 640 - - user: 960 - - group: 939 - -{% endif %} - -# Standalone needs kafka-logstash for automated testing. Searchnode/manager search need it for logstash to consume from Kafka. -# Manager will have cert, but be unused until a pipeline is created and logstash enabled. 
-{% if grains['role'] in ['so-standalone', 'so-managersearch', 'so-searchnode', 'so-manager'] %} -kafka_logstash_key: - x509.private_key_managed: - - name: /etc/pki/kafka-logstash.key - - keysize: 4096 - - backup: True - - new: True - {% if salt['file.file_exists']('/etc/pki/kafka-logstash.key') -%} - - prereq: - - x509: /etc/pki/kafka-logstash.crt - {%- endif %} - - retry: - attempts: 5 - interval: 30 - -kafka_logstash_crt: - x509.certificate_managed: - - name: /etc/pki/kafka-logstash.crt - - ca_server: {{ ca_server }} - - subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }} - - signing_policy: kafka - - private_key: /etc/pki/kafka-logstash.key - - CN: {{ GLOBALS.hostname }} - - days_remaining: 0 - - days_valid: 820 - - backup: True - - timeout: 30 - - retry: - attempts: 5 - interval: 30 - cmd.run: - - name: "/usr/bin/openssl pkcs12 -inkey /etc/pki/kafka-logstash.key -in /etc/pki/kafka-logstash.crt -export -out /etc/pki/kafka-logstash.p12 -nodes -passout pass:{{ kafka_password }}" - - onchanges: - - x509: /etc/pki/kafka-logstash.key - -kafka_logstash_key_perms: - file.managed: - - replace: False - - name: /etc/pki/kafka-logstash.key - - mode: 640 - - user: 960 - - group: 939 - -kafka_logstash_crt_perms: - file.managed: - - replace: False - - name: /etc/pki/kafka-logstash.crt - - mode: 640 - - user: 960 - - group: 939 - -kafka_logstash_pkcs12_perms: - file.managed: - - replace: False - - name: /etc/pki/kafka-logstash.p12 - - mode: 640 - - user: 960 - - group: 931 - {% endif %} {% else %} From d5faf535c3adf6101d2f7ad6c1498956aeba0a10 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Wed, 10 Jul 2024 11:36:44 -0400 Subject: [PATCH 03/10] Only interact with logstash configuration when Kafka pipeline is enabled otherwise leave it default Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/defaults.yaml | 1 + salt/kafka/soc_kafka.yaml | 6 ++++++ salt/logstash/defaults.yaml | 3 +-- salt/logstash/init.sls | 6 +----- salt/logstash/map.jinja | 12 ++++++++++++ 5 files changed, 21 insertions(+), 7 deletions(-) diff --git a/salt/kafka/defaults.yaml b/salt/kafka/defaults.yaml index c20d8552c..21d6956ba 100644 --- a/salt/kafka/defaults.yaml +++ b/salt/kafka/defaults.yaml @@ -3,6 +3,7 @@ kafka: cluster_id: controllers: reset: + logstash: [] config: password: trustpass: diff --git a/salt/kafka/soc_kafka.yaml b/salt/kafka/soc_kafka.yaml index 872bf51f2..8087f9bdf 100644 --- a/salt/kafka/soc_kafka.yaml +++ b/salt/kafka/soc_kafka.yaml @@ -16,6 +16,12 @@ kafka: description: Disable and reset the Kafka cluster. This will remove all Kafka data including logs that may have not yet been ingested into Elasticsearch and reverts the grid to using REDIS as the global pipeline. This is useful when testing different Kafka configurations such as rearranging Kafka brokers / controllers allowing you to reset the cluster rather than manually fixing any issues arising from attempting to reassign a Kafka broker into a controller. Enter 'YES_RESET_KAFKA' and submit to disable and reset Kafka. Make any configuration changes required and re-enable Kafka when ready. This action CANNOT be reversed. advanced: True helpLink: kafka.html + logstash: + description: By default logstash is disabled when Kafka is enabled. This option allows you to specify any hosts you would like to re-enable logstash on alongside Kafka. 
+ forcedType: "[]string" + multiline: True + advanced: True + helpLink: kafka.html config: password: description: The password used for the Kafka certificates. diff --git a/salt/logstash/defaults.yaml b/salt/logstash/defaults.yaml index b4bc885f6..9930b7bcf 100644 --- a/salt/logstash/defaults.yaml +++ b/salt/logstash/defaults.yaml @@ -25,7 +25,7 @@ logstash: - so/0011_input_endgame.conf - so/0012_input_elastic_agent.conf.jinja - so/0013_input_lumberjack_fleet.conf - - so/9999_output_redis.conf.jinja + - so/9999_output_redis.conf.jinja receiver: - so/0011_input_endgame.conf - so/0012_input_elastic_agent.conf.jinja @@ -35,7 +35,6 @@ logstash: - so/0900_input_redis.conf.jinja - so/9805_output_elastic_agent.conf.jinja - so/9900_output_endgame.conf.jinja - - so/0800_input_kafka.conf.jinja custom0: [] custom1: [] custom2: [] diff --git a/salt/logstash/init.sls b/salt/logstash/init.sls index 3bc539b35..62b2a2ebb 100644 --- a/salt/logstash/init.sls +++ b/salt/logstash/init.sls @@ -4,13 +4,9 @@ # Elastic License 2.0. {% from 'logstash/map.jinja' import LOGSTASH_MERGED %} -{% from 'kafka/map.jinja' import KAFKAMERGED %} include: -{# Disable logstash when Kafka is enabled except when the role is standalone #} -{% if LOGSTASH_MERGED.enabled and grains.role == 'so-standalone' %} - - logstash.enabled -{% elif LOGSTASH_MERGED.enabled and not KAFKAMERGED.enabled %} +{% if LOGSTASH_MERGED.enabled %} - logstash.enabled {% else %} - logstash.disabled diff --git a/salt/logstash/map.jinja b/salt/logstash/map.jinja index 0bb6de2b7..2c3e02864 100644 --- a/salt/logstash/map.jinja +++ b/salt/logstash/map.jinja @@ -6,6 +6,7 @@ {% from 'vars/globals.map.jinja' import GLOBALS %} {% import_yaml 'logstash/defaults.yaml' as LOGSTASH_DEFAULTS %} {% set LOGSTASH_MERGED = salt['pillar.get']('logstash', LOGSTASH_DEFAULTS.logstash, merge=True) %} +{% set KAFKA_LOGSTASH = salt['pillar.get']('kafka:logstash', []) %} {# used to store the redis nodes that logstash needs to know about to pull from the queue #} {% set LOGSTASH_REDIS_NODES = [] %} @@ -30,3 +31,14 @@ {% endfor %} {% endfor %} +{# Append Kafka input pipeline when Kafka is enabled #} +{% if GLOBALS.pipeline == 'KAFKA' %} +{% do LOGSTASH_MERGED.defined_pipelines.search.append('so/0800_input_kafka.conf.jinja') %} +{% do LOGSTASH_MERGED.defined_pipelines.manager.append('so/0800_input_kafka.conf.jinja') %} +{# Disable logstash on manager & receiver nodes unless it has an override configured #} +{% if not KAFKA_LOGSTASH %} +{% if GLOBALS.role in ['so-manager', 'so-receiver'] and GLOBALS.hostname not in KAFKA_LOGSTASH %} +{% do LOGSTASH_MERGED.update({'enabled': False}) %} +{% endif %} +{% endif %} +{% endif %} \ No newline at end of file From 0d8fd42be3e6c79f2a7f3e32fec01bc8291e81f2 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Wed, 10 Jul 2024 11:37:07 -0400 Subject: [PATCH 04/10] update pillarwatch engine Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/salt/files/engines.conf | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/salt/salt/files/engines.conf b/salt/salt/files/engines.conf index 3a9b51207..15d55e18f 100644 --- a/salt/salt/files/engines.conf +++ b/salt/salt/files/engines.conf @@ -43,20 +43,20 @@ engines: - cmd.run: cmd: /usr/sbin/so-yaml.py replace /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.enabled True - cmd.run: - cmd: salt -C 'G@role:so-standalone or G@role:so-manager or G@role:so-managersearch or G@role:so-receiver' saltutil.kill_all_jobs + cmd: salt 
-C 'G@role:so-standalone or G@role:so-manager or G@role:so-managersearch or G@role:so-receiver or G@role:so-searchnode' saltutil.kill_all_jobs - cmd.run: cmd: salt-call state.apply kafka.nodes - cmd.run: - cmd: salt -C 'G@role:so-standalone or G@role:so-manager or G@role:so-managersearch or G@role:so-receiver' state.highstate + cmd: salt -C 'G@role:so-standalone or G@role:so-manager or G@role:so-managersearch or G@role:so-receiver or G@role:so-searchnode' state.highstate 'KAFKA': to: 'REDIS': - cmd.run: cmd: /usr/sbin/so-yaml.py replace /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.enabled False - cmd.run: - cmd: salt -C 'G@role:so-standalone or G@role:so-manager or G@role:so-managersearch or G@role:so-receiver' saltutil.kill_all_jobs + cmd: salt -C 'G@role:so-standalone or G@role:so-manager or G@role:so-managersearch or G@role:so-receiver or G@role:so-searchnode' saltutil.kill_all_jobs - cmd.run: - cmd: salt -C 'G@role:so-standalone or G@role:so-manager or G@role:so-managersearch or G@role:so-receiver' state.highstate + cmd: salt -C 'G@role:so-standalone or G@role:so-manager or G@role:so-managersearch or G@role:so-receiver or G@role:so-searchnode' state.highstate - files: - /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls - /opt/so/saltstack/local/pillar/kafka/adv_kafka.sls From cfe5c1d76a160dec4e7b1888b04bd26bd06eb977 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Wed, 10 Jul 2024 13:24:02 -0400 Subject: [PATCH 05/10] remove elasticsearch.ca from receiver allowed_states. Replaced by generated kafka trust Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/allowed_states.map.jinja | 1 - 1 file changed, 1 deletion(-) diff --git a/salt/allowed_states.map.jinja b/salt/allowed_states.map.jinja index 29ee968aa..37795e9d7 100644 --- a/salt/allowed_states.map.jinja +++ b/salt/allowed_states.map.jinja @@ -197,7 +197,6 @@ 'schedule', 'docker_clean', 'kafka', - 'elasticsearch.ca', 'stig' ], 'so-desktop': [ From 4a88dedcb88833907aa53aec7f44cac27e66ed50 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Wed, 10 Jul 2024 16:18:46 -0400 Subject: [PATCH 06/10] Fixin kafka.ssl state and include name for kafka_user Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/config.sls | 1 + salt/kafka/ssl.sls | 21 +++++++++++++-------- 2 files changed, 14 insertions(+), 8 deletions(-) diff --git a/salt/kafka/config.sls b/salt/kafka/config.sls index 1cfd1d3eb..e9222388b 100644 --- a/salt/kafka/config.sls +++ b/salt/kafka/config.sls @@ -14,6 +14,7 @@ kafka_group: kafka_user: user.present: + - name: kafka - uid: 960 - gid: 960 - home: /opt/so/conf/kafka diff --git a/salt/kafka/ssl.sls b/salt/kafka/ssl.sls index c4e46ac8a..50a01b22c 100644 --- a/salt/kafka/ssl.sls +++ b/salt/kafka/ssl.sls @@ -4,10 +4,9 @@ # Elastic License 2.0. 
{% from 'allowed_states.map.jinja' import allowed_states %} -{% if sls in allowed_states %} -{% from 'vars/globals.map.jinja' import GLOBALS %} - -{% set kafka_password = salt['pillar.get']('kafka:password') %} +{% if sls.split('.')[0] in allowed_states %} +{% from 'vars/globals.map.jinja' import GLOBALS %} +{% set kafka_password = salt['pillar.get']('kafka:password') %} include: - ca.dirs @@ -20,10 +19,9 @@ include: {% endfor %} {% set ca_server = global_ca_server[0] %} - {% if GLOBALS.pipeline == "KAFKA" %} -{% if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone'] %} +{% if GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone'] %} kafka_client_key: x509.private_key_managed: - name: /etc/pki/kafka-client.key @@ -71,7 +69,7 @@ kafka_client_crt_perms: - group: 939 {% endif %} -{% if grains['role'] in ['so-manager', 'so-managersearch','so-receiver', 'so-standalone'] %} +{% if GLOBALS.role in ['so-manager', 'so-managersearch','so-receiver', 'so-standalone'] %} kafka_key: x509.private_key_managed: - name: /etc/pki/kafka.key @@ -132,7 +130,7 @@ kafka_pkcs12_perms: # Standalone needs kafka-logstash for automated testing. Searchnode/manager search need it for logstash to consume from Kafka. # Manager will have cert, but be unused until a pipeline is created and logstash enabled. -{% if grains['role'] in ['so-standalone', 'so-managersearch', 'so-searchnode', 'so-manager'] %} +{% if GLOBALS.role in ['so-standalone', 'so-managersearch', 'so-searchnode', 'so-manager'] %} kafka_logstash_key: x509.private_key_managed: - name: /etc/pki/kafka-logstash.key @@ -191,6 +189,13 @@ kafka_logstash_pkcs12_perms: - user: 931 - group: 939 +{% endif %} {% endif %} +{% else %} + +{{sls}}_state_not_allowed: + test.fail_without_changes: + - name: {{sls}}_state_not_allowed + {% endif %} \ No newline at end of file From ff29d9ca51501ce2df877ed0d2b30754f6ef22ff Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Thu, 11 Jul 2024 10:23:51 -0400 Subject: [PATCH 07/10] Update log-check to ignore kafka data directories Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/common/tools/sbin/so-log-check | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/common/tools/sbin/so-log-check b/salt/common/tools/sbin/so-log-check index cf1691589..2a86b9f2c 100755 --- a/salt/common/tools/sbin/so-log-check +++ b/salt/common/tools/sbin/so-log-check @@ -241,6 +241,7 @@ exclude_log "mysqld.log" # MySQL is removed as of 2.4.70, logs may still be on exclude_log "soctopus.log" # Soctopus is removed as of 2.4.70, logs may still be on disk exclude_log "agentstatus.log" # ignore this log since it tracks agents in error state exclude_log "detections_runtime-status_yara.log" # temporarily ignore this log until Detections is more stable +exclude_log "/nsm/kafka/data/" # ignore Kafka data directory from log check. 
for log_file in $(cat /tmp/log_check_files); do status "Checking log file $log_file" From 4182ff66a0368a181c79e12c145baa7e5e1c68e3 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Thu, 11 Jul 2024 16:37:16 -0400 Subject: [PATCH 08/10] rearrange kafka pillar, declutters SOC ui Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/config.map.jinja | 4 ++-- salt/kafka/ssl.sls | 2 +- salt/kafka/tools/sbin_jinja/so-kafka-trust | 2 +- .../pipelines/config/so/0800_input_kafka.conf.jinja | 4 ++-- salt/manager/tools/sbin/soup | 8 +++++++- setup/so-functions | 5 +++-- 6 files changed, 16 insertions(+), 9 deletions(-) diff --git a/salt/kafka/config.map.jinja b/salt/kafka/config.map.jinja index b054e0656..1e43a3ec7 100644 --- a/salt/kafka/config.map.jinja +++ b/salt/kafka/config.map.jinja @@ -6,8 +6,8 @@ {% from 'vars/globals.map.jinja' import GLOBALS %} {% set KAFKA_NODES_PILLAR = salt['pillar.get']('kafka:nodes') %} -{% set KAFKA_PASSWORD = salt['pillar.get']('kafka:password') %} -{% set KAFKA_TRUSTPASS = salt['pillar.get']('kafka:trustpass') %} +{% set KAFKA_PASSWORD = salt['pillar.get']('kafka:config:password') %} +{% set KAFKA_TRUSTPASS = salt['pillar.get']('kafka:config:trustpass') %} {# Create list of KRaft controllers #} {% set controllers = [] %} diff --git a/salt/kafka/ssl.sls b/salt/kafka/ssl.sls index 50a01b22c..8604d4e7e 100644 --- a/salt/kafka/ssl.sls +++ b/salt/kafka/ssl.sls @@ -6,7 +6,7 @@ {% from 'allowed_states.map.jinja' import allowed_states %} {% if sls.split('.')[0] in allowed_states %} {% from 'vars/globals.map.jinja' import GLOBALS %} -{% set kafka_password = salt['pillar.get']('kafka:password') %} +{% set kafka_password = salt['pillar.get']('kafka:config:password') %} include: - ca.dirs diff --git a/salt/kafka/tools/sbin_jinja/so-kafka-trust b/salt/kafka/tools/sbin_jinja/so-kafka-trust index 8d404cb9a..55ba9612e 100644 --- a/salt/kafka/tools/sbin_jinja/so-kafka-trust +++ b/salt/kafka/tools/sbin_jinja/so-kafka-trust @@ -4,7 +4,7 @@ # or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at # https://securityonion.net/license; you may not use this file except in compliance with the # Elastic License 2.0. -{% set TRUSTPASS = salt['pillar.get']('kafka:trustpass') %} +{% set TRUSTPASS = salt['pillar.get']('kafka:config:trustpass') %} if [ ! 
-f /opt/so/saltstack/local/salt/kafka/files/kafka-truststore ]; then docker run -v /etc/pki/ca.crt:/etc/pki/ca.crt --name so-kafkatrust --user root --entrypoint /opt/java/openjdk/bin/keytool {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-kafka:{{ GLOBALS.so_version }} -import -file /etc/pki/ca.crt -alias SOS -keystore /etc/pki/kafka-truststore -storepass {{ TRUSTPASS }} -storetype jks -noprompt diff --git a/salt/logstash/pipelines/config/so/0800_input_kafka.conf.jinja b/salt/logstash/pipelines/config/so/0800_input_kafka.conf.jinja index 00dd6d530..7478375b0 100644 --- a/salt/logstash/pipelines/config/so/0800_input_kafka.conf.jinja +++ b/salt/logstash/pipelines/config/so/0800_input_kafka.conf.jinja @@ -1,5 +1,5 @@ -{%- set kafka_password = salt['pillar.get']('kafka:password') %} -{%- set kafka_trustpass = salt['pillar.get']('kafka:trustpass') %} +{%- set kafka_password = salt['pillar.get']('kafka:config:password') %} +{%- set kafka_trustpass = salt['pillar.get']('kafka:config:trustpass') %} {%- set kafka_brokers = salt['pillar.get']('kafka:nodes', {}) %} {%- set brokers = [] %} diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 019f29ebb..0ace4a5b2 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -674,7 +674,13 @@ up_to_2.4.80() { up_to_2.4.90() { kafkatrust=$(get_random_value) - echo ' trustpass: '$kafkatrust >> /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls + # rearranging the kafka pillar to reduce clutter in SOC UI + kafkasavedpass=$(so-yaml.py get /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.password) + kafkatrimpass=$(echo $kafkasavedpass | awk '{print $1}') + so-yaml.py remove /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.password + echo ' config:' >> /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls + echo ' password: '$kafkatrimpass >> /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls + echo ' trustpass: '$kafkatrust >> /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls INSTALLEDVERSION=2.4.90 diff --git a/setup/so-functions b/setup/so-functions index aa9eb1909..02467117e 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1187,8 +1187,9 @@ kafka_pillar() { printf '%s\n'\ "kafka:"\ " cluster_id: $KAFKACLUSTERID"\ - " password: $KAFKAPASS"\ - " trustpass: $KAFKATRUST" > $kafka_pillar_file + " config:"\ + " password: $KAFKAPASS"\ + " trustpass: $KAFKATRUST" > $kafka_pillar_file } logrotate_pillar() { From 7ea8d5efd01bd26cc91dbef41e4d287697a4b4ec Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Fri, 12 Jul 2024 14:44:10 -0400 Subject: [PATCH 09/10] Remove redis input pipeline from searchnodes when global pipeline is Kafka Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/logstash/map.jinja | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/logstash/map.jinja b/salt/logstash/map.jinja index 2c3e02864..8fc3291e5 100644 --- a/salt/logstash/map.jinja +++ b/salt/logstash/map.jinja @@ -33,6 +33,7 @@ {# Append Kafka input pipeline when Kafka is enabled #} {% if GLOBALS.pipeline == 'KAFKA' %} +{% do LOGSTASH_MERGED.defined_pipelines.search.remove('so/0900_input_redis.conf.jinja') %} {% do LOGSTASH_MERGED.defined_pipelines.search.append('so/0800_input_kafka.conf.jinja') %} {% do LOGSTASH_MERGED.defined_pipelines.manager.append('so/0800_input_kafka.conf.jinja') %} {# Disable logstash on manager & receiver nodes unless it has an override configured #} From 5d322ebc0b4a6d17521503c05c824aa574619848 Mon Sep 17 00:00:00 
2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Fri, 12 Jul 2024 14:45:11 -0400 Subject: [PATCH 10/10] Allow searchnodes to run kafka.ssl state for kafka-logstash cert generation Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/ssl.sls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/kafka/ssl.sls b/salt/kafka/ssl.sls index 8604d4e7e..04b6b4ba7 100644 --- a/salt/kafka/ssl.sls +++ b/salt/kafka/ssl.sls @@ -4,7 +4,7 @@ # Elastic License 2.0. {% from 'allowed_states.map.jinja' import allowed_states %} -{% if sls.split('.')[0] in allowed_states %} +{% if sls.split('.')[0] in allowed_states or sls in allowed_states %} {% from 'vars/globals.map.jinja' import GLOBALS %} {% set kafka_password = salt['pillar.get']('kafka:config:password') %}
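
Note: after these patches the Kafka secrets live under a nested config key in the kafka pillar. For a fresh install, kafka_pillar() in setup/so-functions writes roughly the layout sketched below, and soup appends the same config/password/trustpass keys when upgrading an existing grid. Indentation is approximate and the values are illustrative placeholders, not real output:

kafka:
  cluster_id: <random 22-character id>
  config:
    password: <random certificate/keystore password>
    trustpass: <random truststore password>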