Compare commits

...

70 Commits

Author SHA1 Message Date
m0duspwnens
d91dd0dd3c watch some values 2024-04-29 17:14:00 -04:00
m0duspwnens
a0388fd568 engines config for valueWatch 2024-04-29 14:02:10 -04:00
m0duspwnens
05244cfd75 watch files change engine 2024-04-24 13:19:39 -04:00
m0duspwnens
6c5e0579cf logging changes. ensure salt master has pillarWatch engine 2024-04-19 09:32:32 -04:00
m0duspwnens
1f6eb9cdc3 match keys better. go through files reverse first found is prio 2024-04-18 13:50:37 -04:00
m0duspwnens
610dd2c08d improve it 2024-04-18 11:11:14 -04:00
m0duspwnens
506bbd314d more comments, better logging 2024-04-18 10:26:10 -04:00
m0duspwnens
4caa6a10b5 watch a pillar in files and take action 2024-04-17 18:09:04 -04:00
m0duspwnens
4b79623ce3 watch pillar files for changes and do something 2024-04-16 16:51:35 -04:00
m0duspwnens
c4994a208b restart salt minion if a manager and signing policies change 2024-04-15 11:37:21 -04:00
m0duspwnens
bb983d4ba2 just broker as default process 2024-04-12 16:16:03 -04:00
m0duspwnens
c014508519 need /opt/so/conf/ca/cacerts on receiver for kafka to run 2024-04-12 13:50:25 -04:00
reyesj2
fcfbb1e857 Merge kaffytaffy
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-04-12 12:50:56 -04:00
reyesj2
911ee579a9 Typo
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-04-12 12:16:20 -04:00
reyesj2
a6ff92b099 Note to remove so-kafka-clusterid. Update soup and setup to generate needed kafka pillar values
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-04-12 12:11:18 -04:00
m0duspwnens
d73ba7dd3e order kafka pillar assignment 2024-04-12 11:55:26 -04:00
m0duspwnens
04ddcd5c93 add receiver managersearch and standalone to kafka.nodes pillar 2024-04-12 11:52:57 -04:00
reyesj2
af29ae1968 Merge kaffytaffy
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-04-12 11:43:46 -04:00
reyesj2
fbd3cff90d Make global.pipeline use GLOBALMERGED value
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-04-12 11:21:19 -04:00
m0duspwnens
0ed9894b7e create kratos local pillar dirs during setup 2024-04-12 11:19:46 -04:00
m0duspwnens
a54a72c269 move kafka_cluster_id to kafka:cluster_id 2024-04-12 11:19:20 -04:00
m0duspwnens
f514e5e9bb add kafka to receiver 2024-04-11 16:23:05 -04:00
reyesj2
3955587372 Use global.pipeline for redis / kafka states
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-04-11 16:20:09 -04:00
reyesj2
6b28dc72e8 Update annotation for global.pipeline
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-04-11 15:38:33 -04:00
reyesj2
ca7253a589 Run kafka-clusterid script when pillar values are missing
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-04-11 15:38:03 -04:00
reyesj2
af53dcda1b Remove references to kafkanode
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-04-11 15:32:00 -04:00
m0duspwnens
d3bd56b131 disable logstash and redis if kafka enabled 2024-04-10 14:13:27 -04:00
m0duspwnens
e9e61ea2d8 Merge remote-tracking branch 'origin/2.4/dev' into kaffytaffy 2024-04-10 13:14:13 -04:00
m0duspwnens
86b984001d annotations and enable/disable from ui 2024-04-10 10:39:06 -04:00
m0duspwnens
fa7f8104c8 Merge remote-tracking branch 'origin/reyesj2/kafka' into kaffytaffy 2024-04-09 11:13:02 -04:00
m0duspwnens
bd5fe43285 jinja config files 2024-04-09 11:07:53 -04:00
m0duspwnens
d38051e806 fix client and server properties formatting 2024-04-09 10:36:37 -04:00
m0duspwnens
daa5342986 items not keys in for loop 2024-04-09 10:22:05 -04:00
m0duspwnens
c48436ccbf fix dict update 2024-04-09 10:19:17 -04:00
m0duspwnens
7aa00faa6c fix var 2024-04-09 09:31:54 -04:00
m0duspwnens
6217a7b9a9 add defaults and jijafy kafka config 2024-04-09 09:27:21 -04:00
reyesj2
d67ebabc95 Remove logstash output to kafka pipeline. Add additional topics for searchnodes to ingest and add partition/offset info to event
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-04-08 16:38:03 -04:00
reyesj2
65274e89d7 Add client_id to logstash pipeline. To identify which searchnode is pulling messages
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-04-05 15:38:00 -04:00
reyesj2
721e04f793 initial logstash input from kafka over ssl
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-04-05 13:37:14 -04:00
reyesj2
433309ef1a Generate kafka cluster id if it doesn't exist
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-04-05 09:35:12 -04:00
reyesj2
735cfb4c29 Autogenerate kafka topics when a message it sent to non-existing topic
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-04-04 16:45:58 -04:00
reyesj2
6202090836 Merge remote-tracking branch 'origin/kaffytaffy' into reyesj2/kafka 2024-04-04 16:27:06 -04:00
reyesj2
436cbc1f06 Add kafka signing_policy for client/server auth. Add kafka-client cert on manager so manager can interact with kafka using its own cert
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-04-04 16:21:29 -04:00
reyesj2
40b08d737c Generate kafka keystore on changes to kafka.key
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-04-04 16:16:53 -04:00
m0duspwnens
4c5b42b898 restart container on server config changes 2024-04-04 15:47:01 -04:00
m0duspwnens
7a6b72ebac add so-kafka to manager for firewall 2024-04-04 15:46:11 -04:00
m0duspwnens
1b8584d4bb allow manager to manager on kafka ports 2024-04-03 15:36:35 -04:00
reyesj2
13105c4ab3 Generate certs for use with elasticfleet kafka output policy
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-04-03 14:34:07 -04:00
reyesj2
dc27bbb01d Set kafka heap size. To be later configured from SOC
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-04-03 14:30:52 -04:00
m0duspwnens
b863060df1 kafka broker and listener on 0.0.0.0 2024-04-03 11:05:24 -04:00
m0duspwnens
18f95e867f port 9093 for kafka docker 2024-04-03 10:24:53 -04:00
m0duspwnens
ed6137a76a allow sensor and searchnode to connect to manager kafka ports 2024-04-03 10:24:10 -04:00
m0duspwnens
c3f02a698e add kafka nodes as extra hosts for the container 2024-04-03 10:23:36 -04:00
m0duspwnens
db106f8ca1 listen on 0.0.0.0 for CONTROLLER 2024-04-03 10:22:47 -04:00
m0duspwnens
8e47cc73a5 kafka.nodes pillar to lf 2024-04-03 08:54:17 -04:00
m0duspwnens
639bf05081 add so-manager to kafka.nodes pillar 2024-04-03 08:52:26 -04:00
m0duspwnens
4e142e0212 put alphabetical 2024-04-02 16:47:35 -04:00
m0duspwnens
c9bf1c86c6 Merge remote-tracking branch 'origin/reyesj2/kafka' into kaffytaffy 2024-04-02 16:40:47 -04:00
reyesj2
82830c8173 Fix typos and fix error related to elasticsearch saltstate being called from logstash state. Logstash will be removed from kafkanodes in future
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-04-02 16:37:39 -04:00
reyesj2
7f5741c43b Fix kafka storage setup
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-04-02 16:36:22 -04:00
reyesj2
643d4831c1 CRLF -> LF
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-04-02 16:35:14 -04:00
reyesj2
b032eed22a Update kafka to use manager docker registry
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-04-02 16:34:06 -04:00
reyesj2
1b49c8540e Fix kafka keystore script
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-04-02 16:32:15 -04:00
m0duspwnens
f7534a0ae3 make manager download so-kafka container 2024-04-02 16:01:12 -04:00
m0duspwnens
780ad9eb10 add kafka to manager nodes 2024-04-02 15:50:25 -04:00
m0duspwnens
e25bc8efe4 Merge remote-tracking branch 'origin/reyesj2/kafka' into kaffytaffy 2024-04-02 13:36:47 -04:00
reyesj2
26abe90671 Removed duplicate kafka setup
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-04-02 12:19:46 -04:00
reyesj2
446f1ffdf5 merge 2.4/dev
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-03-25 13:55:48 -04:00
reyesj2
8cf29682bb Update to merge in 2.4/dev
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2023-11-29 13:41:23 -05:00
reyesj2
86dc7cc804 Kafka init
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2023-11-29 13:34:25 -05:00
46 changed files with 1496 additions and 19 deletions


@@ -19,4 +19,4 @@ role:
receiver:
standalone:
searchnode:
sensor:
sensor:

pillar/kafka/nodes.sls (new file, 30 lines)

@@ -0,0 +1,30 @@
{% set current_kafkanodes = salt.saltutil.runner('mine.get', tgt='G@role:so-manager or G@role:so-managersearch or G@role:so-standalone or G@role:so-receiver', fun='network.ip_addrs', tgt_type='compound') %}
{% set pillar_kafkanodes = salt['pillar.get']('kafka:nodes', default={}, merge=True) %}
{% set existing_ids = [] %}
{% for node in pillar_kafkanodes.values() %}
{% if node.get('nodeid') %}
{% do existing_ids.append(node['nodeid']) %}
{% endif %}
{% endfor %}
{% set all_possible_ids = range(1, 256)|list %}
{% set available_ids = [] %}
{% for id in all_possible_ids %}
{% if id not in existing_ids %}
{% do available_ids.append(id) %}
{% endif %}
{% endfor %}
{% set final_nodes = pillar_kafkanodes.copy() %}
{% for minionid, ip in current_kafkanodes.items() %}
{% set hostname = minionid.split('_')[0] %}
{% if hostname not in final_nodes %}
{% set new_id = available_ids.pop(0) %}
{% do final_nodes.update({hostname: {'nodeid': new_id, 'ip': ip[0]}}) %}
{% endif %}
{% endfor %}
kafka:
nodes: {{ final_nodes|tojson }}
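A minimal Python sketch of the ID-assignment logic in this new pillar file, run against hypothetical pillar and mine data (the real inputs are the kafka:nodes pillar and the mine.get call above): IDs already recorded in the pillar are kept, and each newly discovered node receives the lowest free ID from 1-255.

```python
# Hypothetical inputs; the real data comes from pillar and the Salt mine.
existing = {"mgr": {"nodeid": 1, "ip": "10.0.0.5"}}                 # kafka:nodes pillar
mine = {"mgr_manager": ["10.0.0.5"], "rcv_receiver": ["10.0.0.9"]}  # mine.get result

# Collect IDs already in use, then build the pool of free IDs (1-255).
used_ids = {n["nodeid"] for n in existing.values() if n.get("nodeid")}
available = [i for i in range(1, 256) if i not in used_ids]

final_nodes = dict(existing)
for minion_id, ips in mine.items():
    hostname = minion_id.split("_")[0]          # "rcv_receiver" -> "rcv"
    if hostname not in final_nodes:
        # New node: assign the lowest free ID and its first mine-reported IP.
        final_nodes[hostname] = {"nodeid": available.pop(0), "ip": ips[0]}

print(final_nodes)
# {'mgr': {'nodeid': 1, 'ip': '10.0.0.5'}, 'rcv': {'nodeid': 2, 'ip': '10.0.0.9'}}
```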


@@ -61,6 +61,9 @@ base:
- backup.adv_backup
- minions.{{ grains.id }}
- minions.adv_{{ grains.id }}
- kafka.nodes
- kafka.soc_kafka
- kafka.adv_kafka
- stig.soc_stig
'*_sensor':
@@ -176,6 +179,9 @@ base:
- minions.{{ grains.id }}
- minions.adv_{{ grains.id }}
- stig.soc_stig
- kafka.nodes
- kafka.soc_kafka
- kafka.adv_kafka
'*_heavynode':
- elasticsearch.auth
@@ -232,6 +238,9 @@ base:
- redis.adv_redis
- minions.{{ grains.id }}
- minions.adv_{{ grains.id }}
- kafka.nodes
- kafka.soc_kafka
- kafka.adv_kafka
'*_import':
- secrets


@@ -101,7 +101,8 @@
'utility',
'schedule',
'docker_clean',
'stig'
'stig',
'kafka'
],
'so-managersearch': [
'salt.master',
@@ -122,7 +123,8 @@
'utility',
'schedule',
'docker_clean',
'stig'
'stig',
'kafka'
],
'so-searchnode': [
'ssl',
@@ -156,7 +158,8 @@
'schedule',
'tcpreplay',
'docker_clean',
'stig'
'stig',
'kafka'
],
'so-sensor': [
'ssl',
@@ -187,7 +190,9 @@
'telegraf',
'firewall',
'schedule',
'docker_clean'
'docker_clean',
'kafka',
'elasticsearch.ca'
],
'so-desktop': [
'ssl',


@@ -70,3 +70,17 @@ x509_signing_policies:
- authorityKeyIdentifier: keyid,issuer:always
- days_valid: 820
- copypath: /etc/pki/issued_certs/
kafka:
- minions: '*'
- signing_private_key: /etc/pki/ca.key
- signing_cert: /etc/pki/ca.crt
- C: US
- ST: Utah
- L: Salt Lake City
- basicConstraints: "critical CA:false"
- keyUsage: "digitalSignature, keyEncipherment"
- subjectKeyIdentifier: hash
- authorityKeyIdentifier: keyid,issuer:always
- extendedKeyUsage: "serverAuth, clientAuth"
- days_valid: 820
- copypath: /etc/pki/issued_certs/


@@ -50,6 +50,7 @@ container_list() {
"so-idh"
"so-idstools"
"so-influxdb"
"so-kafka"
"so-kibana"
"so-kratos"
"so-logstash"
@@ -64,7 +65,7 @@ container_list() {
"so-strelka-manager"
"so-suricata"
"so-telegraf"
"so-zeek"
"so-zeek"
)
else
TRUSTED_CONTAINERS=(


@@ -185,3 +185,11 @@ docker:
custom_bind_mounts: []
extra_hosts: []
extra_env: []
'so-kafka':
final_octet: 88
port_bindings:
- 0.0.0.0:9092:9092
- 0.0.0.0:9093:9093
custom_bind_mounts: []
extra_hosts: []
extra_env: []


@@ -65,3 +65,4 @@ docker:
so-steno: *dockerOptions
so-suricata: *dockerOptions
so-zeek: *dockerOptions
so-kafka: *dockerOptions


@@ -4,7 +4,7 @@
# Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% if sls.split('.')[0] in allowed_states or sls in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
# Move our new CA over so Elastic and Logstash can use SSL with the internal CA


@@ -27,6 +27,7 @@
'so-elastic-fleet',
'so-elastic-fleet-package-registry',
'so-influxdb',
'so-kafka',
'so-kibana',
'so-kratos',
'so-logstash',
@@ -80,6 +81,7 @@
{% set NODE_CONTAINERS = [
'so-logstash',
'so-redis',
'so-kafka'
] %}
{% elif GLOBALS.role == 'so-idh' %}


@@ -90,6 +90,11 @@ firewall:
tcp:
- 8086
udp: []
kafka:
tcp:
- 9092
- 9093
udp: []
kibana:
tcp:
- 5601
@@ -364,6 +369,7 @@ firewall:
- elastic_agent_update
- localrules
- sensoroni
- kafka
fleet:
portgroups:
- elasticsearch_rest
@@ -399,6 +405,7 @@ firewall:
- docker_registry
- influxdb
- sensoroni
- kafka
searchnode:
portgroups:
- redis
@@ -412,6 +419,7 @@ firewall:
- elastic_agent_data
- elastic_agent_update
- sensoroni
- kafka
heavynode:
portgroups:
- redis
@@ -1275,14 +1283,17 @@ firewall:
- beats_5044
- beats_5644
- elastic_agent_data
- kafka
searchnode:
portgroups:
- redis
- beats_5644
- kafka
managersearch:
portgroups:
- redis
- beats_5644
- kafka
self:
portgroups:
- redis


@@ -115,6 +115,9 @@ firewall:
influxdb:
tcp: *tcpsettings
udp: *udpsettings
kafka:
tcp: *tcpsettings
udp: *udpsettings
kibana:
tcp: *tcpsettings
udp: *udpsettings
@@ -932,7 +935,6 @@ firewall:
portgroups: *portgroupshost
customhostgroup9:
portgroups: *portgroupshost
idh:
chain:
DOCKER-USER:


@@ -1,2 +1,3 @@
global:
pcapengine: STENO
pcapengine: STENO
pipeline: REDIS


@@ -36,9 +36,10 @@ global:
global: True
advanced: True
pipeline:
description: Sets which pipeline technology for events to use. Currently only Redis is supported.
description: Sets which pipeline technology for events to use. Currently only Redis is fully supported. Kafka is experimental and requires a Security Onion Pro license.
regex: ^(REDIS|KAFKA)$
regexFailureMessage: You must enter either REDIS or KAFKA.
global: True
readonly: True
advanced: True
repo_host:
description: Specify the host where operating system packages will be served from.

salt/kafka/config.sls (new file, 106 lines)

@@ -0,0 +1,106 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% set kafka_ips_logstash = [] %}
{% set kafka_ips_kraft = [] %}
{% set kafkanodes = salt['pillar.get']('kafka:nodes', {}) %}
{% set kafka_ip = GLOBALS.node_ip %}
{# Create list for kafka <-> logstash/searchnode communications #}
{% for node, node_data in kafkanodes.items() %}
{% do kafka_ips_logstash.append(node_data['ip'] + ":9092") %}
{% endfor %}
{% set kafka_server_list = "','".join(kafka_ips_logstash) %}
{# Create a list for kraft controller <-> kraft controller communications. Used for Kafka metadata management #}
{% for node, node_data in kafkanodes.items() %}
{% do kafka_ips_kraft.append(node_data['nodeid'] ~ "@" ~ node_data['ip'] ~ ":9093") %}
{% endfor %}
{% set kraft_server_list = "','".join(kafka_ips_kraft) %}
include:
- ssl
kafka_group:
group.present:
- name: kafka
- gid: 960
kafka:
user.present:
- uid: 960
- gid: 960
{# Future tools to query kafka directly / show consumer groups
kafka_sbin_tools:
file.recurse:
- name: /usr/sbin
- source: salt://kafka/tools/sbin
- user: 960
- group: 960
- file_mode: 755 #}
kafka_sbin_jinja_tools:
file.recurse:
- name: /usr/sbin
- source: salt://kafka/tools/sbin_jinja
- user: 960
- group: 960
- file_mode: 755
- template: jinja
- defaults:
GLOBALS: {{ GLOBALS }}
kafka_log_dir:
file.directory:
- name: /opt/so/log/kafka
- user: 960
- group: 960
- makedirs: True
kafka_data_dir:
file.directory:
- name: /nsm/kafka/data
- user: 960
- group: 960
- makedirs: True
kafka_generate_keystore:
cmd.run:
- name: "/usr/sbin/so-kafka-generate-keystore"
- onchanges:
- x509: /etc/pki/kafka.key
kafka_keystore_perms:
file.managed:
- replace: False
- name: /etc/pki/kafka.jks
- mode: 640
- user: 960
- group: 939
{% for sc in ['server', 'client'] %}
kafka_kraft_{{sc}}_properties:
file.managed:
- source: salt://kafka/etc/{{sc}}.properties.jinja
- name: /opt/so/conf/kafka/{{sc}}.properties
- template: jinja
- user: 960
- group: 960
- makedirs: True
- show_changes: False
{% endfor %}
{% else %}
{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}
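A sketch of the two address lists built at the top of this state, with hypothetical kafka:nodes data. The odd "','" separator is deliberate: the rendered value is embedded between single quotes by the consuming template (as the Logstash kafka input config later in this diff does), producing a properly quoted list.

```python
# Hypothetical kafka:nodes pillar data.
nodes = {
    "mgr": {"nodeid": 1, "ip": "10.0.0.5"},
    "rcv": {"nodeid": 2, "ip": "10.0.0.9"},
}

# kafka <-> logstash/searchnode bootstrap list (broker port 9092).
kafka_server_list = "','".join(d["ip"] + ":9092" for d in nodes.values())

# KRaft controller quorum list (nodeid@ip, controller port 9093).
kraft_server_list = "','".join(f"{d['nodeid']}@{d['ip']}:9093" for d in nodes.values())

print(kafka_server_list)  # 10.0.0.5:9092','10.0.0.9:9092
print(kraft_server_list)  # 1@10.0.0.5:9093','2@10.0.0.9:9093
```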

salt/kafka/defaults.yaml (new file, 39 lines)

@@ -0,0 +1,39 @@
kafka:
enabled: False
config:
server:
advertised_x_listeners:
auto_x_create_x_topics_x_enable: true
controller_x_listener_x_names: CONTROLLER
controller_x_quorum_x_voters:
inter_x_broker_x_listener_x_name: BROKER
listeners: BROKER://0.0.0.0:9092,CONTROLLER://0.0.0.0:9093
listener_x_security_x_protocol_x_map: CONTROLLER:SSL,BROKER:SSL
log_x_dirs: /nsm/kafka/data
log_x_retention_x_check_x_interval_x_ms: 300000
log_x_retention_x_hours: 168
log_x_segment_x_bytes: 1073741824
node_x_id:
num_x_io_x_threads: 8
num_x_network_x_threads: 3
num_x_partitions: 1
num_x_recovery_x_threads_x_per_x_data_x_dir: 1
offsets_x_topic_x_replication_x_factor: 1
process_x_roles: broker
socket_x_receive_x_buffer_x_bytes: 102400
socket_x_request_x_max_x_bytes: 104857600
socket_x_send_x_buffer_x_bytes: 102400
ssl_x_keystore_x_location: /etc/pki/kafka.jks
ssl_x_keystore_x_password: changeit
ssl_x_keystore_x_type: JKS
ssl_x_truststore_x_location: /etc/pki/java/sos/cacerts
ssl_x_truststore_x_password: changeit
transaction_x_state_x_log_x_min_x_isr: 1
transaction_x_state_x_log_x_replication_x_factor: 1
client:
security_x_protocol: SSL
ssl_x_truststore_x_location: /etc/pki/java/sos/cacerts
ssl_x_truststore_x_password: changeit
ssl_x_keystore_x_location: /etc/pki/kafka.jks
ssl_x_keystore_x_type: JKS
ssl_x_keystore_x_password: changeit

salt/kafka/disabled.sls (new file, 16 lines)

@@ -0,0 +1,16 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
include:
- kafka.sostatus
so-kafka:
docker_container.absent:
- force: True
so-kafka_so-status.disabled:
file.comment:
- name: /opt/so/conf/so-status/so-status.conf
- regex: ^so-kafka$

salt/kafka/enabled.sls (new file, 64 lines)

@@ -0,0 +1,64 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'docker/docker.map.jinja' import DOCKER %}
{% set KAFKANODES = salt['pillar.get']('kafka:nodes', {}) %}
include:
- elasticsearch.ca
- kafka.sostatus
- kafka.config
- kafka.storage
so-kafka:
docker_container.running:
- image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-kafka:{{ GLOBALS.so_version }}
- hostname: so-kafka
- name: so-kafka
- networks:
- sobridge:
- ipv4_address: {{ DOCKER.containers['so-kafka'].ip }}
- user: kafka
- environment:
- KAFKA_HEAP_OPTS=-Xmx2G -Xms1G
- extra_hosts:
{% for node in KAFKANODES %}
- {{ node }}:{{ KAFKANODES[node].ip }}
{% endfor %}
{% if DOCKER.containers['so-kafka'].extra_hosts %}
{% for XTRAHOST in DOCKER.containers['so-kafka'].extra_hosts %}
- {{ XTRAHOST }}
{% endfor %}
{% endif %}
- port_bindings:
{% for BINDING in DOCKER.containers['so-kafka'].port_bindings %}
- {{ BINDING }}
{% endfor %}
- binds:
- /etc/pki/kafka.jks:/etc/pki/kafka.jks
- /opt/so/conf/ca/cacerts:/etc/pki/java/sos/cacerts
- /nsm/kafka/data/:/nsm/kafka/data/:rw
- /opt/so/conf/kafka/server.properties:/kafka/config/kraft/server.properties
- /opt/so/conf/kafka/client.properties:/kafka/config/kraft/client.properties
- watch:
{% for sc in ['server', 'client'] %}
- file: kafka_kraft_{{sc}}_properties
{% endfor %}
delete_so-kafka_so-status.disabled:
file.uncomment:
- name: /opt/so/conf/so-status/so-status.conf
- regex: ^so-kafka$
{% else %}
{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}


@@ -0,0 +1,7 @@
{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
https://securityonion.net/license; you may not use this file except in compliance with the
Elastic License 2.0. #}
{% from 'kafka/map.jinja' import KAFKAMERGED -%}
{{ KAFKAMERGED.config.client | yaml(False) | replace("_x_", ".") }}


@@ -0,0 +1,7 @@
{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
https://securityonion.net/license; you may not use this file except in compliance with the
Elastic License 2.0. #}
{% from 'kafka/map.jinja' import KAFKAMERGED -%}
{{ KAFKAMERGED.config.server | yaml(False) | replace("_x_", ".") }}
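Pillar and SOC keys cannot contain dots, so defaults.yaml stores Kafka option names with _x_ standing in for '.', and the two property templates above reverse that substitution at render time. A small sketch of the same transformation, assuming PyYAML (Salt's yaml(False) filter renders block style similarly):

```python
import yaml

# Two entries from salt/kafka/defaults.yaml, with "_x_" encoding the dots.
server = {"log_x_retention_x_hours": 168, "process_x_roles": "broker"}

# Render as block-style YAML, then restore the real Kafka property names.
print(yaml.dump(server, default_flow_style=False).replace("_x_", "."))
# log.retention.hours: 168
# process.roles: broker
```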

salt/kafka/init.sls (new file, 14 lines)

@@ -0,0 +1,14 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'kafka/map.jinja' import KAFKAMERGED %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
include:
{% if GLOBALS.pipeline == "KAFKA" and KAFKAMERGED.enabled %}
- kafka.enabled
{% else %}
- kafka.disabled
{% endif %}

salt/kafka/map.jinja (new file, 20 lines)

@@ -0,0 +1,20 @@
{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
https://securityonion.net/license; you may not use this file except in compliance with the
Elastic License 2.0. #}
{% import_yaml 'kafka/defaults.yaml' as KAFKADEFAULTS %}
{% set KAFKAMERGED = salt['pillar.get']('kafka', KAFKADEFAULTS.kafka, merge=True) %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% do KAFKAMERGED.config.server.update({ 'node_x_id': salt['pillar.get']('kafka:nodes:' ~ GLOBALS.hostname ~ ':nodeid')}) %}
{% do KAFKAMERGED.config.server.update({'advertised_x_listeners': 'BROKER://' ~ GLOBALS.node_ip ~ ':9092'}) %}
{% set nodes = salt['pillar.get']('kafka:nodes', {}) %}
{% set combined = [] %}
{% for hostname, data in nodes.items() %}
{% do combined.append(data.nodeid ~ "@" ~ hostname ~ ":9093") %}
{% endfor %}
{% set kraft_controller_quorum_voters = ','.join(combined) %}
{% do KAFKAMERGED.config.server.update({'controller_x_quorum_x_voters': kraft_controller_quorum_voters}) %}

salt/kafka/soc_kafka.yaml (new file, 170 lines)

@@ -0,0 +1,170 @@
kafka:
enabled:
description: Enable or disable Kafka.
helpLink: kafka.html
cluster_id:
description: The ID of the Kafka cluster.
readonly: True
advanced: True
sensitive: True
helpLink: kafka.html
config:
server:
advertised_x_listeners:
description: Specify the list of listeners (hostname and port) that Kafka brokers provide to clients for communication.
title: advertised.listeners
helpLink: kafka.html
auto_x_create_x_topics_x_enable:
description: Enable the auto creation of topics.
title: auto.create.topics.enable
forcedType: bool
helpLink: kafka.html
controller_x_listener_x_names:
description: Set listeners used by the controller in a comma-separated list.
title: controller.listener.names
helpLink: kafka.html
controller_x_quorum_x_voters:
description: A comma-separated list of ID and endpoint information mapped for a set of voters.
title: controller.quorum.voters
helpLink: kafka.html
inter_x_broker_x_listener_x_name:
description: The name of the listener used for inter-broker communication.
title: inter.broker.listener.name
helpLink: kafka.html
listeners:
description: Comma-separated list of URIs to listen on, together with their listener names.
helpLink: kafka.html
listener_x_security_x_protocol_x_map:
description: Comma-separated mapping of listener names and security protocols.
title: listener.security.protocol.map
helpLink: kafka.html
log_x_dirs:
description: Where Kafka logs are stored within the Docker container.
title: log.dirs
helpLink: kafka.html
log_x_retention_x_check_x_interval_x_ms:
description: Frequency, in milliseconds, at which log files are checked to determine whether they qualify for deletion.
title: log.retention.check.interval.ms
helpLink: kafka.html
log_x_retention_x_hours:
description: How long, in hours, a log file is kept.
title: log.retention.hours
forcedType: int
helpLink: kafka.html
log_x_segment_x_bytes:
description: The maximum allowable size for a log file.
title: log.segment.bytes
forcedType: int
helpLink: kafka.html
node_x_id:
description: The node ID associated with the roles this process performs when process.roles is populated.
title: node.id
forcedType: int
readonly: True
helpLink: kafka.html
num_x_io_x_threads:
description: The number of threads the broker uses for processing requests, which may include disk I/O.
title: num.io.threads
forcedType: int
helpLink: kafka.html
num_x_network_x_threads:
description: The number of threads used for network communication.
title: num.network.threads
forcedType: int
helpLink: kafka.html
num_x_partitions:
description: The number of log partitions assigned per topic.
title: num.partitions
forcedType: int
helpLink: kafka.html
num_x_recovery_x_threads_x_per_x_data_x_dir:
description: The number of threads used for log recovery at startup and flushing at shutdown. This number of threads is used per data directory.
title: num.recovery.threads.per.data.dir
forcedType: int
helpLink: kafka.html
offsets_x_topic_x_replication_x_factor:
description: The offsets topic replication factor.
title: offsets.topic.replication.factor
forcedType: int
helpLink: kafka.html
process_x_roles:
description: The roles the process performs. Use a comma-separated list if multiple.
title: process.roles
helpLink: kafka.html
socket_x_receive_x_buffer_x_bytes:
description: Size, in bytes of the SO_RCVBUF buffer. A value of -1 will use the OS default.
title: socket.receive.buffer.bytes
#forcedType: int - soc needs to allow -1 as an int before we can use this
helpLink: kafka.html
socket_x_request_x_max_x_bytes:
description: The maximum bytes allowed for a request to the socket.
title: socket.request.max.bytes
forcedType: int
helpLink: kafka.html
socket_x_send_x_buffer_x_bytes:
description: Size, in bytes of the SO_SNDBUF buffer. A value of -1 will use the OS default.
title: socket.send.buffer.bytes
#forcedType: int - soc needs to allow -1 as an int before we can use this
helpLink: kafka.html
ssl_x_keystore_x_location:
description: The key store file location within the Docker container.
title: ssl.keystore.location
helpLink: kafka.html
ssl_x_keystore_x_password:
description: The key store file password. Invalid for PEM format.
title: ssl.keystore.password
sensitive: True
helpLink: kafka.html
ssl_x_keystore_x_type:
description: The key store file format.
title: ssl.keystore.type
regex: ^(JKS|PKCS12|PEM)$
helpLink: kafka.html
ssl_x_truststore_x_location:
description: The trust store file location within the Docker container.
title: ssl.truststore.location
helpLink: kafka.html
ssl_x_truststore_x_password:
description: The trust store file password. If null, the trust store file is still used, but integrity checking is disabled. Invalid for PEM format.
title: ssl.truststore.password
sensitive: True
helpLink: kafka.html
transaction_x_state_x_log_x_min_x_isr:
description: Overrides min.insync.replicas for the transaction topic. When a producer configures acks to "all" (or "-1"), this setting determines the minimum number of replicas required to acknowledge a write as successful. Failure to meet this minimum triggers an exception (either NotEnoughReplicas or NotEnoughReplicasAfterAppend). When used in conjunction, min.insync.replicas and acks enable stronger durability guarantees. For instance, creating a topic with a replication factor of 3, setting min.insync.replicas to 2, and using acks of "all" ensures that the producer raises an exception if a majority of replicas fail to receive a write.
title: transaction.state.log.min.isr
forcedType: int
helpLink: kafka.html
transaction_x_state_x_log_x_replication_x_factor:
description: Set the replication factor higher for the transaction topic to ensure availability. Internal topic creation will not proceed until the cluster size satisfies this replication factor prerequisite.
title: transaction.state.log.replication.factor
forcedType: int
helpLink: kafka.html
client:
security_x_protocol:
description: 'Broker communication protocol. Options are: SASL_SSL, PLAINTEXT, SSL, SASL_PLAINTEXT'
title: security.protocol
regex: ^(SASL_SSL|PLAINTEXT|SSL|SASL_PLAINTEXT)$
helpLink: kafka.html
ssl_x_keystore_x_location:
description: The key store file location within the Docker container.
title: ssl.keystore.location
helpLink: kafka.html
ssl_x_keystore_x_password:
description: The key store file password. Invalid for PEM format.
title: ssl.keystore.password
sensitive: True
helpLink: kafka.html
ssl_x_keystore_x_type:
description: The key store file format.
title: ssl.keystore.type
regex: ^(JKS|PKCS12|PEM)$
helpLink: kafka.html
ssl_x_truststore_x_location:
description: The trust store file location within the Docker container.
title: ssl.truststore.location
helpLink: kafka.html
ssl_x_truststore_x_password:
description: The trust store file password. If null, the trust store file is still used, but integrity checking is disabled. Invalid for PEM format.
title: ssl.truststore.password
sensitive: True
helpLink: kafka.html

salt/kafka/sostatus.sls (new file, 21 lines)

@@ -0,0 +1,21 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
append_so-kafka_so-status.conf:
file.append:
- name: /opt/so/conf/so-status/so-status.conf
- text: so-kafka
- unless: grep -q so-kafka /opt/so/conf/so-status/so-status.conf
{% else %}
{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}

salt/kafka/storage.sls (new file, 38 lines)

@@ -0,0 +1,38 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% set kafka_cluster_id = salt['pillar.get']('kafka:cluster_id', default=None) %}
{% if GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone'] %}
{% if kafka_cluster_id is none %}
generate_kafka_cluster_id:
cmd.run:
- name: /usr/sbin/so-kafka-clusterid
{% endif %}
{% endif %}
{# Initialize kafka storage if it doesn't already exist. Just looking for meta.properties in /nsm/kafka/data #}
{% if not salt['file.file_exists']('/nsm/kafka/data/meta.properties') %}
kafka_storage_init:
cmd.run:
- name: |
docker run -v /nsm/kafka/data:/nsm/kafka/data -v /opt/so/conf/kafka/server.properties:/kafka/config/kraft/newserver.properties --name so-kafkainit --user root --entrypoint /kafka/bin/kafka-storage.sh {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-kafka:{{ GLOBALS.so_version }} format -t {{ kafka_cluster_id }} -c /kafka/config/kraft/newserver.properties
kafka_rm_kafkainit:
cmd.run:
- name: |
docker rm so-kafkainit
{% endif %}
{% else %}
{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}


@@ -0,0 +1,13 @@
#!/bin/bash
#
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
. /usr/sbin/so-common
# Generate a new keystore
docker run -v /etc/pki/kafka.p12:/etc/pki/kafka.p12 --name so-kafka-keystore --user root --entrypoint keytool {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-kafka:{{ GLOBALS.so_version }} -importkeystore -srckeystore /etc/pki/kafka.p12 -srcstoretype PKCS12 -srcstorepass changeit -destkeystore /etc/pki/kafka.jks -deststoretype JKS -deststorepass changeit -noprompt
docker cp so-kafka-keystore:/etc/pki/kafka.jks /etc/pki/kafka.jks
docker rm so-kafka-keystore


@@ -75,9 +75,10 @@ so-logstash:
{% else %}
- /etc/pki/tls/certs/intca.crt:/usr/share/filebeat/ca.crt:ro
{% endif %}
{% if GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-searchnode'] %}
{% if GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-searchnode' ] %}
- /opt/so/conf/ca/cacerts:/etc/pki/ca-trust/extracted/java/cacerts:ro
- /opt/so/conf/ca/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro
- /etc/pki/kafka-logstash.p12:/usr/share/logstash/kafka-logstash.p12:ro
{% endif %}
{% if GLOBALS.role == 'so-eval' %}
- /nsm/zeek:/nsm/zeek:ro


@@ -4,9 +4,10 @@
# Elastic License 2.0.
{% from 'logstash/map.jinja' import LOGSTASH_MERGED %}
{% from 'kafka/map.jinja' import KAFKAMERGED %}
include:
{% if LOGSTASH_MERGED.enabled %}
{% if LOGSTASH_MERGED.enabled and not KAFKAMERGED.enabled %}
- logstash.enabled
{% else %}
- logstash.disabled


@@ -0,0 +1,35 @@
{% set kafka_brokers = salt['pillar.get']('logstash:nodes:receiver', {}) %}
{% set kafka_on_mngr = salt['pillar.get']('logstash:nodes:manager', {}) %}
{% set broker_ips = [] %}
{% for node, node_data in kafka_brokers.items() %}
{% do broker_ips.append(node_data['ip'] + ":9092") %}
{% endfor %}
{% for node, node_data in kafka_on_mngr.items() %}
{% do broker_ips.append(node_data['ip'] + ":9092") %}
{% endfor %}
{% set bootstrap_servers = "','".join(broker_ips) %}
input {
kafka {
codec => json
topics => ['default-logs', 'kratos-logs', 'soc-logs', 'strelka-logs', 'suricata-logs', 'zeek-logs']
group_id => 'searchnodes'
client_id => '{{ GLOBALS.hostname }}'
security_protocol => 'SSL'
bootstrap_servers => '{{ bootstrap_servers }}'
ssl_keystore_location => '/usr/share/logstash/kafka-logstash.p12'
ssl_keystore_password => 'changeit'
ssl_keystore_type => 'PKCS12'
ssl_truststore_location => '/etc/pki/ca-trust/extracted/java/cacerts'
ssl_truststore_password => 'changeit'
decorate_events => true
tags => [ "elastic-agent", "input-{{ GLOBALS.hostname}}", "kafka" ]
}
}
filter {
if ![metadata] {
mutate {
rename => { "@metadata" => "metadata" }
}
}
}


@@ -0,0 +1,29 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
### THIS SCRIPT AND SALT STATE REFERENCES TO THIS SCRIPT ARE TO BE REMOVED ONCE INITIAL TESTING IS DONE - THESE VALUES WILL BE GENERATED IN SETUP AND SOUP
local_salt_dir=/opt/so/saltstack/local
if [[ -f /usr/sbin/so-common ]]; then
source /usr/sbin/so-common
else
source $(dirname $0)/../../../common/tools/sbin/so-common
fi
if ! grep -q "^ cluster_id:" $local_salt_dir/pillar/kafka/soc_kafka.sls; then
kafka_cluster_id=$(get_random_value 22)
echo 'kafka: ' > $local_salt_dir/pillar/kafka/soc_kafka.sls
echo ' cluster_id: '$kafka_cluster_id >> $local_salt_dir/pillar/kafka/soc_kafka.sls
if ! grep -q "^ kafkapass:" $local_salt_dir/pillar/kafka/soc_kafka.sls; then
kafkapass=$(get_random_value)
echo ' kafkapass: '$kafkapass >> $local_salt_dir/pillar/kafka/soc_kafka.sls
fi


@@ -438,7 +438,25 @@ post_to_2.4.60() {
}
post_to_2.4.70() {
echo "Nothing to apply"
# Global pipeline changes to REDIS or KAFKA
echo "Removing global.pipeline pillar configuration"
sed -i '/pipeline:/d' /opt/so/saltstack/local/pillar/global/soc_global.sls
# Kafka configuration
mkdir -p /opt/so/saltstack/local/pillar/kafka
touch /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls
touch /opt/so/saltstack/local/pillar/kafka/adv_kafka.sls
echo 'kafka: ' > /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls
if ! grep -q "^ cluster_id:" $local_salt_dir/pillar/kafka/soc_kafka.sls; then
kafka_cluster_id=$(get_random_value 22)
echo ' cluster_id: '$kafka_cluster_id >> $local_salt_dir/pillar/kafka/soc_kafka.sls
if ! grep -q "^ certpass:" $local_salt_dir/pillar/kafka/soc_kafka.sls; then
kafkapass=$(get_random_value)
echo ' certpass: '$kafkapass >> $local_salt_dir/pillar/kafka/soc_kafka.sls
fi
POSTVERSION=2.4.70
}


@@ -4,9 +4,10 @@
# Elastic License 2.0.
{% from 'redis/map.jinja' import REDISMERGED %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
include:
{% if REDISMERGED.enabled %}
{% if GLOBALS.pipeline == "REDIS" and REDISMERGED.enabled %}
- redis.enabled
{% else %}
- redis.disabled


@@ -0,0 +1,125 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
# -*- coding: utf-8 -*-
import logging
import re
log = logging.getLogger(__name__)
# will need this in future versions of this engine
#import salt.client
#local = salt.client.LocalClient()
def start(fpa, interval=10):
log.info("pillarWatch engine: ##### checking watched pillars for changes #####")
# try to open the file that stores the previous runs data
# if the file doesn't exist, create a blank one
try:
# maybe change this location
dataFile = open("/opt/so/state/pillarWatch.txt", "r+")
except FileNotFoundError:
log.warn("pillarWatch engine: No previous pillarWatch data saved")
dataFile = open("/opt/so/state/pillarWatch.txt", "w+")
df = dataFile.read()
for i in fpa:
log.trace("pillarWatch engine: files: %s" % i['files'])
log.trace("pillarWatch engine: pillar: %s" % i['pillar'])
log.trace("pillarWatch engine: actions: %s" % i['actions'])
pillarFiles = i['files']
pillar = i['pillar']
actions = i['actions']
# these are the keys that we are going to look for as we traverse the pillarFiles
patterns = pillar.split(".")
# check the pillar files in reversed order to replicate the same hierarchy as the pillar top file
for pillarFile in reversed(pillarFiles):
currentPillarValue = ''
previousPillarValue = ''
# this var is used to track how many times the pattern has been found in the pillar file so that we can access the proper index later
patternFound = 0
with open(pillarFile, "r") as file:
log.debug("pillarWatch engine: checking file: %s" % pillarFile)
for line in file:
log.trace("pillarWatch engine: inspecting line: %s in file: %s" % (line, file))
log.trace("pillarWatch engine: looking for: %s" % patterns[patternFound])
# since we are looping line by line through a pillar file, the next line will check if each line matches the progression of keys through the pillar
# ex. if we are looking for the value of global.pipeline, then this will loop through the pillar file until 'global' is found, then it will look
# for pipeline. once pipeline is found, it will record the value
if re.search('^' + patterns[patternFound] + ':', line.strip()):
# strip the newline because it makes the logs u-g-l-y
log.debug("pillarWatch engine: found: %s" % line.strip('\n'))
patternFound += 1
# we have found the final key in the pillar that we are looking for, get the previous value then the current value
if patternFound == len(patterns):
# at this point, df is equal to the contents of the pillarWatch file that is used to track the previous values of the pillars
previousPillarValue = 'PREVIOUSPILLARVALUENOTSAVEDINDATAFILE'
# check the contents of the dataFile that stores the previousPillarValue(s).
# find if the pillar we are checking for changes has previously been saved. if so, grab its prior value
for l in df.splitlines():
if pillar in l:
previousPillarValue = str(l.split(":")[1].strip())
currentPillarValue = str(line.split(":")[1]).strip()
log.debug("pillarWatch engine: %s currentPillarValue: %s" % (pillar, currentPillarValue))
log.debug("pillarWatch engine: %s previousPillarValue: %s" % (pillar, previousPillarValue))
# if the pillar we are checking for changes has been defined in the dataFile,
# replace the previousPillarValue with the currentPillarValue. if it isn't in there, append it.
if pillar in df:
df = re.sub(r"\b{}\b.*".format(pillar), pillar + ': ' + currentPillarValue, df)
else:
df += pillar + ': ' + currentPillarValue + '\n'
log.trace("pillarWatch engine: df: %s" % df)
# we have found the pillar so we don't need to loop through the file anymore
break
# if the key and value were found in the first file, then we don't want to look in
# any more files since we use the first file as the source of truth.
if patternFound == len(patterns):
break
# if the pillar value changed, then we find what actions we should take
log.debug("pillarWatch engine: checking if currentPillarValue != previousPillarValue")
if currentPillarValue != previousPillarValue:
log.info("pillarWatch engine: currentPillarValue != previousPillarValue: %s != %s" % (currentPillarValue, previousPillarValue))
# check if the previous pillar value is defined in the pillar from -> to actions
if previousPillarValue in actions['from']:
# check if the new / current pillar value is defined under to
if currentPillarValue in actions['from'][previousPillarValue]['to']:
ACTIONS=actions['from'][previousPillarValue]['to'][currentPillarValue]
# if the new / current pillar value isn't defined under to, is there a wildcard defined
elif '*' in actions['from'][previousPillarValue]['to']:
ACTIONS=actions['from'][previousPillarValue]['to']['*']
# no action was defined for us to take when we see the pillar change
else:
ACTIONS=['NO DEFINED ACTION FOR US TO TAKE']
# if the previous pillar wasn't defined in the actions from, is there a wildcard defined for the pillar that we are changing from
elif '*' in actions['from']:
# is the new pillar value defined for the wildcard match
if currentPillarValue in actions['from']['*']['to']:
ACTIONS=actions['from']['*']['to'][currentPillarValue]
# if the new pillar doesn't have an action, was a wildcard defined
elif '*' in actions['from']['*']['to']:
# need more logic here for to and from
ACTIONS=actions['from']['*']['to']['*']
else:
ACTIONS=['NO DEFINED ACTION FOR US TO TAKE']
# a match for the previous pillar wasn't defined in the action in either the form of a direct match or wildcard
else:
ACTIONS=['NO DEFINED ACTION FOR US TO TAKE']
log.debug("pillarWatch engine: all defined actions: %s" % actions['from'])
log.debug("pillarWatch engine: ACTIONS: %s chosen based on previousPillarValue: %s switching to currentPillarValue: %s" % (ACTIONS, previousPillarValue, currentPillarValue))
for action in ACTIONS:
log.info("pillarWatch engine: action: %s" % action)
if action != 'NO DEFINED ACTION FOR US TO TAKE':
for saltModule, args in action.items():
log.debug("pillarWatch engine: saltModule: %s" % saltModule)
log.debug("pillarWatch engine: args: %s" % args)
#__salt__[saltModule](**args)
actionReturn = __salt__[saltModule](**args)
log.info("pillarWatch engine: actionReturn: %s" % actionReturn)
dataFile.seek(0)
dataFile.write(df)
dataFile.truncate()
dataFile.close()
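For reference, a sketch of the state file this engine maintains at /opt/so/state/pillarWatch.txt, which holds one 'dotted.pillar: value' line per watched pillar (contents illustrative):

```python
# Illustrative contents of /opt/so/state/pillarWatch.txt after a few runs.
df = "global.pipeline: KAFKA\nidstools.config.ruleset: ETOPEN\n"

# The engine re-reads these lines to recover each pillar's previous value.
previous = {l.split(":")[0]: l.split(":", 1)[1].strip() for l in df.splitlines()}
print(previous)
# {'global.pipeline': 'KAFKA', 'idstools.config.ruleset': 'ETOPEN'}
```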


@@ -0,0 +1,120 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
# -*- coding: utf-8 -*-
import logging
import re
log = logging.getLogger(__name__)
# will need this in future versions of this engine
import salt.client
local = salt.client.LocalClient()
def start(fpa, interval=10):
def getValue(content, key):
pieces = key.split(".", 1)
if len(pieces) > 1:
return getValue(content[pieces[0]], pieces[1])
else:
#log.info("ck: %s" % content[key])
return content[key]
#content.pop(key, None)
log.info("valWatch engine: ##### checking watched values for changes #####")
# try to open the file that stores the previous runs data
# if the file doesn't exist, create a blank one
try:
# maybe change this location
dataFile = open("/opt/so/state/valWatch.txt", "r+")
except FileNotFoundError:
log.warn("valWatch engine: No previous valWatch data saved")
dataFile = open("/opt/so/state/valWatch.txt", "w+")
df = dataFile.read()
for i in fpa:
log.info("valWatch engine: i: %s" % i)
log.trace("valWatch engine: map: %s" % i['map'])
log.trace("valWatch engine: value: %s" % i['value'])
log.trace("valWatch engine: targets: %s" % i['targets'])
log.trace("valWatch engine: actions: %s" % i['actions'])
mapFile = i['map']
value = i['value']
targets = i['targets']
# target type
ttype = i['ttype']
actions = i['actions']
# these are the keys that we are going to look for as we traverse the pillarFiles
patterns = value.split(".")
mainDict = patterns.pop(0)
# patterns = value.split(".")
for target in targets:
# tell targets to render mapfile and return value split
mapRender = local.cmd(target, fun='jinja.load_map', arg=[mapFile, mainDict], tgt_type=ttype)
currentValue = ''
previousValue = ''
# this var is used to track how many times the pattern has been found in the pillar file so that we can access the proper index later
patternFound = 0
#with open(pillarFile, "r") as file:
# log.debug("pillarWatch engine: checking file: %s" % pillarFile)
mapRenderKeys = list(mapRender.keys())
if len(mapRenderKeys) > 0:
log.info(mapRenderKeys)
log.info("valWatch engine: mapRender: %s" % mapRender)
minion = mapRenderKeys[0]
currentValue = getValue(mapRender[minion],value.split('.', 1)[1])
log.info("valWatch engine: currentValue: %s: %s: %s" % (minion, value, currentValue))
for l in df.splitlines():
if value in l:
previousValue = str(l.split(":")[1].strip())
log.info("valWatch engine: previousValue: %s: %s: %s" % (minion, value, previousValue))
'''
for key in mapRender[minion]:
log.info("pillarWatch engine: inspecting key: %s in mainDict: %s" % (key, mainDict))
log.info("pillarWatch engine: looking for: %s" % patterns[patternFound])
# since we are looping line by line through a pillar file, the next line will check if each line matches the progression of keys through the pillar
# ex. if we are looking for the value of global.pipeline, then this will loop through the pillar file until 'global' is found, then it will look
# for pipeline. once pipeline is found, it will record the value
#if re.search(patterns[patternFound], key):
if patterns[patternFound] == key:
# strip the newline because it makes the logs u-g-l-y
log.info("pillarWatch engine: found: %s" % key)
patternFound += 1
# we have found the final key in the pillar that we are looking for, get the previous value then the current value
if patternFound == len(patterns):
# at this point, df is equal to the contents of the pillarWatch file that is used to tract the previous values of the pillars
previousPillarValue = 'PREVIOUSPILLARVALUENOTSAVEDINDATAFILE'
# check the contents of the dataFile that stores the previousPillarValue(s).
# find if the pillar we are checking for changes has previously been saved. if so, grab it's prior value
for l in df.splitlines():
if value in l:
previousPillarValue = str(l.split(":")[1].strip())
currentPillarValue = mapRender[minion][key]
log.info("pillarWatch engine: %s currentPillarValue: %s" % (value, currentPillarValue))
log.info("pillarWatch engine: %s previousPillarValue: %s" % (value, previousPillarValue))
# if the pillar we are checking for changes has been defined in the dataFile,
# replace the previousPillarValue with the currentPillarValue. if it isn't in there, append it.
if value in df:
df = re.sub(r"\b{}\b.*".format(pillar), pillar + ': ' + currentPillarValue, df)
else:
df += value + ': ' + currentPillarValue + '\n'
log.info("pillarWatch engine: df: %s" % df)
# we have found the pillar so we dont need to loop through the file anymore
break
# if key and value was found in the first file, then we don't want to look in
# any more files since we use the first file as the source of truth.
if patternFound == len(patterns):
break
'''
dataFile.seek(0)
dataFile.write(df)
dataFile.truncate()
dataFile.close()


@@ -0,0 +1,141 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
# -*- coding: utf-8 -*-
import logging
import re
from pathlib import Path
import os
import glob
import json
from time import sleep
import sys
log = logging.getLogger(__name__)
import salt.client
local = salt.client.LocalClient()
def start(watched, interval=10):
# this 20 second sleep allows enough time for the minion to reconnect during testing of the script when the salt-master is restarted
sleep(20)
log.info("valueWatch engine: started")
# this dict will be used to store the files that we are watching and their modification times for the current iteration though a loop
fileModTimesCurrent = {}
# same as fileModTimesCurrent, but stores the previous values through the loop.
# the combination of these two variables is used to determine if a files has changed.
fileModTimesPrevious = {}
#
currentValues = {}
def getValue(content, key):
pieces = key.split(".", 1)
if len(pieces) > 1:
return getValue(content[pieces[0]], pieces[1])
else:
#log.info("ck: %s" % content[key])
return content[key]
#content.pop(key, None)
def updateModTimesCurrent(files):
# this dict will be used to store the files that we are watching and their modification times for the current iteration though a loop
fileModTimesCurrent.clear()
for f in files:
#log.warn(f)
fileName = Path(f).name
filePath = Path(f).parent
if '*' in fileName:
#log.info(fileName)
#log.info(filePath)
slsFiles = glob.glob(f)
for slsFile in slsFiles:
#log.info(slsFile)
fileModTimesCurrent.update({slsFile: os.path.getmtime(slsFile)})
else:
fileModTimesCurrent.update({f: os.path.getmtime(f)})
def compareFileModTimes():
ret = []
for f in fileModTimesCurrent:
log.info(f)
if f in fileModTimesPrevious:
log.info("valueWatch engine: fileModTimesCurrent: %s" % fileModTimesCurrent[f])
log.info("valueWatch engine: fileModTimesPrevious: %s" % fileModTimesPrevious[f])
if fileModTimesCurrent[f] != fileModTimesPrevious[f]:
log.error("valueWatch engine: fileModTimesCurrent[f] != fileModTimesPrevious[f]")
log.error("valueWatch engine: " + str(fileModTimesCurrent[f]) + " != " + str(fileModTimesPrevious[f]))
ret.append(f)
return ret
# this will set the current value of 'value' from engines.conf and save it to the currentValues dict
def updateCurrentValues():
for target in targets:
log.info("valueWatch engine: refreshing pillars on %s" % target)
refreshPillar = local.cmd(target, fun='saltutil.refresh_pillar', tgt_type=ttype)
log.info("valueWatch engine: pillar refresh results: %s" % refreshPillar)
# check if the result was True for the pillar refresh
# will need to add a recheck in case the minion was just temporarily unavailable
try:
if next(iter(refreshPillar.values())):
sleep(5)
# render the map file for the variable passed in from value.
mapRender = local.cmd(target, fun='jinja.load_map', arg=[mapFile, mainDict], tgt_type=ttype)
log.info("mR: %s" % mapRender)
currentValue = ''
previousValue = ''
mapRenderKeys = list(mapRender.keys())
if len(mapRenderKeys) > 0:
log.info(mapRenderKeys)
log.info("valueWatch engine: mapRender: %s" % mapRender)
minion = mapRenderKeys[0]
# if not isinstance(mapRender[minion], bool):
currentValue = getValue(mapRender[minion],value.split('.', 1)[1])
log.info("valueWatch engine: currentValue: %s: %s: %s" % (minion, value, currentValue))
currentValues.update({value: {minion: currentValue}})
# we have rendered the value so we don't need to have any more target render it
break
except StopIteration:
log.info("valueWatch engine: target %s did not respond or does not exist" % target)
log.info("valueWatch engine: currentValues: %s" % currentValues)
# run the main loop
while True:
log.info("valueWatch engine: checking watched files for changes")
for v in watched:
value = v['value']
files = v['files']
mapFile = v['map']
targets = v['targets']
ttype = v['ttype']
actions = v['actions']
patterns = value.split(".")
mainDict = patterns.pop(0)
log.info("valueWatch engine: value: %s" % value)
# the call to this function will update fileModtimesCurrent
updateModTimesCurrent(files)
#log.trace("valueWatch engine: fileModTimesCurrent: %s" % fileModTimesCurrent)
#log.trace("valueWatch engine: fileModTimesPrevious: %s" % fileModTimesPrevious)
# compare with the previous checks file modification times
modFilesDiff = compareFileModTimes()
# if there were changes in the pillar files, then we need to have the minion render the map file to determine if the value changed
if modFilesDiff:
log.info("valueWatch engine: change in files detetected, updating currentValues: %s" % modFilesDiff)
updateCurrentValues()
elif value not in currentValues:
log.info("valueWatch engine: %s not in currentValues, updating currentValues." % value)
updateCurrentValues()
else:
log.info("valueWatch engine: no files changed, no update for currentValues")
# save this iteration's values to previous so we can compare next run
fileModTimesPrevious.update(fileModTimesCurrent)
sleep(interval)


@@ -4,3 +4,127 @@ engines_dirs:
engines:
  - checkmine:
      interval: 60
  - valueWatch:
      watched:
        - value: GLOBALMERGED.pipeline
          files:
            - /opt/so/saltstack/local/pillar/global/soc_global.sls
            - /opt/so/saltstack/local/pillar/global/adv_global.sls
          map: global/map.jinja
          targets:
            - G@role:so-notanodetype
            - G@role:so-manager
            - G@role:so-searchnode
          ttype: compound
          actions:
            from:
              '*':
                to:
                  KAFKA:
                    - cmd.run:
                        cmd: /usr/sbin/so-yaml.py replace /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.enabled True
              KAFKA:
                to:
                  '*':
                    - cmd.run:
                        cmd: /usr/sbin/so-yaml.py replace /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.enabled False
        # - value: FIREWALL_MERGED
        #   files:
        #     - /opt/so/saltstack/local/pillar/firewall/soc_firewall.sls
        #     - /opt/so/saltstack/local/pillar/firewall/adv_firewall.sls
        #     - /opt/so/saltstack/local/pillar/minions/*.sls
        #   map: firewall/map.jinja
        #   targets:
        #     - so-*
        #   ttype: compound
        #   actions:
        #     from:
        #       '*':
        #         to:
        #           '*':
        #             - cmd.run:
        #                 cmd: date
      interval: 10
  - pillarWatch:
      fpa:
        # these files will be checked in reverse order to replicate the same hierarchy as the pillar top file
        - files:
            - /opt/so/saltstack/local/pillar/global/soc_global.sls
            - /opt/so/saltstack/local/pillar/global/adv_global.sls
          pillar: global.pipeline
          actions:
            from:
              '*':
                to:
                  KAFKA:
                    - cmd.run:
                        cmd: /usr/sbin/so-yaml.py replace /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.enabled True
                    # - cmd.run:
                    #     cmd: salt-call saltutil.kill_all_jobs
                    # - cmd.run:
                    #     cmd: salt-call state.highstate &
              KAFKA:
                to:
                  '*':
                    - cmd.run:
                        cmd: /usr/sbin/so-yaml.py replace /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.enabled False
                    # - cmd.run:
                    #     cmd: salt-call saltutil.kill_all_jobs
                    # - cmd.run:
                    #     cmd: salt-call state.highstate &
        - files:
            - /opt/so/saltstack/local/pillar/idstools/soc_idstools.sls
            - /opt/so/saltstack/local/pillar/idstools/adv_idstools.sls
          pillar: idstools.config.ruleset
          actions:
            from:
              '*':
                to:
                  '*':
                    - cmd.run:
                        cmd: /usr/sbin/so-rule-update
      interval: 10
  - valWatch:
      fpa:
        - value: GLOBALMERGED.pipeline
          map: global/map.jinja
          targets:
            - so-manager
            - so-managersearch
            - so-standalone
          actions:
            from:
              '*':
                to:
                  KAFKA:
                    - cmd.run:
                        cmd: /usr/sbin/so-yaml.py replace /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.enabled True
                    # - cmd.run:
                    #     cmd: salt-call saltutil.kill_all_jobs
                    # - cmd.run:
                    #     cmd: salt-call state.highstate &
              KAFKA:
                to:
                  '*':
                    - cmd.run:
                        cmd: /usr/sbin/so-yaml.py replace /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.enabled False
                    # - cmd.run:
                    #     cmd: salt-call saltutil.kill_all_jobs
                    # - cmd.run:
                    #     cmd: salt-call state.highstate &
        # - files:
        #     - /opt/so/saltstack/local/pillar/idstools/soc_idstools.sls
        #     - /opt/so/saltstack/local/pillar/idstools/adv_idstools.sls
        #   pillar: idstools.config.ruleset
        #   actions:
        #     from:
        #       '*':
        #         to:
        #           '*':
        #             - cmd.run:
        #                 cmd: /usr/sbin/so-rule-update
      interval: 10
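Each actions block above is a transition table: when a watched value moves from one state to another, the engine looks up from.<old value>.to.<new value>, with '*' acting as a wildcard on either side, and runs the listed commands. A minimal sketch of that lookup, assuming the parsed YAML is held in a plain dict; the helper name matchActions is illustrative, not from the source:

def matchActions(actions, oldVal, newVal):
    # prefer an exact match on the old value, then fall back to the '*' wildcard
    for frm in (oldVal, '*'):
        toTable = actions.get('from', {}).get(frm, {}).get('to', {})
        # same preference order for the new value
        for to in (newVal, '*'):
            if to in toTable:
                # a list of {'cmd.run': {'cmd': ...}} entries to execute
                return toTable[to]
    return []

With the table above, a change from anything to KAFKA flips kafka.enabled to True via so-yaml.py, and a change from KAFKA to anything flips it back to False. The engine only consults the table when the value actually changed, so the wildcard rows cannot fire on a no-op iteration.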

View File

@@ -27,6 +27,11 @@ checkmine_engine:
    - source: salt://salt/engines/master/checkmine.py
    - makedirs: True
pillarWatch_engine:
  file.managed:
    - name: /etc/salt/engines/pillarWatch.py
    - source: salt://salt/engines/master/pillarWatch.py
engines_config:
  file.managed:
    - name: /etc/salt/master.d/engines.conf

View File

@@ -13,6 +13,9 @@ include:
  - systemd.reload
  - repo.client
  - salt.mine_functions
{% if GLOBALS.role in GLOBALS.manager_roles %}
  - ca
{% endif %}
{% if INSTALLEDSALTVERSION|string != SALTVERSION|string %}
@@ -98,5 +101,8 @@ salt_minion_service:
      - file: mine_functions
{% if INSTALLEDSALTVERSION|string == SALTVERSION|string %}
      - file: set_log_levels
{% endif %}
{% if GLOBALS.role in GLOBALS.manager_roles %}
      - file: /etc/salt/minion.d/signing_policies.conf
{% endif %}
    - order: last

View File

@@ -664,6 +664,230 @@ elastickeyperms:
{%- endif %}
{% if grains['role'] in ['so-manager', 'so-receiver', 'so-searchnode'] %}
kafka_key:
  x509.private_key_managed:
    - name: /etc/pki/kafka.key
    - keysize: 4096
    - backup: True
    - new: True
    {% if salt['file.file_exists']('/etc/pki/kafka.key') -%}
    - prereq:
      - x509: /etc/pki/kafka.crt
    {%- endif %}
    - retry:
        attempts: 5
        interval: 30
kafka_crt:
  x509.certificate_managed:
    - name: /etc/pki/kafka.crt
    - ca_server: {{ ca_server }}
    - subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
    - signing_policy: kafka
    - private_key: /etc/pki/kafka.key
    - CN: {{ GLOBALS.hostname }}
    - days_remaining: 0
    - days_valid: 820
    - backup: True
    - timeout: 30
    - retry:
        attempts: 5
        interval: 30
  cmd.run:
    - name: "/usr/bin/openssl pkcs12 -inkey /etc/pki/kafka.key -in /etc/pki/kafka.crt -export -out /etc/pki/kafka.p12 -nodes -passout pass:changeit"
    - onchanges:
      - x509: /etc/pki/kafka.key
elasticfleet_kafka_key:
  x509.private_key_managed:
    - name: /etc/pki/elasticfleet-kafka.key
    - keysize: 4096
    - backup: True
    - new: True
    {% if salt['file.file_exists']('/etc/pki/elasticfleet-kafka.key') -%}
    - prereq:
      - x509: elasticfleet_kafka_crt
    {%- endif %}
    - retry:
        attempts: 5
        interval: 30
elasticfleet_kafka_crt:
  x509.certificate_managed:
    - name: /etc/pki/elasticfleet-kafka.crt
    - ca_server: {{ ca_server }}
    - signing_policy: kafka
    - private_key: /etc/pki/elasticfleet-kafka.key
    - CN: {{ GLOBALS.hostname }}
    - subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
    - days_remaining: 0
    - days_valid: 820
    - backup: True
    - timeout: 30
    - retry:
        attempts: 5
        interval: 30
  cmd.run:
    - name: "/usr/bin/openssl pkcs8 -in /etc/pki/elasticfleet-kafka.key -topk8 -out /etc/pki/elasticfleet-kafka.p8 -nocrypt"
    - onchanges:
      - x509: elasticfleet_kafka_key
kafka_logstash_key:
  x509.private_key_managed:
    - name: /etc/pki/kafka-logstash.key
    - keysize: 4096
    - backup: True
    - new: True
    {% if salt['file.file_exists']('/etc/pki/kafka-logstash.key') -%}
    - prereq:
      - x509: /etc/pki/kafka-logstash.crt
    {%- endif %}
    - retry:
        attempts: 5
        interval: 30
kafka_logstash_crt:
  x509.certificate_managed:
    - name: /etc/pki/kafka-logstash.crt
    - ca_server: {{ ca_server }}
    - subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
    - signing_policy: kafka
    - private_key: /etc/pki/kafka-logstash.key
    - CN: {{ GLOBALS.hostname }}
    - days_remaining: 0
    - days_valid: 820
    - backup: True
    - timeout: 30
    - retry:
        attempts: 5
        interval: 30
  cmd.run:
    - name: "/usr/bin/openssl pkcs12 -inkey /etc/pki/kafka-logstash.key -in /etc/pki/kafka-logstash.crt -export -out /etc/pki/kafka-logstash.p12 -nodes -passout pass:changeit"
    - onchanges:
      - x509: /etc/pki/kafka-logstash.key
{% if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-receiver'] %}
kafka_client_key:
  x509.private_key_managed:
    - name: /etc/pki/kafka-client.key
    - keysize: 4096
    - backup: True
    - new: True
    {% if salt['file.file_exists']('/etc/pki/kafka-client.key') -%}
    - prereq:
      - x509: /etc/pki/kafka-client.crt
    {%- endif %}
    - retry:
        attempts: 5
        interval: 30
kafka_client_crt:
  x509.certificate_managed:
    - name: /etc/pki/kafka-client.crt
    - ca_server: {{ ca_server }}
    - subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
    - signing_policy: kafka
    - private_key: /etc/pki/kafka-client.key
    - CN: {{ GLOBALS.hostname }}
    - days_remaining: 0
    - days_valid: 820
    - backup: True
    - timeout: 30
    - retry:
        attempts: 5
        interval: 30
kafka_client_key_perms:
  file.managed:
    - replace: False
    - name: /etc/pki/kafka-client.key
    - mode: 640
    - user: 960
    - group: 939
kafka_client_crt_perms:
  file.managed:
    - replace: False
    - name: /etc/pki/kafka-client.crt
    - mode: 640
    - user: 960
    - group: 939
{% endif %}
kafka_key_perms:
  file.managed:
    - replace: False
    - name: /etc/pki/kafka.key
    - mode: 640
    - user: 960
    - group: 939
kafka_crt_perms:
  file.managed:
    - replace: False
    - name: /etc/pki/kafka.crt
    - mode: 640
    - user: 960
    - group: 939
kafka_logstash_key_perms:
  file.managed:
    - replace: False
    - name: /etc/pki/kafka-logstash.key
    - mode: 640
    - user: 960
    - group: 939
kafka_logstash_crt_perms:
  file.managed:
    - replace: False
    - name: /etc/pki/kafka-logstash.crt
    - mode: 640
    - user: 960
    - group: 939
kafka_logstash_pkcs12_perms:
  file.managed:
    - replace: False
    - name: /etc/pki/kafka-logstash.p12
    - mode: 640
    - user: 960
    - group: 931
kafka_pkcs8_perms:
  file.managed:
    - replace: False
    - name: /etc/pki/elasticfleet-kafka.p8
    - mode: 640
    - user: 960
    - group: 939
kafka_pkcs12_perms:
  file.managed:
    - replace: False
    - name: /etc/pki/kafka.p12
    - mode: 640
    - user: 960
    - group: 939
elasticfleet_kafka_cert_perms:
  file.managed:
    - replace: False
    - name: /etc/pki/elasticfleet-kafka.crt
    - mode: 640
    - user: 960
    - group: 939
elasticfleet_kafka_key_perms:
  file.managed:
    - replace: False
    - name: /etc/pki/elasticfleet-kafka.key
    - mode: 640
    - user: 960
    - group: 939
{% endif %}
{% else %}
{{sls}}_state_not_allowed:
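The PKCS#12 exports above are written with the hard-coded pass changeit, so a quick sanity check after a highstate is to load the bundles back and confirm they carry both the key and the signed certificate. A minimal sketch using the cryptography package — the package choice is an assumption; openssl pkcs12 -info works just as well:

from cryptography.hazmat.primitives.serialization import pkcs12

def keystore_ok(path, password=b"changeit"):
    # load_key_and_certificates raises ValueError on a wrong password
    # or a corrupt bundle
    with open(path, "rb") as f:
        key, cert, _chain = pkcs12.load_key_and_certificates(f.read(), password)
    return key is not None and cert is not None

for bundle in ("/etc/pki/kafka.p12", "/etc/pki/kafka-logstash.p12"):
    print(bundle, "ok" if keystore_ok(bundle) else "incomplete")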

View File

@@ -71,3 +71,20 @@ fleet_crt:
fbcertdir:
  file.absent:
    - name: /opt/so/conf/filebeat/etc/pki
kafka_crt:
  file.absent:
    - name: /etc/pki/kafka.crt
kafka_key:
  file.absent:
    - name: /etc/pki/kafka.key
kafka_logstash_crt:
  file.absent:
    - name: /etc/pki/kafka-logstash.crt
kafka_logstash_key:
  file.absent:
    - name: /etc/pki/kafka-logstash.key
kafka_logstash_keystore:
  file.absent:
    - name: /etc/pki/kafka-logstash.p12

View File

@@ -106,6 +106,7 @@ base:
    - utility
    - elasticfleet
    - stig
    - kafka
  '*_standalone and G@saltversion:{{saltversion}}':
    - match: compound
@@ -235,6 +236,7 @@ base:
    - logstash
    - redis
    - elasticfleet.install_agent_grid
    - kafka
  '*_idh and G@saltversion:{{saltversion}}':
    - match: compound
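The targets above are compound matchers: a glob on the minion id combined with a G@ grain check, so the kafka state only ships to minions whose id and salt version both match. A toy evaluator for just this narrow 'glob and G@grain:value' form — Salt's real compound matcher supports many more operators, and the minion id and version below are made up:

import fnmatch

def compound_match(minion_id, grains, expr):
    # handles only 'pattern and G@grain:value' clauses, unlike Salt itself
    for part in (p.strip() for p in expr.split(' and ')):
        if part.startswith('G@'):
            grain, _, want = part[2:].partition(':')
            if str(grains.get(grain)) != want:
                return False
        elif not fnmatch.fnmatch(minion_id, part):
            return False
    return True

grains = {'saltversion': '3006.6'}
print(compound_match('sonode_standalone', grains,
                     '*_standalone and G@saltversion:3006.6'))  # True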

View File

@@ -23,7 +23,7 @@
    'manager_ip': INIT.PILLAR.global.managerip,
    'md_engine': INIT.PILLAR.global.mdengine,
    'pcap_engine': GLOBALMERGED.pcapengine,
-   'pipeline': INIT.PILLAR.global.pipeline,
+   'pipeline': GLOBALMERGED.pipeline,
    'so_version': INIT.PILLAR.global.soversion,
    'so_docker_gateway': DOCKER.gateway,
    'so_docker_range': DOCKER.range,
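This one-line change matters because INIT.PILLAR reflects only the raw local pillar, while GLOBALMERGED overlays that pillar onto the shipped global defaults, so pipeline resolves even though setup no longer writes pipeline: 'redis' into the global pillar (see the setup change below), and any UI override wins. A rough illustration, assuming a recursive last-wins merge like Salt's:

def merge(base, override):
    # recursively overlay override onto base; later sources win
    out = dict(base)
    for k, v in override.items():
        if isinstance(v, dict) and isinstance(out.get(k), dict):
            out[k] = merge(out[k], v)
        else:
            out[k] = v
    return out

defaults = {'global': {'pipeline': 'redis', 'mdengine': 'ZEEK'}}
soc_global = {'global': {'pipeline': 'KAFKA'}}  # written by so-yaml.py / the UI

GLOBALMERGED = merge(defaults, soc_global)['global']
print(GLOBALMERGED['pipeline'])        # KAFKA -- the override is visible
print(defaults['global']['pipeline'])  # redis -- the default alone would miss it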

View File

@@ -803,6 +803,7 @@ create_manager_pillars() {
  patch_pillar
  nginx_pillar
  kibana_pillar
  kafka_pillar
}
create_repo() {
@@ -1191,6 +1192,18 @@ kibana_pillar() {
logCmd "touch $kibana_pillar_file"
}
kafka_pillar() {
KAFKACLUSTERID=$(get_random_value 22)
KAFKAPASS=$(get_random_value)
logCmd "mkdir -p $local_salt_dir/pillar/kakfa"
logCmd "touch $adv_kafka_pillar_file"
logCmd "touch $kafka_pillar_file"
printf '%s\n'\
"kafka:"\
" cluster_id: $KAFKACLUSTERID"\
" certpass: $KAFKAPASS" > $kafka_pillar_file
}
logrotate_pillar() {
logCmd "mkdir -p $local_salt_dir/pillar/logrotate"
logCmd "touch $adv_logrotate_pillar_file"
@@ -1325,7 +1338,6 @@ create_global() {
  # Continue adding other details
  echo "  imagerepo: '$IMAGEREPO'" >> $global_pillar_file
echo " pipeline: 'redis'" >> $global_pillar_file
echo " repo_host: '$HOSTNAME'" >> $global_pillar_file
echo " influxdb_host: '$HOSTNAME'" >> $global_pillar_file
echo " registry_host: '$HOSTNAME'" >> $global_pillar_file
@@ -1391,7 +1403,7 @@ make_some_dirs() {
  mkdir -p $local_salt_dir/salt/firewall/portgroups
  mkdir -p $local_salt_dir/salt/firewall/ports
- for THEDIR in bpf pcap elasticsearch ntp firewall redis backup influxdb strelka sensoroni soc docker zeek suricata nginx telegraf logstash soc manager kratos idstools idh elastalert stig global;do
+ for THEDIR in bpf pcap elasticsearch ntp firewall redis backup influxdb strelka sensoroni soc docker zeek suricata nginx telegraf logstash soc manager kratos idstools idh elastalert stig global kafka;do
    mkdir -p $local_salt_dir/pillar/$THEDIR
    touch $local_salt_dir/pillar/$THEDIR/adv_$THEDIR.sls
    touch $local_salt_dir/pillar/$THEDIR/soc_$THEDIR.sls
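get_random_value 22 in kafka_pillar above matches the shape of a Kafka cluster id, which is conventionally a 22-character base64url-encoded UUID (what kafka-storage.sh random-uuid emits). A hedged equivalent in Python — the setup script's own helper is not shown in this diff, so this only mirrors the format:

import base64
import uuid

def kafka_cluster_id():
    # 16 random bytes -> 24 chars of base64url with '==' padding;
    # stripping the padding leaves the 22-char id Kafka expects
    return base64.urlsafe_b64encode(uuid.uuid4().bytes).decode().rstrip("=")

print(kafka_cluster_id())  # e.g. 'J7s2qg2PRkeyjvviitBRlw'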

View File

@@ -624,6 +624,16 @@ if ! [[ -f $install_opt_file ]]; then
  set_minion_info
  whiptail_end_settings
elif [[ $is_kafka ]]; then
  info "Setting up as node type Kafka broker"
  #check_requirements "kafka"
  networking_needful
  collect_mngr_hostname
  add_mngr_ip_to_hosts
  check_manager_connection
  set_minion_info
  whiptail_end_settings
fi
if [[ $waitforstate ]]; then

View File

@@ -178,6 +178,12 @@ export redis_pillar_file
adv_redis_pillar_file="$local_salt_dir/pillar/redis/adv_redis.sls"
export adv_redis_pillar_file
kafka_pillar_file="local_salt_dir/pillar/kafka/soc_kafka.sls"
export kafka_pillar_file
adv_kafka_pillar_file="$local_salt_dir/pillar/kafka/adv_kafka.sls"
export kafka_pillar_file
idh_pillar_file="$local_salt_dir/pillar/idh/soc_idh.sls"
export idh_pillar_file

View File

@@ -674,7 +674,7 @@ whiptail_install_type_dist_existing() {
Note: Heavy nodes (HEAVYNODE) are NOT recommended for most users.
EOM
- install_type=$(whiptail --title "$whiptail_title" --menu "$node_msg" 19 75 6 \
+ install_type=$(whiptail --title "$whiptail_title" --menu "$node_msg" 19 75 7 \
    "SENSOR" "Create a forward only sensor " \
    "SEARCHNODE" "Add a search node with parsing " \
    "FLEET" "Dedicated Elastic Fleet Node " \