mirror of
https://github.com/Security-Onion-Solutions/securityonion.git
synced 2025-12-06 17:22:49 +01:00
Compare commits
70 Commits
2.4.70-202
...
kaffytaffy
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
d91dd0dd3c | ||
|
|
a0388fd568 | ||
|
|
05244cfd75 | ||
|
|
6c5e0579cf | ||
|
|
1f6eb9cdc3 | ||
|
|
610dd2c08d | ||
|
|
506bbd314d | ||
|
|
4caa6a10b5 | ||
|
|
4b79623ce3 | ||
|
|
c4994a208b | ||
|
|
bb983d4ba2 | ||
|
|
c014508519 | ||
|
|
fcfbb1e857 | ||
|
|
911ee579a9 | ||
|
|
a6ff92b099 | ||
|
|
d73ba7dd3e | ||
|
|
04ddcd5c93 | ||
|
|
af29ae1968 | ||
|
|
fbd3cff90d | ||
|
|
0ed9894b7e | ||
|
|
a54a72c269 | ||
|
|
f514e5e9bb | ||
|
|
3955587372 | ||
|
|
6b28dc72e8 | ||
|
|
ca7253a589 | ||
|
|
af53dcda1b | ||
|
|
d3bd56b131 | ||
|
|
e9e61ea2d8 | ||
|
|
86b984001d | ||
|
|
fa7f8104c8 | ||
|
|
bd5fe43285 | ||
|
|
d38051e806 | ||
|
|
daa5342986 | ||
|
|
c48436ccbf | ||
|
|
7aa00faa6c | ||
|
|
6217a7b9a9 | ||
|
|
d67ebabc95 | ||
|
|
65274e89d7 | ||
|
|
721e04f793 | ||
|
|
433309ef1a | ||
|
|
735cfb4c29 | ||
|
|
6202090836 | ||
|
|
436cbc1f06 | ||
|
|
40b08d737c | ||
|
|
4c5b42b898 | ||
|
|
7a6b72ebac | ||
|
|
1b8584d4bb | ||
|
|
13105c4ab3 | ||
|
|
dc27bbb01d | ||
|
|
b863060df1 | ||
|
|
18f95e867f | ||
|
|
ed6137a76a | ||
|
|
c3f02a698e | ||
|
|
db106f8ca1 | ||
|
|
8e47cc73a5 | ||
|
|
639bf05081 | ||
|
|
4e142e0212 | ||
|
|
c9bf1c86c6 | ||
|
|
82830c8173 | ||
|
|
7f5741c43b | ||
|
|
643d4831c1 | ||
|
|
b032eed22a | ||
|
|
1b49c8540e | ||
|
|
f7534a0ae3 | ||
|
|
780ad9eb10 | ||
|
|
e25bc8efe4 | ||
|
|
26abe90671 | ||
|
|
446f1ffdf5 | ||
|
|
8cf29682bb | ||
|
|
86dc7cc804 |
30
pillar/kafka/nodes.sls
Normal file
30
pillar/kafka/nodes.sls
Normal file
@@ -0,0 +1,30 @@
|
|||||||
|
{% set current_kafkanodes = salt.saltutil.runner('mine.get', tgt='G@role:so-manager or G@role:so-managersearch or G@role:so-standalone or G@role:so-receiver', fun='network.ip_addrs', tgt_type='compound') %}
|
||||||
|
{% set pillar_kafkanodes = salt['pillar.get']('kafka:nodes', default={}, merge=True) %}
|
||||||
|
|
||||||
|
{% set existing_ids = [] %}
|
||||||
|
{% for node in pillar_kafkanodes.values() %}
|
||||||
|
{% if node.get('nodeid') %}
|
||||||
|
{% do existing_ids.append(node['nodeid']) %}
|
||||||
|
{% endif %}
|
||||||
|
{% endfor %}
|
||||||
|
{% set all_possible_ids = range(1, 256)|list %}
|
||||||
|
|
||||||
|
{% set available_ids = [] %}
|
||||||
|
{% for id in all_possible_ids %}
|
||||||
|
{% if id not in existing_ids %}
|
||||||
|
{% do available_ids.append(id) %}
|
||||||
|
{% endif %}
|
||||||
|
{% endfor %}
|
||||||
|
|
||||||
|
{% set final_nodes = pillar_kafkanodes.copy() %}
|
||||||
|
|
||||||
|
{% for minionid, ip in current_kafkanodes.items() %}
|
||||||
|
{% set hostname = minionid.split('_')[0] %}
|
||||||
|
{% if hostname not in final_nodes %}
|
||||||
|
{% set new_id = available_ids.pop(0) %}
|
||||||
|
{% do final_nodes.update({hostname: {'nodeid': new_id, 'ip': ip[0]}}) %}
|
||||||
|
{% endif %}
|
||||||
|
{% endfor %}
|
||||||
|
|
||||||
|
kafka:
|
||||||
|
nodes: {{ final_nodes|tojson }}
|
||||||
@@ -61,6 +61,9 @@ base:
|
|||||||
- backup.adv_backup
|
- backup.adv_backup
|
||||||
- minions.{{ grains.id }}
|
- minions.{{ grains.id }}
|
||||||
- minions.adv_{{ grains.id }}
|
- minions.adv_{{ grains.id }}
|
||||||
|
- kafka.nodes
|
||||||
|
- kafka.soc_kafka
|
||||||
|
- kafka.adv_kafka
|
||||||
- stig.soc_stig
|
- stig.soc_stig
|
||||||
|
|
||||||
'*_sensor':
|
'*_sensor':
|
||||||
@@ -176,6 +179,9 @@ base:
|
|||||||
- minions.{{ grains.id }}
|
- minions.{{ grains.id }}
|
||||||
- minions.adv_{{ grains.id }}
|
- minions.adv_{{ grains.id }}
|
||||||
- stig.soc_stig
|
- stig.soc_stig
|
||||||
|
- kafka.nodes
|
||||||
|
- kafka.soc_kafka
|
||||||
|
- kafka.adv_kafka
|
||||||
|
|
||||||
'*_heavynode':
|
'*_heavynode':
|
||||||
- elasticsearch.auth
|
- elasticsearch.auth
|
||||||
@@ -232,6 +238,9 @@ base:
|
|||||||
- redis.adv_redis
|
- redis.adv_redis
|
||||||
- minions.{{ grains.id }}
|
- minions.{{ grains.id }}
|
||||||
- minions.adv_{{ grains.id }}
|
- minions.adv_{{ grains.id }}
|
||||||
|
- kafka.nodes
|
||||||
|
- kafka.soc_kafka
|
||||||
|
- kafka.adv_kafka
|
||||||
|
|
||||||
'*_import':
|
'*_import':
|
||||||
- secrets
|
- secrets
|
||||||
|
|||||||
@@ -101,7 +101,8 @@
|
|||||||
'utility',
|
'utility',
|
||||||
'schedule',
|
'schedule',
|
||||||
'docker_clean',
|
'docker_clean',
|
||||||
'stig'
|
'stig',
|
||||||
|
'kafka'
|
||||||
],
|
],
|
||||||
'so-managersearch': [
|
'so-managersearch': [
|
||||||
'salt.master',
|
'salt.master',
|
||||||
@@ -122,7 +123,8 @@
|
|||||||
'utility',
|
'utility',
|
||||||
'schedule',
|
'schedule',
|
||||||
'docker_clean',
|
'docker_clean',
|
||||||
'stig'
|
'stig',
|
||||||
|
'kafka'
|
||||||
],
|
],
|
||||||
'so-searchnode': [
|
'so-searchnode': [
|
||||||
'ssl',
|
'ssl',
|
||||||
@@ -156,7 +158,8 @@
|
|||||||
'schedule',
|
'schedule',
|
||||||
'tcpreplay',
|
'tcpreplay',
|
||||||
'docker_clean',
|
'docker_clean',
|
||||||
'stig'
|
'stig',
|
||||||
|
'kafka'
|
||||||
],
|
],
|
||||||
'so-sensor': [
|
'so-sensor': [
|
||||||
'ssl',
|
'ssl',
|
||||||
@@ -187,7 +190,9 @@
|
|||||||
'telegraf',
|
'telegraf',
|
||||||
'firewall',
|
'firewall',
|
||||||
'schedule',
|
'schedule',
|
||||||
'docker_clean'
|
'docker_clean',
|
||||||
|
'kafka',
|
||||||
|
'elasticsearch.ca'
|
||||||
],
|
],
|
||||||
'so-desktop': [
|
'so-desktop': [
|
||||||
'ssl',
|
'ssl',
|
||||||
|
|||||||
@@ -70,3 +70,17 @@ x509_signing_policies:
|
|||||||
- authorityKeyIdentifier: keyid,issuer:always
|
- authorityKeyIdentifier: keyid,issuer:always
|
||||||
- days_valid: 820
|
- days_valid: 820
|
||||||
- copypath: /etc/pki/issued_certs/
|
- copypath: /etc/pki/issued_certs/
|
||||||
|
kafka:
|
||||||
|
- minions: '*'
|
||||||
|
- signing_private_key: /etc/pki/ca.key
|
||||||
|
- signing_cert: /etc/pki/ca.crt
|
||||||
|
- C: US
|
||||||
|
- ST: Utah
|
||||||
|
- L: Salt Lake City
|
||||||
|
- basicConstraints: "critical CA:false"
|
||||||
|
- keyUsage: "digitalSignature, keyEncipherment"
|
||||||
|
- subjectKeyIdentifier: hash
|
||||||
|
- authorityKeyIdentifier: keyid,issuer:always
|
||||||
|
- extendedKeyUsage: "serverAuth, clientAuth"
|
||||||
|
- days_valid: 820
|
||||||
|
- copypath: /etc/pki/issued_certs/
|
||||||
|
|||||||
@@ -50,6 +50,7 @@ container_list() {
|
|||||||
"so-idh"
|
"so-idh"
|
||||||
"so-idstools"
|
"so-idstools"
|
||||||
"so-influxdb"
|
"so-influxdb"
|
||||||
|
"so-kafka"
|
||||||
"so-kibana"
|
"so-kibana"
|
||||||
"so-kratos"
|
"so-kratos"
|
||||||
"so-logstash"
|
"so-logstash"
|
||||||
|
|||||||
@@ -185,3 +185,11 @@ docker:
|
|||||||
custom_bind_mounts: []
|
custom_bind_mounts: []
|
||||||
extra_hosts: []
|
extra_hosts: []
|
||||||
extra_env: []
|
extra_env: []
|
||||||
|
'so-kafka':
|
||||||
|
final_octet: 88
|
||||||
|
port_bindings:
|
||||||
|
- 0.0.0.0:9092:9092
|
||||||
|
- 0.0.0.0:9093:9093
|
||||||
|
custom_bind_mounts: []
|
||||||
|
extra_hosts: []
|
||||||
|
extra_env: []
|
||||||
|
|||||||
@@ -65,3 +65,4 @@ docker:
|
|||||||
so-steno: *dockerOptions
|
so-steno: *dockerOptions
|
||||||
so-suricata: *dockerOptions
|
so-suricata: *dockerOptions
|
||||||
so-zeek: *dockerOptions
|
so-zeek: *dockerOptions
|
||||||
|
so-kafka: *dockerOptions
|
||||||
@@ -4,7 +4,7 @@
|
|||||||
# Elastic License 2.0.
|
# Elastic License 2.0.
|
||||||
|
|
||||||
{% from 'allowed_states.map.jinja' import allowed_states %}
|
{% from 'allowed_states.map.jinja' import allowed_states %}
|
||||||
{% if sls.split('.')[0] in allowed_states %}
|
{% if sls.split('.')[0] in allowed_states or sls in allowed_states %}
|
||||||
{% from 'vars/globals.map.jinja' import GLOBALS %}
|
{% from 'vars/globals.map.jinja' import GLOBALS %}
|
||||||
|
|
||||||
# Move our new CA over so Elastic and Logstash can use SSL with the internal CA
|
# Move our new CA over so Elastic and Logstash can use SSL with the internal CA
|
||||||
|
|||||||
@@ -27,6 +27,7 @@
|
|||||||
'so-elastic-fleet',
|
'so-elastic-fleet',
|
||||||
'so-elastic-fleet-package-registry',
|
'so-elastic-fleet-package-registry',
|
||||||
'so-influxdb',
|
'so-influxdb',
|
||||||
|
'so-kafka',
|
||||||
'so-kibana',
|
'so-kibana',
|
||||||
'so-kratos',
|
'so-kratos',
|
||||||
'so-logstash',
|
'so-logstash',
|
||||||
@@ -80,6 +81,7 @@
|
|||||||
{% set NODE_CONTAINERS = [
|
{% set NODE_CONTAINERS = [
|
||||||
'so-logstash',
|
'so-logstash',
|
||||||
'so-redis',
|
'so-redis',
|
||||||
|
'so-kafka'
|
||||||
] %}
|
] %}
|
||||||
|
|
||||||
{% elif GLOBALS.role == 'so-idh' %}
|
{% elif GLOBALS.role == 'so-idh' %}
|
||||||
|
|||||||
@@ -90,6 +90,11 @@ firewall:
|
|||||||
tcp:
|
tcp:
|
||||||
- 8086
|
- 8086
|
||||||
udp: []
|
udp: []
|
||||||
|
kafka:
|
||||||
|
tcp:
|
||||||
|
- 9092
|
||||||
|
- 9093
|
||||||
|
udp: []
|
||||||
kibana:
|
kibana:
|
||||||
tcp:
|
tcp:
|
||||||
- 5601
|
- 5601
|
||||||
@@ -364,6 +369,7 @@ firewall:
|
|||||||
- elastic_agent_update
|
- elastic_agent_update
|
||||||
- localrules
|
- localrules
|
||||||
- sensoroni
|
- sensoroni
|
||||||
|
- kafka
|
||||||
fleet:
|
fleet:
|
||||||
portgroups:
|
portgroups:
|
||||||
- elasticsearch_rest
|
- elasticsearch_rest
|
||||||
@@ -399,6 +405,7 @@ firewall:
|
|||||||
- docker_registry
|
- docker_registry
|
||||||
- influxdb
|
- influxdb
|
||||||
- sensoroni
|
- sensoroni
|
||||||
|
- kafka
|
||||||
searchnode:
|
searchnode:
|
||||||
portgroups:
|
portgroups:
|
||||||
- redis
|
- redis
|
||||||
@@ -412,6 +419,7 @@ firewall:
|
|||||||
- elastic_agent_data
|
- elastic_agent_data
|
||||||
- elastic_agent_update
|
- elastic_agent_update
|
||||||
- sensoroni
|
- sensoroni
|
||||||
|
- kafka
|
||||||
heavynode:
|
heavynode:
|
||||||
portgroups:
|
portgroups:
|
||||||
- redis
|
- redis
|
||||||
@@ -1275,14 +1283,17 @@ firewall:
|
|||||||
- beats_5044
|
- beats_5044
|
||||||
- beats_5644
|
- beats_5644
|
||||||
- elastic_agent_data
|
- elastic_agent_data
|
||||||
|
- kafka
|
||||||
searchnode:
|
searchnode:
|
||||||
portgroups:
|
portgroups:
|
||||||
- redis
|
- redis
|
||||||
- beats_5644
|
- beats_5644
|
||||||
|
- kafka
|
||||||
managersearch:
|
managersearch:
|
||||||
portgroups:
|
portgroups:
|
||||||
- redis
|
- redis
|
||||||
- beats_5644
|
- beats_5644
|
||||||
|
- kafka
|
||||||
self:
|
self:
|
||||||
portgroups:
|
portgroups:
|
||||||
- redis
|
- redis
|
||||||
|
|||||||
@@ -115,6 +115,9 @@ firewall:
|
|||||||
influxdb:
|
influxdb:
|
||||||
tcp: *tcpsettings
|
tcp: *tcpsettings
|
||||||
udp: *udpsettings
|
udp: *udpsettings
|
||||||
|
kafka:
|
||||||
|
tcp: *tcpsettings
|
||||||
|
udp: *udpsettings
|
||||||
kibana:
|
kibana:
|
||||||
tcp: *tcpsettings
|
tcp: *tcpsettings
|
||||||
udp: *udpsettings
|
udp: *udpsettings
|
||||||
@@ -932,7 +935,6 @@ firewall:
|
|||||||
portgroups: *portgroupshost
|
portgroups: *portgroupshost
|
||||||
customhostgroup9:
|
customhostgroup9:
|
||||||
portgroups: *portgroupshost
|
portgroups: *portgroupshost
|
||||||
|
|
||||||
idh:
|
idh:
|
||||||
chain:
|
chain:
|
||||||
DOCKER-USER:
|
DOCKER-USER:
|
||||||
|
|||||||
@@ -1,2 +1,3 @@
|
|||||||
global:
|
global:
|
||||||
pcapengine: STENO
|
pcapengine: STENO
|
||||||
|
pipeline: REDIS
|
||||||
@@ -36,9 +36,10 @@ global:
|
|||||||
global: True
|
global: True
|
||||||
advanced: True
|
advanced: True
|
||||||
pipeline:
|
pipeline:
|
||||||
description: Sets which pipeline technology for events to use. Currently only Redis is supported.
|
description: Sets which pipeline technology for events to use. Currently only Redis is fully supported. Kafka is experimental and requires a Security Onion Pro license.
|
||||||
|
regex: ^(REDIS|KAFKA)$
|
||||||
|
regexFailureMessage: You must enter either REDIS or KAFKA.
|
||||||
global: True
|
global: True
|
||||||
readonly: True
|
|
||||||
advanced: True
|
advanced: True
|
||||||
repo_host:
|
repo_host:
|
||||||
description: Specify the host where operating system packages will be served from.
|
description: Specify the host where operating system packages will be served from.
|
||||||
|
|||||||
106
salt/kafka/config.sls
Normal file
106
salt/kafka/config.sls
Normal file
@@ -0,0 +1,106 @@
|
|||||||
|
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
|
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||||
|
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
|
# Elastic License 2.0.
|
||||||
|
|
||||||
|
{% from 'allowed_states.map.jinja' import allowed_states %}
|
||||||
|
{% if sls.split('.')[0] in allowed_states %}
|
||||||
|
{% from 'vars/globals.map.jinja' import GLOBALS %}
|
||||||
|
|
||||||
|
{% set kafka_ips_logstash = [] %}
|
||||||
|
{% set kafka_ips_kraft = [] %}
|
||||||
|
{% set kafkanodes = salt['pillar.get']('kafka:nodes', {}) %}
|
||||||
|
{% set kafka_ip = GLOBALS.node_ip %}
|
||||||
|
|
||||||
|
{# Create list for kafka <-> logstash/searchnode communications #}
|
||||||
|
{% for node, node_data in kafkanodes.items() %}
|
||||||
|
{% do kafka_ips_logstash.append(node_data['ip'] + ":9092") %}
|
||||||
|
{% endfor %}
|
||||||
|
{% set kafka_server_list = "','".join(kafka_ips_logstash) %}
|
||||||
|
|
||||||
|
{# Create a list for kraft controller <-> kraft controller communications. Used for Kafka metadata management #}
|
||||||
|
{% for node, node_data in kafkanodes.items() %}
|
||||||
|
{% do kafka_ips_kraft.append(node_data['nodeid'] ~ "@" ~ node_data['ip'] ~ ":9093") %}
|
||||||
|
{% endfor %}
|
||||||
|
{% set kraft_server_list = "','".join(kafka_ips_kraft) %}
|
||||||
|
|
||||||
|
include:
|
||||||
|
- ssl
|
||||||
|
|
||||||
|
kafka_group:
|
||||||
|
group.present:
|
||||||
|
- name: kafka
|
||||||
|
- gid: 960
|
||||||
|
|
||||||
|
kafka:
|
||||||
|
user.present:
|
||||||
|
- uid: 960
|
||||||
|
- gid: 960
|
||||||
|
|
||||||
|
{# Future tools to query kafka directly / show consumer groups
|
||||||
|
kafka_sbin_tools:
|
||||||
|
file.recurse:
|
||||||
|
- name: /usr/sbin
|
||||||
|
- source: salt://kafka/tools/sbin
|
||||||
|
- user: 960
|
||||||
|
- group: 960
|
||||||
|
- file_mode: 755 #}
|
||||||
|
|
||||||
|
kafka_sbin_jinja_tools:
|
||||||
|
file.recurse:
|
||||||
|
- name: /usr/sbin
|
||||||
|
- source: salt://kafka/tools/sbin_jinja
|
||||||
|
- user: 960
|
||||||
|
- group: 960
|
||||||
|
- file_mode: 755
|
||||||
|
- template: jinja
|
||||||
|
- defaults:
|
||||||
|
GLOBALS: {{ GLOBALS }}
|
||||||
|
|
||||||
|
kafka_log_dir:
|
||||||
|
file.directory:
|
||||||
|
- name: /opt/so/log/kafka
|
||||||
|
- user: 960
|
||||||
|
- group: 960
|
||||||
|
- makedirs: True
|
||||||
|
|
||||||
|
kafka_data_dir:
|
||||||
|
file.directory:
|
||||||
|
- name: /nsm/kafka/data
|
||||||
|
- user: 960
|
||||||
|
- group: 960
|
||||||
|
- makedirs: True
|
||||||
|
|
||||||
|
kafka_generate_keystore:
|
||||||
|
cmd.run:
|
||||||
|
- name: "/usr/sbin/so-kafka-generate-keystore"
|
||||||
|
- onchanges:
|
||||||
|
- x509: /etc/pki/kafka.key
|
||||||
|
|
||||||
|
kafka_keystore_perms:
|
||||||
|
file.managed:
|
||||||
|
- replace: False
|
||||||
|
- name: /etc/pki/kafka.jks
|
||||||
|
- mode: 640
|
||||||
|
- user: 960
|
||||||
|
- group: 939
|
||||||
|
|
||||||
|
{% for sc in ['server', 'client'] %}
|
||||||
|
kafka_kraft_{{sc}}_properties:
|
||||||
|
file.managed:
|
||||||
|
- source: salt://kafka/etc/{{sc}}.properties.jinja
|
||||||
|
- name: /opt/so/conf/kafka/{{sc}}.properties
|
||||||
|
- template: jinja
|
||||||
|
- user: 960
|
||||||
|
- group: 960
|
||||||
|
- makedirs: True
|
||||||
|
- show_changes: False
|
||||||
|
{% endfor %}
|
||||||
|
|
||||||
|
{% else %}
|
||||||
|
|
||||||
|
{{sls}}_state_not_allowed:
|
||||||
|
test.fail_without_changes:
|
||||||
|
- name: {{sls}}_state_not_allowed
|
||||||
|
|
||||||
|
{% endif %}
|
||||||
39
salt/kafka/defaults.yaml
Normal file
39
salt/kafka/defaults.yaml
Normal file
@@ -0,0 +1,39 @@
|
|||||||
|
kafka:
|
||||||
|
enabled: False
|
||||||
|
config:
|
||||||
|
server:
|
||||||
|
advertised_x_listeners:
|
||||||
|
auto_x_create_x_topics_x_enable: true
|
||||||
|
controller_x_listener_x_names: CONTROLLER
|
||||||
|
controller_x_quorum_x_voters:
|
||||||
|
inter_x_broker_x_listener_x_name: BROKER
|
||||||
|
listeners: BROKER://0.0.0.0:9092,CONTROLLER://0.0.0.0:9093
|
||||||
|
listener_x_security_x_protocol_x_map: CONTROLLER:SSL,BROKER:SSL
|
||||||
|
log_x_dirs: /nsm/kafka/data
|
||||||
|
log_x_retention_x_check_x_interval_x_ms: 300000
|
||||||
|
log_x_retention_x_hours: 168
|
||||||
|
log_x_segment_x_bytes: 1073741824
|
||||||
|
node_x_id:
|
||||||
|
num_x_io_x_threads: 8
|
||||||
|
num_x_network_x_threads: 3
|
||||||
|
num_x_partitions: 1
|
||||||
|
num_x_recovery_x_threads_x_per_x_data_x_dir: 1
|
||||||
|
offsets_x_topic_x_replication_x_factor: 1
|
||||||
|
process_x_roles: broker
|
||||||
|
socket_x_receive_x_buffer_x_bytes: 102400
|
||||||
|
socket_x_request_x_max_x_bytes: 104857600
|
||||||
|
socket_x_send_x_buffer_x_bytes: 102400
|
||||||
|
ssl_x_keystore_x_location: /etc/pki/kafka.jks
|
||||||
|
ssl_x_keystore_x_password: changeit
|
||||||
|
ssl_x_keystore_x_type: JKS
|
||||||
|
ssl_x_truststore_x_location: /etc/pki/java/sos/cacerts
|
||||||
|
ssl_x_truststore_x_password: changeit
|
||||||
|
transaction_x_state_x_log_x_min_x_isr: 1
|
||||||
|
transaction_x_state_x_log_x_replication_x_factor: 1
|
||||||
|
client:
|
||||||
|
security_x_protocol: SSL
|
||||||
|
ssl_x_truststore_x_location: /etc/pki/java/sos/cacerts
|
||||||
|
ssl_x_truststore_x_password: changeit
|
||||||
|
ssl_x_keystore_x_location: /etc/pki/kafka.jks
|
||||||
|
ssl_x_keystore_x_type: JKS
|
||||||
|
ssl_x_keystore_x_password: changeit
|
||||||
16
salt/kafka/disabled.sls
Normal file
16
salt/kafka/disabled.sls
Normal file
@@ -0,0 +1,16 @@
|
|||||||
|
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
|
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||||
|
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
|
# Elastic License 2.0.
|
||||||
|
|
||||||
|
include:
|
||||||
|
- kafka.sostatus
|
||||||
|
|
||||||
|
so-kafka:
|
||||||
|
docker_container.absent:
|
||||||
|
- force: True
|
||||||
|
|
||||||
|
so-kafka_so-status.disabled:
|
||||||
|
file.comment:
|
||||||
|
- name: /opt/so/conf/so-status/so-status.conf
|
||||||
|
- regex: ^so-kafka$
|
||||||
64
salt/kafka/enabled.sls
Normal file
64
salt/kafka/enabled.sls
Normal file
@@ -0,0 +1,64 @@
|
|||||||
|
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
|
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||||
|
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
|
# Elastic License 2.0.
|
||||||
|
|
||||||
|
{% from 'allowed_states.map.jinja' import allowed_states %}
|
||||||
|
{% if sls.split('.')[0] in allowed_states %}
|
||||||
|
{% from 'vars/globals.map.jinja' import GLOBALS %}
|
||||||
|
{% from 'docker/docker.map.jinja' import DOCKER %}
|
||||||
|
{% set KAFKANODES = salt['pillar.get']('kafka:nodes', {}) %}
|
||||||
|
|
||||||
|
include:
|
||||||
|
- elasticsearch.ca
|
||||||
|
- kafka.sostatus
|
||||||
|
- kafka.config
|
||||||
|
- kafka.storage
|
||||||
|
|
||||||
|
so-kafka:
|
||||||
|
docker_container.running:
|
||||||
|
- image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-kafka:{{ GLOBALS.so_version }}
|
||||||
|
- hostname: so-kafka
|
||||||
|
- name: so-kafka
|
||||||
|
- networks:
|
||||||
|
- sobridge:
|
||||||
|
- ipv4_address: {{ DOCKER.containers['so-kafka'].ip }}
|
||||||
|
- user: kafka
|
||||||
|
- environment:
|
||||||
|
- KAFKA_HEAP_OPTS=-Xmx2G -Xms1G
|
||||||
|
- extra_hosts:
|
||||||
|
{% for node in KAFKANODES %}
|
||||||
|
- {{ node }}:{{ KAFKANODES[node].ip }}
|
||||||
|
{% endfor %}
|
||||||
|
{% if DOCKER.containers['so-kafka'].extra_hosts %}
|
||||||
|
{% for XTRAHOST in DOCKER.containers['so-kafka'].extra_hosts %}
|
||||||
|
- {{ XTRAHOST }}
|
||||||
|
{% endfor %}
|
||||||
|
{% endif %}
|
||||||
|
- port_bindings:
|
||||||
|
{% for BINDING in DOCKER.containers['so-kafka'].port_bindings %}
|
||||||
|
- {{ BINDING }}
|
||||||
|
{% endfor %}
|
||||||
|
- binds:
|
||||||
|
- /etc/pki/kafka.jks:/etc/pki/kafka.jks
|
||||||
|
- /opt/so/conf/ca/cacerts:/etc/pki/java/sos/cacerts
|
||||||
|
- /nsm/kafka/data/:/nsm/kafka/data/:rw
|
||||||
|
- /opt/so/conf/kafka/server.properties:/kafka/config/kraft/server.properties
|
||||||
|
- /opt/so/conf/kafka/client.properties:/kafka/config/kraft/client.properties
|
||||||
|
- watch:
|
||||||
|
{% for sc in ['server', 'client'] %}
|
||||||
|
- file: kafka_kraft_{{sc}}_properties
|
||||||
|
{% endfor %}
|
||||||
|
|
||||||
|
delete_so-kafka_so-status.disabled:
|
||||||
|
file.uncomment:
|
||||||
|
- name: /opt/so/conf/so-status/so-status.conf
|
||||||
|
- regex: ^so-kafka$
|
||||||
|
|
||||||
|
{% else %}
|
||||||
|
|
||||||
|
{{sls}}_state_not_allowed:
|
||||||
|
test.fail_without_changes:
|
||||||
|
- name: {{sls}}_state_not_allowed
|
||||||
|
|
||||||
|
{% endif %}
|
||||||
7
salt/kafka/etc/client.properties.jinja
Normal file
7
salt/kafka/etc/client.properties.jinja
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
|
or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||||
|
https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
|
Elastic License 2.0. #}
|
||||||
|
|
||||||
|
{% from 'kafka/map.jinja' import KAFKAMERGED -%}
|
||||||
|
{{ KAFKAMERGED.config.client | yaml(False) | replace("_x_", ".") }}
|
||||||
7
salt/kafka/etc/server.properties.jinja
Normal file
7
salt/kafka/etc/server.properties.jinja
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
|
or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||||
|
https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
|
Elastic License 2.0. #}
|
||||||
|
|
||||||
|
{% from 'kafka/map.jinja' import KAFKAMERGED -%}
|
||||||
|
{{ KAFKAMERGED.config.server | yaml(False) | replace("_x_", ".") }}
|
||||||
14
salt/kafka/init.sls
Normal file
14
salt/kafka/init.sls
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
|
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||||
|
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
|
# Elastic License 2.0.
|
||||||
|
|
||||||
|
{% from 'kafka/map.jinja' import KAFKAMERGED %}
|
||||||
|
{% from 'vars/globals.map.jinja' import GLOBALS %}
|
||||||
|
|
||||||
|
include:
|
||||||
|
{% if GLOBALS.pipeline == "KAFKA" and KAFKAMERGED.enabled %}
|
||||||
|
- kafka.enabled
|
||||||
|
{% else %}
|
||||||
|
- kafka.disabled
|
||||||
|
{% endif %}
|
||||||
20
salt/kafka/map.jinja
Normal file
20
salt/kafka/map.jinja
Normal file
@@ -0,0 +1,20 @@
|
|||||||
|
{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
|
or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||||
|
https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
|
Elastic License 2.0. #}
|
||||||
|
|
||||||
|
{% import_yaml 'kafka/defaults.yaml' as KAFKADEFAULTS %}
|
||||||
|
{% set KAFKAMERGED = salt['pillar.get']('kafka', KAFKADEFAULTS.kafka, merge=True) %}
|
||||||
|
{% from 'vars/globals.map.jinja' import GLOBALS %}
|
||||||
|
|
||||||
|
{% do KAFKAMERGED.config.server.update({ 'node_x_id': salt['pillar.get']('kafka:nodes:' ~ GLOBALS.hostname ~ ':nodeid')}) %}
|
||||||
|
{% do KAFKAMERGED.config.server.update({'advertised_x_listeners': 'BROKER://' ~ GLOBALS.node_ip ~ ':9092'}) %}
|
||||||
|
|
||||||
|
{% set nodes = salt['pillar.get']('kafka:nodes', {}) %}
|
||||||
|
{% set combined = [] %}
|
||||||
|
{% for hostname, data in nodes.items() %}
|
||||||
|
{% do combined.append(data.nodeid ~ "@" ~ hostname ~ ":9093") %}
|
||||||
|
{% endfor %}
|
||||||
|
{% set kraft_controller_quorum_voters = ','.join(combined) %}
|
||||||
|
|
||||||
|
{% do KAFKAMERGED.config.server.update({'controller_x_quorum_x_voters': kraft_controller_quorum_voters}) %}
|
||||||
170
salt/kafka/soc_kafka.yaml
Normal file
170
salt/kafka/soc_kafka.yaml
Normal file
@@ -0,0 +1,170 @@
|
|||||||
|
kafka:
|
||||||
|
enabled:
|
||||||
|
description: Enable or disable Kafka.
|
||||||
|
helpLink: kafka.html
|
||||||
|
cluster_id:
|
||||||
|
description: The ID of the Kafka cluster.
|
||||||
|
readonly: True
|
||||||
|
advanced: True
|
||||||
|
sensitive: True
|
||||||
|
helpLink: kafka.html
|
||||||
|
config:
|
||||||
|
server:
|
||||||
|
advertised_x_listeners:
|
||||||
|
description: Specify the list of listeners (hostname and port) that Kafka brokers provide to clients for communication.
|
||||||
|
title: advertised.listeners
|
||||||
|
helpLink: kafka.html
|
||||||
|
auto_x_create_x_topics_x_enable:
|
||||||
|
description: Enable the auto creation of topics.
|
||||||
|
title: auto.create.topics.enable
|
||||||
|
forcedType: bool
|
||||||
|
helpLink: kafka.html
|
||||||
|
controller_x_listener_x_names:
|
||||||
|
description: Set listeners used by the controller in a comma-separated list.
|
||||||
|
title: controller.listener.names
|
||||||
|
helpLink: kafka.html
|
||||||
|
controller_x_quorum_x_voters:
|
||||||
|
description: A comma-separated list of ID and endpoint information mapped for a set of voters.
|
||||||
|
title: controller.quorum.voters
|
||||||
|
helpLink: kafka.html
|
||||||
|
inter_x_broker_x_listener_x_name:
|
||||||
|
description: The name of the listener used for inter-broker communication.
|
||||||
|
title: inter.broker.listener.name
|
||||||
|
helpLink: kafka.html
|
||||||
|
listeners:
|
||||||
|
description: Set of URIs that are listened on and the listener names, in a comma-separated list.
|
||||||
|
helpLink: kafka.html
|
||||||
|
listener_x_security_x_protocol_x_map:
|
||||||
|
description: Comma-separated mapping of listener names and security protocols.
|
||||||
|
title: listener.security.protocol.map
|
||||||
|
helpLink: kafka.html
|
||||||
|
log_x_dirs:
|
||||||
|
description: Where Kafka logs are stored within the Docker container.
|
||||||
|
title: log.dirs
|
||||||
|
helpLink: kafka.html
|
||||||
|
log_x_retention_x_check_x_interval_x_ms:
|
||||||
|
description: Frequency at which log files are checked if they are qualified for deletion.
|
||||||
|
title: log.retention.check.interval.ms
|
||||||
|
helpLink: kafka.html
|
||||||
|
log_x_retention_x_hours:
|
||||||
|
description: How long, in hours, a log file is kept.
|
||||||
|
title: log.retention.hours
|
||||||
|
forcedType: int
|
||||||
|
helpLink: kafka.html
|
||||||
|
log_x_segment_x_bytes:
|
||||||
|
description: The maximum allowable size for a log file.
|
||||||
|
title: log.segment.bytes
|
||||||
|
forcedType: int
|
||||||
|
helpLink: kafka.html
|
||||||
|
node_x_id:
|
||||||
|
description: The node ID corresponds to the roles performed by this process whenever process.roles is populated.
|
||||||
|
title: node.id
|
||||||
|
forcedType: int
|
||||||
|
readonly: True
|
||||||
|
helpLink: kafka.html
|
||||||
|
num_x_io_x_threads:
|
||||||
|
description: The number of threads used by Kafka.
|
||||||
|
title: num.io.threads
|
||||||
|
forcedType: int
|
||||||
|
helpLink: kafka.html
|
||||||
|
num_x_network_x_threads:
|
||||||
|
description: The number of threads used for network communication.
|
||||||
|
title: num.network.threads
|
||||||
|
forcedType: int
|
||||||
|
helpLink: kafka.html
|
||||||
|
num_x_partitions:
|
||||||
|
description: The number of log partitions assigned per topic.
|
||||||
|
title: num.partitions
|
||||||
|
forcedType: int
|
||||||
|
helpLink: kafka.html
|
||||||
|
num_x_recovery_x_threads_x_per_x_data_x_dir:
|
||||||
|
description: The number of threads used for log recovery at startup and flushing at shutdown. This amount of threads is used per data directory.
|
||||||
|
title: num.recovery.threads.per.data.dir
|
||||||
|
forcedType: int
|
||||||
|
helpLink: kafka.html
|
||||||
|
offsets_x_topic_x_replication_x_factor:
|
||||||
|
description: The offsets topic replication factor.
|
||||||
|
title: offsets.topic.replication.factor
|
||||||
|
forcedType: int
|
||||||
|
helpLink: kafka.html
|
||||||
|
process_x_roles:
|
||||||
|
description: The roles the process performs. Use a comma-separated list if multiple.
|
||||||
|
title: process.roles
|
||||||
|
helpLink: kafka.html
|
||||||
|
socket_x_receive_x_buffer_x_bytes:
|
||||||
|
description: Size, in bytes of the SO_RCVBUF buffer. A value of -1 will use the OS default.
|
||||||
|
title: socket.receive.buffer.bytes
|
||||||
|
#forcedType: int - soc needs to allow -1 as an int before we can use this
|
||||||
|
helpLink: kafka.html
|
||||||
|
socket_x_request_x_max_x_bytes:
|
||||||
|
description: The maximum bytes allowed for a request to the socket.
|
||||||
|
title: socket.request.max.bytes
|
||||||
|
forcedType: int
|
||||||
|
helpLink: kafka.html
|
||||||
|
socket_x_send_x_buffer_x_bytes:
|
||||||
|
description: Size, in bytes of the SO_SNDBUF buffer. A value of -1 will use the OS default.
|
||||||
|
title: socket.send.buffer.byte
|
||||||
|
#forcedType: int - soc needs to allow -1 as an int before we can use this
|
||||||
|
helpLink: kafka.html
|
||||||
|
ssl_x_keystore_x_location:
|
||||||
|
description: The key store file location within the Docker container.
|
||||||
|
title: ssl.keystore.location
|
||||||
|
helpLink: kafka.html
|
||||||
|
ssl_x_keystore_x_password:
|
||||||
|
description: The key store file password. Invalid for PEM format.
|
||||||
|
title: ssl.keystore.password
|
||||||
|
sensitive: True
|
||||||
|
helpLink: kafka.html
|
||||||
|
ssl_x_keystore_x_type:
|
||||||
|
description: The key store file format.
|
||||||
|
title: ssl.keystore.type
|
||||||
|
regex: ^(JKS|PKCS12|PEM)$
|
||||||
|
helpLink: kafka.html
|
||||||
|
ssl_x_truststore_x_location:
|
||||||
|
description: The trust store file location within the Docker container.
|
||||||
|
title: ssl.truststore.location
|
||||||
|
helpLink: kafka.html
|
||||||
|
ssl_x_truststore_x_password:
|
||||||
|
description: The trust store file password. If null, the trust store file is still used, but integrity checking is disabled. Invalid for PEM format.
|
||||||
|
title: ssl.truststore.password
|
||||||
|
sensitive: True
|
||||||
|
helpLink: kafka.html
|
||||||
|
transaction_x_state_x_log_x_min_x_isr:
|
||||||
|
description: Overrides min.insync.replicas for the transaction topic. When a producer configures acks to "all" (or "-1"), this setting determines the minimum number of replicas required to acknowledge a write as successful. Failure to meet this minimum triggers an exception (either NotEnoughReplicas or NotEnoughReplicasAfterAppend). When used in conjunction, min.insync.replicas and acks enable stronger durability guarantees. For instance, creating a topic with a replication factor of 3, setting min.insync.replicas to 2, and using acks of "all" ensures that the producer raises an exception if a majority of replicas fail to receive a write.
|
||||||
|
title: transaction.state.log.min.isr
|
||||||
|
forcedType: int
|
||||||
|
helpLink: kafka.html
|
||||||
|
transaction_x_state_x_log_x_replication_x_factor:
|
||||||
|
description: Set the replication factor higher for the transaction topic to ensure availability. Internal topic creation will not proceed until the cluster size satisfies this replication factor prerequisite.
|
||||||
|
title: transaction.state.log.replication.factor
|
||||||
|
forcedType: int
|
||||||
|
helpLink: kafka.html
|
||||||
|
client:
|
||||||
|
security_x_protocol:
|
||||||
|
description: 'Broker communication protocol. Options are: SASL_SSL, PLAINTEXT, SSL, SASL_PLAINTEXT'
|
||||||
|
title: security.protocol
|
||||||
|
regex: ^(SASL_SSL|PLAINTEXT|SSL|SASL_PLAINTEXT)$
|
||||||
|
helpLink: kafka.html
|
||||||
|
ssl_x_keystore_x_location:
|
||||||
|
description: The key store file location within the Docker container.
|
||||||
|
title: ssl.keystore.location
|
||||||
|
helpLink: kafka.html
|
||||||
|
ssl_x_keystore_x_password:
|
||||||
|
description: The key store file password. Invalid for PEM format.
|
||||||
|
title: ssl.keystore.password
|
||||||
|
sensitive: True
|
||||||
|
helpLink: kafka.html
|
||||||
|
ssl_x_keystore_x_type:
|
||||||
|
description: The key store file format.
|
||||||
|
title: ssl.keystore.type
|
||||||
|
regex: ^(JKS|PKCS12|PEM)$
|
||||||
|
helpLink: kafka.html
|
||||||
|
ssl_x_truststore_x_location:
|
||||||
|
description: The trust store file location within the Docker container.
|
||||||
|
title: ssl.truststore.location
|
||||||
|
helpLink: kafka.html
|
||||||
|
ssl_x_truststore_x_password:
|
||||||
|
description: The trust store file password. If null, the trust store file is still used, but integrity checking is disabled. Invalid for PEM format.
|
||||||
|
title: ssl.truststore.password
|
||||||
|
sensitive: True
|
||||||
|
helpLink: kafka.html
|
||||||
21
salt/kafka/sostatus.sls
Normal file
21
salt/kafka/sostatus.sls
Normal file
@@ -0,0 +1,21 @@
|
|||||||
|
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
|
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||||
|
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
|
# Elastic License 2.0.
|
||||||
|
|
||||||
|
{% from 'allowed_states.map.jinja' import allowed_states %}
|
||||||
|
{% if sls.split('.')[0] in allowed_states %}
|
||||||
|
|
||||||
|
append_so-kafka_so-status.conf:
|
||||||
|
file.append:
|
||||||
|
- name: /opt/so/conf/so-status/so-status.conf
|
||||||
|
- text: so-kafka
|
||||||
|
- unless: grep -q so-kafka /opt/so/conf/so-status/so-status.conf
|
||||||
|
|
||||||
|
{% else %}
|
||||||
|
|
||||||
|
{{sls}}_state_not_allowed:
|
||||||
|
test.fail_without_changes:
|
||||||
|
- name: {{sls}}_state_not_allowed
|
||||||
|
|
||||||
|
{% endif %}
|
||||||
38
salt/kafka/storage.sls
Normal file
38
salt/kafka/storage.sls
Normal file
@@ -0,0 +1,38 @@
|
|||||||
|
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
|
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||||
|
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
|
# Elastic License 2.0.
|
||||||
|
|
||||||
|
{% from 'allowed_states.map.jinja' import allowed_states %}
|
||||||
|
{% if sls.split('.')[0] in allowed_states %}
|
||||||
|
{% from 'vars/globals.map.jinja' import GLOBALS %}
|
||||||
|
{% set kafka_cluster_id = salt['pillar.get']('kafka:cluster_id', default=None) %}
|
||||||
|
|
||||||
|
{% if GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone'] %}
|
||||||
|
{% if kafka_cluster_id is none %}
|
||||||
|
generate_kafka_cluster_id:
|
||||||
|
cmd.run:
|
||||||
|
- name: /usr/sbin/so-kafka-clusterid
|
||||||
|
{% endif %}
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
{# Initialize kafka storage if it doesn't already exist. Just looking for meta.properties in /nsm/kafka/data #}
|
||||||
|
{% if not salt['file.file_exists']('/nsm/kafka/data/meta.properties') %}
|
||||||
|
kafka_storage_init:
|
||||||
|
cmd.run:
|
||||||
|
- name: |
|
||||||
|
docker run -v /nsm/kafka/data:/nsm/kafka/data -v /opt/so/conf/kafka/server.properties:/kafka/config/kraft/newserver.properties --name so-kafkainit --user root --entrypoint /kafka/bin/kafka-storage.sh {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-kafka:{{ GLOBALS.so_version }} format -t {{ kafka_cluster_id }} -c /kafka/config/kraft/newserver.properties
|
||||||
|
kafka_rm_kafkainit:
|
||||||
|
cmd.run:
|
||||||
|
- name: |
|
||||||
|
docker rm so-kafkainit
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
|
||||||
|
{% else %}
|
||||||
|
|
||||||
|
{{sls}}_state_not_allowed:
|
||||||
|
test.fail_without_changes:
|
||||||
|
- name: {{sls}}_state_not_allowed
|
||||||
|
|
||||||
|
{% endif %}
|
||||||
13
salt/kafka/tools/sbin_jinja/so-kafka-generate-keystore
Normal file
13
salt/kafka/tools/sbin_jinja/so-kafka-generate-keystore
Normal file
@@ -0,0 +1,13 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
#
|
||||||
|
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
|
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||||
|
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
|
# Elastic License 2.0.
|
||||||
|
|
||||||
|
. /usr/sbin/so-common
|
||||||
|
|
||||||
|
# Generate a new keystore
|
||||||
|
docker run -v /etc/pki/kafka.p12:/etc/pki/kafka.p12 --name so-kafka-keystore --user root --entrypoint keytool {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-kafka:{{ GLOBALS.so_version }} -importkeystore -srckeystore /etc/pki/kafka.p12 -srcstoretype PKCS12 -srcstorepass changeit -destkeystore /etc/pki/kafka.jks -deststoretype JKS -deststorepass changeit -noprompt
|
||||||
|
docker cp so-kafka-keystore:/etc/pki/kafka.jks /etc/pki/kafka.jks
|
||||||
|
docker rm so-kafka-keystore
|
||||||
@@ -78,6 +78,7 @@ so-logstash:
|
|||||||
{% if GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-searchnode' ] %}
|
{% if GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-searchnode' ] %}
|
||||||
- /opt/so/conf/ca/cacerts:/etc/pki/ca-trust/extracted/java/cacerts:ro
|
- /opt/so/conf/ca/cacerts:/etc/pki/ca-trust/extracted/java/cacerts:ro
|
||||||
- /opt/so/conf/ca/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro
|
- /opt/so/conf/ca/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro
|
||||||
|
- /etc/pki/kafka-logstash.p12:/usr/share/logstash/kafka-logstash.p12:ro
|
||||||
{% endif %}
|
{% endif %}
|
||||||
{% if GLOBALS.role == 'so-eval' %}
|
{% if GLOBALS.role == 'so-eval' %}
|
||||||
- /nsm/zeek:/nsm/zeek:ro
|
- /nsm/zeek:/nsm/zeek:ro
|
||||||
|
|||||||
@@ -4,9 +4,10 @@
|
|||||||
# Elastic License 2.0.
|
# Elastic License 2.0.
|
||||||
|
|
||||||
{% from 'logstash/map.jinja' import LOGSTASH_MERGED %}
|
{% from 'logstash/map.jinja' import LOGSTASH_MERGED %}
|
||||||
|
{% from 'kafka/map.jinja' import KAFKAMERGED %}
|
||||||
|
|
||||||
include:
|
include:
|
||||||
{% if LOGSTASH_MERGED.enabled %}
|
{% if LOGSTASH_MERGED.enabled and not KAFKAMERGED.enabled %}
|
||||||
- logstash.enabled
|
- logstash.enabled
|
||||||
{% else %}
|
{% else %}
|
||||||
- logstash.disabled
|
- logstash.disabled
|
||||||
|
|||||||
@@ -0,0 +1,35 @@
|
|||||||
|
{% set kafka_brokers = salt['pillar.get']('logstash:nodes:receiver', {}) %}
|
||||||
|
{% set kafka_on_mngr = salt ['pillar.get']('logstash:nodes:manager', {}) %}
|
||||||
|
{% set broker_ips = [] %}
|
||||||
|
{% for node, node_data in kafka_brokers.items() %}
|
||||||
|
{% do broker_ips.append(node_data['ip'] + ":9092") %}
|
||||||
|
{% endfor %}
|
||||||
|
{% for node, node_data in kafka_on_mngr.items() %}
|
||||||
|
{% do broker_ips.append(node_data['ip'] + ":9092") %}
|
||||||
|
{% endfor %}
|
||||||
|
{% set bootstrap_servers = "','".join(broker_ips) %}
|
||||||
|
|
||||||
|
input {
|
||||||
|
kafka {
|
||||||
|
codec => json
|
||||||
|
topics => ['default-logs', 'kratos-logs', 'soc-logs', 'strelka-logs', 'suricata-logs', 'zeek-logs']
|
||||||
|
group_id => 'searchnodes'
|
||||||
|
client_id => '{{ GLOBALS.hostname }}'
|
||||||
|
security_protocol => 'SSL'
|
||||||
|
bootstrap_servers => '{{ bootstrap_servers }}'
|
||||||
|
ssl_keystore_location => '/usr/share/logstash/kafka-logstash.p12'
|
||||||
|
ssl_keystore_password => 'changeit'
|
||||||
|
ssl_keystore_type => 'PKCS12'
|
||||||
|
ssl_truststore_location => '/etc/pki/ca-trust/extracted/java/cacerts'
|
||||||
|
ssl_truststore_password => 'changeit'
|
||||||
|
decorate_events => true
|
||||||
|
tags => [ "elastic-agent", "input-{{ GLOBALS.hostname}}", "kafka" ]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
filter {
|
||||||
|
if ![metadata] {
|
||||||
|
mutate {
|
||||||
|
rename => { "@metadata" => "metadata" }
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
29
salt/manager/tools/sbin/so-kafka-clusterid
Normal file
29
salt/manager/tools/sbin/so-kafka-clusterid
Normal file
@@ -0,0 +1,29 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
|
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||||
|
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
|
# Elastic License 2.0.
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
### THIS SCRIPT AND SALT STATE REFERENCES TO THIS SCRIPT TO BE REMOVED ONCE INITIAL TESTING IS DONE - THESE VALUES WILL GENERATED IN SETUP AND SOUP
|
||||||
|
|
||||||
|
|
||||||
|
local_salt_dir=/opt/so/saltstack/local
|
||||||
|
|
||||||
|
if [[ -f /usr/sbin/so-common ]]; then
|
||||||
|
source /usr/sbin/so-common
|
||||||
|
else
|
||||||
|
source $(dirname $0)/../../../common/tools/sbin/so-common
|
||||||
|
fi
|
||||||
|
|
||||||
|
if ! grep -q "^ cluster_id:" $local_salt_dir/pillar/kafka/soc_kafka.sls; then
|
||||||
|
kafka_cluster_id=$(get_random_value 22)
|
||||||
|
echo 'kafka: ' > $local_salt_dir/pillar/kafka/soc_kafka.sls
|
||||||
|
echo ' cluster_id: '$kafka_cluster_id >> $local_salt_dir/pillar/kafka/soc_kafka.sls
|
||||||
|
|
||||||
|
if ! grep -q "^ kafkapass:" $local_salt_dir/pillar/kafka/soc_kafka.sls; then
|
||||||
|
kafkapass=$(get_random_value)
|
||||||
|
echo ' kafkapass: '$kafkapass >> $local_salt_dir/pillar/kafka/soc_kafka.sls
|
||||||
|
fi
|
||||||
@@ -438,7 +438,25 @@ post_to_2.4.60() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
post_to_2.4.70() {
|
post_to_2.4.70() {
|
||||||
echo "Nothing to apply"
|
# Global pipeline changes to REDIS or KAFKA
|
||||||
|
echo "Removing global.pipeline pillar configuration"
|
||||||
|
sed -i '/pipeline:/d' /opt/so/saltstack/local/pillar/global/soc_global.sls
|
||||||
|
|
||||||
|
# Kafka configuration
|
||||||
|
mkdir -p /opt/so/saltstack/local/pillar/kafka
|
||||||
|
touch /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls
|
||||||
|
touch /opt/so/saltstack/local/pillar/kafka/adv_kafka.sls
|
||||||
|
echo 'kafka: ' > /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls
|
||||||
|
|
||||||
|
if ! grep -q "^ cluster_id:" $local_salt_dir/pillar/kafka/soc_kafka.sls; then
|
||||||
|
kafka_cluster_id=$(get_random_value 22)
|
||||||
|
echo ' cluster_id: '$kafka_cluster_id >> $local_salt_dir/pillar/kafka/soc_kafka.sls
|
||||||
|
|
||||||
|
if ! grep -q "^ certpass:" $local_salt_dir/pillar/kafka/soc_kafka.sls; then
|
||||||
|
kafkapass=$(get_random_value)
|
||||||
|
echo ' certpass: '$kafkapass >> $local_salt_dir/pillar/kafka/soc_kafka.sls
|
||||||
|
fi
|
||||||
|
|
||||||
POSTVERSION=2.4.70
|
POSTVERSION=2.4.70
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -4,9 +4,10 @@
|
|||||||
# Elastic License 2.0.
|
# Elastic License 2.0.
|
||||||
|
|
||||||
{% from 'redis/map.jinja' import REDISMERGED %}
|
{% from 'redis/map.jinja' import REDISMERGED %}
|
||||||
|
{% from 'vars/globals.map.jinja' import GLOBALS %}
|
||||||
|
|
||||||
include:
|
include:
|
||||||
{% if REDISMERGED.enabled %}
|
{% if GLOBALS.pipeline == "REDIS" and REDISMERGED.enabled %}
|
||||||
- redis.enabled
|
- redis.enabled
|
||||||
{% else %}
|
{% else %}
|
||||||
- redis.disabled
|
- redis.disabled
|
||||||
|
|||||||
125
salt/salt/engines/master/pillarWatch.py
Normal file
125
salt/salt/engines/master/pillarWatch.py
Normal file
@@ -0,0 +1,125 @@
|
|||||||
|
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
|
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||||
|
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
|
# Elastic License 2.0.
|
||||||
|
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
import logging
|
||||||
|
import re
|
||||||
|
log = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
# will need this in future versions of this engine
|
||||||
|
#import salt.client
|
||||||
|
#local = salt.client.LocalClient()
|
||||||
|
|
||||||
|
def start(fpa, interval=10):
|
||||||
|
log.info("pillarWatch engine: ##### checking watched pillars for changes #####")
|
||||||
|
|
||||||
|
# try to open the file that stores the previous runs data
|
||||||
|
# if the file doesn't exist, create a blank one
|
||||||
|
try:
|
||||||
|
# maybe change this location
|
||||||
|
dataFile = open("/opt/so/state/pillarWatch.txt", "r+")
|
||||||
|
except FileNotFoundError:
|
||||||
|
log.warn("pillarWatch engine: No previous pillarWatch data saved")
|
||||||
|
dataFile = open("/opt/so/state/pillarWatch.txt", "w+")
|
||||||
|
|
||||||
|
df = dataFile.read()
|
||||||
|
for i in fpa:
|
||||||
|
log.trace("pillarWatch engine: files: %s" % i['files'])
|
||||||
|
log.trace("pillarWatch engine: pillar: %s" % i['pillar'])
|
||||||
|
log.trace("pillarWatch engine: actions: %s" % i['actions'])
|
||||||
|
pillarFiles = i['files']
|
||||||
|
pillar = i['pillar']
|
||||||
|
actions = i['actions']
|
||||||
|
# these are the keys that we are going to look for as we traverse the pillarFiles
|
||||||
|
patterns = pillar.split(".")
|
||||||
|
# check the pillar files in reveresed order to replicate the same hierarchy as the pillar top file
|
||||||
|
for pillarFile in reversed(pillarFiles):
|
||||||
|
currentPillarValue = ''
|
||||||
|
previousPillarValue = ''
|
||||||
|
# this var is used to track how many times the pattern has been found in the pillar file so that we can access the proper index later
|
||||||
|
patternFound = 0
|
||||||
|
with open(pillarFile, "r") as file:
|
||||||
|
log.debug("pillarWatch engine: checking file: %s" % pillarFile)
|
||||||
|
for line in file:
|
||||||
|
log.trace("pillarWatch engine: inspecting line: %s in file: %s" % (line, file))
|
||||||
|
log.trace("pillarWatch engine: looking for: %s" % patterns[patternFound])
|
||||||
|
# since we are looping line by line through a pillar file, the next line will check if each line matches the progression of keys through the pillar
|
||||||
|
# ex. if we are looking for the value of global.pipeline, then this will loop through the pillar file until 'global' is found, then it will look
|
||||||
|
# for pipeline. once pipeline is found, it will record the value
|
||||||
|
if re.search('^' + patterns[patternFound] + ':', line.strip()):
|
||||||
|
# strip the newline because it makes the logs u-g-l-y
|
||||||
|
log.debug("pillarWatch engine: found: %s" % line.strip('\n'))
|
||||||
|
patternFound += 1
|
||||||
|
# we have found the final key in the pillar that we are looking for, get the previous value then the current value
|
||||||
|
if patternFound == len(patterns):
|
||||||
|
# at this point, df is equal to the contents of the pillarWatch file that is used to tract the previous values of the pillars
|
||||||
|
previousPillarValue = 'PREVIOUSPILLARVALUENOTSAVEDINDATAFILE'
|
||||||
|
# check the contents of the dataFile that stores the previousPillarValue(s).
|
||||||
|
# find if the pillar we are checking for changes has previously been saved. if so, grab it's prior value
|
||||||
|
for l in df.splitlines():
|
||||||
|
if pillar in l:
|
||||||
|
previousPillarValue = str(l.split(":")[1].strip())
|
||||||
|
currentPillarValue = str(line.split(":")[1]).strip()
|
||||||
|
log.debug("pillarWatch engine: %s currentPillarValue: %s" % (pillar, currentPillarValue))
|
||||||
|
log.debug("pillarWatch engine: %s previousPillarValue: %s" % (pillar, previousPillarValue))
|
||||||
|
# if the pillar we are checking for changes has been defined in the dataFile,
|
||||||
|
# replace the previousPillarValue with the currentPillarValue. if it isn't in there, append it.
|
||||||
|
if pillar in df:
|
||||||
|
df = re.sub(r"\b{}\b.*".format(pillar), pillar + ': ' + currentPillarValue, df)
|
||||||
|
else:
|
||||||
|
df += pillar + ': ' + currentPillarValue + '\n'
|
||||||
|
log.trace("pillarWatch engine: df: %s" % df)
|
||||||
|
# we have found the pillar so we dont need to loop through the file anymore
|
||||||
|
break
|
||||||
|
# if key and value was found in the first file, then we don't want to look in
|
||||||
|
# any more files since we use the first file as the source of truth.
|
||||||
|
if patternFound == len(patterns):
|
||||||
|
break
|
||||||
|
# if the pillar value changed, then we find what actions we should take
|
||||||
|
log.debug("pillarWatch engine: checking if currentPillarValue != previousPillarValue")
|
||||||
|
if currentPillarValue != previousPillarValue:
|
||||||
|
log.info("pillarWatch engine: currentPillarValue != previousPillarValue: %s != %s" % (currentPillarValue, previousPillarValue))
|
||||||
|
# check if the previous pillar value is defined in the pillar from -> to actions
|
||||||
|
if previousPillarValue in actions['from']:
|
||||||
|
# check if the new / current pillar value is defined under to
|
||||||
|
if currentPillarValue in actions['from'][previousPillarValue]['to']:
|
||||||
|
ACTIONS=actions['from'][previousPillarValue]['to'][currentPillarValue]
|
||||||
|
# if the new / current pillar value isn't defined under to, is there a wildcard defined
|
||||||
|
elif '*' in actions['from'][previousPillarValue]['to']:
|
||||||
|
ACTIONS=actions['from'][previousPillarValue]['to']['*']
|
||||||
|
# no action was defined for us to take when we see the pillar change
|
||||||
|
else:
|
||||||
|
ACTIONS=['NO DEFINED ACTION FOR US TO TAKE']
|
||||||
|
# if the previous pillar wasn't defined in the actions from, is there a wildcard defined for the pillar that we are changing from
|
||||||
|
elif '*' in actions['from']:
|
||||||
|
# is the new pillar value defined for the wildcard match
|
||||||
|
if currentPillarValue in actions['from']['*']['to']:
|
||||||
|
ACTIONS=actions['from']['*']['to'][currentPillarValue]
|
||||||
|
# if the new pillar doesn't have an action, was a wildcard defined
|
||||||
|
elif '*' in actions['from']['*']['to']:
|
||||||
|
# need more logic here for to and from
|
||||||
|
ACTIONS=actions['from']['*']['to']['*']
|
||||||
|
else:
|
||||||
|
ACTIONS=['NO DEFINED ACTION FOR US TO TAKE']
|
||||||
|
# a match for the previous pillar wasn't defined in the action in either the form of a direct match or wildcard
|
||||||
|
else:
|
||||||
|
ACTIONS=['NO DEFINED ACTION FOR US TO TAKE']
|
||||||
|
log.debug("pillarWatch engine: all defined actions: %s" % actions['from'])
|
||||||
|
log.debug("pillarWatch engine: ACTIONS: %s chosen based on previousPillarValue: %s switching to currentPillarValue: %s" % (ACTIONS, previousPillarValue, currentPillarValue))
|
||||||
|
for action in ACTIONS:
|
||||||
|
log.info("pillarWatch engine: action: %s" % action)
|
||||||
|
if action != 'NO DEFINED ACTION FOR US TO TAKE':
|
||||||
|
for saltModule, args in action.items():
|
||||||
|
log.debug("pillarWatch engine: saltModule: %s" % saltModule)
|
||||||
|
log.debug("pillarWatch engine: args: %s" % args)
|
||||||
|
#__salt__[saltModule](**args)
|
||||||
|
actionReturn = __salt__[saltModule](**args)
|
||||||
|
log.info("pillarWatch engine: actionReturn: %s" % actionReturn)
|
||||||
|
|
||||||
|
dataFile.seek(0)
|
||||||
|
dataFile.write(df)
|
||||||
|
dataFile.truncate()
|
||||||
|
dataFile.close()
|
||||||
120
salt/salt/engines/master/valWatch.py
Normal file
120
salt/salt/engines/master/valWatch.py
Normal file
@@ -0,0 +1,120 @@
|
|||||||
|
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
|
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||||
|
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
|
# Elastic License 2.0.
|
||||||
|
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
import logging
|
||||||
|
import re
|
||||||
|
log = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
# will need this in future versions of this engine
|
||||||
|
import salt.client
|
||||||
|
local = salt.client.LocalClient()
|
||||||
|
|
||||||
|
def start(fpa, interval=10):
|
||||||
|
|
||||||
|
def getValue(content, key):
|
||||||
|
pieces = key.split(".", 1)
|
||||||
|
if len(pieces) > 1:
|
||||||
|
getValue(content[pieces[0]], pieces[1])
|
||||||
|
else:
|
||||||
|
#log.info("ck: %s" % content[key])
|
||||||
|
return content[key]
|
||||||
|
#content.pop(key, None)
|
||||||
|
|
||||||
|
log.info("valWatch engine: ##### checking watched values for changes #####")
|
||||||
|
|
||||||
|
# try to open the file that stores the previous runs data
|
||||||
|
# if the file doesn't exist, create a blank one
|
||||||
|
try:
|
||||||
|
# maybe change this location
|
||||||
|
dataFile = open("/opt/so/state/valWatch.txt", "r+")
|
||||||
|
except FileNotFoundError:
|
||||||
|
log.warn("valWatch engine: No previous valWatch data saved")
|
||||||
|
dataFile = open("/opt/so/state/valWatch.txt", "w+")
|
||||||
|
|
||||||
|
df = dataFile.read()
|
||||||
|
for i in fpa:
|
||||||
|
log.info("valWatch engine: i: %s" % i)
|
||||||
|
log.trace("valWatch engine: map: %s" % i['map'])
|
||||||
|
log.trace("valWatch engine: value: %s" % i['value'])
|
||||||
|
log.trace("valWatch engine: targets: %s" % i['targets'])
|
||||||
|
log.trace("valWatch engine: actions: %s" % i['actions'])
|
||||||
|
mapFile = i['map']
|
||||||
|
value = i['value']
|
||||||
|
targets = i['targets']
|
||||||
|
# target type
|
||||||
|
ttype = i['ttype']
|
||||||
|
actions = i['actions']
|
||||||
|
# these are the keys that we are going to look for as we traverse the pillarFiles
|
||||||
|
patterns = value.split(".")
|
||||||
|
mainDict = patterns.pop(0)
|
||||||
|
# patterns = value.split(".")
|
||||||
|
for target in targets:
|
||||||
|
# tell targets to render mapfile and return value split
|
||||||
|
mapRender = local.cmd(target, fun='jinja.load_map', arg=[mapFile, mainDict], tgt_type=ttype)
|
||||||
|
|
||||||
|
currentValue = ''
|
||||||
|
previousValue = ''
|
||||||
|
# this var is used to track how many times the pattern has been found in the pillar file so that we can access the proper index later
|
||||||
|
patternFound = 0
|
||||||
|
#with open(pillarFile, "r") as file:
|
||||||
|
# log.debug("pillarWatch engine: checking file: %s" % pillarFile)
|
||||||
|
mapRenderKeys = list(mapRender.keys())
|
||||||
|
if len(mapRenderKeys) > 0:
|
||||||
|
log.info(mapRenderKeys)
|
||||||
|
log.info("valWatch engine: mapRender: %s" % mapRender)
|
||||||
|
minion = mapRenderKeys[0]
|
||||||
|
currentValue = getValue(mapRender[minion],value.split('.', 1)[1])
|
||||||
|
log.info("valWatch engine: currentValue: %s: %s: %s" % (minion, value, currentValue))
|
||||||
|
for l in df.splitlines():
|
||||||
|
if value in l:
|
||||||
|
previousPillarValue = str(l.split(":")[1].strip())
|
||||||
|
log.info("valWatch engine: previousValue: %s: %s: %s" % (minion, value, previousValue))
|
||||||
|
|
||||||
|
'''
|
||||||
|
for key in mapRender[minion]:
|
||||||
|
log.info("pillarWatch engine: inspecting key: %s in mainDict: %s" % (key, mainDict))
|
||||||
|
log.info("pillarWatch engine: looking for: %s" % patterns[patternFound])
|
||||||
|
# since we are looping line by line through a pillar file, the next line will check if each line matches the progression of keys through the pillar
|
||||||
|
# ex. if we are looking for the value of global.pipeline, then this will loop through the pillar file until 'global' is found, then it will look
|
||||||
|
# for pipeline. once pipeline is found, it will record the value
|
||||||
|
#if re.search(patterns[patternFound], key):
|
||||||
|
if patterns[patternFound] == key:
|
||||||
|
# strip the newline because it makes the logs u-g-l-y
|
||||||
|
log.info("pillarWatch engine: found: %s" % key)
|
||||||
|
patternFound += 1
|
||||||
|
# we have found the final key in the pillar that we are looking for, get the previous value then the current value
|
||||||
|
if patternFound == len(patterns):
|
||||||
|
# at this point, df is equal to the contents of the pillarWatch file that is used to tract the previous values of the pillars
|
||||||
|
previousPillarValue = 'PREVIOUSPILLARVALUENOTSAVEDINDATAFILE'
|
||||||
|
# check the contents of the dataFile that stores the previousPillarValue(s).
|
||||||
|
# find if the pillar we are checking for changes has previously been saved. if so, grab it's prior value
|
||||||
|
for l in df.splitlines():
|
||||||
|
if value in l:
|
||||||
|
previousPillarValue = str(l.split(":")[1].strip())
|
||||||
|
currentPillarValue = mapRender[minion][key]
|
||||||
|
log.info("pillarWatch engine: %s currentPillarValue: %s" % (value, currentPillarValue))
|
||||||
|
log.info("pillarWatch engine: %s previousPillarValue: %s" % (value, previousPillarValue))
|
||||||
|
# if the pillar we are checking for changes has been defined in the dataFile,
|
||||||
|
# replace the previousPillarValue with the currentPillarValue. if it isn't in there, append it.
|
||||||
|
if value in df:
|
||||||
|
df = re.sub(r"\b{}\b.*".format(pillar), pillar + ': ' + currentPillarValue, df)
|
||||||
|
else:
|
||||||
|
df += value + ': ' + currentPillarValue + '\n'
|
||||||
|
log.info("pillarWatch engine: df: %s" % df)
|
||||||
|
# we have found the pillar so we dont need to loop through the file anymore
|
||||||
|
break
|
||||||
|
# if key and value was found in the first file, then we don't want to look in
|
||||||
|
# any more files since we use the first file as the source of truth.
|
||||||
|
if patternFound == len(patterns):
|
||||||
|
break
|
||||||
|
'''
|
||||||
|
|
||||||
|
|
||||||
|
dataFile.seek(0)
|
||||||
|
dataFile.write(df)
|
||||||
|
dataFile.truncate()
|
||||||
|
dataFile.close()
|
||||||
141
salt/salt/engines/master/valueWatch.py
Normal file
141
salt/salt/engines/master/valueWatch.py
Normal file
@@ -0,0 +1,141 @@
|
|||||||
|
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
|
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||||
|
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
|
# Elastic License 2.0.
|
||||||
|
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
import logging
|
||||||
|
import re
|
||||||
|
from pathlib import Path
|
||||||
|
import os
|
||||||
|
import glob
|
||||||
|
import json
|
||||||
|
from time import sleep
|
||||||
|
|
||||||
|
import sys
|
||||||
|
|
||||||
|
log = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
import salt.client
|
||||||
|
local = salt.client.LocalClient()
|
||||||
|
|
||||||
|
def start(watched, interval=10):
|
||||||
|
# this 20 second sleep allows enough time for the minion to reconnect during testing of the script when the salt-master is restarted
|
||||||
|
sleep(20)
|
||||||
|
log.info("valueWatch engine: started")
|
||||||
|
# this dict will be used to store the files that we are watching and their modification times for the current iteration though a loop
|
||||||
|
fileModTimesCurrent = {}
|
||||||
|
# same as fileModTimesCurrent, but stores the previous values through the loop.
|
||||||
|
# the combination of these two variables is used to determine if a files has changed.
|
||||||
|
fileModTimesPrevious = {}
|
||||||
|
#
|
||||||
|
currentValues = {}
|
||||||
|
|
||||||
|
def getValue(content, key):
|
||||||
|
pieces = key.split(".", 1)
|
||||||
|
if len(pieces) > 1:
|
||||||
|
getValue(content[pieces[0]], pieces[1])
|
||||||
|
else:
|
||||||
|
#log.info("ck: %s" % content[key])
|
||||||
|
return content[key]
|
||||||
|
#content.pop(key, None)
|
||||||
|
|
||||||
|
def updateModTimesCurrent(files):
|
||||||
|
# this dict will be used to store the files that we are watching and their modification times for the current iteration though a loop
|
||||||
|
fileModTimesCurrent.clear()
|
||||||
|
for f in files:
|
||||||
|
#log.warn(f)
|
||||||
|
fileName = Path(f).name
|
||||||
|
filePath = Path(f).parent
|
||||||
|
if '*' in fileName:
|
||||||
|
#log.info(fileName)
|
||||||
|
#log.info(filePath)
|
||||||
|
slsFiles = glob.glob(f)
|
||||||
|
for slsFile in slsFiles:
|
||||||
|
#log.info(slsFile)
|
||||||
|
fileModTimesCurrent.update({slsFile: os.path.getmtime(slsFile)})
|
||||||
|
else:
|
||||||
|
fileModTimesCurrent.update({f: os.path.getmtime(f)})
|
||||||
|
|
||||||
|
def compareFileModTimes():
|
||||||
|
ret = []
|
||||||
|
for f in fileModTimesCurrent:
|
||||||
|
log.info(f)
|
||||||
|
if f in fileModTimesPrevious:
|
||||||
|
log.info("valueWatch engine: fileModTimesCurrent: %s" % fileModTimesCurrent[f])
|
||||||
|
log.info("valueWatch engine: fileModTimesPrevious: %s" % fileModTimesPrevious[f])
|
||||||
|
if fileModTimesCurrent[f] != fileModTimesPrevious[f]:
|
||||||
|
log.error("valueWatch engine: fileModTimesCurrent[f] != fileModTimesPrevious[f]")
|
||||||
|
log.error("valueWatch engine: " + str(fileModTimesCurrent[f]) + " != " + str(fileModTimesPrevious[f]))
|
||||||
|
ret.append(f)
|
||||||
|
return ret
|
||||||
|
|
||||||
|
# this will set the current value of 'value' from engines.conf and save it to the currentValues dict
|
||||||
|
def updateCurrentValues():
|
||||||
|
for target in targets:
|
||||||
|
log.info("valueWatch engine: refreshing pillars on %s" % target)
|
||||||
|
refreshPillar = local.cmd(target, fun='saltutil.refresh_pillar', tgt_type=ttype)
|
||||||
|
log.info("valueWatch engine: pillar refresh results: %s" % refreshPillar)
|
||||||
|
# check if the result was True for the pillar refresh
|
||||||
|
# will need to add a recheck incase the minion was just temorarily unavailable
|
||||||
|
try:
|
||||||
|
if next(iter(refreshPillar.values())):
|
||||||
|
sleep(5)
|
||||||
|
# render the map file for the variable passed in from value.
|
||||||
|
mapRender = local.cmd(target, fun='jinja.load_map', arg=[mapFile, mainDict], tgt_type=ttype)
|
||||||
|
log.info("mR: %s" % mapRender)
|
||||||
|
currentValue = ''
|
||||||
|
previousValue = ''
|
||||||
|
mapRenderKeys = list(mapRender.keys())
|
||||||
|
if len(mapRenderKeys) > 0:
|
||||||
|
log.info(mapRenderKeys)
|
||||||
|
log.info("valueWatch engine: mapRender: %s" % mapRender)
|
||||||
|
minion = mapRenderKeys[0]
|
||||||
|
# if not isinstance(mapRender[minion], bool):
|
||||||
|
currentValue = getValue(mapRender[minion],value.split('.', 1)[1])
|
||||||
|
log.info("valueWatch engine: currentValue: %s: %s: %s" % (minion, value, currentValue))
|
||||||
|
currentValues.update({value: {minion: currentValue}})
|
||||||
|
# we have rendered the value so we don't need to have any more target render it
|
||||||
|
break
|
||||||
|
except StopIteration:
|
||||||
|
log.info("valueWatch engine: target %s did not respond or does not exist" % target)
|
||||||
|
|
||||||
|
log.info("valueWatch engine: currentValues: %s" % currentValues)
|
||||||
|
|
||||||
|
|
||||||
|
# run the main loop
|
||||||
|
while True:
|
||||||
|
log.info("valueWatch engine: checking watched files for changes")
|
||||||
|
for v in watched:
|
||||||
|
value = v['value']
|
||||||
|
files = v['files']
|
||||||
|
mapFile = v['map']
|
||||||
|
targets = v['targets']
|
||||||
|
ttype = v['ttype']
|
||||||
|
actions = v['actions']
|
||||||
|
|
||||||
|
patterns = value.split(".")
|
||||||
|
mainDict = patterns.pop(0)
|
||||||
|
|
||||||
|
log.info("valueWatch engine: value: %s" % value)
|
||||||
|
# the call to this function will update fileModtimesCurrent
|
||||||
|
updateModTimesCurrent(files)
|
||||||
|
#log.trace("valueWatch engine: fileModTimesCurrent: %s" % fileModTimesCurrent)
|
||||||
|
#log.trace("valueWatch engine: fileModTimesPrevious: %s" % fileModTimesPrevious)
|
||||||
|
|
||||||
|
# compare with the previous checks file modification times
|
||||||
|
modFilesDiff = compareFileModTimes()
|
||||||
|
# if there were changes in the pillar files, then we need to have the minion render the map file to determine if the value changed
|
||||||
|
if modFilesDiff:
|
||||||
|
log.info("valueWatch engine: change in files detetected, updating currentValues: %s" % modFilesDiff)
|
||||||
|
updateCurrentValues()
|
||||||
|
elif value not in currentValues:
|
||||||
|
log.info("valueWatch engine: %s not in currentValues, updating currentValues." % value)
|
||||||
|
updateCurrentValues()
|
||||||
|
else:
|
||||||
|
log.info("valueWatch engine: no files changed, no update for currentValues")
|
||||||
|
|
||||||
|
# save this iteration's values to previous so we can compare next run
|
||||||
|
fileModTimesPrevious.update(fileModTimesCurrent)
|
||||||
|
sleep(interval)
|
||||||
@@ -4,3 +4,127 @@ engines_dirs:
|
|||||||
engines:
|
engines:
|
||||||
- checkmine:
|
- checkmine:
|
||||||
interval: 60
|
interval: 60
|
||||||
|
- valueWatch:
|
||||||
|
watched:
|
||||||
|
- value: GLOBALMERGED.pipeline
|
||||||
|
files:
|
||||||
|
- /opt/so/saltstack/local/pillar/global/soc_global.sls
|
||||||
|
- /opt/so/saltstack/local/pillar/global/adv_global.sls
|
||||||
|
map: global/map.jinja
|
||||||
|
targets:
|
||||||
|
- G@role:so-notanodetype
|
||||||
|
- G@role:so-manager
|
||||||
|
- G@role:so-searchnode
|
||||||
|
ttype: compound
|
||||||
|
actions:
|
||||||
|
from:
|
||||||
|
'*':
|
||||||
|
to:
|
||||||
|
KAFKA:
|
||||||
|
- cmd.run:
|
||||||
|
cmd: /usr/sbin/so-yaml.py replace /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.enabled True
|
||||||
|
KAFKA:
|
||||||
|
to:
|
||||||
|
'*':
|
||||||
|
- cmd.run:
|
||||||
|
cmd: /usr/sbin/so-yaml.py replace /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.enabled False
|
||||||
|
|
||||||
|
# - value: FIREWALL_MERGED
|
||||||
|
# files:
|
||||||
|
# - /opt/so/saltstack/local/pillar/firewall/soc_firewall.sls
|
||||||
|
# - /opt/so/saltstack/local/pillar/firewall/adv_firewall.sls
|
||||||
|
# - /opt/so/saltstack/local/pillar/minions/*.sls
|
||||||
|
# map: firewall/map.jinja
|
||||||
|
# targets:
|
||||||
|
# - so-*
|
||||||
|
# ttype: compound
|
||||||
|
# actions:
|
||||||
|
# from:
|
||||||
|
# '*':
|
||||||
|
# to:
|
||||||
|
# '*':
|
||||||
|
# - cmd.run:
|
||||||
|
# cmd: date
|
||||||
|
interval: 10
|
||||||
|
|
||||||
|
|
||||||
|
- pillarWatch:
|
||||||
|
fpa:
|
||||||
|
# these files will be checked in reversed order to replicate the same hierarchy as the pillar top file
|
||||||
|
- files:
|
||||||
|
- /opt/so/saltstack/local/pillar/global/soc_global.sls
|
||||||
|
- /opt/so/saltstack/local/pillar/global/adv_global.sls
|
||||||
|
pillar: global.pipeline
|
||||||
|
actions:
|
||||||
|
from:
|
||||||
|
'*':
|
||||||
|
to:
|
||||||
|
KAFKA:
|
||||||
|
- cmd.run:
|
||||||
|
cmd: /usr/sbin/so-yaml.py replace /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.enabled True
|
||||||
|
# - cmd.run:
|
||||||
|
# cmd: salt-call saltutil.kill_all_jobs
|
||||||
|
# - cmd.run:
|
||||||
|
# cmd: salt-call state.highstate &
|
||||||
|
KAFKA:
|
||||||
|
to:
|
||||||
|
'*':
|
||||||
|
- cmd.run:
|
||||||
|
cmd: /usr/sbin/so-yaml.py replace /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.enabled False
|
||||||
|
# - cmd.run:
|
||||||
|
# cmd: salt-call saltutil.kill_all_jobs
|
||||||
|
# - cmd.run:
|
||||||
|
# cmd: salt-call state.highstate &
|
||||||
|
- files:
|
||||||
|
- /opt/so/saltstack/local/pillar/idstools/soc_idstools.sls
|
||||||
|
- /opt/so/saltstack/local/pillar/idstools/adv_idstools.sls
|
||||||
|
pillar: idstools.config.ruleset
|
||||||
|
actions:
|
||||||
|
from:
|
||||||
|
'*':
|
||||||
|
to:
|
||||||
|
'*':
|
||||||
|
- cmd.run:
|
||||||
|
cmd: /usr/sbin/so-rule-update
|
||||||
|
interval: 10
|
||||||
|
- valWatch:
|
||||||
|
fpa:
|
||||||
|
- value: GLOBALMERGED.pipeline
|
||||||
|
map: global/map.jinja
|
||||||
|
targets:
|
||||||
|
- so-manager
|
||||||
|
- so-managersearch
|
||||||
|
- so-standalone
|
||||||
|
|
||||||
|
actions:
|
||||||
|
from:
|
||||||
|
'*':
|
||||||
|
to:
|
||||||
|
KAFKA:
|
||||||
|
- cmd.run:
|
||||||
|
cmd: /usr/sbin/so-yaml.py replace /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.enabled True
|
||||||
|
# - cmd.run:
|
||||||
|
# cmd: salt-call saltutil.kill_all_jobs
|
||||||
|
# - cmd.run:
|
||||||
|
# cmd: salt-call state.highstate &
|
||||||
|
KAFKA:
|
||||||
|
to:
|
||||||
|
'*':
|
||||||
|
- cmd.run:
|
||||||
|
cmd: /usr/sbin/so-yaml.py replace /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.enabled False
|
||||||
|
# - cmd.run:
|
||||||
|
# cmd: salt-call saltutil.kill_all_jobs
|
||||||
|
# - cmd.run:
|
||||||
|
# cmd: salt-call state.highstate &
|
||||||
|
# - files:
|
||||||
|
# - /opt/so/saltstack/local/pillar/idstools/soc_idstools.sls
|
||||||
|
# - /opt/so/saltstack/local/pillar/idstools/adv_idstools.sls
|
||||||
|
# pillar: idstools.config.ruleset
|
||||||
|
# actions:
|
||||||
|
# from:
|
||||||
|
# '*':
|
||||||
|
# to:
|
||||||
|
# '*':
|
||||||
|
# - cmd.run:
|
||||||
|
# cmd: /usr/sbin/so-rule-update
|
||||||
|
interval: 10
|
||||||
|
|||||||
@@ -27,6 +27,11 @@ checkmine_engine:
|
|||||||
- source: salt://salt/engines/master/checkmine.py
|
- source: salt://salt/engines/master/checkmine.py
|
||||||
- makedirs: True
|
- makedirs: True
|
||||||
|
|
||||||
|
pillarWatch_engine:
|
||||||
|
file.managed:
|
||||||
|
- name: /etc/salt/engines/pillarWatch.py
|
||||||
|
- source: salt://salt/engines/master/pillarWatch.py
|
||||||
|
|
||||||
engines_config:
|
engines_config:
|
||||||
file.managed:
|
file.managed:
|
||||||
- name: /etc/salt/master.d/engines.conf
|
- name: /etc/salt/master.d/engines.conf
|
||||||
|
|||||||
@@ -13,6 +13,9 @@ include:
|
|||||||
- systemd.reload
|
- systemd.reload
|
||||||
- repo.client
|
- repo.client
|
||||||
- salt.mine_functions
|
- salt.mine_functions
|
||||||
|
{% if GLOBALS.role in GLOBALS.manager_roles %}
|
||||||
|
- ca
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
{% if INSTALLEDSALTVERSION|string != SALTVERSION|string %}
|
{% if INSTALLEDSALTVERSION|string != SALTVERSION|string %}
|
||||||
|
|
||||||
@@ -98,5 +101,8 @@ salt_minion_service:
|
|||||||
- file: mine_functions
|
- file: mine_functions
|
||||||
{% if INSTALLEDSALTVERSION|string == SALTVERSION|string %}
|
{% if INSTALLEDSALTVERSION|string == SALTVERSION|string %}
|
||||||
- file: set_log_levels
|
- file: set_log_levels
|
||||||
|
{% endif %}
|
||||||
|
{% if GLOBALS.role in GLOBALS.manager_roles %}
|
||||||
|
- file: /etc/salt/minion.d/signing_policies.conf
|
||||||
{% endif %}
|
{% endif %}
|
||||||
- order: last
|
- order: last
|
||||||
|
|||||||
@@ -664,6 +664,230 @@ elastickeyperms:
|
|||||||
|
|
||||||
{%- endif %}
|
{%- endif %}
|
||||||
|
|
||||||
|
{% if grains['role'] in ['so-manager', 'so-receiver', 'so-searchnode'] %}
|
||||||
|
|
||||||
|
kafka_key:
|
||||||
|
x509.private_key_managed:
|
||||||
|
- name: /etc/pki/kafka.key
|
||||||
|
- keysize: 4096
|
||||||
|
- backup: True
|
||||||
|
- new: True
|
||||||
|
{% if salt['file.file_exists']('/etc/pki/kafka.key') -%}
|
||||||
|
- prereq:
|
||||||
|
- x509: /etc/pki/kafka.crt
|
||||||
|
{%- endif %}
|
||||||
|
- retry:
|
||||||
|
attempts: 5
|
||||||
|
interval: 30
|
||||||
|
|
||||||
|
kafka_crt:
|
||||||
|
x509.certificate_managed:
|
||||||
|
- name: /etc/pki/kafka.crt
|
||||||
|
- ca_server: {{ ca_server }}
|
||||||
|
- subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
|
||||||
|
- signing_policy: kafka
|
||||||
|
- private_key: /etc/pki/kafka.key
|
||||||
|
- CN: {{ GLOBALS.hostname }}
|
||||||
|
- days_remaining: 0
|
||||||
|
- days_valid: 820
|
||||||
|
- backup: True
|
||||||
|
- timeout: 30
|
||||||
|
- retry:
|
||||||
|
attempts: 5
|
||||||
|
interval: 30
|
||||||
|
cmd.run:
|
||||||
|
- name: "/usr/bin/openssl pkcs12 -inkey /etc/pki/kafka.key -in /etc/pki/kafka.crt -export -out /etc/pki/kafka.p12 -nodes -passout pass:changeit"
|
||||||
|
- onchanges:
|
||||||
|
- x509: /etc/pki/kafka.key
|
||||||
|
|
||||||
|
elasticfleet_kafka_key:
|
||||||
|
x509.private_key_managed:
|
||||||
|
- name: /etc/pki/elasticfleet-kafka.key
|
||||||
|
- keysize: 4096
|
||||||
|
- backup: True
|
||||||
|
- new: True
|
||||||
|
{% if salt['file.file_exists']('/etc/pki/elasticfleet-kafka.key') -%}
|
||||||
|
- prereq:
|
||||||
|
- x509: elasticfleet_kafka_crt
|
||||||
|
{%- endif %}
|
||||||
|
- retry:
|
||||||
|
attempts: 5
|
||||||
|
interval: 30
|
||||||
|
|
||||||
|
elasticfleet_kafka_crt:
|
||||||
|
x509.certificate_managed:
|
||||||
|
- name: /etc/pki/elasticfleet-kafka.crt
|
||||||
|
- ca_server: {{ ca_server }}
|
||||||
|
- signing_policy: kafka
|
||||||
|
- private_key: /etc/pki/elasticfleet-kafka.key
|
||||||
|
- CN: {{ GLOBALS.hostname }}
|
||||||
|
- subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
|
||||||
|
- days_remaining: 0
|
||||||
|
- days_valid: 820
|
||||||
|
- backup: True
|
||||||
|
- timeout: 30
|
||||||
|
- retry:
|
||||||
|
attempts: 5
|
||||||
|
interval: 30
|
||||||
|
cmd.run:
|
||||||
|
- name: "/usr/bin/openssl pkcs8 -in /etc/pki/elasticfleet-kafka.key -topk8 -out /etc/pki/elasticfleet-kafka.p8 -nocrypt"
|
||||||
|
- onchanges:
|
||||||
|
- x509: elasticfleet_kafka_key
|
||||||
|
|
||||||
|
kafka_logstash_key:
|
||||||
|
x509.private_key_managed:
|
||||||
|
- name: /etc/pki/kafka-logstash.key
|
||||||
|
- keysize: 4096
|
||||||
|
- backup: True
|
||||||
|
- new: True
|
||||||
|
{% if salt['file.file_exists']('/etc/pki/kafka-logstash.key') -%}
|
||||||
|
- prereq:
|
||||||
|
- x509: /etc/pki/kafka-logstash.crt
|
||||||
|
{%- endif %}
|
||||||
|
- retry:
|
||||||
|
attempts: 5
|
||||||
|
interval: 30
|
||||||
|
|
||||||
|
kafka_logstash_crt:
|
||||||
|
x509.certificate_managed:
|
||||||
|
- name: /etc/pki/kafka-logstash.crt
|
||||||
|
- ca_server: {{ ca_server }}
|
||||||
|
- subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
|
||||||
|
- signing_policy: kafka
|
||||||
|
- private_key: /etc/pki/kafka-logstash.key
|
||||||
|
- CN: {{ GLOBALS.hostname }}
|
||||||
|
- days_remaining: 0
|
||||||
|
- days_valid: 820
|
||||||
|
- backup: True
|
||||||
|
- timeout: 30
|
||||||
|
- retry:
|
||||||
|
attempts: 5
|
||||||
|
interval: 30
|
||||||
|
cmd.run:
|
||||||
|
- name: "/usr/bin/openssl pkcs12 -inkey /etc/pki/kafka-logstash.key -in /etc/pki/kafka-logstash.crt -export -out /etc/pki/kafka-logstash.p12 -nodes -passout pass:changeit"
|
||||||
|
- onchanges:
|
||||||
|
- x509: /etc/pki/kafka-logstash.key
|
||||||
|
|
||||||
|
{% if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-receiver'] %}
|
||||||
|
kafka_client_key:
|
||||||
|
x509.private_key_managed:
|
||||||
|
- name: /etc/pki/kafka-client.key
|
||||||
|
- keysize: 4096
|
||||||
|
- backup: True
|
||||||
|
- new: True
|
||||||
|
{% if salt['file.file_exists']('/etc/pki/kafka-client.key') -%}
|
||||||
|
- prereq:
|
||||||
|
- x509: /etc/pki/kafka-client.crt
|
||||||
|
{%- endif %}
|
||||||
|
- retry:
|
||||||
|
attempts: 5
|
||||||
|
interval: 30
|
||||||
|
|
||||||
|
kafka_client_crt:
|
||||||
|
x509.certificate_managed:
|
||||||
|
- name: /etc/pki/kafka-client.crt
|
||||||
|
- ca_server: {{ ca_server }}
|
||||||
|
- subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
|
||||||
|
- signing_policy: kafka
|
||||||
|
- private_key: /etc/pki/kafka-client.key
|
||||||
|
- CN: {{ GLOBALS.hostname }}
|
||||||
|
- days_remaining: 0
|
||||||
|
- days_valid: 820
|
||||||
|
- backup: True
|
||||||
|
- timeout: 30
|
||||||
|
- retry:
|
||||||
|
attempts: 5
|
||||||
|
interval: 30
|
||||||
|
|
||||||
|
kafka_client_key_perms:
|
||||||
|
file.managed:
|
||||||
|
- replace: False
|
||||||
|
- name: /etc/pki/kafka-client.key
|
||||||
|
- mode: 640
|
||||||
|
- user: 960
|
||||||
|
- group: 939
|
||||||
|
|
||||||
|
kafka_client_crt_perms:
|
||||||
|
file.managed:
|
||||||
|
- replace: False
|
||||||
|
- name: /etc/pki/kafka-client.crt
|
||||||
|
- mode: 640
|
||||||
|
- user: 960
|
||||||
|
- group: 939
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
kafka_key_perms:
|
||||||
|
file.managed:
|
||||||
|
- replace: False
|
||||||
|
- name: /etc/pki/kafka.key
|
||||||
|
- mode: 640
|
||||||
|
- user: 960
|
||||||
|
- group: 939
|
||||||
|
|
||||||
|
kafka_crt_perms:
|
||||||
|
file.managed:
|
||||||
|
- replace: False
|
||||||
|
- name: /etc/pki/kafka.crt
|
||||||
|
- mode: 640
|
||||||
|
- user: 960
|
||||||
|
- group: 939
|
||||||
|
|
||||||
|
kafka_logstash_key_perms:
|
||||||
|
file.managed:
|
||||||
|
- replace: False
|
||||||
|
- name: /etc/pki/kafka-logstash.key
|
||||||
|
- mode: 640
|
||||||
|
- user: 960
|
||||||
|
- group: 939
|
||||||
|
|
||||||
|
kafka_logstash_crt_perms:
|
||||||
|
file.managed:
|
||||||
|
- replace: False
|
||||||
|
- name: /etc/pki/kafka-logstash.crt
|
||||||
|
- mode: 640
|
||||||
|
- user: 960
|
||||||
|
- group: 939
|
||||||
|
|
||||||
|
kafka_logstash_pkcs12_perms:
|
||||||
|
file.managed:
|
||||||
|
- replace: False
|
||||||
|
- name: /etc/pki/kafka-logstash.p12
|
||||||
|
- mode: 640
|
||||||
|
- user: 960
|
||||||
|
- group: 931
|
||||||
|
|
||||||
|
kafka_pkcs8_perms:
|
||||||
|
file.managed:
|
||||||
|
- replace: False
|
||||||
|
- name: /etc/pki/kafka.p8
|
||||||
|
- mode: 640
|
||||||
|
- user: 960
|
||||||
|
- group: 939
|
||||||
|
|
||||||
|
kafka_pkcs12_perms:
|
||||||
|
file.managed:
|
||||||
|
- replace: False
|
||||||
|
- name: /etc/pki/kafka.p12
|
||||||
|
- mode: 640
|
||||||
|
- user: 960
|
||||||
|
- group: 939
|
||||||
|
|
||||||
|
elasticfleet_kafka_cert_perms:
|
||||||
|
file.managed:
|
||||||
|
- replace: False
|
||||||
|
- name: /etc/pki/elasticfleet-kafka.crt
|
||||||
|
- mode: 640
|
||||||
|
- user: 960
|
||||||
|
- group: 939
|
||||||
|
|
||||||
|
elasticfleet_kafka_key_perms:
|
||||||
|
file.managed:
|
||||||
|
- replace: False
|
||||||
|
- name: /etc/pki/elasticfleet-kafka.key
|
||||||
|
- mode: 640
|
||||||
|
- user: 960
|
||||||
|
- group: 939
|
||||||
|
{% endif %}
|
||||||
{% else %}
|
{% else %}
|
||||||
|
|
||||||
{{sls}}_state_not_allowed:
|
{{sls}}_state_not_allowed:
|
||||||
|
|||||||
@@ -71,3 +71,20 @@ fleet_crt:
|
|||||||
fbcertdir:
|
fbcertdir:
|
||||||
file.absent:
|
file.absent:
|
||||||
- name: /opt/so/conf/filebeat/etc/pki
|
- name: /opt/so/conf/filebeat/etc/pki
|
||||||
|
|
||||||
|
kafka_crt:
|
||||||
|
file.absent:
|
||||||
|
- name: /etc/pki/kafka.crt
|
||||||
|
kafka_key:
|
||||||
|
file.absent:
|
||||||
|
- name: /etc/pki/kafka.key
|
||||||
|
|
||||||
|
kafka_logstash_crt:
|
||||||
|
file.absent:
|
||||||
|
- name: /etc/pki/kafka-logstash.crt
|
||||||
|
kafka_logstash_key:
|
||||||
|
file.absent:
|
||||||
|
- name: /etc/pki/kafka-logstash.key
|
||||||
|
kafka_logstash_keystore:
|
||||||
|
file.absent:
|
||||||
|
- name: /etc/pki/kafka-logstash.p12
|
||||||
|
|||||||
@@ -106,6 +106,7 @@ base:
|
|||||||
- utility
|
- utility
|
||||||
- elasticfleet
|
- elasticfleet
|
||||||
- stig
|
- stig
|
||||||
|
- kafka
|
||||||
|
|
||||||
'*_standalone and G@saltversion:{{saltversion}}':
|
'*_standalone and G@saltversion:{{saltversion}}':
|
||||||
- match: compound
|
- match: compound
|
||||||
@@ -235,6 +236,7 @@ base:
|
|||||||
- logstash
|
- logstash
|
||||||
- redis
|
- redis
|
||||||
- elasticfleet.install_agent_grid
|
- elasticfleet.install_agent_grid
|
||||||
|
- kafka
|
||||||
|
|
||||||
'*_idh and G@saltversion:{{saltversion}}':
|
'*_idh and G@saltversion:{{saltversion}}':
|
||||||
- match: compound
|
- match: compound
|
||||||
|
|||||||
@@ -23,7 +23,7 @@
|
|||||||
'manager_ip': INIT.PILLAR.global.managerip,
|
'manager_ip': INIT.PILLAR.global.managerip,
|
||||||
'md_engine': INIT.PILLAR.global.mdengine,
|
'md_engine': INIT.PILLAR.global.mdengine,
|
||||||
'pcap_engine': GLOBALMERGED.pcapengine,
|
'pcap_engine': GLOBALMERGED.pcapengine,
|
||||||
'pipeline': INIT.PILLAR.global.pipeline,
|
'pipeline': GLOBALMERGED.pipeline,
|
||||||
'so_version': INIT.PILLAR.global.soversion,
|
'so_version': INIT.PILLAR.global.soversion,
|
||||||
'so_docker_gateway': DOCKER.gateway,
|
'so_docker_gateway': DOCKER.gateway,
|
||||||
'so_docker_range': DOCKER.range,
|
'so_docker_range': DOCKER.range,
|
||||||
|
|||||||
@@ -803,6 +803,7 @@ create_manager_pillars() {
|
|||||||
patch_pillar
|
patch_pillar
|
||||||
nginx_pillar
|
nginx_pillar
|
||||||
kibana_pillar
|
kibana_pillar
|
||||||
|
kafka_pillar
|
||||||
}
|
}
|
||||||
|
|
||||||
create_repo() {
|
create_repo() {
|
||||||
@@ -1191,6 +1192,18 @@ kibana_pillar() {
|
|||||||
logCmd "touch $kibana_pillar_file"
|
logCmd "touch $kibana_pillar_file"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
kafka_pillar() {
|
||||||
|
KAFKACLUSTERID=$(get_random_value 22)
|
||||||
|
KAFKAPASS=$(get_random_value)
|
||||||
|
logCmd "mkdir -p $local_salt_dir/pillar/kakfa"
|
||||||
|
logCmd "touch $adv_kafka_pillar_file"
|
||||||
|
logCmd "touch $kafka_pillar_file"
|
||||||
|
printf '%s\n'\
|
||||||
|
"kafka:"\
|
||||||
|
" cluster_id: $KAFKACLUSTERID"\
|
||||||
|
" certpass: $KAFKAPASS" > $kafka_pillar_file
|
||||||
|
}
|
||||||
|
|
||||||
logrotate_pillar() {
|
logrotate_pillar() {
|
||||||
logCmd "mkdir -p $local_salt_dir/pillar/logrotate"
|
logCmd "mkdir -p $local_salt_dir/pillar/logrotate"
|
||||||
logCmd "touch $adv_logrotate_pillar_file"
|
logCmd "touch $adv_logrotate_pillar_file"
|
||||||
@@ -1325,7 +1338,6 @@ create_global() {
|
|||||||
|
|
||||||
# Continue adding other details
|
# Continue adding other details
|
||||||
echo " imagerepo: '$IMAGEREPO'" >> $global_pillar_file
|
echo " imagerepo: '$IMAGEREPO'" >> $global_pillar_file
|
||||||
echo " pipeline: 'redis'" >> $global_pillar_file
|
|
||||||
echo " repo_host: '$HOSTNAME'" >> $global_pillar_file
|
echo " repo_host: '$HOSTNAME'" >> $global_pillar_file
|
||||||
echo " influxdb_host: '$HOSTNAME'" >> $global_pillar_file
|
echo " influxdb_host: '$HOSTNAME'" >> $global_pillar_file
|
||||||
echo " registry_host: '$HOSTNAME'" >> $global_pillar_file
|
echo " registry_host: '$HOSTNAME'" >> $global_pillar_file
|
||||||
@@ -1391,7 +1403,7 @@ make_some_dirs() {
|
|||||||
mkdir -p $local_salt_dir/salt/firewall/portgroups
|
mkdir -p $local_salt_dir/salt/firewall/portgroups
|
||||||
mkdir -p $local_salt_dir/salt/firewall/ports
|
mkdir -p $local_salt_dir/salt/firewall/ports
|
||||||
|
|
||||||
for THEDIR in bpf pcap elasticsearch ntp firewall redis backup influxdb strelka sensoroni soc docker zeek suricata nginx telegraf logstash soc manager kratos idstools idh elastalert stig global;do
|
for THEDIR in bpf pcap elasticsearch ntp firewall redis backup influxdb strelka sensoroni soc docker zeek suricata nginx telegraf logstash soc manager kratos idstools idh elastalert stig global kafka;do
|
||||||
mkdir -p $local_salt_dir/pillar/$THEDIR
|
mkdir -p $local_salt_dir/pillar/$THEDIR
|
||||||
touch $local_salt_dir/pillar/$THEDIR/adv_$THEDIR.sls
|
touch $local_salt_dir/pillar/$THEDIR/adv_$THEDIR.sls
|
||||||
touch $local_salt_dir/pillar/$THEDIR/soc_$THEDIR.sls
|
touch $local_salt_dir/pillar/$THEDIR/soc_$THEDIR.sls
|
||||||
|
|||||||
@@ -624,6 +624,16 @@ if ! [[ -f $install_opt_file ]]; then
|
|||||||
set_minion_info
|
set_minion_info
|
||||||
whiptail_end_settings
|
whiptail_end_settings
|
||||||
|
|
||||||
|
elif [[ $is_kafka ]]; then
|
||||||
|
info "Setting up as node type Kafka broker"
|
||||||
|
#check_requirements "kafka"
|
||||||
|
networking_needful
|
||||||
|
collect_mngr_hostname
|
||||||
|
add_mngr_ip_to_hosts
|
||||||
|
check_manager_connection
|
||||||
|
set_minion_info
|
||||||
|
whiptail_end_settings
|
||||||
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [[ $waitforstate ]]; then
|
if [[ $waitforstate ]]; then
|
||||||
|
|||||||
@@ -178,6 +178,12 @@ export redis_pillar_file
|
|||||||
adv_redis_pillar_file="$local_salt_dir/pillar/redis/adv_redis.sls"
|
adv_redis_pillar_file="$local_salt_dir/pillar/redis/adv_redis.sls"
|
||||||
export adv_redis_pillar_file
|
export adv_redis_pillar_file
|
||||||
|
|
||||||
|
kafka_pillar_file="local_salt_dir/pillar/kafka/soc_kafka.sls"
|
||||||
|
export kafka_pillar_file
|
||||||
|
|
||||||
|
adv_kafka_pillar_file="$local_salt_dir/pillar/kafka/adv_kafka.sls"
|
||||||
|
export kafka_pillar_file
|
||||||
|
|
||||||
idh_pillar_file="$local_salt_dir/pillar/idh/soc_idh.sls"
|
idh_pillar_file="$local_salt_dir/pillar/idh/soc_idh.sls"
|
||||||
export idh_pillar_file
|
export idh_pillar_file
|
||||||
|
|
||||||
|
|||||||
@@ -674,7 +674,7 @@ whiptail_install_type_dist_existing() {
|
|||||||
Note: Heavy nodes (HEAVYNODE) are NOT recommended for most users.
|
Note: Heavy nodes (HEAVYNODE) are NOT recommended for most users.
|
||||||
EOM
|
EOM
|
||||||
|
|
||||||
install_type=$(whiptail --title "$whiptail_title" --menu "$node_msg" 19 75 6 \
|
install_type=$(whiptail --title "$whiptail_title" --menu "$node_msg" 19 75 7 \
|
||||||
"SENSOR" "Create a forward only sensor " \
|
"SENSOR" "Create a forward only sensor " \
|
||||||
"SEARCHNODE" "Add a search node with parsing " \
|
"SEARCHNODE" "Add a search node with parsing " \
|
||||||
"FLEET" "Dedicated Elastic Fleet Node " \
|
"FLEET" "Dedicated Elastic Fleet Node " \
|
||||||
|
|||||||
Reference in New Issue
Block a user