mirror of https://github.com/Security-Onion-Solutions/securityonion.git
Remove references to kafkanode
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
@@ -19,5 +19,4 @@ role:
   receiver:
   standalone:
   searchnode:
-  kafkanode:
   sensor:
@@ -1,4 +1,4 @@
-{% set current_kafkanodes = salt.saltutil.runner('mine.get', tgt='G@role:so-kafkanode or G@role:so-manager', fun='network.ip_addrs', tgt_type='compound') %}
+{% set current_kafkanodes = salt.saltutil.runner('mine.get', tgt='G@role:so-receiver or G@role:so-manager', fun='network.ip_addrs', tgt_type='compound') %}
 {% set pillar_kafkanodes = salt['pillar.get']('kafka:nodes', default={}, merge=True) %}
 {% set existing_ids = [] %}
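For context on the hunk above: mine.get returns a mapping of minion ID to a list of IP addresses, while kafka:nodes is a pillar dictionary of already-registered brokers. A minimal sketch of how the two could be walked to populate existing_ids, assuming the pillar is keyed by minion ID and each entry carries a nodeid field (both assumptions, not taken from this commit):

{# sketch only: keying by minion ID and the 'nodeid' field are assumed #}
{% for minionid, ips in current_kafkanodes.items() %}
{%   if minionid in pillar_kafkanodes %}
{%     do existing_ids.append(pillar_kafkanodes[minionid].get('nodeid')) %}
{%   endif %}
{% endfor %}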
@@ -2,7 +2,7 @@
 {% set cached_grains = salt.saltutil.runner('cache.grains', tgt='*') %}
 {% for minionid, ip in salt.saltutil.runner(
     'mine.get',
-    tgt='G@role:so-manager or G@role:so-managersearch or G@role:so-standalone or G@role:so-searchnode or G@role:so-heavynode or G@role:so-receiver or G@role:so-fleet or G@role:so-kafkanode ',
+    tgt='G@role:so-manager or G@role:so-managersearch or G@role:so-standalone or G@role:so-searchnode or G@role:so-heavynode or G@role:so-receiver or G@role:so-fleet ',
     fun='network.ip_addrs',
     tgt_type='compound') | dictsort()
 %}
@@ -233,15 +233,8 @@ base:
     - redis.adv_redis
     - minions.{{ grains.id }}
     - minions.adv_{{ grains.id }}

-  '*_kafkanode':
-    - logstash.nodes
-    - logstash.soc_logstash
-    - logstash.adv_logstash
-    - minions.{{ grains.id }}
-    - minions.adv_{{ grains.id }}
-    - secrets
-    - kafka.nodes
-    - secrets

   '*_import':
     - secrets
@@ -188,16 +188,8 @@
     'telegraf',
     'firewall',
     'schedule',
-    'docker_clean'
-  ],
-  'so-kafkanode': [
-    'kafka',
-    'logstash',
-    'ssl',
-    'telegraf',
-    'firewall',
-    'schedule',
-    'docker_clean'
+    'docker_clean',
+    'kafka'
   ],
   'so-desktop': [
     'ssl',
@@ -214,7 +206,7 @@
 {% do allowed_states.append('strelka') %}
 {% endif %}

-{% if grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-searchnode', 'so-managersearch', 'so-heavynode', 'so-import', 'so-kafkanode'] %}
+{% if grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-searchnode', 'so-managersearch', 'so-heavynode', 'so-import'] %}
 {% do allowed_states.append('elasticsearch') %}
 {% endif %}
@@ -81,11 +81,7 @@
 {% set NODE_CONTAINERS = [
     'so-logstash',
     'so-redis',
-  ] %}
-{% elif GLOBALS.role == 'so-kafkanode' %}
-{% set NODE_CONTAINERS = [
-    'so-logstash',
-    'so-kafka',
+    'so-kafka'
   ] %}

 {% elif GLOBALS.role == 'so-idh' %}
@@ -19,7 +19,6 @@ firewall:
       manager: []
       managersearch: []
       receiver: []
-      kafkanode: []
       searchnode: []
       self: []
       sensor: []
@@ -443,15 +442,6 @@ firewall:
          - elastic_agent_data
          - elastic_agent_update
          - sensoroni
-      kafkanode:
-        portgroups:
-          - yum
-          - docker_registry
-          - influxdb
-          - elastic_agent_control
-          - elastic_agent_data
-          - elastic_agent_update
-          - sensoroni
      analyst:
        portgroups:
          - nginx
@@ -530,9 +520,6 @@ firewall:
      receiver:
        portgroups:
          - salt_manager
-      kafkanode:
-        portgroups:
-          - salt_manager
      desktop:
        portgroups:
          - salt_manager
@@ -647,15 +634,6 @@ firewall:
          - elastic_agent_data
          - elastic_agent_update
          - sensoroni
-      kafkanode:
-        portgroups:
-          - yum
-          - docker_registry
-          - influxdb
-          - elastic_agent_control
-          - elastic_agent_data
-          - elastic_agent_update
-          - sensoroni
      analyst:
        portgroups:
          - nginx
@@ -1305,14 +1283,17 @@ firewall:
          - beats_5044
          - beats_5644
          - elastic_agent_data
+          - kafka
      searchnode:
        portgroups:
          - redis
          - beats_5644
+          - kafka
      managersearch:
        portgroups:
          - redis
          - beats_5644
+          - kafka
      self:
        portgroups:
          - redis
@@ -1383,73 +1364,6 @@ firewall:
              portgroups: []
            customhostgroup9:
              portgroups: []
-    kafkanode:
-      chain:
-        DOCKER-USER:
-          hostgroups:
-            searchnode:
-              portgroups:
-                - kafka
-            kafkanode:
-              portgroups:
-                - kafka
-            customhostgroup0:
-              portgroups: []
-            customhostgroup1:
-              portgroups: []
-            customhostgroup2:
-              portgroups: []
-            customhostgroup3:
-              portgroups: []
-            customhostgroup4:
-              portgroups: []
-            customhostgroup5:
-              portgroups: []
-            customhostgroup6:
-              portgroups: []
-            customhostgroup7:
-              portgroups: []
-            customhostgroup8:
-              portgroups: []
-            customhostgroup9:
-              portgroups: []
-        INPUT:
-          hostgroups:
-            anywhere:
-              portgroups:
-                - ssh
-            dockernet:
-              portgroups:
-                - all
-            localhost:
-              portgroups:
-                - all
-            self:
-              portgroups:
-                - syslog
-            syslog:
-              portgroups:
-                - syslog
-            customhostgroup0:
-              portgroups: []
-            customhostgroup1:
-              portgroups: []
-            customhostgroup2:
-              portgroups: []
-            customhostgroup3:
-              portgroups: []
-            customhostgroup4:
-              portgroups: []
-            customhostgroup5:
-              portgroups: []
-            customhostgroup6:
-              portgroups: []
-            customhostgroup7:
-              portgroups: []
-            customhostgroup8:
-              portgroups: []
-            customhostgroup9:
-              portgroups: []
    idh:
      chain:
        DOCKER-USER:
@@ -34,7 +34,6 @@ firewall:
    heavynode: *hostgroupsettings
    idh: *hostgroupsettings
    import: *hostgroupsettings
-    kafkanode: *hostgroupsettings
    localhost: *ROhostgroupsettingsadv
    manager: *hostgroupsettings
    managersearch: *hostgroupsettings
@@ -361,8 +360,6 @@ firewall:
            portgroups: *portgroupsdocker
          endgame:
            portgroups: *portgroupsdocker
-          kafkanode:
-            portgroups: *portgroupsdocker
          analyst:
            portgroups: *portgroupsdocker
          desktop:
@@ -454,8 +451,6 @@ firewall:
            portgroups: *portgroupsdocker
          syslog:
            portgroups: *portgroupsdocker
-          kafkanode:
-            portgroups: *portgroupsdocker
          analyst:
            portgroups: *portgroupsdocker
          desktop:
@@ -940,63 +935,6 @@ firewall:
              portgroups: *portgroupshost
            customhostgroup9:
              portgroups: *portgroupshost
-    kafkanode:
-      chain:
-        DOCKER-USER:
-          hostgroups:
-            searchnode:
-              portgroups: *portgroupsdocker
-            kafkanode:
-              portgroups: *portgroupsdocker
-            customhostgroup0:
-              portgroups: *portgroupsdocker
-            customhostgroup1:
-              portgroups: *portgroupsdocker
-            customhostgroup2:
-              portgroups: *portgroupsdocker
-            customhostgroup3:
-              portgroups: *portgroupsdocker
-            customhostgroup4:
-              portgroups: *portgroupsdocker
-            customhostgroup5:
-              portgroups: *portgroupsdocker
-            customhostgroup6:
-              portgroups: *portgroupsdocker
-            customhostgroup7:
-              portgroups: *portgroupsdocker
-            customhostgroup8:
-              portgroups: *portgroupsdocker
-            customhostgroup9:
-              portgroups: *portgroupsdocker
-        INPUT:
-          hostgroups:
-            anywhere:
-              portgroups: *portgroupshost
-            dockernet:
-              portgroups: *portgroupshost
-            localhost:
-              portgroups: *portgroupshost
-            customhostgroup0:
-              portgroups: *portgroupshost
-            customhostgroup1:
-              portgroups: *portgroupshost
-            customhostgroup2:
-              portgroups: *portgroupshost
-            customhostgroup3:
-              portgroups: *portgroupshost
-            customhostgroup4:
-              portgroups: *portgroupshost
-            customhostgroup5:
-              portgroups: *portgroupshost
-            customhostgroup6:
-              portgroups: *portgroupshost
-            customhostgroup7:
-              portgroups: *portgroupshost
-            customhostgroup8:
-              portgroups: *portgroupshost
-            customhostgroup9:
-              portgroups: *portgroupshost
-
    idh:
      chain:
        DOCKER-USER:
@@ -7,7 +7,7 @@
 {% if sls.split('.')[0] in allowed_states %}
 {% from 'vars/globals.map.jinja' import GLOBALS %}
 {% from 'docker/docker.map.jinja' import DOCKER %}
-{% set KAFKANODES = salt['pillar.get']('kafka:nodes', {}) %}
+{% set KAFKANODES = salt['pillar.get']('kafka:nodes', {}) %}

 include:
   - kafka.sostatus
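The KAFKANODES lookup above pulls the same kafka:nodes pillar seen earlier. As a rough, hedged illustration of what a Kafka KRaft template could derive from it (not part of this commit), the dictionary might be folded into a controller quorum string; the nodeid and ip keys and the 9093 controller port are assumptions:

{# sketch only: 'nodeid', 'ip' and port 9093 are assumed #}
{% set voters = [] %}
{% for name, node in KAFKANODES.items() %}
{%   do voters.append(node.nodeid ~ '@' ~ node.ip ~ ':9093') %}
{% endfor %}
{# voters | join(',') -> e.g. "1@10.0.0.5:9093,2@10.0.0.6:9093" #}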
@@ -12,7 +12,7 @@

 include:
   - ssl
-{% if GLOBALS.role not in ['so-receiver','so-fleet', 'so-kafkanode'] %}
+{% if GLOBALS.role not in ['so-receiver','so-fleet'] %}
   - elasticsearch
 {% endif %}
@@ -19,8 +19,6 @@ logstash:
      - search
    fleet:
      - fleet
-    kafkanode:
-      - kafkanode
  defined_pipelines:
    fleet:
      - so/0012_input_elastic_agent.conf.jinja
@@ -39,8 +37,6 @@ logstash:
      - so/0900_input_redis.conf.jinja
      - so/9805_output_elastic_agent.conf.jinja
      - so/9900_output_endgame.conf.jinja
-    kafkanode:
-      - so/0899_output_kafka.conf.jinja
    custom0: []
    custom1: []
    custom2: []
@@ -75,7 +75,7 @@ so-logstash:
 {% else %}
      - /etc/pki/tls/certs/intca.crt:/usr/share/filebeat/ca.crt:ro
 {% endif %}
-{% if GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-searchnode', 'so-kafkanode' ] %}
+{% if GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-searchnode' ] %}
      - /opt/so/conf/ca/cacerts:/etc/pki/ca-trust/extracted/java/cacerts:ro
      - /opt/so/conf/ca/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro
      - /etc/pki/kafka-logstash.p12:/usr/share/logstash/kafka-logstash.p12:ro
@@ -1,11 +1,9 @@
-{% set kafka_brokers = salt['pillar.get']('logstash:nodes:kafkanode', {}) %}
+{% set kafka_brokers = salt['pillar.get']('logstash:nodes:receiver', {}) %}
 {% set kafka_on_mngr = salt ['pillar.get']('logstash:nodes:manager', {}) %}
 {% set broker_ips = [] %}
 {% for node, node_data in kafka_brokers.items() %}
 {% do broker_ips.append(node_data['ip'] + ":9092") %}
 {% endfor %}
-
-{# For testing kafka stuff from manager not dedicated kafkanodes #}
 {% for node, node_data in kafka_on_mngr.items() %}
 {% do broker_ips.append(node_data['ip'] + ":9092") %}
 {% endfor %}
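The broker_ips list assembled above would typically be collapsed into a single comma-separated bootstrap-servers string before being handed to the Logstash Kafka plugin; a minimal sketch, with the variable name chosen for illustration rather than taken from this commit:

{# sketch only: join broker addresses into one bootstrap string #}
{% set bootstrap_servers = broker_ips | join(',') %}
{# e.g. "10.0.0.5:9092,10.0.0.6:9092" #}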
@@ -16,7 +16,6 @@ logstash:
    manager: *assigned_pipelines
    managersearch: *assigned_pipelines
    fleet: *assigned_pipelines
-    kafkanode: *assigned_pipelines
  defined_pipelines:
    receiver: &defined_pipelines
      description: List of pipeline configurations assign to this group.
@@ -27,7 +26,6 @@ logstash:
    fleet: *defined_pipelines
    manager: *defined_pipelines
    search: *defined_pipelines
-    kafkanode: *defined_pipelines
    custom0: *defined_pipelines
    custom1: *defined_pipelines
    custom2: *defined_pipelines
@@ -79,9 +79,6 @@ fi
  'RECEIVER')
    so-firewall includehost receiver "$IP" --apply
    ;;
-  'KAFKANODE')
-    so-firewall includehost kafkanode "$IP" --apply
-    ;;
  'DESKTOP')
    so-firewall includehost desktop "$IP" --apply
    ;;
@@ -565,11 +565,6 @@ function createRECEIVER() {
  add_telegraf_to_minion
 }

-function createKAFKANODE() {
-  add_logstash_to_minion
-  # add_telegraf_to_minion
-}
-
 function createDESKTOP() {
  add_desktop_to_minion
  add_telegraf_to_minion
@@ -664,10 +664,7 @@ elastickeyperms:

 {%- endif %}

-# Roles will need to be modified. Below is just for testing encrypted kafka pipelines
-# Remove so-manager. Just inplace for testing
-{% if grains['role'] in ['so-manager', 'so-kafkanode', 'so-searchnode'] %}
+# Create a cert for Redis encryption
+{% if grains['role'] in ['so-manager', 'so-searchnode', 'so-receiver'] %}
 kafka_key:
   x509.private_key_managed:
     - name: /etc/pki/kafka.key
@@ -235,16 +235,7 @@ base:
    - firewall
    - logstash
    - redis
    - elasticfleet.install_agent_grid

-  '*_kafkanode and G@saltversion:{{saltversion}}':
-    - match: compound
-    - kafka
-    - logstash
-    - ssl
-    - telegraf
-    - firewall
-    - docker_clean
-    - elasticfleet.install_agent_grid

  '*_idh and G@saltversion:{{saltversion}}':
@@ -1143,7 +1143,7 @@ get_redirect() {
 get_minion_type() {
  local minion_type
  case "$install_type" in
-    'EVAL' | 'MANAGERSEARCH' | 'MANAGER' | 'SENSOR' | 'HEAVYNODE' | 'SEARCHNODE' | 'FLEET' | 'IDH' | 'STANDALONE' | 'IMPORT' | 'RECEIVER' | 'DESKTOP' | 'KAFKANODE')
+    'EVAL' | 'MANAGERSEARCH' | 'MANAGER' | 'SENSOR' | 'HEAVYNODE' | 'SEARCHNODE' | 'FLEET' | 'IDH' | 'STANDALONE' | 'IMPORT' | 'RECEIVER' | 'DESKTOP')
      minion_type=$(echo "$install_type" | tr '[:upper:]' '[:lower:]')
      ;;
  esac
@@ -1505,8 +1505,6 @@ process_installtype() {
    is_import=true
  elif [ "$install_type" = 'RECEIVER' ]; then
    is_receiver=true
-  elif [ "$install_type" = 'KAFKANODE' ]; then
-    is_kafka=true
  elif [ "$install_type" = 'DESKTOP' ]; then
    is_desktop=true
  fi
@@ -681,7 +681,6 @@ whiptail_install_type_dist_existing() {
    "HEAVYNODE" "Sensor + Search Node " \
    "IDH" "Intrusion Detection Honeypot Node " \
    "RECEIVER" "Receiver Node " \
-    "KAFKANODE" "Kafka Broker + Kraft controller" \
    3>&1 1>&2 2>&3
    # "HOTNODE" "Add Hot Node (Uses Elastic Clustering)" \ # TODO
    # "WARMNODE" "Add Warm Node to existing Hot or Search node" \ # TODO
@@ -712,8 +711,6 @@ whiptail_install_type_dist_existing() {
    is_import=true
  elif [ "$install_type" = 'RECEIVER' ]; then
    is_receiver=true
-  elif [ "$install_type" = 'KAFKANODE' ]; then
-    is_kafka=true
  elif [ "$install_type" = 'DESKTOP' ]; then
    is_desktop=true
  fi