Merge pull request #14998 from Security-Onion-Solutions/vlb2

manager do hypervisor things
Authored by Josh Patterson on 2025-09-05 17:13:37 -04:00; committed by GitHub
7 changed files with 49 additions and 22 deletions


@@ -1230,6 +1230,10 @@ firewall:
       portgroups:
         - elasticsearch_node
         - elasticsearch_rest
+    managerhype:
+      portgroups:
+        - elasticsearch_node
+        - elasticsearch_rest
     standalone:
       portgroups:
         - elasticsearch_node
@@ -1377,6 +1381,10 @@ firewall:
       portgroups:
         - elasticsearch_node
         - elasticsearch_rest
+    managerhype:
+      portgroups:
+        - elasticsearch_node
+        - elasticsearch_rest
     standalone:
       portgroups:
         - elasticsearch_node
@@ -1579,6 +1587,9 @@ firewall:
       portgroups:
         - redis
         - elastic_agent_data
+    managerhype:
+      portgroups:
+        - elastic_agent_data
     self:
       portgroups:
         - redis
@@ -1696,6 +1707,9 @@ firewall:
     managersearch:
       portgroups:
         - openssh
+    managerhype:
+      portgroups:
+        - openssh
     standalone:
       portgroups:
         - openssh
@@ -1758,6 +1772,8 @@ firewall:
       portgroups: []
     managersearch:
       portgroups: []
+    managerhype:
+      portgroups: []
     standalone:
       portgroups: []
     customhostgroup0:
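
For orientation, the hunks above add a managerhype hostgroup next to the existing manager-style hostgroups. Below is a minimal sketch of where those entries sit, assuming the nesting implied by the Kafka Jinja later in this PR (firewall.role[<role>].chain['DOCKER-USER'].hostgroups[<hostgroup>].portgroups); the enclosing 'manager' role key and the fragment itself are illustrative only, and the sketch needs PyYAML installed.

# Illustrative only -- a hand-written fragment, not the real defaults file.
import yaml

fragment = """
firewall:
  role:
    manager:
      chain:
        DOCKER-USER:
          hostgroups:
            managerhype:
              portgroups:
                - elasticsearch_node
                - elasticsearch_rest
"""

doc = yaml.safe_load(fragment)
hostgroups = doc["firewall"]["role"]["manager"]["chain"]["DOCKER-USER"]["hostgroups"]
print(hostgroups["managerhype"]["portgroups"])   # ['elasticsearch_node', 'elasticsearch_rest']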


@@ -25,7 +25,7 @@
 {% set KAFKA_EXTERNAL_ACCESS = salt['pillar.get']('kafka:config:external_access:enabled', default=False) %}
 {% set kafka_node_type = salt['pillar.get']('kafka:nodes:'+ GLOBALS.hostname + ':role') %}
-{% if role in ['manager', 'managersearch', 'standalone'] %}
+{% if role.startswith('manager') or role == 'standalone' %}
   {% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups[role].portgroups.append('kafka_controller') %}
   {% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups.receiver.portgroups.append('kafka_controller') %}
 {% endif %}
@@ -38,8 +38,8 @@
   {% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups.receiver.portgroups.append('kafka_controller') %}
 {% endif %}
-{% if role in ['manager', 'managersearch', 'standalone', 'receiver'] %}
-  {% for r in ['manager', 'managersearch', 'standalone', 'receiver', 'fleet', 'idh', 'sensor', 'searchnode','heavynode', 'elastic_agent_endpoint', 'desktop'] %}
+{% if role.startswith('manager') or role in ['standalone', 'receiver'] %}
+  {% for r in ['manager', 'managersearch', 'managerhype', 'standalone', 'receiver', 'fleet', 'idh', 'sensor', 'searchnode','heavynode', 'elastic_agent_endpoint', 'desktop'] %}
     {% if FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups[r] is defined %}
       {% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups[r].portgroups.append('kafka_data') %}
     {% endif %}
@@ -48,7 +48,7 @@
 {% if KAFKA_EXTERNAL_ACCESS %}
   {# Kafka external access only applies for Kafka nodes with the broker role. #}
-  {% if role in ['manager', 'managersearch', 'standalone', 'receiver'] and 'broker' in kafka_node_type %}
+  {% if role.startswith('manager') or role in ['standalone', 'receiver'] and 'broker' in kafka_node_type %}
     {% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups.external_kafka.portgroups.append('kafka_external_access') %}
   {% endif %}
 {% endif %}
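
One way to see what the switch from explicit role lists to role.startswith('manager') buys is to render both expressions with jinja2. This is a rough sketch rather than code from the PR; it assumes plain jinja2 evaluates these expressions the same way the Salt renderer does, and the role names are just examples.

# Compare the old membership test with the new prefix test; only 'managerhype' changes.
from jinja2 import Template

OLD = "{{ role in ['manager', 'managersearch', 'standalone'] }}"
NEW = "{{ role.startswith('manager') or role == 'standalone' }}"

for role in ["manager", "managersearch", "managerhype", "standalone", "sensor"]:
    old_result = Template(OLD).render(role=role)
    new_result = Template(NEW).render(role=role)
    print(f"{role:13} old={old_result:5} new={new_result}")
# manager, managersearch, and standalone stay True, sensor stays False;
# managerhype flips from False to True, which is the point of the refactor.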


@@ -3,8 +3,7 @@
 # https://securityonion.net/license; you may not use this file except in compliance with the
 # Elastic License 2.0.
-{% from 'libvirt/map.jinja' import LIBVIRTMERGED %}
-{% from 'salt/map.jinja' import SYSTEMD_UNIT_FILE %}
+# We do not import GLOBALS in this state because it is called during setup
 down_original_mgmt_interface:
   cmd.run:
@@ -30,6 +29,8 @@ wait_for_br0_ip:
     - onchanges:
       - cmd: down_original_mgmt_interface
+{% if grains.role == 'so-hypervisor' %}
 update_mine_functions:
   file.managed:
     - name: /etc/salt/minion.d/mine_functions.conf
@@ -38,6 +39,10 @@ update_mine_functions:
        mine_functions:
          network.ip_addrs:
            - interface: br0
+         {%- if role in ['so-eval','so-import','so-manager','so-managerhype','so-managersearch','so-standalone'] %}
+         x509.get_pem_entries:
+           - glob_path: '/etc/pki/ca.crt'
+         {% endif %}
     - onchanges:
       - cmd: wait_for_br0_ip
@@ -47,3 +52,5 @@ restart_salt_minion_service:
     - enable: True
     - listen:
       - file: update_mine_functions
+{% endif %}


@@ -17,7 +17,7 @@
 {% for node_type, node_details in redis_node_data.items() | sort %}
   {% if GLOBALS.role in ['so-searchnode', 'so-standalone', 'so-managersearch', 'so-fleet'] %}
-    {% if node_type in ['manager', 'managersearch', 'standalone', 'receiver' ] %}
+    {% if node_type.startswith('manager') or node_type in ['standalone', 'receiver'] %}
       {% for hostname in redis_node_data[node_type].keys() %}
         {% do LOGSTASH_REDIS_NODES.append({hostname:node_details[hostname].ip}) %}
       {% endfor %}
@@ -47,7 +47,7 @@
 {% endif %}
 {# Disable logstash on manager & receiver nodes unless it has an override configured #}
 {% if not KAFKA_LOGSTASH %}
-  {% if GLOBALS.role in ['so-manager', 'so-receiver'] and GLOBALS.hostname not in KAFKA_LOGSTASH %}
+  {% if GLOBALS.role in ['so-manager', 'so-managerhype', 'so-receiver'] and GLOBALS.hostname not in KAFKA_LOGSTASH %}
     {% do LOGSTASH_MERGED.update({'enabled': False}) %}
   {% endif %}
 {% endif %}
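
The second hunk adds so-managerhype to the roles where Logstash is disabled by default when there is no KAFKA_LOGSTASH override. Below is a small sketch of that guard in plain Python with invented values, just to show the effect for a managerhype node; the shape of KAFKA_LOGSTASH (a list of hostnames that keep Logstash running) is an assumption.

# Hedged translation of the Jinja guard above; values are illustrative only.
KAFKA_LOGSTASH = []
role, hostname = "so-managerhype", "hype01"
LOGSTASH_MERGED = {"enabled": True}

if not KAFKA_LOGSTASH:
    if role in ["so-manager", "so-managerhype", "so-receiver"] and hostname not in KAFKA_LOGSTASH:
        LOGSTASH_MERGED.update({"enabled": False})

print(LOGSTASH_MERGED)   # {'enabled': False} -- managerhype is now treated like manager/receiver here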


@@ -95,7 +95,7 @@ enable_startup_states:
     - unless: pgrep so-setup
 # prior to 2.4.30 this managed file would restart the salt-minion service when updated
-# since this file is currently only adding a sleep timer on service start
+# since this file is currently only adding a delay service start
 # it is not required to restart the service
 salt_minion_service_unit_file:
   file.managed:


@@ -29,7 +29,7 @@ title() {
 }
 fail_setup() {
-  error "Setup encounted an unrecoverable failure, exiting"
+  error "Setup encountered an unrecoverable failure, exiting"
   touch /root/failure
   exit 1
 }
@@ -1187,15 +1187,18 @@ get_minion_type() {
 }
 hypervisor_local_states() {
   # these states need to run before the first highstate so that we dont deal with the salt-minion restarting
   # and we need these setup prior to the highstate
   info "Check if hypervisor or managerhype"
   if [ $is_hypervisor ] || [ $is_managerhype ]; then
     info "Running libvirt states for hypervisor"
-    logCmd "salt-call state.apply libvirt.64962 --local --file-root=../salt/ -l info"
+    logCmd "salt-call state.apply libvirt.64962 --local --file-root=../salt/ -l info queue=True"
     info "Setting up bridge for $MNIC"
-    salt-call state.apply libvirt.bridge --local --file-root=../salt/ -l info pillar="{\"host\": {\"mainint\": \"$MNIC\"}}"
+    salt-call state.apply libvirt.bridge --local --file-root=../salt/ -l info pillar='{"host": {"mainint": "'$MNIC'"}}' queue=True
+    if [ $is_managerhype ]; then
+      logCmd "salt-call state.apply salt.minion queue=True"
+    fi
   fi
 }
 install_cleanup() {
@@ -1642,7 +1645,7 @@ reserve_ports() {
 reinstall_init() {
   info "Putting system in state to run setup again"
-  if [[ $install_type =~ ^(MANAGER|EVAL|MANAGERSEARCH|STANDALONE|FLEET|IMPORT)$ ]]; then
+  if [[ $install_type =~ ^(MANAGER|EVAL|MANAGERSEARCH|MANAGERHYPE|STANDALONE|FLEET|IMPORT)$ ]]; then
     local salt_services=( "salt-master" "salt-minion" )
   else
     local salt_services=( "salt-minion" )
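
Unlike the Jinja templates, the bash regex in reinstall_init is anchored, so MANAGERHYPE has to be listed explicitly rather than relying on a MANAGER prefix match. Here is a quick Python approximation of the [[ =~ ]] test; the pattern strings are copied from the hunk, and equivalence between Python re and bash ERE is assumed for this simple case.

# MANAGERHYPE fails the old anchored alternation and passes the new one.
import re

OLD = r"^(MANAGER|EVAL|MANAGERSEARCH|STANDALONE|FLEET|IMPORT)$"
NEW = r"^(MANAGER|EVAL|MANAGERSEARCH|MANAGERHYPE|STANDALONE|FLEET|IMPORT)$"

for install_type in ["MANAGER", "MANAGERSEARCH", "MANAGERHYPE"]:
    print(install_type, bool(re.match(OLD, install_type)), bool(re.match(NEW, install_type)))
# Prints: MANAGER True True / MANAGERSEARCH True True / MANAGERHYPE False True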


@@ -654,9 +654,10 @@ whiptail_install_type_dist_new() {
 Note: MANAGER is the recommended option for most users. MANAGERSEARCH should only be used in very specific situations.
 EOM
-  install_type=$(whiptail --title "$whiptail_title" --menu "$mngr_msg" 20 75 2 \
+  install_type=$(whiptail --title "$whiptail_title" --menu "$mngr_msg" 20 75 3 \
     "MANAGER" "New grid, requires separate search node(s) " \
     "MANAGERSEARCH" "New grid, separate search node(s) are optional " \
+    "MANAGERHYPE" "Manager with hypervisor - Security Onion Pro required " \
     3>&1 1>&2 2>&3
   )