Mirror of https://github.com/Security-Onion-Solutions/securityonion.git (synced 2025-12-06 09:12:45 +01:00)
Merge pull request #14998 from Security-Onion-Solutions/vlb2
manager do hypervisor things
@@ -1230,6 +1230,10 @@ firewall:
           portgroups:
             - elasticsearch_node
             - elasticsearch_rest
+        managerhype:
+          portgroups:
+            - elasticsearch_node
+            - elasticsearch_rest
         standalone:
           portgroups:
             - elasticsearch_node
@@ -1377,6 +1381,10 @@ firewall:
           portgroups:
             - elasticsearch_node
             - elasticsearch_rest
+        managerhype:
+          portgroups:
+            - elasticsearch_node
+            - elasticsearch_rest
         standalone:
           portgroups:
             - elasticsearch_node
@@ -1579,6 +1587,9 @@ firewall:
           portgroups:
             - redis
             - elastic_agent_data
+        managerhype:
+          portgroups:
+            - elastic_agent_data
         self:
           portgroups:
             - redis
@@ -1696,6 +1707,9 @@ firewall:
         managersearch:
           portgroups:
             - openssh
+        managerhype:
+          portgroups:
+            - openssh
         standalone:
           portgroups:
             - openssh
@@ -1758,6 +1772,8 @@ firewall:
           portgroups: []
         managersearch:
           portgroups: []
+        managerhype:
+          portgroups: []
         standalone:
           portgroups: []
         customhostgroup0:
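Taken together, the five hunks above give the new managerhype role the same default firewall allowances as managersearch: Elasticsearch node and REST traffic, redis and elastic_agent_data, openssh, and an empty custom hostgroup. A hedged spot-check on a deployed node (the state name 'firewall' is inferred from the hunk context, not stated in the diff):

    # Render the firewall state locally and confirm the managerhype hostgroup
    # picked up the expected portgroups after pillar merging.
    salt-call state.show_sls firewall -l quiet --out=yaml | grep -A3 'managerhype'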
@@ -25,7 +25,7 @@
 {% set KAFKA_EXTERNAL_ACCESS = salt['pillar.get']('kafka:config:external_access:enabled', default=False) %}
 {% set kafka_node_type = salt['pillar.get']('kafka:nodes:'+ GLOBALS.hostname + ':role') %}

-{% if role in ['manager', 'managersearch', 'standalone'] %}
+{% if role.startswith('manager') or role == 'standalone' %}
   {% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups[role].portgroups.append('kafka_controller') %}
   {% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups.receiver.portgroups.append('kafka_controller') %}
 {% endif %}
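The change above swaps an explicit role list for a prefix test, so any current or future manager-class role (manager, managersearch, managerhype) passes automatically. A shell sketch of the same idea, with illustrative role names:

    # Prefix matching mirrors Jinja's role.startswith('manager'):
    for role in manager managersearch managerhype standalone sensor; do
      case $role in
        manager*|standalone) echo "$role: kafka_controller portgroup appended" ;;
        *)                   echo "$role: no change" ;;
      esac
    done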
@@ -38,8 +38,8 @@
   {% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups.receiver.portgroups.append('kafka_controller') %}
 {% endif %}

-{% if role in ['manager', 'managersearch', 'standalone', 'receiver'] %}
-  {% for r in ['manager', 'managersearch', 'standalone', 'receiver', 'fleet', 'idh', 'sensor', 'searchnode','heavynode', 'elastic_agent_endpoint', 'desktop'] %}
+{% if role.startswith('manager') or role in ['standalone', 'receiver'] %}
+  {% for r in ['manager', 'managersearch', 'managerhype', 'standalone', 'receiver', 'fleet', 'idh', 'sensor', 'searchnode','heavynode', 'elastic_agent_endpoint', 'desktop'] %}
     {% if FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups[r] is defined %}
       {% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups[r].portgroups.append('kafka_data') %}
     {% endif %}
@@ -48,11 +48,11 @@

 {% if KAFKA_EXTERNAL_ACCESS %}
 {# Kafka external access only applies for Kafka nodes with the broker role. #}
-  {% if role in ['manager', 'managersearch', 'standalone', 'receiver'] and 'broker' in kafka_node_type %}
+  {% if role.startswith('manager') or role in ['standalone', 'receiver'] and 'broker' in kafka_node_type %}
     {% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups.external_kafka.portgroups.append('kafka_external_access') %}
   {% endif %}
 {% endif %}

 {% endif %}

 {% set FIREWALL_MERGED = salt['pillar.get']('firewall', FIREWALL_DEFAULT.firewall, merge=True) %}
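The unchanged last line deserves a note: pillar.get with merge=True deep-merges any operator-supplied firewall pillar over these defaults instead of replacing the tree wholesale, which is how the new managerhype defaults coexist with site overrides. A minimal sketch of the call shape (the empty-dict default is illustrative only):

    # merge=True overlays the pillar value onto the supplied default dict;
    # keys the operator never set keep their default values.
    salt-call pillar.get firewall '{}' merge=True --out=yaml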
@@ -3,8 +3,7 @@
 # https://securityonion.net/license; you may not use this file except in compliance with the
 # Elastic License 2.0.

-{% from 'libvirt/map.jinja' import LIBVIRTMERGED %}
-{% from 'salt/map.jinja' import SYSTEMD_UNIT_FILE %}
+# We do not import GLOBALS in this state because it is called during setup

 down_original_mgmt_interface:
   cmd.run:
@@ -30,6 +29,8 @@ wait_for_br0_ip:
     - onchanges:
       - cmd: down_original_mgmt_interface

+{% if grains.role == 'so-hypervisor' %}
+
 update_mine_functions:
   file.managed:
     - name: /etc/salt/minion.d/mine_functions.conf
@@ -38,6 +39,10 @@ update_mine_functions:
         mine_functions:
           network.ip_addrs:
             - interface: br0
+{%- if role in ['so-eval','so-import','so-manager','so-managerhype','so-managersearch','so-standalone'] %}
+          x509.get_pem_entries:
+            - glob_path: '/etc/pki/ca.crt'
+{% endif %}
     - onchanges:
       - cmd: wait_for_br0_ip
@@ -47,3 +52,5 @@ restart_salt_minion_service:
     - enable: True
     - listen:
       - file: update_mine_functions
+
+{% endif %}
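Net effect of these four hunks: the mine configuration block is now guarded by grains.role == 'so-hypervisor', and manager-class roles additionally template in an x509.get_pem_entries mine that publishes the grid CA. Once the minion restarts, the published data can be checked from the manager; a sketch with hypothetical minion targets:

    # br0 address published by a hypervisor (target globs are hypothetical):
    salt 'manager' mine.get 'so-hypervisor*' network.ip_addrs
    # CA certificate published by manager-class nodes:
    salt 'manager' mine.get '*manager*' x509.get_pem_entries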
@@ -17,7 +17,7 @@

 {% for node_type, node_details in redis_node_data.items() | sort %}
   {% if GLOBALS.role in ['so-searchnode', 'so-standalone', 'so-managersearch', 'so-fleet'] %}
-    {% if node_type in ['manager', 'managersearch', 'standalone', 'receiver' ] %}
+    {% if node_type.startswith('manager') or node_type in ['standalone', 'receiver'] %}
       {% for hostname in redis_node_data[node_type].keys() %}
         {% do LOGSTASH_REDIS_NODES.append({hostname:node_details[hostname].ip}) %}
       {% endfor %}
@@ -47,7 +47,7 @@
   {% endif %}
 {# Disable logstash on manager & receiver nodes unless it has an override configured #}
 {% if not KAFKA_LOGSTASH %}
-  {% if GLOBALS.role in ['so-manager', 'so-receiver'] and GLOBALS.hostname not in KAFKA_LOGSTASH %}
+  {% if GLOBALS.role in ['so-manager', 'so-managerhype', 'so-receiver'] and GLOBALS.hostname not in KAFKA_LOGSTASH %}
     {% do LOGSTASH_MERGED.update({'enabled': False}) %}
   {% endif %}
 {% endif %}
@@ -95,7 +95,7 @@ enable_startup_states:
     - unless: pgrep so-setup

 # prior to 2.4.30 this managed file would restart the salt-minion service when updated
-# since this file is currently only adding a sleep timer on service start
+# since this file is currently only adding a delay service start
 # it is not required to restart the service
 salt_minion_service_unit_file:
   file.managed:
@@ -29,7 +29,7 @@ title() {
 }

 fail_setup() {
-  error "Setup encounted an unrecoverable failure, exiting"
+  error "Setup encountered an unrecoverable failure, exiting"
   touch /root/failure
   exit 1
 }
@@ -1187,15 +1187,18 @@ get_minion_type() {
 }

 hypervisor_local_states() {
-  # these states need to run before the first highstate so that we dont deal with the salt-minion restarting
-  # and we need these setup prior to the highstate
-  info "Check if hypervisor or managerhype"
-  if [ $is_hypervisor ] || [ $is_managerhype ]; then
-    info "Running libvirt states for hypervisor"
-    logCmd "salt-call state.apply libvirt.64962 --local --file-root=../salt/ -l info"
-    info "Setting up bridge for $MNIC"
-    salt-call state.apply libvirt.bridge --local --file-root=../salt/ -l info pillar="{\"host\": {\"mainint\": \"$MNIC\"}}"
-  fi
+  # these states need to run before the first highstate so that we dont deal with the salt-minion restarting
+  # and we need these setup prior to the highstate
+  info "Check if hypervisor or managerhype"
+  if [ $is_hypervisor ] || [ $is_managerhype ]; then
+    info "Running libvirt states for hypervisor"
+    logCmd "salt-call state.apply libvirt.64962 --local --file-root=../salt/ -l info queue=True"
+    info "Setting up bridge for $MNIC"
+    salt-call state.apply libvirt.bridge --local --file-root=../salt/ -l info pillar='{"host": {"mainint": "'$MNIC'"}}' queue=True
+    if [ $is_managerhype ]; then
+      logCmd "salt-call state.apply salt.minion queue=True"
+    fi
+  fi
 }

 install_cleanup() {
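Besides the new managerhype branch (which also applies salt.minion so the mine changes above land during setup), the key change is queue=True on each salt-call: instead of failing when another state run holds the minion's state lock, the run waits in the queue. A minimal illustration with placeholder state names:

    # Without queue=True the second invocation errors out with
    # "The function ... is running as PID ..."; with queue=True it waits.
    salt-call state.apply some.long.state &          # hypothetical first run holds the lock
    salt-call state.apply another.state queue=True   # queued until the lock is released
    wait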
@@ -1642,7 +1645,7 @@ reserve_ports() {
 reinstall_init() {
   info "Putting system in state to run setup again"

-  if [[ $install_type =~ ^(MANAGER|EVAL|MANAGERSEARCH|STANDALONE|FLEET|IMPORT)$ ]]; then
+  if [[ $install_type =~ ^(MANAGER|EVAL|MANAGERSEARCH|MANAGERHYPE|STANDALONE|FLEET|IMPORT)$ ]]; then
     local salt_services=( "salt-master" "salt-minion" )
   else
     local salt_services=( "salt-minion" )
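Because the alternation is anchored by ^ and $, each branch must match the entire value, so MANAGERHYPE matches only its own branch rather than the MANAGER prefix. A quick check:

    for t in MANAGER MANAGERHYPE MANAGERX; do
      if [[ $t =~ ^(MANAGER|EVAL|MANAGERSEARCH|MANAGERHYPE|STANDALONE|FLEET|IMPORT)$ ]]; then
        echo "$t -> salt-master + salt-minion"
      else
        echo "$t -> salt-minion only"   # MANAGERX: anchors force a full-string match
      fi
    done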
@@ -654,9 +654,10 @@ whiptail_install_type_dist_new() {
 Note: MANAGER is the recommended option for most users. MANAGERSEARCH should only be used in very specific situations.
 EOM

-  install_type=$(whiptail --title "$whiptail_title" --menu "$mngr_msg" 20 75 2 \
+  install_type=$(whiptail --title "$whiptail_title" --menu "$mngr_msg" 20 75 3 \
   "MANAGER" "New grid, requires separate search node(s) " \
   "MANAGERSEARCH" "New grid, separate search node(s) are optional " \
+  "MANAGERHYPE" "Manager with hypervisor - Security Onion Pro required " \
   3>&1 1>&2 2>&3
   )
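Two small mechanics in this hunk: the third geometry argument (the menu's visible list height) grows from 2 to 3 to fit the new entry, and the trailing 3>&1 1>&2 2>&3 swaps stdout and stderr because whiptail writes the selected tag to stderr. A standalone sketch:

    # Swapping fds lets $(...) capture the selection while the dialog
    # still draws on the terminal.
    choice=$(whiptail --title "demo" --menu "pick one" 12 50 3 \
      "a" "first"  \
      "b" "second" \
      "c" "third"  \
      3>&1 1>&2 2>&3)
    echo "selected: $choice"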