From c7e7a0a871aafb9dcbdd085d264001be59379103 Mon Sep 17 00:00:00 2001 From: Josh Patterson Date: Thu, 14 Aug 2025 16:36:09 -0400 Subject: [PATCH 1/7] add more detail to fail_setup output --- setup/so-functions | 44 +++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 41 insertions(+), 3 deletions(-) diff --git a/setup/so-functions b/setup/so-functions index 522446be4..75ec2019e 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -29,8 +29,46 @@ title() { } fail_setup() { - error "Setup encounted an unrecoverable failure, exiting" - touch /root/failure + local failure_reason="${1:-Unknown failure}" + + # Capture call stack information + local calling_function="${FUNCNAME[1]:-main}" + local calling_line="${BASH_LINENO[0]:-unknown}" + local calling_file="${BASH_SOURCE[1]:-unknown}" + + # Build call stack trace + local call_stack="" + local i=1 + while [[ $i -lt ${#FUNCNAME[@]} ]]; do + local func="${FUNCNAME[$i]}" + local file="${BASH_SOURCE[$i]##*/}" # Get basename only + local line="${BASH_LINENO[$((i-1))]}" + + if [[ -n "$call_stack" ]]; then + call_stack="$call_stack -> " + fi + call_stack="$call_stack$func($file:$line)" + ((i++)) + done + + # Enhanced error logging with call stack + error "FAILURE: Called from $calling_function() at line $calling_line" + error "REASON: $failure_reason" + error "STACK: $call_stack" + error "Setup encountered an unrecoverable failure: $failure_reason" + + # Create detailed failure file with enhanced information + { + echo "SETUP_FAILURE_TIMESTAMP=$(date -u '+%Y-%m-%d %H:%M:%S UTC')" + echo "SETUP_FAILURE_REASON=$failure_reason" + echo "SETUP_CALLING_FUNCTION=$calling_function" + echo "SETUP_CALLING_LINE=$calling_line" + echo "SETUP_CALLING_FILE=${calling_file##*/}" + echo "SETUP_CALL_STACK=$call_stack" + echo "SETUP_LOG_LOCATION=$setup_log" + echo "SETUP_FAILURE_DETAILS=Check $setup_log for complete error details" + } > /root/failure + exit 1 } @@ -1194,7 +1232,7 @@ hypervisor_local_states() { info "Running libvirt 
states for hypervisor" logCmd "salt-call state.apply libvirt.64962 --local --file-root=../salt/ -l info" info "Setting up bridge for $MNIC" - salt-call state.apply libvirt.bridge --local --file-root=../salt/ -l info pillar="{\"host\": {\"mainint\": \"$MNIC\"}}" + salt-call state.apply libvirt.bridge --local --file-root=../salt/ -l info pillar="{\"host\": {\"mainint\": \"$MNIC\"}}" fi } From cbdd369a1882d902f837c4e17244de3259d7a17d Mon Sep 17 00:00:00 2001 From: Josh Patterson Date: Mon, 25 Aug 2025 08:39:55 -0400 Subject: [PATCH 2/7] ensure x509 in mine --- salt/libvirt/bridge.sls | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/salt/libvirt/bridge.sls b/salt/libvirt/bridge.sls index 5ff5d670c..c9e8650a3 100644 --- a/salt/libvirt/bridge.sls +++ b/salt/libvirt/bridge.sls @@ -3,6 +3,8 @@ # https://securityonion.net/license; you may not use this file except in compliance with the # Elastic License 2.0. +# We do not import GLOBALS in this state because it is called during setup + {% from 'libvirt/map.jinja' import LIBVIRTMERGED %} {% from 'salt/map.jinja' import SYSTEMD_UNIT_FILE %} @@ -38,6 +40,10 @@ update_mine_functions: mine_functions: network.ip_addrs: - interface: br0 + {%- if role in ['so-eval','so-import','so-manager','so-managerhype','so-managersearch','so-standalone'] %} + x509.get_pem_entries: + - glob_path: '/etc/pki/ca.crt' + {% endif %} - onchanges: - cmd: wait_for_br0_ip From e10d00d114031a7b76c0d14e5d9cf3e1332a1af4 Mon Sep 17 00:00:00 2001 From: Josh Patterson Date: Tue, 26 Aug 2025 14:54:37 -0400 Subject: [PATCH 3/7] support for managerhype --- salt/libvirt/bridge.sls | 7 ++++--- salt/salt/minion.sls | 2 +- setup/so-functions | 23 +++++++++++++---------- 3 files changed, 18 insertions(+), 14 deletions(-) diff --git a/salt/libvirt/bridge.sls b/salt/libvirt/bridge.sls index c9e8650a3..b8f720993 100644 --- a/salt/libvirt/bridge.sls +++ b/salt/libvirt/bridge.sls @@ -5,9 +5,6 @@ # We do not import GLOBALS in this state because it is called during 
setup -{% from 'libvirt/map.jinja' import LIBVIRTMERGED %} -{% from 'salt/map.jinja' import SYSTEMD_UNIT_FILE %} - down_original_mgmt_interface: cmd.run: - name: "nmcli con down {{ pillar.host.mainint }}" @@ -32,6 +29,8 @@ wait_for_br0_ip: - onchanges: - cmd: down_original_mgmt_interface +{% if grains.role == 'so-hypervisor' %} + update_mine_functions: file.managed: - name: /etc/salt/minion.d/mine_functions.conf @@ -53,3 +52,5 @@ restart_salt_minion_service: - enable: True - listen: - file: update_mine_functions + +{% endif %} diff --git a/salt/salt/minion.sls b/salt/salt/minion.sls index b0e078e79..b85fad1c0 100644 --- a/salt/salt/minion.sls +++ b/salt/salt/minion.sls @@ -95,7 +95,7 @@ enable_startup_states: - unless: pgrep so-setup # prior to 2.4.30 this managed file would restart the salt-minion service when updated -# since this file is currently only adding a sleep timer on service start +# since this file is currently only adding a delay to service start # it is not required to restart the service salt_minion_service_unit_file: file.managed: diff --git a/setup/so-functions b/setup/so-functions index 522446be4..4b84b752b 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1187,15 +1187,18 @@ get_minion_type() { } hypervisor_local_states() { - # these states need to run before the first highstate so that we dont deal with the salt-minion restarting - # and we need these setup prior to the highstate - info "Check if hypervisor or managerhype" - if [ $is_hypervisor ] || [ $is_managerhype ]; then - info "Running libvirt states for hypervisor" - logCmd "salt-call state.apply libvirt.64962 --local --file-root=../salt/ -l info" - info "Setting up bridge for $MNIC" - salt-call state.apply libvirt.bridge --local --file-root=../salt/ -l info pillar="{\"host\": {\"mainint\": \"$MNIC\"}}" - fi + # these states need to run before the first highstate so that we dont deal with the salt-minion restarting + # and we need these setup prior to the highstate + info "Check 
if hypervisor or managerhype" + if [ $is_hypervisor ] || [ $is_managerhype ]; then + info "Running libvirt states for hypervisor" + logCmd "salt-call state.apply libvirt.64962 --local --file-root=../salt/ -l info queue=True" + info "Setting up bridge for $MNIC" + salt-call state.apply libvirt.bridge --local --file-root=../salt/ -l info pillar="{\"host\": {\"mainint\": \"$MNIC\"}} queue=True" + if [ $is_managerhype ]; then + logCmd "salt-call state.apply salt.minion queue=True" + fi + fi } install_cleanup() { @@ -1642,7 +1645,7 @@ reserve_ports() { reinstall_init() { info "Putting system in state to run setup again" - if [[ $install_type =~ ^(MANAGER|EVAL|MANAGERSEARCH|STANDALONE|FLEET|IMPORT)$ ]]; then + if [[ $install_type =~ ^(MANAGER|EVAL|MANAGERSEARCH|MANAGERHYPE|STANDALONE|FLEET|IMPORT)$ ]]; then local salt_services=( "salt-master" "salt-minion" ) else local salt_services=( "salt-minion" ) From e5920b646525e5a393ea2dad585514b85a2eab1a Mon Sep 17 00:00:00 2001 From: Josh Patterson Date: Thu, 28 Aug 2025 09:21:20 -0400 Subject: [PATCH 4/7] add managerhype back to whiptail --- setup/so-whiptail | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/setup/so-whiptail b/setup/so-whiptail index 4c92f6a48..57bd10b8c 100755 --- a/setup/so-whiptail +++ b/setup/so-whiptail @@ -654,9 +654,10 @@ whiptail_install_type_dist_new() { Note: MANAGER is the recommended option for most users. MANAGERSEARCH should only be used in very specific situations. 
EOM - install_type=$(whiptail --title "$whiptail_title" --menu "$mngr_msg" 20 75 2 \ + install_type=$(whiptail --title "$whiptail_title" --menu "$mngr_msg" 20 75 3 \ "MANAGER" "New grid, requires separate search node(s) " \ "MANAGERSEARCH" "New grid, separate search node(s) are optional " \ + "MANAGERHYPE" "Manager with hypervisor - Security Onion Pro required " \ 3>&1 1>&2 2>&3 ) From 38ef4a6046c5613e968057670a9dbe0ad1ba20d0 Mon Sep 17 00:00:00 2001 From: Josh Patterson Date: Thu, 4 Sep 2025 11:02:27 -0400 Subject: [PATCH 5/7] pass pillar properly --- setup/so-functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup/so-functions b/setup/so-functions index e905a51e7..dbe198958 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1232,7 +1232,7 @@ hypervisor_local_states() { info "Running libvirt states for hypervisor" logCmd "salt-call state.apply libvirt.64962 --local --file-root=../salt/ -l info queue=True" info "Setting up bridge for $MNIC" - salt-call state.apply libvirt.bridge --local --file-root=../salt/ -l info pillar="{\"host\": {\"mainint\": \"$MNIC\"}} queue=True" + salt-call state.apply libvirt.bridge --local --file-root=../salt/ -l info pillar='{"host": {"mainint": "'$MNIC'"}}' queue=True if [ $is_managerhype ]; then logCmd "salt-call state.apply salt.minion queue=True" fi From 4afc986f484789214fd923a9b633c3f06e218f2c Mon Sep 17 00:00:00 2001 From: Josh Patterson Date: Fri, 5 Sep 2025 13:14:47 -0400 Subject: [PATCH 6/7] firewall and logstash pipeline for managerhype --- salt/firewall/defaults.yaml | 16 ++++++++++++++++ salt/firewall/map.jinja | 10 +++++----- salt/logstash/map.jinja | 4 ++-- 3 files changed, 23 insertions(+), 7 deletions(-) diff --git a/salt/firewall/defaults.yaml b/salt/firewall/defaults.yaml index 0c43b8c0b..a11492e88 100644 --- a/salt/firewall/defaults.yaml +++ b/salt/firewall/defaults.yaml @@ -1230,6 +1230,10 @@ firewall: portgroups: - elasticsearch_node - elasticsearch_rest + managerhype: + 
portgroups: + - elasticsearch_node + - elasticsearch_rest standalone: portgroups: - elasticsearch_node @@ -1377,6 +1381,10 @@ firewall: portgroups: - elasticsearch_node - elasticsearch_rest + managerhype: + portgroups: + - elasticsearch_node + - elasticsearch_rest standalone: portgroups: - elasticsearch_node @@ -1579,6 +1587,9 @@ firewall: portgroups: - redis - elastic_agent_data + managerhype: + portgroups: + - elastic_agent_data self: portgroups: - redis @@ -1696,6 +1707,9 @@ firewall: managersearch: portgroups: - openssh + managerhype: + portgroups: + - openssh standalone: portgroups: - openssh @@ -1758,6 +1772,8 @@ firewall: portgroups: [] managersearch: portgroups: [] + managerhype: + portgroups: [] standalone: portgroups: [] customhostgroup0: diff --git a/salt/firewall/map.jinja b/salt/firewall/map.jinja index 4347d2b31..8bd0512ec 100644 --- a/salt/firewall/map.jinja +++ b/salt/firewall/map.jinja @@ -25,7 +25,7 @@ {% set KAFKA_EXTERNAL_ACCESS = salt['pillar.get']('kafka:config:external_access:enabled', default=False) %} {% set kafka_node_type = salt['pillar.get']('kafka:nodes:'+ GLOBALS.hostname + ':role') %} -{% if role in ['manager', 'managersearch', 'standalone'] %} +{% if role.startswith('manager') or role == 'standalone' %} {% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups[role].portgroups.append('kafka_controller') %} {% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups.receiver.portgroups.append('kafka_controller') %} {% endif %} @@ -38,8 +38,8 @@ {% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups.receiver.portgroups.append('kafka_controller') %} {% endif %} -{% if role in ['manager', 'managersearch', 'standalone', 'receiver'] %} -{% for r in ['manager', 'managersearch', 'standalone', 'receiver', 'fleet', 'idh', 'sensor', 'searchnode','heavynode', 'elastic_agent_endpoint', 'desktop'] %} +{% if role.startswith('manager') or role in ['standalone', 'receiver'] %} +{% for r in 
['manager', 'managersearch', 'managerhype', 'standalone', 'receiver', 'fleet', 'idh', 'sensor', 'searchnode','heavynode', 'elastic_agent_endpoint', 'desktop'] %} {% if FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups[r] is defined %} {% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups[r].portgroups.append('kafka_data') %} {% endif %} @@ -48,11 +48,11 @@ {% if KAFKA_EXTERNAL_ACCESS %} {# Kafka external access only applies for Kafka nodes with the broker role. #} -{% if role in ['manager', 'managersearch', 'standalone', 'receiver'] and 'broker' in kafka_node_type %} +{% if (role.startswith('manager') or role in ['standalone', 'receiver']) and 'broker' in kafka_node_type %} {% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups.external_kafka.portgroups.append('kafka_external_access') %} {% endif %} {% endif %} {% endif %} -{% set FIREWALL_MERGED = salt['pillar.get']('firewall', FIREWALL_DEFAULT.firewall, merge=True) %} \ No newline at end of file +{% set FIREWALL_MERGED = salt['pillar.get']('firewall', FIREWALL_DEFAULT.firewall, merge=True) %} diff --git a/salt/logstash/map.jinja b/salt/logstash/map.jinja index 95ec6b85d..5aad1daa9 100644 --- a/salt/logstash/map.jinja +++ b/salt/logstash/map.jinja @@ -17,7 +17,7 @@ {% for node_type, node_details in redis_node_data.items() | sort %} {% if GLOBALS.role in ['so-searchnode', 'so-standalone', 'so-managersearch', 'so-fleet'] %} -{% if node_type in ['manager', 'managersearch', 'standalone', 'receiver' ] %} +{% if node_type.startswith('manager') or node_type in ['standalone', 'receiver'] %} {% for hostname in redis_node_data[node_type].keys() %} {% do LOGSTASH_REDIS_NODES.append({hostname:node_details[hostname].ip}) %} {% endfor %} @@ -47,7 +47,7 @@ {% endif %} {# Disable logstash on manager & receiver nodes unless it has an override configured #} {% if not KAFKA_LOGSTASH %} -{% if GLOBALS.role in ['so-manager', 'so-receiver'] and GLOBALS.hostname not in 
KAFKA_LOGSTASH %} +{% if GLOBALS.role in ['so-manager', 'so-managerhype', 'so-receiver'] and GLOBALS.hostname not in KAFKA_LOGSTASH %} {% do LOGSTASH_MERGED.update({'enabled': False}) %} {% endif %} {% endif %} From 207572f2f94c48fd51fbdf18f7dad47830d849c3 Mon Sep 17 00:00:00 2001 From: Josh Patterson Date: Fri, 5 Sep 2025 14:16:03 -0400 Subject: [PATCH 7/7] remove debug added to fail_setup --- setup/so-functions | 42 ++---------------------------------------- 1 file changed, 2 insertions(+), 40 deletions(-) diff --git a/setup/so-functions b/setup/so-functions index dbe198958..9ab11a904 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -29,46 +29,8 @@ title() { } fail_setup() { - local failure_reason="${1:-Unknown failure}" - - # Capture call stack information - local calling_function="${FUNCNAME[1]:-main}" - local calling_line="${BASH_LINENO[0]:-unknown}" - local calling_file="${BASH_SOURCE[1]:-unknown}" - - # Build call stack trace - local call_stack="" - local i=1 - while [[ $i -lt ${#FUNCNAME[@]} ]]; do - local func="${FUNCNAME[$i]}" - local file="${BASH_SOURCE[$i]##*/}" # Get basename only - local line="${BASH_LINENO[$((i-1))]}" - - if [[ -n "$call_stack" ]]; then - call_stack="$call_stack -> " - fi - call_stack="$call_stack$func($file:$line)" - ((i++)) - done - - # Enhanced error logging with call stack - error "FAILURE: Called from $calling_function() at line $calling_line" - error "REASON: $failure_reason" - error "STACK: $call_stack" - error "Setup encountered an unrecoverable failure: $failure_reason" - - # Create detailed failure file with enhanced information - { - echo "SETUP_FAILURE_TIMESTAMP=$(date -u '+%Y-%m-%d %H:%M:%S UTC')" - echo "SETUP_FAILURE_REASON=$failure_reason" - echo "SETUP_CALLING_FUNCTION=$calling_function" - echo "SETUP_CALLING_LINE=$calling_line" - echo "SETUP_CALLING_FILE=${calling_file##*/}" - echo "SETUP_CALL_STACK=$call_stack" - echo "SETUP_LOG_LOCATION=$setup_log" - echo "SETUP_FAILURE_DETAILS=Check $setup_log for 
complete error details" - } > /root/failure - + error "Setup encountered an unrecoverable failure, exiting" + touch /root/failure exit 1 }