Compare commits

...

45 Commits

Author SHA1 Message Date
Jorge Reyes
4014741562 Merge pull request #15113 from Security-Onion-Solutions/reyesj2/es-8188
generate new elastic agents in post soup
2025-10-07 13:11:55 -05:00
Jorge Reyes
76f500f701 temp patch for soup'n 2025-10-06 16:51:18 -05:00
Jorge Reyes
dcfe6a1674 Merge pull request #15110 from Security-Onion-Solutions/reyesj2/es-8188
Elastic 8.18.8 elastic agent build
2025-10-06 16:26:34 -05:00
Jorge Reyes
325e7ff44e Merge pull request #15109 from Security-Onion-Solutions/reyesj2/es-8188
es upgrade 8.18.8 pipeline updates
2025-10-06 16:23:55 -05:00
Jorge Reyes
ece25176cd Merge pull request #15108 from Security-Onion-Solutions/reyesj2/es-8188
es 8.18.8
2025-10-06 12:57:21 -05:00
Jorge Reyes
5186603dbd Merge pull request #15107 from Security-Onion-Solutions/2.4/dev
2.4/dev
2025-10-06 12:42:47 -05:00
Jorge Reyes
3db6542398 Merge pull request #15105 from Security-Onion-Solutions/reyesj2/logstashout
update logstash fleet output policy
2025-10-03 12:07:36 -05:00
reyesj2
9fd1b9aec1 make sure to pass in variables to json_string.. 2025-10-02 16:38:47 -05:00
reyesj2
e5563eb9b8 send full new ssl config 2025-10-02 15:29:55 -05:00
Josh Patterson
e8de9e3c26 Merge pull request #15103 from Security-Onion-Solutions/byoh
byoh
2025-10-02 15:50:34 -04:00
reyesj2
c8a3603577 update logstash fleet output policy 2025-10-02 14:47:38 -05:00
Josh Patterson
05321cf1ed add --force-cleanup to nvme raid script 2025-10-02 15:03:11 -04:00
Josh Patterson
7deef44ff6 check defaults or pillar file 2025-10-02 11:55:50 -04:00
Jorge Reyes
37bfd9eb30 Update VERSION 2025-10-01 15:36:54 -05:00
Josh Patterson
e3ac1dd1b4 Merge remote-tracking branch 'origin/2.4/dev' into byoh 2025-10-01 14:57:51 -04:00
Josh Patterson
86eca53d4b support for byodmodel 2025-10-01 14:57:25 -04:00
Jason Ertel
bfd3d822b1 Merge pull request #15092 from Security-Onion-Solutions/jertel/wip
updates for wiretap lib
2025-10-01 12:20:06 -04:00
Jason Ertel
030e4961d7 updates for wiretap lib 2025-10-01 12:13:56 -04:00
Matthew Wright
14bd92067b Merge pull request #15091 from Security-Onion-Solutions/mwright/soc_soc-fix
Made lowBalanceColorAlert global
2025-10-01 11:03:50 -04:00
Matthew Wright
066e227325 made lowBalanceColorAlert global 2025-10-01 11:01:10 -04:00
coreyogburn
f1cfb9cd91 Merge pull request #15087 from Security-Onion-Solutions/cogburn/health-timeout
New field for assistant health check
2025-09-30 15:49:52 -06:00
Corey Ogburn
5a2e704909 New field for assistant health check
The health check has a smaller, configurable timeout.
2025-09-30 15:33:20 -06:00
Jorge Reyes
f04e54d1d5 Merge pull request #15086 from Security-Onion-Solutions/reyesj2/fltpatch
less strict exits for fleet configuration
2025-09-30 15:26:50 -05:00
reyesj2
e9af46a8cb less strict exits for fleet configuration 2025-09-30 14:28:42 -05:00
Josh Patterson
b4b051908b Merge pull request #15082 from Security-Onion-Solutions/vlb2
fix hypervisor bridge setup
2025-09-29 17:19:22 -04:00
Jason Ertel
0148e5638c Merge pull request #15080 from Security-Onion-Solutions/jertel/wip
restart registry after upgrading images (in airgap mode)
2025-09-29 17:02:47 -04:00
Josh Patterson
c8814d0632 removed commented code 2025-09-29 16:58:45 -04:00
Jason Ertel
6c892fed78 restart registry after upgrading images (in airgap mode) 2025-09-29 16:47:05 -04:00
Josh Patterson
e775299480 so-user target minions with pillar elasticsearch:enabled:true 2025-09-26 15:43:49 -04:00
Josh Patterson
c4ca9c62aa Merge remote-tracking branch 'origin/2.4/dev' into vlb2 2025-09-26 12:52:37 -04:00
Jorge Reyes
c37aeff364 Merge pull request #15075 from Security-Onion-Solutions/reyesj2/esfleetpatch
update so-elastic-fleet-setup
2025-09-26 11:36:35 -05:00
reyesj2
cdac49052f Merge branch '2.4/dev' of github.com:Security-Onion-Solutions/securityonion into reyesj2/esfleetpatch 2025-09-26 11:32:44 -05:00
reyesj2
8e5fa9576c create disabled so-manager_elasticsearch output policy first, update it then verify it is the only active output 2025-09-26 11:32:25 -05:00
Josh Patterson
cd04d1e5a7 Merge remote-tracking branch 'origin/2.4/dev' into vlb2 2025-09-25 16:06:36 -04:00
Josh Patterson
1fb558cc77 managerhype br0 setup 2025-09-25 16:06:25 -04:00
Jason Ertel
7f1b76912c Merge pull request #15072 from Security-Onion-Solutions/jertel/wip
retry kratos pulls since this is the first image to install during setup
2025-09-25 15:45:02 -04:00
Jason Ertel
3a2ceb0b6f retry kratos pulls since this is the first image to install during setup 2025-09-25 15:40:00 -04:00
Matthew Wright
1345756fce Merge pull request #15071 from Security-Onion-Solutions/mwright/temp
Updated default investigation prompt
2025-09-25 15:18:20 -04:00
Matthew Wright
d81d9a0722 small tweak to investigation prompt 2025-09-25 14:45:06 -04:00
Josh Patterson
5d1edf6d86 Merge remote-tracking branch 'origin/2.4/dev' into vlb2 2025-09-24 17:32:08 -04:00
Josh Patterson
c836dd2acd set interface for network.ip_addrs for hypervisors 2025-09-24 16:50:29 -04:00
Josh Patterson
3a87af805f update service file, use salt.minion state to update mine_functions 2025-09-24 15:19:46 -04:00
Josh Patterson
4587301cca only update mine for managerhype during setup 2025-09-23 15:56:00 -04:00
Josh Patterson
14ddbd32ad salt-minion service file changes for hypervisor and managerhype 2025-09-22 16:38:40 -04:00
Josh Patterson
4599b95ae7 separate salt-minion service file 2025-09-22 16:37:16 -04:00
24 changed files with 329 additions and 84 deletions

View File

@@ -1 +1 @@
-2.4.190
+2.4.0-foxtrot

View File

@@ -323,8 +323,8 @@ get_elastic_agent_vars() {
if [ -f "$defaultsfile" ]; then if [ -f "$defaultsfile" ]; then
ELASTIC_AGENT_TARBALL_VERSION=$(egrep " +version: " $defaultsfile | awk -F: '{print $2}' | tr -d '[:space:]') ELASTIC_AGENT_TARBALL_VERSION=$(egrep " +version: " $defaultsfile | awk -F: '{print $2}' | tr -d '[:space:]')
ELASTIC_AGENT_URL="https://repo.securityonion.net/file/so-repo/prod/2.4/elasticagent/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.tar.gz" ELASTIC_AGENT_URL="https://demo.jorgereyes.dev/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.tar.gz"
ELASTIC_AGENT_MD5_URL="https://repo.securityonion.net/file/so-repo/prod/2.4/elasticagent/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.md5" ELASTIC_AGENT_MD5_URL="https://demo.jorgereyes.dev/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.md5"
ELASTIC_AGENT_FILE="/nsm/elastic-fleet/artifacts/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.tar.gz" ELASTIC_AGENT_FILE="/nsm/elastic-fleet/artifacts/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.tar.gz"
ELASTIC_AGENT_MD5="/nsm/elastic-fleet/artifacts/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.md5" ELASTIC_AGENT_MD5="/nsm/elastic-fleet/artifacts/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.md5"
ELASTIC_AGENT_EXPANSION_DIR=/nsm/elastic-fleet/artifacts/beats/elastic-agent ELASTIC_AGENT_EXPANSION_DIR=/nsm/elastic-fleet/artifacts/beats/elastic-agent

View File

@@ -173,7 +173,7 @@ for PCAP in $INPUT_FILES; do
status "- assigning unique identifier to import: $HASH" status "- assigning unique identifier to import: $HASH"
pcap_data=$(pcapinfo "${PCAP}") pcap_data=$(pcapinfo "${PCAP}")
if ! echo "$pcap_data" | grep -q "First packet time:" || echo "$pcap_data" |egrep -q "Last packet time: 1970-01-01|Last packet time: n/a"; then if ! echo "$pcap_data" | grep -q "Earliest packet time:" || echo "$pcap_data" |egrep -q "Latest packet time: 1970-01-01|Latest packet time: n/a"; then
status "- this PCAP file is invalid; skipping" status "- this PCAP file is invalid; skipping"
INVALID_PCAPS_COUNT=$((INVALID_PCAPS_COUNT + 1)) INVALID_PCAPS_COUNT=$((INVALID_PCAPS_COUNT + 1))
else else
@@ -205,8 +205,8 @@ for PCAP in $INPUT_FILES; do
HASHES="${HASHES} ${HASH}" HASHES="${HASHES} ${HASH}"
fi fi
START=$(pcapinfo "${PCAP}" -a |grep "First packet time:" | awk '{print $4}') START=$(pcapinfo "${PCAP}" -a |grep "Earliest packet time:" | awk '{print $4}')
END=$(pcapinfo "${PCAP}" -e |grep "Last packet time:" | awk '{print $4}') END=$(pcapinfo "${PCAP}" -e |grep "Latest packet time:" | awk '{print $4}')
status "- found PCAP data spanning dates $START through $END" status "- found PCAP data spanning dates $START through $END"
# compare $START to $START_OLDEST # compare $START to $START_OLDEST

View File

@@ -27,7 +27,7 @@ fleet_api() {
local QUERYPATH=$1
shift
-curl -sK /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/${QUERYPATH}" "$@" --retry 3 --fail 2>/dev/null
+curl -sK /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/${QUERYPATH}" "$@" --retry 3 --retry-delay 10 --fail 2>/dev/null
}
elastic_fleet_integration_check() {

View File

@@ -8,6 +8,7 @@
. /usr/sbin/so-elastic-fleet-common
+ERROR=false
# Manage Elastic Defend Integration for Initial Endpoints Policy
for INTEGRATION in /opt/so/conf/elastic-fleet/integrations/elastic-defend/*.json
do
@@ -17,13 +18,18 @@ do
printf "\n\nIntegration $NAME exists - Upgrading integration policy\n"
if ! elastic_fleet_integration_policy_upgrade "$INTEGRATION_ID"; then
echo -e "\nFailed to upgrade integration policy for ${INTEGRATION##*/}"
-exit 1
+ERROR=true
+continue
fi
else
printf "\n\nIntegration does not exist - Creating integration\n"
if ! elastic_fleet_integration_create "@$INTEGRATION"; then
echo -e "\nFailed to create integration for ${INTEGRATION##*/}"
-exit 1
+ERROR=true
+continue
fi
fi
done
+if [[ "$ERROR" == "true" ]]; then
+exit 1
+fi

View File

@@ -17,7 +17,6 @@ if [ ! -f /opt/so/state/eaintegrations.txt ]; then
# Third, configure Elastic Defend Integration seperately
/usr/sbin/so-elastic-fleet-integration-policy-elastic-defend
# Initial Endpoints
for INTEGRATION in /opt/so/conf/elastic-fleet/integrations/endpoints-initial/*.json
do
@@ -27,13 +26,15 @@ if [ ! -f /opt/so/state/eaintegrations.txt ]; then
printf "\n\nIntegration $NAME exists - Updating integration\n"
if ! elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"; then
echo -e "\nFailed to update integration for ${INTEGRATION##*/}"
-exit 1
+RETURN_CODE=1
+continue
fi
else
printf "\n\nIntegration does not exist - Creating integration\n"
if ! elastic_fleet_integration_create "@$INTEGRATION"; then
echo -e "\nFailed to create integration for ${INTEGRATION##*/}"
-exit 1
+RETURN_CODE=1
+continue
fi
fi
done
@@ -47,13 +48,15 @@ if [ ! -f /opt/so/state/eaintegrations.txt ]; then
printf "\n\nIntegration $NAME exists - Updating integration\n"
if ! elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"; then
echo -e "\nFailed to update integration for ${INTEGRATION##*/}"
-exit 1
+RETURN_CODE=1
+continue
fi
else
printf "\n\nIntegration does not exist - Creating integration\n"
if ! elastic_fleet_integration_create "@$INTEGRATION"; then
echo -e "\nFailed to create integration for ${INTEGRATION##*/}"
-exit 1
+RETURN_CODE=1
+continue
fi
fi
done
@@ -70,14 +73,16 @@ if [ ! -f /opt/so/state/eaintegrations.txt ]; then
printf "\n\nIntegration $NAME exists - Updating integration\n"
if ! elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"; then
echo -e "\nFailed to update integration for ${INTEGRATION##*/}"
-exit 1
+RETURN_CODE=1
+continue
fi
else
printf "\n\nIntegration does not exist - Creating integration\n"
if [ "$NAME" != "elasticsearch-logs" ]; then
if ! elastic_fleet_integration_create "@$INTEGRATION"; then
echo -e "\nFailed to create integration for ${INTEGRATION##*/}"
-exit 1
+RETURN_CODE=1
+continue
fi
fi
fi
@@ -97,14 +102,16 @@ if [ ! -f /opt/so/state/eaintegrations.txt ]; then
printf "\n\nIntegration $NAME exists - Updating integration\n"
if ! elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"; then
echo -e "\nFailed to update integration for ${INTEGRATION##*/}"
-exit 1
+RETURN_CODE=1
+continue
fi
else
printf "\n\nIntegration does not exist - Creating integration\n"
if [ "$NAME" != "elasticsearch-logs" ]; then
if ! elastic_fleet_integration_create "@$INTEGRATION"; then
echo -e "\nFailed to create integration for ${INTEGRATION##*/}"
-exit 1
+RETURN_CODE=1
+continue
fi
fi
fi

View File

@@ -24,6 +24,7 @@ fi
default_packages=({% for pkg in SUPPORTED_PACKAGES %}"{{ pkg }}"{% if not loop.last %} {% endif %}{% endfor %})
+ERROR=false
for AGENT_POLICY in $agent_policies; do
if ! integrations=$(elastic_fleet_integration_policy_names "$AGENT_POLICY"); then
# this script upgrades default integration packages, exit 1 and let salt handle retrying
@@ -73,11 +74,13 @@ for AGENT_POLICY in $agent_policies; do
echo "No errors detected. Proceeding with upgrade..."
if ! elastic_fleet_integration_policy_upgrade "$INTEGRATION_ID"; then
echo "Error: Upgrade failed for $PACKAGE_NAME with integration ID '$INTEGRATION_ID'."
-exit 1
+ERROR=true
+continue
fi
else
echo "Errors detected during dry run for $PACKAGE_NAME policy upgrade..."
-exit 1
+ERROR=true
+continue
fi
fi
{%- if not AUTO_UPGRADE_INTEGRATIONS %}
@@ -86,4 +89,7 @@ for AGENT_POLICY in $agent_policies; do
fi
done
done
+if [[ "$ERROR" == "true" ]]; then
+exit 1
+fi
echo

View File

@@ -15,8 +15,21 @@ if ! is_manager_node; then
fi
function update_logstash_outputs() {
-# Generate updated JSON payload
-JSON_STRING=$(jq -n --arg UPDATEDLIST $NEW_LIST_JSON '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":""}')
+if logstash_policy=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs/so-manager_logstash" --retry 3 --retry-delay 10 --fail 2>/dev/null); then
+SSL_CONFIG=$(echo "$logstash_policy" | jq -r '.item.ssl')
+if SECRETS=$(echo "$logstash_policy" | jq -er '.item.secrets' 2>/dev/null); then
+JSON_STRING=$(jq -n \
+--arg UPDATEDLIST "$NEW_LIST_JSON" \
+--argjson SECRETS "$SECRETS" \
+--argjson SSL_CONFIG "$SSL_CONFIG" \
+'{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":"","ssl": $SSL_CONFIG,"secrets": $SECRETS}')
+else
+JSON_STRING=$(jq -n \
+--arg UPDATEDLIST "$NEW_LIST_JSON" \
+--argjson SSL_CONFIG "$SSL_CONFIG" \
+'{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":"","ssl": $SSL_CONFIG}')
+fi
+fi
# Update Logstash Outputs
curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_logstash" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" | jq
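Aside: the rewrite above switches from plain --arg string substitution to --argjson for the ssl and secrets objects it reads back from the existing output. A minimal jq illustration of the difference, with hypothetical values (not part of the patch):

  SSL='{"certificate":"...","certificate_authorities":["..."]}'
  jq -n --arg ssl "$SSL" '{ssl: $ssl}'      # --arg injects a quoted string
  jq -n --argjson ssl "$SSL" '{ssl: $ssl}'  # --argjson injects the parsed JSON object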

View File

@@ -63,7 +63,7 @@ printf "\nAdd Manager Elasticsearch Output...\n"
ESCACRT=$(openssl x509 -in "$INTCA" -outform DER | sha256sum | cut -d' ' -f1 | tr '[:lower:]' '[:upper:]')
JSON_STRING=$(jq -n \
--arg ESCACRT "$ESCACRT" \
-'{"name":"so-manager_elasticsearch","id":"so-manager_elasticsearch","type":"elasticsearch","hosts":["https://{{ GLOBALS.manager_ip }}:9200","https://{{ GLOBALS.manager }}:9200"],"is_default":true,"is_default_monitoring":true,"config_yaml":"","ca_trusted_fingerprint": $ESCACRT}')
+'{"name":"so-manager_elasticsearch","id":"so-manager_elasticsearch","type":"elasticsearch","hosts":["https://{{ GLOBALS.manager_ip }}:9200","https://{{ GLOBALS.manager }}:9200"],"is_default":false,"is_default_monitoring":false,"config_yaml":"","ca_trusted_fingerprint": $ESCACRT}')
if ! fleet_api "outputs" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"; then
echo -e "\nFailed to create so-elasticsearch_manager policy..."
@@ -71,6 +71,13 @@ if ! fleet_api "outputs" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: applicatio
fi
printf "\n\n"
+# so-manager_elasticsearch should exist and be disabled. Now update it before checking its the only default policy
+MANAGER_OUTPUT_ENABLED=$(echo "$JSON_STRING" | jq 'del(.id) | .is_default = true | .is_default_monitoring = true')
+if ! curl -sK /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_elasticsearch" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$MANAGER_OUTPUT_ENABLED"; then
+echo -e "\n failed to update so-manager_elasticsearch"
+exit 1
+fi
# At this point there should only be two policies. fleet-default-output & so-manager_elasticsearch
status "Verifying so-manager_elasticsearch policy is configured as the current default"
@@ -79,7 +86,7 @@ if DEFAULTPOLICY=$(fleet_api "outputs/fleet-default-output"); then
fleet_default=$(echo "$DEFAULTPOLICY" | jq -er '.item.is_default')
fleet_default_monitoring=$(echo "$DEFAULTPOLICY" | jq -er '.item.is_default_monitoring')
# Check that fleet-default-output isn't configured as a default for anything ( both variables return false )
-if [[ ! $fleet_default ]] && [[ ! $fleet_default_monitoring ]]; then
+if [[ $fleet_default == "false" ]] && [[ $fleet_default_monitoring == "false" ]]; then
echo -e "\nso-manager_elasticsearch is configured as the current default policy..."
else
echo -e "\nVerification of so-manager_elasticsearch policy failed... The default 'fleet-default-output' output is still active..."
@@ -120,7 +127,7 @@ JSON_STRING=$( jq -n \
--arg LOGSTASHCRT "$LOGSTASHCRT" \
--arg LOGSTASHKEY "$LOGSTASHKEY" \
--arg LOGSTASHCA "$LOGSTASHCA" \
-'{"name":"grid-logstash","is_default":true,"is_default_monitoring":true,"id":"so-manager_logstash","type":"logstash","hosts":["{{ GLOBALS.manager_ip }}:5055", "{{ GLOBALS.manager }}:5055"],"config_yaml":"","ssl":{"certificate": $LOGSTASHCRT,"key": $LOGSTASHKEY,"certificate_authorities":[ $LOGSTASHCA ]},"proxy_id":null}'
+'{"name":"grid-logstash","is_default":true,"is_default_monitoring":true,"id":"so-manager_logstash","type":"logstash","hosts":["{{ GLOBALS.manager_ip }}:5055", "{{ GLOBALS.manager }}:5055"],"config_yaml":"","ssl":{"certificate": $LOGSTASHCRT,"certificate_authorities":[ $LOGSTASHCA ]},"secrets":{"ssl":{"key": $LOGSTASHKEY }},"proxy_id":null}'
)
if ! fleet_api "outputs" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"; then
echo -e "\nFailed to create logstash fleet output"

View File

@@ -13,6 +13,7 @@
{# Import defaults.yaml for model hardware capabilities #}
{% import_yaml 'hypervisor/defaults.yaml' as DEFAULTS %}
+{% set HYPERVISORMERGED = salt['pillar.get']('hypervisor', default=DEFAULTS.hypervisor, merge=True) %}
{# Get hypervisor nodes from pillar #}
{% set NODES = salt['pillar.get']('hypervisor:nodes', {}) %}
@@ -30,9 +31,10 @@
{% set model = '' %}
{% if grains %}
{% set minion_id = grains.keys() | first %}
-{% set model = grains[minion_id].get('sosmodel', '') %}
+{% set model = grains[minion_id].get('sosmodel', grains[minion_id].get('byodmodel', '')) %}
{% endif %}
-{% set model_config = DEFAULTS.hypervisor.model.get(model, {}) %}
+{% set model_config = HYPERVISORMERGED.model.get(model, {}) %}
{# Get VM list from VMs file #}
{% set vms = {} %}

View File

@@ -30,7 +30,9 @@
#
# WARNING: This script will DESTROY all data on the target drives!
#
-# USAGE: sudo ./so-nvme-raid1.sh
+# USAGE:
+# sudo ./so-nvme-raid1.sh # Normal operation
+# sudo ./so-nvme-raid1.sh --force-cleanup # Force cleanup of existing RAID
#
#################################################################
@@ -41,6 +43,19 @@ set -e
RAID_ARRAY_NAME="md0"
RAID_DEVICE="/dev/${RAID_ARRAY_NAME}"
MOUNT_POINT="/nsm"
+FORCE_CLEANUP=false
+# Parse command line arguments
+for arg in "$@"; do
+case $arg in
+--force-cleanup)
+FORCE_CLEANUP=true
+shift
+;;
+*)
+;;
+esac
+done
# Function to log messages
log() {
@@ -55,6 +70,91 @@ check_root() {
fi
}
+# Function to force cleanup all RAID components
+force_cleanup_raid() {
+log "=== FORCE CLEANUP MODE ==="
+log "This will destroy all RAID configurations and data on target drives!"
+# Stop all MD arrays
+log "Stopping all MD arrays"
+mdadm --stop --scan 2>/dev/null || true
+# Wait for arrays to stop
+sleep 2
+# Remove any running md devices
+for md in /dev/md*; do
+if [ -b "$md" ]; then
+log "Stopping $md"
+mdadm --stop "$md" 2>/dev/null || true
+fi
+done
+# Force cleanup both NVMe drives
+for device in "/dev/nvme0n1" "/dev/nvme1n1"; do
+log "Force cleaning $device"
+# Kill any processes using the device
+fuser -k "${device}"* 2>/dev/null || true
+# Unmount any mounted partitions
+for part in "${device}"*; do
+if [ -b "$part" ]; then
+umount -f "$part" 2>/dev/null || true
+fi
+done
+# Force zero RAID superblocks on partitions
+for part in "${device}"p*; do
+if [ -b "$part" ]; then
+log "Zeroing RAID superblock on $part"
+mdadm --zero-superblock --force "$part" 2>/dev/null || true
+fi
+done
+# Zero superblock on the device itself
+log "Zeroing RAID superblock on $device"
+mdadm --zero-superblock --force "$device" 2>/dev/null || true
+# Remove LVM physical volumes
+pvremove -ff -y "$device" 2>/dev/null || true
+# Wipe all filesystem and partition signatures
+log "Wiping all signatures from $device"
+wipefs -af "$device" 2>/dev/null || true
+# Overwrite the beginning of the drive (partition table area)
+log "Clearing partition table on $device"
+dd if=/dev/zero of="$device" bs=1M count=10 2>/dev/null || true
+# Clear the end of the drive (backup partition table area)
+local device_size=$(blockdev --getsz "$device" 2>/dev/null || echo "0")
+if [ "$device_size" -gt 0 ]; then
+dd if=/dev/zero of="$device" bs=512 seek=$(( device_size - 2048 )) count=2048 2>/dev/null || true
+fi
+# Force kernel to re-read partition table
+blockdev --rereadpt "$device" 2>/dev/null || true
+partprobe -s "$device" 2>/dev/null || true
+done
+# Clear mdadm configuration
+log "Clearing mdadm configuration"
+echo "DEVICE partitions" > /etc/mdadm.conf
+# Remove any fstab entries for the RAID device or mount point
+log "Cleaning fstab entries"
+sed -i "\|${RAID_DEVICE}|d" /etc/fstab
+sed -i "\|${MOUNT_POINT}|d" /etc/fstab
+# Wait for system to settle
+udevadm settle
+sleep 5
+log "Force cleanup complete!"
+log "Proceeding with RAID setup..."
+}
# Function to find MD arrays using specific devices
find_md_arrays_using_devices() {
local target_devices=("$@")
@@ -205,10 +305,15 @@ check_existing_raid() {
fi
log "Error: $device appears to be part of an existing RAID array"
-log "To reuse this device, you must first:"
-log "1. Unmount any filesystems"
-log "2. Stop the RAID array: mdadm --stop $array_name"
-log "3. Zero the superblock: mdadm --zero-superblock ${device}p1"
+log "Old RAID metadata detected but array is not running."
+log ""
+log "To fix this, run the script with --force-cleanup:"
+log "  sudo $0 --force-cleanup"
+log ""
+log "Or manually clean up with:"
+log "1. Stop any arrays: mdadm --stop --scan"
+log "2. Zero superblocks: mdadm --zero-superblock --force ${device}p1"
+log "3. Wipe signatures: wipefs -af $device"
exit 1
fi
done
@@ -238,7 +343,7 @@ ensure_devices_free() {
done
# Clear MD superblock
-mdadm --zero-superblock "${device}"* 2>/dev/null || true
+mdadm --zero-superblock --force "${device}"* 2>/dev/null || true
# Remove LVM PV if exists
pvremove -ff -y "$device" 2>/dev/null || true
@@ -263,6 +368,11 @@ main() {
# Check if running as root
check_root
+# If force cleanup flag is set, do aggressive cleanup first
+if [ "$FORCE_CLEANUP" = true ]; then
+force_cleanup_raid
+fi
# Check for existing RAID setup
check_existing_raid

View File

@@ -54,6 +54,9 @@ so-kratos:
- file: kratosconfig
- file: kratoslogdir
- file: kratosdir
+- retry:
+attempts: 10
+interval: 10
delete_so-kratos_so-status.disabled:
file.uncomment:

View File

@@ -4,6 +4,9 @@
# Elastic License 2.0.
# We do not import GLOBALS in this state because it is called during setup
+include:
+- salt.minion.service_file
+- salt.mine_functions
down_original_mgmt_interface:
cmd.run:
@@ -28,29 +31,14 @@ wait_for_br0_ip:
- timeout: 95
- onchanges:
- cmd: down_original_mgmt_interface
+- onchanges_in:
+- file: salt_minion_service_unit_file
+- file: mine_functions
-{% if grains.role == 'so-hypervisor' %}
-update_mine_functions:
-file.managed:
-- name: /etc/salt/minion.d/mine_functions.conf
-- contents: |
-mine_interval: 25
-mine_functions:
-network.ip_addrs:
-- interface: br0
-{%- if role in ['so-eval','so-import','so-manager','so-managerhype','so-managersearch','so-standalone'] %}
-x509.get_pem_entries:
-- glob_path: '/etc/pki/ca.crt'
-{% endif %}
-- onchanges:
-- cmd: wait_for_br0_ip
restart_salt_minion_service:
service.running:
- name: salt-minion
- enable: True
- listen:
-- file: update_mine_functions
+- file: salt_minion_service_unit_file
+- file: mine_functions
-{% endif %}

View File

@@ -387,7 +387,7 @@ function syncElastic() {
if [[ -z "$SKIP_STATE_APPLY" ]]; then if [[ -z "$SKIP_STATE_APPLY" ]]; then
echo "Elastic state will be re-applied to affected minions. This will run in the background and may take several minutes to complete." echo "Elastic state will be re-applied to affected minions. This will run in the background and may take several minutes to complete."
echo "Applying elastic state to elastic minions at $(date)" >> /opt/so/log/soc/sync.log 2>&1 echo "Applying elastic state to elastic minions at $(date)" >> /opt/so/log/soc/sync.log 2>&1
salt --async -C 'G@role:so-standalone or G@role:so-eval or G@role:so-import or G@role:so-manager or G@role:so-managersearch or G@role:so-searchnode or G@role:so-heavynode' state.apply elasticsearch queue=True >> /opt/so/log/soc/sync.log 2>&1 salt --async -C 'I@elasticsearch:enabled:true' state.apply elasticsearch queue=True >> /opt/so/log/soc/sync.log 2>&1
fi fi
else else
echo "Newly generated users/roles files are incomplete; aborting." echo "Newly generated users/roles files are incomplete; aborting."

View File

@@ -169,6 +169,8 @@ airgap_update_dockers() {
tar xf "$AGDOCKER/registry.tar" -C /nsm/docker-registry/docker tar xf "$AGDOCKER/registry.tar" -C /nsm/docker-registry/docker
echo "Add Registry back" echo "Add Registry back"
docker load -i "$AGDOCKER/registry_image.tar" docker load -i "$AGDOCKER/registry_image.tar"
echo "Restart registry container"
salt-call state.apply registry queue=True
fi fi
fi fi
} }
@@ -420,6 +422,7 @@ preupgrade_changes() {
[[ "$INSTALLEDVERSION" == 2.4.150 ]] && up_to_2.4.160 [[ "$INSTALLEDVERSION" == 2.4.150 ]] && up_to_2.4.160
[[ "$INSTALLEDVERSION" == 2.4.160 ]] && up_to_2.4.170 [[ "$INSTALLEDVERSION" == 2.4.160 ]] && up_to_2.4.170
[[ "$INSTALLEDVERSION" == 2.4.170 ]] && up_to_2.4.180 [[ "$INSTALLEDVERSION" == 2.4.170 ]] && up_to_2.4.180
[[ "$INSTALLEDVERSION" == 2.4.180 ]] && up_to_2.4.190
true true
} }
@@ -615,6 +618,16 @@ post_to_2.4.190() {
update_import_fleet_output update_import_fleet_output
fi fi
# Check if expected default policy is logstash (global.pipeline is REDIS or "")
pipeline=$(lookup_pillar "pipeline" "global")
if [[ -z "$pipeline" ]] || [[ "$pipeline" == "REDIS" ]]; then
# Check if this grid is currently affected by corrupt fleet output policy
if elastic-agent status | grep "config: key file not configured" > /dev/null 2>&1; then
echo "Elastic Agent shows an ssl error connecting to logstash output. Updating output policy..."
update_default_logstash_output
fi
fi
POSTVERSION=2.4.190 POSTVERSION=2.4.190
} }
@@ -1171,6 +1184,31 @@ update_import_fleet_output() {
fi fi
} }
update_default_logstash_output() {
echo "Updating fleet logstash output policy grid-logstash"
if logstash_policy=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs/so-manager_logstash" --retry 3 --retry-delay 10 --fail 2>/dev/null); then
# Keep already configured hosts for this update, subsequent host updates come from so-elastic-fleet-outputs-update
HOSTS=$(echo "$logstash_policy" | jq -r '.item.hosts')
DEFAULT_ENABLED=$(echo "$logstash_policy" | jq -r '.item.is_default')
DEFAULT_MONITORING_ENABLED=$(echo "$logstash_policy" | jq -r '.item.is_default_monitoring')
LOGSTASHKEY=$(openssl rsa -in /etc/pki/elasticfleet-logstash.key)
LOGSTASHCRT=$(openssl x509 -in /etc/pki/elasticfleet-logstash.crt)
LOGSTASHCA=$(openssl x509 -in /etc/pki/tls/certs/intca.crt)
JSON_STRING=$(jq -n \
--argjson HOSTS "$HOSTS" \
--arg DEFAULT_ENABLED "$DEFAULT_ENABLED" \
--arg DEFAULT_MONITORING_ENABLED "$DEFAULT_MONITORING_ENABLED" \
--arg LOGSTASHKEY "$LOGSTASHKEY" \
--arg LOGSTASHCRT "$LOGSTASHCRT" \
--arg LOGSTASHCA "$LOGSTASHCA" \
'{"name":"grid-logstash","type":"logstash","hosts": $HOSTS,"is_default": $DEFAULT_ENABLED,"is_default_monitoring": $DEFAULT_MONITORING_ENABLED,"config_yaml":"","ssl":{"certificate": $LOGSTASHCRT,"certificate_authorities":[ $LOGSTASHCA ]},"secrets":{"ssl":{"key": $LOGSTASHKEY }}}')
fi
if curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_logstash" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" --retry 3 --retry-delay 10 --fail; then
echo "Successfully updated grid-logstash fleet output policy"
fi
}
update_salt_mine() { update_salt_mine() {
echo "Populating the mine with mine_functions for each host." echo "Populating the mine with mine_functions for each host."
set +e set +e
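Aside: the new update_default_logstash_output() rebuilds grid-logstash with the TLS key moved under secrets.ssl. After soup completes, the result can be spot-checked against the same Fleet API the function uses (illustrative command, reusing the curl config path shown in the patch):

  curl -sK /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/outputs/so-manager_logstash" | jq '.item.ssl, .item.secrets'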

View File

@@ -161,6 +161,7 @@ DEFAULT_BASE_PATH = '/opt/so/saltstack/local/salt/hypervisor/hosts'
VALID_ROLES = ['sensor', 'searchnode', 'idh', 'receiver', 'heavynode', 'fleet']
LICENSE_PATH = '/opt/so/saltstack/local/pillar/soc/license.sls'
DEFAULTS_PATH = '/opt/so/saltstack/default/salt/hypervisor/defaults.yaml'
+HYPERVISOR_PILLAR_PATH = '/opt/so/saltstack/local/pillar/hypervisor/soc_hypervisor.sls'
# Define the retention period for destroyed VMs (in hours)
DESTROYED_VM_RETENTION_HOURS = 48
@@ -271,7 +272,7 @@ def parse_hardware_indices(hw_value: Any) -> List[int]:
return indices
def get_hypervisor_model(hypervisor: str) -> str:
-"""Get sosmodel from hypervisor grains."""
+"""Get sosmodel or byodmodel from hypervisor grains."""
try:
# Get cached grains using Salt runner
grains = runner.cmd(
@@ -283,9 +284,9 @@ def get_hypervisor_model(hypervisor: str) -> str:
# Get the first minion ID that matches our hypervisor
minion_id = next(iter(grains.keys()))
-model = grains[minion_id].get('sosmodel')
+model = grains[minion_id].get('sosmodel', grains[minion_id].get('byodmodel', ''))
if not model:
-raise ValueError(f"No sosmodel grain found for hypervisor {hypervisor}")
+raise ValueError(f"No sosmodel or byodmodel grain found for hypervisor {hypervisor}")
log.debug("Found model %s for hypervisor %s", model, hypervisor)
return model
@@ -295,16 +296,48 @@ def get_hypervisor_model(hypervisor: str) -> str:
raise
def load_hardware_defaults(model: str) -> dict:
-"""Load hardware configuration from defaults.yaml."""
+"""Load hardware configuration from defaults.yaml and optionally override with pillar configuration."""
+config = None
+config_source = None
try:
+# First, try to load from defaults.yaml
+log.debug("Checking for model %s in %s", model, DEFAULTS_PATH)
defaults = read_yaml_file(DEFAULTS_PATH)
if not defaults or 'hypervisor' not in defaults:
raise ValueError("Invalid defaults.yaml structure")
if 'model' not in defaults['hypervisor']:
raise ValueError("No model configurations found in defaults.yaml")
-if model not in defaults['hypervisor']['model']:
-raise ValueError(f"Model {model} not found in defaults.yaml")
-return defaults['hypervisor']['model'][model]
+# Check if model exists in defaults
+if model in defaults['hypervisor']['model']:
+config = defaults['hypervisor']['model'][model]
+config_source = DEFAULTS_PATH
+log.debug("Found model %s in %s", model, DEFAULTS_PATH)
+# Then, try to load from pillar file (if it exists)
+try:
+log.debug("Checking for model %s in %s", model, HYPERVISOR_PILLAR_PATH)
+pillar_config = read_yaml_file(HYPERVISOR_PILLAR_PATH)
+if pillar_config and 'hypervisor' in pillar_config:
+if 'model' in pillar_config['hypervisor']:
+if model in pillar_config['hypervisor']['model']:
+# Override with pillar configuration
+config = pillar_config['hypervisor']['model'][model]
+config_source = HYPERVISOR_PILLAR_PATH
+log.debug("Found model %s in %s (overriding defaults)", model, HYPERVISOR_PILLAR_PATH)
+except FileNotFoundError:
+log.debug("Pillar file %s not found, using defaults only", HYPERVISOR_PILLAR_PATH)
+except Exception as e:
+log.warning("Failed to read pillar file %s: %s (using defaults)", HYPERVISOR_PILLAR_PATH, str(e))
+# If model was not found in either file, raise an error
+if config is None:
+raise ValueError(f"Model {model} not found in {DEFAULTS_PATH} or {HYPERVISOR_PILLAR_PATH}")
+log.debug("Using hardware configuration for model %s from %s", model, config_source)
+return config
except Exception as e:
log.error("Failed to load hardware defaults: %s", str(e))
raise

View File

@@ -4,7 +4,10 @@
Elastic License 2.0. #}
{% set role = salt['grains.get']('role', '') %}
-{% if role in ['so-hypervisor','so-managerhype'] and salt['network.ip_addrs']('br0')|length > 0 %}
+{# We are using usebr0 mostly for setup of the so-managerhype node and controlling when we use br0 vs the physical interface #}
+{% set usebr0 = salt['pillar.get']('usebr0', True) %}
+{% if role in ['so-hypervisor','so-managerhype'] and usebr0 %}
{% set interface = 'br0' %}
{% else %}
{% set interface = pillar.host.mainint %}

View File

@@ -3,7 +3,7 @@
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
-# this state was seperated from salt.minion state since it is called during setup
+# this state was separated from salt.minion state since it is called during setup
# GLOBALS are imported in the salt.minion state and that is not available at that point in setup
# this state is included in the salt.minion state

View File

@@ -1,18 +1,22 @@
+# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
+# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
+# https://securityonion.net/license; you may not use this file except in compliance with the
+# Elastic License 2.0.
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'salt/map.jinja' import UPGRADECOMMAND with context %}
{% from 'salt/map.jinja' import SALTVERSION %}
{% from 'salt/map.jinja' import INSTALLEDSALTVERSION %}
{% from 'salt/map.jinja' import SALTPACKAGES %}
-{% from 'salt/map.jinja' import SYSTEMD_UNIT_FILE %}
{% import_yaml 'salt/minion.defaults.yaml' as SALTMINION %}
include:
- salt.python_modules
- salt.patch.x509_v2
- salt
-- systemd.reload
- repo.client
- salt.mine_functions
+- salt.minion.service_file
{% if GLOBALS.role in GLOBALS.manager_roles %}
- ca
{% endif %}
@@ -94,17 +98,6 @@ enable_startup_states:
- regex: '^startup_states: highstate$'
- unless: pgrep so-setup
-# prior to 2.4.30 this managed file would restart the salt-minion service when updated
-# since this file is currently only adding a delay service start
-# it is not required to restart the service
-salt_minion_service_unit_file:
-file.managed:
-- name: {{ SYSTEMD_UNIT_FILE }}
-- source: salt://salt/service/salt-minion.service.jinja
-- template: jinja
-- onchanges_in:
-- module: systemd_reload
{% endif %}
# this has to be outside the if statement above since there are <requisite>_in calls to this state

View File

@@ -0,0 +1,26 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'salt/map.jinja' import SALTVERSION %}
{% from 'salt/map.jinja' import INSTALLEDSALTVERSION %}
{% from 'salt/map.jinja' import SYSTEMD_UNIT_FILE %}
include:
- systemd.reload
{% if INSTALLEDSALTVERSION|string == SALTVERSION|string %}
# prior to 2.4.30 this managed file would restart the salt-minion service when updated
# since this file is currently only adding a delay service start
# it is not required to restart the service
salt_minion_service_unit_file:
file.managed:
- name: {{ SYSTEMD_UNIT_FILE }}
- source: salt://salt/service/salt-minion.service.jinja
- template: jinja
- onchanges_in:
- module: systemd_reload
{% endif %}

View File

@@ -1493,6 +1493,7 @@ soc:
folder: securityonion-normalized
assistant:
apiUrl: https://onionai.securityonion.net
+healthTimeoutSeconds: 3
salt:
queueDir: /opt/sensoroni/queue
timeoutMs: 45000
@@ -2545,7 +2546,7 @@ soc:
level: 'high' # info | low | medium | high | critical
assistant:
enabled: false
-investigationPrompt: Investigate Alert ID {socid}
+investigationPrompt: Investigate Alert ID {socId}
contextLimitSmall: 200000
contextLimitLarge: 1000000
thresholdColorRatioLow: 0.5

View File

@@ -585,6 +585,10 @@ soc:
description: The URL of the AI gateway.
advanced: True
global: True
+healthTimeoutSeconds:
+description: Timeout in seconds for the Onion AI health check.
+global: True
+advanced: True
client:
assistant:
enabled:
@@ -615,6 +619,7 @@ soc:
advanced: True
lowBalanceColorAlert:
description: Onion AI credit amount at which balance turns red.
+global: True
advanced: True
apiTimeoutMs:
description: Duration (in milliseconds) to wait for a response from the SOC server API before giving up and showing an error on the SOC UI.

View File

@@ -541,8 +541,15 @@ configure_minion() {
"log_file: /opt/so/log/salt/minion"\ "log_file: /opt/so/log/salt/minion"\
"#startup_states: highstate" >> "$minion_config" "#startup_states: highstate" >> "$minion_config"
info "Running: salt-call state.apply salt.mine_functions --local --file-root=../salt/ -l info pillar='{"host": {"mainint": "$MNIC"}}'" # At the time the so-managerhype node does not yet have the bridge configured.
salt-call state.apply salt.mine_functions --local --file-root=../salt/ -l info pillar="{'host': {'mainint': $MNIC}}" # The so-hypervisor node doesn't either, but it doesn't cause issues here.
local usebr0=false
if [ "$minion_type" == 'hypervisor' ]; then
usebr0=true
fi
local pillar_json="{\"host\": {\"mainint\": \"$MNIC\"}, \"usebr0\": $usebr0}"
info "Running: salt-call state.apply salt.mine_functions --local --file-root=../salt/ -l info pillar='$pillar_json'"
salt-call state.apply salt.mine_functions --local --file-root=../salt/ -l info pillar="$pillar_json"
{ {
logCmd "systemctl enable salt-minion"; logCmd "systemctl enable salt-minion";
@@ -1195,9 +1202,6 @@ hypervisor_local_states() {
logCmd "salt-call state.apply libvirt.64962 --local --file-root=../salt/ -l info queue=True" logCmd "salt-call state.apply libvirt.64962 --local --file-root=../salt/ -l info queue=True"
info "Setting up bridge for $MNIC" info "Setting up bridge for $MNIC"
salt-call state.apply libvirt.bridge --local --file-root=../salt/ -l info pillar='{"host": {"mainint": "'$MNIC'"}}' queue=True salt-call state.apply libvirt.bridge --local --file-root=../salt/ -l info pillar='{"host": {"mainint": "'$MNIC'"}}' queue=True
if [ $is_managerhype ]; then
logCmd "salt-call state.apply salt.minion queue=True"
fi
fi fi
} }

View File

@@ -762,6 +762,7 @@ if ! [[ -f $install_opt_file ]]; then
fi
logCmd "salt-call state.apply common.packages"
logCmd "salt-call state.apply common"
+hypervisor_local_states
# this will apply the salt.minion state first since salt.master includes salt.minion
logCmd "salt-call state.apply salt.master"
# wait here until we get a response from the salt-master since it may have just restarted
@@ -826,7 +827,6 @@
checkin_at_boot
set_initial_firewall_access
logCmd "salt-call schedule.enable -linfo --local"
-hypervisor_local_states
verify_setup
else
touch /root/accept_changes