soup cleanup

Mike Reeves
2026-03-10 11:58:06 -04:00
parent d78a5867b8
commit 685e22bd68

@@ -329,50 +329,6 @@ clone_to_tmp() {
fi
}
# there is a function like this in so-minion, but we cannot source that script here because so-minion requires arguments
create_ca_pillar() {
local ca_pillar_dir="/opt/so/saltstack/local/pillar/ca"
local ca_pillar_file="${ca_pillar_dir}/init.sls"
echo "Updating CA pillar configuration"
mkdir -p "$ca_pillar_dir"
echo "ca: {}" > "$ca_pillar_file"
so-yaml.py add "$ca_pillar_file" ca.server "$MINIONID"
chown -R socore:socore "$ca_pillar_dir"
}
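# Resulting pillar file, as a sketch (exact YAML formatting depends on so-yaml.py):
#   ca:
#     server: <manager minion id>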
disable_logstash_heavynodes() {
local c=0
printf "\nChecking for heavynodes and disabling Logstash if they exist\n"
for file in /opt/so/saltstack/local/pillar/minions/*.sls; do
if [[ "$file" =~ "_heavynode.sls" && ! "$file" =~ "/opt/so/saltstack/local/pillar/minions/adv_" ]]; then
if [ "$c" -eq 0 ]; then
c=$((c + 1))
FINAL_MESSAGE_QUEUE+=("Logstash has been disabled on all heavynodes. It can be re-enabled via Grid Configuration in SOC.")
fi
echo "Disabling Logstash for: $file"
so-yaml.py replace "$file" logstash.enabled False
fi
done
}
disable_redis_heavynodes() {
local c=0
printf "\nChecking for heavynodes and disabling Redis if they exist\n"
for file in /opt/so/saltstack/local/pillar/minions/*.sls; do
if [[ "$file" =~ "_heavynode.sls" && ! "$file" =~ "/opt/so/saltstack/local/pillar/minions/adv_" ]]; then
c=1
echo "Disabling Redis for: $file"
so-yaml.py replace "$file" redis.enabled False
fi
done
if [[ "$c" != 0 ]]; then
FINAL_MESSAGE_QUEUE+=("Redis has been disabled on all heavynodes.")
fi
}
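# Filename matching sketch for the two heavynode loops above (hypothetical minion IDs):
#   /opt/so/saltstack/local/pillar/minions/sensor1_heavynode.sls     -> matched, service disabled
#   /opt/so/saltstack/local/pillar/minions/adv_sensor1_heavynode.sls -> skipped (adv_ override pillars are left untouched)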
enable_highstate() {
echo "Enabling highstate."
salt-call state.enable highstate -l info --local
@@ -413,28 +369,6 @@ masterunlock() {
fi
}
phases_pillar_2_4_80() {
echo "Checking if pillar value: elasticsearch.index_settings.global_overrides.index_template.phases exists"
set +e
PHASES=$(so-yaml.py get /opt/so/saltstack/local/pillar/elasticsearch/soc_elasticsearch.sls elasticsearch.index_settings.global_overrides.index_template.phases)
case $? in
0)
so-yaml.py remove /opt/so/saltstack/local/pillar/elasticsearch/soc_elasticsearch.sls elasticsearch.index_settings.global_overrides.index_template.phases
read -r -d '' msg <<- EOF
Found elasticsearch.index_settings.global_overrides.index_template.phases was set to:
${PHASES}
Removed unused pillar value: elasticsearch.index_settings.global_overrides.index_template.phases
To set policies, navigate to the SOC Grid Configuration UI at elasticsearch.index_settings.global_overrides.policy.phases
A backup of all pillar files was saved to /nsm/backup/
EOF
FINAL_MESSAGE_QUEUE+=("$msg")
;;
2) echo "Pillar elasticsearch.index_settings.global_overrides.index_template.phases does not exist. No action taken." ;;
*) echo "so-yaml.py returned an exit code other than 0 or 2" ;; # we shouldn't see this
esac
set -e
}
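# Exit-code contract assumed by the case statement above: so-yaml.py get returns 0
# when the key exists (value on stdout) and 2 when it does not. Manual equivalent:
#   so-yaml.py get /opt/so/saltstack/local/pillar/elasticsearch/soc_elasticsearch.sls \
#     elasticsearch.index_settings.global_overrides.index_template.phases; echo "exit: $?"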
preupgrade_changes() {
# This function is to add any new pillar items if needed.
@@ -512,72 +446,6 @@ stop_salt_minion() {
set -e
}
add_hydra_pillars() {
mkdir -p /opt/so/saltstack/local/pillar/hydra
touch /opt/so/saltstack/local/pillar/hydra/soc_hydra.sls
chmod 660 /opt/so/saltstack/local/pillar/hydra/soc_hydra.sls
touch /opt/so/saltstack/local/pillar/hydra/adv_hydra.sls
HYDRAKEY=$(get_random_value)
HYDRASALT=$(get_random_value)
printf '%s\n'\
"hydra:"\
" config:"\
" secrets:"\
" system:"\
" - '$HYDRAKEY'"\
" oidc:"\
" subject_identifiers:"\
" pairwise:"\
" salt: '$HYDRASALT'"\
"" > /opt/so/saltstack/local/pillar/hydra/soc_hydra.sls
}
add_detection_test_pillars() {
if [[ -n "$SOUP_INTERNAL_TESTING" ]]; then
echo "Adding detection pillar values for automated testing"
so-yaml.py add /opt/so/saltstack/local/pillar/soc/soc_soc.sls soc.config.server.modules.elastalertengine.allowRegex SecurityOnion
so-yaml.py add /opt/so/saltstack/local/pillar/soc/soc_soc.sls soc.config.server.modules.elastalertengine.failAfterConsecutiveErrorCount 1
so-yaml.py add /opt/so/saltstack/local/pillar/soc/soc_soc.sls soc.config.server.modules.strelkaengine.allowRegex "EquationGroup_Toolset_Apr17__ELV_.*"
so-yaml.py add /opt/so/saltstack/local/pillar/soc/soc_soc.sls soc.config.server.modules.strelkaengine.failAfterConsecutiveErrorCount 1
so-yaml.py add /opt/so/saltstack/local/pillar/soc/soc_soc.sls soc.config.server.modules.suricataengine.allowRegex "(200033\\d|2100538|2102466)"
so-yaml.py add /opt/so/saltstack/local/pillar/soc/soc_soc.sls soc.config.server.modules.suricataengine.failAfterConsecutiveErrorCount 1
fi
}
toggle_telemetry() {
if [[ -z $UNATTENDED && $is_airgap -ne 0 ]]; then
cat << ASSIST_EOF
--------------- SOC Telemetry ---------------
The Security Onion development team could use your help! Enabling SOC
Telemetry will help the team understand which UI features are being
used and enables informed prioritization of future development.
Adjust this setting at any time via the SOC Configuration screen.
Documentation: https://securityonion.net/docs/telemetry
ASSIST_EOF
echo -n "Continue the upgrade with SOC Telemetry enabled [Y/n]? "
read -r input
input=$(echo "${input,,}" | xargs echo -n)
echo ""
if [[ ${#input} -eq 0 || "$input" == "yes" || "$input" == "y" || "$input" == "yy" ]]; then
echo "Thank you for helping improve Security Onion!"
else
if so-yaml.py replace /opt/so/saltstack/local/pillar/soc/soc_soc.sls soc.telemetryEnabled false; then
echo "Disabled SOC Telemetry."
else
fail "Failed to disable SOC Telemetry; aborting."
fi
fi
echo ""
fi
}
rollover_index() {
idx=$1
exists=$(so-elasticsearch-query "$idx" -o /dev/null -w "%{http_code}")
@@ -594,320 +462,6 @@ rollover_index() {
fi
}
suricata_idstools_migration() {
# For 2.4.70
# Backup the pillars for idstools
mkdir -p /nsm/backup/detections-migration/idstools
rsync -av /opt/so/saltstack/local/pillar/idstools/* /nsm/backup/detections-migration/idstools
if [[ $? -eq 0 ]]; then
echo "IDStools configuration has been backed up."
else
fail "Error: rsync failed to copy the files. IDStools configuration has not been backed up."
fi
# Backup Thresholds
mkdir -p /nsm/backup/detections-migration/suricata
rsync -av /opt/so/saltstack/local/salt/suricata/thresholding /nsm/backup/detections-migration/suricata
if [[ $? -eq 0 ]]; then
echo "Suricata thresholds have been backed up."
else
fail "Error: rsync failed to copy the files. Thresholds have not been backed up."
fi
# Backup local rules
mkdir -p /nsm/backup/detections-migration/suricata/local-rules
rsync -av /opt/so/rules/nids/suri/local.rules /nsm/backup/detections-migration/suricata/local-rules
if [[ -f /opt/so/saltstack/local/salt/idstools/rules/local.rules ]]; then
rsync -av /opt/so/saltstack/local/salt/idstools/rules/local.rules /nsm/backup/detections-migration/suricata/local-rules/local.rules.bak
fi
# Tell SOC to migrate
mkdir -p /opt/so/conf/soc/migrations
echo "0" > /opt/so/conf/soc/migrations/suricata-migration-2.4.70
chown -R socore:socore /opt/so/conf/soc/migrations
}
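# Sketch for checking whether SOC has consumed the flag (assumption: SOC removes
# or rewrites the file once the migration has run):
#   cat /opt/so/conf/soc/migrations/suricata-migration-2.4.70 2>/dev/null || echo "flag consumed"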
playbook_migration() {
# Start SOC Detections migration
mkdir -p /nsm/backup/detections-migration/{suricata,sigma/rules,elastalert}
# Remove cronjobs
crontab -l | grep -v 'so-playbook-sync_cron' | crontab -
crontab -l | grep -v 'so-playbook-ruleupdate_cron' | crontab -
if grep -A 1 'playbook:' /opt/so/saltstack/local/pillar/minions/* | grep -q 'enabled: True'; then
# Check for active Elastalert rules
active_rules_count=$(find /opt/so/rules/elastalert/playbook/ -type f \( -name "*.yaml" -o -name "*.yml" \) | wc -l)
if [[ "$active_rules_count" -gt 0 ]]; then
# Prompt the user to press ENTER if active Elastalert rules found
echo
echo "$active_rules_count Active Elastalert/Playbook rules found."
echo "In preparation for the new Detections module, they will be backed up and then disabled."
echo
echo "Press ENTER to proceed."
echo
# Read user input
read -r
echo "Backing up the Elastalert rules..."
rsync -av --ignore-missing-args --stats /opt/so/rules/elastalert/playbook/*.{yaml,yml} /nsm/backup/detections-migration/elastalert/
# Verify that rsync completed successfully
if [[ $? -eq 0 ]]; then
# Delete the Elastalert rules
rm -f /opt/so/rules/elastalert/playbook/*.{yaml,yml}
echo "Active Elastalert rules have been backed up."
else
fail "Error: rsync failed to copy the files. Active Elastalert rules have not been backed up."
fi
fi
echo
echo "Exporting Sigma rules from Playbook..."
MYSQLPW=$(awk '/mysql:/ {print $2}' /opt/so/saltstack/local/pillar/secrets.sls)
docker exec so-mysql sh -c "exec mysql -uroot -p${MYSQLPW} -D playbook -sN -e \"SELECT id, value FROM custom_values WHERE value LIKE '%View Sigma%'\"" | while read -r id value; do
echo -e "$value" > "/nsm/backup/detections-migration/sigma/rules/$id.yaml"
done || fail "Failed to export Sigma rules..."
echo
echo "Exporting Sigma Filters from Playbook..."
docker exec so-mysql sh -c "exec mysql -uroot -p${MYSQLPW} -D playbook -sN -e \"SELECT issues.subject as title, custom_values.value as filter FROM issues JOIN custom_values ON issues.id = custom_values.customized_id WHERE custom_values.value LIKE '%sofilter%'\"" > /nsm/backup/detections-migration/sigma/custom-filters.txt || fail "Failed to export Custom Sigma Filters."
echo
echo "Backing up Playbook database..."
docker exec so-mysql sh -c "mysqldump -uroot -p${MYSQLPW} --databases playbook > /tmp/playbook-dump" || fail "Failed to dump Playbook database."
docker cp so-mysql:/tmp/playbook-dump /nsm/backup/detections-migration/sigma/playbook-dump.sql || fail "Failed to backup Playbook database."
fi
echo
echo "Stopping Playbook services & cleaning up..."
for container in so-playbook so-mysql so-soctopus; do
if [ -n "$(docker ps -q -f name=^${container}$)" ]; then
docker stop "$container"
fi
done
sed -i '/so-playbook\|so-soctopus\|so-mysql/d' /opt/so/conf/so-status/so-status.conf
rm -f /usr/sbin/so-playbook-* /usr/sbin/so-soctopus-* /usr/sbin/so-mysql-*
echo
echo "Playbook Migration is complete...."
}
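# Restore sketch for the Playbook database dump (hypothetical; needs a separate
# MySQL instance, since the so-mysql container is stopped and removed above):
#   mysql -uroot -p < /nsm/backup/detections-migration/sigma/playbook-dump.sql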
suricata_idstools_removal_pre() {
# For SOUPs beginning with 2.4.200 - pre-SOUP checks
# Create syncBlock file
install -d -o 939 -g 939 -m 755 /opt/so/conf/soc/fingerprints
install -o 939 -g 939 -m 644 /dev/null /opt/so/conf/soc/fingerprints/suricataengine.syncBlock
cat > /opt/so/conf/soc/fingerprints/suricataengine.syncBlock << EOF
Suricata ruleset sync is blocked until this file is removed. **CRITICAL** Make sure that you have manually added any custom Suricata rulesets via SOC config before removing this file - review the documentation for more details: https://securityonion.net/docs/nids
EOF
# Remove possible symlink & create salt local rules dir
[ -L /opt/so/saltstack/local/salt/suricata/rules ] && rm -f /opt/so/saltstack/local/salt/suricata/rules
install -d -o 939 -g 939 /opt/so/saltstack/local/salt/suricata/rules/ || echo "Failed to create Suricata local rules directory"
# Backup custom rules & overrides
mkdir -p /nsm/backup/detections-migration/2-4-200
cp /usr/sbin/so-rule-update /nsm/backup/detections-migration/2-4-200
cp /opt/so/conf/idstools/etc/rulecat.conf /nsm/backup/detections-migration/2-4-200
# Backup so-detection index via reindex
echo "Creating sos-backup index template..."
template_result=$(/sbin/so-elasticsearch-query '_index_template/sos-backup' -X PUT \
--retry 5 --retry-delay 15 --retry-all-errors \
-d '{"index_patterns":["sos-backup-*"],"priority":501,"template":{"settings":{"index":{"number_of_replicas":0,"number_of_shards":1}}}}')
if [[ -z "$template_result" ]] || ! echo "$template_result" | jq -e '.acknowledged == true' > /dev/null 2>&1; then
echo "Error: Failed to create sos-backup index template"
echo "$template_result"
exit 1
fi
BACKUP_INDEX="sos-backup-detection-$(date +%Y%m%d-%H%M%S)"
echo "Backing up so-detection index to $BACKUP_INDEX..."
reindex_result=$(/sbin/so-elasticsearch-query '_reindex?wait_for_completion=true' \
--retry 5 --retry-delay 15 --retry-all-errors \
-X POST -d "{\"source\": {\"index\": \"so-detection\"}, \"dest\": {\"index\": \"$BACKUP_INDEX\"}}")
if [[ -z "$reindex_result" ]]; then
echo "Error: Backup of detections failed - no response from Elasticsearch"
exit 1
elif echo "$reindex_result" | jq -e '.created >= 0' > /dev/null 2>&1; then
echo "Backup complete: $(echo "$reindex_result" | jq -r '.created') documents copied"
elif echo "$reindex_result" | grep -q "index_not_found_exception"; then
echo "so-detection index does not exist, skipping backup"
else
echo "Error: Backup of detections failed"
echo "$reindex_result"
exit 1
fi
}
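# Restore sketch, should the backup ever be needed (substitute the timestamped
# index name printed above; this simply reverses the reindex):
#   /sbin/so-elasticsearch-query '_reindex?wait_for_completion=true' -X POST \
#     -d '{"source":{"index":"sos-backup-detection-YYYYMMDD-HHMMSS"},"dest":{"index":"so-detection"}}'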
suricata_idstools_removal_post() {
# For SOUPs beginning with 2.4.200 - post-SOUP checks
echo "Checking idstools configuration for custom modifications..."
# Normalize and hash file content for consistent comparison
# Args: $1 - file path
# Outputs: SHA256 hash to stdout
# Returns: 0 on success, 1 on failure
hash_normalized_file() {
local file="$1"
if [[ ! -r "$file" ]]; then
return 1
fi
# Ensure trailing newline for consistent hashing regardless of source file
{ sed -E \
-e 's/^[[:space:]]+//; s/[[:space:]]+$//' \
-e '/^$/d' \
-e 's|--url=http://[^:]+:7788|--url=http://MANAGER:7788|' \
"$file"; echo; } | sed '/^$/d' | sha256sum | awk '{print $1}'
}
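# Usage sketch: two copies of a file that differ only in whitespace, blank lines,
# or the manager URL hash identically, e.g.:
#   hash_normalized_file /usr/sbin/so-rule-update
#   # -> one sha256 digest, compared against the known-default lists below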
# Known-default hashes for so-rule-update (ETOPEN ruleset)
KNOWN_SO_RULE_UPDATE_HASHES=(
# 2.4.100+ (suricata 7.0.3, non-airgap)
"5fbd067ced86c8ec72ffb7e1798aa624123b536fb9d78f4b3ad8d3b45db1eae7" # 2.4.100-2.4.190 non-Airgap
# 2.4.90+ airgap (same for 2.4.90 and 2.4.100+)
"61f632c55791338c438c071040f1490066769bcce808b595b5cc7974a90e653a" # 2.4.90+ Airgap
# 2.4.90 (suricata 6.0, non-airgap, comment inside proxy block)
"0380ec52a05933244ab0f0bc506576e1d838483647b40612d5fe4b378e47aedd" # 2.4.90 non-Airgap
# 2.4.10-2.4.80 (suricata 6.0, non-airgap, comment outside proxy block)
"b6e4d1b5a78d57880ad038a9cd2cc6978aeb2dd27d48ea1a44dd866a2aee7ff4" # 2.4.10-2.4.80 non-Airgap
# 2.4.10-2.4.80 airgap
"b20146526ace2b142fde4664f1386a9a1defa319b3a1d113600ad33a1b037dad" # 2.4.10-2.4.80 Airgap
# 2.4.5 and earlier (no pidof check, non-airgap)
"d04f5e4015c348133d28a7840839e82d60009781eaaa1c66f7f67747703590dc" # 2.4.5 non-Airgap
)
# Known-default hashes for rulecat.conf
KNOWN_RULECAT_CONF_HASHES=(
# 2.4.100+ (suricata 7.0.3)
"302e75dca9110807f09ade2eec3be1fcfc8b2bf6cf2252b0269bb72efeefe67e" # 2.4.100-2.4.190 without SURICATA md_engine
"8029b7718c324a9afa06a5cf180afde703da1277af4bdd30310a6cfa3d6398cb" # 2.4.100-2.4.190 with SURICATA md_engine
# 2.4.80-2.4.90 (suricata 6.0, with --suricata-version and --output)
"4d8b318e6950a6f60b02f307cf27c929efd39652990c1bd0c8820aa8a307e1e7" # 2.4.80-2.4.90 without SURICATA md_engine
"a1ddf264c86c4e91c81c5a317f745a19466d4311e4533ec3a3c91fed04c11678" # 2.4.80-2.4.90 with SURICATA md_engine
# 2.4.50-2.4.70 (/suri/ path, no --suricata-version)
"86e3afb8d0f00c62337195602636864c98580a13ca9cc85029661a539deae6ae" # 2.4.50-2.4.70 without SURICATA md_engine
"5a97604ca5b820a10273a2d6546bb5e00c5122ca5a7dfe0ba0bfbce5fc026f4b" # 2.4.50-2.4.70 with SURICATA md_engine
# 2.4.20-2.4.40 (/nids/ path without /suri/)
"d098ea9ecd94b5cca35bf33543f8ea8f48066a0785221fabda7fef43d2462c29" # 2.4.20-2.4.40 without SURICATA md_engine
"9dbc60df22ae20d65738ba42e620392577857038ba92278e23ec182081d191cd" # 2.4.20-2.4.40 with SURICATA md_engine
# 2.4.5-2.4.10 (/sorules/ path for extraction/filters)
"490f6843d9fca759ee74db3ada9c702e2440b8393f2cfaf07bbe41aaa6d955c3" # 2.4.5-2.4.10 with SURICATA md_engine
# Note: 2.4.5-2.4.10 without SURICATA md_engine has same hash as 2.4.20-2.4.40 without SURICATA md_engine
)
# Check a config file against known hashes
# Args: $1 - file path, $2 - array name of known hashes
check_config_file() {
local file="$1"
local known_hashes_array="$2"
local file_display_name=$(basename "$file")
if [[ ! -f "$file" ]]; then
echo "Warning: $file not found"
echo "$file_display_name not found - manual verification required" >> /opt/so/conf/soc/fingerprints/suricataengine.syncBlock
return 1
fi
echo "Hashing $file..."
local file_hash
if ! file_hash=$(hash_normalized_file "$file"); then
echo "Warning: Could not read $file"
echo "$file_display_name not readable - manual verification required" >> /opt/so/conf/soc/fingerprints/suricataengine.syncBlock
return 1
fi
echo " Hash: $file_hash"
# Check if hash matches any known default
local -n known_hashes=$known_hashes_array
for known_hash in "${known_hashes[@]}"; do
if [[ "$file_hash" == "$known_hash" ]]; then
echo " Matches known default configuration"
return 0
fi
done
# No match - custom configuration detected
echo "Does not match known default - custom configuration detected"
echo "Custom $file_display_name detected (hash: $file_hash)" >> /opt/so/conf/soc/fingerprints/suricataengine.syncBlock
# If this is so-rule-update, check for ETPRO license code and write out to the syncBlock file
# If ETPRO is enabled, the license code already exists in the so-rule-update script, this is just making it easier to migrate
if [[ "$file_display_name" == "so-rule-update" ]]; then
local etpro_code
etpro_code=$(grep -oP '\-\-etpro=\K[0-9a-fA-F]+' "$file" 2>/dev/null) || true
if [[ -n "$etpro_code" ]]; then
echo "ETPRO code found: $etpro_code" >> /opt/so/conf/soc/fingerprints/suricataengine.syncBlock
fi
fi
return 1
}
# Check so-rule-update and rulecat.conf
SO_RULE_UPDATE="/usr/sbin/so-rule-update"
RULECAT_CONF="/opt/so/conf/idstools/etc/rulecat.conf"
custom_found=0
check_config_file "$SO_RULE_UPDATE" "KNOWN_SO_RULE_UPDATE_HASHES" || custom_found=1
check_config_file "$RULECAT_CONF" "KNOWN_RULECAT_CONF_HASHES" || custom_found=1
# Check for ETPRO rules on airgap systems
if [[ $is_airgap -eq 0 ]] && grep -q 'ETPRO ' /nsm/rules/suricata/emerging-all.rules 2>/dev/null; then
echo "ETPRO rules detected on airgap system - custom configuration"
echo "ETPRO rules detected on Airgap in /nsm/rules/suricata/emerging-all.rules" >> /opt/so/conf/soc/fingerprints/suricataengine.syncBlock
custom_found=1
fi
# If no custom configs found, remove syncBlock
if [[ $custom_found -eq 0 ]]; then
echo "idstools migration completed successfully - removing Suricata engine syncBlock"
rm -f /opt/so/conf/soc/fingerprints/suricataengine.syncBlock
else
echo "Custom idstools configuration detected - syncBlock remains in place"
echo "Review /opt/so/conf/soc/fingerprints/suricataengine.syncBlock for details"
fi
echo "Cleaning up idstools"
echo "Stopping and removing the idstools container..."
if [ -n "$(docker ps -q -f name=^so-idstools$)" ]; then
image_name=$(docker ps -a --filter name=^so-idstools$ --format '{{.Image}}' 2>/dev/null || true)
docker stop so-idstools || echo "Warning: failed to stop so-idstools container"
docker rm so-idstools || echo "Warning: failed to remove so-idstools container"
if [[ -n "$image_name" ]]; then
echo "Removing idstools image: $image_name"
docker rmi "$image_name" || echo "Warning: failed to remove image $image_name"
fi
fi
echo "Removing idstools symlink and scripts..."
rm -rf /usr/sbin/so-idstools*
sed -i '/^#\?so-idstools$/d' /opt/so/conf/so-status/so-status.conf
crontab -l | grep -v 'so-rule-update' | crontab -
# Backup the salt master config & manager pillar before editing it
cp "/opt/so/saltstack/local/pillar/minions/$MINIONID.sls" /nsm/backup/detections-migration/2-4-200/
cp /etc/salt/master /nsm/backup/detections-migration/2-4-200/
so-yaml.py remove "/opt/so/saltstack/local/pillar/minions/$MINIONID.sls" idstools
so-yaml.py removelistitem /etc/salt/master file_roots.base /opt/so/rules/nids
}
determine_elastic_agent_upgrade() {
if [[ $is_airgap -eq 0 ]]; then
update_elastic_agent_airgap
@@ -974,68 +528,6 @@ update_airgap_repo() {
createrepo /nsm/repo
}
update_elasticsearch_index_settings() {
# Update managed indices to reflect latest index template
for idx in "so-detection" "so-detectionhistory" "so-case" "so-casehistory"; do
ilm_name=$idx
if [ "$idx" = "so-detectionhistory" ]; then
ilm_name="so-detection"
elif [ "$idx" = "so-casehistory" ]; then
ilm_name="so-case"
fi
JSON_STRING=$( jq -n --arg ILM_NAME "$ilm_name" '{"settings": {"index.auto_expand_replicas":"0-2","index.lifecycle.name":($ILM_NAME + "-logs")}}')
echo "Checking if index \"$idx\" exists"
exists=$(curl -K /opt/so/conf/elasticsearch/curl.config -s -o /dev/null -w "%{http_code}" -k -L -H "Content-Type: application/json" "https://localhost:9200/$idx")
if [ "$exists" -eq 200 ]; then
echo "$idx index found..."
echo "Updating $idx index settings"
curl -K /opt/so/conf/elasticsearch/curl.config -s -k -L -H "Content-Type: application/json" "https://localhost:9200/$idx/_settings" -d "$JSON_STRING" -XPUT
echo -e "\n"
else
echo -e "Skipping $idx... index does not exist\n"
fi
done
}
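# Verification sketch (jq is assumed available, as elsewhere in this script):
#   curl -K /opt/so/conf/elasticsearch/curl.config -s -k -L \
#     "https://localhost:9200/so-case/_settings" | jq '.[].settings.index.lifecycle'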
update_import_fleet_output() {
if output=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/outputs/so-manager_elasticsearch" --retry 3 --fail 2>/dev/null); then
# Update the current config of so-manager_elasticsearch output policy in place (leaving any customizations like having changed the preset value from 'balanced' to 'performance')
CAFINGERPRINT=$(openssl x509 -in /etc/pki/tls/certs/intca.crt -outform DER | sha256sum | cut -d' ' -f1 | tr '[:lower:]' '[:upper:]')
updated_policy=$(jq --arg CAFINGERPRINT "$CAFINGERPRINT" '.item | (del(.id) | .ca_trusted_fingerprint = $CAFINGERPRINT)' <<< "$output")
if curl -sK /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/outputs/so-manager_elasticsearch" -XPUT -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$updated_policy" --retry 3 --fail 2>/dev/null; then
echo "Successfully updated so-manager_elasticsearch fleet output policy"
else
fail "Failed to update so-manager_elasticsearch fleet output policy"
fi
fi
}
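# Spot-check sketch: the fingerprint Fleet reports should match the CA cert digest
# computed the same way as above:
#   curl -sK /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/outputs/so-manager_elasticsearch" | jq -r '.item.ca_trusted_fingerprint'
#   openssl x509 -in /etc/pki/tls/certs/intca.crt -outform DER | sha256sum | cut -d' ' -f1 | tr '[:lower:]' '[:upper:]'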
update_default_logstash_output() {
echo "Updating fleet logstash output policy grid-logstash"
if logstash_policy=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs/so-manager_logstash" --retry 3 --retry-delay 10 --fail 2>/dev/null); then
# Keep already configured hosts for this update, subsequent host updates come from so-elastic-fleet-outputs-update
HOSTS=$(echo "$logstash_policy" | jq -r '.item.hosts')
DEFAULT_ENABLED=$(echo "$logstash_policy" | jq -r '.item.is_default')
DEFAULT_MONITORING_ENABLED=$(echo "$logstash_policy" | jq -r '.item.is_default_monitoring')
LOGSTASHKEY=$(openssl rsa -in /etc/pki/elasticfleet-logstash.key)
LOGSTASHCRT=$(openssl x509 -in /etc/pki/elasticfleet-logstash.crt)
LOGSTASHCA=$(openssl x509 -in /etc/pki/tls/certs/intca.crt)
JSON_STRING=$(jq -n \
--argjson HOSTS "$HOSTS" \
--arg DEFAULT_ENABLED "$DEFAULT_ENABLED" \
--arg DEFAULT_MONITORING_ENABLED "$DEFAULT_MONITORING_ENABLED" \
--arg LOGSTASHKEY "$LOGSTASHKEY" \
--arg LOGSTASHCRT "$LOGSTASHCRT" \
--arg LOGSTASHCA "$LOGSTASHCA" \
'{"name":"grid-logstash","type":"logstash","hosts": $HOSTS,"is_default": $DEFAULT_ENABLED,"is_default_monitoring": $DEFAULT_MONITORING_ENABLED,"config_yaml":"","ssl":{"certificate": $LOGSTASHCRT,"certificate_authorities":[ $LOGSTASHCA ]},"secrets":{"ssl":{"key": $LOGSTASHKEY }}}')
# only attempt the update if the existing policy was fetched successfully
if curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_logstash" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" --retry 3 --retry-delay 10 --fail; then
echo "Successfully updated grid-logstash fleet output policy"
fi
else
echo "Warning: could not fetch the existing so-manager_logstash output policy; skipping update"
fi
}
update_salt_mine() {
echo "Populating the mine with mine_functions for each host."
set +e
@@ -1573,35 +1065,7 @@ EOF
# Keeping this block in case we need to do a hotfix that requires salt update
apply_hotfix() {
if [[ "$INSTALLEDVERSION" == "2.4.20" ]] ; then
salt-call state.apply elasticfleet -l info queue=True
. /usr/sbin/so-elastic-fleet-common
elastic_fleet_integration_remove endpoints-initial elastic-defend-endpoints
/usr/sbin/so-elastic-fleet-integration-policy-elastic-defend
elif [[ "$INSTALLEDVERSION" == "2.4.30" ]] ; then
if [[ $is_airgap -eq 0 ]]; then
update_airgap_rules
fi
if [[ -f /etc/pki/managerssl.key.old ]]; then
echo "Skipping Certificate Generation"
else
rm -f /opt/so/conf/elastic-fleet/integrations/endpoints-initial/elastic-defend-endpoints.json
so-kibana-restart --force
so-kibana-api-check
. /usr/sbin/so-elastic-fleet-common
elastic_fleet_integration_remove endpoints-initial elastic-defend-endpoints
rm -f /opt/so/state/eaintegrations.txt
salt-call state.apply ca queue=True
stop_salt_minion
mv /etc/pki/managerssl.crt /etc/pki/managerssl.crt.old
mv /etc/pki/managerssl.key /etc/pki/managerssl.key.old
systemctl_func "start" "salt-minion"
wait_for_salt_minion_with_restart "$MINIONID" "60" "3" "$SOUP_LOG" || fail "Salt minion was not running or ready."
fi
else
echo "No actions required. ($INSTALLEDVERSION/$HOTFIXVERSION)"
fi
}
failed_soup_restore_items() {