#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
. /usr/sbin/so-common
. /usr/sbin/so-image-common
UPDATE_DIR=/tmp/sogh/securityonion
DEFAULT_SALT_DIR=/opt/so/saltstack/default
INSTALLEDVERSION=$(cat /etc/soversion)
POSTVERSION=$INSTALLEDVERSION
INSTALLEDSALTVERSION=$(salt --versions-report | grep Salt: | awk '{print $2}')
BATCHSIZE=5
SOUP_LOG=/root/soup.log
WHATWOULDYOUSAYYAHDOHERE=soup
whiptail_title='Security Onion UPdater'
NOTIFYCUSTOMELASTICCONFIG=false
TOPFILE=/opt/so/saltstack/default/salt/top.sls
BACKUPTOPFILE=/opt/so/saltstack/default/salt/top.sls.backup
SALTUPGRADED=false
SALT_CLOUD_INSTALLED=false
SALT_CLOUD_CONFIGURED=false
# used to display messages to the user at the end of soup
declare -a FINAL_MESSAGE_QUEUE=()
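# Trap handler (set via 'trap' in main): translate common exit codes into readable messages,
# restore services and highstate via failed_soup_restore_items, then exit with the original code.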
check_err() {
local exit_code=$1
local err_msg="Unhandled error occurred, please check $SOUP_LOG for details."
[[ $ERR_HANDLED == true ]] && exit $exit_code
if [[ $exit_code -ne 0 ]]; then
set +e
failed_soup_restore_items
printf '%s' "Soup failed with error $exit_code: "
case $exit_code in
2)
echo 'No such file or directory'
;;
5)
echo 'Interrupted system call'
;;
12)
echo 'Out of memory'
;;
28)
echo 'No space left on device'
echo "Likely ran out of space on disk, please review hardware requirements for Security Onion: $DOC_BASE_URL/hardware.html"
;;
30)
echo 'Read-only file system'
;;
35)
echo 'Resource temporarily unavailable'
;;
64)
echo 'Machine is not on the network'
;;
67)
echo 'Link has been severed'
;;
100)
echo 'Network is down'
;;
101)
echo 'Network is unreachable'
;;
102)
echo 'Network reset'
;;
110)
echo 'Connection timed out'
;;
111)
echo 'Connection refused'
;;
112)
echo 'Host is down'
;;
113)
echo 'No route to host'
;;
*)
echo 'Unhandled error'
echo "$err_msg"
;;
esac
if [[ $exit_code -ge 64 && $exit_code -le 113 ]]; then
echo "$err_msg"
fi
exit $exit_code
fi
}
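# Refresh so-common and so-image-common from the update source, re-apply the common state,
# then ask the user to run soup again.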
add_common() {
cp $UPDATE_DIR/salt/common/tools/sbin/so-common $DEFAULT_SALT_DIR/salt/common/tools/sbin/
cp $UPDATE_DIR/salt/common/tools/sbin/so-image-common $DEFAULT_SALT_DIR/salt/common/tools/sbin/
salt-call state.apply common queue=True
echo "Run soup one more time"
exit 0
}
airgap_mounted() {
# Let's see if the ISO is already mounted.
if [[ -f /tmp/soagupdate/SecurityOnion/VERSION ]]; then
echo "The ISO is already mounted"
else
if [[ -z $ISOLOC ]]; then
echo "This is airgap. Ask for a location."
echo ""
cat << EOF
In order for soup to proceed, you must provide the path to the downloaded Security Onion ISO file, or the path to the CD-ROM or equivalent device containing the ISO media.
For example, if you have copied the new Security Onion ISO file to your home directory, then the path might look like /home/myuser/securityonion-2.x.y.iso.
Or, if you have burned the new ISO onto an optical disk then the path might look like /dev/cdrom.
EOF
read -rp 'Enter the path to the new Security Onion ISO content: ' ISOLOC
fi
if [[ -f $ISOLOC ]]; then
# Mounting the ISO image
mkdir -p /tmp/soagupdate
mount -t iso9660 -o loop $ISOLOC /tmp/soagupdate
# Make sure mounting was successful
if [ ! -f /tmp/soagupdate/SecurityOnion/VERSION ]; then
echo "Something went wrong trying to mount the ISO."
echo "Ensure you verify the ISO that you downloaded."
exit 0
else
echo "ISO has been mounted!"
fi
elif [[ -f $ISOLOC/SecurityOnion/VERSION ]]; then
ln -s $ISOLOC /tmp/soagupdate
echo "Found the update content"
elif [[ -b $ISOLOC ]]; then
mkdir -p /tmp/soagupdate
mount $ISOLOC /tmp/soagupdate
if [ ! -f /tmp/soagupdate/SecurityOnion/VERSION ]; then
echo "Something went wrong trying to mount the device."
echo "Ensure you verify the ISO that you downloaded."
exit 0
else
echo "Device has been mounted!"
fi
else
echo "Could not find Security Onion ISO content at ${ISOLOC}"
echo "Ensure the path you entered is correct, and that you verify the ISO that you downloaded."
exit 0
fi
fi
}
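# For airgap (or -f) updates: replace the local Docker registry contents with the container
# images bundled on the update media, then restart the registry.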
airgap_update_dockers() {
if [[ $is_airgap -eq 0 ]] || [[ ! -z "$ISOLOC" ]]; then
# Let's copy the tarball
if [[ ! -f $AGDOCKER/registry.tar ]]; then
echo "Unable to locate registry. Exiting"
exit 0
else
echo "Stopping the registry docker"
docker stop so-dockerregistry
docker rm so-dockerregistry
echo "Copying the new dockers over"
tar xf "$AGDOCKER/registry.tar" -C /nsm/docker-registry/docker
echo "Add Registry back"
docker load -i "$AGDOCKER/registry_image.tar"
echo "Restart registry container"
salt-call state.apply registry queue=True
fi
fi
}
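# Snapshot the default and local Salt states and pillars into timestamped tarballs under /nsm/backup/.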
backup_old_states_pillars() {
tar czf /nsm/backup/${INSTALLEDVERSION}_$(date +%Y%m%d-%H%M%S)_soup_default_states_pillars.tar.gz /opt/so/saltstack/default/
tar czf /nsm/backup/${INSTALLEDVERSION}_$(date +%Y%m%d-%H%M%S)_soup_local_states_pillars.tar.gz /opt/so/saltstack/local/
}
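# Recreate the so-dockerregistry container by re-applying the registry Salt state.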
update_registry() {
docker stop so-dockerregistry
docker rm so-dockerregistry
salt-call state.apply registry queue=True
}
check_airgap() {
# See if this is an airgap install
AIRGAP=$(cat /opt/so/saltstack/local/pillar/global/soc_global.sls | grep airgap: | awk '{print $2}' | tr '[:upper:]' '[:lower:]')
if [[ "$AIRGAP" == "true" ]]; then
is_airgap=0
UPDATE_DIR=/tmp/soagupdate/SecurityOnion
AGDOCKER=/tmp/soagupdate/docker
AGREPO=/tmp/soagupdate/minimal/Packages
else
is_airgap=1
fi
}
# {% raw %}
check_local_mods() {
local salt_local=/opt/so/saltstack/local
local_mod_arr=()
while IFS= read -r -d '' local_file; do
stripped_path=${local_file#"$salt_local"}
default_file="${DEFAULT_SALT_DIR}${stripped_path}"
if [[ -f $default_file ]]; then
file_diff=$(diff "$default_file" "$local_file" )
if [[ $(echo "$file_diff" | grep -Ec "^[<>]") -gt 0 ]]; then
local_mod_arr+=( "$local_file" )
fi
fi
done< <(find $salt_local -type f -print0)
if [[ ${#local_mod_arr[@]} -gt 0 ]]; then
echo "Potentially breaking changes found in the following files (check ${DEFAULT_SALT_DIR} for original copy):"
for file_str in "${local_mod_arr[@]}"; do
echo " $file_str"
done
echo ""
echo "To reference this list later, check $SOUP_LOG"
sleep 10
fi
}
# {% endraw %}
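# Verify the manager's pillars render cleanly; exit early with the offending sls files
# if salt-call reports rendering errors.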
check_pillar_items() {
local pillar_output=$(salt-call pillar.items -lerror --out=json)
cond=$(jq '.local | has("_errors")' <<< "$pillar_output")
if [[ "$cond" == "true" ]]; then
printf "\nThere is an issue rendering the manager's pillars. Please correct the issues in the sls files mentioned below before running SOUP again.\n\n"
jq '.local._errors[]' <<< "$pillar_output"
exit 0
else
printf "\nThe manager's pillars can be rendered. We can proceed with SOUP.\n\n"
fi
}
check_saltmaster_status() {
set +e
echo "Waiting on the Salt Master service to be ready."
check_salt_master_status || fail "Can't access salt master or it is not ready. Check $SOUP_LOG for details."
set -e
}
check_sudoers() {
if grep -q "so-setup" /etc/sudoers; then
echo "There is an entry for so-setup in the sudoers file, this can be safely deleted using \"visudo\"."
fi
}
check_os_updates() {
# Check to see if there are OS updates
echo "Checking for OS updates."
NEEDUPDATES="We have detected missing operating system (OS) updates. Do you want to install these OS updates now? This could take a while depending on the size of your grid and how many packages are missing, but it is recommended to keep your system updated."
OSUPDATES=$(dnf -q list updates | grep -v docker | grep -v containerd | grep -v salt | grep -v Available | wc -l)
if [[ "$OSUPDATES" -gt 0 ]]; then
if [[ -z $UNATTENDED ]]; then
echo "$NEEDUPDATES"
echo ""
read -rp "Press U to update OS packages (recommended), C to continue without updates, or E to exit: " confirm
if [[ "$confirm" == [cC] ]]; then
echo "Continuing without updating packages"
elif [[ "$confirm" == [uU] ]]; then
echo "Applying Grid Updates"
update_flag=true
else
echo "Exiting soup"
exit 0
fi
else
update_flag=true
fi
else
echo "Looks like you have an updated OS"
fi
if [[ $update_flag == true ]]; then
set +e
run_check_net_err "salt '*' -b 5 state.apply patch.os queue=True" 'Could not apply OS updates, please check your network connection.'
set -e
fi
}
clean_dockers() {
# Placeholder for cleaning up old Docker images
echo "Trying to clean up old dockers."
docker system prune -a -f --volumes
}
clone_to_tmp() {
# Clean old files
rm -rf /tmp/sogh
# Make a temp location for the files
mkdir -p /tmp/sogh
cd /tmp/sogh
SOUP_BRANCH="-b 2.4/main"
if [ -n "$BRANCH" ]; then
SOUP_BRANCH="-b $BRANCH"
fi
git clone $SOUP_BRANCH https://github.com/Security-Onion-Solutions/securityonion.git
cd /tmp
if [ ! -f $UPDATE_DIR/VERSION ]; then
echo "Update was unable to pull from Github. Please check your Internet access."
exit 0
fi
}
disable_logstash_heavynodes() {
c=0
printf "\nChecking for heavynodes and disabling Logstash if they exist\n"
for file in /opt/so/saltstack/local/pillar/minions/*.sls; do
if [[ "$file" =~ "_heavynode.sls" && ! "$file" =~ "/opt/so/saltstack/local/pillar/minions/adv_" ]]; then
if [ "$c" -eq 0 ]; then
c=$((c + 1))
FINAL_MESSAGE_QUEUE+=("Logstash has been disabled on all heavynodes. It can be re-enabled via Grid Configuration in SOC.")
fi
echo "Disabling Logstash for: $file"
so-yaml.py replace "$file" logstash.enabled False
fi
done
}
enable_highstate() {
echo "Enabling highstate."
salt-call state.enable highstate -l info --local
echo ""
}
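# Hash the installed soup, so-common, so-image-common, and so-firewall scripts and their copies
# in the update source so verify_latest_update_script can compare them.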
get_soup_script_hashes() {
CURRENTSOUP=$(md5sum /usr/sbin/soup | awk '{print $1}')
GITSOUP=$(md5sum $UPDATE_DIR/salt/manager/tools/sbin/soup | awk '{print $1}')
CURRENTCMN=$(md5sum /usr/sbin/so-common | awk '{print $1}')
GITCMN=$(md5sum $UPDATE_DIR/salt/common/tools/sbin/so-common | awk '{print $1}')
CURRENTIMGCMN=$(md5sum /usr/sbin/so-image-common | awk '{print $1}')
GITIMGCMN=$(md5sum $UPDATE_DIR/salt/common/tools/sbin/so-image-common | awk '{print $1}')
CURRENTSOFIREWALL=$(md5sum /usr/sbin/so-firewall | awk '{print $1}')
GITSOFIREWALL=$(md5sum $UPDATE_DIR/salt/manager/tools/sbin/so-firewall | awk '{print $1}')
}
highstate() {
# Run a highstate.
salt-call state.highstate -l info queue=True
}
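# Lock the Salt master: swap top.sls for a minimal top file so only this manager's
# ca, ssl, and elasticsearch states are served during the upgrade.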
masterlock() {
echo "Locking Salt Master"
mv -v $TOPFILE $BACKUPTOPFILE
echo "base:" > $TOPFILE
echo " $MINIONID:" >> $TOPFILE
echo " - ca" >> $TOPFILE
echo " - ssl" >> $TOPFILE
echo " - elasticsearch" >> $TOPFILE
}
masterunlock() {
if [ -f $BACKUPTOPFILE ]; then
echo "Unlocking Salt Master"
mv -v $BACKUPTOPFILE $TOPFILE
else
echo "Salt Master does not need unlocked."
fi
}
phases_pillar_2_4_80() {
echo "Checking if pillar value: elasticsearch.index_settings.global_overrides.index_template.phases exists"
set +e
PHASES=$(so-yaml.py get /opt/so/saltstack/local/pillar/elasticsearch/soc_elasticsearch.sls elasticsearch.index_settings.global_overrides.index_template.phases)
case $? in
0)
so-yaml.py remove /opt/so/saltstack/local/pillar/elasticsearch/soc_elasticsearch.sls elasticsearch.index_settings.global_overrides.index_template.phases
read -r -d '' msg <<- EOF
Found elasticsearch.index_settings.global_overrides.index_template.phases was set to:
${PHASES}
Removed unused pillar value: elasticsearch.index_settings.global_overrides.index_template.phases
To set policies, navigate to the SOC Grid Configuration UI at elasticsearch.index_settings.global_overrides.policy.phases
A backup of all pillar files was saved to /nsm/backup/
EOF
FINAL_MESSAGE_QUEUE+=("$msg")
;;
2) echo "Pillar elasticsearch.index_settings.global_overrides.index_template.phases does not exist. No action taken." ;;
*) echo "so-yaml.py returned something other than 0 or 2 exit code" ;; # we shouldn't see this
esac
set -e
}
preupgrade_changes() {
# This function is to add any new pillar items if needed.
echo "Checking to see if changes are needed."
[[ "$INSTALLEDVERSION" == 2.4.2 ]] && up_to_2.4.3
[[ "$INSTALLEDVERSION" == 2.4.3 ]] && up_to_2.4.4
[[ "$INSTALLEDVERSION" == 2.4.4 ]] && up_to_2.4.5
[[ "$INSTALLEDVERSION" == 2.4.5 ]] && up_to_2.4.10
[[ "$INSTALLEDVERSION" == 2.4.10 ]] && up_to_2.4.20
[[ "$INSTALLEDVERSION" == 2.4.20 ]] && up_to_2.4.30
[[ "$INSTALLEDVERSION" == 2.4.30 ]] && up_to_2.4.40
[[ "$INSTALLEDVERSION" == 2.4.40 ]] && up_to_2.4.50
[[ "$INSTALLEDVERSION" == 2.4.50 ]] && up_to_2.4.60
[[ "$INSTALLEDVERSION" == 2.4.60 ]] && up_to_2.4.70
[[ "$INSTALLEDVERSION" == 2.4.70 ]] && up_to_2.4.80
[[ "$INSTALLEDVERSION" == 2.4.80 ]] && up_to_2.4.90
[[ "$INSTALLEDVERSION" == 2.4.90 ]] && up_to_2.4.100
[[ "$INSTALLEDVERSION" == 2.4.100 ]] && up_to_2.4.110
[[ "$INSTALLEDVERSION" == 2.4.110 ]] && up_to_2.4.111
[[ "$INSTALLEDVERSION" == 2.4.111 ]] && up_to_2.4.120
[[ "$INSTALLEDVERSION" == 2.4.120 ]] && up_to_2.4.130
[[ "$INSTALLEDVERSION" == 2.4.130 ]] && up_to_2.4.140
[[ "$INSTALLEDVERSION" == 2.4.140 ]] && up_to_2.4.141
[[ "$INSTALLEDVERSION" == 2.4.141 ]] && up_to_2.4.150
[[ "$INSTALLEDVERSION" == 2.4.150 ]] && up_to_2.4.160
[[ "$INSTALLEDVERSION" == 2.4.160 ]] && up_to_2.4.170
[[ "$INSTALLEDVERSION" == 2.4.170 ]] && up_to_2.4.180
[[ "$INSTALLEDVERSION" == 2.4.180 ]] && up_to_2.4.190
[[ "$INSTALLEDVERSION" == 2.4.190 ]] && up_to_2.4.200
true
}
postupgrade_changes() {
# This function is to add any new pillar items if needed.
echo "Running post upgrade processes."
[[ "$POSTVERSION" == 2.4.2 ]] && post_to_2.4.3
[[ "$POSTVERSION" == 2.4.3 ]] && post_to_2.4.4
[[ "$POSTVERSION" == 2.4.4 ]] && post_to_2.4.5
[[ "$POSTVERSION" == 2.4.5 ]] && post_to_2.4.10
[[ "$POSTVERSION" == 2.4.10 ]] && post_to_2.4.20
[[ "$POSTVERSION" == 2.4.20 ]] && post_to_2.4.30
[[ "$POSTVERSION" == 2.4.30 ]] && post_to_2.4.40
[[ "$POSTVERSION" == 2.4.40 ]] && post_to_2.4.50
[[ "$POSTVERSION" == 2.4.50 ]] && post_to_2.4.60
[[ "$POSTVERSION" == 2.4.60 ]] && post_to_2.4.70
[[ "$POSTVERSION" == 2.4.70 ]] && post_to_2.4.80
[[ "$POSTVERSION" == 2.4.80 ]] && post_to_2.4.90
[[ "$POSTVERSION" == 2.4.90 ]] && post_to_2.4.100
[[ "$POSTVERSION" == 2.4.100 ]] && post_to_2.4.110
[[ "$POSTVERSION" == 2.4.110 ]] && post_to_2.4.111
[[ "$POSTVERSION" == 2.4.111 ]] && post_to_2.4.120
[[ "$POSTVERSION" == 2.4.120 ]] && post_to_2.4.130
[[ "$POSTVERSION" == 2.4.130 ]] && post_to_2.4.140
[[ "$POSTVERSION" == 2.4.140 ]] && post_to_2.4.141
[[ "$POSTVERSION" == 2.4.141 ]] && post_to_2.4.150
[[ "$POSTVERSION" == 2.4.150 ]] && post_to_2.4.160
[[ "$POSTVERSION" == 2.4.160 ]] && post_to_2.4.170
[[ "$POSTVERSION" == 2.4.170 ]] && post_to_2.4.180
[[ "$POSTVERSION" == 2.4.180 ]] && post_to_2.4.190
[[ "$POSTVERSION" == 2.4.190 ]] && post_to_2.4.200
true
}
post_to_2.4.3() {
echo "Nothing to apply"
POSTVERSION=2.4.3
}
post_to_2.4.4() {
echo "Nothing to apply"
POSTVERSION=2.4.4
}
post_to_2.4.5() {
echo "Nothing to apply"
POSTVERSION=2.4.5
}
post_to_2.4.10() {
echo "Updating Elastic Fleet ES URLs...."
/sbin/so-elastic-fleet-es-url-update --force
POSTVERSION=2.4.10
}
post_to_2.4.20() {
echo "Pruning unused docker volumes on all nodes - This process will run in the background."
salt --async \* cmd.run "docker volume prune -f"
POSTVERSION=2.4.20
}
post_to_2.4.30() {
# there is an occasional error with this state: pki_public_ca_crt: TypeError: list indices must be integers or slices, not str
set +e
salt-call state.apply ca queue=True
set -e
stop_salt_minion
mv /etc/pki/managerssl.crt /etc/pki/managerssl.crt.old
mv /etc/pki/managerssl.key /etc/pki/managerssl.key.old
systemctl_func "start" "salt-minion"
salt-call state.apply nginx queue=True
enable_highstate
POSTVERSION=2.4.30
}
post_to_2.4.40() {
echo "Nothing to apply"
POSTVERSION=2.4.40
}
post_to_2.4.50() {
echo "Nothing to apply"
POSTVERSION=2.4.50
}
post_to_2.4.60() {
echo "Nothing to apply"
POSTVERSION=2.4.60
}
post_to_2.4.70() {
printf "\nRemoving idh.services from any existing IDH node pillar files\n"
for file in /opt/so/saltstack/local/pillar/minions/*.sls; do
if [[ $file =~ "_idh.sls" && ! $file =~ "/opt/so/saltstack/local/pillar/minions/adv_" ]]; then
echo "Removing idh.services from: $file"
so-yaml.py remove "$file" idh.services
fi
done
POSTVERSION=2.4.70
}
post_to_2.4.80() {
echo -e "\nChecking if update to Elastic Fleet output policy is required\n"
so-kafka-fleet-output-policy
POSTVERSION=2.4.80
}
post_to_2.4.90() {
disable_logstash_heavynodes
POSTVERSION=2.4.90
}
post_to_2.4.100() {
echo "Nothing to apply"
POSTVERSION=2.4.100
}
post_to_2.4.110() {
echo "Nothing to apply"
POSTVERSION=2.4.110
}
post_to_2.4.111() {
echo "Nothing to apply"
POSTVERSION=2.4.111
}
post_to_2.4.120() {
update_elasticsearch_index_settings
# Manually rollover suricata alerts index to ensure data_stream.dataset expected mapping is set to 'suricata'
rollover_index "logs-suricata.alerts-so"
POSTVERSION=2.4.120
}
post_to_2.4.130() {
# Optional integrations are loaded AFTER initial successful load of core ES templates (/opt/so/state/estemplates.txt)
# Dynamic templates are created in elasticsearch.enabled for every optional integration based on output of so-elastic-fleet-optional-integrations-load script
echo "Ensuring Elasticsearch templates are up to date after updating package registry"
salt-call state.apply elasticsearch queue=True
# Update kibana default space
salt-call state.apply kibana.config queue=True
echo "Updating Kibana default space"
/usr/sbin/so-kibana-space-defaults
POSTVERSION=2.4.130
}
post_to_2.4.140() {
echo "Nothing to apply"
POSTVERSION=2.4.140
}
post_to_2.4.141() {
echo "Nothing to apply"
POSTVERSION=2.4.141
}
post_to_2.4.150() {
echo "Nothing to apply"
POSTVERSION=2.4.150
}
post_to_2.4.160() {
echo "Nothing to apply"
POSTVERSION=2.4.160
}
post_to_2.4.170() {
# Update kibana default space
salt-call state.apply kibana.config queue=True
echo "Updating Kibana default space"
/usr/sbin/so-kibana-space-defaults
POSTVERSION=2.4.170
}
post_to_2.4.180() {
# Force update to Kafka output policy
/usr/sbin/so-kafka-fleet-output-policy --force
POSTVERSION=2.4.180
}
post_to_2.4.190() {
echo "Regenerating Elastic Agent Installers"
/sbin/so-elastic-agent-gen-installers
# Only need to update import / eval nodes
if [[ "$MINION_ROLE" == "import" ]] || [[ "$MINION_ROLE" == "eval" ]]; then
update_import_fleet_output
fi
# Check if expected default policy is logstash (global.pipeline is REDIS or "")
pipeline=$(lookup_pillar "pipeline" "global")
if [[ -z "$pipeline" ]] || [[ "$pipeline" == "REDIS" ]]; then
# Check if this grid is currently affected by corrupt fleet output policy
if elastic-agent status | grep "config: key file not configured" > /dev/null 2>&1; then
echo "Elastic Agent shows an ssl error connecting to logstash output. Updating output policy..."
update_default_logstash_output
fi
fi
# Apply new elasticsearch.server index template
rollover_index "logs-elasticsearch.server-default"
POSTVERSION=2.4.190
}
post_to_2.4.200() {
echo "Initiating Suricata idstools migration..."
suricata_idstools_removal_post
POSTVERSION=2.4.200
}
repo_sync() {
echo "Sync the local repo."
su socore -c '/usr/sbin/so-repo-sync' || fail "Unable to complete so-repo-sync."
}
stop_salt_master() {
# Kill all Salt jobs across the grid because they hang indefinitely if they are queued when salt-master restarts.
set +e
echo ""
echo "Killing all Salt jobs across the grid."
salt \* saltutil.kill_all_jobs >> $SOUP_LOG 2>&1
echo ""
echo "Killing any queued Salt jobs on the manager."
pkill -9 -ef "/usr/bin/python3 /bin/salt" >> $SOUP_LOG 2>&1
echo ""
echo "Storing salt-master PID."
MASTERPID=$(pgrep -f '/opt/saltstack/salt/bin/python3.10 /usr/bin/salt-master MainProcess')
if [ ! -z "$MASTERPID" ]; then
echo "Found salt-master PID $MASTERPID"
systemctl_func "stop" "salt-master"
if ps -p "$MASTERPID" > /dev/null 2>&1; then
timeout 30 tail --pid=$MASTERPID -f /dev/null || echo "salt-master still running at $(date +"%T.%6N") after waiting 30s. We cannot kill due to systemd restart option."
fi
else
echo "The salt-master PID was not found. The process '/usr/bin/salt-master MainProcess' is not running."
fi
set -e
}
stop_salt_minion() {
echo "Disabling highstate to prevent from running if salt-minion restarts."
salt-call state.disable highstate -l info --local
echo ""
# kill all salt jobs before stopping salt-minion
set +e
echo ""
echo "Killing Salt jobs on this node."
salt-call saltutil.kill_all_jobs --local
echo "Storing salt-minion pid."
MINIONPID=$(pgrep -f '/opt/saltstack/salt/bin/python3.10 /usr/bin/salt-minion' | head -1)
echo "Found salt-minion PID $MINIONPID"
systemctl_func "stop" "salt-minion"
timeout 30 tail --pid=$MINIONPID -f /dev/null || { echo "Killing salt-minion at $(date +"%T.%6N") after waiting 30s"; pkill -9 -ef /usr/bin/salt-minion; }
set -e
}
up_to_2.4.3() {
echo "Nothing to do for 2.4.3"
INSTALLEDVERSION=2.4.3
}
up_to_2.4.4() {
echo "Nothing to do for 2.4.4"
INSTALLEDVERSION=2.4.4
}
up_to_2.4.5() {
echo "Nothing to do for 2.4.5"
INSTALLEDVERSION=2.4.5
}
up_to_2.4.10() {
echo "Nothing to do for 2.4.10"
INSTALLEDVERSION=2.4.10
}
up_to_2.4.20() {
echo "Nothing to do for 2.4.20"
INSTALLEDVERSION=2.4.20
}
up_to_2.4.30() {
echo "Nothing to do for 2.4.30"
INSTALLEDVERSION=2.4.30
}
up_to_2.4.40() {
echo "Removing old ATT&CK Navigator Layers..."
rm -f /opt/so/conf/navigator/layers/enterprise-attack.json
rm -f /opt/so/conf/navigator/layers/nav_layer_playbook.json
INSTALLEDVERSION=2.4.40
}
up_to_2.4.50() {
echo "Creating additional pillars.."
mkdir -p /opt/so/saltstack/local/pillar/stig/
mkdir -p /opt/so/saltstack/local/salt/stig/
chown socore:socore /opt/so/saltstack/local/salt/stig/
touch /opt/so/saltstack/local/pillar/stig/adv_stig.sls
touch /opt/so/saltstack/local/pillar/stig/soc_stig.sls
# the file_roots need to be updated since the Salt 3006.6 upgrade does not allow symlinks outside the file_roots
# put new so-yaml in place
echo "Updating so-yaml"
\cp -v "$UPDATE_DIR/salt/manager/tools/sbin/so-yaml.py" "$DEFAULT_SALT_DIR/salt/manager/tools/sbin/"
\cp -v "$UPDATE_DIR/salt/manager/tools/sbin/so-yaml.py" /usr/sbin/
echo "Creating a backup of the salt-master config."
# INSTALLEDVERSION is 2.4.40 at this point, but we want the backup to carry the version
# SO was at prior to starting the upgrade. Use POSTVERSION here since it doesn't change until
# the post-upgrade changes run. POSTVERSION is set to INSTALLEDVERSION at the start of soup.
cp -v /etc/salt/master "/etc/salt/master.so-$POSTVERSION.bak"
echo "Adding /opt/so/rules to file_roots in /etc/salt/master using so-yaml"
so-yaml.py append /etc/salt/master file_roots.base /opt/so/rules/nids
echo "Moving Suricata rules"
mkdir /opt/so/rules/nids/suri
chown socore:socore /opt/so/rules/nids/suri
mv -v /opt/so/rules/nids/*.rules /opt/so/rules/nids/suri/.
echo "Adding /nsm/elastic-fleet/artifacts to file_roots in /etc/salt/master using so-yaml"
so-yaml.py append /etc/salt/master file_roots.base /nsm/elastic-fleet/artifacts
INSTALLEDVERSION=2.4.50
}
up_to_2.4.60() {
echo "Creating directory to store Suricata classification.config"
mkdir -vp /opt/so/saltstack/local/salt/suricata/classification
chown socore:socore /opt/so/saltstack/local/salt/suricata/classification
INSTALLEDVERSION=2.4.60
}
up_to_2.4.70() {
playbook_migration
suricata_idstools_migration
toggle_telemetry
add_detection_test_pillars
INSTALLEDVERSION=2.4.70
}
up_to_2.4.80() {
phases_pillar_2_4_80
# Kafka configuration changes
# Global pipeline changes to REDIS or KAFKA
echo "Removing global.pipeline pillar configuration"
sed -i '/pipeline:/d' /opt/so/saltstack/local/pillar/global/soc_global.sls
# Kafka pillars
mkdir -p /opt/so/saltstack/local/pillar/kafka
touch /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls
touch /opt/so/saltstack/local/pillar/kafka/adv_kafka.sls
echo 'kafka: ' > /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls
kafka_cluster_id=$(get_random_value 22)
echo ' cluster_id: '$kafka_cluster_id >> /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls
kafkapass=$(get_random_value)
echo ' password: '$kafkapass >> /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls
INSTALLEDVERSION=2.4.80
}
up_to_2.4.90() {
kafkatrust=$(get_random_value)
# rearranging the kafka pillar to reduce clutter in SOC UI
kafkasavedpass=$(so-yaml.py get /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.password)
kafkatrimpass=$(echo "$kafkasavedpass" | sed -n '1 p' )
echo "Making changes to the Kafka pillar layout"
so-yaml.py remove /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.password
so-yaml.py add /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.config.password "$kafkatrimpass"
so-yaml.py add /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.config.trustpass "$kafkatrust"
INSTALLEDVERSION=2.4.90
}
up_to_2.4.100() {
echo "Nothing to do for 2.4.100"
INSTALLEDVERSION=2.4.100
}
up_to_2.4.110() {
echo "Nothing to do for 2.4.110"
INSTALLEDVERSION=2.4.110
}
up_to_2.4.111() {
echo "Nothing to do for 2.4.111"
INSTALLEDVERSION=2.4.111
}
up_to_2.4.120() {
add_hydra_pillars
# this is needed for the new versionlock state
mkdir -p /opt/so/saltstack/local/pillar/versionlock
touch /opt/so/saltstack/local/pillar/versionlock/adv_versionlock.sls /opt/so/saltstack/local/pillar/versionlock/soc_versionlock.sls
INSTALLEDVERSION=2.4.120
}
up_to_2.4.130() {
# Remove any old Elastic Defend config files
rm -f /opt/so/conf/elastic-fleet/integrations/endpoints-initial/elastic-defend-endpoints.json
# Ensure override exists to allow nmcli access to other devices
touch /etc/NetworkManager/conf.d/10-globally-managed-devices.conf
INSTALLEDVERSION=2.4.130
}
up_to_2.4.140() {
echo "Nothing to do for 2.4.140"
INSTALLEDVERSION=2.4.140
}
up_to_2.4.141() {
echo "Nothing to do for 2.4.141"
INSTALLEDVERSION=2.4.141
}
up_to_2.4.150() {
echo "If the Detection indices exists, update the refresh_interval"
so-elasticsearch-query so-detection*/_settings -X PUT -d '{"index":{"refresh_interval":"1s"}}'
INSTALLEDVERSION=2.4.150
}
up_to_2.4.160() {
echo "Nothing to do for 2.4.160"
INSTALLEDVERSION=2.4.160
}
up_to_2.4.170() {
echo "Creating pillar files for virtualization feature"
states=("hypervisor" "vm" "libvirt")
# Create pillar files for each state
for state in "${states[@]}"; do
mkdir -p /opt/so/saltstack/local/pillar/$state
touch /opt/so/saltstack/local/pillar/$state/adv_$state.sls /opt/so/saltstack/local/pillar/$state/soc_$state.sls
done
INSTALLEDVERSION=2.4.170
}
up_to_2.4.180() {
echo "Nothing to do for 2.4.180"
INSTALLEDVERSION=2.4.180
}
up_to_2.4.190() {
# Elastic Update for this release, so download Elastic Agent files
determine_elastic_agent_upgrade
INSTALLEDVERSION=2.4.190
}
up_to_2.4.200() {
echo "Backing up idstools config..."
suricata_idstools_removal_pre
INSTALLEDVERSION=2.4.200
}
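# Create the Hydra pillar files and seed soc_hydra.sls with a random system secret and OIDC pairwise salt.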
add_hydra_pillars() {
mkdir -p /opt/so/saltstack/local/pillar/hydra
touch /opt/so/saltstack/local/pillar/hydra/soc_hydra.sls
chmod 660 /opt/so/saltstack/local/pillar/hydra/soc_hydra.sls
touch /opt/so/saltstack/local/pillar/hydra/adv_hydra.sls
HYDRAKEY=$(get_random_value)
HYDRASALT=$(get_random_value)
printf '%s\n'\
"hydra:"\
" config:"\
" secrets:"\
" system:"\
" - '$HYDRAKEY'"\
" oidc:"\
" subject_identifiers:"\
" pairwise:"\
" salt: '$HYDRASALT'"\
"" > /opt/so/saltstack/local/pillar/hydra/soc_hydra.sls
}
add_detection_test_pillars() {
if [[ -n "$SOUP_INTERNAL_TESTING" ]]; then
echo "Adding detection pillar values for automated testing"
so-yaml.py add /opt/so/saltstack/local/pillar/soc/soc_soc.sls soc.config.server.modules.elastalertengine.allowRegex SecurityOnion
so-yaml.py add /opt/so/saltstack/local/pillar/soc/soc_soc.sls soc.config.server.modules.elastalertengine.failAfterConsecutiveErrorCount 1
so-yaml.py add /opt/so/saltstack/local/pillar/soc/soc_soc.sls soc.config.server.modules.strelkaengine.allowRegex "EquationGroup_Toolset_Apr17__ELV_.*"
so-yaml.py add /opt/so/saltstack/local/pillar/soc/soc_soc.sls soc.config.server.modules.strelkaengine.failAfterConsecutiveErrorCount 1
so-yaml.py add /opt/so/saltstack/local/pillar/soc/soc_soc.sls soc.config.server.modules.suricataengine.allowRegex "(200033\\d|2100538|2102466)"
so-yaml.py add /opt/so/saltstack/local/pillar/soc/soc_soc.sls soc.config.server.modules.suricataengine.failAfterConsecutiveErrorCount 1
fi
}
toggle_telemetry() {
if [[ -z $UNATTENDED && $is_airgap -ne 0 ]]; then
cat << ASSIST_EOF
--------------- SOC Telemetry ---------------
The Security Onion development team could use your help! Enabling SOC
Telemetry will help the team understand which UI features are being
used and enables informed prioritization of future development.
Adjust this setting at anytime via the SOC Configuration screen.
Documentation: https://docs.securityonion.net/en/2.4/telemetry.html
ASSIST_EOF
echo -n "Continue the upgrade with SOC Telemetry enabled [Y/n]? "
read -r input
input=$(echo "${input,,}" | xargs echo -n)
echo ""
if [[ ${#input} -eq 0 || "$input" == "yes" || "$input" == "y" || "$input" == "yy" ]]; then
echo "Thank you for helping improve Security Onion!"
else
if so-yaml.py replace /opt/so/saltstack/local/pillar/soc/soc_soc.sls soc.telemetryEnabled false; then
echo "Disabled SOC Telemetry."
else
fail "Failed to disable SOC Telemetry; aborting."
fi
fi
echo ""
fi
}
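# Trigger a rollover of the index/alias passed as $1, if it exists, so that mappings from
# updated index templates take effect on the new backing index.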
rollover_index() {
idx=$1
exists=$(so-elasticsearch-query $idx -o /dev/null -w "%{http_code}")
if [[ $exists -eq 200 ]]; then
rollover=$(so-elasticsearch-query $idx/_rollover -o /dev/null -w "%{http_code}" -XPOST)
if [[ $rollover -eq 200 ]]; then
echo "Successfully triggered rollover for $idx..."
else
echo "Could not trigger rollover for $idx..."
fi
else
echo "Could not find index $idx..."
fi
}
suricata_idstools_migration() {
# For 2.4.70
#Backup the pillars for idstools
mkdir -p /nsm/backup/detections-migration/idstools
rsync -av /opt/so/saltstack/local/pillar/idstools/* /nsm/backup/detections-migration/idstools
if [[ $? -eq 0 ]]; then
echo "IDStools configuration has been backed up."
else
fail "Error: rsync failed to copy the files. IDStools configuration has not been backed up."
fi
#Backup Thresholds
mkdir -p /nsm/backup/detections-migration/suricata
rsync -av /opt/so/saltstack/local/salt/suricata/thresholding /nsm/backup/detections-migration/suricata
if [[ $? -eq 0 ]]; then
echo "Suricata thresholds have been backed up."
else
fail "Error: rsync failed to copy the files. Thresholds have not been backed up."
fi
#Backup local rules
mkdir -p /nsm/backup/detections-migration/suricata/local-rules
rsync -av /opt/so/rules/nids/suri/local.rules /nsm/backup/detections-migration/suricata/local-rules
if [[ -f /opt/so/saltstack/local/salt/idstools/rules/local.rules ]]; then
rsync -av /opt/so/saltstack/local/salt/idstools/rules/local.rules /nsm/backup/detections-migration/suricata/local-rules/local.rules.bak
fi
#Tell SOC to migrate
mkdir -p /opt/so/conf/soc/migrations
echo "0" > /opt/so/conf/soc/migrations/suricata-migration-2.4.70
chown -R socore:socore /opt/so/conf/soc/migrations
}
playbook_migration() {
# Start SOC Detections migration
mkdir -p /nsm/backup/detections-migration/{suricata,sigma/rules,elastalert}
# Remove cronjobs
crontab -l | grep -v 'so-playbook-sync_cron' | crontab -
crontab -l | grep -v 'so-playbook-ruleupdate_cron' | crontab -
if grep -A 1 'playbook:' /opt/so/saltstack/local/pillar/minions/* | grep -q 'enabled: True'; then
# Check for active Elastalert rules
active_rules_count=$(find /opt/so/rules/elastalert/playbook/ -type f \( -name "*.yaml" -o -name "*.yml" \) | wc -l)
if [[ "$active_rules_count" -gt 0 ]]; then
# Prompt the user to press ENTER if active Elastalert rules found
echo
echo "$active_rules_count Active Elastalert/Playbook rules found."
echo "In preparation for the new Detections module, they will be backed up and then disabled."
echo
echo "Press ENTER to proceed."
echo
# Read user input
read -r
echo "Backing up the Elastalert rules..."
rsync -av --ignore-missing-args --stats /opt/so/rules/elastalert/playbook/*.{yaml,yml} /nsm/backup/detections-migration/elastalert/
# Verify that rsync completed successfully
if [[ $? -eq 0 ]]; then
# Delete the Elastalert rules
rm -f /opt/so/rules/elastalert/playbook/*.yaml
echo "Active Elastalert rules have been backed up."
else
fail "Error: rsync failed to copy the files. Active Elastalert rules have not been backed up."
fi
fi
echo
echo "Exporting Sigma rules from Playbook..."
MYSQLPW=$(awk '/mysql:/ {print $2}' /opt/so/saltstack/local/pillar/secrets.sls)
docker exec so-mysql sh -c "exec mysql -uroot -p${MYSQLPW} -D playbook -sN -e \"SELECT id, value FROM custom_values WHERE value LIKE '%View Sigma%'\"" | while read -r id value; do
echo -e "$value" > "/nsm/backup/detections-migration/sigma/rules/$id.yaml"
done || fail "Failed to export Sigma rules..."
echo
echo "Exporting Sigma Filters from Playbook..."
docker exec so-mysql sh -c "exec mysql -uroot -p${MYSQLPW} -D playbook -sN -e \"SELECT issues.subject as title, custom_values.value as filter FROM issues JOIN custom_values ON issues.id = custom_values.customized_id WHERE custom_values.value LIKE '%sofilter%'\"" > /nsm/backup/detections-migration/sigma/custom-filters.txt || fail "Failed to export Custom Sigma Filters."
echo
echo "Backing up Playbook database..."
docker exec so-mysql sh -c "mysqldump -uroot -p${MYSQLPW} --databases playbook > /tmp/playbook-dump" || fail "Failed to dump Playbook database."
docker cp so-mysql:/tmp/playbook-dump /nsm/backup/detections-migration/sigma/playbook-dump.sql || fail "Failed to backup Playbook database."
fi
echo
echo "Stopping Playbook services & cleaning up..."
for container in so-playbook so-mysql so-soctopus; do
if [ -n "$(docker ps -q -f name=^${container}$)" ]; then
docker stop $container
fi
done
sed -i '/so-playbook\|so-soctopus\|so-mysql/d' /opt/so/conf/so-status/so-status.conf
rm -f /usr/sbin/so-playbook-* /usr/sbin/so-soctopus-* /usr/sbin/so-mysql-*
echo
echo "Playbook Migration is complete...."
}
suricata_idstools_removal_pre() {
# For SOUPs beginning with 2.4.200 - pre SOUP checks
# Create syncBlock file
install -d -o 939 -g 939 -m 755 /opt/so/conf/soc/fingerprints
install -o 939 -g 939 -m 644 /dev/null /opt/so/conf/soc/fingerprints/suricataengine.syncBlock
cat > /opt/so/conf/soc/fingerprints/suricataengine.syncBlock << EOF
Suricata ruleset sync is blocked until this file is removed. Make sure that you have manually added any custom Suricata rulesets via SOC config - review the documentation for more details: securityonion.net/docs
EOF
# Backup custom rules & overrides
mkdir -p /nsm/backup/detections-migration/2-4-200
cp /usr/sbin/so-rule-update /nsm/backup/detections-migration/2-4-200
cp /opt/so/conf/idstools/etc/rulecat.conf /nsm/backup/detections-migration/2-4-200
if [[ -f /opt/so/conf/soc/so-detections-backup.py ]]; then
python3 /opt/so/conf/soc/so-detections-backup.py
# Verify backup by comparing counts
echo "Verifying detection overrides backup..."
es_override_count=$(curl -s -K /opt/so/conf/elasticsearch/curl.config -k -L \
"https://localhost:9200/so-detection/_count" \
-H "Content-Type: application/json" \
-d '{"query": {"bool": {"must": [{"exists": {"field": "so_detection.overrides"}}]}}}' | jq -r '.count')
backup_override_count=$(find /nsm/backup/detections/repo/*/overrides -type f 2>/dev/null | wc -l)
echo " Elasticsearch overrides: $es_override_count"
echo " Backed up overrides: $backup_override_count"
if [[ "$es_override_count" -eq "$backup_override_count" ]]; then
echo " Override backup verified successfully"
else
echo " Warning: Override counts do not match"
fi
else
echo "SOC Detections backup script not found, skipping detection backup"
fi
}
suricata_idstools_removal_post() {
# For SOUPs beginning with 2.4.200 - post SOUP checks
echo "Checking idstools configuration for custom modifications..."
# Normalize file content for consistent hashing
# Args: $1 - file path
normalize_file() {
local file="$1"
if [[ ! -f "$file" ]]; then
echo "FILE_NOT_FOUND"
return 1
fi
# Strip whitespace, normalize hostname, remove blank lines
sed -E \
-e 's/^[[:space:]]+//; s/[[:space:]]+$//' \
-e '/^$/d' \
-e 's|--url=http://[^:]+:7788|--url=http://MANAGER:7788|' \
"$file"
}
# Hash normalized content
hash_file() {
local file="$1"
local normalized=$(normalize_file "$file")
if [[ "$normalized" == "FILE_NOT_FOUND" ]]; then
echo "FILE_NOT_FOUND"
return 1
fi
echo -n "$normalized" | sha256sum | awk '{print $1}'
}
# Known-default hashes
KNOWN_SO_RULE_UPDATE_HASHES=(
"8f1fe1cb65c08aab78830315b952785c7ccdcc108c5c0474f427e29d4e39ee5f" # non-Airgap
"d23ac5a962c709dcb888103effb71444df72b46009b6c426e280dbfbc7d74d40" # Airgap
)
KNOWN_RULECAT_CONF_HASHES=(
"17fc663a83b30d4ba43ac6643666b0c96343c5ea6ea833fe6a8362fe415b666b" # default
)
# Check a config file against known hashes
# Args: $1 - file path, $2 - array name of known hashes
check_config_file() {
local file="$1"
local known_hashes_array="$2"
local file_display_name=$(basename "$file")
if [[ ! -f "$file" ]]; then
echo "Warning: $file not found"
return 1
fi
echo "Hashing $file..."
local file_hash=$(hash_file "$file")
if [[ "$file_hash" == "FILE_NOT_FOUND" ]]; then
echo "Warning: Could not read $file"
return 1
fi
echo " Hash: $file_hash"
# Check if hash matches any known default
local match_found=0
local -n known_hashes=$known_hashes_array
for known_hash in "${known_hashes[@]}"; do
if [[ "$file_hash" == "$known_hash" ]]; then
match_found=1
echo " Matches known default configuration"
break
fi
done
if [[ $match_found -eq 0 ]]; then
echo "Does not match known default - custom configuration detected"
echo "Custom $file_display_name detected (hash: $file_hash)" >> /opt/so/conf/soc/fingerprints/suricataengine.syncBlock
return 1
fi
return 0
}
# Check so-rule-update and rulecat.conf
SO_RULE_UPDATE="/usr/sbin/so-rule-update"
RULECAT_CONF="/opt/so/conf/idstools/etc/rulecat.conf"
custom_found=0
check_config_file "$SO_RULE_UPDATE" "KNOWN_SO_RULE_UPDATE_HASHES" || custom_found=1
check_config_file "$RULECAT_CONF" "KNOWN_RULECAT_CONF_HASHES" || custom_found=1
# If no custom configs found, remove syncBlock
if [[ $custom_found -eq 0 ]]; then
echo "idstools migration completed successfully - removing Suricata engine syncBlock"
rm -f /opt/so/conf/soc/fingerprints/suricataengine.syncBlock
else
echo "Custom idstools configuration detected - syncBlock remains in place"
echo "Review /opt/so/conf/soc/fingerprints/suricataengine.syncBlock for details"
fi
}
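# Choose the Elastic Agent update path: airgap installs pull artifacts from the mounted media,
# otherwise files are fetched based on the defaults shipped in $UPDATE_DIR.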
determine_elastic_agent_upgrade() {
if [[ $is_airgap -eq 0 ]]; then
update_elastic_agent_airgap
else
set +e
# the new elasticsearch defaults.yaml file is not yet placed in /opt/so/saltstack/default/salt/elasticsearch
update_elastic_agent "$UPDATE_DIR"
set -e
fi
}
update_elastic_agent_airgap() {
get_elastic_agent_vars "/tmp/soagupdate/SecurityOnion"
rsync -av /tmp/soagupdate/fleet/* /nsm/elastic-fleet/artifacts/
tar -xf "$ELASTIC_AGENT_FILE" -C "$ELASTIC_AGENT_EXPANSION_DIR"
}
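# Require at least 10GB free on / before upgrading; returns non-zero when below the threshold.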
verify_upgradespace() {
CURRENTSPACE=$(df -BG / | grep -v Avail | awk '{print $4}' | sed 's/.$//')
if [ "$CURRENTSPACE" -lt "10" ]; then
echo "You are low on disk space."
return 1
else
return 0
fi
}
upgrade_space() {
if ! verify_upgradespace; then
clean_dockers
if ! verify_upgradespace; then
echo "There is not enough space to perform the upgrade. Please free up space and try again"
exit 0
fi
else
echo "You have enough space for upgrade. Proceeding with soup."
fi
}
unmount_update() {
cd /tmp
umount /tmp/soagupdate
}
update_airgap_rules() {
# Copy the rules over to update them for airgap.
rsync -a $UPDATE_DIR/agrules/suricata/* /nsm/rules/suricata/
rsync -a $UPDATE_DIR/agrules/detect-sigma/* /nsm/rules/detect-sigma/
rsync -a $UPDATE_DIR/agrules/detect-yara/* /nsm/rules/detect-yara/
# Copy the securityonion-resources repo over for SOC Detection Summaries and check out the published summaries branch
rsync -a --delete --chown=socore:socore $UPDATE_DIR/agrules/securityonion-resources /opt/so/conf/soc/ai_summary_repos
git config --global --add safe.directory /opt/so/conf/soc/ai_summary_repos/securityonion-resources
git -C /opt/so/conf/soc/ai_summary_repos/securityonion-resources checkout generated-summaries-published
# Copy the securityonion-resources repo over to nsm
rsync -a $UPDATE_DIR/agrules/securityonion-resources/* /nsm/securityonion-resources/
}
update_airgap_repo() {
# Update the files in the repo
echo "Syncing new updates to /nsm/repo"
rsync -a $AGREPO/* /nsm/repo/
echo "Creating repo"
dnf -y install yum-utils createrepo_c
createrepo /nsm/repo
}
update_elasticsearch_index_settings() {
# Update managed indices to reflect latest index template
for idx in "so-detection" "so-detectionhistory" "so-case" "so-casehistory"; do
ilm_name=$idx
if [ "$idx" = "so-detectionhistory" ]; then
ilm_name="so-detection"
elif [ "$idx" = "so-casehistory" ]; then
ilm_name="so-case"
fi
JSON_STRING=$( jq -n --arg ILM_NAME "$ilm_name" '{"settings": {"index.auto_expand_replicas":"0-2","index.lifecycle.name":($ILM_NAME + "-logs")}}')
echo "Checking if index \"$idx\" exists"
exists=$(curl -K /opt/so/conf/elasticsearch/curl.config -s -o /dev/null -w "%{http_code}" -k -L -H "Content-Type: application/json" "https://localhost:9200/$idx")
if [ $exists -eq 200 ]; then
echo "$idx index found..."
echo "Updating $idx index settings"
curl -K /opt/so/conf/elasticsearch/curl.config -s -k -L -H "Content-Type: application/json" "https://localhost:9200/$idx/_settings" -d "$JSON_STRING" -XPUT
echo -e "\n"
else
echo -e "Skipping $idx... index does not exist\n"
fi
done
}
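# For import/eval nodes: update the so-manager_elasticsearch Fleet output policy in place with
# the current CA fingerprint, preserving other customizations.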
update_import_fleet_output() {
if output=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/outputs/so-manager_elasticsearch" --retry 3 --fail 2>/dev/null); then
# Update the current config of so-manager_elasticsearch output policy in place (leaving any customizations like having changed the preset value from 'balanced' to 'performance')
CAFINGERPRINT=$(openssl x509 -in /etc/pki/tls/certs/intca.crt -outform DER | sha256sum | cut -d' ' -f1 | tr '[:lower:]' '[:upper:]')
updated_policy=$(jq --arg CAFINGERPRINT "$CAFINGERPRINT" '.item | (del(.id) | .ca_trusted_fingerprint = $CAFINGERPRINT)' <<< "$output")
if curl -sK /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/outputs/so-manager_elasticsearch" -XPUT -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$updated_policy" --retry 3 --fail 2>/dev/null; then
echo "Successfully updated so-manager_elasticsearch fleet output policy"
else
fail "Failed to update so-manager_elasticsearch fleet output policy"
fi
fi
}
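# Rebuild the grid-logstash Fleet output policy with the current Logstash certificates,
# preserving the configured hosts and default/monitoring flags.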
update_default_logstash_output() {
echo "Updating fleet logstash output policy grid-logstash"
if logstash_policy=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs/so-manager_logstash" --retry 3 --retry-delay 10 --fail 2>/dev/null); then
# Keep already configured hosts for this update, subsequent host updates come from so-elastic-fleet-outputs-update
HOSTS=$(echo "$logstash_policy" | jq -r '.item.hosts')
DEFAULT_ENABLED=$(echo "$logstash_policy" | jq -r '.item.is_default')
DEFAULT_MONITORING_ENABLED=$(echo "$logstash_policy" | jq -r '.item.is_default_monitoring')
LOGSTASHKEY=$(openssl rsa -in /etc/pki/elasticfleet-logstash.key)
LOGSTASHCRT=$(openssl x509 -in /etc/pki/elasticfleet-logstash.crt)
LOGSTASHCA=$(openssl x509 -in /etc/pki/tls/certs/intca.crt)
JSON_STRING=$(jq -n \
--argjson HOSTS "$HOSTS" \
--arg DEFAULT_ENABLED "$DEFAULT_ENABLED" \
--arg DEFAULT_MONITORING_ENABLED "$DEFAULT_MONITORING_ENABLED" \
--arg LOGSTASHKEY "$LOGSTASHKEY" \
--arg LOGSTASHCRT "$LOGSTASHCRT" \
--arg LOGSTASHCA "$LOGSTASHCA" \
'{"name":"grid-logstash","type":"logstash","hosts": $HOSTS,"is_default": $DEFAULT_ENABLED,"is_default_monitoring": $DEFAULT_MONITORING_ENABLED,"config_yaml":"","ssl":{"certificate": $LOGSTASHCRT,"certificate_authorities":[ $LOGSTASHCA ]},"secrets":{"ssl":{"key": $LOGSTASHKEY }}}')
fi
if curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_logstash" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" --retry 3 --retry-delay 10 --fail; then
echo "Successfully updated grid-logstash fleet output policy"
fi
}
update_salt_mine() {
echo "Populating the mine with mine_functions for each host."
set +e
salt \* mine.update -b 50
set -e
}
update_version() {
# Update the version to the latest
echo "Updating the Security Onion version file."
echo $NEWVERSION > /etc/soversion
echo $HOTFIXVERSION > /etc/sohotfix
sed -i "s/soversion:.*/soversion: $NEWVERSION/" /opt/so/saltstack/local/pillar/global/soc_global.sls
}
upgrade_check() {
# Let's make sure we actually need to update.
NEWVERSION=$(cat $UPDATE_DIR/VERSION)
HOTFIXVERSION=$(cat $UPDATE_DIR/HOTFIX)
if [ ! -f /etc/sohotfix ]; then
touch /etc/sohotfix
fi
[[ -f /etc/sohotfix ]] && CURRENTHOTFIX=$(cat /etc/sohotfix)
if [ "$INSTALLEDVERSION" == "$NEWVERSION" ]; then
echo "Checking to see if there are hotfixes needed"
if [ "$HOTFIXVERSION" == "$CURRENTHOTFIX" ]; then
echo "You are already running the latest version of Security Onion."
exit 0
else
echo "We need to apply a hotfix"
is_hotfix=true
fi
else
is_hotfix=false
fi
}
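# Compare the installed Salt version against the version pinned in the new master.defaults.yaml
# and flag an upgrade if they differ.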
upgrade_check_salt() {
NEWSALTVERSION=$(grep "version:" $UPDATE_DIR/salt/salt/master.defaults.yaml | grep -o "[0-9]\+\.[0-9]\+")
if [ "$INSTALLEDSALTVERSION" == "$NEWSALTVERSION" ]; then
echo "You are already running the correct version of Salt for Security Onion."
else
echo "Salt needs to be upgraded to $NEWSALTVERSION."
UPGRADESALT=1
fi
}
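# Upgrade Salt using the bundled bootstrap-salt.sh: remove the version locks/holds, run the
# bootstrap for $NEWSALTVERSION, re-apply the locks, and verify the installed version afterward.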
upgrade_salt() {
echo "Performing upgrade of Salt from $INSTALLEDSALTVERSION to $NEWSALTVERSION."
echo ""
# If rhel family
if [[ $is_rpm ]]; then
# Check if salt-cloud is installed
if rpm -q salt-cloud &>/dev/null; then
SALT_CLOUD_INSTALLED=true
fi
# Check if salt-cloud is configured
if [[ -f /etc/salt/cloud.profiles.d/socloud.conf ]]; then
SALT_CLOUD_CONFIGURED=true
fi
echo "Removing yum versionlock for Salt."
echo ""
yum versionlock delete "salt"
yum versionlock delete "salt-minion"
yum versionlock delete "salt-master"
# Remove salt-cloud versionlock if installed
if [[ $SALT_CLOUD_INSTALLED == true ]]; then
yum versionlock delete "salt-cloud"
fi
echo "Updating Salt packages."
echo ""
set +e
# if oracle run with -r to ignore repos set by bootstrap
if [[ $OS == 'oracle' ]]; then
# Add -L flag only if salt-cloud is already installed
if [[ $SALT_CLOUD_INSTALLED == true ]]; then
run_check_net_err \
"sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -r -L -F -M stable \"$NEWSALTVERSION\"" \
"Could not update salt, please check $SOUP_LOG for details."
else
run_check_net_err \
"sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -r -F -M stable \"$NEWSALTVERSION\"" \
"Could not update salt, please check $SOUP_LOG for details."
fi
# if another rhel family variant we want to run without -r to allow the bootstrap script to manage repos
else
run_check_net_err \
"sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -F -M stable \"$NEWSALTVERSION\"" \
"Could not update salt, please check $SOUP_LOG for details."
fi
set -e
echo "Applying yum versionlock for Salt."
echo ""
yum versionlock add "salt-0:$NEWSALTVERSION-0.*"
yum versionlock add "salt-minion-0:$NEWSALTVERSION-0.*"
yum versionlock add "salt-master-0:$NEWSALTVERSION-0.*"
# Add salt-cloud versionlock if installed
if [[ $SALT_CLOUD_INSTALLED == true ]]; then
yum versionlock add "salt-cloud-0:$NEWSALTVERSION-0.*"
fi
# Else do Ubuntu things
elif [[ $is_deb ]]; then
echo "Removing apt hold for Salt."
echo ""
apt-mark unhold "salt-common"
apt-mark unhold "salt-master"
apt-mark unhold "salt-minion"
echo "Updating Salt packages."
echo ""
set +e
run_check_net_err \
"sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -F -M stable \"$NEWSALTVERSION\"" \
"Could not update salt, please check $SOUP_LOG for details."
set -e
echo "Applying apt hold for Salt."
echo ""
apt-mark hold "salt-common"
apt-mark hold "salt-master"
apt-mark hold "salt-minion"
fi
echo "Checking if Salt was upgraded."
echo ""
# Check that Salt was upgraded
SALTVERSIONPOSTUPGRADE=$(salt --versions-report | grep Salt: | awk '{print $2}')
if [[ "$SALTVERSIONPOSTUPGRADE" != "$NEWSALTVERSION" ]]; then
echo "Salt upgrade failed. Check of indicators of failure in $SOUP_LOG."
echo "Once the issue is resolved, run soup again."
echo "Exiting."
echo ""
exit 1
else
SALTUPGRADED=true
echo "Salt upgrade success."
echo ""
fi
}
verify_latest_update_script() {
get_soup_script_hashes
if [[ "$CURRENTSOUP" == "$GITSOUP" && "$CURRENTCMN" == "$GITCMN" && "$CURRENTIMGCMN" == "$GITIMGCMN" && "$CURRENTSOFIREWALL" == "$GITSOFIREWALL" ]]; then
echo "This version of the soup script is up to date. Proceeding."
else
echo "You are not running the latest soup version. Updating soup and its components. This might take multiple runs to complete."
salt-call state.apply common.soup_scripts queue=True -lerror --file-root=$UPDATE_DIR/salt --local --out-file=/dev/null
# Verify that soup scripts updated as expected
get_soup_script_hashes
if [[ "$CURRENTSOUP" == "$GITSOUP" && "$CURRENTCMN" == "$GITCMN" && "$CURRENTIMGCMN" == "$GITIMGCMN" && "$CURRENTSOFIREWALL" == "$GITSOFIREWALL" ]]; then
echo "Succesfully updated soup scripts."
else
echo "There was a problem updating soup scripts. Trying to rerun script update."
salt-call state.apply common.soup_scripts queue=True -linfo --file-root=$UPDATE_DIR/salt --local
fi
echo ""
echo "The soup script has been modified. Please run soup again to continue the upgrade."
exit 0
fi
}
# Keeping this block in case we need to do a hotfix that requires salt update
apply_hotfix() {
if [[ "$INSTALLEDVERSION" == "2.4.20" ]] ; then
salt-call state.apply elasticfleet -l info queue=True
. /usr/sbin/so-elastic-fleet-common
elastic_fleet_integration_remove endpoints-initial elastic-defend-endpoints
/usr/sbin/so-elastic-fleet-integration-policy-elastic-defend
elif [[ "$INSTALLEDVERSION" == "2.4.30" ]] ; then
if [[ $is_airgap -eq 0 ]]; then
update_airgap_rules
fi
if [[ -f /etc/pki/managerssl.key.old ]]; then
echo "Skipping Certificate Generation"
else
rm -f /opt/so/conf/elastic-fleet/integrations/endpoints-initial/elastic-defend-endpoints.json
so-kibana-restart --force
so-kibana-api-check
. /usr/sbin/so-elastic-fleet-common
elastic_fleet_integration_remove endpoints-initial elastic-defend-endpoints
rm -f /opt/so/state/eaintegrations.txt
salt-call state.apply ca queue=True
stop_salt_minion
mv /etc/pki/managerssl.crt /etc/pki/managerssl.crt.old
mv /etc/pki/managerssl.key /etc/pki/managerssl.key.old
systemctl_func "start" "salt-minion"
(wait_for_salt_minion "$MINIONID" "5" '/dev/stdout' || fail "Salt minion was not running or ready.") 2>&1 | tee -a "$SOUP_LOG"
fi
else
echo "No actions required. ($INSTALLEDVERSION/$HOTFIXVERSION)"
fi
}
failed_soup_restore_items() {
local services=("$cron_service_name" "salt-master" "salt-minion")
for SERVICE_NAME in "${services[@]}"; do
if ! systemctl is-active --quiet "$SERVICE_NAME"; then
systemctl_func "start" "$SERVICE_NAME"
fi
done
enable_highstate
masterunlock
}
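# Main upgrade flow: preflight checks, fetch the update content, update containers and
# (if needed) Salt, apply per-version pre/post upgrade changes, and run highstates.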
main() {
trap 'check_err $?' EXIT
if [ -n "$BRANCH" ]; then
echo "SOUP will use the $BRANCH branch."
echo ""
fi
echo "### Preparing soup at $(date) ###"
echo ""
set_os
check_salt_master_status 1 || fail "Could not talk to salt master: Please run 'systemctl status salt-master' to ensure the salt-master service is running and check the log at /opt/so/log/salt/master."
echo "Checking to see if this is a manager."
echo ""
require_manager
failed_soup_restore_items
check_pillar_items
echo "Checking to see if this is an airgap install."
echo ""
check_airgap
if [[ $is_airgap -eq 0 && $UNATTENDED == true && -z $ISOLOC ]]; then
echo "Missing file argument (-f <FILENAME>) for unattended airgap upgrade."
exit 0
fi
set_minionid
MINION_ROLE=$(lookup_role)
echo "Found that Security Onion $INSTALLEDVERSION is currently installed."
echo ""
if [[ $is_airgap -eq 0 ]]; then
# Let's mount the ISO since this is airgap
airgap_mounted
else
# if not airgap but -f was used
if [[ ! -z "$ISOLOC" ]]; then
airgap_mounted
AGDOCKER=/tmp/soagupdate/docker
fi
echo "Cloning Security Onion github repo into $UPDATE_DIR."
echo "Removing previous upgrade sources."
rm -rf $UPDATE_DIR
echo "Cloning the Security Onion Repo."
clone_to_tmp
fi
echo "Verifying we have the latest soup script."
verify_latest_update_script
echo "Let's see if we need to update Security Onion."
upgrade_check
upgrade_space
echo "Checking for Salt Master and Minion updates."
upgrade_check_salt
set -e
if [[ $is_airgap -eq 0 ]]; then
update_airgap_repo
dnf clean all
check_os_updates
elif [[ $OS == 'oracle' ]]; then
# sync remote repo down to local if not airgap
repo_sync
dnf clean all
check_os_updates
fi
if [ "$is_hotfix" == "true" ]; then
echo "Applying $HOTFIXVERSION hotfix"
# since we don't run the backup.config_backup state on import we won't snapshot previous version states and pillars
if [[ ! "$MINION_ROLE" == "import" ]]; then
backup_old_states_pillars
fi
copy_new_files
create_local_directories "/opt/so/saltstack/default"
apply_hotfix
echo "Hotfix applied"
update_version
enable_highstate
highstate
else
echo ""
echo "Performing upgrade from Security Onion $INSTALLEDVERSION to Security Onion $NEWVERSION."
echo ""
systemctl_func "stop" "$cron_service_name"
echo "Updating dockers to $NEWVERSION."
if [[ $is_airgap -eq 0 ]]; then
airgap_update_dockers
# if not airgap but -f was used
elif [[ ! -z "$ISOLOC" ]]; then
airgap_update_dockers
unmount_update
else
update_registry
set +e
update_docker_containers 'soup' '' '' '/dev/stdout' 2>&1 | tee -a "$SOUP_LOG"
set -e
fi
stop_salt_minion
stop_salt_master
#update_repo
# Does Salt need to be upgraded? If so, update it.
if [[ $UPGRADESALT -eq 1 ]]; then
echo "Upgrading Salt"
# Update the repo files so it can actually upgrade
upgrade_salt
# For Debian-based distros, we need to stop Salt again after the upgrade; the output below is from bootstrap-salt:
# * WARN: Not starting daemons on Debian based distributions
# is not working mostly because starting them is the default behaviour.
if [[ $is_deb ]]; then
stop_salt_minion
stop_salt_master
fi
fi
preupgrade_changes
echo ""
if [[ $is_airgap -eq 0 ]]; then
echo "Updating Rule Files to the Latest."
update_airgap_rules
echo "Updating Playbooks to the Latest."
airgap_playbooks "$UPDATE_DIR"
fi
# since we don't run the backup.config_backup state on import we won't snapshot previous version states and pillars
if [[ ! "$MINION_ROLE" == "import" ]]; then
echo ""
echo "Creating snapshots of default and local Salt states and pillars and saving to /nsm/backup/"
backup_old_states_pillars
fi
echo ""
echo "Copying new Security Onion code from $UPDATE_DIR to $DEFAULT_SALT_DIR."
copy_new_files
echo ""
create_local_directories "/opt/so/saltstack/default"
update_version
echo ""
echo "Locking down Salt Master for upgrade at $(date +"%T.%6N")."
masterlock
systemctl_func "start" "salt-master"
# Testing that salt-master is up by checking that it is connected to itself
check_saltmaster_status
# update the salt-minion configs here and start the minion
# since highstates are disabled above, the minion start should not trigger a highstate
echo ""
echo "Ensuring salt-minion configs are up-to-date."
salt-call state.apply salt.minion -l info queue=True
echo ""
# ensure the mine is updated and populated before highstates run, following the salt-master restart
update_salt_mine
if [[ $SALT_CLOUD_CONFIGURED == true && $SALTUPGRADED == true ]]; then
echo "Updating salt-cloud config to use the new Salt version"
salt-call state.apply salt.cloud.config concurrent=True
fi
enable_highstate
echo ""
echo "Running a highstate. This could take several minutes."
set +e
(wait_for_salt_minion "$MINIONID" "5" '/dev/stdout' || fail "Salt minion was not running or ready.") 2>&1 | tee -a "$SOUP_LOG"
highstate
set -e
stop_salt_master
masterunlock
systemctl_func "start" "salt-master"
check_saltmaster_status
echo "Running a highstate to complete the Security Onion upgrade on this manager. This could take several minutes."
(wait_for_salt_minion "$MINIONID" "5" '/dev/stdout' || fail "Salt minion was not running or ready.") 2>&1 | tee -a "$SOUP_LOG"
# Stop long-running scripts to allow potentially updated scripts to load on the next execution.
killall salt-relay.sh
# ensure the mine is updated and populated before highstates run, following the salt-master restart
update_salt_mine
highstate
check_saltmaster_status
postupgrade_changes
[[ $is_airgap -eq 0 ]] && unmount_update
echo ""
echo "Upgrade to $NEWVERSION complete."
# Everything beyond this is post-upgrade checking, don't fail past this point if something here causes an error
set +e
echo "Checking the number of minions."
NUM_MINIONS=$(ls /opt/so/saltstack/local/pillar/minions/*_*.sls | grep -v adv_ | wc -l)
if [[ $UPGRADESALT -eq 1 ]] && [[ $NUM_MINIONS -gt 1 ]]; then
if [[ $is_airgap -eq 0 ]]; then
echo ""
echo "Cleaning repos on remote Security Onion nodes."
salt -C 'not *_eval and not *_manager and not *_managersearch and not *_standalone and G@os:CentOS' cmd.run "yum clean all"
echo ""
fi
fi
#echo "Checking for local modifications."
#check_local_mods
echo "Checking sudoers file."
check_sudoers
systemctl_func "start" "$cron_service_name"
if [[ -n $lsl_msg ]]; then
case $lsl_msg in
'distributed')
echo "[INFO] The value of log_size_limit in any heavy node minion pillars may be incorrect."
echo " -> We recommend checking and adjusting the values as necessary."
echo " -> Minion pillar directory: /opt/so/saltstack/local/pillar/minions/"
;;
'single-node')
# We can assume the lsl_details array has been set if lsl_msg has this value
echo "[WARNING] The value of log_size_limit (${lsl_details[0]}) does not match the recommended value of ${lsl_details[1]}."
echo " -> We recommend checking and adjusting the value as necessary."
echo " -> File: /opt/so/saltstack/local/pillar/minions/${lsl_details[2]}.sls"
;;
esac
fi
if [[ $NUM_MINIONS -gt 1 ]]; then
cat << EOF
This appears to be a distributed deployment. Other nodes should update themselves at the next Salt highstate (typically within 15 minutes). Do not manually restart anything until you know that all the search/heavy nodes in your deployment are updated. This is especially important if you are using true clustering for Elasticsearch.
Each minion is on a random 15 minute check-in period and things like network bandwidth can be a factor in how long the actual upgrade takes. If you have a heavy node on a slow link, it is going to take a while to get the containers to it. Depending on what changes happened between the versions, Elasticsearch might not be able to talk to said heavy node until the update is complete.
If it looks like you're missing data after the upgrade, please avoid restarting services and instead make sure at least one search node has completed its upgrade. The best way to do this is to run 'sudo salt-call state.highstate' from a search node and make sure there are no errors. Typically if it works on one node it will work on the rest. Sensor nodes are less complex and will update as they check in so you can monitor those from the Grid section of SOC.
For more information, please see $DOC_BASE_URL/soup.html#distributed-deployments.
EOF
fi
fi
if [ "$NOTIFYCUSTOMELASTICCONFIG" = true ] ; then
cat << EOF
A custom Elasticsearch configuration has been found at /opt/so/saltstack/local/elasticsearch/files/elasticsearch.yml. This file is no longer referenced in Security Onion versions >= 2.3.80.
If you still need those customizations, you'll need to manually migrate them to the new Elasticsearch config as shown at $DOC_BASE_URL/elasticsearch.html.
EOF
fi
# check if the FINAL_MESSAGE_QUEUE is not empty
if (( ${#FINAL_MESSAGE_QUEUE[@]} != 0 )); then
echo "The following additional information applies specifically to your grid:"
for m in "${FINAL_MESSAGE_QUEUE[@]}"; do
echo "$m"
echo
done
fi
echo "### soup has been served at $(date) ###"
}
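# Options: -b <n> sets the Salt batch size, -f <path> points at the ISO or media location
# (e.g. soup -f /home/myuser/securityonion-2.x.y.iso), and -y runs unattended
# (requires prior acceptance of the Elastic License).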
while getopts ":b:f:y" opt; do
case ${opt} in
b )
BATCHSIZE="$OPTARG"
if ! [[ "$BATCHSIZE" =~ ^[1-9][0-9]*$ ]]; then
echo "Batch size must be a number greater than 0."
exit 1
fi
;;
y )
if [[ ! -f /opt/so/state/yeselastic.txt ]]; then
echo "Cannot run soup in unattended mode. You must run soup manually to accept the Elastic License."
exit 1
else
UNATTENDED=true
fi
;;
f )
ISOLOC="$OPTARG"
;;
\? )
echo "Usage: soup [-b] [-y] [-f <iso location>]"
exit 1
;;
: )
echo "Invalid option: $OPTARG requires an argument"
exit 1
;;
esac
done
shift $((OPTIND - 1))
if [ -f $SOUP_LOG ]; then
CURRENT_TIME=$(date +%Y%m%d.%H%M%S)
mv $SOUP_LOG $SOUP_LOG.$INSTALLEDVERSION.$CURRENT_TIME
fi
if [[ -z $UNATTENDED ]]; then
cat << EOF
SOUP - Security Onion UPdater
Please review the following for more information about the update process and recent updates:
$DOC_BASE_URL/soup.html
https://blog.securityonion.net
WARNING: If you run soup via an SSH session and that SSH session terminates, then any processes running in that session would terminate. You should avoid leaving soup unattended especially if the machine you are SSHing from is configured to sleep after a period of time. You might also consider using something like screen or tmux so that if your SSH session terminates, the processes will continue running on the server.
EOF
cat << EOF
Press Enter to continue or Ctrl-C to cancel.
EOF
read -r input
fi
main "$@" | tee -a $SOUP_LOG