mirror of
https://github.com/Security-Onion-Solutions/securityonion.git
synced 2025-12-06 09:12:45 +01:00
1452 lines
48 KiB
Bash
Executable File
1452 lines
48 KiB
Bash
Executable File
#!/bin/bash
|
||
|
||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||
# Elastic License 2.0.
|
||
|
||
|
||
# Shared helper libraries used throughout soup (logging, version helpers,
# container/image helpers).
. /usr/sbin/so-common
. /usr/sbin/so-image-common

# Where the Security Onion repo is cloned to (re-pointed at the mounted ISO
# content by check_airgap on airgap installs).
UPDATE_DIR=/tmp/sogh/securityonion
# Read-only copy of the default salt states shipped with the product.
DEFAULT_SALT_DIR=/opt/so/saltstack/default
# Version currently installed on this grid.
INSTALLEDVERSION=$(cat /etc/soversion)
# Tracks the version as post-upgrade steps are applied; starts at the
# installed version and is advanced by each post_to_* helper.
POSTVERSION=$INSTALLEDVERSION
# Version of Salt currently installed on this node.
INSTALLEDSALTVERSION=$(salt --versions-report | grep Salt: | awk '{print $2}')
# Salt batch size when applying states across many minions at once.
BATCHSIZE=5
SOUP_LOG=/root/soup.log
WHATWOULDYOUSAYYAHDOHERE=soup
whiptail_title='Security Onion UPdater'
NOTIFYCUSTOMELASTICCONFIG=false
# Salt top file and the backup location used by masterlock/masterunlock.
TOPFILE=/opt/so/saltstack/default/salt/top.sls
BACKUPTOPFILE=/opt/so/saltstack/default/salt/top.sls.backup
# used to display messages to the user at the end of soup
declare -a FINAL_MESSAGE_QUEUE=()
|
||
|
||
|
||
check_err() {
  # Exit handler: translate a non-zero exit code into a human-readable
  # message, attempt to restore partially-upgraded items, then exit with
  # the same code. A zero code is a no-op.
  #   $1 - exit code from the failing command
  local exit_code=$1
  # Fix: "occured" -> "occurred" in the user-facing message.
  local err_msg="Unhandled error occurred, please check $SOUP_LOG for details."

  # If a more specific handler already reported this error, just propagate it.
  [[ $ERR_HANDLED == true ]] && exit "$exit_code"

  if [[ $exit_code -ne 0 ]]; then
    # Best-effort cleanup; do not let restore failures mask the original error.
    set +e
    failed_soup_restore_items

    printf '%s' "Soup failed with error $exit_code: "
    case $exit_code in
      2) echo 'No such file or directory' ;;
      5) echo 'Interrupted system call' ;;
      12) echo 'Out of memory' ;;
      28)
        echo 'No space left on device'
        echo "Likely ran out of space on disk, please review hardware requirements for Security Onion: $DOC_BASE_URL/hardware.html"
        ;;
      30) echo 'Read-only file system' ;;
      35) echo 'Resource temporarily unavailable' ;;
      64) echo 'Machine is not on the network' ;;
      67) echo 'Link has been severed' ;;
      100) echo 'Network is down' ;;
      101) echo 'Network is unreachable' ;;
      102) echo 'Network reset' ;;
      110) echo 'Connection timed out' ;;
      111) echo 'Connection refused' ;;
      112) echo 'Host is down' ;;
      113) echo 'No route to host' ;;
      *)
        echo 'Unhandled error'
        echo "$err_msg"
        ;;
    esac
    # Codes 64-113 are network-related errno values; also point at the log.
    if [[ $exit_code -ge 64 && $exit_code -le 113 ]]; then
      echo "$err_msg"
    fi

    exit "$exit_code"
  fi
}
|
||
|
||
add_common() {
  # Refresh the shared helper scripts from the freshly cloned repo,
  # re-apply the common state, then stop so the operator can re-run soup
  # with the updated tooling in place.
  local sbin_src="$UPDATE_DIR/salt/common/tools/sbin"
  local sbin_dst="$DEFAULT_SALT_DIR/salt/common/tools/sbin"
  cp "$sbin_src/so-common" "$sbin_dst/"
  cp "$sbin_src/so-image-common" "$sbin_dst/"
  salt-call state.apply common queue=True
  echo "Run soup one more time"
  exit 0
}
|
||
|
||
airgap_mounted() {
  # Ensure the Security Onion ISO content is available at /tmp/soagupdate.
  # Accepts (via $ISOLOC, prompting the user when unset) one of: an ISO
  # file, a directory that already holds the extracted content, or a block
  # device such as /dev/cdrom.
  # NOTE(review): the failure paths exit 0, presumably so the global error
  # trap does not also fire — confirm before changing these to non-zero.
  # Let's see if the ISO is already mounted.
  if [[ -f /tmp/soagupdate/SecurityOnion/VERSION ]]; then
    echo "The ISO is already mounted"
  else
    if [[ -z $ISOLOC ]]; then
      echo "This is airgap. Ask for a location."
      echo ""
      cat << EOF
In order for soup to proceed, the path to the downloaded Security Onion ISO file, or the path to the CD-ROM or equivalent device containing the ISO media must be provided.
For example, if you have copied the new Security Onion ISO file to your home directory, then the path might look like /home/myuser/securityonion-2.x.y.iso.
Or, if you have burned the new ISO onto an optical disk then the path might look like /dev/cdrom.

EOF
      read -rp 'Enter the path to the new Security Onion ISO content: ' ISOLOC
    fi
    if [[ -f $ISOLOC ]]; then
      # Mounting the ISO image
      mkdir -p /tmp/soagupdate
      mount -t iso9660 -o loop $ISOLOC /tmp/soagupdate
      # Make sure mounting was successful
      if [ ! -f /tmp/soagupdate/SecurityOnion/VERSION ]; then
        echo "Something went wrong trying to mount the ISO."
        echo "Ensure you verify the ISO that you downloaded."
        exit 0
      else
        echo "ISO has been mounted!"
      fi
    elif [[ -f $ISOLOC/SecurityOnion/VERSION ]]; then
      # A directory with already-extracted ISO content: just link it in place.
      ln -s $ISOLOC /tmp/soagupdate
      echo "Found the update content"
    elif [[ -b $ISOLOC ]]; then
      # A block device (CD-ROM / USB media): mount it directly.
      mkdir -p /tmp/soagupdate
      mount $ISOLOC /tmp/soagupdate
      if [ ! -f /tmp/soagupdate/SecurityOnion/VERSION ]; then
        echo "Something went wrong trying to mount the device."
        echo "Ensure you verify the ISO that you downloaded."
        exit 0
      else
        echo "Device has been mounted!"
      fi
    else
      echo "Could not find Security Onion ISO content at ${ISOLOC}"
      echo "Ensure the path you entered is correct, and that you verify the ISO that you downloaded."
      exit 0
    fi
  fi
}
|
||
|
||
airgap_update_dockers() {
  # When running from ISO content (airgap install, or an explicit ISO path
  # supplied), replace the docker registry data and registry image with the
  # versions shipped on the ISO. No-op otherwise.
  if [[ $is_airgap -ne 0 && -z "$ISOLOC" ]]; then
    return 0
  fi
  # Let's copy the tarball
  if [[ ! -f $AGDOCKER/registry.tar ]]; then
    echo "Unable to locate registry. Exiting"
    exit 0
  fi
  echo "Stopping the registry docker"
  docker stop so-dockerregistry
  docker rm so-dockerregistry
  echo "Copying the new dockers over"
  tar xvf "$AGDOCKER/registry.tar" -C /nsm/docker-registry/docker
  echo "Add Registry back"
  docker load -i "$AGDOCKER/registry_image.tar"
}
|
||
|
||
backup_old_states_pillars() {
  # Archive both salt trees (default and local) into /nsm/backup before any
  # files are touched, tagged with the current version and a timestamp.
  local stamp
  # Fix: take the timestamp once — the original called date twice via a
  # useless $(echo ...) wrapper, so the two archives could land in different
  # seconds and get mismatched names.
  stamp=$(date +%Y%m%d-%H%M%S)
  tar czf "/nsm/backup/${INSTALLEDVERSION}_${stamp}_soup_default_states_pillars.tar.gz" /opt/so/saltstack/default/
  tar czf "/nsm/backup/${INSTALLEDVERSION}_${stamp}_soup_local_states_pillars.tar.gz" /opt/so/saltstack/local/
}
|
||
|
||
update_registry() {
  # Tear down the local docker registry container and let the registry
  # state rebuild it from the new image.
  local container=so-dockerregistry
  docker stop "$container"
  docker rm "$container"
  salt-call state.apply registry queue=True
}
|
||
|
||
check_airgap() {
  # Determine whether this grid was installed in airgap mode by inspecting
  # the global pillar. Sets is_airgap (0 = airgap, 1 = connected) and, for
  # airgap installs, re-points the update/docker/repo paths at the mounted
  # ISO content.
  local global_pillar=/opt/so/saltstack/local/pillar/global/soc_global.sls
  # Single awk pass replaces the old cat|grep|awk|tr pipeline (UUOC, three
  # extra processes) with identical output.
  AIRGAP=$(awk '/airgap:/ {print tolower($2)}' "$global_pillar")
  if [[ "$AIRGAP" == "true" ]]; then
    is_airgap=0
    UPDATE_DIR=/tmp/soagupdate/SecurityOnion
    AGDOCKER=/tmp/soagupdate/docker
    AGREPO=/tmp/soagupdate/minimal/Packages
  else
    is_airgap=1
  fi
}
|
||
|
||
# {% raw %}
|
||
|
||
check_local_mods() {
  # Diff every file under the local salt tree against its counterpart in
  # the default tree and warn about local modifications an upgrade may
  # break. Populates local_mod_arr (global) with the modified files.
  local salt_local=/opt/so/saltstack/local

  local_mod_arr=()

  while IFS= read -r -d '' local_file; do
    stripped_path=${local_file#"$salt_local"}
    default_file="${DEFAULT_SALT_DIR}${stripped_path}"
    # Only files that shadow a default file can conflict with the upgrade.
    if [[ -f $default_file ]]; then
      file_diff=$(diff "$default_file" "$local_file" )
      # Count only real content lines of the diff ("<" / ">" markers).
      if [[ $(echo "$file_diff" | grep -Ec "^[<>]") -gt 0 ]]; then
        local_mod_arr+=( "$local_file" )
      fi
    fi
  done< <(find "$salt_local" -type f -print0)

  # Fix: was ${#local_mod_arr}, which is the string length of element 0,
  # not the element count; use ${#local_mod_arr[@]} for the count.
  if [[ ${#local_mod_arr[@]} -gt 0 ]]; then
    echo "Potentially breaking changes found in the following files (check ${DEFAULT_SALT_DIR} for original copy):"
    for file_str in "${local_mod_arr[@]}"; do
      echo "  $file_str"
    done
    echo ""
    echo "To reference this list later, check $SOUP_LOG"
    sleep 10
  fi
}
|
||
|
||
# {% endraw %}
|
||
|
||
check_pillar_items() {
  # Render the manager's pillars and abort soup if any of them fail to
  # compile, listing the offending sls files for the operator.
  local pillar_output
  pillar_output=$(salt-call pillar.items -lerror --out=json)

  cond=$(jq '.local | has("_errors")' <<< "$pillar_output")
  if [[ "$cond" != "true" ]]; then
    printf "\nThe manager's pillars can be rendered. We can proceed with SOUP.\n\n"
  else
    printf "\nThere is an issue rendering the manager's pillars. Please correct the issues in the sls files mentioned below before running SOUP again.\n\n"
    jq '.local._errors[]' <<< "$pillar_output"
    exit 0
  fi
}
|
||
|
||
check_sudoers() {
  # Warn about a stale so-setup entry left behind in the sudoers file; the
  # entry is harmless but no longer needed.
  if ! grep -q "so-setup" /etc/sudoers; then
    return 0
  fi
  echo "There is an entry for so-setup in the sudoers file, this can be safely deleted using \"visudo\"."
}
|
||
|
||
check_os_updates() {
  # Detect pending OS package updates (excluding docker/containerd/salt,
  # which soup manages separately) and — with user consent, or
  # automatically in unattended mode — apply them grid-wide in batches.
  # Check to see if there are OS updates
  echo "Checking for OS updates."
  NEEDUPDATES="We have detected missing operating system (OS) updates. Do you want to install these OS updates now? This could take a while depending on the size of your grid and how many packages are missing, but it is recommended to keep your system updated."
  # Count candidate packages, filtering out the ones soup handles itself.
  OSUPDATES=$(dnf -q list updates | grep -v docker | grep -v containerd | grep -v salt | grep -v Available | wc -l)
  if [[ "$OSUPDATES" -gt 0 ]]; then
    if [[ -z $UNATTENDED ]]; then
      echo "$NEEDUPDATES"
      echo ""
      read -rp "Press U to update OS packages (recommended), C to continue without updates, or E to exit: " confirm
      if [[ "$confirm" == [cC] ]]; then
        echo "Continuing without updating packages"
      elif [[ "$confirm" == [uU] ]]; then
        echo "Applying Grid Updates"
        update_flag=true
      else
        # Any other answer (including E) aborts soup entirely.
        echo "Exiting soup"
        exit 0
      fi
    else
      # Unattended runs always patch.
      update_flag=true
    fi
  else
    echo "Looks like you have an updated OS"
  fi

  if [[ $update_flag == true ]]; then
    # Network failures here get a friendly message instead of tripping the
    # global error trap.
    set +e
    run_check_net_err "salt '*' -b 5 state.apply patch.os queue=True" 'Could not apply OS updates, please check your network connection.'
    set -e
  fi
}
|
||
|
||
clean_dockers() {
  # Reclaim disk space by pruning all unused docker images, containers,
  # networks, and volumes on this node.
  echo "Trying to clean up old dockers."
  docker system prune --all --force --volumes
}
|
||
|
||
clone_to_tmp() {
  # Clone the Security Onion repo into /tmp/sogh, using branch 2.4/main
  # unless overridden via $BRANCH. Exits if the clone did not produce a
  # VERSION file (e.g. no network access).
  # Clean old files
  rm -rf /tmp/sogh
  # Make a temp location for the files
  mkdir -p /tmp/sogh
  cd /tmp/sogh
  # Fix: build the branch option as an array instead of a "-b name" string
  # that depended on unquoted word-splitting (breaks ShellCheck SC2086 and
  # any branch name needing quoting).
  local -a branch_args=(-b "2.4/main")
  if [ -n "$BRANCH" ]; then
    branch_args=(-b "$BRANCH")
  fi
  git clone "${branch_args[@]}" https://github.com/Security-Onion-Solutions/securityonion.git
  cd /tmp
  if [ ! -f "$UPDATE_DIR/VERSION" ]; then
    echo "Update was unable to pull from Github. Please check your Internet access."
    exit 0
  fi
}
|
||
|
||
disable_logstash_heavynodes() {
  # Force logstash.enabled=False on every standard heavynode minion pillar
  # (adv_ override files are left alone) and queue a single notice for the
  # end-of-soup summary.
  c=0
  printf "\nChecking for heavynodes and disabling Logstash if they exist\n"
  for minion_pillar in /opt/so/saltstack/local/pillar/minions/*.sls; do
    # Match heavynode pillars; skip advanced (adv_) override files.
    [[ "$minion_pillar" == *"_heavynode.sls"* ]] || continue
    [[ "$minion_pillar" == *"/opt/so/saltstack/local/pillar/minions/adv_"* ]] && continue
    if [ "$c" -eq 0 ]; then
      c=$((c + 1))
      FINAL_MESSAGE_QUEUE+=("Logstash has been disabled on all heavynodes. It can be re-enabled via Grid Configuration in SOC.")
    fi
    echo "Disabling Logstash for: $minion_pillar"
    so-yaml.py replace "$minion_pillar" logstash.enabled False
  done
}
|
||
|
||
enable_highstate() {
  # Re-enable the highstate job that stop_salt_minion disabled while soup
  # was running.
  printf '%s\n' "Enabling highstate."
  salt-call state.enable highstate -l info --local
  printf '\n'
}
|
||
|
||
get_soup_script_hashes() {
  # Record md5 hashes of the installed helper scripts and their freshly
  # cloned counterparts so soup can detect when it must update itself
  # before continuing.
  CURRENTSOUP=$(md5sum /usr/sbin/soup | cut -d' ' -f1)
  GITSOUP=$(md5sum "$UPDATE_DIR/salt/manager/tools/sbin/soup" | cut -d' ' -f1)
  CURRENTCMN=$(md5sum /usr/sbin/so-common | cut -d' ' -f1)
  GITCMN=$(md5sum "$UPDATE_DIR/salt/common/tools/sbin/so-common" | cut -d' ' -f1)
  CURRENTIMGCMN=$(md5sum /usr/sbin/so-image-common | cut -d' ' -f1)
  GITIMGCMN=$(md5sum "$UPDATE_DIR/salt/common/tools/sbin/so-image-common" | cut -d' ' -f1)
  CURRENTSOFIREWALL=$(md5sum /usr/sbin/so-firewall | cut -d' ' -f1)
  GITSOFIREWALL=$(md5sum "$UPDATE_DIR/salt/manager/tools/sbin/so-firewall" | cut -d' ' -f1)
}
|
||
|
||
highstate() {
  # Run a highstate.
  # Applies the full salt highstate on this node with info-level logging;
  # queue=True waits behind any already-running salt job instead of failing.
  salt-call state.highstate -l info queue=True
}
|
||
|
||
masterlock() {
  # Swap the real top file out for a minimal one so that, while soup works,
  # a highstate on the manager only applies ca/ssl/elasticsearch.
  echo "Locking Salt Master"
  mv -v "$TOPFILE" "$BACKUPTOPFILE"
  cat > "$TOPFILE" <<EOF
base:
 $MINIONID:
 - ca
 - ssl
 - elasticsearch
EOF
}
|
||
|
||
masterunlock() {
  # Restore the original top file saved by masterlock, if one exists.
  if [ ! -f "$BACKUPTOPFILE" ]; then
    echo "Salt Master does not need unlocked."
    return
  fi
  echo "Unlocking Salt Master"
  mv -v "$BACKUPTOPFILE" "$TOPFILE"
}
|
||
|
||
phases_pillar_2_4_80() {
  # 2.4.80 migration: the pillar key
  # elasticsearch.index_settings.global_overrides.index_template.phases is
  # no longer used. Remove it if present and queue a notice telling the
  # operator where the equivalent policy settings now live.
  echo "Checking if pillar value: elasticsearch.index_settings.global_overrides.index_template.phases exists"
  # so-yaml.py exits non-zero when the key is missing, so suspend -e while
  # probing and dispatch on the exit code below.
  set +e
  PHASES=$(so-yaml.py get /opt/so/saltstack/local/pillar/elasticsearch/soc_elasticsearch.sls elasticsearch.index_settings.global_overrides.index_template.phases)
  case $? in
    0)
      # Key exists: drop it and record what was removed for the summary.
      so-yaml.py remove /opt/so/saltstack/local/pillar/elasticsearch/soc_elasticsearch.sls elasticsearch.index_settings.global_overrides.index_template.phases
      read -r -d '' msg <<- EOF
	Found elasticsearch.index_settings.global_overrides.index_template.phases was set to:
	${PHASES}

	Removed unused pillar value: elasticsearch.index_settings.global_overrides.index_template.phases
	To set policies, navigate to the SOC Grid Configuration UI at elasticsearch.index_settings.global_overrides.policy.phases
	A backup of all pillar files was saved to /nsm/backup/
	EOF
      FINAL_MESSAGE_QUEUE+=("$msg")
      ;;
    2) echo "Pillar elasticsearch.index_settings.global_overrides.index_template.phases does not exist. No action taken." ;;
    *) echo "so-yaml.py returned something other than 0 or 2 exit code" ;; # we shouldn't see this
  esac
  set -e
}
|
||
|
||
preupgrade_changes() {
  # This function is to add any new pillar items if needed.
  # Each up_to_X.Y.Z helper migrates from the previous release and advances
  # INSTALLEDVERSION, so a grid several releases behind walks the whole
  # chain in order within a single soup run.
  echo "Checking to see if changes are needed."

  [[ "$INSTALLEDVERSION" == 2.4.2 ]] && up_to_2.4.3
  [[ "$INSTALLEDVERSION" == 2.4.3 ]] && up_to_2.4.4
  [[ "$INSTALLEDVERSION" == 2.4.4 ]] && up_to_2.4.5
  [[ "$INSTALLEDVERSION" == 2.4.5 ]] && up_to_2.4.10
  [[ "$INSTALLEDVERSION" == 2.4.10 ]] && up_to_2.4.20
  [[ "$INSTALLEDVERSION" == 2.4.20 ]] && up_to_2.4.30
  [[ "$INSTALLEDVERSION" == 2.4.30 ]] && up_to_2.4.40
  [[ "$INSTALLEDVERSION" == 2.4.40 ]] && up_to_2.4.50
  [[ "$INSTALLEDVERSION" == 2.4.50 ]] && up_to_2.4.60
  [[ "$INSTALLEDVERSION" == 2.4.60 ]] && up_to_2.4.70
  [[ "$INSTALLEDVERSION" == 2.4.70 ]] && up_to_2.4.80
  [[ "$INSTALLEDVERSION" == 2.4.80 ]] && up_to_2.4.90
  [[ "$INSTALLEDVERSION" == 2.4.90 ]] && up_to_2.4.100
  # Ensure the function reports success even when the last && test is false.
  true
}
|
||
|
||
postupgrade_changes() {
  # This function is to add any new pillar items if needed.
  # Mirror of preupgrade_changes for steps that must run AFTER the new
  # states are in place; each post_to_X.Y.Z advances POSTVERSION so the
  # chain walks forward release by release.
  echo "Running post upgrade processes."

  [[ "$POSTVERSION" == 2.4.2 ]] && post_to_2.4.3
  [[ "$POSTVERSION" == 2.4.3 ]] && post_to_2.4.4
  [[ "$POSTVERSION" == 2.4.4 ]] && post_to_2.4.5
  [[ "$POSTVERSION" == 2.4.5 ]] && post_to_2.4.10
  [[ "$POSTVERSION" == 2.4.10 ]] && post_to_2.4.20
  [[ "$POSTVERSION" == 2.4.20 ]] && post_to_2.4.30
  [[ "$POSTVERSION" == 2.4.30 ]] && post_to_2.4.40
  [[ "$POSTVERSION" == 2.4.40 ]] && post_to_2.4.50
  [[ "$POSTVERSION" == 2.4.50 ]] && post_to_2.4.60
  [[ "$POSTVERSION" == 2.4.60 ]] && post_to_2.4.70
  [[ "$POSTVERSION" == 2.4.70 ]] && post_to_2.4.80
  [[ "$POSTVERSION" == 2.4.80 ]] && post_to_2.4.90
  [[ "$POSTVERSION" == 2.4.90 ]] && post_to_2.4.100
  # Ensure the function reports success even when the last && test is false.
  true
}
|
||
|
||
post_to_2.4.3() {
  # No post-upgrade work required for 2.4.3; just advance the marker.
  echo "Nothing to apply"
  POSTVERSION=2.4.3
}

post_to_2.4.4() {
  # No post-upgrade work required for 2.4.4; just advance the marker.
  echo "Nothing to apply"
  POSTVERSION=2.4.4
}

post_to_2.4.5() {
  # No post-upgrade work required for 2.4.5; just advance the marker.
  echo "Nothing to apply"
  POSTVERSION=2.4.5
}
|
||
|
||
post_to_2.4.10() {
  # 2.4.10 post-upgrade: point Elastic Fleet at the current ES URLs.
  echo "Updating Elastic Fleet ES URLs...."
  /sbin/so-elastic-fleet-es-url-update --force
  POSTVERSION=2.4.10
}
|
||
|
||
post_to_2.4.20() {
  # 2.4.20 post-upgrade: prune unused docker volumes everywhere; fired
  # asynchronously so soup does not block on slow nodes.
  echo "Pruning unused docker volumes on all nodes - This process will run in the background."
  salt --async \* cmd.run "docker volume prune -f"
  POSTVERSION=2.4.20
}
|
||
|
||
post_to_2.4.30() {
  # 2.4.30 post-upgrade: regenerate the CA and the manager's nginx TLS
  # certificate, bouncing salt-minion in between so the new cert is issued.
  # there is an occasional error with this state: pki_public_ca_crt: TypeError: list indices must be integers or slices, not str
  set +e
  salt-call state.apply ca queue=True
  set -e
  stop_salt_minion
  # Move the old manager cert/key aside so the nginx state regenerates them.
  mv /etc/pki/managerssl.crt /etc/pki/managerssl.crt.old
  mv /etc/pki/managerssl.key /etc/pki/managerssl.key.old
  systemctl_func "start" "salt-minion"
  salt-call state.apply nginx queue=True
  # stop_salt_minion disabled highstate; turn it back on.
  enable_highstate
  POSTVERSION=2.4.30
}
|
||
|
||
post_to_2.4.40() {
  # No post-upgrade work required for 2.4.40; just advance the marker.
  echo "Nothing to apply"
  POSTVERSION=2.4.40
}

post_to_2.4.50() {
  # No post-upgrade work required for 2.4.50; just advance the marker.
  echo "Nothing to apply"
  POSTVERSION=2.4.50
}

post_to_2.4.60() {
  # No post-upgrade work required for 2.4.60; just advance the marker.
  echo "Nothing to apply"
  POSTVERSION=2.4.60
}
|
||
|
||
post_to_2.4.70() {
  # 2.4.70 post-upgrade: IDH services are now managed through SOC, so strip
  # the idh.services key from every standard IDH minion pillar (adv_
  # override files are left untouched).
  printf "\nRemoving idh.services from any existing IDH node pillar files\n"
  for pillar_file in /opt/so/saltstack/local/pillar/minions/*.sls; do
    if [[ $pillar_file == *"_idh.sls"* && $pillar_file != *"/opt/so/saltstack/local/pillar/minions/adv_"* ]]; then
      echo "Removing idh.services from: $pillar_file"
      so-yaml.py remove "$pillar_file" idh.services
    fi
  done
  POSTVERSION=2.4.70
}
|
||
|
||
post_to_2.4.80() {
  # 2.4.80 post-upgrade: reconcile the Elastic Fleet output policy with the
  # new Kafka-aware configuration.
  echo -e "\nChecking if update to Elastic Fleet output policy is required\n"
  so-kafka-fleet-output-policy
  POSTVERSION=2.4.80
}
|
||
|
||
post_to_2.4.90() {
  # 2.4.90 post-upgrade: turn off Logstash on heavynodes; it can be
  # re-enabled per node via SOC Grid Configuration.
  disable_logstash_heavynodes
  POSTVERSION=2.4.90
}
|
||
|
||
post_to_2.4.100() {
  # 2.4.100 post-upgrade: rebuild the Elastic Agent installer packages so
  # they match the upgraded stack.
  echo "Regenerating Elastic Agent Installers"
  /sbin/so-elastic-agent-gen-installers
  POSTVERSION=2.4.100
}
|
||
|
||
repo_sync() {
  # Refresh the local package repository as the socore user; soup cannot
  # safely continue if the sync fails.
  echo "Sync the local repo."
  if ! su socore -c '/usr/sbin/so-repo-sync'; then
    fail "Unable to complete so-repo-sync."
  fi
}
|
||
|
||
stop_salt_master() {
  # Stop the salt-master service, first killing all salt jobs grid-wide.
  # kill all salt jobs across the grid because the hang indefinitely if they are queued and salt-master restarts
  set +e
  echo ""
  echo "Killing all Salt jobs across the grid."
  salt \* saltutil.kill_all_jobs >> $SOUP_LOG 2>&1
  echo ""
  echo "Killing any queued Salt jobs on the manager."
  pkill -9 -ef "/usr/bin/python3 /bin/salt" >> $SOUP_LOG 2>&1

  echo ""
  echo "Storing salt-master PID."
  MASTERPID=$(pgrep -f '/opt/saltstack/salt/bin/python3.10 /usr/bin/salt-master MainProcess')
  if [ ! -z "$MASTERPID" ]; then
    echo "Found salt-master PID $MASTERPID"
    systemctl_func "stop" "salt-master"
    # Wait up to 30s for the old process to fully exit; a hard kill is not
    # possible because systemd's restart policy would respawn it.
    if ps -p "$MASTERPID" > /dev/null 2>&1; then
      timeout 30 tail --pid=$MASTERPID -f /dev/null || echo "salt-master still running at $(date +"%T.%6N") after waiting 30s. We cannot kill due to systemd restart option."
    fi
  else
    echo "The salt-master PID was not found. The process '/usr/bin/salt-master MainProcess' is not running."
  fi
  set -e
}
|
||
|
||
stop_salt_minion() {
  # Stop the local salt-minion cleanly: disable highstate so a restart does
  # not trigger one mid-upgrade, kill running jobs, stop the service, then
  # wait up to 30s for the process to exit before force-killing it.
  echo "Disabling highstate to prevent from running if salt-minion restarts."
  salt-call state.disable highstate -l info --local
  echo ""

  # kill all salt jobs before stopping salt-minion
  set +e
  echo ""
  echo "Killing Salt jobs on this node."
  salt-call saltutil.kill_all_jobs --local

  echo "Storing salt-minion pid."
  MINIONPID=$(pgrep -f '/opt/saltstack/salt/bin/python3.10 /usr/bin/salt-minion' | head -1)
  echo "Found salt-minion PID $MINIONPID"
  systemctl_func "stop" "salt-minion"

  # Fix: the original `timeout ... || echo ... && pkill ...` ran pkill -9
  # unconditionally because `a || b && c` parses as `(a || b) && c` and the
  # echo always succeeds. Only force-kill when the wait actually timed out.
  if ! timeout 30 tail --pid=$MINIONPID -f /dev/null; then
    echo "Killing salt-minion at $(date +"%T.%6N") after waiting 30s"
    pkill -9 -ef /usr/bin/salt-minion
  fi
  set -e
}
|
||
|
||
|
||
up_to_2.4.3() {
  # No pre-upgrade migration needed for 2.4.3; just advance the marker.
  echo "Nothing to do for 2.4.3"
  INSTALLEDVERSION=2.4.3
}

up_to_2.4.4() {
  # No pre-upgrade migration needed for 2.4.4; just advance the marker.
  echo "Nothing to do for 2.4.4"
  INSTALLEDVERSION=2.4.4
}

up_to_2.4.5() {
  # No pre-upgrade migration needed for 2.4.5; just advance the marker.
  echo "Nothing to do for 2.4.5"
  INSTALLEDVERSION=2.4.5
}

up_to_2.4.10() {
  # No pre-upgrade migration needed for 2.4.10; just advance the marker.
  echo "Nothing to do for 2.4.10"
  INSTALLEDVERSION=2.4.10
}

up_to_2.4.20() {
  # No pre-upgrade migration needed for 2.4.20; just advance the marker.
  echo "Nothing to do for 2.4.20"
  INSTALLEDVERSION=2.4.20
}

up_to_2.4.30() {
  # No pre-upgrade migration needed for 2.4.30; just advance the marker.
  echo "Nothing to do for 2.4.30"
  INSTALLEDVERSION=2.4.30
}
|
||
|
||
up_to_2.4.40() {
  # 2.4.40: drop the ATT&CK Navigator layer files that are now generated
  # elsewhere.
  echo "Removing old ATT&CK Navigator Layers..."
  rm -f /opt/so/conf/navigator/layers/enterprise-attack.json \
        /opt/so/conf/navigator/layers/nav_layer_playbook.json
  INSTALLEDVERSION=2.4.40
}
|
||
|
||
up_to_2.4.50() {
  # 2.4.50 pre-upgrade: create the STIG pillar/state skeleton, refresh
  # so-yaml.py, back up the salt-master config, and add the NIDS rules and
  # Elastic Fleet artifact paths to the salt file_roots.
  echo "Creating additional pillars.."
  mkdir -p /opt/so/saltstack/local/pillar/stig/
  mkdir -p /opt/so/saltstack/local/salt/stig/
  chown socore:socore /opt/so/saltstack/local/salt/stig/
  touch /opt/so/saltstack/local/pillar/stig/adv_stig.sls
  touch /opt/so/saltstack/local/pillar/stig/soc_stig.sls

  # the file_roots need to be update due to salt 3006.6 upgrade not allowing symlinks outside the file_roots
  # put new so-yaml in place
  echo "Updating so-yaml"
  # \cp bypasses any cp alias (e.g. cp -i) so the overwrite never prompts.
  \cp -v "$UPDATE_DIR/salt/manager/tools/sbin/so-yaml.py" "$DEFAULT_SALT_DIR/salt/manager/tools/sbin/"
  \cp -v "$UPDATE_DIR/salt/manager/tools/sbin/so-yaml.py" /usr/sbin/
  echo "Creating a backup of the salt-master config."
  # INSTALLEDVERSION is 2.4.40 at this point, but we want the backup to have the version
  # so was at prior to starting upgrade. use POSTVERSION here since it doesnt change until
  # post upgrade changes. POSTVERSION set to INSTALLEDVERSION at start of soup
  cp -v /etc/salt/master "/etc/salt/master.so-$POSTVERSION.bak"
  echo "Adding /opt/so/rules to file_roots in /etc/salt/master using so-yaml"
  so-yaml.py append /etc/salt/master file_roots.base /opt/so/rules/nids
  echo "Moving Suricata rules"
  mkdir /opt/so/rules/nids/suri
  chown socore:socore /opt/so/rules/nids/suri
  mv -v /opt/so/rules/nids/*.rules /opt/so/rules/nids/suri/.

  echo "Adding /nsm/elastic-fleet/artifacts to file_roots in /etc/salt/master using so-yaml"
  so-yaml.py append /etc/salt/master file_roots.base /nsm/elastic-fleet/artifacts

  INSTALLEDVERSION=2.4.50
}
|
||
|
||
up_to_2.4.60() {
  # 2.4.60: create a local override directory for the Suricata
  # classification config, owned by socore so SOC can write to it.
  local class_dir=/opt/so/saltstack/local/salt/suricata/classification
  echo "Creating directory to store Suricata classification.config"
  mkdir -vp "$class_dir"
  chown socore:socore "$class_dir"
  INSTALLEDVERSION=2.4.60
}
|
||
|
||
up_to_2.4.70() {
  # 2.4.70 pre-upgrade: run all Detections-module migrations, in order.
  local step
  for step in playbook_migration suricata_idstools_migration toggle_telemetry add_detection_test_pillars; do
    "$step"
  done
  INSTALLEDVERSION=2.4.70
}
|
||
|
||
up_to_2.4.80() {
  # 2.4.80 pre-upgrade: remove the retired index_template.phases pillar,
  # drop the old global.pipeline setting, and seed the new Kafka pillar
  # with a generated cluster id and password.
  phases_pillar_2_4_80
  # Kafka configuration changes

  # Global pipeline changes to REDIS or KAFKA
  echo "Removing global.pipeline pillar configuration"
  sed -i '/pipeline:/d' /opt/so/saltstack/local/pillar/global/soc_global.sls
  # Kafka pillars
  mkdir -p /opt/so/saltstack/local/pillar/kafka
  touch /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls
  touch /opt/so/saltstack/local/pillar/kafka/adv_kafka.sls
  # Overwrite soc_kafka.sls with a fresh kafka block, then append the
  # generated cluster id and password under it.
  echo 'kafka: ' > /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls
  kafka_cluster_id=$(get_random_value 22)
  echo ' cluster_id: '$kafka_cluster_id >> /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls
  kafkapass=$(get_random_value)
  echo ' password: '$kafkapass >> /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls

  INSTALLEDVERSION=2.4.80
}
|
||
|
||
up_to_2.4.90() {
  # 2.4.90 pre-upgrade: restructure the Kafka pillar (password moves under
  # kafka.config, add a truststore password) and speed up the Detections
  # index refresh interval.
  kafkatrust=$(get_random_value)
  # rearranging the kafka pillar to reduce clutter in SOC UI
  kafkasavedpass=$(so-yaml.py get /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.password)
  # Keep only the first line of the so-yaml output (the value itself).
  kafkatrimpass=$(echo "$kafkasavedpass" | sed -n '1 p' )
  echo "Making changes to the Kafka pillar layout"
  so-yaml.py remove /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.password
  so-yaml.py add /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.config.password "$kafkatrimpass"
  so-yaml.py add /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.config.trustpass "$kafkatrust"
  echo "If the Detection index exists, update the refresh_interval"
  so-elasticsearch-query so-detection*/_settings -X PUT -d '{"index":{"refresh_interval":"1s"}}'

  INSTALLEDVERSION=2.4.90
}
|
||
up_to_2.4.100() {
  # Elastic Update for this release, so download Elastic Agent files
  # before the upgrade proper begins.
  determine_elastic_agent_upgrade
  INSTALLEDVERSION=2.4.100
}
|
||
|
||
add_detection_test_pillars() {
  # Internal/CI only: when SOUP_INTERNAL_TESTING is set, constrain the SOC
  # detection engines to a tiny rule subset and make them fail fast.
  if [[ -z "$SOUP_INTERNAL_TESTING" ]]; then
    return 0
  fi
  echo "Adding detection pillar values for automated testing"
  local soc_pillar=/opt/so/saltstack/local/pillar/soc/soc_soc.sls
  so-yaml.py add "$soc_pillar" soc.config.server.modules.elastalertengine.allowRegex SecurityOnion
  so-yaml.py add "$soc_pillar" soc.config.server.modules.elastalertengine.failAfterConsecutiveErrorCount 1
  so-yaml.py add "$soc_pillar" soc.config.server.modules.strelkaengine.allowRegex "EquationGroup_Toolset_Apr17__ELV_.*"
  so-yaml.py add "$soc_pillar" soc.config.server.modules.strelkaengine.failAfterConsecutiveErrorCount 1
  so-yaml.py add "$soc_pillar" soc.config.server.modules.suricataengine.allowRegex "(200033\\d|2100538|2102466)"
  so-yaml.py add "$soc_pillar" soc.config.server.modules.suricataengine.failAfterConsecutiveErrorCount 1
}
|
||
|
||
toggle_telemetry() {
  # Interactive prompt (skipped on airgap or unattended runs) explaining
  # SOC Telemetry and letting the operator opt out; telemetry stays enabled
  # on an empty reply or anything that looks like "yes".
  if [[ -z $UNATTENDED && $is_airgap -ne 0 ]]; then
    cat << ASSIST_EOF

--------------- SOC Telemetry ---------------

The Security Onion development team could use your help! Enabling SOC
Telemetry will help the team understand which UI features are being
used and enables informed prioritization of future development.

Adjust this setting at anytime via the SOC Configuration screen.

Documentation: https://docs.securityonion.net/en/2.4/telemetry.html

ASSIST_EOF

    echo -n "Continue the upgrade with SOC Telemetry enabled [Y/n]? "

    read -r input
    # Normalize the answer: lowercase, then trim surrounding whitespace.
    input=$(echo "${input,,}" | xargs echo -n)
    echo ""
    if [[ ${#input} -eq 0 || "$input" == "yes" || "$input" == "y" || "$input" == "yy" ]]; then
      echo "Thank you for helping improve Security Onion!"
    else
      # Opt out: persist the choice in the SOC pillar, aborting soup if the
      # pillar cannot be written (a silent failure would leave it enabled).
      if so-yaml.py replace /opt/so/saltstack/local/pillar/soc/soc_soc.sls soc.telemetryEnabled false; then
        echo "Disabled SOC Telemetry."
      else
        fail "Failed to disable SOC Telemetry; aborting."
      fi
    fi
    echo ""
  fi
}
|
||
|
||
suricata_idstools_migration() {
  # 2.4.70 pre-upgrade: back up idstools config, Suricata thresholds and
  # local rules to /nsm/backup/detections-migration, then drop a marker
  # file telling SOC to run its Suricata rule migration on next start.
  #Backup the pillars for idstools
  mkdir -p /nsm/backup/detections-migration/idstools
  rsync -av /opt/so/saltstack/local/pillar/idstools/* /nsm/backup/detections-migration/idstools
  if [[ $? -eq 0 ]]; then
    echo "IDStools configuration has been backed up."
  else
    fail "Error: rsync failed to copy the files. IDStools configuration has not been backed up."
  fi

  #Backup Thresholds
  mkdir -p /nsm/backup/detections-migration/suricata
  rsync -av /opt/so/saltstack/local/salt/suricata/thresholding /nsm/backup/detections-migration/suricata
  if [[ $? -eq 0 ]]; then
    echo "Suricata thresholds have been backed up."
  else
    fail "Error: rsync failed to copy the files. Thresholds have not been backed up."
  fi

  #Backup local rules
  mkdir -p /nsm/backup/detections-migration/suricata/local-rules
  rsync -av /opt/so/rules/nids/suri/local.rules /nsm/backup/detections-migration/suricata/local-rules
  # Older installs may also have an idstools-managed local.rules; keep a
  # separate .bak copy so neither version is lost.
  if [[ -f /opt/so/saltstack/local/salt/idstools/rules/local.rules ]]; then
    rsync -av /opt/so/saltstack/local/salt/idstools/rules/local.rules /nsm/backup/detections-migration/suricata/local-rules/local.rules.bak
  fi

  #Tell SOC to migrate
  mkdir -p /opt/so/conf/soc/migrations
  echo "0" > /opt/so/conf/soc/migrations/suricata-migration-2.4.70
  chown -R socore:socore /opt/so/conf/soc/migrations
}
|
||
|
||
playbook_migration() {
  # Migrate from Playbook (Redmine/MySQL + Soctopus) to the new SOC
  # Detections module: back up and disable active ElastAlert rules,
  # export Sigma rules/filters and the Playbook database, then stop and
  # remove the retired Playbook-related services.

  # Start SOC Detections migration
  mkdir -p /nsm/backup/detections-migration/{suricata,sigma/rules,elastalert}

  # Remove the Playbook cronjobs; these services are being retired.
  crontab -l | grep -v 'so-playbook-sync_cron' | crontab -
  crontab -l | grep -v 'so-playbook-ruleupdate_cron' | crontab -

  # Only export data if Playbook was enabled on at least one minion.
  if grep -A 1 'playbook:' /opt/so/saltstack/local/pillar/minions/* | grep -q 'enabled: True'; then

    # Check for active Elastalert rules (both .yaml and .yml)
    active_rules_count=$(find /opt/so/rules/elastalert/playbook/ -type f \( -name "*.yaml" -o -name "*.yml" \) | wc -l)

    if [[ "$active_rules_count" -gt 0 ]]; then
      # Prompt the user to press ENTER if active Elastalert rules found
      echo
      echo "$active_rules_count Active Elastalert/Playbook rules found."
      echo "In preparation for the new Detections module, they will be backed up and then disabled."
      echo
      echo "Press ENTER to proceed."
      echo
      # Read user input
      read -r

      echo "Backing up the Elastalert rules..."
      rsync -av --ignore-missing-args --stats /opt/so/rules/elastalert/playbook/*.{yaml,yml} /nsm/backup/detections-migration/elastalert/

      # Verify that rsync completed successfully
      if [[ $? -eq 0 ]]; then
        # Delete the ElastAlert rules. Both .yaml and .yml files were
        # counted and backed up above, so remove both extensions
        # (previously only *.yaml was removed, leaving *.yml rules active).
        rm -f /opt/so/rules/elastalert/playbook/*.{yaml,yml}
        echo "Active Elastalert rules have been backed up."
      else
        fail "Error: rsync failed to copy the files. Active Elastalert rules have not been backed up."
      fi
    fi

    echo
    echo "Exporting Sigma rules from Playbook..."
    # MySQL root password lives in the secrets pillar.
    MYSQLPW=$(awk '/mysql:/ {print $2}' /opt/so/saltstack/local/pillar/secrets.sls)

    # One file per rule, named by its custom_values row id.
    docker exec so-mysql sh -c "exec mysql -uroot -p${MYSQLPW} -D playbook -sN -e \"SELECT id, value FROM custom_values WHERE value LIKE '%View Sigma%'\"" | while read -r id value; do
      echo -e "$value" > "/nsm/backup/detections-migration/sigma/rules/$id.yaml"
    done || fail "Failed to export Sigma rules..."

    echo
    echo "Exporting Sigma Filters from Playbook..."
    docker exec so-mysql sh -c "exec mysql -uroot -p${MYSQLPW} -D playbook -sN -e \"SELECT issues.subject as title, custom_values.value as filter FROM issues JOIN custom_values ON issues.id = custom_values.customized_id WHERE custom_values.value LIKE '%sofilter%'\"" > /nsm/backup/detections-migration/sigma/custom-filters.txt || fail "Failed to export Custom Sigma Filters."

    echo
    echo "Backing up Playbook database..."
    docker exec so-mysql sh -c "mysqldump -uroot -p${MYSQLPW} --databases playbook > /tmp/playbook-dump" || fail "Failed to dump Playbook database."
    docker cp so-mysql:/tmp/playbook-dump /nsm/backup/detections-migration/sigma/playbook-dump.sql || fail "Failed to backup Playbook database."
  fi

  echo
  echo "Stopping Playbook services & cleaning up..."
  for container in so-playbook so-mysql so-soctopus; do
    # Only stop containers that are actually running (exact name match).
    if [ -n "$(docker ps -q -f name=^${container}$)" ]; then
      docker stop $container
    fi
  done
  # Drop the retired services from so-status and remove their CLI helpers.
  sed -i '/so-playbook\|so-soctopus\|so-mysql/d' /opt/so/conf/so-status/so-status.conf
  rm -f /usr/sbin/so-playbook-* /usr/sbin/so-soctopus-* /usr/sbin/so-mysql-*

  echo
  echo "Playbook Migration is complete...."
}
|
||
|
||
determine_elastic_agent_upgrade() {
  # Pick the Elastic Agent update path: airgap installs copy artifacts
  # from the mounted ISO, while connected installs download artifacts
  # using the freshly cloned repo files in $UPDATE_DIR.
  if [[ $is_airgap -eq 0 ]]; then
    update_elastic_agent_airgap
  else
    # Tolerate a failed download here; errors are surfaced by the helper.
    set +e
    # the new elasticsearch defaults.yaml file is not yet placed in /opt/so/saltstack/default/salt/elasticsearch yet
    update_elastic_agent "$UPDATE_DIR"
    set -e
  fi
}
|
||
|
||
update_elastic_agent_airgap() {
  # Airgap path for Elastic Agent updates: read artifact metadata from the
  # mounted ISO, sync fleet artifacts into place, and unpack the agent
  # tarball. get_elastic_agent_vars sets ELASTIC_AGENT_FILE and
  # ELASTIC_AGENT_EXPANSION_DIR used below.
  get_elastic_agent_vars "/tmp/soagupdate/SecurityOnion"
  rsync -av /tmp/soagupdate/fleet/* /nsm/elastic-fleet/artifacts/
  tar -xf "$ELASTIC_AGENT_FILE" -C "$ELASTIC_AGENT_EXPANSION_DIR"
}
|
||
|
||
verify_upgradespace() {
  # Check free space on the root filesystem. Sets the global CURRENTSPACE
  # to the available space in whole gigabytes. Returns 0 when at least
  # 10GB is free, otherwise prints a warning and returns 1.
  local avail
  # df -BG reports sizes like "42G"; drop the header row and trailing unit.
  avail=$(df -BG / | grep -v Avail | awk '{print $4}' | sed 's/.$//')
  CURRENTSPACE=$avail
  if [ "$CURRENTSPACE" -ge 10 ]; then
    return 0
  fi
  echo "You are low on disk space."
  return 1
}
|
||
|
||
upgrade_space() {
  # Ensure there is enough free disk space for the upgrade. If the first
  # check fails, try reclaiming space by cleaning old Docker images, then
  # check once more before giving up.
  if ! verify_upgradespace; then
    clean_dockers
    if ! verify_upgradespace; then
      echo "There is not enough space to perform the upgrade. Please free up space and try again"
      # NOTE(review): exits with status 0 even though the upgrade cannot
      # proceed, so automation cannot distinguish this from success --
      # confirm whether a non-zero exit is intended here.
      exit 0
    fi
  else
    echo "You have enough space for upgrade. Proceeding with soup."
  fi
}
|
||
|
||
unmount_update() {
  # Unmount the upgrade ISO. Leave the mount point first so the unmount
  # is not blocked by our own working directory.
  cd /tmp
  umount /tmp/soagupdate
}
|
||
|
||
update_airgap_rules() {
  # Copy the rules over to update them for airgap.
  rsync -av $UPDATE_DIR/agrules/suricata/* /nsm/rules/suricata/
  rsync -av $UPDATE_DIR/agrules/detect-sigma/* /nsm/rules/detect-sigma/
  rsync -av $UPDATE_DIR/agrules/detect-yara/* /nsm/rules/detect-yara/
  # Copy the securityonion-resources repo over for SOC Detection Summaries
  # and check out the published summaries branch
  rsync -av --chown=socore:socore $UPDATE_DIR/agrules/securityonion-resources /opt/so/conf/soc/ai_summary_repos
  # Mark the repo safe for git since it is owned by socore, not root.
  git config --global --add safe.directory /opt/so/conf/soc/ai_summary_repos/securityonion-resources
  git -C /opt/so/conf/soc/ai_summary_repos/securityonion-resources checkout generated-summaries-published
  # Copy the securityonion-resources repo over to nsm
  rsync -av $UPDATE_DIR/agrules/securityonion-resources/* /nsm/securityonion-resources/
}
|
||
|
||
update_airgap_repo() {
  # Refresh the local offline package repository from the mounted ISO
  # ($AGREPO) and rebuild the repo metadata so dnf/yum can consume it.
  echo "Syncing new updates to /nsm/repo"
  rsync -av $AGREPO/* /nsm/repo/
  echo "Creating repo"
  dnf -y install yum-utils createrepo
  createrepo /nsm/repo
}
|
||
|
||
update_salt_mine() {
  # Refresh salt mine data on all minions so mine_functions are populated
  # before highstates run. Batched (-b 50) to avoid overloading the master;
  # unreachable minions must not abort soup, hence the temporary set +e.
  echo "Populating the mine with mine_functions for each host."
  set +e
  salt \* mine.update -b 50
  set -e
}
|
||
|
||
update_version() {
  # Record the newly installed Security Onion version and hotfix level,
  # and keep the soversion pillar value in sync for the rest of the grid.
  echo "Updating the Security Onion version file."
  printf '%s\n' "$NEWVERSION" > /etc/soversion
  printf '%s\n' "$HOTFIXVERSION" > /etc/sohotfix
  sed -i "s/soversion:.*/soversion: $NEWVERSION/" /opt/so/saltstack/local/pillar/global/soc_global.sls
}
|
||
|
||
upgrade_check() {
  # Decide whether a full upgrade, a hotfix-only run, or nothing is
  # needed. Sets globals: NEWVERSION, HOTFIXVERSION, CURRENTHOTFIX and
  # is_hotfix. Exits 0 when the install is already fully up to date.
  NEWVERSION=$(cat $UPDATE_DIR/VERSION)
  HOTFIXVERSION=$(cat $UPDATE_DIR/HOTFIX)
  # Older installs predate hotfix tracking; create an empty marker file.
  if [ ! -f /etc/sohotfix ]; then
    touch /etc/sohotfix
  fi
  [[ -f /etc/sohotfix ]] && CURRENTHOTFIX=$(cat /etc/sohotfix)
  if [ "$INSTALLEDVERSION" == "$NEWVERSION" ]; then
    echo "Checking to see if there are hotfixes needed"
    if [ "$HOTFIXVERSION" == "$CURRENTHOTFIX" ]; then
      echo "You are already running the latest version of Security Onion."
      exit 0
    else
      echo "We need to apply a hotfix"
      is_hotfix=true
    fi
  else
    is_hotfix=false
  fi

}
|
||
|
||
upgrade_check_salt() {
  # Determine the Salt version shipped with the new release and compare
  # it to the locally installed version. Sets NEWSALTVERSION always, and
  # UPGRADESALT=1 when an upgrade is required.
  NEWSALTVERSION=$(awk '/version:/ {print $2}' $UPDATE_DIR/salt/salt/master.defaults.yaml)
  if [ "$INSTALLEDSALTVERSION" != "$NEWSALTVERSION" ]; then
    echo "Salt needs to be upgraded to $NEWSALTVERSION."
    UPGRADESALT=1
  else
    echo "You are already running the correct version of Salt for Security Onion."
  fi
}
|
||
|
||
upgrade_salt() {
  # Upgrade the Salt master/minion packages to $NEWSALTVERSION using the
  # bundled salt-bootstrap script, handling both RPM (yum versionlock) and
  # Debian (apt-mark hold) package pinning. Exits 1 if the post-upgrade
  # version check does not match the expected version.
  # Fix: corrected user-facing failure message wording
  # ("Check of indicators" -> "Check for indicators").
  SALTUPGRADED=True
  echo "Performing upgrade of Salt from $INSTALLEDSALTVERSION to $NEWSALTVERSION."
  echo ""
  # If rhel family
  if [[ $is_rpm ]]; then
    # The version locks must be lifted before the bootstrap script can
    # install the new packages.
    echo "Removing yum versionlock for Salt."
    echo ""
    yum versionlock delete "salt"
    yum versionlock delete "salt-minion"
    yum versionlock delete "salt-master"
    echo "Updating Salt packages."
    echo ""
    set +e
    # if oracle run with -r to ignore repos set by bootstrap
    if [[ $OS == 'oracle' ]]; then
      run_check_net_err \
        "sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -r -F -M -x python3 stable \"$NEWSALTVERSION\"" \
        "Could not update salt, please check $SOUP_LOG for details."
    # if another rhel family variant we want to run without -r to allow the bootstrap script to manage repos
    else
      run_check_net_err \
        "sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -F -M -x python3 stable \"$NEWSALTVERSION\"" \
        "Could not update salt, please check $SOUP_LOG for details."
    fi
    set -e
    # Re-pin the packages at the new version.
    echo "Applying yum versionlock for Salt."
    echo ""
    yum versionlock add "salt-0:$NEWSALTVERSION-0.*"
    yum versionlock add "salt-minion-0:$NEWSALTVERSION-0.*"
    yum versionlock add "salt-master-0:$NEWSALTVERSION-0.*"
  # Else do Ubuntu things
  elif [[ $is_deb ]]; then
    echo "Removing apt hold for Salt."
    echo ""
    apt-mark unhold "salt-common"
    apt-mark unhold "salt-master"
    apt-mark unhold "salt-minion"
    echo "Updating Salt packages."
    echo ""
    set +e
    run_check_net_err \
      "sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -F -M -x python3 stable \"$NEWSALTVERSION\"" \
      "Could not update salt, please check $SOUP_LOG for details."
    set -e
    echo "Applying apt hold for Salt."
    echo ""
    apt-mark hold "salt-common"
    apt-mark hold "salt-master"
    apt-mark hold "salt-minion"
  fi

  echo "Checking if Salt was upgraded."
  echo ""
  # Check that Salt was upgraded
  SALTVERSIONPOSTUPGRADE=$(salt --versions-report | grep Salt: | awk '{print $2}')
  if [[ "$SALTVERSIONPOSTUPGRADE" != "$NEWSALTVERSION" ]]; then
    echo "Salt upgrade failed. Check for indicators of failure in $SOUP_LOG."
    echo "Once the issue is resolved, run soup again."
    echo "Exiting."
    echo ""
    exit 1
  else
    echo "Salt upgrade success."
    echo ""
  fi

}
|
||
|
||
verify_latest_update_script() {
  # Compare hashes of the currently installed soup scripts against the
  # freshly downloaded copies (get_soup_script_hashes sets CURRENT* and
  # GIT* globals). If they differ, update the scripts locally and exit 0
  # so the user re-runs soup with the new code.
  # Fix: corrected user-facing typo "Succesfully" -> "Successfully".
  get_soup_script_hashes
  if [[ "$CURRENTSOUP" == "$GITSOUP" && "$CURRENTCMN" == "$GITCMN" && "$CURRENTIMGCMN" == "$GITIMGCMN" && "$CURRENTSOFIREWALL" == "$GITSOFIREWALL" ]]; then
    echo "This version of the soup script is up to date. Proceeding."
  else
    echo "You are not running the latest soup version. Updating soup and its components. This might take multiple runs to complete."

    # Apply the soup_scripts state from the new code, masterless (--local).
    salt-call state.apply common.soup_scripts queue=True -lerror --file-root=$UPDATE_DIR/salt --local --out-file=/dev/null

    # Verify that soup scripts updated as expected
    get_soup_script_hashes
    if [[ "$CURRENTSOUP" == "$GITSOUP" && "$CURRENTCMN" == "$GITCMN" && "$CURRENTIMGCMN" == "$GITIMGCMN" && "$CURRENTSOFIREWALL" == "$GITSOFIREWALL" ]]; then
      echo "Successfully updated soup scripts."
    else
      # Retry once with verbose logging so the failure is visible.
      echo "There was a problem updating soup scripts. Trying to rerun script update."
      salt-call state.apply common.soup_scripts queue=True -linfo --file-root=$UPDATE_DIR/salt --local
    fi

    echo ""
    echo "The soup script has been modified. Please run soup again to continue the upgrade."
    exit 0
  fi

}
|
||
# Keeping this block in case we need to do a hotfix that requires salt update
|
||
apply_hotfix() {
  # Apply version-specific hotfix actions based on the currently
  # installed version. Most hotfixes require no action and fall through
  # to the final else branch.
  if [[ "$INSTALLEDVERSION" == "2.4.20" ]] ; then
    # 2.4.20: re-apply elasticfleet and rebuild the Elastic Defend
    # integration policy.
    salt-call state.apply elasticfleet -l info queue=True
    . /usr/sbin/so-elastic-fleet-common
    elastic_fleet_integration_remove endpoints-initial elastic-defend-endpoints
    /usr/sbin/so-elastic-fleet-integration-policy-elastic-defend
  elif [[ "$INSTALLEDVERSION" == "2.4.30" ]] ; then
    # 2.4.30: regenerate the manager SSL certificate and rebuild the
    # Elastic Defend integration against it.
    if [[ $is_airgap -eq 0 ]]; then
      update_airgap_rules
    fi
    # Presence of the .old key means this hotfix already ran once.
    if [[ -f /etc/pki/managerssl.key.old ]]; then
      echo "Skipping Certificate Generation"
    else
      rm -f /opt/so/conf/elastic-fleet/integrations/endpoints-initial/elastic-defend-endpoints.json
      so-kibana-restart --force
      so-kibana-api-check
      . /usr/sbin/so-elastic-fleet-common

      elastic_fleet_integration_remove endpoints-initial elastic-defend-endpoints
      rm -f /opt/so/state/eaintegrations.txt
      salt-call state.apply ca queue=True
      # Move the old cert/key aside while the minion is stopped so the
      # next highstate regenerates them.
      stop_salt_minion
      mv /etc/pki/managerssl.crt /etc/pki/managerssl.crt.old
      mv /etc/pki/managerssl.key /etc/pki/managerssl.key.old
      systemctl_func "start" "salt-minion"
      (wait_for_salt_minion "$MINIONID" "5" '/dev/stdout' || fail "Salt minion was not running or ready.") 2>&1 | tee -a "$SOUP_LOG"
    fi
  else
    echo "No actions required. ($INSTALLEDVERSION/$HOTFIXVERSION)"
  fi
}
|
||
|
||
failed_soup_restore_items() {
  # Restore anything a failed soup run may have left in a bad state:
  # restart stopped services, re-enable highstates, and unlock the
  # salt master. Safe to call repeatedly.
  local svc
  for svc in "$cron_service_name" salt-master salt-minion; do
    if ! systemctl is-active --quiet "$svc"; then
      systemctl_func "start" "$svc"
    fi
  done
  enable_highstate
  masterunlock
}
|
||
|
||
#upgrade salt to 3004.1
|
||
#2_3_10_hotfix_1() {
|
||
# systemctl_func "stop" "$cron_service_name"
|
||
# # update mine items prior to stopping salt-minion and salt-master
|
||
# update_salt_mine
|
||
# stop_salt_minion
|
||
# stop_salt_master
|
||
# update_repo
|
||
# # Does salt need upgraded. If so update it.
|
||
# if [[ $UPGRADESALT -eq 1 ]]; then
|
||
# echo "Upgrading Salt"
|
||
# # Update the repo files so it can actually upgrade
|
||
# upgrade_salt
|
||
# fi
|
||
# systemctl_func "start" "salt-master"
|
||
# systemctl_func "start" "salt-minion"
|
||
# systemctl_func "start" "$cron_service_name"
|
||
|
||
#}
|
||
|
||
main() {
  # Top-level soup driver: validate the environment, fetch the new code
  # (ISO mount for airgap, git clone otherwise), update containers and
  # Salt as needed, then run the upgrade or hotfix highstates. The caller
  # tees all output to $SOUP_LOG.
  trap 'check_err $?' EXIT

  if [ -n "$BRANCH" ]; then
    echo "SOUP will use the $BRANCH branch."
    echo ""
  fi

  echo "### Preparing soup at $(date) ###"
  echo ""
  set_os

  check_salt_master_status 1 || fail "Could not talk to salt master: Please run 'systemctl status salt-master' to ensure the salt-master service is running and check the log at /opt/so/log/salt/master."

  echo "Checking to see if this is a manager."
  echo ""
  require_manager

  # Re-enable anything a previously failed soup run left disabled.
  failed_soup_restore_items

  check_pillar_items

  echo "Checking to see if this is an airgap install."
  echo ""
  check_airgap
  # Unattended airgap upgrades must be given the ISO path up front.
  if [[ $is_airgap -eq 0 && $UNATTENDED == true && -z $ISOLOC ]]; then
    echo "Missing file argument (-f <FILENAME>) for unattended airgap upgrade."
    exit 0
  fi

  set_minionid
  echo "Found that Security Onion $INSTALLEDVERSION is currently installed."
  echo ""
  if [[ $is_airgap -eq 0 ]]; then
    # Let's mount the ISO since this is airgap
    airgap_mounted
  else
    # if not airgap but -f was used
    if [[ ! -z "$ISOLOC" ]]; then
      airgap_mounted
      AGDOCKER=/tmp/soagupdate/docker
    fi
    echo "Cloning Security Onion github repo into $UPDATE_DIR."
    echo "Removing previous upgrade sources."
    rm -rf $UPDATE_DIR
    echo "Cloning the Security Onion Repo."
    clone_to_tmp
  fi
  echo "Verifying we have the latest soup script."
  # May exit 0 here; the user then re-runs soup with the updated scripts.
  verify_latest_update_script

  echo "Let's see if we need to update Security Onion."
  # May exit 0 if already up to date; sets NEWVERSION and is_hotfix.
  upgrade_check
  upgrade_space

  echo "Checking for Salt Master and Minion updates."
  upgrade_check_salt
  set -e

  if [[ $is_airgap -eq 0 ]]; then
    update_airgap_repo
    dnf clean all
    check_os_updates
  elif [[ $OS == 'oracle' ]]; then
    # sync remote repo down to local if not airgap
    repo_sync
    dnf clean all
    check_os_updates
  fi

  if [ "$is_hotfix" == "true" ]; then
    echo "Applying $HOTFIXVERSION hotfix"
    # since we don't run the backup.config_backup state on import we wont snapshot previous version states and pillars
    if [[ ! "$MINIONID" =~ "_import" ]]; then
      backup_old_states_pillars
    fi
    copy_new_files
    create_local_directories "/opt/so/saltstack/default"
    apply_hotfix
    echo "Hotfix applied"
    update_version
    enable_highstate
    highstate
  else
    echo ""
    echo "Performing upgrade from Security Onion $INSTALLEDVERSION to Security Onion $NEWVERSION."
    echo ""

    # Stop scheduled highstates while we operate on the grid.
    systemctl_func "stop" "$cron_service_name"

    echo "Updating dockers to $NEWVERSION."
    if [[ $is_airgap -eq 0 ]]; then
      airgap_update_dockers
    # if not airgap but -f was used
    elif [[ ! -z "$ISOLOC" ]]; then
      airgap_update_dockers
      unmount_update
    else
      update_registry
      set +e
      update_docker_containers 'soup' '' '' '/dev/stdout' 2>&1 | tee -a "$SOUP_LOG"
      set -e
    fi

    stop_salt_minion

    stop_salt_master

    #update_repo

    # Does salt need upgraded. If so update it.
    if [[ $UPGRADESALT -eq 1 ]]; then
      echo "Upgrading Salt"
      # Update the repo files so it can actually upgrade
      upgrade_salt

      # for Debian based distro, we need to stop salt again after upgrade output below is from bootstrap-salt
      # * WARN: Not starting daemons on Debian based distributions
      # is not working mostly because starting them is the default behaviour.
      if [[ $is_deb ]]; then
        stop_salt_minion
        stop_salt_master
      fi
    fi

    preupgrade_changes
    echo ""

    if [[ $is_airgap -eq 0 ]]; then
      echo "Updating Rule Files to the Latest."
      update_airgap_rules
    fi

    # since we don't run the backup.config_backup state on import we wont snapshot previous version states and pillars
    if [[ ! "$MINIONID" =~ "_import" ]]; then
      echo ""
      echo "Creating snapshots of default and local Salt states and pillars and saving to /nsm/backup/"
      backup_old_states_pillars
    fi

    echo ""
    echo "Copying new Security Onion code from $UPDATE_DIR to $DEFAULT_SALT_DIR."
    copy_new_files
    echo ""
    create_local_directories "/opt/so/saltstack/default"
    update_version

    echo ""
    echo "Locking down Salt Master for upgrade at $(date +"%T.%6N")."
    masterlock

    systemctl_func "start" "salt-master"

    # Testing that salt-master is up by checking that is it connected to itself
    set +e
    echo "Waiting on the Salt Master service to be ready."
    check_salt_master_status || fail "Can't access salt master or it is not ready. Check $SOUP_LOG for details."
    set -e

    # update the salt-minion configs here and start the minion
    # since highstate are disabled above, minion start should not trigger a highstate
    echo ""
    echo "Ensuring salt-minion configs are up-to-date."
    salt-call state.apply salt.minion -l info queue=True
    echo ""

    # ensure the mine is updated and populated before highstates run, following the salt-master restart
    update_salt_mine

    enable_highstate

    echo ""
    echo "Running a highstate. This could take several minutes."
    set +e
    (wait_for_salt_minion "$MINIONID" "5" '/dev/stdout' || fail "Salt minion was not running or ready.") 2>&1 | tee -a "$SOUP_LOG"
    highstate
    set -e

    stop_salt_master

    # Lift the lockdown so the rest of the grid can check in again.
    masterunlock

    systemctl_func "start" "salt-master"

    set +e
    echo "Waiting on the Salt Master service to be ready."
    check_salt_master_status || fail "Can't access salt master or it is not ready. Check $SOUP_LOG for details."
    set -e

    echo "Running a highstate to complete the Security Onion upgrade on this manager. This could take several minutes."
    (wait_for_salt_minion "$MINIONID" "5" '/dev/stdout' || fail "Salt minion was not running or ready.") 2>&1 | tee -a "$SOUP_LOG"
    highstate
    postupgrade_changes
    [[ $is_airgap -eq 0 ]] && unmount_update

    echo ""
    echo "Upgrade to $NEWVERSION complete."

    # Everything beyond this is post-upgrade checking, don't fail past this point if something here causes an error
    set +e

    echo "Checking the number of minions."
    NUM_MINIONS=$(ls /opt/so/saltstack/local/pillar/minions/*_*.sls | grep -v adv_ | wc -l)
    if [[ $UPGRADESALT -eq 1 ]] && [[ $NUM_MINIONS -gt 1 ]]; then
      if [[ $is_airgap -eq 0 ]]; then
        echo ""
        echo "Cleaning repos on remote Security Onion nodes."
        salt -C 'not *_eval and not *_manager and not *_managersearch and not *_standalone and G@os:CentOS' cmd.run "yum clean all"
        echo ""
      fi
    fi

    #echo "Checking for local modifications."
    #check_local_mods

    echo "Checking sudoers file."
    check_sudoers

    systemctl_func "start" "$cron_service_name"

    # Surface any log_size_limit advisories collected earlier in the run.
    if [[ -n $lsl_msg ]]; then
      case $lsl_msg in
        'distributed')
          echo "[INFO] The value of log_size_limit in any heavy node minion pillars may be incorrect."
          echo " -> We recommend checking and adjusting the values as necessary."
          echo " -> Minion pillar directory: /opt/so/saltstack/local/pillar/minions/"
          ;;
        'single-node')
          # We can assume the lsl_details array has been set if lsl_msg has this value
          echo "[WARNING] The value of log_size_limit (${lsl_details[0]}) does not match the recommended value of ${lsl_details[1]}."
          echo " -> We recommend checking and adjusting the value as necessary."
          echo " -> File: /opt/so/saltstack/local/pillar/minions/${lsl_details[2]}.sls"
          ;;
      esac
    fi

    if [[ $NUM_MINIONS -gt 1 ]]; then

      cat << EOF

This appears to be a distributed deployment. Other nodes should update themselves at the next Salt highstate (typically within 15 minutes). Do not manually restart anything until you know that all the search/heavy nodes in your deployment are updated. This is especially important if you are using true clustering for Elasticsearch.

Each minion is on a random 15 minute check-in period and things like network bandwidth can be a factor in how long the actual upgrade takes. If you have a heavy node on a slow link, it is going to take a while to get the containers to it. Depending on what changes happened between the versions, Elasticsearch might not be able to talk to said heavy node until the update is complete.

If it looks like you’re missing data after the upgrade, please avoid restarting services and instead make sure at least one search node has completed its upgrade. The best way to do this is to run 'sudo salt-call state.highstate' from a search node and make sure there are no errors. Typically if it works on one node it will work on the rest. Forward nodes are less complex and will update as they check in so you can monitor those from the Grid section of SOC.

For more information, please see $DOC_BASE_URL/soup.html#distributed-deployments.

EOF

    fi
  fi

  if [ "$NOTIFYCUSTOMELASTICCONFIG" = true ] ; then

    cat << EOF

A custom Elasticsearch configuration has been found at /opt/so/saltstack/local/elasticsearch/files/elasticsearch.yml. This file is no longer referenced in Security Onion versions >= 2.3.80.

If you still need those customizations, you'll need to manually migrate them to the new Elasticsearch config as shown at $DOC_BASE_URL/elasticsearch.html.

EOF

  fi

  # check if the FINAL_MESSAGE_QUEUE is not empty
  if (( ${#FINAL_MESSAGE_QUEUE[@]} != 0 )); then
    echo "The following additional information applies specifically to your grid:"
    for m in "${FINAL_MESSAGE_QUEUE[@]}"; do
      echo "$m"
      echo
    done
  fi

  echo "### soup has been served at $(date) ###"
}
|
||
|
||
# Entry point: parse CLI options, rotate any previous soup log, show the
# interactive banner (unless unattended), then run main with all output
# tee'd to $SOUP_LOG.
#   -b <n>   batch size for salt operations (positive integer)
#   -y       unattended mode (requires prior Elastic License acceptance)
#   -f <iso> path to the upgrade ISO (airgap / offline upgrades)
# Fix: usage message now shows that -b takes an argument.
while getopts ":b:f:y" opt; do
  case ${opt} in
    b )
      BATCHSIZE="$OPTARG"
      if ! [[ "$BATCHSIZE" =~ ^[1-9][0-9]*$ ]]; then
        echo "Batch size must be a number greater than 0."
        exit 1
      fi
      ;;
    y )
      # Unattended runs are only allowed once the Elastic License has
      # been accepted interactively in a previous run/install.
      if [[ ! -f /opt/so/state/yeselastic.txt ]]; then
        echo "Cannot run soup in unattended mode. You must run soup manually to accept the Elastic License."
        exit 1
      else
        UNATTENDED=true
      fi
      ;;
    f )
      ISOLOC="$OPTARG"
      ;;
    \? )
      echo "Usage: soup [-b <batch size>] [-y] [-f <iso location>]"
      exit 1
      ;;
    : )
      echo "Invalid option: $OPTARG requires an argument"
      exit 1
      ;;
  esac
done
shift $((OPTIND - 1))

# Rotate the previous log so each run starts with a fresh soup.log.
if [ -f "$SOUP_LOG" ]; then
  CURRENT_TIME=$(date +%Y%m%d.%H%M%S)
  mv "$SOUP_LOG" "$SOUP_LOG.$INSTALLEDVERSION.$CURRENT_TIME"
fi

if [[ -z $UNATTENDED ]]; then
  cat << EOF

SOUP - Security Onion UPdater

Please review the following for more information about the update process and recent updates:
$DOC_BASE_URL/soup.html
https://blog.securityonion.net

WARNING: If you run soup via an SSH session and that SSH session terminates, then any processes running in that session would terminate. You should avoid leaving soup unattended especially if the machine you are SSHing from is configured to sleep after a period of time. You might also consider using something like screen or tmux so that if your SSH session terminates, the processes will continue running on the server.

EOF

  cat << EOF
Press Enter to continue or Ctrl-C to cancel.
EOF

  read -r input
fi

main "$@" | tee -a "$SOUP_LOG"
|