#!/bin/bash

# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

. /usr/sbin/so-common
. /usr/sbin/so-image-common

UPDATE_DIR=/tmp/sogh/securityonion
DEFAULT_SALT_DIR=/opt/so/saltstack/default
INSTALLEDVERSION=$(cat /etc/soversion)
POSTVERSION=$INSTALLEDVERSION
INSTALLEDSALTVERSION=$(salt --versions-report | grep Salt: | awk '{print $2}')
BATCHSIZE=5
SOUP_LOG=/root/soup.log
WHATWOULDYOUSAYYAHDOHERE=soup
whiptail_title='Security Onion UPdater'
NOTIFYCUSTOMELASTICCONFIG=false

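# Most of the numeric exit codes handled below appear to correspond to common Linux errno
# values (for example 28 = ENOSPC, 110 = ETIMEDOUT, 113 = EHOSTUNREACH); codes in the
# 64-113 range are treated as network/system failures and also print the generic error message.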
check_err() {
  local exit_code=$1
  local err_msg="Unhandled error occurred, please check $SOUP_LOG for details."

  [[ $ERR_HANDLED == true ]] && exit $exit_code

  if [[ $exit_code -ne 0 ]]; then

    set +e
    systemctl_func "start" "$cron_service_name"
    systemctl_func "start" "salt-master"
    systemctl_func "start" "salt-minion"
    enable_highstate

    printf '%s' "Soup failed with error $exit_code: "
    case $exit_code in
      2)
        echo 'No such file or directory'
        ;;
      5)
        echo 'Interrupted system call'
        ;;
      12)
        echo 'Out of memory'
        ;;
      28)
        echo 'No space left on device'
        echo "Likely ran out of space on disk, please review hardware requirements for Security Onion: $DOC_BASE_URL/hardware.html"
        ;;
      30)
        echo 'Read-only file system'
        ;;
      35)
        echo 'Resource temporarily unavailable'
        ;;
      64)
        echo 'Machine is not on the network'
        ;;
      67)
        echo 'Link has been severed'
        ;;
      100)
        echo 'Network is down'
        ;;
      101)
        echo 'Network is unreachable'
        ;;
      102)
        echo 'Network reset'
        ;;
      110)
        echo 'Connection timed out'
        ;;
      111)
        echo 'Connection refused'
        ;;
      112)
        echo 'Host is down'
        ;;
      113)
        echo 'No route to host'
        ;;
      *)
        echo 'Unhandled error'
        echo "$err_msg"
        ;;
    esac
    if [[ $exit_code -ge 64 && $exit_code -le 113 ]]; then
      echo "$err_msg"
    fi

    exit $exit_code
  fi
}

add_common() {
  cp $UPDATE_DIR/salt/common/tools/sbin/so-common $DEFAULT_SALT_DIR/salt/common/tools/sbin/
  cp $UPDATE_DIR/salt/common/tools/sbin/so-image-common $DEFAULT_SALT_DIR/salt/common/tools/sbin/
  salt-call state.apply common queue=True
  echo "Run soup one more time"
  exit 0
}

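# airgap_mounted accepts ISOLOC in any of three forms (per the logic below):
#   - a path to the ISO file itself, e.g. /home/myuser/securityonion-2.x.y.iso (loop-mounted)
#   - a directory that already contains SecurityOnion/VERSION (symlinked into place)
#   - a block device such as /dev/cdrom (mounted directly)
# Anything else is rejected and soup exits.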
airgap_mounted() {
  # Let's see if the ISO is already mounted.
  if [[ -f /tmp/soagupdate/SecurityOnion/VERSION ]]; then
    echo "The ISO is already mounted"
  else
    if [[ -z $ISOLOC ]]; then
      echo "This is airgap. Ask for a location."
      echo ""
      cat << EOF
In order for soup to proceed, the path to the downloaded Security Onion ISO file, or the path to the CD-ROM or equivalent device containing the ISO media must be provided.
For example, if you have copied the new Security Onion ISO file to your home directory, then the path might look like /home/myuser/securityonion-2.x.y.iso.
Or, if you have burned the new ISO onto an optical disk then the path might look like /dev/cdrom.

EOF
      read -rp 'Enter the path to the new Security Onion ISO content: ' ISOLOC
    fi
    if [[ -f $ISOLOC ]]; then
      # Mounting the ISO image
      mkdir -p /tmp/soagupdate
      mount -t iso9660 -o loop $ISOLOC /tmp/soagupdate
      # Make sure mounting was successful
      if [ ! -f /tmp/soagupdate/SecurityOnion/VERSION ]; then
        echo "Something went wrong trying to mount the ISO."
        echo "Ensure you verify the ISO that you downloaded."
        exit 0
      else
        echo "ISO has been mounted!"
      fi
    elif [[ -f $ISOLOC/SecurityOnion/VERSION ]]; then
      ln -s $ISOLOC /tmp/soagupdate
      echo "Found the update content"
    elif [[ -b $ISOLOC ]]; then
      mkdir -p /tmp/soagupdate
      mount $ISOLOC /tmp/soagupdate
      if [ ! -f /tmp/soagupdate/SecurityOnion/VERSION ]; then
        echo "Something went wrong trying to mount the device."
        echo "Ensure you verify the ISO that you downloaded."
        exit 0
      else
        echo "Device has been mounted!"
      fi
    else
      echo "Could not find Security Onion ISO content at ${ISOLOC}"
      echo "Ensure the path you entered is correct, and that you verify the ISO that you downloaded."
      exit 0
    fi
  fi
}

airgap_update_dockers() {
  if [[ $is_airgap -eq 0 ]] || [[ ! -z "$ISOLOC" ]]; then
    # Let's copy the tarball
    if [[ ! -f $AGDOCKER/registry.tar ]]; then
      echo "Unable to locate registry. Exiting"
      exit 0
    else
      echo "Stopping the registry docker"
      docker stop so-dockerregistry
      docker rm so-dockerregistry
      echo "Copying the new dockers over"
      tar xvf "$AGDOCKER/registry.tar" -C /nsm/docker-registry/docker
      echo "Add Registry back"
      docker load -i "$AGDOCKER/registry_image.tar"
    fi
  fi
}

backup_old_states_pillars() {
  tar czf /nsm/backup/$(echo $INSTALLEDVERSION)_$(date +%Y%m%d-%H%M%S)_soup_default_states_pillars.tar.gz /opt/so/saltstack/default/
  tar czf /nsm/backup/$(echo $INSTALLEDVERSION)_$(date +%Y%m%d-%H%M%S)_soup_local_states_pillars.tar.gz /opt/so/saltstack/local/
}

update_registry() {
  docker stop so-dockerregistry
  docker rm so-dockerregistry
  salt-call state.apply registry queue=True
}

check_airgap() {
  # See if this is an airgap install
  AIRGAP=$(cat /opt/so/saltstack/local/pillar/global/soc_global.sls | grep airgap: | awk '{print $2}' | tr '[:upper:]' '[:lower:]')
  if [[ "$AIRGAP" == "true" ]]; then
    is_airgap=0
    UPDATE_DIR=/tmp/soagupdate/SecurityOnion
    AGDOCKER=/tmp/soagupdate/docker
    AGREPO=/tmp/soagupdate/minimal/Packages
  else
    is_airgap=1
  fi
}

# {% raw %}

check_local_mods() {
  local salt_local=/opt/so/saltstack/local

  local_mod_arr=()

  while IFS= read -r -d '' local_file; do
    stripped_path=${local_file#"$salt_local"}
    default_file="${DEFAULT_SALT_DIR}${stripped_path}"
    if [[ -f $default_file ]]; then
      file_diff=$(diff "$default_file" "$local_file")
      if [[ $(echo "$file_diff" | grep -Ec "^[<>]") -gt 0 ]]; then
        local_mod_arr+=( "$local_file" )
      fi
    fi
  done < <(find $salt_local -type f -print0)

  if [[ ${#local_mod_arr[@]} -gt 0 ]]; then
    echo "Potentially breaking changes found in the following files (check ${DEFAULT_SALT_DIR} for original copy):"
    for file_str in "${local_mod_arr[@]}"; do
      echo " $file_str"
    done
    echo ""
    echo "To reference this list later, check $SOUP_LOG"
    sleep 10
  fi
}

# {% endraw %}

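# check_pillar_items renders the manager's pillars and refuses to continue if Salt reports
# rendering errors. Roughly equivalent to running this by hand (illustrative):
#   salt-call pillar.items --out=json | jq '.local | has("_errors")'
# which prints "true" when one or more sls files fail to render.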
check_pillar_items() {
  local pillar_output=$(salt-call pillar.items --out=json)

  cond=$(jq '.local | has("_errors")' <<< "$pillar_output")
  if [[ "$cond" == "true" ]]; then
    printf "\nThere is an issue rendering the manager's pillars. Please correct the issues in the sls files mentioned below before running SOUP again.\n\n"
    jq '.local._errors[]' <<< "$pillar_output"
    exit 0
  else
    printf "\nThe manager's pillars can be rendered. We can proceed with SOUP.\n\n"
  fi
}

check_sudoers() {
  if grep -q "so-setup" /etc/sudoers; then
    echo "There is an entry for so-setup in the sudoers file, this can be safely deleted using \"visudo\"."
  fi
}

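# check_log_size_limit compares the pillar value elasticsearch:log_size_limit against a
# recommended value derived from the size of /nsm (or / if /nsm does not exist):
#   recommended = disk_size_in_GB * percent / 100, where percent is 50 for standalone/eval
#   nodes and 80 for everything else.
# Worked example (illustrative): a 1048576000-KB (1000 GB) /nsm on a standalone node
# yields 1000 * 0.50 = 500.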
check_log_size_limit() {
  local num_minion_pillars
  num_minion_pillars=$(find /opt/so/saltstack/local/pillar/minions/ -type f | wc -l)

  if [[ $num_minion_pillars -gt 1 ]]; then
    if find /opt/so/saltstack/local/pillar/minions/ -type f | grep -q "_heavynode"; then
      lsl_msg='distributed'
    fi
  else
    local minion_id
    minion_id=$(lookup_salt_value "id" "" "grains" "" "local")

    local minion_arr
    IFS='_' read -ra minion_arr <<< "$minion_id"

    local node_type="${minion_arr[0]}"

    local current_limit
    # since it is possible for the salt-master service to be stopped when this is run, we need to check the pillar values locally
    # we need to combine the default and local pillars before doing this so we can define --pillar-root in salt-call
    local epoch_date=$(date +%s%N)
    mkdir -vp /opt/so/saltstack/soup_tmp_${epoch_date}/
    cp -r /opt/so/saltstack/default/pillar/ /opt/so/saltstack/soup_tmp_${epoch_date}/
    # use \cp here to overwrite any pillar files from default with those in local for the tmp directory
    \cp -r /opt/so/saltstack/local/pillar/ /opt/so/saltstack/soup_tmp_${epoch_date}/
    current_limit=$(salt-call pillar.get elasticsearch:log_size_limit --local --pillar-root=/opt/so/saltstack/soup_tmp_${epoch_date}/pillar --out=newline_values_only)
    rm -rf /opt/so/saltstack/soup_tmp_${epoch_date}/

    local percent
    case $node_type in
      'standalone' | 'eval')
        percent=50
        ;;
      *)
        percent=80
        ;;
    esac

    local disk_dir="/"
    if [ -d /nsm ]; then
      disk_dir="/nsm"
    fi

    local disk_size_1k
    disk_size_1k=$(df $disk_dir | grep -v "^Filesystem" | awk '{print $2}')

    local ratio="1048576"

    local disk_size_gb
    disk_size_gb=$( echo "$disk_size_1k" "$ratio" | awk '{print($1/$2)}' )

    local new_limit
    new_limit=$( echo "$disk_size_gb" "$percent" | awk '{printf("%.0f", $1 * ($2/100))}')

    if [[ $current_limit != "$new_limit" ]]; then
      lsl_msg='single-node'
      lsl_details=( "$current_limit" "$new_limit" "$minion_id" )
    fi
  fi
}

check_os_updates() {
  # Check to see if there are OS updates
  echo "Checking for OS updates."
  NEEDUPDATES="We have detected missing operating system (OS) updates. Do you want to install these OS updates now? This could take a while depending on the size of your grid and how many packages are missing, but it is recommended to keep your system updated."
  OSUPDATES=$(dnf -q list updates | grep -v docker | grep -v containerd | grep -v salt | grep -v Available | wc -l)
  if [[ "$OSUPDATES" -gt 0 ]]; then
    if [[ -z $UNATTENDED ]]; then
      echo "$NEEDUPDATES"
      echo ""
      read -rp "Press U to update OS packages (recommended), C to continue without updates, or E to exit: " confirm
      if [[ "$confirm" == [cC] ]]; then
        echo "Continuing without updating packages"
      elif [[ "$confirm" == [uU] ]]; then
        echo "Applying Grid Updates"
        update_flag=true
      else
        echo "Exiting soup"
        exit 0
      fi
    else
      update_flag=true
    fi
  else
    echo "Looks like you have an updated OS"
  fi

  if [[ $update_flag == true ]]; then
    set +e
    run_check_net_err "salt '*' -b 5 state.apply patch.os queue=True" 'Could not apply OS updates, please check your network connection.'
    set -e
  fi
}

clean_dockers() {
  # Place Holder for cleaning up old docker images
  echo "Trying to clean up old dockers."
  docker system prune -a -f --volumes
}

clone_to_tmp() {
  # Clean old files
  rm -rf /tmp/sogh
  # Make a temp location for the files
  mkdir -p /tmp/sogh
  cd /tmp/sogh
  SOUP_BRANCH="-b 2.4/main"
  if [ -n "$BRANCH" ]; then
    SOUP_BRANCH="-b $BRANCH"
  fi
  git clone $SOUP_BRANCH https://github.com/Security-Onion-Solutions/securityonion.git
  cd /tmp
  if [ ! -f $UPDATE_DIR/VERSION ]; then
    echo "Update was unable to pull from Github. Please check your Internet access."
    exit 0
  fi
}

enable_highstate() {
  echo "Enabling highstate."
  salt-call state.enable highstate -l info --local
  echo ""
}

highstate() {
  # Run a highstate.
  salt-call state.highstate -l info queue=True
}

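# masterlock temporarily replaces the default top file so that a highstate only applies the
# ca, ssl, and elasticsearch states to this manager while the upgrade runs. The generated
# top.sls looks roughly like this (illustrative, with $MINIONID expanded):
#   base:
#     <manager-minion-id>:
#       - ca
#       - ssl
#       - elasticsearch
# masterunlock restores the backed-up top file afterwards.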
masterlock() {
  echo "Locking Salt Master"
  TOPFILE=/opt/so/saltstack/default/salt/top.sls
  BACKUPTOPFILE=/opt/so/saltstack/default/salt/top.sls.backup
  mv -v $TOPFILE $BACKUPTOPFILE
  echo "base:" > $TOPFILE
  echo "  $MINIONID:" >> $TOPFILE
  echo "    - ca" >> $TOPFILE
  echo "    - ssl" >> $TOPFILE
  echo "    - elasticsearch" >> $TOPFILE
}

masterunlock() {
  echo "Unlocking Salt Master"
  mv -v $BACKUPTOPFILE $TOPFILE
}

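# preupgrade_changes and postupgrade_changes walk the installation forward one release at a
# time: each up_to_X/post_to_X helper applies its changes and then bumps INSTALLEDVERSION or
# POSTVERSION, which lets the next check in the chain fire on the same run. For example, a
# box on 2.4.2 runs up_to_2.4.3, then up_to_2.4.4, and so on until it reaches the newest step.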
preupgrade_changes() {
  # This function is to add any new pillar items if needed.
  echo "Checking to see if changes are needed."

  [[ "$INSTALLEDVERSION" == 2.4.2 ]] && up_to_2.4.3
  [[ "$INSTALLEDVERSION" == 2.4.3 ]] && up_to_2.4.4
  [[ "$INSTALLEDVERSION" == 2.4.4 ]] && up_to_2.4.5
  [[ "$INSTALLEDVERSION" == 2.4.5 ]] && up_to_2.4.10
  [[ "$INSTALLEDVERSION" == 2.4.10 ]] && up_to_2.4.20
  [[ "$INSTALLEDVERSION" == 2.4.20 ]] && up_to_2.4.30
  true
}

postupgrade_changes() {
  # This function runs any post-upgrade tasks that are needed.
  echo "Running post upgrade processes."

  [[ "$POSTVERSION" == 2.4.2 ]] && post_to_2.4.3
  [[ "$POSTVERSION" == 2.4.3 ]] && post_to_2.4.4
  [[ "$POSTVERSION" == 2.4.4 ]] && post_to_2.4.5
  [[ "$POSTVERSION" == 2.4.5 ]] && post_to_2.4.10
  [[ "$POSTVERSION" == 2.4.10 ]] && post_to_2.4.20
  [[ "$POSTVERSION" == 2.4.20 ]] && post_to_2.4.30
  true
}

post_to_2.4.3() {
  echo "Nothing to apply"
  POSTVERSION=2.4.3
}

post_to_2.4.4() {
  echo "Nothing to apply"
  POSTVERSION=2.4.4
}

post_to_2.4.5() {
  echo "Regenerating Elastic Agent Installers"
  /sbin/so-elastic-agent-gen-installers
  POSTVERSION=2.4.5
}

post_to_2.4.10() {
  echo "Updating Elastic Fleet ES URLs...."
  /sbin/so-elastic-fleet-es-url-update --force
  POSTVERSION=2.4.10
}

post_to_2.4.20() {
  echo "Pruning unused docker volumes on all nodes - This process will run in the background."
  salt --async \* cmd.run "docker volume prune -f"
  POSTVERSION=2.4.20
}

post_to_2.4.30() {
  echo "Nothing to apply"
  POSTVERSION=2.4.30
}

repo_sync() {
  echo "Sync the local repo."
  su socore -c '/usr/sbin/so-repo-sync' || fail "Unable to complete so-repo-sync."
}

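# Both stop functions below use `timeout 30 tail --pid=<PID> -f /dev/null` as a bounded wait:
# tail exits as soon as the process with that PID is gone, and timeout caps the wait at 30
# seconds so soup never hangs indefinitely on a stuck service.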
stop_salt_master() {
  # kill all salt jobs across the grid because they hang indefinitely if they are queued and salt-master restarts
  set +e
  echo ""
  echo "Killing all Salt jobs across the grid."
  salt \* saltutil.kill_all_jobs >> $SOUP_LOG 2>&1
  echo ""
  echo "Killing any queued Salt jobs on the manager."
  pkill -9 -ef "/usr/bin/python3 /bin/salt" >> $SOUP_LOG 2>&1
  set -e

  echo ""
  echo "Storing salt-master pid."
  MASTERPID=$(pgrep -f '/opt/saltstack/salt/bin/python3.10 /usr/bin/salt-master MainProcess')
  echo "Found salt-master PID $MASTERPID"
  systemctl_func "stop" "salt-master"
  timeout 30 tail --pid=$MASTERPID -f /dev/null || echo "salt-master still running at $(date +"%T.%6N") after waiting 30s. We cannot kill due to systemd restart option."
}

stop_salt_minion() {
  echo "Disabling highstate to prevent it from running if salt-minion restarts."
  salt-call state.disable highstate -l info --local
  echo ""

  # kill all salt jobs before stopping salt-minion
  set +e
  echo ""
  echo "Killing Salt jobs on this node."
  salt-call saltutil.kill_all_jobs --local
  set -e

  echo "Storing salt-minion pid."
  MINIONPID=$(pgrep -f '/opt/saltstack/salt/bin/python3.10 /usr/bin/salt-minion' | head -1)
  echo "Found salt-minion PID $MINIONPID"
  systemctl_func "stop" "salt-minion"

  set +e
  timeout 30 tail --pid=$MINIONPID -f /dev/null || echo "Killing salt-minion at $(date +"%T.%6N") after waiting 30s" && pkill -9 -ef /usr/bin/salt-minion
  set -e
}

up_to_2.4.3() {
  echo "Nothing to do for 2.4.3"

  INSTALLEDVERSION=2.4.3
}

up_to_2.4.4() {
  echo "Nothing to do for 2.4.4"

  INSTALLEDVERSION=2.4.4
}

up_to_2.4.5() {
  determine_elastic_agent_upgrade

  INSTALLEDVERSION=2.4.5
}

up_to_2.4.10() {
  echo "Nothing to do for 2.4.10"

  INSTALLEDVERSION=2.4.10
}

up_to_2.4.20() {
  echo "Nothing to do for 2.4.20"

  INSTALLEDVERSION=2.4.20
}

up_to_2.4.30() {
  echo "Nothing to do for 2.4.30"

  INSTALLEDVERSION=2.4.30
}

determine_elastic_agent_upgrade() {
  if [[ $is_airgap -eq 0 ]]; then
    update_elastic_agent_airgap
  else
    update_elastic_agent
  fi
}

update_elastic_agent_airgap() {
  rsync -av /tmp/soagupdate/fleet/* /nsm/elastic-fleet/artifacts/
  tar -xf "$ELASTIC_AGENT_FILE" -C "$ELASTIC_AGENT_EXPANSION_DIR"
}

verify_upgradespace() {
  CURRENTSPACE=$(df -BG / | grep -v Avail | awk '{print $4}' | sed 's/.$//')
  if [ "$CURRENTSPACE" -lt "10" ]; then
    echo "You are low on disk space."
    return 1
  else
    return 0
  fi
}

upgrade_space() {
  if ! verify_upgradespace; then
    clean_dockers
    if ! verify_upgradespace; then
      echo "There is not enough space to perform the upgrade. Please free up space and try again"
      exit 0
    fi
  else
    echo "You have enough space for upgrade. Proceeding with soup."
  fi
}

unmount_update() {
  cd /tmp
  umount /tmp/soagupdate
}

update_airgap_rules() {
  # Copy the rules over to update them for airgap.
  rsync -av $UPDATE_DIR/agrules/* /nsm/repo/rules/
}

update_centos_repo() {
  # Update the files in the repo
  echo "Syncing new updates to /nsm/repo"
  rsync -av $AGREPO/* /nsm/repo/
  echo "Creating repo"
  dnf -y install yum-utils createrepo
  createrepo /nsm/repo
}

update_salt_mine() {
  echo "Populating the mine with mine_functions for each host."
  set +e
  salt \* mine.update -b 50
  set -e
}

update_version() {
  # Update the version to the latest
  echo "Updating the Security Onion version file."
  echo $NEWVERSION > /etc/soversion
  echo $HOTFIXVERSION > /etc/sohotfix
  sed -i "s/soversion:.*/soversion: $NEWVERSION/" /opt/so/saltstack/local/pillar/global/soc_global.sls
}

upgrade_check() {
  # Let's make sure we actually need to update.
  NEWVERSION=$(cat $UPDATE_DIR/VERSION)
  HOTFIXVERSION=$(cat $UPDATE_DIR/HOTFIX)
  if [ ! -f /etc/sohotfix ]; then
    touch /etc/sohotfix
  fi
  [[ -f /etc/sohotfix ]] && CURRENTHOTFIX=$(cat /etc/sohotfix)
  if [ "$INSTALLEDVERSION" == "$NEWVERSION" ]; then
    echo "Checking to see if there are hotfixes needed"
    if [ "$HOTFIXVERSION" == "$CURRENTHOTFIX" ]; then
      echo "You are already running the latest version of Security Onion."
      exit 0
    else
      echo "We need to apply a hotfix"
      is_hotfix=true
    fi
  else
    is_hotfix=false
  fi
}

upgrade_check_salt() {
  NEWSALTVERSION=$(grep version: $UPDATE_DIR/salt/salt/master.defaults.yaml | awk '{print $2}')
  if [ "$INSTALLEDSALTVERSION" == "$NEWSALTVERSION" ]; then
    echo "You are already running the correct version of Salt for Security Onion."
  else
    UPGRADESALT=1
  fi
}

upgrade_salt() {
  SALTUPGRADED=True
  echo "Performing upgrade of Salt from $INSTALLEDSALTVERSION to $NEWSALTVERSION."
  echo ""
  # If CentOS
  if [[ $OS == 'centos' ]]; then
    echo "Removing yum versionlock for Salt."
    echo ""
    yum versionlock delete "salt-*"
    echo "Updating Salt packages."
    echo ""
    set +e
    run_check_net_err \
      "sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -r -F -M -x python3 stable \"$NEWSALTVERSION\"" \
      "Could not update salt, please check $SOUP_LOG for details."
    set -e
    echo "Applying yum versionlock for Salt."
    echo ""
    yum versionlock add "salt-*"
  # Else do Ubuntu things
  fi

  echo "Checking if Salt was upgraded."
  echo ""
  # Check that Salt was upgraded
  SALTVERSIONPOSTUPGRADE=$(salt --versions-report | grep Salt: | awk '{print $2}')
  if [[ "$SALTVERSIONPOSTUPGRADE" != "$NEWSALTVERSION" ]]; then
    echo "Salt upgrade failed. Check for indicators of failure in $SOUP_LOG."
    echo "Once the issue is resolved, run soup again."
    echo "Exiting."
    echo ""
    exit 0
  else
    echo "Salt upgrade success."
    echo ""
  fi
}

verify_latest_update_script() {
  # Check to see if the update scripts match. If not, run the new one.
  CURRENTSOUP=$(md5sum /usr/sbin/soup | awk '{print $1}')
  GITSOUP=$(md5sum $UPDATE_DIR/salt/manager/tools/sbin/soup | awk '{print $1}')
  CURRENTCMN=$(md5sum /usr/sbin/so-common | awk '{print $1}')
  GITCMN=$(md5sum $UPDATE_DIR/salt/common/tools/sbin/so-common | awk '{print $1}')
  CURRENTIMGCMN=$(md5sum /usr/sbin/so-image-common | awk '{print $1}')
  GITIMGCMN=$(md5sum $UPDATE_DIR/salt/common/tools/sbin/so-image-common | awk '{print $1}')
  CURRENTSOFIREWALL=$(md5sum /usr/sbin/so-firewall | awk '{print $1}')
  GITSOFIREWALL=$(md5sum $UPDATE_DIR/salt/manager/tools/sbin/so-firewall | awk '{print $1}')

  if [[ "$CURRENTSOUP" == "$GITSOUP" && "$CURRENTCMN" == "$GITCMN" && "$CURRENTIMGCMN" == "$GITIMGCMN" && "$CURRENTSOFIREWALL" == "$GITSOFIREWALL" ]]; then
    echo "This version of the soup script is up to date. Proceeding."
  else
    echo "You are not running the latest soup version. Updating soup and its components. This might take multiple runs to complete."
    cp $UPDATE_DIR/salt/manager/tools/sbin/soup $DEFAULT_SALT_DIR/salt/common/tools/sbin/
    cp $UPDATE_DIR/salt/common/tools/sbin/so-common $DEFAULT_SALT_DIR/salt/common/tools/sbin/
    cp $UPDATE_DIR/salt/common/tools/sbin/so-image-common $DEFAULT_SALT_DIR/salt/common/tools/sbin/
    cp $UPDATE_DIR/salt/manager/tools/sbin/so-firewall $DEFAULT_SALT_DIR/salt/common/tools/sbin/
    salt-call state.apply common.soup_scripts queue=True -linfo --file-root=$UPDATE_DIR/salt --local
    echo ""
    echo "The soup script has been modified. Please run soup again to continue the upgrade."
    exit 0
  fi
}

# Keeping this block in case we need to do a hotfix that requires a salt update
apply_hotfix() {
  if [[ "$INSTALLEDVERSION" == "2.4.20" ]] ; then
    salt-call state.apply elasticfleet -l info queue=True
    . /usr/sbin/so-elastic-fleet-common
    elastic_fleet_integration_remove endpoints-initial elastic-defend-endpoints
    /usr/sbin/so-elastic-fleet-integration-policy-elastic-defend
  # elif [[ "$INSTALLEDVERSION" == "2.3.110" ]] ; then
  #   2_3_10_hotfix_1
  else
    echo "No actions required. ($INSTALLEDVERSION/$HOTFIXVERSION)"
  fi
}

#upgrade salt to 3004.1
#2_3_10_hotfix_1() {
#  systemctl_func "stop" "$cron_service_name"
#  # update mine items prior to stopping salt-minion and salt-master
#  update_salt_mine
#  stop_salt_minion
#  stop_salt_master
#  update_repo
#  # Does salt need upgraded. If so update it.
#  if [[ $UPGRADESALT -eq 1 ]]; then
#    echo "Upgrading Salt"
#    # Update the repo files so it can actually upgrade
#    upgrade_salt
#  fi
#  systemctl_func "start" "salt-master"
#  systemctl_func "start" "salt-minion"
#  systemctl_func "start" "$cron_service_name"
#}

main() {
  trap 'check_err $?' EXIT

  if [ -n "$BRANCH" ]; then
    echo "SOUP will use the $BRANCH branch."
    echo ""
  fi

  echo "### Preparing soup at $(date) ###"
  echo ""

  set_os

  check_salt_master_status 1 || fail "Could not talk to salt master: Please run 'systemctl status salt-master' to ensure the salt-master service is running and check the log at /opt/so/log/salt/master."

  echo "Checking to see if this is a manager."
  echo ""
  require_manager

  check_pillar_items

  echo "Checking to see if this is an airgap install."
  echo ""
  check_airgap
  if [[ $is_airgap -eq 0 && $UNATTENDED == true && -z $ISOLOC ]]; then
    echo "Missing file argument (-f <FILENAME>) for unattended airgap upgrade."
    exit 0
  fi

  set_minionid
  echo "Found that Security Onion $INSTALLEDVERSION is currently installed."
  echo ""
  if [[ $is_airgap -eq 0 ]]; then
    # Let's mount the ISO since this is airgap
    airgap_mounted
  else
    # if not airgap but -f was used
    if [[ ! -z "$ISOLOC" ]]; then
      airgap_mounted
      AGDOCKER=/tmp/soagupdate/docker
    fi
    echo "Cloning Security Onion github repo into $UPDATE_DIR."
    echo "Removing previous upgrade sources."
    rm -rf $UPDATE_DIR
    echo "Cloning the Security Onion Repo."
    clone_to_tmp
  fi
  echo "Verifying we have the latest soup script."
  verify_latest_update_script

  echo "Let's see if we need to update Security Onion."
  upgrade_check
  upgrade_space

  echo "Checking for Salt Master and Minion updates."
  upgrade_check_salt
  set -e

  if [[ $is_airgap -eq 0 ]]; then
    yum clean all
    check_os_updates
  elif [[ $OS == 'oel' ]]; then
    # sync remote repo down to local if not airgap
    repo_sync
    check_os_updates
  fi

if [ "$is_hotfix" == "true" ]; then
|
||
echo "Applying $HOTFIXVERSION hotfix"
|
||
# since we don't run the backup.config_backup state on import we wont snapshot previous version states and pillars
|
||
if [[ ! "$MINIONID" =~ "_import" ]]; then
|
||
backup_old_states_pillars
|
||
fi
|
||
copy_new_files
|
||
apply_hotfix
|
||
echo "Hotfix applied"
|
||
update_version
|
||
enable_highstate
|
||
salt-call state.highstate -l info queue=True
|
||
else
|
||
echo ""
|
||
echo "Performing upgrade from Security Onion $INSTALLEDVERSION to Security Onion $NEWVERSION."
|
||
echo ""
|
||
|
||
systemctl_func "stop" "$cron_service_name"
|
||
|
||
# update mine items prior to stopping salt-minion and salt-master
|
||
update_salt_mine
|
||
|
||
echo "Updating dockers to $NEWVERSION."
|
||
if [[ $is_airgap -eq 0 ]]; then
|
||
airgap_update_dockers
|
||
# if not airgap but -f was used
|
||
elif [[ ! -z "$ISOLOC" ]]; then
|
||
airgap_update_dockers
|
||
unmount_update
|
||
else
|
||
update_registry
|
||
set +e
|
||
update_docker_containers 'soup' '' '' '/dev/stdout' 2>&1 | tee -a "$SOUP_LOG"
|
||
set -e
|
||
fi
|
||
|
||
stop_salt_minion
|
||
|
||
stop_salt_master
|
||
|
||
#update_repo
|
||
|
||
# Does salt need upgraded. If so update it.
|
||
if [[ $UPGRADESALT -eq 1 ]]; then
|
||
echo "Upgrading Salt"
|
||
# Update the repo files so it can actually upgrade
|
||
upgrade_salt
|
||
fi
|
||
|
||
preupgrade_changes
|
||
echo ""
|
||
|
||
if [[ $is_airgap -eq 0 ]]; then
|
||
echo "Updating Rule Files to the Latest."
|
||
update_airgap_rules
|
||
fi
|
||
|
||
# Only update the repo if its airgap
|
||
if [[ $is_airgap -eq 0 && $UPGRADESALT -ne 1 ]]; then
|
||
update_centos_repo
|
||
fi
|
||
|
||
# since we don't run the backup.config_backup state on import we wont snapshot previous version states and pillars
|
||
if [[ ! "$MINIONID" =~ "_import" ]]; then
|
||
echo ""
|
||
echo "Creating snapshots of default and local Salt states and pillars and saving to /nsm/backup/"
|
||
backup_old_states_pillars
|
||
fi
|
||
|
||
echo ""
|
||
echo "Copying new Security Onion code from $UPDATE_DIR to $DEFAULT_SALT_DIR."
|
||
copy_new_files
|
||
echo ""
|
||
update_version
|
||
|
||
echo ""
|
||
echo "Locking down Salt Master for upgrade at $(date +"%T.%6N")."
|
||
masterlock
|
||
|
||
systemctl_func "start" "salt-master"
|
||
|
||
# Testing that salt-master is up by checking that is it connected to itself
|
||
set +e
|
||
echo "Waiting on the Salt Master service to be ready."
|
||
check_salt_master_status || fail "Can't access salt master or it is not ready. Check $SOUP_LOG for details."
|
||
set -e
|
||
|
||
# update the salt-minion configs here and start the minion
|
||
# since highstate are disabled above, minion start should not trigger a highstate
|
||
echo ""
|
||
echo "Ensuring salt-minion configs are up-to-date."
|
||
salt-call state.apply salt.minion -l info queue=True
|
||
echo ""
|
||
|
||
# Only regenerate osquery packages if Fleet is enabled
|
||
FLEET_MANAGER=$(lookup_pillar fleet_manager)
|
||
FLEET_NODE=$(lookup_pillar fleet_node)
|
||
if [[ "$FLEET_MANAGER" == "True" || "$FLEET_NODE" == "True" ]]; then
|
||
echo ""
|
||
echo "Regenerating Osquery Packages.... This will take several minutes."
|
||
salt-call state.apply fleet.event_gen-packages -l info queue=True
|
||
echo ""
|
||
fi
|
||
|
||
enable_highstate
|
||
|
||
echo ""
|
||
echo "Running a highstate. This could take several minutes."
|
||
set +e
|
||
salt-call state.highstate -l info queue=True
|
||
set -e
|
||
|
||
stop_salt_master
|
||
|
||
masterunlock
|
||
|
||
systemctl_func "start" "salt-master"
|
||
|
||
set +e
|
||
echo "Waiting on the Salt Master service to be ready."
|
||
check_salt_master_status || fail "Can't access salt master or it is not ready. Check $SOUP_LOG for details."
|
||
set -e
|
||
|
||
echo "Running a highstate to complete the Security Onion upgrade on this manager. This could take several minutes."
|
||
salt-call state.highstate -l info queue=True
|
||
postupgrade_changes
|
||
[[ $is_airgap -eq 0 ]] && unmount_update
|
||
|
||
echo ""
|
||
echo "Upgrade to $NEWVERSION complete."
|
||
|
||
# Everything beyond this is post-upgrade checking, don't fail past this point if something here causes an error
|
||
set +e
|
||
|
||
echo "Checking the number of minions."
|
||
NUM_MINIONS=$(ls /opt/so/saltstack/local/pillar/minions/*_*.sls | grep -v adv_ | wc -l)
|
||
if [[ $UPGRADESALT -eq 1 ]] && [[ $NUM_MINIONS -gt 1 ]]; then
|
||
if [[ $is_airgap -eq 0 ]]; then
|
||
echo ""
|
||
echo "Cleaning repos on remote Security Onion nodes."
|
||
salt -C 'not *_eval and not *_manager and not *_managersearch and not *_standalone and G@os:CentOS' cmd.run "yum clean all"
|
||
echo ""
|
||
fi
|
||
fi
|
||
|
||
#echo "Checking for local modifications."
|
||
#check_local_mods
|
||
|
||
echo "Checking sudoers file."
|
||
check_sudoers
|
||
|
||
systemctl_func "start" "$cron_service_name"
|
||
|
||
if [[ -n $lsl_msg ]]; then
|
||
case $lsl_msg in
|
||
'distributed')
|
||
echo "[INFO] The value of log_size_limit in any heavy node minion pillars may be incorrect."
|
||
echo " -> We recommend checking and adjusting the values as necessary."
|
||
echo " -> Minion pillar directory: /opt/so/saltstack/local/pillar/minions/"
|
||
;;
|
||
'single-node')
|
||
# We can assume the lsl_details array has been set if lsl_msg has this value
|
||
echo "[WARNING] The value of log_size_limit (${lsl_details[0]}) does not match the recommended value of ${lsl_details[1]}."
|
||
echo " -> We recommend checking and adjusting the value as necessary."
|
||
echo " -> File: /opt/so/saltstack/local/pillar/minions/${lsl_details[2]}.sls"
|
||
;;
|
||
esac
|
||
fi
|
||
|
||
if [[ $NUM_MINIONS -gt 1 ]]; then
|
||
|
||
cat << EOF
|
||
|
||
|
||
|
||
This appears to be a distributed deployment. Other nodes should update themselves at the next Salt highstate (typically within 15 minutes). Do not manually restart anything until you know that all the search/heavy nodes in your deployment are updated. This is especially important if you are using true clustering for Elasticsearch.
|
||
|
||
Each minion is on a random 15 minute check-in period and things like network bandwidth can be a factor in how long the actual upgrade takes. If you have a heavy node on a slow link, it is going to take a while to get the containers to it. Depending on what changes happened between the versions, Elasticsearch might not be able to talk to said heavy node until the update is complete.
|
||
|
||
If it looks like you’re missing data after the upgrade, please avoid restarting services and instead make sure at least one search node has completed its upgrade. The best way to do this is to run 'sudo salt-call state.highstate' from a search node and make sure there are no errors. Typically if it works on one node it will work on the rest. Forward nodes are less complex and will update as they check in so you can monitor those from the Grid section of SOC.
|
||
|
||
For more information, please see $DOC_BASE_URL/soup.html#distributed-deployments.
|
||
|
||
EOF
|
||
|
||
fi
|
||
fi
|
||
|
||
if [ "$NOTIFYCUSTOMELASTICCONFIG" = true ] ; then
|
||
|
||
cat << EOF
|
||
|
||
|
||
A custom Elasticsearch configuration has been found at /opt/so/saltstack/local/elasticsearch/files/elasticsearch.yml. This file is no longer referenced in Security Onion versions >= 2.3.80.
|
||
|
||
If you still need those customizations, you'll need to manually migrate them to the new Elasticsearch config as shown at $DOC_BASE_URL/elasticsearch.html.
|
||
|
||
EOF
|
||
|
||
fi
|
||
|
||
echo "### soup has been served at $(date) ###"
|
||
}
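
# Option handling: -b sets the batch size (BATCHSIZE, must be greater than 0), -y runs
# unattended (only allowed once the Elastic License has already been accepted), and -f
# points soup at ISO content for airgap or offline upgrades. Example invocations
# (illustrative):
#   soup                # interactive upgrade
#   soup -y             # unattended upgrade
#   soup -f /home/myuser/securityonion-2.x.y.iso
#   soup -b 10 -y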
while getopts ":b:f:y" opt; do
  case ${opt} in
    b )
      BATCHSIZE="$OPTARG"
      if ! [[ "$BATCHSIZE" =~ ^[1-9][0-9]*$ ]]; then
        echo "Batch size must be a number greater than 0."
        exit 1
      fi
      ;;
    y )
      if [[ ! -f /opt/so/state/yeselastic.txt ]]; then
        echo "Cannot run soup in unattended mode. You must run soup manually to accept the Elastic License."
        exit 1
      else
        UNATTENDED=true
      fi
      ;;
    f )
      ISOLOC="$OPTARG"
      ;;
    \? )
      echo "Usage: soup [-b <batch size>] [-y] [-f <iso location>]"
      exit 1
      ;;
    : )
      echo "Invalid option: $OPTARG requires an argument"
      exit 1
      ;;
  esac
done
shift $((OPTIND - 1))

if [ -f $SOUP_LOG ]; then
  CURRENT_TIME=$(date +%Y%m%d.%H%M%S)
  mv $SOUP_LOG $SOUP_LOG.$INSTALLEDVERSION.$CURRENT_TIME
fi

if [[ -z $UNATTENDED ]]; then
  cat << EOF

SOUP - Security Onion UPdater

Please review the following for more information about the update process and recent updates:
$DOC_BASE_URL/soup.html
https://blog.securityonion.net

EOF

  cat << EOF
Press Enter to continue or Ctrl-C to cancel.
EOF

  read -r input
fi

main "$@" | tee -a $SOUP_LOG