#!/bin/bash
#
# Copyright 2014-2022 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

. /usr/sbin/so-common

UPDATE_DIR=/tmp/sogh/securityonion
DEFAULT_SALT_DIR=/opt/so/saltstack/default
INSTALLEDVERSION=$(cat /etc/soversion)
POSTVERSION=$INSTALLEDVERSION
INSTALLEDSALTVERSION=$(salt --versions-report | grep Salt: | awk '{print $2}')
BATCHSIZE=5
SOUP_LOG=/root/soup.log
INFLUXDB_MIGRATION_LOG=/opt/so/log/influxdb/soup_migration.log
WHATWOULDYOUSAYYAHDOHERE=soup
whiptail_title='Security Onion UPdater'
NOTIFYCUSTOMELASTICCONFIG=false

check_err() {
  local exit_code=$1
  local err_msg="Unhandled error occurred, please check $SOUP_LOG for details."

  [[ $ERR_HANDLED == true ]] && exit $exit_code

  if [[ $exit_code -ne 0 ]]; then

    set +e
    systemctl_func "start" "$cron_service_name"
    systemctl_func "start" "salt-master"
    systemctl_func "start" "salt-minion"
    enable_highstate

    printf '%s' "Soup failed with error $exit_code: "
    case $exit_code in
      2)
        echo 'No such file or directory'
        ;;
      5)
        echo 'Interrupted system call'
        ;;
      12)
        echo 'Out of memory'
        ;;
      28)
        echo 'No space left on device'
        echo 'Likely ran out of space on disk, please review hardware requirements for Security Onion: https://docs.securityonion.net/en/2.3/hardware.html'
        ;;
      30)
        echo 'Read-only file system'
        ;;
      35)
        echo 'Resource temporarily unavailable'
        ;;
      64)
        echo 'Machine is not on the network'
        ;;
      67)
        echo 'Link has been severed'
        ;;
      100)
        echo 'Network is down'
        ;;
      101)
        echo 'Network is unreachable'
        ;;
      102)
        echo 'Network reset'
        ;;
      110)
        echo 'Connection timed out'
        ;;
      111)
        echo 'Connection refused'
        ;;
      112)
        echo 'Host is down'
        ;;
      113)
        echo 'No route to host'
        ;;
      *)
        echo 'Unhandled error'
        echo "$err_msg"
        ;;
    esac
    if [[ $exit_code -ge 64 && $exit_code -le 113 ]]; then
      echo "$err_msg"
    fi

    exit $exit_code
  fi

}

add_common() {
  cp $UPDATE_DIR/salt/common/tools/sbin/so-common $DEFAULT_SALT_DIR/salt/common/tools/sbin/
  cp $UPDATE_DIR/salt/common/tools/sbin/so-image-common $DEFAULT_SALT_DIR/salt/common/tools/sbin/
  salt-call state.apply common queue=True
  echo "Run soup one more time"
  exit 0
}

airgap_mounted() {
  # Let's see if the ISO is already mounted.
  if [[ -f /tmp/soagupdate/SecurityOnion/VERSION ]]; then
    echo "The ISO is already mounted"
  else
    if [[ -z $ISOLOC ]]; then
      echo "This is airgap. Ask for a location."
      echo ""
      cat << EOF
In order for soup to proceed, the path to the downloaded Security Onion ISO file, or the path to the CD-ROM or equivalent device containing the ISO media must be provided.
For example, if you have copied the new Security Onion ISO file to your home directory, then the path might look like /home/myuser/securityonion-2.x.y.iso.
Or, if you have burned the new ISO onto an optical disk then the path might look like /dev/cdrom.

EOF
      read -rp 'Enter the path to the new Security Onion ISO content: ' ISOLOC
    fi
    if [[ -f $ISOLOC ]]; then
      # Mounting the ISO image
      mkdir -p /tmp/soagupdate
      mount -t iso9660 -o loop $ISOLOC /tmp/soagupdate
      # Make sure mounting was successful
      if [ ! -f /tmp/soagupdate/SecurityOnion/VERSION ]; then
        echo "Something went wrong trying to mount the ISO."
        echo "Ensure you verify the ISO that you downloaded."
        exit 0
      else
        echo "ISO has been mounted!"
      fi
    elif [[ -f $ISOLOC/SecurityOnion/VERSION ]]; then
      ln -s $ISOLOC /tmp/soagupdate
      echo "Found the update content"
    elif [[ -b $ISOLOC ]]; then
      mkdir -p /tmp/soagupdate
      mount $ISOLOC /tmp/soagupdate
      if [ ! -f /tmp/soagupdate/SecurityOnion/VERSION ]; then
        echo "Something went wrong trying to mount the device."
        echo "Ensure you verify the ISO that you downloaded."
        exit 0
      else
        echo "Device has been mounted!"
      fi
    else
      echo "Could not find Security Onion ISO content at ${ISOLOC}"
      echo "Ensure the path you entered is correct, and that you verify the ISO that you downloaded."
      exit 0
    fi
  fi
}

airgap_update_dockers() {
  if [[ $is_airgap -eq 0 ]] || [[ ! -z "$ISOLOC" ]]; then
    # Let's copy the tarball
    if [[ ! -f $AGDOCKER/registry.tar ]]; then
      echo "Unable to locate registry. Exiting"
      exit 0
    else
      echo "Stopping the registry docker"
      docker stop so-dockerregistry
      docker rm so-dockerregistry
      echo "Copying the new dockers over"
      tar xvf "$AGDOCKER/registry.tar" -C /nsm/docker-registry/docker
      echo "Add Registry back"
      docker load -i "$AGDOCKER/registry_image.tar"
    fi
  fi
}

update_registry() {
  docker stop so-dockerregistry
  docker rm so-dockerregistry
  salt-call state.apply registry queue=True
}

check_airgap() {
  # See if this is an airgap install
  AIRGAP=$(cat /opt/so/saltstack/local/pillar/global.sls | grep airgap: | awk '{print $2}')
  if [[ "$AIRGAP" == "True" ]]; then
    is_airgap=0
    UPDATE_DIR=/tmp/soagupdate/SecurityOnion
    AGDOCKER=/tmp/soagupdate/docker
    AGREPO=/tmp/soagupdate/Packages
  else
    is_airgap=1
  fi
}

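# Note (assumption about intent): the {% raw %} / {% endraw %} markers below are Jinja tags.
# Since soup is distributed as a Salt-managed file, they appear to keep Salt's template renderer
# from interpreting the shell brace expansions inside check_local_mods when the file is rendered.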
# {% raw %}

check_local_mods() {
  local salt_local=/opt/so/saltstack/local
  local_ignore_arr=("/opt/so/saltstack/local/salt/zeek/policy/intel/intel.dat")
  local_mod_arr=()

  while IFS= read -r -d '' local_file; do
    stripped_path=${local_file#"$salt_local"}
    default_file="${DEFAULT_SALT_DIR}${stripped_path}"
    if [[ -f $default_file ]]; then
      file_diff=$(diff "$default_file" "$local_file" )
      if [[ ! " ${local_ignore_arr[*]} " =~ " ${local_file} " ]]; then
        if [[ $(echo "$file_diff" | grep -c "^<") -gt 0 ]]; then
          local_mod_arr+=( "$local_file" )
        fi
      fi
    fi
  done< <(find $salt_local -type f -print0)

  if [[ ${#local_mod_arr} -gt 0 ]]; then
    echo "Potentially breaking changes found in the following files (check ${DEFAULT_SALT_DIR} for original copy):"
    for file_str in "${local_mod_arr[@]}"; do
      echo " $file_str"
    done
    echo ""
    echo "To reference this list later, check $SOUP_LOG."
    echo
    if [[ -z $UNATTENDED ]] && ! [[ "${1}" == "skip-prompt" ]]; then
      while true; do
        read -p "Please review the local modifications shown above as they may cause problems during or after the update.

Would you like to proceed with the update anyway?

If so, type 'YES'. Otherwise, type anything else to exit SOUP. " yn

        case $yn in
          [yY][eE][sS] ) echo "Local modifications accepted. Continuing..."; break;;
          * ) exit 0;;
        esac
      done
    fi
  fi
}
# {% endraw %}

check_pillar_items() {
  local pillar_output=$(salt-call pillar.items --out=json)

  cond=$(jq '.local | has("_errors")' <<< "$pillar_output")
  if [[ "$cond" == "true" ]]; then
    printf "\nThere is an issue rendering the manager's pillars. Please correct the issues in the sls files mentioned below before running SOUP again.\n\n"
    jq '.local._errors[]' <<< "$pillar_output"
    exit 0
  else
    printf "\nThe manager's pillars can be rendered. We can proceed with SOUP.\n\n"
  fi
}

check_sudoers() {
  if grep -q "so-setup" /etc/sudoers; then
    echo "There is an entry for so-setup in the sudoers file; it can be safely deleted using \"visudo\"."
  fi
}

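# Note (assumption): elasticsearch:log_size_limit is the cap, in GB, on how much disk Elasticsearch
# may consume on a node. The check below recomputes the recommended value (a percentage of the disk
# backing /nsm, or / if /nsm does not exist) and warns after the upgrade if the pillar no longer matches.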
check_log_size_limit() {
  local num_minion_pillars
  num_minion_pillars=$(find /opt/so/saltstack/local/pillar/minions/ -type f | wc -l)

  if [[ $num_minion_pillars -gt 1 ]]; then
    if find /opt/so/saltstack/local/pillar/minions/ -type f | grep -q "_heavynode"; then
      lsl_msg='distributed'
    fi
  else
    local minion_id
    minion_id=$(lookup_salt_value "id" "" "grains" "" "local")

    local minion_arr
    IFS='_' read -ra minion_arr <<< "$minion_id"

    local node_type="${minion_arr[0]}"

    local current_limit
    # since it is possible for the salt-master service to be stopped when this is run, we need to check the pillar values locally
    # we need to combine the default and local pillars before doing this so we can define --pillar-root in salt-call
    local epoch_date=$(date +%s%N)
    mkdir -vp /opt/so/saltstack/soup_tmp_${epoch_date}/
    cp -r /opt/so/saltstack/default/pillar/ /opt/so/saltstack/soup_tmp_${epoch_date}/
    # use \cp here to overwrite any pillar files from default with those in local for the tmp directory
    \cp -r /opt/so/saltstack/local/pillar/ /opt/so/saltstack/soup_tmp_${epoch_date}/
    current_limit=$(salt-call pillar.get elasticsearch:log_size_limit --local --pillar-root=/opt/so/saltstack/soup_tmp_${epoch_date}/pillar --out=newline_values_only)
    rm -rf /opt/so/saltstack/soup_tmp_${epoch_date}/

    local percent
    case $node_type in
      'standalone' | 'eval')
        percent=50
        ;;
      *)
        percent=80
        ;;
    esac

    local disk_dir="/"
    if [ -d /nsm ]; then
      disk_dir="/nsm"
    fi

    local disk_size_1k
    disk_size_1k=$(df $disk_dir | grep -v "^Filesystem" | awk '{print $2}')

    local ratio="1048576"

    local disk_size_gb
    disk_size_gb=$( echo "$disk_size_1k" "$ratio" | awk '{print($1/$2)}' )

    local new_limit
    new_limit=$( echo "$disk_size_gb" "$percent" | awk '{printf("%.0f", $1 * ($2/100))}')

    if [[ $current_limit != "$new_limit" ]]; then
      lsl_msg='single-node'
      lsl_details=( "$current_limit" "$new_limit" "$minion_id" )
    fi
  fi
}

check_os_updates() {
  # Check to see if there are OS updates
  NEEDUPDATES="We have detected missing operating system (OS) updates. Do you want to install these OS updates now? This could take a while depending on the size of your grid and how many packages are missing, but it is recommended to keep your system updated."
  if [[ $OS == 'ubuntu' ]]; then
    OSUPDATES=$(apt list --upgradeable | grep -v "^Listing..." | grep -v "^docker-ce" | grep -v "^wazuh-" | grep -v "^salt-" | wc -l)
  else
    OSUPDATES=$(yum -q list updates | wc -l)
  fi
  if [[ "$OSUPDATES" -gt 0 ]]; then
    if [[ -z $UNATTENDED ]]; then
      echo "$NEEDUPDATES"
      echo ""
      read -rp "Press U to update OS packages (recommended), C to continue without updates, or E to exit: " confirm
      if [[ "$confirm" == [cC] ]]; then
        echo "Continuing without updating packages"
      elif [[ "$confirm" == [uU] ]]; then
        echo "Applying Grid Updates"
        update_flag=true
      else
        echo "Exiting soup"
        exit 0
      fi
    else
      update_flag=true
    fi
  else
    echo "Looks like you have an updated OS"
  fi

  if [[ $update_flag == true ]]; then
    set +e
    # Apply OS updates across the grid, $BATCHSIZE minions at a time
    run_check_net_err "salt '*' -b $BATCHSIZE state.apply patch.os queue=True" 'Could not apply OS updates, please check your network connection.'
    set -e
  fi
}

clean_dockers() {
  # Placeholder for cleaning up old docker images
  echo "Trying to clean up old dockers."
  docker system prune -a -f

}

clone_to_tmp() {
  # Clean old files
  rm -rf /tmp/sogh
  # Make a temp location for the files
  mkdir -p /tmp/sogh
  cd /tmp/sogh
  SOUP_BRANCH=""
  if [ -n "$BRANCH" ]; then
    SOUP_BRANCH="-b $BRANCH"
  fi
  git clone $SOUP_BRANCH https://github.com/Security-Onion-Solutions/securityonion.git
  cd /tmp
  if [ ! -f $UPDATE_DIR/VERSION ]; then
    echo "Update was unable to pull from GitHub. Please check your Internet connection."
    exit 0
  fi
}

elastalert_indices_check() {
  echo "Checking Elastalert indices for compatibility..."
  # Wait for Elasticsearch to initialize
  echo -n "Waiting for Elasticsearch..."
  COUNT=0
  ELASTICSEARCH_CONNECTED="no"
  while [[ "$COUNT" -le 240 ]]; do
    so-elasticsearch-query / -k --output /dev/null
    if [ $? -eq 0 ]; then
      ELASTICSEARCH_CONNECTED="yes"
      echo "connected!"
      break
    else
      ((COUNT+=1))
      sleep 1
      echo -n "."
    fi
  done

  # Unable to connect to Elasticsearch
  if [ "$ELASTICSEARCH_CONNECTED" == "no" ]; then
    echo
    echo -e "Connection attempt timed out. Unable to connect to Elasticsearch. \nPlease try: \n -checking log(s) in /var/log/elasticsearch/\n -running 'sudo docker ps' \n -running 'sudo so-elastic-restart'"
    echo
    exit 1
  fi

  MAJOR_ES_VERSION=$(so-elasticsearch-query / | jq -r .version.number | cut -d '.' -f1)
  if [[ "$MAJOR_ES_VERSION" -lt "8" ]]; then

    # Stop Elastalert to prevent Elastalert indices from being re-created
    if grep -q "^so-elastalert$" /opt/so/conf/so-status/so-status.conf ; then
      so-elastalert-stop || true
    fi

    # Check Elastalert indices
    echo "Deleting Elastalert indices to prevent issues with upgrade to Elastic 8..."
    CHECK_COUNT=0
    while [[ "$CHECK_COUNT" -le 2 ]]; do
      # Delete Elastalert indices
      for i in $(so-elasticsearch-query _cat/indices | grep elastalert | awk '{print $3}'); do
        so-elasticsearch-query $i -XDELETE;
      done

      # Check to ensure Elastalert indices are deleted
      COUNT=0
      ELASTALERT_INDICES_DELETED="no"
      while [[ "$COUNT" -le 240 ]]; do
        RESPONSE=$(so-elasticsearch-query "elastalert*")
        if [[ "$RESPONSE" == "{}" ]]; then
          ELASTALERT_INDICES_DELETED="yes"
          break
        else
          ((COUNT+=1))
          sleep 1
          echo -n "."
        fi
      done
      ((CHECK_COUNT+=1))
    done

    # If we were unable to delete the Elastalert indices, exit the script
    if [ "$ELASTALERT_INDICES_DELETED" == "yes" ]; then
      echo "Elastalert indices successfully deleted."
    else
      echo
      echo -e "Unable to delete Elastalert indices. Exiting."
      echo
      exit 1
    fi
  else
    echo "Major Elasticsearch version is 8 or greater...skipping Elastalert index maintenance."
  fi
}

enable_highstate() {
  echo "Enabling highstate."
  salt-call state.enable highstate -l info --local
  echo ""
}

es_version_check() {
  CHECK_ES=$(echo $INSTALLEDVERSION | awk -F. '{print $3}')

  if [[ "$CHECK_ES" -lt "110" ]]; then
    echo "You are currently running Security Onion $INSTALLEDVERSION. You will need to update to version 2.3.130 before updating to 2.3.140 or higher."
    echo ""
    echo "If your deployment has Internet access, you can use the following command to update to 2.3.130:"
    echo "sudo BRANCH=2.3.130-20220607 soup"
    echo ""
    echo "Otherwise, if your deployment is configured for airgap, you can instead download the 2.3.130 ISO image from https://download.securityonion.net/file/securityonion/securityonion-2.3.130-20220607.iso."
    echo ""
    echo "*** Once you have updated to 2.3.130, you can then update to 2.3.140 or higher as you would normally. ***"
    exit 1
  fi
}

es_indices_check() {
  echo "Checking for unsupported Elasticsearch indices..."
  UNSUPPORTED_INDICES=$(for INDEX in $(so-elasticsearch-indices-list | awk '{print $3}'); do so-elasticsearch-query $INDEX/_settings?human | grep '"created_string":"6' | jq -r 'keys'[0]; done)
  if [ -z "$UNSUPPORTED_INDICES" ]; then
    echo "No unsupported indices found."
  else
    echo "The following indices were created with Elasticsearch 6, and are not supported when upgrading to Elasticsearch 8. These indices may need to be deleted, migrated, or re-indexed before proceeding with the upgrade. Please see https://docs.securityonion.net/en/2.3/soup.html#elastic-8 for more details."
    echo
    echo "$UNSUPPORTED_INDICES"
    exit 1
  fi
}

generate_and_clean_tarballs() {
  local new_version
  new_version=$(cat $UPDATE_DIR/VERSION)
  [ -d /opt/so/repo ] || mkdir -p /opt/so/repo
  tar -czf "/opt/so/repo/$new_version.tar.gz" -C "$UPDATE_DIR" .
  find "/opt/so/repo" -type f -not -name "$new_version.tar.gz" -exec rm -rf {} \;
}

highstate() {
  # Run a highstate.
  salt-call state.highstate -l info queue=True
}

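# Note (assumption about intent): while the Salt master is "locked", the default top file is swapped
# for a minimal one so that only the ca, ssl, and elasticsearch states can be applied to this manager
# during the upgrade; masterunlock restores the original top file afterwards.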
masterlock() {
  echo "Locking Salt Master"
  if [[ "$INSTALLEDVERSION" =~ rc.1 ]]; then
    TOPFILE=/opt/so/saltstack/default/salt/top.sls
    BACKUPTOPFILE=/opt/so/saltstack/default/salt/top.sls.backup
    mv -v $TOPFILE $BACKUPTOPFILE
    echo "base:" > $TOPFILE
    echo " $MINIONID:" >> $TOPFILE
    echo " - ca" >> $TOPFILE
    echo " - ssl" >> $TOPFILE
    echo " - elasticsearch" >> $TOPFILE
  fi
}

masterunlock() {
  echo "Unlocking Salt Master"
  if [[ "$INSTALLEDVERSION" =~ rc.1 ]]; then
    mv -v $BACKUPTOPFILE $TOPFILE
  fi
}

preupgrade_changes() {
  # This function is to add any new pillar items if needed.
  echo "Checking to see if changes are needed."

  [[ "$INSTALLEDVERSION" == 2.3.0 || "$INSTALLEDVERSION" == 2.3.1 || "$INSTALLEDVERSION" == 2.3.2 || "$INSTALLEDVERSION" == 2.3.10 ]] && up_to_2.3.20
  [[ "$INSTALLEDVERSION" == 2.3.20 || "$INSTALLEDVERSION" == 2.3.21 ]] && up_to_2.3.30
  [[ "$INSTALLEDVERSION" == 2.3.30 || "$INSTALLEDVERSION" == 2.3.40 ]] && up_to_2.3.50
  [[ "$INSTALLEDVERSION" == 2.3.50 || "$INSTALLEDVERSION" == 2.3.51 || "$INSTALLEDVERSION" == 2.3.52 || "$INSTALLEDVERSION" == 2.3.60 || "$INSTALLEDVERSION" == 2.3.61 || "$INSTALLEDVERSION" == 2.3.70 ]] && up_to_2.3.80
  [[ "$INSTALLEDVERSION" == 2.3.80 ]] && up_to_2.3.90
  [[ "$INSTALLEDVERSION" == 2.3.90 || "$INSTALLEDVERSION" == 2.3.91 ]] && up_to_2.3.100
  [[ "$INSTALLEDVERSION" == 2.3.100 ]] && up_to_2.3.110
  [[ "$INSTALLEDVERSION" == 2.3.110 ]] && up_to_2.3.120
  [[ "$INSTALLEDVERSION" == 2.3.120 ]] && up_to_2.3.130
  [[ "$INSTALLEDVERSION" == 2.3.130 ]] && up_to_2.3.140
  [[ "$INSTALLEDVERSION" == 2.3.140 ]] && up_to_2.3.150
  [[ "$INSTALLEDVERSION" == 2.3.150 ]] && up_to_2.3.160
  [[ "$INSTALLEDVERSION" == 2.3.160 ]] && up_to_2.3.170
  [[ "$INSTALLEDVERSION" == 2.3.170 ]] && up_to_2.3.180
  [[ "$INSTALLEDVERSION" == 2.3.180 ]] && up_to_2.3.181
  [[ "$INSTALLEDVERSION" == 2.3.181 ]] && up_to_2.3.190
  true
}

postupgrade_changes() {
  # This function is to add any new pillar items if needed.
  echo "Running post-upgrade processes."

  [[ "$POSTVERSION" == 2.3.0 || "$POSTVERSION" == 2.3.1 || "$POSTVERSION" == 2.3.2 || "$POSTVERSION" == 2.3.10 || "$POSTVERSION" == 2.3.20 ]] && post_to_2.3.21
  [[ "$POSTVERSION" == 2.3.21 || "$POSTVERSION" == 2.3.30 ]] && post_to_2.3.40
  [[ "$POSTVERSION" == 2.3.40 || "$POSTVERSION" == 2.3.50 || "$POSTVERSION" == 2.3.51 || "$POSTVERSION" == 2.3.52 ]] && post_to_2.3.60
  [[ "$POSTVERSION" == 2.3.60 || "$POSTVERSION" == 2.3.61 || "$POSTVERSION" == 2.3.70 || "$POSTVERSION" == 2.3.80 ]] && post_to_2.3.90
  [[ "$POSTVERSION" == 2.3.90 || "$POSTVERSION" == 2.3.91 ]] && post_to_2.3.100
  [[ "$POSTVERSION" == 2.3.100 ]] && post_to_2.3.110
  [[ "$POSTVERSION" == 2.3.110 ]] && post_to_2.3.120
  [[ "$POSTVERSION" == 2.3.120 ]] && post_to_2.3.130
  [[ "$POSTVERSION" == 2.3.130 ]] && post_to_2.3.140
  [[ "$POSTVERSION" == 2.3.140 ]] && post_to_2.3.150
  [[ "$POSTVERSION" == 2.3.150 ]] && post_to_2.3.160
  [[ "$POSTVERSION" == 2.3.160 ]] && post_to_2.3.170
  [[ "$POSTVERSION" == 2.3.170 ]] && post_to_2.3.180
  [[ "$POSTVERSION" == 2.3.180 ]] && post_to_2.3.181
  [[ "$POSTVERSION" == 2.3.181 ]] && post_to_2.3.190

  true
}

post_to_2.3.21() {
  salt-call state.apply playbook.OLD_db_init
  rm -f /opt/so/rules/elastalert/playbook/*.yaml
  so-playbook-ruleupdate >> /root/soup_playbook_rule_update.log 2>&1 &
  POSTVERSION=2.3.21
}

post_to_2.3.40() {
  so-playbook-sigma-refresh >> /root/soup_playbook_sigma_refresh.log 2>&1 &
  so-kibana-space-defaults
  POSTVERSION=2.3.40
}

post_to_2.3.60() {
  for table in identity_recovery_addresses selfservice_recovery_flows selfservice_registration_flows selfservice_verification_flows identities identity_verification_tokens identity_credentials selfservice_settings_flows identity_recovery_tokens continuity_containers identity_credential_identifiers identity_verifiable_addresses courier_messages selfservice_errors sessions selfservice_login_flows
  do
    echo "Forcing Kratos network migration: $table"
    sqlite3 /opt/so/conf/kratos/db/db.sqlite "update $table set nid=(select id from networks limit 1);"
  done

  POSTVERSION=2.3.60
}

post_to_2.3.90() {
  # Create FleetDM service account
  FLEET_MANAGER=$(lookup_pillar fleet_manager)
  if [[ "$FLEET_MANAGER" == "True" ]]; then
    FLEET_SA_EMAIL=$(lookup_pillar_secret fleet_sa_email)
    FLEET_SA_PW=$(lookup_pillar_secret fleet_sa_password)
    MYSQL_PW=$(lookup_pillar_secret mysql)

    FLEET_HASH=$(docker exec so-soctopus python -c "import bcrypt; print(bcrypt.hashpw('$FLEET_SA_PW'.encode('utf-8'), bcrypt.gensalt()).decode('utf-8'));" 2>&1)
    MYSQL_OUTPUT=$(docker exec so-mysql mysql -u root --password=$MYSQL_PW fleet -e \
      "INSERT INTO users (password,salt,email,name,global_role) VALUES ('$FLEET_HASH','','$FLEET_SA_EMAIL','$FLEET_SA_EMAIL','admin')" 2>&1)

    if [[ $? -eq 0 ]]; then
      echo "Successfully added service account to Fleet"
    else
      echo "Unable to add service account to Fleet"
      echo "$MYSQL_OUTPUT"
    fi
  fi

  POSTVERSION=2.3.90
}

post_to_2.3.100() {
  echo "Post Processing for 2.3.100"
  POSTVERSION=2.3.100
}

post_to_2.3.110() {
  echo "Post Processing for 2.3.110"
  echo "Removing old Elasticsearch index templates"
  [ -d /opt/so/saltstack/default/salt/elasticsearch/templates/so ] && rm -rf /opt/so/saltstack/default/salt/elasticsearch/templates/so
  echo "Updating Kibana dashboards"
  salt-call state.apply kibana.so_savedobjects_defaults queue=True
  POSTVERSION=2.3.110
}

post_to_2.3.120() {
  echo "Post Processing for 2.3.120"
  POSTVERSION=2.3.120
  sed -i '/so-thehive-es/d;/so-thehive/d;/so-cortex/d' /opt/so/conf/so-status/so-status.conf
}

post_to_2.3.130() {
  echo "Post Processing for 2.3.130"
  POSTVERSION=2.3.130
}

post_to_2.3.140() {
  echo "Post Processing for 2.3.140"
  FORCE_SYNC=true so-user sync
  so-kibana-restart
  so-kibana-space-defaults
  POSTVERSION=2.3.140
}

post_to_2.3.150() {
  echo "Nothing to do for .150"
  POSTVERSION=2.3.150
}

post_to_2.3.160() {
  echo "Nothing to do for .160"
  POSTVERSION=2.3.160
}

post_to_2.3.170() {
  echo "Nothing to do for .170"
  POSTVERSION=2.3.170
}

post_to_2.3.180() {
  echo "Nothing to do for .180"
  POSTVERSION=2.3.180
}

post_to_2.3.181() {
  echo "Nothing to do for .181"
  POSTVERSION=2.3.181
}

post_to_2.3.190() {
  echo "Nothing to do for .190"
  POSTVERSION=2.3.190
}

stop_salt_master() {
  # kill all salt jobs across the grid because they hang indefinitely if they are queued and salt-master restarts
  set +e
  echo ""
  echo "Killing all Salt jobs across the grid."
  salt \* saltutil.kill_all_jobs >> $SOUP_LOG 2>&1
  echo ""
  echo "Killing any queued Salt jobs on the manager."
  pkill -9 -ef "/usr/bin/python3 /bin/salt" >> $SOUP_LOG 2>&1
  set -e

  echo ""
  echo "Storing salt-master pid."
  MASTERPID=$(pgrep salt-master | head -1)
  echo "Found salt-master PID $MASTERPID"
  systemctl_func "stop" "salt-master"
  timeout 30 tail --pid=$MASTERPID -f /dev/null || echo "salt-master still running at $(date +"%T.%6N") after waiting 30s. We cannot kill due to systemd restart option."
}

stop_salt_minion() {
  echo "Disabling highstate to prevent it from running if salt-minion restarts."
  salt-call state.disable highstate -l info --local
  echo ""

  # kill all salt jobs before stopping salt-minion
  set +e
  echo ""
  echo "Killing Salt jobs on this node."
  salt-call saltutil.kill_all_jobs --local
  set -e

  echo "Storing salt-minion pid."
  MINIONPID=$(pgrep salt-minion | head -1)
  echo "Found salt-minion PID $MINIONPID"
  systemctl_func "stop" "salt-minion"

  set +e
  timeout 30 tail --pid=$MINIONPID -f /dev/null || echo "Killing salt-minion at $(date +"%T.%6N") after waiting 30s" && pkill -9 -ef /usr/bin/salt-minion
  set -e
}

up_to_2.3.20(){
  DOCKERSTUFFBIP=$(echo $DOCKERSTUFF | awk -F'.' '{print $1,$2,$3,1}' OFS='.')/24
  # Remove PCAP from global
  sed -i '/pcap:/d' /opt/so/saltstack/local/pillar/global.sls
  sed -i '/sensor_checkin_interval_ms:/d' /opt/so/saltstack/local/pillar/global.sls

  # Add check-in interval to global
  echo "sensoroni:" >> /opt/so/saltstack/local/pillar/global.sls
  echo " node_checkin_interval_ms: 10000" >> /opt/so/saltstack/local/pillar/global.sls

  # Update pillar files for new sensoroni functionality
  for file in /opt/so/saltstack/local/pillar/minions/*; do
    echo "sensoroni:" >> $file
    echo " node_description:" >> $file
    local SOMEADDRESS=$(cat $file | grep mainip | tail -n 1 | awk '{print $2'})
    echo " node_address: $SOMEADDRESS" >> $file
  done

  # Remove old firewall config to reduce confusion
  rm -f /opt/so/saltstack/default/pillar/firewall/ports.sls

  # Fix daemon.json by managing it
  echo "docker:" >> /opt/so/saltstack/local/pillar/global.sls
  DOCKERGREP=$(cat /etc/docker/daemon.json | grep base | awk {'print $3'} | cut -f1 -d"," | tr -d '"')
  if [ -z "$DOCKERGREP" ]; then
    echo " range: '172.17.0.0/24'" >> /opt/so/saltstack/local/pillar/global.sls
    echo " bip: '172.17.0.1/24'" >> /opt/so/saltstack/local/pillar/global.sls
  else
    DOCKERSTUFF="${DOCKERGREP//\"}"
    DOCKERSTUFFBIP=$(echo $DOCKERSTUFF | awk -F'.' '{print $1,$2,$3,1}' OFS='.')/24
    echo " range: '$DOCKERSTUFF/24'" >> /opt/so/saltstack/local/pillar/global.sls
    echo " bip: '$DOCKERSTUFFBIP'" >> /opt/so/saltstack/local/pillar/global.sls

  fi

  INSTALLEDVERSION=2.3.20
}

up_to_2.3.30() {
  # Replace any curly brace scalars with the same scalar in single quotes
  readarray -t minion_pillars <<< "$(find /opt/so/saltstack/local/pillar/minions -type f -name '*.sls')"
  for pillar in "${minion_pillars[@]}"; do
    sed -i -r "s/ (\{\{.*}})$/ '\1'/g" "$pillar"
  done

  # Change the IMAGEREPO
  sed -i "/ imagerepo: 'securityonion'/c\ imagerepo: 'security-onion-solutions'" /opt/so/saltstack/local/pillar/global.sls
  sed -i "/ imagerepo: securityonion/c\ imagerepo: 'security-onion-solutions'" /opt/so/saltstack/local/pillar/global.sls

  # Strelka rule repo pillar addition
  if [[ $is_airgap -eq 0 ]]; then
    # Add manager as default Strelka YARA rule repo
    sed -i "/^strelka:/a \\ repos: \n - https://$HOSTNAME/repo/rules/strelka" /opt/so/saltstack/local/pillar/global.sls;
  else
    # Add Github repo for Strelka YARA rules
    sed -i "/^strelka:/a \\ repos: \n - https://github.com/Neo23x0/signature-base" /opt/so/saltstack/local/pillar/global.sls;
  fi
  check_log_size_limit
  INSTALLEDVERSION=2.3.30
}

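# Note (assumption about intent): the sed program written below walks the so-zeek index_settings
# block in global.sls and swaps the transposed retention values, changing "close: 365" to "close: 45"
# and "delete: 45" to "delete: 365", so indices are closed well before they are deleted.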
up_to_2.3.50() {

  cat <<EOF > /tmp/supersed.txt
/so-zeek:/ {
p;
n;
/shards:/ {
p;
n;
/warm:/ {
p;
n;
/close:/ {
s/close: 365/close: 45/;
p;
n;
/delete:/ {
s/delete: 45/delete: 365/;
p;
d;
}
}
}
}
}
p;
EOF
  sed -n -i -f /tmp/supersed.txt /opt/so/saltstack/local/pillar/global.sls
  rm /tmp/supersed.txt
  INSTALLEDVERSION=2.3.50
}

up_to_2.3.80() {

  # Remove watermark settings from global.sls
  sed -i '/ cluster_routing_allocation_disk/d' /opt/so/saltstack/local/pillar/global.sls

  # Add new indices to the global
  sed -i '/ index_settings:/a \\ so-elasticsearch: \n shards: 1 \n warm: 7 \n close: 30 \n delete: 365' /opt/so/saltstack/local/pillar/global.sls
  sed -i '/ index_settings:/a \\ so-logstash: \n shards: 1 \n warm: 7 \n close: 30 \n delete: 365' /opt/so/saltstack/local/pillar/global.sls
  sed -i '/ index_settings:/a \\ so-kibana: \n shards: 1 \n warm: 7 \n close: 30 \n delete: 365' /opt/so/saltstack/local/pillar/global.sls
  sed -i '/ index_settings:/a \\ so-redis: \n shards: 1 \n warm: 7 \n close: 30 \n delete: 365' /opt/so/saltstack/local/pillar/global.sls

  # Do some pillar formatting
  tc=$(grep -w true_cluster /opt/so/saltstack/local/pillar/global.sls | awk -F: {'print tolower($2)'}| xargs)

  if [[ "$tc" == "true" ]]; then
    tcname=$(grep -w true_cluster_name /opt/so/saltstack/local/pillar/global.sls | awk -F: {'print $2'})
    sed -i "/^elasticsearch:/a \\ config: \n cluster: \n name: $tcname" /opt/so/saltstack/local/pillar/global.sls
    sed -i '/ true_cluster_name/d' /opt/so/saltstack/local/pillar/global.sls
    sed -i '/ esclustername/d' /opt/so/saltstack/local/pillar/global.sls

    for file in /opt/so/saltstack/local/pillar/minions/*.sls; do
      if [[ ${file} != *"manager.sls"* ]]; then
        noderoutetype=$(grep -w node_route_type $file | awk -F: {'print $2'})
        if [ -n "$noderoutetype" ]; then
          sed -i "/^elasticsearch:/a \\ config: \n node: \n attr: \n box_type: $noderoutetype" $file
          sed -i '/ node_route_type/d' $file
          noderoutetype=''
        fi
      fi
    done
  fi

  # check for local es config to inform user that the config in local is now ignored and those options need to be placed in the pillar
  if [ -f "/opt/so/saltstack/local/salt/elasticsearch/files/elasticsearch.yml" ]; then
    NOTIFYCUSTOMELASTICCONFIG=true
  fi

  INSTALLEDVERSION=2.3.80
}

up_to_2.3.90() {
  for i in manager managersearch eval standalone; do
    echo "Checking for compgen match of /opt/so/saltstack/local/pillar/minions/*_$i.sls"
    if compgen -G "/opt/so/saltstack/local/pillar/minions/*_$i.sls"; then
      echo "Found compgen match for /opt/so/saltstack/local/pillar/minions/*_$i.sls"
      for f in $(compgen -G "/opt/so/saltstack/local/pillar/minions/*_$i.sls"); do
        if grep -qozP "^soc:\n.*es_index_patterns: '\*:so-\*,\*:endgame-\*'" "$f"; then
          echo "soc:es_index_patterns already present in $f"
        else
          echo "Appending soc pillar data to $f"
          echo "soc:" >> "$f"
          sed -i "/^soc:/a \\ es_index_patterns: '*:so-*,*:endgame-*'" "$f"
        fi
      done
    fi
  done

  # Create Endgame Hostgroup
  echo "Adding endgame hostgroup with so-firewall"
  if so-firewall addhostgroup endgame 2>&1 | grep -q 'Already exists'; then
    echo 'endgame hostgroup already exists'
  else
    echo 'endgame hostgroup added'
  fi

  # Force influx to generate a new cert
  echo "Moving influxdb.crt and influxdb.key to generate new certs"
  mv -vf /etc/pki/influxdb.crt /etc/pki/influxdb.crt.2390upgrade
  mv -vf /etc/pki/influxdb.key /etc/pki/influxdb.key.2390upgrade

  # remove old common ingest pipeline in default
  rm -vf /opt/so/saltstack/default/salt/elasticsearch/files/ingest/common
  # if custom common, move from local ingest to local ingest-dynamic
  mkdir -vp /opt/so/saltstack/local/salt/elasticsearch/files/ingest-dynamic
  if [[ -f "/opt/so/saltstack/local/salt/elasticsearch/files/ingest/common" ]]; then
    mv -v /opt/so/saltstack/local/salt/elasticsearch/files/ingest/common /opt/so/saltstack/local/salt/elasticsearch/files/ingest-dynamic/common
    # since json file, we need to wrap with raw
    sed -i '1s/^/{% raw %}\n/' /opt/so/saltstack/local/salt/elasticsearch/files/ingest-dynamic/common
    sed -i -e '$a{% endraw %}\n' /opt/so/saltstack/local/salt/elasticsearch/files/ingest-dynamic/common
  fi

  # Generate FleetDM Service Account creds if they do not exist
  if grep -q "fleet_sa_email" /opt/so/saltstack/local/pillar/secrets.sls; then
    echo "FleetDM Service Account credentials already created..."
  else
    echo "Generating FleetDM Service Account credentials..."
    FLEETSAPASS=$(get_random_value)
    printf '%s\n'\
      " fleet_sa_email: service.account@securityonion.invalid"\
      " fleet_sa_password: $FLEETSAPASS"\
      >> /opt/so/saltstack/local/pillar/secrets.sls

  fi

  sed -i -re 's/^(playbook_admin.*|playbook_automation.*)/ \1/g' /opt/so/saltstack/local/pillar/secrets.sls

  INSTALLEDVERSION=2.3.90
}

up_to_2.3.100() {
  fix_wazuh

  echo "Adding receiver hostgroup with so-firewall"
  if so-firewall addhostgroup receiver 2>&1 | grep -q 'Already exists'; then
    echo 'receiver hostgroup already exists'
  else
    echo 'receiver hostgroup added'
  fi

  echo "Adding receiver to assigned_hostgroups.local.map.yaml"
  grep -qxF " receiver:" /opt/so/saltstack/local/salt/firewall/assigned_hostgroups.local.map.yaml || sed -i -e '$a\ receiver:' /opt/so/saltstack/local/salt/firewall/assigned_hostgroups.local.map.yaml

  INSTALLEDVERSION=2.3.100
}

up_to_2.3.110() {
  sed -i 's|shards|index_template:\n template:\n settings:\n index:\n number_of_shards|g' /opt/so/saltstack/local/pillar/global.sls
  INSTALLEDVERSION=2.3.110
}

up_to_2.3.120() {
  # Stop thehive services since these will be broken in .120
  so-thehive-stop
  so-thehive-es-stop
  so-cortex-stop
  INSTALLEDVERSION=2.3.120
}

up_to_2.3.130() {
  # Remove file for nav update
  rm -f /opt/so/conf/navigator/layers/nav_layer_playbook.json
  INSTALLEDVERSION=2.3.130
}

up_to_2.3.140() {
  elastalert_indices_check
  ##
  INSTALLEDVERSION=2.3.140
}

up_to_2.3.150() {
  echo "Upgrading to 2.3.150"
  INSTALLEDVERSION=2.3.150
}

up_to_2.3.160() {
  echo "Upgrading to 2.3.160"
  INSTALLEDVERSION=2.3.160
}

up_to_2.3.170() {
  echo "Upgrading to 2.3.170"
  INSTALLEDVERSION=2.3.170
}

up_to_2.3.180() {
  echo "Upgrading to 2.3.180"
  INSTALLEDVERSION=2.3.180
}

up_to_2.3.181() {
  echo "Upgrading to 2.3.181"
  INSTALLEDVERSION=2.3.181
}

up_to_2.3.190() {
  echo "Upgrading to 2.3.190"
  chown -R zeek:socore /nsm/zeek/extracted/complete
  chmod 770 /nsm/zeek/extracted/complete
  INSTALLEDVERSION=2.3.190
}

verify_upgradespace() {
  CURRENTSPACE=$(df -BG / | grep -v Avail | awk '{print $4}' | sed 's/.$//')
  if [ "$CURRENTSPACE" -lt "10" ]; then
    echo "You are low on disk space."
    return 1
  else
    return 0
  fi
}

upgrade_space() {
  if ! verify_upgradespace; then
    clean_dockers
    if ! verify_upgradespace; then
      echo "There is not enough space to perform the upgrade. Please free up space and try again."
      exit 0
    fi
  else
    echo "You have enough space for the upgrade. Proceeding with soup."
  fi
}

unmount_update() {
  cd /tmp
  umount /tmp/soagupdate
}

update_airgap_rules() {
  # Copy the rules over to update them for airgap.
  rsync -av $UPDATE_DIR/agrules/* /nsm/repo/rules/
}

update_centos_repo() {
  # Update the files in the repo
  echo "Syncing new updates to /nsm/repo"
  rsync -av $AGREPO/* /nsm/repo/
  echo "Creating repo"
  createrepo /nsm/repo
}

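# Note (assumption about intent): the Salt mine caches each minion's IP addresses for its main
# interface so that other states can look them up. Repopulating it here keeps those cached values
# current before the Salt services are stopped and restarted for the upgrade.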
update_salt_mine() {
  echo "Populating the mine with network.ip_addrs for the host:mainint interface on each host."
  set +e
  salt \* cmd.run cmd='MAININT=$(salt-call pillar.get host:mainint --out=newline_values_only) && salt-call mine.send name=network.ip_addrs interface="$MAININT"'
  set -e
}

update_version() {
  # Update the version to the latest
  echo "Updating the Security Onion version file."
  echo $NEWVERSION > /etc/soversion
  echo $HOTFIXVERSION > /etc/sohotfix
  sed -i "/ soversion:/c\ soversion: $NEWVERSION" /opt/so/saltstack/local/pillar/global.sls
}

upgrade_check() {
  # Let's make sure we actually need to update.
  NEWVERSION=$(cat $UPDATE_DIR/VERSION)
  HOTFIXVERSION=$(cat $UPDATE_DIR/HOTFIX)
  [[ -f /etc/sohotfix ]] && CURRENTHOTFIX=$(cat /etc/sohotfix)
  if [ "$INSTALLEDVERSION" == "$NEWVERSION" ]; then
    echo "Checking to see if there are hotfixes needed"
    if [ "$HOTFIXVERSION" == "$CURRENTHOTFIX" ]; then
      echo "You are already running the latest version of Security Onion."
      exit 0
    else
      echo "We need to apply a hotfix"
      is_hotfix=true
    fi
  else
    is_hotfix=false
  fi

}

upgrade_check_salt() {
  NEWSALTVERSION=$(grep version: $UPDATE_DIR/salt/salt/master.defaults.yaml | awk '{print $2}')
  if [ "$INSTALLEDSALTVERSION" == "$NEWSALTVERSION" ]; then
    echo "You are already running the correct version of Salt for Security Onion."
  else
    UPGRADESALT=1
  fi
}

upgrade_salt() {
  SALTUPGRADED=True
  echo "Performing upgrade of Salt from $INSTALLEDSALTVERSION to $NEWSALTVERSION."
  echo ""
  # If CentOS
  if [[ $OS == 'centos' ]]; then
    echo "Removing yum versionlock for Salt."
    echo ""
    yum versionlock delete "salt-*"
    echo "Updating Salt packages."
    echo ""
    set +e
    run_check_net_err \
      "sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -r -F -M -x python3 stable \"$NEWSALTVERSION\"" \
      "Could not update salt, please check $SOUP_LOG for details."
    set -e
    echo "Applying yum versionlock for Salt."
    echo ""
    yum versionlock add "salt-*"
  # Else do Ubuntu things
  elif [[ $OS == 'ubuntu' ]]; then
    echo "Removing apt hold for Salt."
    echo ""
    apt-mark unhold "salt-common"
    apt-mark unhold "salt-master"
    apt-mark unhold "salt-minion"
    echo "Updating Salt packages."
    echo ""
    set +e
    run_check_net_err \
      "sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -r -F -M -x python3 stable \"$NEWSALTVERSION\"" \
      "Could not update salt, please check $SOUP_LOG for details."
    set -e
    echo "Applying apt hold for Salt."
    echo ""
    apt-mark hold "salt-common"
    apt-mark hold "salt-master"
    apt-mark hold "salt-minion"
  fi

  echo "Checking if Salt was upgraded."
  echo ""
  # Check that Salt was upgraded
  SALTVERSIONPOSTUPGRADE=$(salt --versions-report | grep Salt: | awk '{print $2}')
  if [[ "$SALTVERSIONPOSTUPGRADE" != "$NEWSALTVERSION" ]]; then
    echo "Salt upgrade failed. Check for indicators of failure in $SOUP_LOG."
    echo "Once the issue is resolved, run soup again."
    echo "Exiting."
    echo ""
    exit 0
  else
    echo "Salt upgrade success."
    echo ""
    echo "Removing /opt/so/state files for patched Salt InfluxDB module and state. This is due to Salt being upgraded and needing to patch the files again."
    rm -vrf /opt/so/state/influxdb_continuous_query.py.patched /opt/so/state/influxdb_retention_policy.py.patched /opt/so/state/influxdbmod.py.patched
  fi

}

update_repo() {
  if [[ "$OS" == "centos" ]]; then
    echo "Performing repo changes."
    # Import GPG Keys
    gpg_rpm_import
    echo "Disabling fastestmirror."
    disable_fastestmirror
    echo "Deleting unneeded repo files."
    DELREPOS=('CentOS-Base' 'CentOS-CR' 'CentOS-Debuginfo' 'docker-ce' 'CentOS-fasttrack' 'CentOS-Media' 'CentOS-Sources' 'CentOS-Vault' 'CentOS-x86_64-kernel' 'epel' 'epel-testing' 'saltstack' 'salt-latest' 'wazuh')

    for DELREPO in "${DELREPOS[@]}"; do
      if [[ -f "/etc/yum.repos.d/$DELREPO.repo" ]]; then
        echo "Deleting $DELREPO.repo"
        rm -f "/etc/yum.repos.d/$DELREPO.repo"
      fi
    done
    if [[ $is_airgap -eq 1 ]]; then
      # Copy the new repo file if not airgap
      cp $UPDATE_DIR/salt/repo/client/files/centos/securityonion.repo /etc/yum.repos.d/
      yum clean all
      yum repolist
    fi
  elif [[ "$OS" == "ubuntu" ]]; then
    ubuntu_version=$(grep VERSION_ID /etc/os-release | awk -F '[ "]' '{print $2}')

    if grep -q "UBUNTU_CODENAME=bionic" /etc/os-release; then
      OSVER=bionic
    elif grep -q "UBUNTU_CODENAME=focal" /etc/os-release; then
      OSVER=focal
    else
      echo "We do not support your current version of Ubuntu."
      exit 1
    fi

    rm -f /etc/apt/sources.list.d/salt.list
    echo "deb https://repo.securityonion.net/file/securityonion-repo/ubuntu/$ubuntu_version/amd64/salt3004.2/ $OSVER main" > /etc/apt/sources.list.d/saltstack.list
    apt-get update
  fi
}

verify_latest_update_script() {
  # Check to see if the update scripts match. If not, run the new one.
  CURRENTSOUP=$(md5sum /usr/sbin/soup | awk '{print $1}')
  GITSOUP=$(md5sum $UPDATE_DIR/salt/common/tools/sbin/soup | awk '{print $1}')
  CURRENTCMN=$(md5sum /usr/sbin/so-common | awk '{print $1}')
  GITCMN=$(md5sum $UPDATE_DIR/salt/common/tools/sbin/so-common | awk '{print $1}')
  CURRENTIMGCMN=$(md5sum /usr/sbin/so-image-common | awk '{print $1}')
  GITIMGCMN=$(md5sum $UPDATE_DIR/salt/common/tools/sbin/so-image-common | awk '{print $1}')
  CURRENTSOFIREWALL=$(md5sum /usr/sbin/so-firewall | awk '{print $1}')
  GITSOFIREWALL=$(md5sum $UPDATE_DIR/salt/common/tools/sbin/so-firewall | awk '{print $1}')

  if [[ "$CURRENTSOUP" == "$GITSOUP" && "$CURRENTCMN" == "$GITCMN" && "$CURRENTIMGCMN" == "$GITIMGCMN" && "$CURRENTSOFIREWALL" == "$GITSOFIREWALL" ]]; then
    echo "This version of the soup script is up to date. Proceeding."
  else
    echo "You are not running the latest soup version. Updating soup and its components. This might take multiple runs to complete."
    cp $UPDATE_DIR/salt/common/tools/sbin/soup $DEFAULT_SALT_DIR/salt/common/tools/sbin/
    cp $UPDATE_DIR/salt/common/tools/sbin/so-common $DEFAULT_SALT_DIR/salt/common/tools/sbin/
    cp $UPDATE_DIR/salt/common/tools/sbin/so-image-common $DEFAULT_SALT_DIR/salt/common/tools/sbin/
    cp $UPDATE_DIR/salt/common/tools/sbin/so-firewall $DEFAULT_SALT_DIR/salt/common/tools/sbin/
    salt-call state.apply common.soup_scripts queue=True -linfo --file-root=$UPDATE_DIR/salt --local
    echo ""
    echo "soup has been updated. Please run soup again."
    exit 0
  fi
}

apply_hotfix() {
  if [[ "$INSTALLEDVERSION" == "2.3.90" ]] ; then
    fix_wazuh
  elif [[ "$INSTALLEDVERSION" == "2.3.110" ]] ; then
    2_3_10_hotfix_1
  else
    echo "No actions required. ($INSTALLEDVERSION/$HOTFIXVERSION)"
  fi
}

fix_wazuh() {
  FILE="/nsm/wazuh/etc/ossec.conf"
  echo "Detecting if $FILE needs to be corrected..."
  if [ -f "$FILE" ]; then
    if head -1 $FILE | grep -q "xml version"; then
      echo "$FILE has an XML header; removing"
      sed -i 1d $FILE
      docker restart so-wazuh # cannot use so-wazuh-restart here because the salt-master service is stopped
    else
      echo "$FILE does not have an XML header, so no changes are necessary."
    fi
  else
    echo "$FILE does not exist, so no changes are necessary."
  fi
}

# Upgrade Salt to 3004.1
2_3_10_hotfix_1() {
  systemctl_func "stop" "$cron_service_name"
  # update mine items prior to stopping salt-minion and salt-master
  update_salt_mine
  stop_salt_minion
  stop_salt_master
  update_repo
  # Does Salt need to be upgraded? If so, update it.
  if [[ $UPGRADESALT -eq 1 ]]; then
    echo "Upgrading Salt"
    # Update the repo files so it can actually upgrade
    upgrade_salt
  fi
  rm -f /opt/so/state/influxdb_continuous_query.py.patched /opt/so/state/influxdbmod.py.patched /opt/so/state/influxdb_retention_policy.py.patched
  systemctl_func "start" "salt-master"
  salt-call state.apply salt.python3-influxdb -l info
  systemctl_func "start" "salt-minion"
  systemctl_func "start" "$cron_service_name"

}

main() {
  trap 'check_err $?' EXIT

  if [ -n "$BRANCH" ]; then
    echo "SOUP will use the $BRANCH branch."
    echo ""
  fi

  echo "### Preparing soup at $(date) ###"
  echo ""

  set_os
  set_cron_service_name
  if ! check_salt_master_status; then
    echo "Could not talk to salt master"
    echo "Please run 'systemctl status salt-master' to ensure the salt-master service is running and check the log at /opt/so/log/salt/master."
    echo "SOUP will now attempt to start the salt-master service and exit."
    exit 1
  fi

  echo "This node can communicate with the salt-master."

  echo "Checking to see if this is a manager."
  echo ""
  require_manager

  check_pillar_items

  echo "Checking to see if this is an airgap install."
  echo ""
  check_airgap
  if [[ $is_airgap -eq 0 && $UNATTENDED == true && -z $ISOLOC ]]; then
    echo "Missing file argument (-f <FILENAME>) for unattended airgap upgrade."
    exit 0
  fi

  set_minionid
  echo "Found that Security Onion $INSTALLEDVERSION is currently installed."
  echo ""
  if [[ $is_airgap -eq 0 ]]; then
    # Let's mount the ISO since this is airgap
    airgap_mounted
  else
    # if not airgap but -f was used
    if [[ ! -z "$ISOLOC" ]]; then
      airgap_mounted
      AGDOCKER=/tmp/soagupdate/docker
    fi
    echo "Cloning Security Onion GitHub repo into $UPDATE_DIR."
    echo "Removing previous upgrade sources."
    rm -rf $UPDATE_DIR
    echo "Cloning the Security Onion repo."
    clone_to_tmp
  fi
echo "Verifying we have the latest soup script."
|
||
verify_latest_update_script
|
||
es_version_check
|
||
es_indices_check
|
||
elastalert_indices_check
|
||
echo ""
|
||
set_palette
|
||
check_elastic_license
|
||
echo ""
|
||
check_local_mods
|
||
check_os_updates
|
||
|
||
echo "Generating new repo archive"
|
||
generate_and_clean_tarballs
|
||
if [ -f /usr/sbin/so-image-common ]; then
|
||
. /usr/sbin/so-image-common
|
||
else
|
||
add_common
|
||
fi
|
||
|
||
echo "Let's see if we need to update Security Onion."
|
||
upgrade_check
|
||
upgrade_space
|
||
|
||
echo "Checking for Salt Master and Minion updates."
|
||
upgrade_check_salt
|
||
set -e
|
||
|
||
if [[ $is_airgap -eq 0 ]]; then
|
||
update_centos_repo
|
||
yum clean all
|
||
check_os_updates
|
||
fi
|
||
|
||
if [ "$is_hotfix" == "true" ]; then
|
||
echo "Applying $HOTFIXVERSION hotfix"
|
||
copy_new_files
|
||
apply_hotfix
|
||
echo "Hotfix applied"
|
||
update_version
|
||
enable_highstate
|
||
salt-call state.highstate -l info queue=True
|
||
else
|
||
echo ""
|
||
echo "Performing upgrade from Security Onion $INSTALLEDVERSION to Security Onion $NEWVERSION."
|
||
echo ""
|
||
|
||
systemctl_func "stop" "$cron_service_name"
|
||
|
||
# update mine items prior to stopping salt-minion and salt-master
|
||
update_salt_mine
|
||
|
||
echo "Updating dockers to $NEWVERSION."
|
||
if [[ $is_airgap -eq 0 ]]; then
|
||
airgap_update_dockers
|
||
# if not airgap but -f was used
|
||
elif [[ ! -z "$ISOLOC" ]]; then
|
||
airgap_update_dockers
|
||
unmount_update
|
||
else
|
||
update_registry
|
||
set +e
|
||
update_docker_containers "soup"
|
||
set -e
|
||
fi
|
||
|
||
    stop_salt_minion

    stop_salt_master

    update_repo

    # Does Salt need to be upgraded? If so, update it.
    if [[ $UPGRADESALT -eq 1 ]]; then
      echo "Upgrading Salt"
      # Update the repo files so it can actually upgrade
      upgrade_salt
    fi

    preupgrade_changes
    echo ""

    if [[ $is_airgap -eq 0 ]]; then
      echo "Updating Rule Files to the Latest."
      update_airgap_rules
    fi

    # Only update the repo if it's airgap
    if [[ $is_airgap -eq 0 && $UPGRADESALT -ne 1 ]]; then
      update_centos_repo
    fi

    echo ""
    echo "Copying new Security Onion code from $UPDATE_DIR to $DEFAULT_SALT_DIR."
    copy_new_files
    echo ""
    update_version

    echo ""
    echo "Locking down Salt Master for upgrade at $(date +"%T.%6N")."
    masterlock

    systemctl_func "start" "salt-master"

    # Testing that salt-master is up by checking that it is connected to itself
    set +e
    echo "Waiting on the Salt Master service to be ready."
    salt-call state.show_top -l error queue=True || fail "salt-master could not be reached. Check $SOUP_LOG for details."
    set -e

    echo ""
    echo "Ensuring python modules for Salt are installed and patched."
    salt-call state.apply salt.python3-influxdb -l info queue=True
    echo ""

    # update the salt-minion configs here and start the minion
    # since the highstate was disabled above, starting the minion should not trigger a highstate
    echo ""
    echo "Ensuring salt-minion configs are up-to-date."
    salt-call state.apply salt.minion -l info queue=True
    echo ""

    # Only regenerate osquery packages if Fleet is enabled
    FLEET_MANAGER=$(lookup_pillar fleet_manager)
    FLEET_NODE=$(lookup_pillar fleet_node)
    if [[ "$FLEET_MANAGER" == "True" || "$FLEET_NODE" == "True" ]]; then
      echo ""
      echo "Regenerating Osquery Packages.... This will take several minutes."
      salt-call state.apply fleet.event_gen-packages -l info queue=True
      echo ""
    fi

    enable_highstate
echo ""
|
||
echo "Running a highstate. This could take several minutes."
|
||
set +e
|
||
salt-call state.highstate -l info queue=True
|
||
set -e
|
||
|
||
stop_salt_master
|
||
|
||
masterunlock
|
||
|
||
systemctl_func "start" "salt-master"
|
||
|
||
set +e
|
||
echo "Waiting on the Salt Master service to be ready."
|
||
salt-call state.show_top -l error queue=True || fail "salt-master could not be reached. Check $SOUP_LOG for details."
|
||
set -e
|
||
|
||
echo "Running a highstate to complete the Security Onion upgrade on this manager. This could take several minutes."
|
||
salt-call state.highstate -l info queue=True
|
||
postupgrade_changes
|
||
[[ $is_airgap -eq 0 ]] && unmount_update
|
||
|
||
echo ""
|
||
echo "Upgrade to $NEWVERSION complete."
|
||
|
||
# Everything beyond this is post-upgrade checking, don't fail past this point if something here causes an error
|
||
set +e
|
||
|
||
echo "Checking the number of minions."
|
||
NUM_MINIONS=$(ls /opt/so/saltstack/local/pillar/minions/*_*.sls | wc -l)
|
||
if [[ $UPGRADESALT -eq 1 ]] && [[ $NUM_MINIONS -gt 1 ]]; then
|
||
if [[ $is_airgap -eq 0 ]]; then
|
||
echo ""
|
||
echo "Cleaning repos on remote Security Onion nodes."
|
||
salt -C 'not *_eval and not *_helixsensor and not *_manager and not *_managersearch and not *_standalone and G@os:CentOS' cmd.run "yum clean all"
|
||
echo ""
|
||
fi
|
||
fi
|
||
|
||
echo "Checking for local modifications."
|
||
check_local_mods skip-prompt
|
||
|
||
echo "Checking sudoers file."
|
||
check_sudoers
|
||
|
||
echo "Checking for necessary user migrations."
|
||
so-user migrate
|
||
|
||
systemctl_func "start" "$cron_service_name"
|
||
|
||
if [[ -n $lsl_msg ]]; then
|
||
case $lsl_msg in
|
||
'distributed')
|
||
echo "[INFO] The value of log_size_limit in any heavy node minion pillars may be incorrect."
|
||
echo " -> We recommend checking and adjusting the values as necessary."
|
||
echo " -> Minion pillar directory: /opt/so/saltstack/local/pillar/minions/"
|
||
;;
|
||
'single-node')
|
||
# We can assume the lsl_details array has been set if lsl_msg has this value
|
||
echo "[WARNING] The value of log_size_limit (${lsl_details[0]}) does not match the recommended value of ${lsl_details[1]}."
|
||
echo " -> We recommend checking and adjusting the value as necessary."
|
||
echo " -> File: /opt/so/saltstack/local/pillar/minions/${lsl_details[2]}.sls"
|
||
;;
|
||
esac
|
||
fi
|
||
|
||
if [[ $NUM_MINIONS -gt 1 ]]; then
|
||
|
||
cat << EOF
|
||
|
||
|
||
|
||
This appears to be a distributed deployment. Other nodes should update themselves at the next Salt highstate (typically within 15 minutes). Do not manually restart anything until you know that all the search/heavy nodes in your deployment are updated. This is especially important if you are using true clustering for Elasticsearch.
|
||
|
||
Each minion is on a random 15 minute check-in period and things like network bandwidth can be a factor in how long the actual upgrade takes. If you have a heavy node on a slow link, it is going to take a while to get the containers to it. Depending on what changes happened between the versions, Elasticsearch might not be able to talk to said heavy node until the update is complete.
|
||
|
||
If it looks like you’re missing data after the upgrade, please avoid restarting services and instead make sure at least one search node has completed its upgrade. The best way to do this is to run 'sudo salt-call state.highstate' from a search node and make sure there are no errors. Typically if it works on one node it will work on the rest. Forward nodes are less complex and will update as they check in so you can monitor those from the Grid section of SOC.
|
||
|
||
For more information, please see https://docs.securityonion.net/en/2.3/soup.html#distributed-deployments.
|
||
|
||
EOF
|
||
|
||
fi
|
||
fi
|
||
|
||
if [ "$NOTIFYCUSTOMELASTICCONFIG" = true ] ; then
|
||
|
||
cat << EOF
|
||
|
||
|
||
A custom Elasticsearch configuration has been found at /opt/so/saltstack/local/elasticsearch/files/elasticsearch.yml. This file is no longer referenced in Security Onion versions >= 2.3.80.
|
||
|
||
If you still need those customizations, you'll need to manually migrate them to the new Elasticsearch config as shown at https://docs.securityonion.net/en/2.3/elasticsearch.html.
|
||
|
||
EOF
|
||
|
||
fi
|
||
|
||
echo "### soup has been served at $(date) ###"
|
||
}
while getopts ":b:f:y" opt; do
  case ${opt} in
    b )
      BATCHSIZE="$OPTARG"
      if ! [[ "$BATCHSIZE" =~ ^[1-9][0-9]*$ ]]; then
        echo "Batch size must be a number greater than 0."
        exit 1
      fi
      ;;
    y )
      if [[ ! -f /opt/so/state/yeselastic.txt ]]; then
        echo "Cannot run soup in unattended mode. You must run soup manually to accept the Elastic License."
        exit 1
      else
        UNATTENDED=true
      fi
      ;;
    f )
      ISOLOC="$OPTARG"
      ;;
    \? )
      echo "Usage: soup [-b <batch size>] [-y] [-f <iso location>]"
      exit 1
      ;;
    : )
      echo "Invalid option: $OPTARG requires an argument"
      exit 1
      ;;
  esac
done
shift $((OPTIND - 1))

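# Example invocations (illustrative only; the flags and the BRANCH variable are handled above,
# but the ISO path and branch name shown here are placeholders):
#   soup                                             # interactive update on an Internet-connected manager
#   soup -y                                          # unattended update (Elastic license already accepted)
#   soup -y -f /home/myuser/securityonion-2.x.y.iso  # unattended airgap update from a downloaded ISO
#   BRANCH=2.3.130-20220607 soup                     # update using a specific branch/tag of the public repo
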
if [[ -z $UNATTENDED ]]; then
  cat << EOF

SOUP - Security Onion UPdater

Please review the following for more information about the update process and recent updates:
https://docs.securityonion.net/soup
https://blog.securityonion.net

EOF

  cat << EOF
Press Enter to continue or Ctrl-C to cancel.
EOF

  read -r input
fi

main "$@" | tee -a $SOUP_LOG