Merge branch '2.4/dev' into kilo

This commit is contained in:
Jason Ertel
2023-10-25 09:04:36 -04:00
40 changed files with 2882 additions and 354 deletions

View File

@@ -406,12 +406,17 @@ function update_logstash_outputs() {
curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_logstash" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
}
function checkMine() {
  # Verify this minion can see its own entry in the Salt mine for the
  # given mine function. States rely on the minion seeing itself, so we
  # query via salt (not salt-run) and poll up to 20 times, 1s apart,
  # until the minion id appears in the output.
  local mine_func=$1
  retry 20 1 "salt '$MINION_ID' mine.get '\*' '$mine_func'" "$MINION_ID"
}
function updateMine() {
# Push this minion's IP addresses (scoped to interface $MNIC) into the
# Salt mine so the manager and other states can resolve them via
# network.ip_addrs.
salt "$MINION_ID" mine.send network.ip_addrs interface="$MNIC"
}
function apply_ES_state() {
# Apply the elasticsearch Salt state on this host; concurrent=True lets
# the run proceed even if another state run is already in progress.
salt-call state.apply elasticsearch concurrent=True
# Refresh this minion's mine data afterwards, retrying until the call
# reports True (presumably retry <attempts> <delay> <cmd> <expected>,
# matching the retry usage in checkMine — confirm against retry()).
retry 20 1 "salt '$MINION_ID' mine.update" True
}
function createEVAL() {
is_pcaplimit=true
add_elasticsearch_to_minion
@@ -547,8 +552,6 @@ function createSEARCHNODE() {
add_elasticsearch_to_minion
add_logstash_to_minion
add_telegraf_to_minion
updateMine
apply_ES_state
}
function createRECEIVER() {
@@ -563,6 +566,19 @@ function createDESKTOP() {
}
function testConnection() {
# the minion should be trying to auth every 10 seconds so 15 seconds should be more than enough time to see this in the log
# this retry was added because a minion may be pinged before it has authenticated and connected to the Salt master,
# causing the first ping to fail and typically wouldn't be successful until the second ping
# this check may pass without the minion being authenticated if it was previously connected and the line exists in the log
retry 15 1 "grep 'Authentication accepted from $MINION_ID' /opt/so/log/salt/master"
local retauth=$?
if [[ $retauth != 0 ]]; then
echo "The Minion did not authenticate with the Salt master in the allotted time"
echo "Deleting the key"
deleteminion
exit 1
fi
retry 15 3 "salt '$MINION_ID' test.ping" True
local ret=$?
if [[ $ret != 0 ]]; then
@@ -582,9 +598,9 @@ if [[ "$OPERATION" = 'delete' ]]; then
deleteminion
fi
if [[ "$OPERATION" = 'add' || "$OPERATION" = 'setup' ]]; then
if [[ "$OPERATION" == 'add' || "$OPERATION" == 'setup' ]]; then
# Skip this if its setup
if [ $OPERATION != 'setup' ]; then
if [[ $OPERATION == 'add' ]]; then
# Accept the salt key
acceptminion
# Test to see if the minion was accepted
@@ -605,8 +621,19 @@ if [[ "$OPERATION" = 'add' || "$OPERATION" = 'setup' ]]; then
else
add_sensoroni_to_minion
fi
create$NODETYPE
echo "Minion file created for $MINION_ID"
if [[ "$OPERATION" == 'add' ]]; then
# tell the minion to populate the mine with data from mine_functions which is populated during setup
# this only needs to happen on non managers since they handle this during setup
# and they need to wait for ca creation to update the mine
updateMine
checkMine "network.ip_addrs"
# run this async so the cli doesn't wait for a return
salt "$MINION_ID" state.highstate --async
fi
fi
if [[ "$OPERATION" = 'test' ]]; then

View File

@@ -460,7 +460,6 @@ stop_salt_master() {
echo ""
echo "Killing any queued Salt jobs on the manager."
pkill -9 -ef "/usr/bin/python3 /bin/salt" >> $SOUP_LOG 2>&1
set -e
echo ""
echo "Storing salt-master pid."
@@ -468,6 +467,7 @@ stop_salt_master() {
echo "Found salt-master PID $MASTERPID"
systemctl_func "stop" "salt-master"
timeout 30 tail --pid=$MASTERPID -f /dev/null || echo "salt-master still running at $(date +"%T.%6N") after waiting 30s. We cannot kill due to systemd restart option."
set -e
}
stop_salt_minion() {
@@ -480,14 +480,12 @@ stop_salt_minion() {
echo ""
echo "Killing Salt jobs on this node."
salt-call saltutil.kill_all_jobs --local
set -e
echo "Storing salt-minion pid."
MINIONPID=$(pgrep -f '/opt/saltstack/salt/bin/python3.10 /usr/bin/salt-minion' | head -1)
echo "Found salt-minion PID $MINIONPID"
systemctl_func "stop" "salt-minion"
set +e
timeout 30 tail --pid=$MINIONPID -f /dev/null || echo "Killing salt-minion at $(date +"%T.%6N") after waiting 30s" && pkill -9 -ef /usr/bin/salt-minion
set -e
}
@@ -578,7 +576,7 @@ update_centos_repo() {
}
update_salt_mine() {
echo "Populating the mine with network.ip_addrs pillar.host.mainint for each host."
echo "Populating the mine with mine_functions for each host."
set +e
salt \* mine.update -b 50
set -e
@@ -620,6 +618,7 @@ upgrade_check_salt() {
if [ "$INSTALLEDSALTVERSION" == "$NEWSALTVERSION" ]; then
echo "You are already running the correct version of Salt for Security Onion."
else
echo "Salt needs to be upgraded to $NEWSALTVERSION."
UPGRADESALT=1
fi
}
@@ -628,22 +627,48 @@ upgrade_salt() {
SALTUPGRADED=True
echo "Performing upgrade of Salt from $INSTALLEDSALTVERSION to $NEWSALTVERSION."
echo ""
# If CentOS
if [[ $OS == 'centos' ]]; then
# If rhel family
if [[ $is_rpm ]]; then
echo "Removing yum versionlock for Salt."
echo ""
yum versionlock delete "salt-*"
echo "Updating Salt packages."
echo ""
set +e
run_check_net_err \
"sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -r -F -M -x python3 stable \"$NEWSALTVERSION\"" \
"Could not update salt, please check $SOUP_LOG for details."
# if oracle run with -r to ignore repos set by bootstrap
if [[ $OS == 'oracle' ]]; then
run_check_net_err \
"sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -r -F -M -x python3 stable \"$NEWSALTVERSION\"" \
"Could not update salt, please check $SOUP_LOG for details."
# if another rhel family variant we want to run without -r to allow the bootstrap script to manage repos
else
run_check_net_err \
"sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -F -M -x python3 stable \"$NEWSALTVERSION\"" \
"Could not update salt, please check $SOUP_LOG for details."
fi
set -e
echo "Applying yum versionlock for Salt."
echo ""
yum versionlock add "salt-*"
# Else do Ubuntu things
elif [[ $is_deb ]]; then
echo "Removing apt hold for Salt."
echo ""
apt-mark unhold "salt-common"
apt-mark unhold "salt-master"
apt-mark unhold "salt-minion"
echo "Updating Salt packages."
echo ""
set +e
run_check_net_err \
"sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -F -M -x python3 stable \"$NEWSALTVERSION\"" \
"Could not update salt, please check $SOUP_LOG for details."
set -e
echo "Applying apt hold for Salt."
echo ""
apt-mark hold "salt-common"
apt-mark hold "salt-master"
apt-mark hold "salt-minion"
fi
echo "Checking if Salt was upgraded."
@@ -655,7 +680,7 @@ upgrade_salt() {
echo "Once the issue is resolved, run soup again."
echo "Exiting."
echo ""
exit 0
exit 1
else
echo "Salt upgrade success."
echo ""
@@ -691,13 +716,16 @@ verify_latest_update_script() {
# Keeping this block in case we need to do a hotfix that requires salt update
apply_hotfix() {
# Run version-specific hotfix steps. Only 2.4.20 currently requires
# action: re-apply the elasticfleet state, source the elastic-fleet
# helper library, remove the initial elastic-defend integration, and
# re-run the elastic-defend policy script. The commented-out branches
# are kept as templates for future hotfixes.
# if [[ "$INSTALLEDVERSION" == "2.3.90" ]] ; then
# fix_wazuh
if [[ "$INSTALLEDVERSION" == "2.4.20" ]] ; then
salt-call state.apply elasticfleet -l info queue=True
. /usr/sbin/so-elastic-fleet-common
elastic_fleet_integration_remove endpoints-initial elastic-defend-endpoints
/usr/sbin/so-elastic-fleet-integration-policy-elastic-defend
# elif [[ "$INSTALLEDVERSION" == "2.3.110" ]] ; then
# 2_3_10_hotfix_1
# else
else
echo "No actions required. ($INSTALLEDVERSION/$HOTFIXVERSION)"
# fi
fi
}
@@ -733,14 +761,8 @@ main() {
echo ""
set_os
if ! check_salt_master_status; then
echo "Could not talk to salt master"
echo "Please run 'systemctl status salt-master' to ensure the salt-master service is running and check the log at /opt/so/log/salt/master."
echo "SOUP will now attempt to start the salt-master service and exit."
exit 1
fi
echo "This node can communicate with the salt-master."
check_salt_master_status 1 || fail "Could not talk to salt master: Please run 'systemctl status salt-master' to ensure the salt-master service is running and check the log at /opt/so/log/salt/master."
echo "Checking to see if this is a manager."
echo ""
@@ -788,7 +810,7 @@ main() {
if [[ $is_airgap -eq 0 ]]; then
yum clean all
check_os_updates
elif [[ $OS == 'oel' ]]; then
elif [[ $OS == 'oracle' ]]; then
# sync remote repo down to local if not airgap
repo_sync
check_os_updates
@@ -805,7 +827,8 @@ main() {
echo "Hotfix applied"
update_version
enable_highstate
salt-call state.highstate -l info queue=True
(wait_for_salt_minion "$MINIONID" "5" '/dev/stdout' || fail "Salt minion was not running or ready.") 2>&1 | tee -a "$SOUP_LOG"
highstate
else
echo ""
echo "Performing upgrade from Security Onion $INSTALLEDVERSION to Security Onion $NEWVERSION."
@@ -826,7 +849,7 @@ main() {
else
update_registry
set +e
update_docker_containers "soup" "" "" "$SOUP_LOG"
update_docker_containers 'soup' '' '' '/dev/stdout' 2>&1 | tee -a "$SOUP_LOG"
set -e
fi
@@ -841,6 +864,14 @@ main() {
echo "Upgrading Salt"
# Update the repo files so it can actually upgrade
upgrade_salt
# for Debian-based distros, we need to stop salt again after the upgrade; the output below is from bootstrap-salt:
# * WARN: Not starting daemons on Debian based distributions
# is not working mostly because starting them is the default behaviour.
if [[ $is_deb ]]; then
stop_salt_minion
stop_salt_master
fi
fi
preupgrade_changes
@@ -878,7 +909,7 @@ main() {
# Testing that salt-master is up by checking that is it connected to itself
set +e
echo "Waiting on the Salt Master service to be ready."
salt-call state.show_top -l error queue=True || fail "salt-master could not be reached. Check $SOUP_LOG for details."
check_salt_master_status || fail "Can't access salt master or it is not ready. Check $SOUP_LOG for details."
set -e
# update the salt-minion configs here and start the minion
@@ -903,7 +934,8 @@ main() {
echo ""
echo "Running a highstate. This could take several minutes."
set +e
salt-call state.highstate -l info queue=True
(wait_for_salt_minion "$MINIONID" "5" '/dev/stdout' || fail "Salt minion was not running or ready.") 2>&1 | tee -a "$SOUP_LOG"
highstate
set -e
stop_salt_master
@@ -914,11 +946,12 @@ main() {
set +e
echo "Waiting on the Salt Master service to be ready."
salt-call state.show_top -l error queue=True || fail "salt-master could not be reached. Check $SOUP_LOG for details."
check_salt_master_status || fail "Can't access salt master or it is not ready. Check $SOUP_LOG for details."
set -e
echo "Running a highstate to complete the Security Onion upgrade on this manager. This could take several minutes."
salt-call state.highstate -l info queue=True
(wait_for_salt_minion "$MINIONID" "5" '/dev/stdout' || fail "Salt minion was not running or ready.") 2>&1 | tee -a "$SOUP_LOG"
highstate
postupgrade_changes
[[ $is_airgap -eq 0 ]] && unmount_update