Update to merge in 2.4/dev

Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
Committed by reyesj2 on 2023-11-29 13:41:23 -05:00
196 changed files with 13941 additions and 21237 deletions


@@ -16,6 +16,7 @@ include:
- kibana.secrets
- manager.sync_es_users
- manager.elasticsearch
- manager.kibana
repo_log_dir:
file.directory:
@@ -26,6 +27,15 @@ repo_log_dir:
- user
- group
yara_log_dir:
file.directory:
- name: /opt/so/log/yarasync
- user: socore
- group: socore
- recurse:
- user
- group
repo_conf_dir:
file.directory:
- name: /opt/so/conf/reposync
@@ -51,22 +61,26 @@ manager_sbin:
- user: 939
- group: 939
- file_mode: 755
- exclude_pat:
- "*_test.py"
#manager_sbin_jinja:
# file.recurse:
# - name: /usr/sbin
# - source: salt://manager/tools/sbin_jinja
# - user: 939
# - group: 939
# - file_mode: 755
# - template: jinja
yara_update_scripts:
file.recurse:
- name: /usr/sbin/
- source: salt://manager/tools/sbin_jinja/
- user: socore
- group: socore
- file_mode: 755
- template: jinja
- defaults:
EXCLUDEDRULES: {{ STRELKAMERGED.rules.excluded }}
so-repo-sync:
{% if MANAGERMERGED.reposync.enabled %}
cron.present:
{% else %}
cron.absent:
{% endif %}
- user: socore
- name: '/usr/sbin/so-repo-sync >> /opt/so/log/reposync/reposync.log 2>&1'
- identifier: so-repo-sync
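When reposync is enabled, the state above renders to an ordinary crontab entry for socore. A quick spot check on a manager might look like this (hypothetical verification; Salt typically tags the entry with the identifier):
# list socore's crontab and find the managed entry (SALT_CRON_IDENTIFIER:so-repo-sync)
crontab -l -u socore | grep -A1 so-repo-sync
# expected command: /usr/sbin/so-repo-sync >> /opt/so/log/reposync/reposync.log 2>&1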
@@ -82,7 +96,15 @@ socore_own_saltstack:
- user
- group
{% if STRELKAMERGED.rules.enabled %}
rules_dir:
file.directory:
- name: /nsm/rules/yara
- user: socore
- group: socore
- makedirs: True
strelkarepos:
file.managed:
- name: /opt/so/conf/strelka/repos.txt
@@ -91,67 +113,45 @@ strelkarepos:
- defaults:
STRELKAREPOS: {{ STRELKAMERGED.rules.repos }}
- makedirs: True
{% if GLOBALS.airgap %}
remove_strelka-yara-download:
cron.absent:
- user: socore
- identifier: strelka-yara-download
strelka-yara-update:
{% if MANAGERMERGED.reposync.enabled and not GLOBALS.airgap %}
cron.present:
{% else %}
cron.absent:
{% endif %}
- user: socore
- - name: '/usr/sbin/so-yara-update >> /nsm/strelka/log/yara-update.log 2>&1'
+ - name: '/usr/sbin/so-yara-update >> /opt/so/log/yarasync/yara-update.log 2>&1'
- identifier: strelka-yara-update
- hour: '7'
- minute: '1'
strelka-yara-download:
{% if MANAGERMERGED.reposync.enabled and not GLOBALS.airgap %}
cron.present:
{% else %}
cron.absent:
{% endif %}
- user: socore
- name: '/usr/sbin/so-yara-download >> /opt/so/log/yarasync/yara-download.log 2>&1'
- identifier: strelka-yara-download
- hour: '7'
- minute: '1'
{% if not GLOBALS.airgap %}
update_yara_rules:
cmd.run:
- name: /usr/sbin/so-yara-update
- onchanges:
- file: yara_update_scripts
{% else %}
remove_strelka-yara-update:
cron.absent:
- user: socore
- identifier: strelka-yara-update
strelka-yara-download:
cron.present:
- user: socore
- name: '/usr/sbin/so-yara-download >> /nsm/strelka/log/yara-download.log 2>&1'
- identifier: strelka-yara-download
- hour: '7'
- minute: '1'
download_yara_rules:
cmd.run:
- name: /usr/sbin/so-yara-download
- onchanges:
- file: yara_update_scripts
{% endif %}
{% endif %}
{% endif %}
{% else %}
{{sls}}_state_not_allowed:

salt/manager/kibana.sls (new file, +8)

@@ -0,0 +1,8 @@
kibana_curl_config_distributed:
file.managed:
- name: /opt/so/conf/kibana/curl.config
- source: salt://kibana/files/curl.config.template
- template: jinja
- mode: 600
- show_changes: False
- makedirs: True
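This mirrors the existing Elasticsearch curl.config pattern elsewhere in this changeset: scripts hand the managed file to curl with -K instead of embedding credentials. A minimal sketch (endpoint shown for illustration only):
# read auth and header options from the managed config
curl -K /opt/so/conf/kibana/curl.config -s -L "localhost:5601/api/fleet/settings"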


@@ -1,15 +0,0 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
. /usr/sbin/so-common
echo ""
echo "Hosts/Networks that have access to login to the Security Onion Console:"
so-firewall includedhosts analyst


@@ -1,19 +1,9 @@
#!/usr/bin/env python3
# Copyright 2014-2023 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
import os
import subprocess


@@ -187,15 +187,9 @@ function add_logstash_to_minion() {
# Security Onion Desktop
function add_desktop_to_minion() {
printf '%s\n'\
- "host:"\
- " mainint: '$MNIC'"\
"desktop:"\
" gui:"\
- " enabled: true"\
- "sensoroni:"\
- " enabled: True"\
- " config:"\
- " node_description: '${NODE_DESCRIPTION//\'/''}'" >> $PILLARFILE
+ " enabled: true" >> $PILLARFILE
}
# Add basic host info to the minion file
@@ -245,6 +239,10 @@ function add_sensor_to_minion() {
echo " threads: '$CORECOUNT'" >> $PILLARFILE
echo "pcap:" >> $PILLARFILE
echo " enabled: True" >> $PILLARFILE
if [[ $is_pcaplimit ]]; then
echo " config:" >> $PILLARFILE
echo " diskfreepercentage: 60" >> $PILLARFILE
fi
echo " " >> $PILLARFILE
}
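With is_pcaplimit set (createEVAL, createSTANDALONE, and createHEAVYNODE below set it), the lines above emit a pcap fragment into the minion pillar along these lines (indentation illustrative):
pcap:
  enabled: True
  config:
    diskfreepercentage: 60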
@@ -408,13 +406,19 @@ function update_logstash_outputs() {
curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_logstash" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
}
function checkMine() {
local func=$1
# make sure the minion sees itself in the mine, since states need that mine data (as opposed to using salt-run)
retry 20 1 "salt '$MINION_ID' mine.get '\*' '$func'" "$MINION_ID"
}
function updateMine() {
salt "$MINION_ID" mine.send network.ip_addrs interface="$MNIC"
}
function apply_ES_state() {
salt-call state.apply elasticsearch concurrent=True
retry 20 1 "salt '$MINION_ID' mine.update" True
}
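These helpers wrap standard Salt mine calls; run by hand against a single minion they would look roughly like this (minion id and interface are placeholders):
# what updateMine does: publish this minion's IPs into the mine
salt 'sensor1_sensor' mine.send network.ip_addrs interface=eth0
# what checkMine waits for: the minion can see its own mine entry
salt 'sensor1_sensor' mine.get '*' network.ip_addrs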
function createEVAL() {
is_pcaplimit=true
add_elasticsearch_to_minion
add_sensor_to_minion
add_strelka_to_minion
@@ -435,6 +439,7 @@ function createEVAL() {
}
function createSTANDALONE() {
is_pcaplimit=true
add_elasticsearch_to_minion
add_logstash_to_minion
add_sensor_to_minion
@@ -526,8 +531,9 @@ function createIDH() {
}
function createHEAVYNODE() {
is_pcaplimit=true
add_elasticsearch_to_minion
add_elastic_agent_to_minion
add_logstash_to_minion
add_sensor_to_minion
add_strelka_to_minion
@@ -546,8 +552,6 @@ function createSEARCHNODE() {
add_elasticsearch_to_minion
add_logstash_to_minion
add_telegraf_to_minion
updateMine
apply_ES_state
}
function createRECEIVER() {
@@ -561,7 +565,25 @@ function createKAFKANODE() {
# add_telegraf_to_minion
}
function createDESKTOP() {
add_desktop_to_minion
add_telegraf_to_minion
}
function testConnection() {
# the minion should be trying to auth every 10 seconds, so 15 seconds should be more than enough time to see this in the log
# this retry was added because a minion may be pinged before it has authenticated and connected to the Salt master,
# causing the first ping to fail; it typically wouldn't succeed until the second ping
# this check may pass without the minion being authenticated if it was previously connected and the line already exists in the log
retry 15 1 "grep 'Authentication accepted from $MINION_ID' /opt/so/log/salt/master"
local retauth=$?
if [[ $retauth != 0 ]]; then
echo "The Minion did not authenticate with the Salt master in the allotted time"
echo "Deleting the key"
deleteminion
exit 1
fi
retry 15 3 "salt '$MINION_ID' test.ping" True
local ret=$?
if [[ $ret != 0 ]]; then
@@ -581,9 +603,9 @@ if [[ "$OPERATION" = 'delete' ]]; then
deleteminion
fi
- if [[ "$OPERATION" = 'add' || "$OPERATION" = 'setup' ]]; then
+ if [[ "$OPERATION" == 'add' || "$OPERATION" == 'setup' ]]; then
# Skip this if it's setup
- if [ $OPERATION != 'setup' ]; then
+ if [[ $OPERATION == 'add' ]]; then
# Accept the salt key
acceptminion
# Test to see if the minion was accepted
@@ -604,8 +626,26 @@ if [[ "$OPERATION" = 'add' || "$OPERATION" = 'setup' ]]; then
else
add_sensoroni_to_minion
fi
create$NODETYPE
echo "Minion file created for $MINION_ID"
if [[ "$OPERATION" == 'add' ]]; then
# tell the minion to populate the mine with data from mine_functions, which is populated during setup
# this only needs to happen on non-manager nodes, since managers handle this during setup
# and need to wait for CA creation before updating the mine
updateMine
checkMine "network.ip_addrs"
# apply the elasticsearch state to the manager if a new searchnode was added
if [[ "$NODETYPE" == "SEARCHNODE" || "$NODETYPE" == "HEAVYNODE" ]]; then
# calls so-common and set_minionid sets MINIONID to local minion id
set_minionid
salt $MINIONID state.apply elasticsearch queue=True --async
salt $MINIONID state.apply soc queue=True --async
fi
# run this async so the CLI doesn't wait for a return
salt "$MINION_ID" state.highstate --async queue=True
fi
fi
if [[ "$OPERATION" = 'test' ]]; then


@@ -11,6 +11,8 @@ set_version
set_os
salt_minion_count
set -e
curl --retry 5 --retry-delay 60 -A "reposync/$VERSION/$OS/$(uname -r)/$MINIONCOUNT" https://sigs.securityonion.net/checkup --output /tmp/checkup
dnf reposync --norepopath -g --delete -m -c /opt/so/conf/reposync/repodownload.conf --repoid=securityonionsync --download-metadata -p /nsm/repo/
createrepo /nsm/repo


@@ -235,8 +235,8 @@ function updatePassword() {
# Update DB with new hash
echo "update identity_credentials set config=CAST('{\"hashed_password\":\"$passwordHash\"}' as BLOB), created_at=datetime('now'), updated_at=datetime('now') where identity_id='${identityId}' and identity_credential_type_id=(select id from identity_credential_types where name='password');" | sqlite3 -cmd ".timeout ${databaseTimeout}" "$databasePath"
# Deactivate MFA
- echo "delete from identity_credential_identifiers where identity_credential_id=(select id from identity_credentials where identity_id='${identityId}' and identity_credential_type_id=(select id from identity_credential_types where name in ('totp', 'webauthn')));" | sqlite3 -cmd ".timeout ${databaseTimeout}" "$databasePath"
- echo "delete from identity_credentials where identity_id='${identityId}' and identity_credential_type_id=(select id from identity_credential_types where name in ('totp', 'webauthn'));" | sqlite3 -cmd ".timeout ${databaseTimeout}" "$databasePath"
+ echo "delete from identity_credential_identifiers where identity_credential_id=(select id from identity_credentials where identity_id='${identityId}' and identity_credential_type_id=(select id from identity_credential_types where name in ('totp', 'webauthn', 'oidc')));" | sqlite3 -cmd ".timeout ${databaseTimeout}" "$databasePath"
+ echo "delete from identity_credentials where identity_id='${identityId}' and identity_credential_type_id=(select id from identity_credential_types where name in ('totp', 'webauthn', 'oidc'));" | sqlite3 -cmd ".timeout ${databaseTimeout}" "$databasePath"
[[ $? != 0 ]] && fail "Unable to update password"
fi
}
@@ -341,14 +341,19 @@ function syncElastic() {
" and ic.identity_id=i.id " \
" and ict.id=ic.identity_credential_type_id " \
" and ict.name='password' " \
" and instr(ic.config, 'hashed_password') " \
" and i.state == 'active' " \
"order by ici.identifier;" | \
sqlite3 -cmd ".timeout ${databaseTimeout}" "$databasePath")
[[ $? != 0 ]] && fail "Unable to read credential hashes from database"
- echo "${userData}" | \
- jq -r '.user + ":" + .data.hashed_password' \
- >> "$usersTmpFile"
+ user_data_formatted=$(echo "${userData}" | jq -r '.user + ":" + .data.hashed_password')
if lookup_salt_value "licensed_features" "" "pillar" | grep -x oidc; then
# generate random placeholder salt/hash for users without passwords
random_crypt=$(get_random_value 53)
user_data_formatted=$(echo "${user_data_formatted}" | sed -r "s/^(.+:)\$/\\1\$2a\$12${random_crypt}/")
fi
echo "${user_data_formatted}" >> "$usersTmpFile"
# Append the user roles
while IFS="" read -r rolePair || [ -n "$rolePair" ]; do
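The placeholder-hash branch above only fires when the oidc licensed feature is present; its sed is easiest to see on a sample passwordless row (throwaway values, single-quoted here for readability):
echo 'analyst@example.com:' | sed -r 's/^(.+:)$/\1$2a$12PLACEHOLDERSALT/'
# -> analyst@example.com:$2a$12PLACEHOLDERSALT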


@@ -0,0 +1,103 @@
#!/usr/bin/env python3
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
import os
import sys
import time
import yaml
lockFile = "/tmp/so-yaml.lock"
def showUsage(args):
print('Usage: {} <COMMAND> <YAML_FILE> [ARGS...]'.format(sys.argv[0]))
print(' General commands:')
print(' remove - Removes a yaml key, if it exists. Requires KEY arg.')
print(' help - Prints this usage information.')
print('')
print(' Where:')
print(' YAML_FILE - Path to the file that will be modified. Ex: /opt/so/conf/service/conf.yaml')
print(' KEY - YAML key, does not support \' or " characters at this time. Ex: level1.level2')
sys.exit(1)
def loadYaml(filename):
file = open(filename, "r")
content = file.read()
return yaml.safe_load(content)
def writeYaml(filename, content):
file = open(filename, "w")
return yaml.dump(content, file)
def removeKey(content, key):
pieces = key.split(".", 1)
if len(pieces) > 1:
removeKey(content[pieces[0]], pieces[1])
else:
content.pop(key, None)
def remove(args):
if len(args) != 2:
print('Missing filename or key arg', file=sys.stderr)
showUsage(None)
return
filename = args[0]
key = args[1]
content = loadYaml(filename)
removeKey(content, key)
writeYaml(filename, content)
return 0
def main():
args = sys.argv[1:]
if len(args) < 1:
showUsage(None)
return
commands = {
"help": showUsage,
"remove": remove,
}
code = 1
try:
lockAttempts = 0
maxAttempts = 30
while lockAttempts < maxAttempts:
lockAttempts = lockAttempts + 1
try:
f = open(lockFile, "x")
f.close()
break
except Exception:
if lockAttempts == 1:
print("Waiting for lock file to be released from another process...")
time.sleep(2)
if lockAttempts == maxAttempts:
print("Lock file (" + lockFile + ") could not be created; proceeding without lock.")
cmd = commands.get(args[0], showUsage)
code = cmd(args[1:])
finally:
if os.path.exists(lockFile):
os.remove(lockFile)
sys.exit(code)
if __name__ == "__main__":
main()
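A quick usage sketch of the helper (example paths; the output matches the bundled tests):
echo '{key1: {child1: 123, child2: abc}, key2: false}' > /tmp/example.yaml
so-yaml.py remove /tmp/example.yaml key1.child2
cat /tmp/example.yaml
# key1:
#   child1: 123
# key2: false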


@@ -0,0 +1,107 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
from io import StringIO
import sys
from unittest.mock import patch, MagicMock
import unittest
import importlib
soyaml = importlib.import_module("so-yaml")
class TestRemove(unittest.TestCase):
def test_main_missing_input(self):
with patch('sys.exit', new=MagicMock()) as sysmock:
with patch('sys.stdout', new=StringIO()) as mock_stdout:
sys.argv = ["cmd"]
soyaml.main()
sysmock.assert_called_once_with(1)
self.assertIn("Usage:", mock_stdout.getvalue())
def test_main_help_locked(self):
filename = "/tmp/so-yaml.lock"
file = open(filename, "w")
file.write("fake lock file")
file.close()
with patch('sys.exit', new=MagicMock()) as sysmock:
with patch('sys.stdout', new=StringIO()) as mock_stdout:
with patch('time.sleep', new=MagicMock()) as mock_sleep:
sys.argv = ["cmd", "help"]
soyaml.main()
sysmock.assert_called()
mock_sleep.assert_called_with(2)
self.assertIn("Usage:", mock_stdout.getvalue())
def test_main_help(self):
with patch('sys.exit', new=MagicMock()) as sysmock:
with patch('sys.stdout', new=StringIO()) as mock_stdout:
sys.argv = ["cmd", "help"]
soyaml.main()
sysmock.assert_called()
self.assertIn("Usage:", mock_stdout.getvalue())
def test_remove(self):
filename = "/tmp/so-yaml_test-remove.yaml"
file = open(filename, "w")
file.write("{key1: { child1: 123, child2: abc }, key2: false}")
file.close()
soyaml.remove([filename, "key1"])
file = open(filename, "r")
actual = file.read()
file.close()
expected = "key2: false\n"
self.assertEqual(actual, expected)
def test_remove_nested(self):
filename = "/tmp/so-yaml_test-remove.yaml"
file = open(filename, "w")
file.write("{key1: { child1: 123, child2: abc }, key2: false}")
file.close()
soyaml.remove([filename, "key1.child2"])
file = open(filename, "r")
actual = file.read()
file.close()
expected = "key1:\n child1: 123\nkey2: false\n"
self.assertEqual(actual, expected)
def test_remove_nested_deep(self):
filename = "/tmp/so-yaml_test-remove.yaml"
file = open(filename, "w")
file.write("{key1: { child1: 123, child2: { deep1: 45, deep2: ab } }, key2: false}")
file.close()
soyaml.remove([filename, "key1.child2.deep1"])
file = open(filename, "r")
actual = file.read()
file.close()
expected = "key1:\n child1: 123\n child2:\n deep2: ab\nkey2: false\n"
self.assertEqual(actual, expected)
def test_remove_missing_args(self):
with patch('sys.exit', new=MagicMock()) as sysmock:
with patch('sys.stderr', new=StringIO()) as mock_stdout:
filename = "/tmp/so-yaml_test-remove.yaml"
file = open(filename, "w")
file.write("{key1: { child1: 123, child2: abc }, key2: false}")
file.close()
soyaml.remove([filename])
file = open(filename, "r")
actual = file.read()
file.close()
expected = "{key1: { child1: 123, child2: abc }, key2: false}"
self.assertEqual(actual, expected)
sysmock.assert_called_once_with(1)
self.assertIn("Missing filename or key arg", mock_stdout.getvalue())


@@ -171,6 +171,13 @@ airgap_update_dockers() {
fi
}
backup_old_states_pillars() {
tar czf /nsm/backup/$(echo $INSTALLEDVERSION)_$(date +%Y%m%d-%H%M%S)_soup_default_states_pillars.tar.gz /opt/so/saltstack/default/
tar czf /nsm/backup/$(echo $INSTALLEDVERSION)_$(date +%Y%m%d-%H%M%S)_soup_local_states_pillars.tar.gz /opt/so/saltstack/local/
}
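Each run produces two timestamped tarballs under /nsm/backup, named like the following (illustrative values):
# /nsm/backup/2.4.20_20231129-134123_soup_default_states_pillars.tar.gz
# /nsm/backup/2.4.20_20231129-134123_soup_local_states_pillars.tar.gz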
update_registry() {
docker stop so-dockerregistry
docker rm so-dockerregistry
@@ -303,6 +310,7 @@ check_log_size_limit() {
check_os_updates() {
# Check to see if there are OS updates
echo "Checking for OS updates."
NEEDUPDATES="We have detected missing operating system (OS) updates. Do you want to install these OS updates now? This could take a while depending on the size of your grid and how many packages are missing, but it is recommended to keep your system updated."
OSUPDATES=$(dnf -q list updates | grep -v docker | grep -v containerd | grep -v salt | grep -v Available | wc -l)
if [[ "$OSUPDATES" -gt 0 ]]; then
@@ -395,6 +403,7 @@ preupgrade_changes() {
[[ "$INSTALLEDVERSION" == 2.4.4 ]] && up_to_2.4.5
[[ "$INSTALLEDVERSION" == 2.4.5 ]] && up_to_2.4.10
[[ "$INSTALLEDVERSION" == 2.4.10 ]] && up_to_2.4.20
[[ "$INSTALLEDVERSION" == 2.4.20 ]] && up_to_2.4.30
true
}
@@ -406,7 +415,8 @@ postupgrade_changes() {
[[ "$POSTVERSION" == 2.4.3 ]] && post_to_2.4.4
[[ "$POSTVERSION" == 2.4.4 ]] && post_to_2.4.5
[[ "$POSTVERSION" == 2.4.5 ]] && post_to_2.4.10
[[ "$POSTVERSION" == 2.4.10 ]] && post_to_2.4.20
[[ "$POSTVERSION" == 2.4.20 ]] && post_to_2.4.30
true
}
@@ -421,8 +431,7 @@ post_to_2.4.4() {
}
post_to_2.4.5() {
- echo "Regenerating Elastic Agent Installers"
- /sbin/so-elastic-agent-gen-installers
+ echo "Nothing to apply"
POSTVERSION=2.4.5
}
@@ -433,10 +442,29 @@ post_to_2.4.10() {
}
post_to_2.4.20() {
- echo "Nothing to apply"
+ echo "Pruning unused docker volumes on all nodes - This process will run in the background."
+ salt --async \* cmd.run "docker volume prune -f"
POSTVERSION=2.4.20
}
post_to_2.4.30() {
echo "Regenerating Elastic Agent Installers"
/sbin/so-elastic-agent-gen-installers
salt-call state.apply ca queue=True
stop_salt_minion
mv /etc/pki/managerssl.crt /etc/pki/managerssl.crt.old
mv /etc/pki/managerssl.key /etc/pki/managerssl.key.old
systemctl_func "start" "salt-minion"
salt-call state.apply nginx queue=True
enable_highstate
POSTVERSION=2.4.30
}
repo_sync() {
echo "Sync the local repo."
su socore -c '/usr/sbin/so-repo-sync' || fail "Unable to complete so-repo-sync."
}
stop_salt_master() {
# kill all salt jobs across the grid because they hang indefinitely if they are queued when salt-master restarts
set +e
@@ -446,7 +474,6 @@ stop_salt_master() {
echo ""
echo "Killing any queued Salt jobs on the manager."
pkill -9 -ef "/usr/bin/python3 /bin/salt" >> $SOUP_LOG 2>&1
- set -e
echo ""
echo "Storing salt-master pid."
@@ -454,6 +481,7 @@ stop_salt_master() {
echo "Found salt-master PID $MASTERPID"
systemctl_func "stop" "salt-master"
timeout 30 tail --pid=$MASTERPID -f /dev/null || echo "salt-master still running at $(date +"%T.%6N") after waiting 30s. We cannot kill due to systemd restart option."
set -e
}
stop_salt_minion() {
@@ -466,14 +494,12 @@ stop_salt_minion() {
echo ""
echo "Killing Salt jobs on this node."
salt-call saltutil.kill_all_jobs --local
- set -e
echo "Storing salt-minion pid."
MINIONPID=$(pgrep -f '/opt/saltstack/salt/bin/python3.10 /usr/bin/salt-minion' | head -1)
echo "Found salt-minion PID $MINIONPID"
systemctl_func "stop" "salt-minion"
- set +e
timeout 30 tail --pid=$MINIONPID -f /dev/null || echo "Killing salt-minion at $(date +"%T.%6N") after waiting 30s" && pkill -9 -ef /usr/bin/salt-minion
set -e
}
@@ -492,7 +518,7 @@ up_to_2.4.4() {
}
up_to_2.4.5() {
- determine_elastic_agent_upgrade
+ echo "Nothing to do for 2.4.5"
INSTALLEDVERSION=2.4.5
}
@@ -509,6 +535,23 @@ up_to_2.4.20() {
INSTALLEDVERSION=2.4.20
}
up_to_2.4.30() {
# Remove older defend integration json & installed integration
rm -f /opt/so/conf/elastic-fleet/integrations/endpoints-initial/elastic-defend-endpoints.json
. $UPDATE_DIR/salt/elasticfleet/tools/sbin/so-elastic-fleet-common
elastic_fleet_integration_remove endpoints-initial elastic-defend-endpoints
rm -f /opt/so/state/eaintegrations.txt
# Elastic Update for this release, so download Elastic Agent files
determine_elastic_agent_upgrade
rm -f /opt/so/state/estemplates*.txt
INSTALLEDVERSION=2.4.30
}
determine_elastic_agent_upgrade() {
if [[ $is_airgap -eq 0 ]]; then
update_elastic_agent_airgap
@@ -554,7 +597,7 @@ update_airgap_rules() {
rsync -av $UPDATE_DIR/agrules/* /nsm/repo/rules/
}
- update_centos_repo() {
+ update_airgap_repo() {
# Update the files in the repo
echo "Syncing new updates to /nsm/repo"
rsync -av $AGREPO/* /nsm/repo/
@@ -564,9 +607,9 @@ update_centos_repo() {
}
update_salt_mine() {
- echo "Populating the mine with network.ip_addrs pillar.host.mainint for each host."
+ echo "Populating the mine with mine_functions for each host."
set +e
- salt \* cmd.run cmd='MAININT=$(salt-call pillar.get host:mainint --out=newline_values_only) && salt-call mine.send name=network.ip_addrs interface="$MAININT"'
+ salt \* mine.update -b 50
set -e
}
@@ -606,6 +649,7 @@ upgrade_check_salt() {
if [ "$INSTALLEDSALTVERSION" == "$NEWSALTVERSION" ]; then
echo "You are already running the correct version of Salt for Security Onion."
else
echo "Salt needs to be upgraded to $NEWSALTVERSION."
UPGRADESALT=1
fi
}
@@ -614,22 +658,48 @@ upgrade_salt() {
SALTUPGRADED=True
echo "Performing upgrade of Salt from $INSTALLEDSALTVERSION to $NEWSALTVERSION."
echo ""
- # If CentOS
- if [[ $OS == 'centos' ]]; then
+ # If rhel family
+ if [[ $is_rpm ]]; then
echo "Removing yum versionlock for Salt."
echo ""
yum versionlock delete "salt-*"
echo "Updating Salt packages."
echo ""
set +e
- run_check_net_err \
- "sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -r -F -M -x python3 stable \"$NEWSALTVERSION\"" \
- "Could not update salt, please check $SOUP_LOG for details."
# if oracle run with -r to ignore repos set by bootstrap
if [[ $OS == 'oracle' ]]; then
run_check_net_err \
"sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -r -F -M -x python3 stable \"$NEWSALTVERSION\"" \
"Could not update salt, please check $SOUP_LOG for details."
# if another rhel family variant we want to run without -r to allow the bootstrap script to manage repos
else
run_check_net_err \
"sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -F -M -x python3 stable \"$NEWSALTVERSION\"" \
"Could not update salt, please check $SOUP_LOG for details."
fi
set -e
echo "Applying yum versionlock for Salt."
echo ""
yum versionlock add "salt-*"
# Else do Ubuntu things
elif [[ $is_deb ]]; then
echo "Removing apt hold for Salt."
echo ""
apt-mark unhold "salt-common"
apt-mark unhold "salt-master"
apt-mark unhold "salt-minion"
echo "Updating Salt packages."
echo ""
set +e
run_check_net_err \
"sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -F -M -x python3 stable \"$NEWSALTVERSION\"" \
"Could not update salt, please check $SOUP_LOG for details."
set -e
echo "Applying apt hold for Salt."
echo ""
apt-mark hold "salt-common"
apt-mark hold "salt-master"
apt-mark hold "salt-minion"
fi
echo "Checking if Salt was upgraded."
@@ -641,7 +711,7 @@ upgrade_salt() {
echo "Once the issue is resolved, run soup again."
echo "Exiting."
echo ""
- exit 0
+ exit 1
else
echo "Salt upgrade success."
echo ""
@@ -677,13 +747,31 @@ verify_latest_update_script() {
# Keeping this block in case we need to do a hotfix that requires salt update
apply_hotfix() {
# if [[ "$INSTALLEDVERSION" == "2.3.90" ]] ; then
# fix_wazuh
# elif [[ "$INSTALLEDVERSION" == "2.3.110" ]] ; then
# 2_3_10_hotfix_1
# else
if [[ "$INSTALLEDVERSION" == "2.4.20" ]] ; then
salt-call state.apply elasticfleet -l info queue=True
. /usr/sbin/so-elastic-fleet-common
elastic_fleet_integration_remove endpoints-initial elastic-defend-endpoints
/usr/sbin/so-elastic-fleet-integration-policy-elastic-defend
elif [[ "$INSTALLEDVERSION" == "2.4.30" ]] ; then
if [[ -f /etc/pki/managerssl.key.old ]]; then
echo "Skipping Certificate Generation"
else
rm -f /opt/so/conf/elastic-fleet/integrations/endpoints-initial/elastic-defend-endpoints.json
so-kibana-restart --force
so-kibana-api-check
. /usr/sbin/so-elastic-fleet-common
elastic_fleet_integration_remove endpoints-initial elastic-defend-endpoints
rm -f /opt/so/state/eaintegrations.txt
salt-call state.apply ca queue=True
stop_salt_minion
mv /etc/pki/managerssl.crt /etc/pki/managerssl.crt.old
mv /etc/pki/managerssl.key /etc/pki/managerssl.key.old
systemctl_func "start" "salt-minion"
fi
else
echo "No actions required. ($INSTALLEDVERSION/$HOTFIXVERSION)"
# fi
fi
}
@@ -719,14 +807,8 @@ main() {
echo ""
set_os
- if ! check_salt_master_status; then
- echo "Could not talk to salt master"
- echo "Please run 'systemctl status salt-master' to ensure the salt-master service is running and check the log at /opt/so/log/salt/master."
- echo "SOUP will now attempt to start the salt-master service and exit."
- exit 1
- fi
- echo "This node can communicate with the salt-master."
+ check_salt_master_status 1 || fail "Could not talk to salt master: Please run 'systemctl status salt-master' to ensure the salt-master service is running and check the log at /opt/so/log/salt/master."
echo "Checking to see if this is a manager."
echo ""
@@ -762,9 +844,7 @@ main() {
fi
echo "Verifying we have the latest soup script."
verify_latest_update_script
- echo "Checking for OS updates."
- check_os_updates
echo "Let's see if we need to update Security Onion."
upgrade_check
upgrade_space
@@ -774,18 +854,29 @@ main() {
set -e
if [[ $is_airgap -eq 0 ]]; then
- yum clean all
update_airgap_repo
dnf clean all
check_os_updates
elif [[ $OS == 'oracle' ]]; then
# sync remote repo down to local if not airgap
repo_sync
dnf clean all
check_os_updates
fi
if [ "$is_hotfix" == "true" ]; then
echo "Applying $HOTFIXVERSION hotfix"
# since we don't run the backup.config_backup state on import we won't snapshot previous version states and pillars
if [[ ! "$MINIONID" =~ "_import" ]]; then
backup_old_states_pillars
fi
copy_new_files
apply_hotfix
echo "Hotfix applied"
update_version
enable_highstate
- salt-call state.highstate -l info queue=True
+ (wait_for_salt_minion "$MINIONID" "5" '/dev/stdout' || fail "Salt minion was not running or ready.") 2>&1 | tee -a "$SOUP_LOG"
+ highstate
else
echo ""
echo "Performing upgrade from Security Onion $INSTALLEDVERSION to Security Onion $NEWVERSION."
@@ -806,7 +897,7 @@ main() {
else
update_registry
set +e
- update_docker_containers "soup" "" "" "$SOUP_LOG"
+ update_docker_containers 'soup' '' '' '/dev/stdout' 2>&1 | tee -a "$SOUP_LOG"
set -e
fi
@@ -821,6 +912,14 @@ main() {
echo "Upgrading Salt"
# Update the repo files so it can actually upgrade
upgrade_salt
# for Debian-based distros, we need to stop Salt again after the upgrade; the output below is from bootstrap-salt:
# * WARN: Not starting daemons on Debian based distributions
# is not working mostly because starting them is the default behaviour.
if [[ $is_deb ]]; then
stop_salt_minion
stop_salt_master
fi
fi
preupgrade_changes
@@ -831,9 +930,11 @@ main() {
update_airgap_rules
fi
- # Only update the repo if its airgap
- if [[ $is_airgap -eq 0 && $UPGRADESALT -ne 1 ]]; then
- update_centos_repo
# since we don't run the backup.config_backup state on import we won't snapshot previous version states and pillars
if [[ ! "$MINIONID" =~ "_import" ]]; then
echo ""
echo "Creating snapshots of default and local Salt states and pillars and saving to /nsm/backup/"
backup_old_states_pillars
fi
echo ""
@@ -851,7 +952,7 @@ main() {
# Testing that salt-master is up by checking that is it connected to itself
set +e
echo "Waiting on the Salt Master service to be ready."
- salt-call state.show_top -l error queue=True || fail "salt-master could not be reached. Check $SOUP_LOG for details."
+ check_salt_master_status || fail "Can't access salt master or it is not ready. Check $SOUP_LOG for details."
set -e
# update the salt-minion configs here and start the minion
@@ -876,7 +977,8 @@ main() {
echo ""
echo "Running a highstate. This could take several minutes."
set +e
- salt-call state.highstate -l info queue=True
+ (wait_for_salt_minion "$MINIONID" "5" '/dev/stdout' || fail "Salt minion was not running or ready.") 2>&1 | tee -a "$SOUP_LOG"
+ highstate
set -e
stop_salt_master
@@ -887,11 +989,12 @@ main() {
set +e
echo "Waiting on the Salt Master service to be ready."
- salt-call state.show_top -l error queue=True || fail "salt-master could not be reached. Check $SOUP_LOG for details."
+ check_salt_master_status || fail "Can't access salt master or it is not ready. Check $SOUP_LOG for details."
set -e
echo "Running a highstate to complete the Security Onion upgrade on this manager. This could take several minutes."
- salt-call state.highstate -l info queue=True
+ (wait_for_salt_minion "$MINIONID" "5" '/dev/stdout' || fail "Salt minion was not running or ready.") 2>&1 | tee -a "$SOUP_LOG"
+ highstate
postupgrade_changes
[[ $is_airgap -eq 0 ]] && unmount_update


@@ -0,0 +1,96 @@
#!/bin/bash
#
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'vars/globals.map.jinja' import GLOBALS %}
. /usr/sbin/so-common
require_manager
# Inform user we are about to remove Elastic Fleet data
echo
echo "This script will remove the current Elastic Fleet install and all of its data and then rerun Elastic Fleet setup."
echo "This includes data previously ingested with Fleet such as Zeek and Suricata logs."
echo "Deployed Elastic Agents will no longer be enrolled and will need to be reinstalled."
echo "This script should only be used as a last resort to reinstall Elastic Fleet."
echo
echo "If you would like to proceed, type AGREE and hit ENTER."
echo
# Read user input
read INPUT
if [ "${INPUT^^}" != 'AGREE' ]; then exit 0; fi
status "Uninstalling all Elastic Agents on all Grid Nodes..."
salt \* cmd.run "elastic-agent uninstall -f" queue=True
status "Stopping Fleet Container..."
so-elastic-fleet-stop --force
status "Deleting Fleet Data from Pillars..."
so-yaml.py remove /opt/so/saltstack/local/pillar/minions/{{ GLOBALS.minion_id }}.sls elasticfleet
sed -i "/fleet_grid_enrollment_token_general.*/d" /opt/so/saltstack/local/pillar/global/soc_global.sls
sed -i "/fleet_grid_enrollment_token_heavy.*/d" /opt/so/saltstack/local/pillar/global/soc_global.sls
status "Deleting Elastic Fleet data..."
# Check to make sure that Elasticsearch is up & ready
RETURN_CODE=0
wait_for_web_response "https://localhost:9200/_cat/indices/.kibana*" "green open" 300 "curl -K /opt/so/conf/elasticsearch/curl.config"
RETURN_CODE=$?
if [[ "$RETURN_CODE" != "0" ]]; then
status "Elasticsearch not accessible, exiting script..."
exit 1
fi
ALIASES=".fleet-servers .fleet-policies-leader .fleet-agents .fleet-artifacts .fleet-enrollment-api-keys .kibana_ingest"
for ALIAS in ${ALIASES}
do
# Get all concrete indices from alias
INDXS=$(curl -K /opt/so/conf/kibana/curl.config -s -k -L -H "Content-Type: application/json" "https://localhost:9200/_resolve/index/${ALIAS}" | jq -r '.aliases[].indices[]')
# Delete all resolved indices
for INDX in ${INDXS}
do
status "Deleting $INDX"
curl -K /opt/so/conf/kibana/curl.config -s -k -L -H "Content-Type: application/json" "https://localhost:9200/${INDX}" -XDELETE
done
done
status "Deleting Fleet-related Data Streams..."
DATASTREAMS="logs-suricata-so","logs-kratos-so","logs-soc-so","logs-zeek-so"
JSON_STRING=$( jq -n \
--arg DATASTREAMLIST "$DATASTREAMS" \
'{"dataStreams":[$DATASTREAMLIST]}'
)
curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/index_management/delete_data_streams" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
status "Restarting Kibana..."
so-kibana-restart --force
status "Checking to make sure that Kibana API is up & ready..."
RETURN_CODE=0
wait_for_web_response "http://localhost:5601/api/fleet/settings" "fleet" 300 "curl -K /opt/so/conf/elasticsearch/curl.config"
RETURN_CODE=$?
if [[ "$RETURN_CODE" != "0" ]]; then
status "Kibana API not accessible, exiting script..."
exit 1
fi
status "Removing Integrations State File..."
rm -f /opt/so/state/eaintegrations.txt
status "Starting Elastic Fleet Setup..."
so-elastic-fleet-setup
status "Re-installing Elastic Agent on all Grid Nodes..."
salt \* state.apply elasticfleet.install_agent_grid queue=True
status "Elastic Fleet Reset complete...."
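After the reset completes, re-enrollment can be spot-checked from the manager; a suggested follow-up (not part of the script):
# each node's agent should report healthy once it has re-enrolled
salt \* cmd.run "elastic-agent status" queue=True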