Merge remote-tracking branch 'remotes/origin/dev' into pipeline

Mike Reeves
2021-05-24 09:14:03 -04:00
37 changed files with 8517 additions and 387 deletions

0
HOTFIX Normal file

@@ -1,6 +1,6 @@
## Security Onion 2.3.50
## Security Onion 2.3.51
Security Onion 2.3.50 is here!
Security Onion 2.3.51 is here!
## Screenshots


@@ -1,17 +1,17 @@
### 2.3.50 ISO image built on 2021/04/27
### 2.3.51 ISO image built on 2021/04/27
### Download and Verify
2.3.50 ISO image:
https://download.securityonion.net/file/securityonion/securityonion-2.3.50.iso
2.3.51 ISO image:
https://download.securityonion.net/file/securityonion/securityonion-2.3.51.iso
MD5: C39CEA68B5A8AFC5CFFB2481797C0374
SHA1: 00AD9F29ABE3AB495136989E62EBB8FA00DA82C6
SHA256: D77AE370D7863837A989F6735413D1DD46B866D8D135A4C363B0633E3990387E
MD5: 7CFB525BEFC0A9F2ED148F5831E387FA
SHA1: 8CC34FCCC36822B309B8168AA706B3D1EC7F3BFD
SHA256: 9892C2546C9AE5A48015160F379B070F0BE30C89693B97F3F1E1592DDCE1DEE0
Signature for ISO image:
https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.50.iso.sig
https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.51.iso.sig
Signing key:
https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/master/KEYS
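As a supplemental check (not part of this diff), the downloaded image can also be compared against the SHA256 value listed above before moving on to the GPG verification:
```
# Compute the SHA256 of the downloaded ISO and compare it to the published value
sha256sum securityonion-2.3.51.iso
```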
@@ -25,22 +25,22 @@ wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/ma
Download the signature file for the ISO:
```
wget https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.50.iso.sig
wget https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.51.iso.sig
```
Download the ISO image:
```
wget https://download.securityonion.net/file/securityonion/securityonion-2.3.50.iso
wget https://download.securityonion.net/file/securityonion/securityonion-2.3.51.iso
```
Verify the downloaded ISO image using the signature file:
```
gpg --verify securityonion-2.3.50.iso.sig securityonion-2.3.50.iso
gpg --verify securityonion-2.3.51.iso.sig securityonion-2.3.51.iso
```
The output should show "Good signature" and the Primary key fingerprint should match what's shown below:
```
gpg: Signature made Tue 27 Apr 2021 02:17:25 PM EDT using RSA key ID FE507013
gpg: Signature made Thu 20 May 2021 07:49:57 AM EDT using RSA key ID FE507013
gpg: Good signature from "Security Onion Solutions, LLC <info@securityonionsolutions.com>"
gpg: WARNING: This key is not certified with a trusted signature!
gpg: There is no indication that the signature belongs to the owner.


@@ -1,208 +0,0 @@
{%- set FLEETMANAGER = salt['pillar.get']('global:fleet_manager', False) -%}
{%- set FLEETNODE = salt['pillar.get']('global:fleet_node', False) -%}
{% set WAZUH = salt['pillar.get']('manager:wazuh', '0') %}
{% set THEHIVE = salt['pillar.get']('manager:thehive', '0') %}
{% set PLAYBOOK = salt['pillar.get']('manager:playbook', '0') %}
{% set FREQSERVER = salt['pillar.get']('manager:freq', '0') %}
{% set DOMAINSTATS = salt['pillar.get']('manager:domainstats', '0') %}
{% set ZEEKVER = salt['pillar.get']('global:mdengine', 'COMMUNITY') %}
{% set GRAFANA = salt['pillar.get']('manager:grafana', '0') %}
eval:
containers:
- so-nginx
- so-telegraf
{% if GRAFANA == '1' %}
- so-influxdb
- so-grafana
{% endif %}
- so-dockerregistry
- so-soc
- so-kratos
- so-idstools
{% if FLEETMANAGER %}
- so-mysql
- so-fleet
- so-redis
{% endif %}
- so-elasticsearch
- so-logstash
- so-kibana
- so-steno
- so-suricata
- so-zeek
- so-curator
- so-elastalert
{% if WAZUH != '0' %}
- so-wazuh
{% endif %}
- so-soctopus
{% if THEHIVE != '0' %}
- so-thehive
- so-thehive-es
- so-cortex
{% endif %}
{% if PLAYBOOK != '0' %}
- so-playbook
{% endif %}
{% if FREQSERVER != '0' %}
- so-freqserver
{% endif %}
{% if DOMAINSTATS != '0' %}
- so-domainstats
{% endif %}
heavy_node:
containers:
- so-nginx
- so-telegraf
- so-redis
- so-logstash
- so-elasticsearch
- so-curator
- so-steno
- so-suricata
- so-wazuh
- so-filebeat
{% if ZEEKVER != 'SURICATA' %}
- so-zeek
{% endif %}
helix:
containers:
- so-nginx
- so-telegraf
- so-idstools
- so-steno
- so-zeek
- so-redis
- so-logstash
- so-filebeat
hot_node:
containers:
- so-nginx
- so-telegraf
- so-logstash
- so-elasticsearch
- so-curator
manager_search:
containers:
- so-nginx
- so-telegraf
- so-soc
- so-kratos
- so-acng
- so-idstools
- so-redis
- so-logstash
- so-elasticsearch
- so-curator
- so-kibana
- so-elastalert
- so-filebeat
- so-soctopus
{% if FLEETMANAGER %}
- so-mysql
- so-fleet
- so-redis
{% endif %}
{% if WAZUH != '0' %}
- so-wazuh
{% endif %}
- so-soctopus
{% if THEHIVE != '0' %}
- so-thehive
- so-thehive-es
- so-cortex
{% endif %}
{% if PLAYBOOK != '0' %}
- so-playbook
{% endif %}
{% if FREQSERVER != '0' %}
- so-freqserver
{% endif %}
{% if DOMAINSTATS != '0' %}
- so-domainstats
{% endif %}
manager:
containers:
- so-dockerregistry
- so-nginx
- so-telegraf
{% if GRAFANA == '1' %}
- so-influxdb
- so-grafana
{% endif %}
- so-soc
- so-kratos
- so-acng
- so-idstools
- so-redis
- so-elasticsearch
- so-logstash
- so-kibana
- so-elastalert
- so-filebeat
{% if FLEETMANAGER %}
- so-mysql
- so-fleet
- so-redis
{% endif %}
{% if WAZUH != '0' %}
- so-wazuh
{% endif %}
- so-soctopus
{% if THEHIVE != '0' %}
- so-thehive
- so-thehive-es
- so-cortex
{% endif %}
{% if PLAYBOOK != '0' %}
- so-playbook
{% endif %}
{% if FREQSERVER != '0' %}
- so-freqserver
{% endif %}
{% if DOMAINSTATS != '0' %}
- so-domainstats
{% endif %}
parser_node:
containers:
- so-nginx
- so-telegraf
- so-logstash
search_node:
containers:
- so-nginx
- so-telegraf
- so-logstash
- so-elasticsearch
- so-curator
- so-filebeat
{% if WAZUH != '0' %}
- so-wazuh
{% endif %}
sensor:
containers:
- so-nginx
- so-telegraf
- so-steno
- so-suricata
{% if ZEEKVER != 'SURICATA' %}
- so-zeek
{% endif %}
- so-wazuh
- so-filebeat
warm_node:
containers:
- so-nginx
- so-telegraf
- so-elasticsearch
fleet:
containers:
{% if FLEETNODE %}
- so-mysql
- so-fleet
- so-redis
- so-filebeat
- so-nginx
- so-telegraf
{% endif %}
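The role-to-container mappings above are driven entirely by pillar values. As a quick illustration (not part of this diff), the relevant toggles can be inspected on a minion with pillar.get:
```
# Illustrative only: check the pillar keys referenced by the template above
salt-call pillar.get global:mdengine
salt-call pillar.get manager:grafana
salt-call pillar.get global:fleet_manager
```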


@@ -0,0 +1,64 @@
#!/bin/bash
# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
. /usr/sbin/so-common
UPDATE_DIR=/tmp/sohotfixapply
if [ -z "$1" ]; then
echo "No tarball given. Please provide the filename so I can run the hotfix"
echo "so-airgap-hotfixapply /path/to/sohotfix.tar"
exit 1
else
if [ ! -f "$1" ]; then
echo "Unable to find $1. Make sure your path is correct and retry."
exit 1
else
echo "Determining if we need to apply this hotfix"
rm -rf $UPDATE_DIR
mkdir -p $UPDATE_DIR
tar xvf $1 -C $UPDATE_DIR
# Compare some versions
NEWVERSION=$(cat $UPDATE_DIR/VERSION)
HOTFIXVERSION=$(cat $UPDATE_DIR/HOTFIX)
CURRENTHOTFIX=$(cat /etc/sohotfix)
INSTALLEDVERSION=$(cat /etc/soversion)
if [ "$INSTALLEDVERSION" == "$NEWVERSION" ]; then
echo "Checking to see if there are hotfixes needed"
if [ "$HOTFIXVERSION" == "$CURRENTHOTFIX" ]; then
echo "You are already running the latest version of Security Onion."
rm -rf $UPDATE_DIR
exit 1
else
echo "We need to apply a hotfix"
copy_new_files
echo $HOTFIXVERSION > /etc/sohotfix
salt-call state.highstate -l info queue=True
echo "The Hotfix $HOTFIXVERSION has been applied"
# Clean up
rm -rf $UPDATE_DIR
exit 0
fi
else
echo "This hotfix is not compatible with your current version. Download the latest ISO and run soup"
rm -rf $UPDATE_DIR
fi
fi
fi


@@ -0,0 +1,33 @@
#!/bin/bash
# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Get the latest code
rm -rf /tmp/sohotfix
mkdir -p /tmp/sohotfix
cd /tmp/sohotfix
git clone https://github.com/Security-Onion-Solutions/securityonion
if [ ! -d "/tmp/sohotfix/securityonion" ]; then
echo "I was unable to get the latest code. Check your internet and try again."
exit 1
else
echo "Looks like we have the code lets create the tarball."
cd /tmp/sohotfix/securityonion
tar cvf /tmp/sohotfix/sohotfix.tar HOTFIX VERSION salt pillar
echo ""
echo "Copy /tmp/sohotfix/sohotfix.tar to portable media and then copy it to your airgap manager."
exit 0
fi
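Taken together with so-airgap-hotfixapply above, the intended hotfix workflow is roughly the following sketch (the tarball path is wherever you copied it):
```
# On an internet-connected machine: build the hotfix tarball
sudo so-airgap-gen-hotfix
# Copy /tmp/sohotfix/sohotfix.tar to portable media, then on the airgap manager:
sudo so-airgap-hotfixapply /path/to/sohotfix.tar
```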


@@ -15,6 +15,8 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
DEFAULT_SALT_DIR=/opt/so/saltstack/default
# Check for prerequisites
if [ "$(id -u)" -ne 0 ]; then
echo "This script must be run using sudo!"
@@ -122,6 +124,16 @@ check_elastic_license() {
fi
}
copy_new_files() {
# Copy new files over to the salt dir
cd $UPDATE_DIR
rsync -a salt $DEFAULT_SALT_DIR/
rsync -a pillar $DEFAULT_SALT_DIR/
chown -R socore:socore $DEFAULT_SALT_DIR/
chmod 755 $DEFAULT_SALT_DIR/pillar/firewall/addfirewall.sh
cd /tmp
}
disable_fastestmirror() {
sed -i 's/enabled=1/enabled=0/' /etc/yum/pluginconf.d/fastestmirror.conf
}
@@ -475,6 +487,7 @@ wait_for_web_response() {
expected=$2
maxAttempts=${3:-300}
logfile=/root/wait_for_web_response.log
truncate -s 0 "$logfile"
attempt=0
while [[ $attempt -lt $maxAttempts ]]; do
attempt=$((attempt+1))


@@ -0,0 +1,51 @@
#!/bin/bash
# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
wdurregex="^[0-9]+w$"
ddurregex="^[0-9]+d$"
echo -e "\nThis script is used to reduce the size of InfluxDB by removing old data and retaining only the duration specified."
echo "The duration will need to be specified as an integer followed by the duration unit without a space."
echo -e "\nFor example, to purge all data but retain the past 12 weeks, specify 12w for the duration."
echo "The duration units are as follows:"
echo " w - week(s)"
echo " d - day(s)"
while true; do
echo ""
read -p 'Enter the duration of past data that you would like to retain: ' duration
duration=$(echo $duration | tr '[:upper:]' '[:lower:]')
if [[ "$duration" =~ $wdurregex ]] || [[ "$duration" =~ $ddurregex ]]; then
break
fi
echo -e "\nInvalid duration."
done
echo -e "\nInfluxDB will now be cleaned and leave only the past $duration worth of data."
read -r -p "Are you sure you want to continue? [y/N] " yorn
if [[ "$yorn" =~ ^([yY][eE][sS]|[yY])$ ]]; then
echo -e "\nCleaning InfluxDb and saving only the past $duration. This may could take several minutes depending on how much data needs to be cleaned."
if docker exec -t so-influxdb /bin/bash -c "influx -ssl -unsafeSsl -database telegraf -execute \"DELETE FROM /.*/ WHERE \"time\" >= '2020-01-01T00:00:00.0000000Z' AND \"time\" <= now() - $duration\""; then
echo -e "\nInfluxDb clean complete."
else
echo -e "\nSomething went wrong with cleaning InfluxDB. Please verify that the so-influxdb Docker container is running, and check the log at /opt/so/log/influxdb/influxdb.log for any details."
fi
else
echo -e "\nExiting as requested."
fi
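For reference (not part of this diff), with a duration of 12w the command the script runs expands to roughly the following; running the script itself is preferable to issuing the query by hand:
```
# Rough expansion of the cleanup command for a hypothetical 12-week retention window
docker exec -t so-influxdb /bin/bash -c "influx -ssl -unsafeSsl -database telegraf -execute \"DELETE FROM /.*/ WHERE time >= '2020-01-01T00:00:00.0000000Z' AND time <= now() - 12w\""
```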


@@ -0,0 +1,47 @@
#!/bin/bash
# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
echo -e "\nThis script is used to reduce the size of InfluxDB by downsampling old data into the so_long_term retention policy."
echo -e "\nInfluxDB will now be migrated. This could take a few hours depending on how large the database is and hardware resources available."
read -r -p "Are you sure you want to continue? [y/N] " yorn
if [[ "$yorn" =~ ^([yY][eE][sS]|[yY])$ ]]; then
echo -e "\nMigrating InfluxDb started at `date`. This may take several hours depending on how much data needs to be moved."
day=0
startdate=`date`
while docker exec -t so-influxdb /bin/bash -c "influx -ssl -unsafeSsl -database telegraf -execute \"SELECT mean(*) INTO \"so_long_term\".:MEASUREMENT FROM \"autogen\"./.*/ WHERE \"time\" >= '2020-07-21T00:00:00.0000000Z' + ${day}d AND \"time\" <= '2020-07-21T00:00:00.0000000Z' + $((day+1))d GROUP BY time(5m),*\""; do
# why 2020-07-21?
migrationdate=`date -d "2020-07-21 + ${day} days" +"%y-%m-%d"`
echo "Migration of $migrationdate started at $startdate and completed at `date`."
newdaytomigrate=$(date -d "$migrationdate + 1 days" +"%s")
today=$(date +"%s")
if [ $newdaytomigrate -ge $today ]; then
break
else
((day=day+1))
startdate=`date`
echo -e "\nMigrating the next day's worth of data."
fi
done
echo -e "\nInfluxDb data migration complete."
else
echo -e "\nExiting as requested."
fi
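Once the migration completes, the result can be sanity-checked from the same container; a hedged sketch (cpu is just an example of a measurement telegraf typically writes):
```
# List the retention policies on the telegraf database
docker exec -t so-influxdb /bin/bash -c "influx -ssl -unsafeSsl -database telegraf -execute 'SHOW RETENTION POLICIES ON telegraf'"
# Peek at a few downsampled points in so_long_term
docker exec -t so-influxdb /bin/bash -c "influx -ssl -unsafeSsl -database telegraf -execute 'SELECT * FROM \"so_long_term\".\"cpu\" LIMIT 5'"
```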


@@ -22,5 +22,5 @@ salt-call state.apply playbook.db_init,playbook,playbook.automation_user_create
/usr/sbin/so-soctopus-restart
echo "Importing Plays - this will take some time...."
wait 5
sleep 5
/usr/sbin/so-playbook-ruleupdate


@@ -21,9 +21,9 @@ UPDATE_DIR=/tmp/sogh/securityonion
INSTALLEDVERSION=$(cat /etc/soversion)
POSTVERSION=$INSTALLEDVERSION
INSTALLEDSALTVERSION=$(salt --versions-report | grep Salt: | awk {'print $2'})
DEFAULT_SALT_DIR=/opt/so/saltstack/default
BATCHSIZE=5
SOUP_LOG=/root/soup.log
INFLUXDB_MIGRATION_LOG=/opt/so/log/influxdb/soup_migration.log
WHATWOULDYOUSAYYAHDOHERE=soup
add_common() {
@@ -214,16 +214,6 @@ clone_to_tmp() {
fi
}
copy_new_files() {
# Copy new files over to the salt dir
cd $UPDATE_DIR
rsync -a salt $DEFAULT_SALT_DIR/
rsync -a pillar $DEFAULT_SALT_DIR/
chown -R socore:socore $DEFAULT_SALT_DIR/
chmod 755 $DEFAULT_SALT_DIR/pillar/firewall/addfirewall.sh
cd /tmp
}
generate_and_clean_tarballs() {
local new_version
new_version=$(cat $UPDATE_DIR/VERSION)
@@ -284,6 +274,7 @@ postupgrade_changes() {
[[ "$POSTVERSION" =~ rc.1 ]] && post_rc1_to_rc2
[[ "$POSTVERSION" == 2.3.20 || "$POSTVERSION" == 2.3.21 ]] && post_2.3.2X_to_2.3.30
[[ "$POSTVERSION" == 2.3.30 ]] && post_2.3.30_to_2.3.40
[[ "$POSTVERSION" == 2.3.50 ]] && post_2.3.5X_to_2.3.60
}
post_rc1_to_2.3.21() {
@@ -304,6 +295,10 @@ post_2.3.30_to_2.3.40() {
POSTVERSION=2.3.40
}
post_2.3.5X_to_2.3.60() {
POSTVERSION=2.3.60
}
rc1_to_rc2() {
@@ -572,16 +567,28 @@ update_version() {
# Update the version to the latest
echo "Updating the Security Onion version file."
echo $NEWVERSION > /etc/soversion
echo $HOTFIXVERSION > /etc/sohotfix
sed -i "/ soversion:/c\ soversion: $NEWVERSION" /opt/so/saltstack/local/pillar/global.sls
}
upgrade_check() {
# Let's make sure we actually need to update.
NEWVERSION=$(cat $UPDATE_DIR/VERSION)
HOTFIXVERSION=$(cat $UPDATE_DIR/HOTFIX)
CURRENTHOTFIX=$(cat /etc/sohotfix 2>/dev/null)
if [ "$INSTALLEDVERSION" == "$NEWVERSION" ]; then
echo "Checking to see if there are hotfixes needed"
if [ "$HOTFIXVERSION" == "$CURRENTHOTFIX" ]; then
echo "You are already running the latest version of Security Onion."
exit 0
else
echo "We need to apply a hotfix"
is_hotfix=true
fi
else
is_hotfix=false
fi
}
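For context, the hotfix check above boils down to comparing two pairs of version files; an illustrative summary (not part of this diff):
```
# Illustrative: the files consulted by upgrade_check
cat /etc/soversion        # version currently installed, e.g. 2.3.51
cat /etc/sohotfix         # hotfix level currently applied, if any
cat $UPDATE_DIR/VERSION   # version shipped with the update
cat $UPDATE_DIR/HOTFIX    # hotfix level shipped with the update
```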
upgrade_check_salt() {
@@ -709,124 +716,146 @@ upgrade_space
echo "Checking for Salt Master and Minion updates."
upgrade_check_salt
echo ""
echo "Performing upgrade from Security Onion $INSTALLEDVERSION to Security Onion $NEWVERSION."
echo ""
echo "Updating dockers to $NEWVERSION."
if [ $is_airgap -eq 0 ]; then
if [ "$is_hotfix" == "true" ]; then
echo "Applying $HOTFIXVERSION"
copy_new_files
echo ""
update_version
salt-call state.highstate -l info queue=True
else
echo ""
echo "Performing upgrade from Security Onion $INSTALLEDVERSION to Security Onion $NEWVERSION."
echo ""
echo "Updating dockers to $NEWVERSION."
if [ $is_airgap -eq 0 ]; then
airgap_update_dockers
update_centos_repo
yum clean all
check_os_updates
else
update_registry
update_docker_containers "soup"
fi
fi
echo ""
echo "Stopping Salt Minion service."
systemctl stop salt-minion
echo "Killing any remaining Salt Minion processes."
pkill -9 -ef /usr/bin/salt-minion
echo ""
echo "Stopping Salt Master service."
systemctl stop salt-master
echo ""
echo ""
echo "Stopping Salt Minion service."
systemctl stop salt-minion
echo "Killing any remaining Salt Minion processes."
pkill -9 -ef /usr/bin/salt-minion
echo ""
echo "Stopping Salt Master service."
systemctl stop salt-master
echo ""
preupgrade_changes_2.3.50_repo
# Does Salt need to be upgraded? If so, update it.
if [ "$UPGRADESALT" == "1" ]; then
echo "Upgrading Salt"
# Update the repo files so it can actually upgrade
upgrade_salt
fi
fi
echo "Checking if Salt was upgraded."
echo ""
# Check that Salt was upgraded
SALTVERSIONPOSTUPGRADE=$(salt --versions-report | grep Salt: | awk {'print $2'})
if [[ "$SALTVERSIONPOSTUPGRADE" != "$NEWSALTVERSION" ]]; then
echo "Checking if Salt was upgraded."
echo ""
# Check that Salt was upgraded
SALTVERSIONPOSTUPGRADE=$(salt --versions-report | grep Salt: | awk {'print $2'})
if [[ "$SALTVERSIONPOSTUPGRADE" != "$NEWSALTVERSION" ]]; then
echo "Salt upgrade failed. Check of indicators of failure in $SOUP_LOG."
echo "Once the issue is resolved, run soup again."
echo "Exiting."
echo ""
exit 1
else
echo "Salt upgrade success."
echo ""
fi
fi
preupgrade_changes
echo ""
if [ $is_airgap -eq 0 ]; then
echo "Updating Rule Files to the Latest."
update_airgap_rules
fi
fi
# Only update the repo if it's airgap
if [[ $is_airgap -eq 0 ]] && [[ "$UPGRADESALT" != "1" ]]; then
update_centos_repo
fi
echo ""
echo "Copying new Security Onion code from $UPDATE_DIR to $DEFAULT_SALT_DIR."
copy_new_files
echo ""
update_version
echo ""
echo "Copying new Security Onion code from $UPDATE_DIR to $DEFAULT_SALT_DIR."
copy_new_files
echo ""
update_version
echo ""
echo "Locking down Salt Master for upgrade"
masterlock
echo ""
echo "Locking down Salt Master for upgrade"
masterlock
echo ""
echo "Starting Salt Master service."
systemctl start salt-master
echo ""
echo "Starting Salt Master service."
systemctl start salt-master
# Testing that the salt-master is up by checking that it is connected to itself
retry 50 10 "salt-call state.show_top -l error" || exit 1
echo ""
echo "Ensuring python modules for Salt are installed and patched."
salt-call state.apply salt.python3-influxdb -l info queue=True
echo ""
# Only regenerate osquery packages if Fleet is enabled
FLEET_MANAGER=$(lookup_pillar fleet_manager)
FLEET_NODE=$(lookup_pillar fleet_node)
if [[ "$FLEET_MANAGER" == "True" || "$FLEET_NODE" == "True" ]]; then
echo ""
echo "Regenerating Osquery Packages.... This will take several minutes."
salt-call state.apply fleet.event_gen-packages -l info queue=True
echo ""
fi
fi
echo ""
echo "Running a highstate to complete the Security Onion upgrade on this manager. This could take several minutes."
salt-call state.highstate -l info queue=True
echo ""
echo "Upgrade from $INSTALLEDVERSION to $NEWVERSION complete."
echo ""
echo "Running a highstate to complete the Security Onion upgrade on this manager. This could take several minutes."
salt-call state.highstate -l info queue=True
echo ""
echo "Upgrade from $INSTALLEDVERSION to $NEWVERSION complete."
echo ""
echo "Stopping Salt Master to remove ACL"
systemctl stop salt-master
echo ""
echo "Stopping Salt Master to remove ACL"
systemctl stop salt-master
masterunlock
echo ""
echo "Starting Salt Master service."
systemctl start salt-master
echo "Running a highstate. This could take several minutes."
salt-call state.highstate -l info queue=True
postupgrade_changes
unmount_update
thehive_maint
echo ""
echo "Starting Salt Master service."
systemctl start salt-master
if [ "$UPGRADESALT" == "1" ]; then
# Testing that the salt-master is up by checking that it is connected to itself
retry 50 10 "salt-call state.show_top -l error" || exit 1
echo "Running a highstate. This could take several minutes."
salt-call state.highstate -l info queue=True
postupgrade_changes
unmount_update
thehive_maint
if [ "$UPGRADESALT" == "1" ]; then
if [ $is_airgap -eq 0 ]; then
echo ""
echo "Cleaning repos on remote Security Onion nodes."
salt -C 'not *_eval and not *_helixsensor and not *_manager and not *_managersearch and not *_standalone and G@os:CentOS' cmd.run "yum clean all"
echo ""
fi
fi
fi
check_sudoers
if [[ -n $lsl_msg ]]; then
case $lsl_msg in
'distributed')
echo "[INFO] The value of log_size_limit in any heavy node minion pillars may be incorrect."
@@ -840,14 +869,16 @@ if [[ -n $lsl_msg ]]; then
echo " -> File: /opt/so/saltstack/local/pillar/minions/${lsl_details[2]}.sls"
;;
esac
fi
fi
NUM_MINIONS=$(ls /opt/so/saltstack/local/pillar/minions/*_*.sls | wc -l)
if [ $NUM_MINIONS -gt 1 ]; then
cat << EOF
This appears to be a distributed deployment. Other nodes should update themselves at the next Salt highstate (typically within 15 minutes). Do not manually restart anything until you know that all the search/heavy nodes in your deployment are updated. This is especially important if you are using true clustering for Elasticsearch.
Each minion is on a random 15 minute check-in period and things like network bandwidth can be a factor in how long the actual upgrade takes. If you have a heavy node on a slow link, it is going to take a while to get the containers to it. Depending on what changes happened between the versions, Elasticsearch might not be able to talk to said heavy node until the update is complete.
@@ -855,9 +886,12 @@ Each minion is on a random 15 minute check-in period and things like network ban
If it looks like you're missing data after the upgrade, please avoid restarting services and instead make sure at least one search node has completed its upgrade. The best way to do this is to run 'sudo salt-call state.highstate' from a search node and make sure there are no errors. Typically if it works on one node it will work on the rest. Forward nodes are less complex and will update as they check in, so you can monitor those from the Grid section of SOC.
For more information, please see https://docs.securityonion.net/en/2.3/soup.html#distributed-deployments.
EOF
fi
fi
echo "### soup has been served at `date` ###"
}


@@ -352,7 +352,7 @@
],
"measurement": "zeekcaptureloss",
"orderByTime": "ASC",
"policy": "autogen",
"policy": "default",
"refId": "A",
"resultFormat": "time_series",
"select": [
@@ -2176,7 +2176,7 @@
],
"measurement": "docker_container_mem",
"orderByTime": "ASC",
"policy": "autogen",
"policy": "default",
"refId": "A",
"resultFormat": "time_series",
"select": [


@@ -1647,7 +1647,7 @@
],
"measurement": "influxsize",
"orderByTime": "ASC",
"policy": "autogen",
"policy": "default",
"refId": "A",
"resultFormat": "time_series",
"select": [


@@ -1631,7 +1631,7 @@
],
"measurement": "influxsize",
"orderByTime": "ASC",
"policy": "autogen",
"policy": "default",
"refId": "A",
"resultFormat": "time_series",
"select": [


@@ -351,7 +351,7 @@
],
"measurement": "zeekcaptureloss",
"orderByTime": "ASC",
"policy": "autogen",
"policy": "default",
"refId": "A",
"resultFormat": "time_series",
"select": [
@@ -2866,7 +2866,7 @@
],
"measurement": "healthcheck",
"orderByTime": "ASC",
"policy": "autogen",
"policy": "default",
"refId": "A",
"resultFormat": "time_series",
"select": [


@@ -4486,7 +4486,7 @@
],
"measurement": "zeekcaptureloss",
"orderByTime": "ASC",
"policy": "autogen",
"policy": "default",
"refId": "A",
"resultFormat": "time_series",
"select": [
@@ -5107,7 +5107,7 @@
],
"measurement": "influxsize",
"orderByTime": "ASC",
"policy": "autogen",
"policy": "default",
"refId": "A",
"resultFormat": "time_series",
"select": [

File diff suppressed because it is too large.


@@ -11,7 +11,7 @@
{% set GRAFANA_SETTINGS = salt['grains.filter_by'](default_settings, default='grafana', merge=salt['pillar.get']('grafana', {})) %}
{% if grains['role'] in ['so-manager', 'so-managersearch', 'so-eval', 'so-standalone'] and GRAFANA == 1 %}
{% if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone'] or (grains.role == 'so-eval' and GRAFANA == 1) %}
# Grafana all the things
grafanadir:


@@ -0,0 +1,13 @@
influxdb:
retention_policies:
so_short_term:
default: True
duration: 30d
shard_duration: 1d
so_long_term:
default: False
duration: 0d
shard_duration: 7d
downsample:
so_long_term:
resolution: 5m
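These defaults are merged with the influxdb pillar key (see the state file in the next hunk), so individual values can be overridden locally; a hedged sketch of such an override (the pillar file location is assumed, not part of this diff):
```
# e.g. in a local pillar file applied to the manager (illustrative location)
influxdb:
  retention_policies:
    so_short_term:
      duration: 90d
```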


@@ -2,12 +2,21 @@
{% if sls in allowed_states %}
{% set GRAFANA = salt['pillar.get']('manager:grafana', '0') %}
{% if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone'] or (grains.role == 'so-eval' and GRAFANA == 1) %}
{% set MANAGER = salt['grains.get']('master') %}
{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{% import_yaml 'influxdb/defaults.yaml' as default_settings %}
{% set influxdb = salt['grains.filter_by'](default_settings, default='influxdb', merge=salt['pillar.get']('influxdb', {})) %}
{% from 'salt/map.jinja' import PYTHON3INFLUX with context %}
{% from 'salt/map.jinja' import PYTHONINFLUXVERSION with context %}
{% set PYTHONINFLUXVERSIONINSTALLED = salt['cmd.run']("python3 -c \"exec('try:import influxdb; print (influxdb.__version__)\\nexcept:print(\\'Module Not Found\\')')\"", python_shell=True) %}
{% if grains['role'] in ['so-manager', 'so-managersearch', 'so-eval', 'so-standalone'] and GRAFANA == 1 %}
include:
- salt.minion
- salt.python3-influxdb
# Influx DB
influxconfdir:
file.directory:
@@ -57,6 +66,70 @@ append_so-influxdb_so-status.conf:
- name: /opt/so/conf/so-status/so-status.conf
- text: so-influxdb
# We have to make sure the influxdb module is the right version prior to the state run, since reload_modules is bugged
{% if PYTHONINFLUXVERSIONINSTALLED == PYTHONINFLUXVERSION %}
wait_for_influxdb:
http.query:
- name: 'https://{{MANAGER}}:8086/query?q=SHOW+DATABASES'
- ssl: True
- verify_ssl: False
- status: 200
- timeout: 30
- retry:
attempts: 5
interval: 60
telegraf_database:
influxdb_database.present:
- name: telegraf
- database: telegraf
- ssl: True
- verify_ssl: /etc/pki/ca.crt
- cert: ['/etc/pki/influxdb.crt', '/etc/pki/influxdb.key']
- influxdb_host: {{ MANAGER }}
- require:
- docker_container: so-influxdb
- sls: salt.python3-influxdb
- http: wait_for_influxdb
{% for rp in influxdb.retention_policies.keys() %}
{{rp}}_retention_policy:
influxdb_retention_policy.present:
- name: {{rp}}
- database: telegraf
- duration: {{influxdb.retention_policies[rp].duration}}
- shard_duration: {{influxdb.retention_policies[rp].shard_duration}}
- replication: 1
- default: {{influxdb.retention_policies[rp].get('default', 'False')}}
- ssl: True
- verify_ssl: /etc/pki/ca.crt
- cert: ['/etc/pki/influxdb.crt', '/etc/pki/influxdb.key']
- influxdb_host: {{ MANAGER }}
- require:
- docker_container: so-influxdb
- influxdb_database: telegraf_database
- file: influxdb_retention_policy.present_patch
- sls: salt.python3-influxdb
{% endfor %}
{% for dest_rp in influxdb.downsample.keys() %}
so_downsample_cq:
influxdb_continuous_query.present:
- name: so_downsample_cq
- database: telegraf
- query: SELECT mean(*) INTO "{{dest_rp}}".:MEASUREMENT FROM /.*/ GROUP BY time({{influxdb.downsample[dest_rp].resolution}}),*
- ssl: True
- verify_ssl: /etc/pki/ca.crt
- cert: ['/etc/pki/influxdb.crt', '/etc/pki/influxdb.key']
- influxdb_host: {{ MANAGER }}
- require:
- docker_container: so-influxdb
- influxdb_database: telegraf_database
- file: influxdb_continuous_query.present_patch
- sls: salt.python3-influxdb
{% endfor %}
{% endif %}
{% endif %}
{% else %}


@@ -0,0 +1,4 @@
60c60
< database, name, query, resample_time, coverage_period
---
> database, name, query, resample_time, coverage_period, **client_args


@@ -0,0 +1,16 @@
38c38
< hours = int(duration.split("h"))
---
> hours = int(duration.split("h")[0])
52c52
< def present(name, database, duration="7d", replication=1, default=False, **client_args):
---
> def present(name, database, duration="7d", replication=1, default=False, shard_duration="1d", **client_args):
77c77
< database, name, duration, replication, default, **client_args
---
> database, name, duration, replication, shard_duration, default, **client_args
119c119
< database, name, duration, replication, default, **client_args
---
> database, name, duration, replication, shard_duration, default, **client_args


@@ -0,0 +1,16 @@
427c427
< database, name, duration, replication, default=False, **client_args
---
> database, name, duration, replication, shard_duration, default=False, **client_args
462c462
< client.create_retention_policy(name, duration, replication, database, default)
---
> client.create_retention_policy(name, duration, replication, database, default, shard_duration)
468c468
< database, name, duration, replication, default=False, **client_args
---
> database, name, duration, replication, shard_duration, default=False, **client_args
504c504
< client.alter_retention_policy(name, database, duration, replication, default)
---
> client.alter_retention_policy(name, database, duration, replication, default, shard_duration)


@@ -0,0 +1,3 @@
patch_package:
pkg.installed:
- name: patch


@@ -5,10 +5,22 @@
{% set SPLITCHAR = '+' %}
{% set SALTNOTHELD = salt['cmd.run']('apt-mark showhold | grep -q salt ; echo $?', python_shell=True) %}
{% set SALTPACKAGES = ['salt-common', 'salt-master', 'salt-minion'] %}
{% set SALT_STATE_CODE_PATH = '/usr/lib/python3/dist-packages/salt/states' %}
{% set SALT_MODULE_CODE_PATH = '/usr/lib/python3/dist-packages/salt/modules' %}
{% set PYTHONINFLUXVERSION = '5.3.1' %}
{% set PYTHON3INFLUX= 'influxdb == ' ~ PYTHONINFLUXVERSION %}
{% set PYTHON3INFLUXDEPS= ['certifi', 'chardet', 'python-dateutil', 'pytz', 'requests'] %}
{% set PYTHONINSTALLER = 'pip' %}
{% else %}
{% set SPLITCHAR = '-' %}
{% set SALTNOTHELD = salt['cmd.run']('yum versionlock list | grep -q salt ; echo $?', python_shell=True) %}
{% set SALTPACKAGES = ['salt', 'salt-master', 'salt-minion'] %}
{% set SALT_STATE_CODE_PATH = '/usr/lib/python3.6/site-packages/salt/states' %}
{% set SALT_MODULE_CODE_PATH = '/usr/lib/python3.6/site-packages/salt/modules' %}
{% set PYTHONINFLUXVERSION = '5.3.1' %}
{% set PYTHON3INFLUX= 'securityonion-python3-influxdb' %}
{% set PYTHON3INFLUXDEPS= ['python36-certifi', 'python36-chardet', 'python36-dateutil', 'python36-pytz', 'python36-requests'] %}
{% set PYTHONINSTALLER = 'pkg' %}
{% endif %}
{% set INSTALLEDSALTVERSION = salt['pkg.version']('salt-minion').split(SPLITCHAR)[0] %}


@@ -8,6 +8,7 @@
include:
- salt
- salt.helper-packages
- systemd.reload
{% if INSTALLEDSALTVERSION|string != SALTVERSION|string %}
@@ -43,12 +44,24 @@ hold_salt_packages:
{% endfor %}
{% endif %}
remove_info_log_level_logfile:
file.line:
- name: /etc/salt/minion
- match: "log_level_logfile: info"
- mode: delete
remove_info_log_level:
file.line:
- name: /etc/salt/minion
- match: "log_level: info"
- mode: delete
set_log_levels:
file.append:
- name: /etc/salt/minion
- text:
- "log_level: info"
- "log_level_logfile: info"
- "log_level: error"
- "log_level_logfile: error"
- listen_in:
- service: salt_minion_service
@@ -71,3 +84,7 @@ salt_minion_service:
- name: salt-minion
- enable: True
- onlyif: test "{{INSTALLEDSALTVERSION}}" == "{{SALTVERSION}}"
patch_pkg:
pkg.installed:
- name: patch


@@ -0,0 +1,70 @@
{% from "salt/map.jinja" import SALT_STATE_CODE_PATH with context %}
{% from "salt/map.jinja" import SALT_MODULE_CODE_PATH with context %}
{% from "salt/map.jinja" import PYTHON3INFLUX with context %}
{% from "salt/map.jinja" import PYTHON3INFLUXDEPS with context %}
{% from "salt/map.jinja" import PYTHONINSTALLER with context %}
include:
- salt.helper-packages
python3_influxdb_dependencies:
{{PYTHONINSTALLER}}.installed:
- pkgs: {{ PYTHON3INFLUXDEPS }}
python3_influxdb:
{{PYTHONINSTALLER}}.installed:
- name: {{ PYTHON3INFLUX }}
# We work around the file.patch state writing ERROR to the log by using the unless and file.touch guards below
# https://github.com/saltstack/salt/pull/47010 and https://github.com/saltstack/salt/issues/52329
# https://github.com/saltstack/salt/issues/59766
influxdb_continuous_query.present_patch:
file.patch:
- name: {{ SALT_STATE_CODE_PATH }}/influxdb_continuous_query.py
- source: salt://salt/files/influxdb_continuous_query.py.patch
- require:
- {{PYTHONINSTALLER}}: python3_influxdb
- pkg: patch_package
- unless: ls /opt/so/state/influxdb_continuous_query.py.patched
influxdb_continuous_query.py.patched:
file.touch:
- name: /opt/so/state/influxdb_continuous_query.py.patched
- onchanges:
- file: influxdb_continuous_query.present_patch
#https://github.com/saltstack/salt/issues/59761
influxdb_retention_policy.present_patch:
file.patch:
- name: {{ SALT_STATE_CODE_PATH }}/influxdb_retention_policy.py
- source: salt://salt/files/influxdb_retention_policy.py.patch
- require:
- {{PYTHONINSTALLER}}: python3_influxdb
- pkg: patch_package
- unless: ls /opt/so/state/influxdb_retention_policy.py.patched
influxdb_retention_policy.py.patched:
file.touch:
- name: /opt/so/state/influxdb_retention_policy.py.patched
- onchanges:
- file: influxdb_retention_policy.present_patch
# We should be able to set reload_modules: True in this state to tell Salt to reload its Python modules, since this state may install
# or modify modules. This is bugged according to https://github.com/saltstack/salt/issues/24925
influxdbmod.py_shard_duration_patch:
file.patch:
- name: {{ SALT_MODULE_CODE_PATH }}/influxdbmod.py
- source: salt://salt/files/influxdbmod.py.patch
- require:
- {{PYTHONINSTALLER}}: python3_influxdb
- pkg: patch_package
- unless: ls /opt/so/state/influxdbmod.py.patched
influxdbmod.py.patched:
file.touch:
- name: /opt/so/state/influxdbmod.py.patched
- onchanges:
- file: influxdbmod.py_shard_duration_patch


@@ -18,6 +18,10 @@
"/joblookup?esid={:soc_id}",
"/joblookup?ncid={:network.community_id}"
]},
{ "name": "actionCyberChef", "description": "actionCyberChefHelp", "icon": "fas fa-bread-slice", "target": "_blank",
"links": [
"/cyberchef/#input={value|base64}"
]},
{ "name": "actionGoogle", "description": "actionGoogleHelp", "icon": "fab fa-google", "target": "_blank",
"links": [
"https://www.google.com/search?q={value}"


@@ -18,6 +18,10 @@
"/joblookup?esid={:soc_id}",
"/joblookup?ncid={:network.community_id}"
]},
{ "name": "actionCyberChef", "description": "actionCyberChefHelp", "icon": "fas fa-bread-slice", "target": "_blank",
"links": [
"/cyberchef/#input={value|base64}"
]},
{ "name": "actionGoogle", "description": "actionGoogleHelp", "icon": "fab fa-google", "target": "_blank",
"links": [
"https://www.google.com/search?q={value}"


@@ -54,7 +54,7 @@
"verifyCert": false
},
"influxdb": {
{%- if grains['role'] in ['so-import'] %}
{%- if grains['role'] in ['so-import'] or (grains['role'] == 'so-eval' and GRAFANA == 0) %}
"hostUrl": "",
{%- else %}
"hostUrl": "https://{{ MANAGERIP }}:8086",


@@ -79,6 +79,7 @@ removeesp12dir:
- signing_policy: influxdb
- public_key: /etc/pki/influxdb.key
- CN: {{ manager }}
- subjectAltName: DNS:{{ HOSTNAME }}
- days_remaining: 0
- days_valid: 820
- backup: True


@@ -78,6 +78,7 @@ zeekspoolownership:
file.directory:
- name: /nsm/zeek/spool
- user: 937
- max_depth: 0
- recurse:
- user


@@ -34,7 +34,7 @@ ZEEKVERSION=ZEEK
# HELIXAPIKEY=
HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
HNSENSOR=inherit
HOSTNAME=distributed-sensor
HOSTNAME=Distributed-Sensor
install_type=SENSOR
# LSINPUTBATCHCOUNT=
# LSINPUTTHREADS=


@@ -1348,15 +1348,16 @@ filter_unused_nics() {
nic_list=()
for nic in "${filtered_nics[@]}"; do
local nic_mac=$(cat "/sys/class/net/${nic}/address" 2>/dev/null)
case $(cat "/sys/class/net/${nic}/carrier" 2>/dev/null) in
1)
nic_list+=("$nic" "Link UP " "OFF")
nic_list+=("$nic" "$nic_mac Link UP " "OFF")
;;
0)
nic_list+=("$nic" "Link DOWN " "OFF")
nic_list+=("$nic" "$nic_mac Link DOWN " "OFF")
;;
*)
nic_list+=("$nic" "Link UNKNOWN " "OFF")
nic_list+=("$nic" "$nic_mac Link UNKNOWN " "OFF")
;;
esac
done
@@ -2197,9 +2198,9 @@ saltify() {
retry 50 10 "apt-get -y install salt-minion=3003+ds-1 salt-common=3003+ds-1" >> "$setup_log" 2>&1 || exit 1
retry 50 10 "apt-mark hold salt-minion salt-common" >> "$setup_log" 2>&1 || exit 1
if [[ $OSVER != 'xenial' ]]; then
retry 50 10 "apt-get -y install python3-pip python3-dateutil python3-m2crypto python3-mysqldb python3-packaging" >> "$setup_log" 2>&1 || exit 1
retry 50 10 "apt-get -y install python3-pip python3-dateutil python3-m2crypto python3-mysqldb python3-packaging python3-influxdb" >> "$setup_log" 2>&1 || exit 1
else
retry 50 10 "apt-get -y install python-pip python-dateutil python-m2crypto python-mysqldb python-packaging" >> "$setup_log" 2>&1 || exit 1
retry 50 10 "apt-get -y install python-pip python-dateutil python-m2crypto python-mysqldb python-packaging python-influxdb" >> "$setup_log" 2>&1 || exit 1
fi
fi
}


@@ -837,6 +837,13 @@ echo "1" > /root/accept_changes
set_progress_str 73 "Update playbook rules"
so-playbook-ruleupdate >> /root/setup_playbook_rule_update.log 2>&1 &
fi
if [[ "$GRAFANA" = 1 ]]; then
set_progress_str 74 "Installing InfluxDB and Grafana"
salt-call state.apply -l info influxdb >> $setup_log 2>&1
salt-call state.apply -l info grafana >> $setup_log 2>&1
fi
fi
if [[ "$OSQUERY" = 1 ]]; then
@@ -918,10 +925,11 @@ success=$(tail -10 $setup_log | grep Failed | awk '{ print $2}')
if [[ $success != 0 ]]; then SO_ERROR=1; fi
# Check entire setup log for errors or unexpected salt states and ensure cron jobs are not reporting errors to root's mailbox
if grep -q -E "ERROR|Result: False" $setup_log || [[ -s /var/spool/mail/root && "$setup_type" == "iso" ]]; then
# Ignore "Status .* was not found" due to output from salt http.query or http.wait_for_successful_query states used with retry
if grep -E "ERROR|Result: False" $setup_log | grep -qvE "Status .* was not found" || [[ -s /var/spool/mail/root && "$setup_type" == "iso" ]]; then
SO_ERROR=1
grep --color=never "ERROR" "$setup_log" > "$error_log"
grep --color=never "ERROR" "$setup_log" | grep -qvE "Status .* was not found" > "$error_log"
fi
if [[ -n $SO_ERROR ]]; then


@@ -408,6 +408,7 @@ whiptail_enable_components() {
PLAYBOOK=0
STRELKA=0
if [[ $is_eval ]]; then
COMPONENTS=$(whiptail --title "Security Onion Setup" --checklist \
"Select Components to install:" 20 75 8 \
GRAFANA "Enable Grafana for system monitoring" ON \
@@ -416,6 +417,17 @@ whiptail_enable_components() {
THEHIVE "Enable TheHive" ON \
PLAYBOOK "Enable Playbook" ON \
STRELKA "Enable Strelka" ON 3>&1 1>&2 2>&3)
else
COMPONENTS=$(whiptail --title "Security Onion Setup" --checklist \
"Select Components to install:" 20 75 7 \
OSQUERY "Enable Fleet with osquery" ON \
WAZUH "Enable Wazuh" ON \
THEHIVE "Enable TheHive" ON \
PLAYBOOK "Enable Playbook" ON \
STRELKA "Enable Strelka" ON 3>&1 1>&2 2>&3)
export "GRAFANA=1"
fi
local exitstatus=$?
whiptail_check_exitstatus $exitstatus

Binary file not shown.