merge with 2.3.40

This commit is contained in:
m0duspwnens
2021-03-23 14:34:52 -04:00
53 changed files with 1920 additions and 1234 deletions

View File

@@ -90,7 +90,6 @@ commonpkgs:
- ntpdate
- jq
- python3-docker
- docker-ce
- curl
- ca-certificates
- software-properties-common
@@ -104,14 +103,17 @@ commonpkgs:
- python3-dateutil
- python3-m2crypto
- python3-mysqldb
- python3-packaging
- git
- patch
heldpackages:
pkg.installed:
- pkgs:
- containerd.io: 1.2.13-2
- docker-ce: 5:19.03.14~3-0~ubuntu-bionic
- containerd.io: 1.4.4-1
- docker-ce: 5:20.10.5~3-0~ubuntu-bionic
- docker-ce-cli: 5:20.10.5~3-0~ubuntu-bionic
- docker-ce-rootless-extras: 5:20.10.5~3-0~ubuntu-bionic
- hold: True
- update_holds: True
@@ -137,6 +139,7 @@ commonpkgs:
- python36-dateutil
- python36-m2crypto
- python36-mysql
- python36-packaging
- yum-utils
- device-mapper-persistent-data
- lvm2
@@ -147,8 +150,10 @@ commonpkgs:
heldpackages:
pkg.installed:
- pkgs:
- containerd.io: 1.2.13-3.2.el7
- docker-ce: 3:19.03.14-3.el7
- containerd.io: 1.4.4-3.1.el7
- docker-ce: 3:20.10.5-3.el7
- docker-ce-cli: 1:20.10.5-3.el7
- docker-ce-rootless-extras: 20.10.5-3.el7
- hold: True
- update_holds: True
{% endif %}

View File

@@ -86,6 +86,19 @@ add_interface_bond0() {
fi
}
check_airgap() {
# Determine whether this is an airgap (offline) install by reading the
# "airgap:" key from the global pillar.
# Sets: AIRGAP (raw pillar value), is_airgap (0 = airgap, 1 = connected)
# and, on airgap installs, the staging paths used by soup for the update
# tree, docker images and OS packages.
# Single awk replaces the former cat | grep | awk pipeline.
AIRGAP=$(awk '/airgap:/ {print $2}' /opt/so/saltstack/local/pillar/global.sls)
if [[ "$AIRGAP" == "True" ]]; then
is_airgap=0
UPDATE_DIR=/tmp/soagupdate/SecurityOnion
AGDOCKER=/tmp/soagupdate/docker
AGREPO=/tmp/soagupdate/Packages
else
is_airgap=1
fi
}
check_container() {
# Succeeds (status 0) when a container whose name starts with "$1"
# shows up in the running-container list.
docker ps | grep -q "$1:"
}
@@ -97,6 +110,46 @@ check_password() {
return $?
}
check_elastic_license() {
# Skip the license prompt entirely during automated testing.
[ -n "$TESTING" ] && return
# The marker file records a prior acceptance; only prompt when absent.
if [ -f /opt/so/state/yeselastic.txt ]; then
echo "Elastic License has already been accepted"
else
elastic_license
fi
}
elastic_license() {
# Prompt the operator to accept the Elastic License. On acceptance the
# marker file /opt/so/state/yeselastic.txt is created; otherwise the
# program exits with status 1.
# read -d '' slurps the entire heredoc into $message (it returns
# non-zero because no NUL is found — harmless here).
read -r -d '' message <<- EOM
\n
Starting in Elastic Stack version 7.11, the Elastic Stack binaries are only available under the Elastic License:
https://securityonion.net/elastic-license
Please review the Elastic License:
https://www.elastic.co/licensing/elastic-license
Do you agree to the terms of the Elastic License?
If so, type AGREE to accept the Elastic License and continue. Otherwise, press Enter to exit this program without making any changes.
EOM
# whiptail writes the typed reply to stderr; the 3>&1 1>&2 2>&3 swap lets
# the command substitution capture it while the dialog stays on the tty.
AGREED=$(whiptail --title "Security Onion Setup" --inputbox \
"$message" 20 75 3>&1 1>&2 2>&3)
# ${AGREED^^} upper-cases the reply so "agree" in any case is accepted.
if [ "${AGREED^^}" = 'AGREE' ]; then
mkdir -p /opt/so/state
touch /opt/so/state/yeselastic.txt
else
echo "Starting in 2.3.40 you must accept the Elastic license if you want to run Security Onion."
exit 1
fi
}
fail() {
msg=$1
echo "ERROR: $msg"
@@ -250,6 +303,12 @@ set_minionid() {
MINIONID=$(lookup_grain id)
}
set_palette() {
# Ubuntu ships a themed newt palette; restore the original palette so
# whiptail dialogs render consistently. No-op on other distros.
if [[ "$OS" == ubuntu ]]; then
update-alternatives --set newt-palette /etc/newt/palette.original
fi
}
set_version() {
CURRENTVERSION=0.0.0
if [ -f /etc/soversion ]; then
@@ -340,6 +399,26 @@ valid_int() {
# {% raw %}
valid_proxy() {
# Validate a proxy URL: it must carry an http:// or https:// scheme and
# its host portion must be a valid IPv4 address, FQDN, or hostname.
local candidate=$1
local scheme_found=false
local scheme
for scheme in 'http://' 'https://'; do
if echo "$candidate" | grep -q "$scheme"; then
scheme_found=true
candidate=${candidate#"$scheme"}
break
fi
done
# Split host[:port] on ':'; only the host part is validated.
local parts
mapfile -t parts <<< "$(echo "$candidate" | tr ":" "\n")"
local host=${parts[0]}
if [[ $scheme_found == true ]] && { valid_ip4 "$host" || valid_fqdn "$host" || valid_hostname "$host"; }; then
return 0
fi
return 1
}
valid_string() {
local str=$1
local min_length=${2:-1}

View File

@@ -30,7 +30,7 @@ fi
USER=$1
CORTEX_KEY=$(lookup_pillar cortexkey)
CORTEX_KEY=$(lookup_pillar cortexorguserkey)
CORTEX_API_URL="$(lookup_pillar url_base)/cortex/api"
CORTEX_ORG_NAME=$(lookup_pillar cortexorgname)
CORTEX_USER=$USER

View File

@@ -30,7 +30,7 @@ fi
USER=$1
CORTEX_KEY=$(lookup_pillar cortexkey)
CORTEX_KEY=$(lookup_pillar cortexorguserkey)
CORTEX_API_URL="$(lookup_pillar url_base)/cortex/api"
CORTEX_USER=$USER

View File

@@ -0,0 +1,85 @@
#!/usr/bin/env python3
# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys, argparse, re, docker
from packaging.version import Version, InvalidVersion
from itertools import groupby, chain
def get_image_name(string) -> str:
    """Return the image name portion of a "name:version" tag.

    Everything before the last ':' is the name; a string without a
    colon yields ''.
    """
    name, _, _ = string.rpartition(':')
    return name
def get_so_image_basename(string) -> str:
    """Return the SO base name of a tag: the text after the final '/so-'."""
    return get_image_name(string).rsplit('/so-', 1)[-1]
def get_image_version(string) -> str:
    """Return a PEP 440-parseable version string from an image tag."""
    version = string.rsplit(':', 1)[-1]
    if version == 'latest':
        # 'latest' is not parseable by Version; map it to a very high
        # semver so it sorts as the newest image.
        return '999999.9.9'
    try:
        Version(version)
    except InvalidVersion:
        # Automated branch tags append "-<suffix>"; drop the final
        # hyphen-delimited substring to recover a parseable version.
        version = '-'.join(version.split('-')[:-1])
    return version
def main(quiet):
    """Prune outdated Security Onion docker images.

    Keeps the two most recent versions of every "so-*" image and removes
    the rest. Exits with status 1 if any version string cannot be parsed.

    quiet: when truthy, suppress informational output.
    """
    client = docker.from_env()
    images = client.images.list(filters={ 'dangling': False })
    # Flatten image objects into a single list of "name:version" tags.
    tags = [tag for image in images for tag in image.attrs.get('RepoTags')]
    # Keep only Security Onion images (base name begins with "so-").
    tags = [tag for tag in tags if re.match(r'^.*\/so-[^\/]*$', get_image_name(tag))]
    # groupby needs its input sorted by the same key used for grouping.
    tags.sort(key=lambda tag: get_so_image_basename(tag))
    grouped = [ list(group) for _, group in groupby(tags, lambda tag: get_so_image_basename(tag)) ]
    found_prunable = False
    for group in grouped:
        try:
            # Newest first; anything beyond the first two gets pruned.
            group.sort(key=lambda tag: Version(get_image_version(tag)), reverse=True)
            if len(group) <= 2:
                continue
            found_prunable = True
            for tag in group[2:]:
                if not quiet: print(f'Removing image {tag}')
                client.images.remove(tag)
        except InvalidVersion as e:
            print(f'so-{get_so_image_basename(group[0])}: {e.args[0]}', file=sys.stderr)
            exit(1)
    if not found_prunable and not quiet:
        print('No Security Onion images to prune')
if __name__ == "__main__":
    # -q/--quiet suppresses informational output; no other options.
    parser = argparse.ArgumentParser(add_help=False)
    parser.add_argument('-q', '--quiet', action='store_const', const=True, required=False)
    opts = parser.parse_args(sys.argv[1:])
    main(opts.quiet)

View File

@@ -0,0 +1,13 @@
# Configure the default Kibana space: hide features Security Onion does
# not use from the Kibana UI.
. /usr/sbin/so-common
# Block until Kibana answers on its app page before talking to its API.
wait_for_web_response "http://localhost:5601/app/kibana" "Elastic"
## This hackery will be removed if using Elastic Auth ##
# Let's snag a cookie from Kibana
# `curl -c -` dumps the cookie jar to stdout; the sid value is field 7.
THECOOKIE=$(curl -c - -X GET http://localhost:5601/ | grep sid | awk '{print $7}')
# Disable certain Features from showing up in the Kibana UI
echo
echo "Setting up default Space:"
# PUT the default space definition; kbn-xsrf header is required by Kibana.
curl -b "sid=$THECOOKIE" -L -X PUT "localhost:5601/api/spaces/space/default" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d' {"id":"default","name":"Default","disabledFeatures":["ml","enterpriseSearch","siem","logs","infrastructure","apm","uptime","monitoring","stackAlerts","actions","fleet"]} ' >> /opt/so/log/kibana/misc.log
echo

0
salt/common/tools/sbin/so-monitor-add Normal file → Executable file
View File

0
salt/common/tools/sbin/so-playbook-sigma-refresh Normal file → Executable file
View File

0
salt/common/tools/sbin/so-raid-status Normal file → Executable file
View File

27
salt/common/tools/sbin/so-rule Normal file → Executable file
View File

@@ -37,11 +37,9 @@ def print_err(string: str):
def check_apply(args: dict, prompt: bool = True):
cmd_arr = ['salt-call', 'state.apply', 'idstools', 'queue=True']
if args.apply:
print('Configuration updated. Applying idstools state...')
return subprocess.run(cmd_arr)
print('Configuration updated. Applying changes:')
return apply()
else:
if prompt:
message = 'Configuration updated. Would you like to apply your changes now? (y/N) '
@@ -51,12 +49,24 @@ def check_apply(args: dict, prompt: bool = True):
if answer.lower() in [ 'n', '' ]:
return 0
else:
print('Applying idstools state...')
return subprocess.run(cmd_arr)
print('Applying changes:')
return apply()
else:
return 0
def apply():
    """Sync idstools config files via salt, then run the rule updater.

    Returns the exit code of the salt sync when it fails, otherwise the
    exit code of so-rule-update.
    """
    sync_cmd = ['salt-call', 'state.apply', '-l', 'quiet', 'idstools.sync_files', 'queue=True']
    print('Syncing config files...')
    sync = subprocess.run(sync_cmd, stdout=subprocess.DEVNULL)
    if sync.returncode != 0:
        return sync.returncode
    print('Updating rules...')
    return subprocess.run(['so-rule-update']).returncode
def find_minion_pillar() -> str:
regex = '^.*_(manager|managersearch|standalone|import|eval)\.sls$'
@@ -442,10 +452,7 @@ def main():
modify.print_help()
sys.exit(0)
if isinstance(exit_code, subprocess.CompletedProcess):
sys.exit(exit_code.returncode)
else:
sys.exit(exit_code)
sys.exit(exit_code)
if __name__ == '__main__':

0
salt/common/tools/sbin/so-suricata-testrule Normal file → Executable file
View File

View File

@@ -19,13 +19,12 @@
UPDATE_DIR=/tmp/sogh/securityonion
INSTALLEDVERSION=$(cat /etc/soversion)
POSTVERSION=$INSTALLEDVERSION
INSTALLEDSALTVERSION=$(salt --versions-report | grep Salt: | awk {'print $2'})
DEFAULT_SALT_DIR=/opt/so/saltstack/default
BATCHSIZE=5
SOUP_LOG=/root/soup.log
exec 3>&1 1>${SOUP_LOG} 2>&1
add_common() {
cp $UPDATE_DIR/salt/common/tools/sbin/so-common $DEFAULT_SALT_DIR/salt/common/tools/sbin/
cp $UPDATE_DIR/salt/common/tools/sbin/so-image-common $DEFAULT_SALT_DIR/salt/common/tools/sbin/
@@ -101,19 +100,6 @@ update_registry() {
salt-call state.apply registry queue=True
}
check_airgap() {
# See if this is an airgap install
AIRGAP=$(cat /opt/so/saltstack/local/pillar/global.sls | grep airgap: | awk '{print $2}')
if [[ "$AIRGAP" == "True" ]]; then
is_airgap=0
UPDATE_DIR=/tmp/soagupdate/SecurityOnion
AGDOCKER=/tmp/soagupdate/docker
AGREPO=/tmp/soagupdate/Packages
else
is_airgap=1
fi
}
check_sudoers() {
if grep -q "so-setup" /etc/sudoers; then
echo "There is an entry for so-setup in the sudoers file, this can be safely deleted using \"visudo\"."
@@ -243,22 +229,10 @@ masterunlock() {
fi
}
playbook() {
echo "Applying playbook settings"
if [[ "$INSTALLEDVERSION" =~ rc.1 ]]; then
salt-call state.apply playbook.OLD_db_init
rm -f /opt/so/rules/elastalert/playbook/*.yaml
so-playbook-ruleupdate >> /root/soup_playbook_rule_update.log 2>&1 &
fi
if [[ "$INSTALLEDVERSION" != 2.3.30 ]]; then
so-playbook-sigma-refresh >> /root/soup_playbook_sigma_refresh.log 2>&1 &
fi
}
pillar_changes() {
preupgrade_changes() {
# This function is to add any new pillar items if needed.
echo "Checking to see if pillar changes are needed."
echo "Checking to see if changes are needed."
[[ "$INSTALLEDVERSION" =~ rc.1 ]] && rc1_to_rc2
[[ "$INSTALLEDVERSION" =~ rc.2 ]] && rc2_to_rc3
[[ "$INSTALLEDVERSION" =~ rc.3 ]] && rc3_to_2.3.0
@@ -266,6 +240,34 @@ pillar_changes() {
[[ "$INSTALLEDVERSION" == 2.3.20 || "$INSTALLEDVERSION" == 2.3.21 ]] && up_2.3.2X_to_2.3.30
}
postupgrade_changes() {
# Run post-upgrade migrations. Each migration advances POSTVERSION so
# the subsequent checks chain until the current version is reached.
echo "Running post upgrade processes."
# NOTE(review): the original called post_rc1_to_rc2, which is not defined;
# the defined migration is post_rc1_to_2.3.21 (it sets POSTVERSION=2.3.21
# so the next check fires) — confirm no other definition exists elsewhere.
[[ "$POSTVERSION" =~ rc.1 ]] && post_rc1_to_2.3.21
[[ "$POSTVERSION" == 2.3.20 || "$POSTVERSION" == 2.3.21 ]] && post_2.3.2X_to_2.3.30
[[ "$POSTVERSION" == 2.3.30 ]] && post_2.3.30_to_2.3.40
}
post_rc1_to_2.3.21() {
# Re-initialize the legacy playbook database.
salt-call state.apply playbook.OLD_db_init
# Remove stale elastalert playbook rules; they are regenerated below.
rm -f /opt/so/rules/elastalert/playbook/*.yaml
# Rule regeneration is slow, so run it in the background and log it.
so-playbook-ruleupdate >> /root/soup_playbook_rule_update.log 2>&1 &
# Advance POSTVERSION so the next migration in postupgrade_changes fires.
POSTVERSION=2.3.21
}
post_2.3.2X_to_2.3.30() {
# Refresh Sigma rules in the background, logging output for review.
so-playbook-sigma-refresh >> /root/soup_playbook_sigma_refresh.log 2>&1 &
# Advance POSTVERSION so the next migration in postupgrade_changes fires.
POSTVERSION=2.3.30
}
post_2.3.30_to_2.3.40() {
# Refresh Sigma rules in the background, logging output for review.
so-playbook-sigma-refresh >> /root/soup_playbook_sigma_refresh.log 2>&1 &
# Apply the new default Kibana space settings introduced in 2.3.40.
so-kibana-space-defaults
POSTVERSION=2.3.40
}
rc1_to_rc2() {
# Move the static file to global.sls
@@ -296,15 +298,14 @@ rc1_to_rc2() {
done </tmp/nodes.txt
# Add the nodes back using hostname
while read p; do
local NAME=$(echo $p | awk '{print $1}')
local EHOSTNAME=$(echo $p | awk -F"_" '{print $1}')
local IP=$(echo $p | awk '{print $2}')
echo "Adding the new cross cluster config for $NAME"
curl -XPUT http://localhost:9200/_cluster/settings -H'Content-Type: application/json' -d '{"persistent": {"search": {"remote": {"'$NAME'": {"skip_unavailable": "true", "seeds": ["'$EHOSTNAME':9300"]}}}}}'
local NAME=$(echo $p | awk '{print $1}')
local EHOSTNAME=$(echo $p | awk -F"_" '{print $1}')
local IP=$(echo $p | awk '{print $2}')
echo "Adding the new cross cluster config for $NAME"
curl -XPUT http://localhost:9200/_cluster/settings -H'Content-Type: application/json' -d '{"persistent": {"search": {"remote": {"'$NAME'": {"skip_unavailable": "true", "seeds": ["'$EHOSTNAME':9300"]}}}}}'
done </tmp/nodes.txt
INSTALLEDVERSION=rc.2
}
rc2_to_rc3() {
@@ -334,10 +335,10 @@ rc3_to_2.3.0() {
fi
{
echo "redis_settings:"
echo " redis_maxmemory: 827"
echo "playbook:"
echo " api_key: de6639318502476f2fa5aa06f43f51fb389a3d7f"
echo "redis_settings:"
echo " redis_maxmemory: 827"
echo "playbook:"
echo " api_key: de6639318502476f2fa5aa06f43f51fb389a3d7f"
} >> /opt/so/saltstack/local/pillar/global.sls
sed -i 's/playbook:/playbook_db:/' /opt/so/saltstack/local/pillar/secrets.sls
@@ -385,7 +386,6 @@ up_2.3.0_to_2.3.20(){
fi
INSTALLEDVERSION=2.3.20
}
up_2.3.2X_to_2.3.30() {
@@ -395,11 +395,11 @@ up_2.3.2X_to_2.3.30() {
sed -i -r "s/ (\{\{.*}})$/ '\1'/g" "$pillar"
done
# Change the IMAGEREPO
# Change the IMAGEREPO
sed -i "/ imagerepo: 'securityonion'/c\ imagerepo: 'security-onion-solutions'" /opt/so/saltstack/local/pillar/global.sls
sed -i "/ imagerepo: securityonion/c\ imagerepo: 'security-onion-solutions'" /opt/so/saltstack/local/pillar/global.sls
# Strelka rule repo pillar addition
# Strelka rule repo pillar addition
if [ $is_airgap -eq 0 ]; then
# Add manager as default Strelka YARA rule repo
sed -i "/^strelka:/a \\ repos: \n - https://$HOSTNAME/repo/rules/strelka" /opt/so/saltstack/local/pillar/global.sls;
@@ -410,16 +410,26 @@ up_2.3.2X_to_2.3.30() {
check_log_size_limit
}
space_check() {
# Check to see if there is enough space
verify_upgradespace() {
# Require at least 10 GB free on the root filesystem before upgrading.
# Returns 0 when there is enough space, 1 otherwise.
# Sets CURRENTSPACE (global) to the free space in whole gigabytes.
CURRENTSPACE=$(df -BG / | grep -v Avail | awk '{print $4}' | sed 's/.$//')
if [ "$CURRENTSPACE" -ge "10" ]; then
echo "Plenty of space for upgrading"
return 0
else
echo "You are low on disk space."
return 1
fi
}
upgrade_space() {
# Ensure there is room for the upgrade: when space is short, prune old
# docker artifacts and re-check; abort soup if that still isn't enough.
if verify_upgradespace; then
echo "You have enough space for upgrade. Proceeding with soup."
return
fi
clean_dockers
if ! verify_upgradespace; then
echo "There is not enough space to perform the upgrade. Please free up space and try again"
exit 1
fi
}
thehive_maint() {
@@ -427,16 +437,16 @@ thehive_maint() {
COUNT=0
THEHIVE_CONNECTED="no"
while [[ "$COUNT" -le 240 ]]; do
curl --output /dev/null --silent --head --fail -k "https://localhost/thehive/api/alert"
if [ $? -eq 0 ]; then
THEHIVE_CONNECTED="yes"
echo "connected!"
break
else
((COUNT+=1))
sleep 1
echo -n "."
fi
curl --output /dev/null --silent --head --fail -k "https://localhost/thehive/api/alert"
if [ $? -eq 0 ]; then
THEHIVE_CONNECTED="yes"
echo "connected!"
break
else
((COUNT+=1))
sleep 1
echo -n "."
fi
done
if [ "$THEHIVE_CONNECTED" == "yes" ]; then
echo "Migrating thehive databases if needed."
@@ -471,83 +481,84 @@ update_version() {
}
upgrade_check() {
# Let's make sure we actually need to update.
NEWVERSION=$(cat $UPDATE_DIR/VERSION)
if [ "$INSTALLEDVERSION" == "$NEWVERSION" ]; then
echo "You are already running the latest version of Security Onion."
exit 0
fi
# Let's make sure we actually need to update.
NEWVERSION=$(cat $UPDATE_DIR/VERSION)
if [ "$INSTALLEDVERSION" == "$NEWVERSION" ]; then
echo "You are already running the latest version of Security Onion."
exit 0
fi
}
upgrade_check_salt() {
NEWSALTVERSION=$(grep version: $UPDATE_DIR/salt/salt/master.defaults.yaml | awk {'print $2'})
if [ "$INSTALLEDSALTVERSION" == "$NEWSALTVERSION" ]; then
echo "You are already running the correct version of Salt for Security Onion."
else
UPGRADESALT=1
fi
NEWSALTVERSION=$(grep version: $UPDATE_DIR/salt/salt/master.defaults.yaml | awk {'print $2'})
if [ "$INSTALLEDSALTVERSION" == "$NEWSALTVERSION" ]; then
echo "You are already running the correct version of Salt for Security Onion."
else
UPGRADESALT=1
fi
}
upgrade_salt() {
SALTUPGRADED=True
echo "Performing upgrade of Salt from $INSTALLEDSALTVERSION to $NEWSALTVERSION."
echo ""
# If CentOS
if [ "$OS" == "centos" ]; then
echo "Removing yum versionlock for Salt."
echo ""
yum versionlock delete "salt-*"
echo "Updating Salt packages and restarting services."
echo ""
if [ $is_airgap -eq 0 ]; then
sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -r -F -M -x python3 stable "$NEWSALTVERSION"
else
sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -F -M -x python3 stable "$NEWSALTVERSION"
fi
echo "Applying yum versionlock for Salt."
echo ""
yum versionlock add "salt-*"
# Else do Ubuntu things
elif [ "$OS" == "ubuntu" ]; then
echo "Removing apt hold for Salt."
echo ""
apt-mark unhold "salt-common"
apt-mark unhold "salt-master"
apt-mark unhold "salt-minion"
echo "Updating Salt packages and restarting services."
echo ""
sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -F -M -x python3 stable "$NEWSALTVERSION"
echo "Applying apt hold for Salt."
echo ""
apt-mark hold "salt-common"
apt-mark hold "salt-master"
apt-mark hold "salt-minion"
fi
SALTUPGRADED=True
echo "Performing upgrade of Salt from $INSTALLEDSALTVERSION to $NEWSALTVERSION."
echo ""
# If CentOS
if [ "$OS" == "centos" ]; then
echo "Removing yum versionlock for Salt."
echo ""
yum versionlock delete "salt-*"
echo "Updating Salt packages and restarting services."
echo ""
if [ $is_airgap -eq 0 ]; then
sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -r -F -M -x python3 stable "$NEWSALTVERSION"
else
sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -F -M -x python3 stable "$NEWSALTVERSION"
fi
echo "Applying yum versionlock for Salt."
echo ""
yum versionlock add "salt-*"
# Else do Ubuntu things
elif [ "$OS" == "ubuntu" ]; then
echo "Removing apt hold for Salt."
echo ""
apt-mark unhold "salt-common"
apt-mark unhold "salt-master"
apt-mark unhold "salt-minion"
echo "Updating Salt packages and restarting services."
echo ""
sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -F -M -x python3 stable "$NEWSALTVERSION"
echo "Applying apt hold for Salt."
echo ""
apt-mark hold "salt-common"
apt-mark hold "salt-master"
apt-mark hold "salt-minion"
fi
}
verify_latest_update_script() {
# Check to see if the update scripts match. If not run the new one.
CURRENTSOUP=$(md5sum /opt/so/saltstack/default/salt/common/tools/sbin/soup | awk '{print $1}')
GITSOUP=$(md5sum $UPDATE_DIR/salt/common/tools/sbin/soup | awk '{print $1}')
CURRENTCMN=$(md5sum /opt/so/saltstack/default/salt/common/tools/sbin/so-common | awk '{print $1}')
GITCMN=$(md5sum $UPDATE_DIR/salt/common/tools/sbin/so-common | awk '{print $1}')
CURRENTIMGCMN=$(md5sum /opt/so/saltstack/default/salt/common/tools/sbin/so-image-common | awk '{print $1}')
GITIMGCMN=$(md5sum $UPDATE_DIR/salt/common/tools/sbin/so-image-common | awk '{print $1}')
# Check to see if the update scripts match. If not run the new one.
CURRENTSOUP=$(md5sum /opt/so/saltstack/default/salt/common/tools/sbin/soup | awk '{print $1}')
GITSOUP=$(md5sum $UPDATE_DIR/salt/common/tools/sbin/soup | awk '{print $1}')
CURRENTCMN=$(md5sum /opt/so/saltstack/default/salt/common/tools/sbin/so-common | awk '{print $1}')
GITCMN=$(md5sum $UPDATE_DIR/salt/common/tools/sbin/so-common | awk '{print $1}')
CURRENTIMGCMN=$(md5sum /opt/so/saltstack/default/salt/common/tools/sbin/so-image-common | awk '{print $1}')
GITIMGCMN=$(md5sum $UPDATE_DIR/salt/common/tools/sbin/so-image-common | awk '{print $1}')
if [[ "$CURRENTSOUP" == "$GITSOUP" && "$CURRENTCMN" == "$GITCMN" && "$CURRENTIMGCMN" == "$GITIMGCMN" ]]; then
echo "This version of the soup script is up to date. Proceeding."
else
echo "You are not running the latest soup version. Updating soup and its components. Might take multiple runs to complete"
cp $UPDATE_DIR/salt/common/tools/sbin/soup $DEFAULT_SALT_DIR/salt/common/tools/sbin/
cp $UPDATE_DIR/salt/common/tools/sbin/so-common $DEFAULT_SALT_DIR/salt/common/tools/sbin/
cp $UPDATE_DIR/salt/common/tools/sbin/so-image-common $DEFAULT_SALT_DIR/salt/common/tools/sbin/
salt-call state.apply common queue=True
echo ""
echo "soup has been updated. Please run soup again."
exit 0
fi
if [[ "$CURRENTSOUP" == "$GITSOUP" && "$CURRENTCMN" == "$GITCMN" && "$CURRENTIMGCMN" == "$GITIMGCMN" ]]; then
echo "This version of the soup script is up to date. Proceeding."
else
echo "You are not running the latest soup version. Updating soup and its components. Might take multiple runs to complete"
cp $UPDATE_DIR/salt/common/tools/sbin/soup $DEFAULT_SALT_DIR/salt/common/tools/sbin/
cp $UPDATE_DIR/salt/common/tools/sbin/so-common $DEFAULT_SALT_DIR/salt/common/tools/sbin/
cp $UPDATE_DIR/salt/common/tools/sbin/so-image-common $DEFAULT_SALT_DIR/salt/common/tools/sbin/
salt-call state.apply common queue=True
echo ""
echo "soup has been updated. Please run soup again."
exit 0
fi
}
main () {
echo "### Preparing soup at `date` ###"
while getopts ":b" opt; do
case "$opt" in
b ) # process option b
@@ -557,9 +568,10 @@ while getopts ":b" opt; do
echo "Batch size must be a number greater than 0."
exit 1
fi
;;
\? ) echo "Usage: cmd [-b]"
;;
;;
\? )
echo "Usage: cmd [-b]"
;;
esac
done
@@ -573,6 +585,8 @@ check_airgap
echo "Found that Security Onion $INSTALLEDVERSION is currently installed."
echo ""
set_os
set_palette
check_elastic_license
echo ""
if [ $is_airgap -eq 0 ]; then
# Let's mount the ISO since this is airgap
@@ -599,7 +613,7 @@ fi
echo "Let's see if we need to update Security Onion."
upgrade_check
space_check
upgrade_space
echo "Checking for Salt Master and Minion updates."
upgrade_check_salt
@@ -649,8 +663,7 @@ else
echo ""
fi
echo "Making pillar changes."
pillar_changes
preupgrade_changes
echo ""
if [ $is_airgap -eq 0 ]; then
@@ -704,7 +717,7 @@ echo "Starting Salt Master service."
systemctl start salt-master
echo "Running a highstate. This could take several minutes."
salt-call state.highstate -l info queue=True
playbook
postupgrade_changes
unmount_update
thehive_maint
@@ -736,6 +749,39 @@ if [[ -n $lsl_msg ]]; then
esac
fi
NUM_MINIONS=$(ls /opt/so/saltstack/local/pillar/minions/*_*.sls | wc -l)
if [ $NUM_MINIONS -gt 1 ]; then
cat << EOF
This appears to be a distributed deployment. Other nodes should update themselves at the next Salt highstate (typically within 15 minutes). Do not manually restart anything until you know that all the search/heavy nodes in your deployment are updated. This is especially important if you are using true clustering for Elasticsearch.
Each minion is on a random 15 minute check-in period and things like network bandwidth can be a factor in how long the actual upgrade takes. If you have a heavy node on a slow link, it is going to take a while to get the containers to it. Depending on what changes happened between the versions, Elasticsearch might not be able to talk to said heavy node until the update is complete.
If it looks like you're missing data after the upgrade, please avoid restarting services and instead make sure at least one search node has completed its upgrade. The best way to do this is to run 'sudo salt-call state.highstate' from a search node and make sure there are no errors. Typically if it works on one node it will work on the rest. Forward nodes are less complex and will update as they check in so you can monitor those from the Grid section of SOC.
For more information, please see https://docs.securityonion.net/en/2.3/soup.html#distributed-deployments.
EOF
fi
echo "### soup has been served at `date` ###"
}
main "$@" | tee /dev/fd/3
cat << EOF
SOUP - Security Onion UPdater
Please review the following for more information about the update process and recent updates:
https://docs.securityonion.net/soup
https://blog.securityonion.net
Please note that soup only updates Security Onion components and does NOT update the underlying operating system (OS). When you installed Security Onion, there was an option to automatically update the OS packages. If you did not enable this option, then you will want to ensure that the OS is fully updated before running soup.
Press Enter to continue or Ctrl-C to cancel.
EOF
read input
main "$@" | tee -a $SOUP_LOG

View File

@@ -4,12 +4,11 @@
{%- if grains['role'] in ['so-node', 'so-heavynode'] %}
{%- set ELASTICSEARCH_HOST = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- set ELASTICSEARCH_PORT = salt['pillar.get']('elasticsearch:es_port', '') -%}
{%- set LOG_SIZE_LIMIT = salt['pillar.get']('elasticsearch:log_size_limit', '') -%}
{%- elif grains['role'] in ['so-eval', 'so-managersearch', 'so-standalone'] %}
{%- set ELASTICSEARCH_HOST = salt['pillar.get']('manager:mainip', '') -%}
{%- set ELASTICSEARCH_PORT = salt['pillar.get']('manager:es_port', '') -%}
{%- set LOG_SIZE_LIMIT = salt['pillar.get']('manager:log_size_limit', '') -%}
{%- endif -%}
{%- set LOG_SIZE_LIMIT = salt['pillar.get']('elasticsearch:log_size_limit', '') -%}
# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
#

View File

@@ -1,86 +1,9 @@
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %}
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{% set MANAGER = salt['grains.get']('master') %}
{% set OLDVERSIONS = ['2.0.0-rc.1','2.0.1-rc.1','2.0.2-rc.1','2.0.3-rc.1','2.1.0-rc.2','2.2.0-rc.3','2.3.0','2.3.1','2.3.2','2.3.10','2.3.20']%}
{% for VERSION in OLDVERSIONS %}
remove_images_{{ VERSION }}:
docker_image.absent:
- force: True
- images:
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-acng:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-thehive-cortex:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-curator:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-domainstats:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-elastalert:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-elasticsearch:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-filebeat:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-fleet:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-fleet-launcher:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-freqserver:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-grafana:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-idstools:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-influxdb:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-kibana:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-kratos:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-logstash:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-minio:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-mysql:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-nginx:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-pcaptools:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-playbook:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-redis:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-soc:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-soctopus:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-steno:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-strelka-frontend:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-strelka-manager:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-strelka-backend:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-strelka-filestream:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-suricata:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-telegraf:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-thehive:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-thehive-es:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-wazuh:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-zeek:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-acng:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-thehive-cortex:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-curator:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-domainstats:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-elastalert:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-elasticsearch:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-filebeat:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-fleet:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-fleet-launcher:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-freqserver:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-grafana:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-idstools:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-influxdb:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-kibana:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-kratos:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-logstash:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-minio:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-mysql:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-nginx:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-pcaptools:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-playbook:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-redis:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-soc:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-soctopus:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-steno:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-strelka-frontend:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-strelka-manager:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-strelka-backend:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-strelka-filestream:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-suricata:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-telegraf:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-thehive:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-thehive-es:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-wazuh:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-zeek:{{ VERSION }}'
{% endfor %}
prune_images:
cmd.run:
- name: so-docker-prune
{% else %}

View File

@@ -4,6 +4,9 @@ from time import gmtime, strftime
import requests,json
from elastalert.alerts import Alerter
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
class PlaybookESAlerter(Alerter):
"""
Use matched data to create alerts in elasticsearch
@@ -17,7 +20,7 @@ class PlaybookESAlerter(Alerter):
timestamp = strftime("%Y-%m-%d"'T'"%H:%M:%S", gmtime())
headers = {"Content-Type": "application/json"}
payload = {"rule": { "name": self.rule['play_title'],"case_template": self.rule['play_id'],"uuid": self.rule['play_id'],"category": self.rule['rule.category']},"event":{ "severity": self.rule['event.severity'],"module": self.rule['event.module'],"dataset": self.rule['event.dataset'],"severity_label": self.rule['sigma_level']},"kibana_pivot": self.rule['kibana_pivot'],"soc_pivot": self.rule['soc_pivot'],"play_url": self.rule['play_url'],"sigma_level": self.rule['sigma_level'],"event_data": match, "@timestamp": timestamp}
url = f"http://{self.rule['elasticsearch_host']}/so-playbook-alerts-{today}/_doc/"
url = f"https://{self.rule['elasticsearch_host']}/so-playbook-alerts-{today}/_doc/"
requests.post(url, data=json.dumps(payload), headers=headers, verify=False)
def get_info(self):

View File

@@ -32,8 +32,6 @@
{ "rename": { "field": "category", "target_field": "event.category", "ignore_failure": true, "ignore_missing": true } },
{ "rename": { "field": "message2.community_id", "target_field": "network.community_id", "ignore_failure": true, "ignore_missing": true } },
{ "lowercase": { "field": "event.dataset", "ignore_failure": true, "ignore_missing": true } },
{ "convert": { "field": "destination.port", "type": "integer", "ignore_failure": true, "ignore_missing": true } },
{ "convert": { "field": "source.port", "type": "integer", "ignore_failure": true, "ignore_missing": true } },
{ "convert": { "field": "log.id.uid", "type": "string", "ignore_failure": true, "ignore_missing": true } },
{ "convert": { "field": "agent.id", "type": "string", "ignore_failure": true, "ignore_missing": true } },
{ "convert": { "field": "event.severity", "type": "integer", "ignore_failure": true, "ignore_missing": true } },

View File

@@ -0,0 +1,70 @@
{
"description" : "http.status",
"processors" : [
{ "set": { "if": "ctx.http.status_code == 100", "field": "http.status_message", "value": "Continue" } },
{ "set": { "if": "ctx.http.status_code == 101", "field": "http.status_message", "value": "Switching Protocols" } },
{ "set": { "if": "ctx.http.status_code == 102", "field": "http.status_message", "value": "Processing" } },
{ "set": { "if": "ctx.http.status_code == 103", "field": "http.status_message", "value": "Early Hints" } },
{ "set": { "if": "ctx.http.status_code == 200", "field": "http.status_message", "value": "OK" } },
{ "set": { "if": "ctx.http.status_code == 201", "field": "http.status_message", "value": "Created" } },
{ "set": { "if": "ctx.http.status_code == 202", "field": "http.status_message", "value": "Accepted" } },
{ "set": { "if": "ctx.http.status_code == 203", "field": "http.status_message", "value": "Non-Authoritative Information" } },
{ "set": { "if": "ctx.http.status_code == 204", "field": "http.status_message", "value": "No Content" } },
{ "set": { "if": "ctx.http.status_code == 205", "field": "http.status_message", "value": "Reset Content" } },
{ "set": { "if": "ctx.http.status_code == 206", "field": "http.status_message", "value": "Partial Content" } },
{ "set": { "if": "ctx.http.status_code == 207", "field": "http.status_message", "value": "Multi-Status" } },
{ "set": { "if": "ctx.http.status_code == 208", "field": "http.status_message", "value": "Already Reported" } },
{ "set": { "if": "ctx.http.status_code == 226", "field": "http.status_message", "value": "IM Used" } },
{ "set": { "if": "ctx.http.status_code == 300", "field": "http.status_message", "value": "Multiple Choices" } },
{ "set": { "if": "ctx.http.status_code == 301", "field": "http.status_message", "value": "Moved Permanently" } },
{ "set": { "if": "ctx.http.status_code == 302", "field": "http.status_message", "value": "Found" } },
{ "set": { "if": "ctx.http.status_code == 303", "field": "http.status_message", "value": "See Other" } },
{ "set": { "if": "ctx.http.status_code == 304", "field": "http.status_message", "value": "Not Modified" } },
{ "set": { "if": "ctx.http.status_code == 305", "field": "http.status_message", "value": "Use Proxy" } },
{ "set": { "if": "ctx.http.status_code == 306", "field": "http.status_message", "value": "(Unused)" } },
{ "set": { "if": "ctx.http.status_code == 307", "field": "http.status_message", "value": "Temporary Redirect" } },
{ "set": { "if": "ctx.http.status_code == 308", "field": "http.status_message", "value": "Permanent Redirect" } },
{ "set": { "if": "ctx.http.status_code == 400", "field": "http.status_message", "value": "Bad Request" } },
{ "set": { "if": "ctx.http.status_code == 401", "field": "http.status_message", "value": "Unauthorized" } },
{ "set": { "if": "ctx.http.status_code == 402", "field": "http.status_message", "value": "Payment Required" } },
{ "set": { "if": "ctx.http.status_code == 403", "field": "http.status_message", "value": "Forbidden" } },
{ "set": { "if": "ctx.http.status_code == 404", "field": "http.status_message", "value": "Not Found" } },
{ "set": { "if": "ctx.http.status_code == 405", "field": "http.status_message", "value": "Method Not Allowed" } },
{ "set": { "if": "ctx.http.status_code == 406", "field": "http.status_message", "value": "Not Acceptable" } },
{ "set": { "if": "ctx.http.status_code == 407", "field": "http.status_message", "value": "Proxy Authentication Required" } },
{ "set": { "if": "ctx.http.status_code == 408", "field": "http.status_message", "value": "Request Timeout" } },
{ "set": { "if": "ctx.http.status_code == 409", "field": "http.status_message", "value": "Conflict" } },
{ "set": { "if": "ctx.http.status_code == 410", "field": "http.status_message", "value": "Gone" } },
{ "set": { "if": "ctx.http.status_code == 411", "field": "http.status_message", "value": "Length Required" } },
{ "set": { "if": "ctx.http.status_code == 412", "field": "http.status_message", "value": "Precondition Failed" } },
{ "set": { "if": "ctx.http.status_code == 413", "field": "http.status_message", "value": "Payload Too Large" } },
{ "set": { "if": "ctx.http.status_code == 414", "field": "http.status_message", "value": "URI Too Long" } },
{ "set": { "if": "ctx.http.status_code == 415", "field": "http.status_message", "value": "Unsupported Media Type" } },
{ "set": { "if": "ctx.http.status_code == 416", "field": "http.status_message", "value": "Range Not Satisfiable" } },
{ "set": { "if": "ctx.http.status_code == 417", "field": "http.status_message", "value": "Expectation Failed" } },
{ "set": { "if": "ctx.http.status_code == 421", "field": "http.status_message", "value": "Misdirected Request" } },
{ "set": { "if": "ctx.http.status_code == 422", "field": "http.status_message", "value": "Unprocessable Entity" } },
{ "set": { "if": "ctx.http.status_code == 423", "field": "http.status_message", "value": "Locked" } },
{ "set": { "if": "ctx.http.status_code == 424", "field": "http.status_message", "value": "Failed Dependency" } },
{ "set": { "if": "ctx.http.status_code == 425", "field": "http.status_message", "value": "Too Early" } },
{ "set": { "if": "ctx.http.status_code == 426", "field": "http.status_message", "value": "Upgrade Required" } },
{ "set": { "if": "ctx.http.status_code == 427", "field": "http.status_message", "value": "Unassigned" } },
{ "set": { "if": "ctx.http.status_code == 428", "field": "http.status_message", "value": "Precondition Required" } },
{ "set": { "if": "ctx.http.status_code == 429", "field": "http.status_message", "value": "Too Many Requests" } },
{ "set": { "if": "ctx.http.status_code == 430", "field": "http.status_message", "value": "Unassigned" } },
{ "set": { "if": "ctx.http.status_code == 431", "field": "http.status_message", "value": "Request Header Fields Too Large" } },
{ "set": { "if": "ctx.http.status_code == 451", "field": "http.status_message", "value": "Unavailable For Legal Reasons" } },
{ "set": { "if": "ctx.http.status_code == 500", "field": "http.status_message", "value": "Internal Server Error" } },
{ "set": { "if": "ctx.http.status_code == 501", "field": "http.status_message", "value": "Not Implemented" } },
{ "set": { "if": "ctx.http.status_code == 502", "field": "http.status_message", "value": "Bad Gateway" } },
{ "set": { "if": "ctx.http.status_code == 503", "field": "http.status_message", "value": "Service Unavailable" } },
{ "set": { "if": "ctx.http.status_code == 504", "field": "http.status_message", "value": "Gateway Timeout" } },
{ "set": { "if": "ctx.http.status_code == 505", "field": "http.status_message", "value": "HTTP Version Not Supported" } },
{ "set": { "if": "ctx.http.status_code == 506", "field": "http.status_message", "value": "Variant Also Negotiates" } },
{ "set": { "if": "ctx.http.status_code == 507", "field": "http.status_message", "value": "Insufficient Storage" } },
{ "set": { "if": "ctx.http.status_code == 508", "field": "http.status_message", "value": "Loop Detected" } },
{ "set": { "if": "ctx.http.status_code == 509", "field": "http.status_message", "value": "Unassigned" } },
{ "set": { "if": "ctx.http.status_code == 510", "field": "http.status_message", "value": "Not Extended" } },
{ "set": { "if": "ctx.http.status_code == 511", "field": "http.status_message", "value": "Network Authentication Required" } }
]
}

View File

@@ -1,13 +1,14 @@
{
"description" : "suricata.dhcp",
"processors" : [
{ "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_missing": true } },
{ "rename": { "field": "message2.app_proto", "target_field": "network.protocol", "ignore_missing": true } },
{ "rename": { "field": "message2.dhcp.assigned_ip", "target_field": "dhcp.assigned_ip", "ignore_missing": true } },
{ "rename": { "field": "message2.dhcp.client_mac", "target_field": "host.mac", "ignore_missing": true } },
{ "rename": { "field": "message2.dhcp.dhcp_type", "target_field": "dhcp.message_types", "ignore_missing": true } },
{ "rename": { "field": "message2.dhcp.assigned_ip", "target_field": "dhcp.assigned_ip", "ignore_missing": true } },
{ "rename": { "field": "message2.dhcp.type", "target_field": "dhcp.type", "ignore_missing": true } },
{ "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_missing": true } },
{ "rename": { "field": "message2.app_proto", "target_field": "network.protocol", "ignore_missing": true } },
{ "rename": { "field": "message2.dhcp.assigned_ip", "target_field": "dhcp.assigned_ip", "ignore_missing": true } },
{ "rename": { "field": "message2.dhcp.client_ip", "target_field": "client.address", "ignore_missing": true } },
{ "rename": { "field": "message2.dhcp.client_mac", "target_field": "host.mac", "ignore_missing": true } },
{ "rename": { "field": "message2.dhcp.dhcp_type", "target_field": "dhcp.message_types", "ignore_missing": true } },
{ "rename": { "field": "message2.dhcp.hostname", "target_field": "host.hostname", "ignore_missing": true } },
{ "rename": { "field": "message2.dhcp.type", "target_field": "dhcp.type", "ignore_missing": true } },
{ "rename": { "field": "message2.dhcp.id", "target_field": "dhcp.id", "ignore_missing": true } },
{ "pipeline": { "name": "common" } }
]

View File

@@ -1,17 +1,18 @@
{
"description" : "suricata.http",
"processors" : [
{ "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_missing": true } },
{ "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_missing": true } },
{ "rename": { "field": "message2.app_proto", "target_field": "network.protocol", "ignore_missing": true } },
{ "rename": { "field": "message2.http.hostname", "target_field": "http.virtual_host", "ignore_missing": true } },
{ "rename": { "field": "message2.http.http_user_agent", "target_field": "http.useragent", "ignore_missing": true } },
{ "rename": { "field": "message2.http.url", "target_field": "http.uri", "ignore_missing": true } },
{ "rename": { "field": "message2.http.http_content_type", "target_field": "file.resp_mime_types", "ignore_missing": true } },
{ "rename": { "field": "message2.http.http_user_agent", "target_field": "http.useragent", "ignore_missing": true } },
{ "rename": { "field": "message2.http.url", "target_field": "http.uri", "ignore_missing": true } },
{ "rename": { "field": "message2.http.http_content_type", "target_field": "file.resp_mime_types", "ignore_missing": true } },
{ "rename": { "field": "message2.http.http_refer", "target_field": "http.referrer", "ignore_missing": true } },
{ "rename": { "field": "message2.http.http_method", "target_field": "http.method", "ignore_missing": true } },
{ "rename": { "field": "message2.http.protocol", "target_field": "http.version", "ignore_missing": true } },
{ "rename": { "field": "message2.http.http_method", "target_field": "http.method", "ignore_missing": true } },
{ "rename": { "field": "message2.http.protocol", "target_field": "http.version", "ignore_missing": true } },
{ "rename": { "field": "message2.http.status", "target_field": "http.status_code", "ignore_missing": true } },
{ "rename": { "field": "message2.http.length", "target_field": "http.request.body.length", "ignore_missing": true } },
{ "rename": { "field": "message2.http.length", "target_field": "http.request.body.length", "ignore_missing": true } },
{ "pipeline": { "if": "ctx.http?.status_code != null", "name": "http.status" } },
{ "pipeline": { "name": "common" } }
]
}
}

View File

@@ -51,16 +51,29 @@
"match_mapping_type": "string",
"path_match": "*.ip",
"mapping": {
"type": "ip"
"type": "ip",
"fields" : {
"keyword" : {
"ignore_above" : 45,
"type" : "keyword"
}
}
}
}
},
{
"port": {
"match_mapping_type": "string",
"path_match": "*.port",
"mapping": {
"type": "integer"
"type": "integer",
"fields" : {
"keyword" : {
"ignore_above" : 6,
"type" : "keyword"
}
}
}
}
},

View File

@@ -26,15 +26,6 @@ iptables_fix_fwd:
- position: 1
- target: DOCKER-USER
# Allow related/established sessions
iptables_allow_established:
iptables.append:
- table: filter
- chain: INPUT
- jump: ACCEPT
- match: conntrack
- ctstate: 'RELATED,ESTABLISHED'
# I like pings
iptables_allow_pings:
iptables.append:
@@ -77,17 +68,6 @@ enable_docker_user_fw_policy:
- out-interface: docker0
- position: 1
enable_docker_user_established:
iptables.insert:
- table: filter
- chain: DOCKER-USER
- jump: ACCEPT
- in-interface: '!docker0'
- out-interface: docker0
- position: 1
- match: conntrack
- ctstate: 'RELATED,ESTABLISHED'
{% set count = namespace(value=0) %}
{% for chain, hg in assigned_hostgroups.chain.items() %}
{% for hostgroup, portgroups in assigned_hostgroups.chain[chain].hostgroups.items() %}
@@ -120,6 +100,27 @@ enable_docker_user_established:
{% endfor %}
{% endfor %}
# Allow related/established sessions
iptables_allow_established:
iptables.insert:
- table: filter
- chain: INPUT
- jump: ACCEPT
- position: 1
- match: conntrack
- ctstate: 'RELATED,ESTABLISHED'
enable_docker_user_established:
iptables.insert:
- table: filter
- chain: DOCKER-USER
- jump: ACCEPT
- in-interface: '!docker0'
- out-interface: docker0
- position: 1
- match: conntrack
- ctstate: 'RELATED,ESTABLISHED'
# Block icmp timestamp response
block_icmp_timestamp_reply:
iptables.append:

View File

@@ -19,13 +19,12 @@
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{% set MANAGER = salt['grains.get']('master') %}
{% set ENGINE = salt['pillar.get']('global:mdengine', '') %}
{% set proxy = salt['pillar.get']('manager:proxy') %}
include:
- idstools.sync_files
# IDSTools Setup
idstoolsdir:
file.directory:
- name: /opt/so/conf/idstools/etc
- user: 939
- group: 939
- makedirs: True
idstoolslogdir:
file.directory:
@@ -34,14 +33,6 @@ idstoolslogdir:
- group: 939
- makedirs: True
idstoolsetcsync:
file.recurse:
- name: /opt/so/conf/idstools/etc
- source: salt://idstools/etc
- user: 939
- group: 939
- template: jinja
so-ruleupdatecron:
cron.present:
- name: /usr/sbin/so-rule-update > /opt/so/log/idstools/download.log 2>&1
@@ -49,28 +40,17 @@ so-ruleupdatecron:
- minute: '1'
- hour: '7'
rulesdir:
file.directory:
- name: /opt/so/rules/nids
- user: 939
- group: 939
- makedirs: True
# Don't show changes because all.rules can be large
synclocalnidsrules:
file.recurse:
- name: /opt/so/rules/nids/
- source: salt://idstools/
- user: 939
- group: 939
- show_changes: False
- include_pat: 'E@.rules'
so-idstools:
docker_container.running:
- image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-idstools:{{ VERSION }}
- hostname: so-idstools
- user: socore
{% if proxy %}
- environment:
- http_proxy={{ proxy }}
- https_proxy={{ proxy }}
- no_proxy={{ salt['pillar.get']('manager:no_proxy') }}
{% endif %}
- binds:
- /opt/so/conf/idstools/etc:/opt/so/idstools/etc:ro
- /opt/so/rules/nids:/opt/so/rules/nids:rw

View File

@@ -0,0 +1,46 @@
# Copyright 2014,2015,2016,2017,2018 Security Onion Solutions, LLC
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
idstoolsdir:
file.directory:
- name: /opt/so/conf/idstools/etc
- user: 939
- group: 939
- makedirs: True
idstoolsetcsync:
file.recurse:
- name: /opt/so/conf/idstools/etc
- source: salt://idstools/etc
- user: 939
- group: 939
- template: jinja
rulesdir:
file.directory:
- name: /opt/so/rules/nids
- user: 939
- group: 939
- makedirs: True
# Don't show changes because all.rules can be large
synclocalnidsrules:
file.recurse:
- name: /opt/so/rules/nids/
- source: salt://idstools/
- user: 939
- group: 939
- show_changes: False
- include_pat: 'E@.rules'

View File

@@ -10,6 +10,9 @@
{% if grains['role'] in ['so-manager', 'so-managersearch', 'so-eval', 'so-standalone'] and GRAFANA == 1 %}
include:
- salt.minion
# Influx DB
influxconfdir:
file.directory:
@@ -85,6 +88,7 @@ telegraf_database:
- require:
- docker_container: so-influxdb
- influxdb_database: telegraf_database
- file: influxdb_retention_policy.present_patch
{% endfor %}
{% for dest_rp in influxdb.downsample.keys() %}
@@ -101,6 +105,7 @@ so_downsample_{{measurement}}_cq:
- require:
- docker_container: so-influxdb
- influxdb_database: telegraf_database
- file: influxdb_continuous_query.present_patch
{% endfor %}
{% endfor %}

View File

@@ -3,6 +3,8 @@
# {%- set FLEET_NODE = salt['pillar.get']('global:fleet_node', False) -%}
# {%- set MANAGER = salt['pillar.get']('global:url_base', '') %}
. /usr/sbin/so-common
# Copy template file
cp /opt/so/conf/kibana/saved_objects.ndjson.template /opt/so/conf/kibana/saved_objects.ndjson
@@ -14,9 +16,11 @@ cp /opt/so/conf/kibana/saved_objects.ndjson.template /opt/so/conf/kibana/saved_o
# SOCtopus and Manager
sed -i "s/PLACEHOLDER/{{ MANAGER }}/g" /opt/so/conf/kibana/saved_objects.ndjson
wait_for_web_response "http://localhost:5601/app/kibana" "Elastic"
## This hackery will be removed if using Elastic Auth ##
# Let's snag a cookie from Kibana
THECOOKIE=$(curl -c - -X GET http://localhost:5601/ | grep sid | awk '{print $7}')
# Load saved objects
curl -b "sid=$THECOOKIE" -L -X POST "localhost:5601/api/saved_objects/_import?overwrite=true" -H "kbn-xsrf: true" --form file=@/opt/so/conf/kibana/saved_objects.ndjson > /dev/null 2>&1
curl -b "sid=$THECOOKIE" -L -X POST "localhost:5601/api/saved_objects/_import?overwrite=true" -H "kbn-xsrf: true" --form file=@/opt/so/conf/kibana/saved_objects.ndjson >> /opt/so/log/kibana/misc.log

File diff suppressed because one or more lines are too long

View File

@@ -94,21 +94,10 @@ kibanadashtemplate:
- user: 932
- group: 939
wait_for_kibana:
module.run:
- http.wait_for_successful_query:
- url: "http://{{MANAGER}}:5601/api/saved_objects/_find?type=config"
- wait_for: 900
- onchanges:
- file: kibanadashtemplate
so-kibana-config-load:
cmd.run:
- name: /usr/sbin/so-kibana-config-load
- cwd: /opt/so
- onchanges:
- wait_for_kibana
# Keep the setting correct
#KibanaHappy:

View File

@@ -25,8 +25,8 @@ events {
http {
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
@@ -172,6 +172,8 @@ http {
location / {
auth_request /auth/sessions/whoami;
auth_request_set $userid $upstream_http_x_kratos_authenticated_identity_id;
proxy_set_header x-user-id $userid;
proxy_pass http://{{ manager_ip }}:9822/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
@@ -231,15 +233,15 @@ http {
}
{%- if airgap is sameas true %}
location /repo/ {
allow all;
sendfile on;
sendfile_max_chunk 1m;
autoindex on;
autoindex_exact_size off;
autoindex_format html;
autoindex_localtime on;
}
location /repo/ {
allow all;
sendfile on;
sendfile_max_chunk 1m;
autoindex on;
autoindex_exact_size off;
autoindex_format html;
autoindex_localtime on;
}
{%- endif %}
location /grafana/ {

File diff suppressed because one or more lines are too long

View File

@@ -89,7 +89,7 @@ def run():
# Update the Fleet host in the static pillar
for line in fileinput.input(STATICFILE, inplace=True):
line = re.sub(r'fleet_custom_hostname:.*\n', f"fleet_custom_hostname: {CUSTOMHOSTNAME}", line.rstrip())
line = re.sub(r'fleet_custom_hostname:.*$', f"fleet_custom_hostname: {CUSTOMHOSTNAME}", line.rstrip())
print(line)
return {}

View File

@@ -0,0 +1,4 @@
60c60
< database, name, query, resample_time, coverage_period
---
> database, name, query, resample_time, coverage_period, **client_args

View File

@@ -0,0 +1,4 @@
38c38
< hours = int(duration.split("h"))
---
> hours = int(duration.split("h")[0])

View File

@@ -36,4 +36,21 @@ salt_minion_service:
service.running:
- name: salt-minion
- enable: True
- onlyif: test "{{INSTALLEDSALTVERSION}}" == "{{SALTVERSION}}"
- onlyif: test "{{INSTALLEDSALTVERSION}}" == "{{SALTVERSION}}"
patch_pkg:
pkg.installed:
- name: patch
#https://github.com/saltstack/salt/issues/59766
influxdb_continuous_query.present_patch:
file.patch:
- name: /usr/lib/python3.6/site-packages/salt/states/influxdb_continuous_query.py
- source: salt://salt/files/influxdb_continuous_query.py.patch
#https://github.com/saltstack/salt/issues/59761
influxdb_retention_policy.present_patch:
file.patch:
- name: /usr/lib/python3.6/site-packages/salt/states/influxdb_retention_policy.py
- source: salt://salt/files/influxdb_retention_policy.py.patch

View File

@@ -1,52 +1,49 @@
{
"title": "Security Onion 2.3.30 is here!",
"title": "Security Onion 2.3.40 is here!",
"changes": [
{ "summary": "Zeek is now at version 3.0.13." },
{ "summary": "CyberChef is now at version 9.27.2." },
{ "summary": "Elastic components are now at version 7.10.2. This is the last version that uses the Apache license." },
{ "summary": "Suricata is now at version 6.0.1." },
{ "summary": "Salt is now at version 3002.5." },
{ "summary": "Suricata metadata parsing is now vastly improved." },
{ "summary": "If you choose Suricata for metadata parsing, it will now extract files from the network and send them to Strelka. You can add additional mime types <a href='https://github.com/Security-Onion-Solutions/securityonion/blob/dev/salt/idstools/sorules/extraction.rules'>here</a>." },
{ "summary": "It is now possible to filter Suricata events from being written to the logs. This is a new Suricata 6 feature. We have included some examples <a href='https://github.com/Security-Onion-Solutions/securityonion/blob/dev/salt/idstools/sorules/filters.rules'>here</a>." },
{ "summary": "The Kratos docker container will now perform DNS lookups locally before reaching out to the network DNS provider." },
{ "summary": "Network configuration is now more compatible with manually configured OpenVPN or Wireguard VPN interfaces." },
{ "summary": "<code>so-sensor-clean</code> will no longer spawn multiple instances." },
{ "summary": "Suricata eve.json logs will now be cleaned up after 7 days. This can be changed via the pillar setting." },
{ "summary": "Fixed a security issue where the backup directory had improper file permissions." },
{ "summary": "The automated backup script on the manager now backs up all keys along with the salt configurations. Backup retention is now set to 7 days." },
{ "summary": "Strelka logs are now being rotated properly." },
{ "summary": "Elastalert can now be customized via a pillar." },
{ "summary": "Introduced new script <code>so-monitor-add</code> that allows the user to easily add interfaces to the bond for monitoring." },
{ "summary": "Setup now validates all user input fields to give up-front feedback if an entered value is invalid." },
{ "summary": "There have been several changes to improve install reliability. Many install steps have had their validation processes reworked to ensure that required tasks have been completed before moving on to the next step of the install." },
{ "summary": "Users are now warned if they try to set <i>securityonion</i> as their hostname." },
{ "summary": "The ISO should now identify xvda and nvme devices as install targets." },
{ "summary": "At the end of the first stage of the ISO setup, the ISO device should properly unmount and eject." },
{ "summary": "The text selection of choosing Suricata vs Zeek for metadata is now more descriptive." },
{ "summary": "The logic for properly setting the <code>LOG_SIZE_LIMIT</code> variable has been improved." },
{ "summary": "When installing on Ubuntu, Setup will now wait for cloud init to complete before trying to start the install of packages." },
{ "summary": "The firewall state runs considerably faster now." },
{ "summary": "ICMP timestamps are now disabled." },
{ "summary": "Copyright dates on all Security Onion specific files have been updated." },
{ "summary": "<code>so-tcpreplay</code> (and indirectly <code>so-test</code>) should now work properly." },
{ "summary": "The Zeek packet loss script is now more accurate." },
{ "summary": "Grafana now includes an estimated EPS graph for events ingested on the manager." },
{ "summary": "Updated Elastalert to release 0.2.4-alt2 based on the <a href='https://github.com/jertel/elastalert'>jertel/elastalert</a> alt branch." },
{ "summary": "Pivots from Alerts/Hunts to action links will properly URI encode values." },
{ "summary": "Hunt timeline graph will properly scale the data point interval based on the search date range." },
{ "summary": "Grid interface will properly show <i>Search</i> as the node type instead of <i>so-node</i>." },
{ "summary": "Import node now supports airgap environments." },
{ "summary": "The so-mysql container will now show <i>healthy</i> when viewing the docker ps output." },
{ "summary": "The Soctopus configuration now uses private IPs instead of public IPs, allowing network communications to succeed within the grid." },
{ "summary": "The Correlate action in Hunt now groups the OR filters together to ensure subsequent user-added filters are correctly ANDed to the entire OR group." },
{ "summary": "Add support to <code>so-firewall</code> script to display existing port groups and host groups." },
{ "summary": "TheHive initialization during Security Onion setup will now properly check for a running ES instance and will retry connectivity checks to TheHive before proceeding." },
{ "summary": "Changes to the <i>.security</i> analyzer yields more accurate query results when using Playbook." },
{ "summary": "Several Hunt queries have been updated." },
{ "summary": "The pfSense firewall log parser has been updated to improve compatibility." },
{ "summary": "Kibana dashboard hyperlinks have been updated for faster navigation." },
{ "summary": "Added a new <code>so-rule</code> script to make it easier to disable, enable, and modify SIDs." },
{ "summary": "ISO now gives the option to just configure the network during setup." }
{ "summary": "FEATURE: Add option for HTTP Method Specification/POST to Hunt/Alerts Actions <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/2904\">#2904</a>" },
{ "summary": "FEATURE: Add option to configure proxy for various tools used during setup + persist the proxy configuration <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/529\">#529</a>" },
{ "summary": "FEATURE: Alerts/Hunt - Provide method for base64-encoding pivot value <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/1749\">#1749</a>" },
{ "summary": "FEATURE: Allow users to customize links in SOC <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/1248\">#1248</a>" },
{ "summary": "FEATURE: Display user who requested PCAP in SOC <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/2775\">#2775</a>" },
{ "summary": "FEATURE: Make SOC browser app connection timeouts adjustable <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/2408\">#2408</a>" },
{ "summary": "FEATURE: Move to FleetDM <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/3483\">#3483</a>" },
{ "summary": "FEATURE: Reduce field cache expiration from 1d to 5m, and expose value as a salt pillar <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/3537\">#3537</a>" },
{ "summary": "FEATURE: Refactor docker_clean salt state to use loop w/ inspection instead of hardcoded image list <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/3113\">#3113</a>" },
{ "summary": "FEATURE: Run so-ssh-harden during setup <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/1932\">#1932</a>" },
{ "summary": "FEATURE: SOC should only display links to tools that are enabled <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/1643\">#1643</a>" },
{ "summary": "FEATURE: Update Sigmac Osquery Field Mappings <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/3137\">#3137</a>" },
{ "summary": "FEATURE: User must accept the Elastic licence during setup <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/3233\">#3233</a>" },
{ "summary": "FEATURE: soup should output more guidance for distributed deployments at the end <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/3340\">#3340</a>" },
{ "summary": "FEATURE: soup should provide some initial information and then prompt the user to continue <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/3486\">#3486</a>" },
{ "summary": "FIX: Add cronjob for so-suricata-eve-clean script <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/3515\">#3515</a>" },
{ "summary": "FIX: Change Elasticsearch heap formula <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/1686\">#1686</a>" },
{ "summary": "FIX: Create a post install version loop in soup <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/3102\">#3102</a>" },
{ "summary": "FIX: Custom Kibana settings are not being applied properly on upgrades <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/3254\">#3254</a>" },
{ "summary": "FIX: Hunt query issues with quotes <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/3320\">#3320</a>" },
{ "summary": "FIX: IP Addresses don't work with .security <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/3327\">#3327</a>" },
{ "summary": "FIX: Improve DHCP leases query in Hunt <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/3395\">#3395</a>" },
{ "summary": "FIX: Improve Setup verbiage <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/3422\">#3422</a>" },
{ "summary": "FIX: Improve Suricata DHCP logging and parsing <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/3397\">#3397</a>" },
{ "summary": "FIX: Keep RELATED,ESTABLISHED rules at the top of iptables chains <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/3288\">#3288</a>" },
{ "summary": "FIX: Populate http.status_message field <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/3408\">#3408</a>" },
{ "summary": "FIX: Remove 'types removal' deprecation messages from elastic log. <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/3345\">#3345</a>" },
{ "summary": "FIX: Reword + fix formatting on ES data storage prompt <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/3205\">#3205</a>" },
{ "summary": "FIX: SMTP shoud read SNMP on Kibana SNMP view <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/3413\">#3413</a>" },
{ "summary": "FIX: Sensors can temporarily show offline while processing large PCAP jobs <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/3279\">#3279</a>" },
{ "summary": "FIX: Soup should log to the screen as well as to a file <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/3467\">#3467</a>" },
{ "summary": "FIX: Strelka port 57314 not immediately relinquished upon restart <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/3457\">#3457</a>" },
{ "summary": "FIX: Switch SOC to pull from fieldcaps API due to field caching changes in Kibana 7.11 <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/3502\">#3502</a>" },
{ "summary": "FIX: Syntax error in /etc/sysctl.d/99-reserved-ports.conf <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/3308\">#3308</a>" },
{ "summary": "FIX: Telegraf hardcoded to use https and is not aware of elasticsearch features <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/2061\">#2061</a>" },
{ "summary": "FIX: Zeek Index Close and Delete Count for curator <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/3274\">#3274</a>" },
{ "summary": "FIX: so-cortex-user-add and so-cortex-user-enable use wrong pillar value for api key <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/3388\">#3388</a>" },
{ "summary": "FIX: so-rule does not completely apply change <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/3289\">#3289</a>" },
{ "summary": "FIX: soup should recheck disk space after it tries to clean up. <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/3235\">#3235</a>" },
{ "summary": "UPGRADE: Elastic 7.11.2 <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/3389\">#3389</a>" },
{ "summary": "UPGRADE: Suricata 6.0.2 <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/3217\">#3217</a>" },
{ "summary": "UPGRADE: Zeek 4 <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/3216\">#3216</a>" },
{ "summary": "UPGRADE: Zeek container to use Python 3 <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/1113\">#1113</a>" },
{ "summary": "UPGRADE: docker-ce to latest <a href=\"https://github.com/Security-Onion-Solutions/securityonion/issues/3493\">#3493</a>" }
]
}
}

View File

@@ -1,13 +1,23 @@
{%- set MANAGERIP = salt['pillar.get']('global:managerip', '') %}
{%- set SENSORONIKEY = salt['pillar.get']('global:sensoronikey', '') %}
{%- set THEHIVEKEY = salt['pillar.get']('global:hivekey', '') %}
{%- set PLAYBOOK = salt['pillar.get']('manager:playbook', '0') %}
{%- set THEHIVE = salt['pillar.get']('manager:thehive', '0') %}
{%- set OSQUERY = salt['pillar.get']('manager:osquery', '0') %}
{%- set GRAFANA = salt['pillar.get']('manager:grafana', '0') %}
{%- set ISAIRGAP = salt['pillar.get']('global:airgap', 'False') %}
{%- set API_TIMEOUT = salt['pillar.get']('sensoroni:api_timeout_ms', 0) %}
{%- set WEBSOCKET_TIMEOUT = salt['pillar.get']('sensoroni:websocket_timeout_ms', 0) %}
{%- set TIP_TIMEOUT = salt['pillar.get']('sensoroni:tip_timeout_ms', 0) %}
{%- set CACHE_EXPIRATION = salt['pillar.get']('sensoroni:cache_expiration_ms', 0) %}
{%- set ES_FIELDCAPS_CACHE = salt['pillar.get']('sensoroni:es_fieldcaps_cache_ms', '300000') %}
{%- import_json "soc/files/soc/alerts.queries.json" as alerts_queries %}
{%- import_json "soc/files/soc/alerts.actions.json" as alerts_actions %}
{%- import_json "soc/files/soc/alerts.eventfields.json" as alerts_eventfields %}
{%- import_json "soc/files/soc/hunt.queries.json" as hunt_queries %}
{%- import_json "soc/files/soc/hunt.actions.json" as hunt_actions %}
{%- import_json "soc/files/soc/hunt.eventfields.json" as hunt_eventfields %}
{%- import_json "soc/files/soc/tools.json" as tools %}
{%- set DNET = salt['pillar.get']('global:dockernet', '172.17.0.0') %}
{
@@ -40,6 +50,7 @@
{%- endif %}
"username": "",
"password": "",
"cacheMs": {{ ES_FIELDCAPS_CACHE }},
"verifyCert": false
},
"sostatus": {
@@ -66,6 +77,26 @@
"docsUrl": "https://docs.securityonion.net/en/2.3/",
"cheatsheetUrl": "https://github.com/Security-Onion-Solutions/securityonion-docs/raw/2.3/images/cheat-sheet/Security-Onion-Cheat-Sheet.pdf",
{%- endif %}
"apiTimeoutMs": {{ API_TIMEOUT }},
"webSocketTimeoutMs": {{ WEBSOCKET_TIMEOUT }},
"tipTimeoutMs": {{ TIP_TIMEOUT }},
"cacheExpirationMs": {{ CACHE_EXPIRATION }},
"inactiveTools": [
{%- if PLAYBOOK == 0 %}
"toolPlaybook",
{%- endif %}
{%- if THEHIVE == 0 %}
"toolTheHive",
{%- endif %}
{%- if OSQUERY == 0 %}
"toolFleet",
{%- endif %}
{%- if GRAFANA == 0 %}
"toolGrafana",
{%- endif %}
"toolUnused"
],
"tools": {{ tools | json }},
"hunt": {
"advanced": true,
"groupItemsPerPage": 10,

View File

@@ -0,0 +1,9 @@
[
{ "name": "toolKibana", "description": "toolKibanaHelp", "icon": "fa-external-link-alt", "target": "so-kibana", "link": "/kibana/" },
{ "name": "toolGrafana", "description": "toolGrafanaHelp", "icon": "fa-external-link-alt", "target": "so-grafana", "link": "/grafana/d/so_overview" },
{ "name": "toolCyberchef", "description": "toolCyberchefHelp", "icon": "fa-external-link-alt", "target": "so-cyberchef", "link": "/cyberchef/" },
{ "name": "toolPlaybook", "description": "toolPlaybookHelp", "icon": "fa-external-link-alt", "target": "so-playbook", "link": "/playbook/projects/detection-playbooks/issues/" },
{ "name": "toolFleet", "description": "toolFleetHelp", "icon": "fa-external-link-alt", "target": "so-fleet", "link": "/fleet/" },
{ "name": "toolTheHive", "description": "toolTheHiveHelp", "icon": "fa-external-link-alt", "target": "so-thehive", "link": "/thehive/" },
{ "name": "toolNavigator", "description": "toolNavigatorHelp", "icon": "fa-external-link-alt", "target": "so-navigator", "link": "/navigator/" }
]

View File

@@ -19,7 +19,8 @@ files:
- '/nsm/strelka/unprocessed/*'
delete: false
gatekeeper: true
processed: '/nsm/strelka/processed'
response:
report: 5s
delta: 5s
staging: '/nsm/strelka/processed'
staging: '/nsm/strelka/staging'

View File

@@ -86,6 +86,13 @@ strelkaprocessed:
- group: 939
- makedirs: True
strelkastaging:
file.directory:
- name: /nsm/strelka/staging
- user: 939
- group: 939
- makedirs: True
strelkaunprocessed:
file.directory:
- name: /nsm/strelka/unprocessed
@@ -96,7 +103,7 @@ strelkaunprocessed:
# Check to see if Strelka frontend port is available
strelkaportavailable:
cmd.run:
- name: netstat -utanp | grep ":57314" | grep -qv docker && PROCESS=$(netstat -utanp | grep ":57314" | uniq) && echo "Another process ($PROCESS) appears to be using port 57314. Please terminate this process, or reboot to ensure a clean state so that Strelka can start properly." && exit 1 || exit 0
- name: netstat -utanp | grep ":57314" | grep -qvE 'docker|TIME_WAIT' && PROCESS=$(netstat -utanp | grep ":57314" | uniq) && echo "Another process ($PROCESS) appears to be using port 57314. Please terminate this process, or reboot to ensure a clean state so that Strelka can start properly." && exit 1 || exit 0
strelka_coordinator:
docker_container.running:
@@ -213,4 +220,4 @@ strelka_zeek_extracted_sync:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}
{% endif %}

View File

@@ -179,6 +179,26 @@ disable_so-suricata_so-status.conf:
- month: '*'
- dayweek: '*'
so-suricata-eve-clean:
file.managed:
- name: /usr/sbin/so-suricata-eve-clean
- user: root
- group: root
- mode: 755
- template: jinja
- source: salt://suricata/cron/so-suricata-eve-clean
# Add eve clean cron
clean_suricata_eve_files:
cron.present:
- name: /usr/sbin/so-suricata-eve-clean > /dev/null 2>&1
- user: root
- minute: '*/5'
- hour: '*'
- daymonth: '*'
- month: '*'
- dayweek: '*'
{% else %}
{{sls}}_state_not_allowed:

View File

@@ -61,7 +61,7 @@ suricata:
- sip
- dhcp:
enabled: "yes"
# extended: "no"
extended: "yes"
- ssh
#- stats:
# totals: "yes"
@@ -69,4 +69,4 @@ suricata:
# deltas: "no"
- flow
#- netflow
#- metadata
#- metadata

View File

@@ -618,11 +618,8 @@
# # Read stats from one or more Elasticsearch servers or clusters
{% if grains['role'] in ['so-manager', 'so-eval', 'so-managersearch', 'so-standalone'] %}
[[inputs.elasticsearch]]
# ## specify a list of one or more Elasticsearch servers
# # you can add username and password to your url to use basic authentication:
# # servers = ["http://user:pass@localhost:9200"]
servers = ["https://{{ MANAGER }}:9200"]
insecure_skip_verify = true
{% elif grains['role'] in ['so-node', 'so-hotnode', 'so-warmnode', 'so-heavynode'] %}
[[inputs.elasticsearch]]
servers = ["https://{{ NODEIP }}:9200"]

View File

@@ -1,3 +1,4 @@
{% set proxy = salt['pillar.get']('manager:proxy') -%}
[main]
cachedir=/var/cache/yum/$basearch/$releasever
keepcache=0
@@ -11,7 +12,8 @@ installonly_limit={{ salt['pillar.get']('yum:config:installonly_limit', 2) }}
bugtracker_url=http://bugs.centos.org/set_project.php?project_id=23&ref=http://bugs.centos.org/bug_report_page.php?category=yum
distroverpkg=centos-release
clean_requirements_on_remove=1
{% if (grains['role'] not in ['so-eval','so-managersearch', 'so-manager', 'so-standalone']) and salt['pillar.get']('global:managerupdate', '0') %}
{% if (grains['role'] not in ['so-eval','so-managersearch', 'so-manager', 'so-standalone']) and salt['pillar.get']('global:managerupdate', '0') -%}
proxy=http://{{ salt['pillar.get']('yum:config:proxy', salt['config.get']('master')) }}:3142
{% elif proxy -%}
proxy={{ proxy }}
{% endif %}