diff --git a/files/master b/files/master
index fea77c2f7..42e7866d9 100644
--- a/files/master
+++ b/files/master
@@ -37,7 +37,9 @@ log_file: /opt/so/log/salt/master
#
file_roots:
base:
- - /opt/so/saltstack/salt
+ - /opt/so/saltstack/local/salt
+ - /opt/so/saltstack/default/salt
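+# Salt serves the first match found, so files under local/ override files of the same name under default/.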
+
# The master_roots setting configures a master-only copy of the file_roots dictionary,
# used by the state compiler.
@@ -53,7 +55,8 @@ file_roots:
pillar_roots:
base:
- - /opt/so/saltstack/pillar
+ - /opt/so/saltstack/local/pillar
+ - /opt/so/saltstack/default/pillar
peer:
.*:
diff --git a/pillar/data/addtotab.sh b/pillar/data/addtotab.sh
index ad302607c..33a42a1b1 100644
--- a/pillar/data/addtotab.sh
+++ b/pillar/data/addtotab.sh
@@ -1,7 +1,8 @@
#!/usr/bin/env bash
# This script adds sensors/nodes/etc to the nodes tab
-
+default_salt_dir=/opt/so/saltstack/default
+local_salt_dir=/opt/so/saltstack/local
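+# Pillar data is written under local/ so that updates to default/ never overwrite it.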
TYPE=$1
NAME=$2
IPADDRESS=$3
@@ -15,7 +16,7 @@ MONINT=$9
#HOTNAME=$11
echo "Seeing if this host is already in here. If so delete it"
-if grep -q $NAME "/opt/so/saltstack/pillar/data/$TYPE.sls"; then
+if grep -q $NAME "$local_salt_dir/pillar/data/$TYPE.sls"; then
echo "Node Already Present - Let's re-add it"
awk -v blah=" $NAME:" 'BEGIN{ print_flag=1 }
{
@@ -31,27 +32,29 @@ if grep -q $NAME "/opt/so/saltstack/pillar/data/$TYPE.sls"; then
if ( print_flag == 1 )
print $0
-} ' /opt/so/saltstack/pillar/data/$TYPE.sls > /opt/so/saltstack/pillar/data/tmp.$TYPE.sls
-mv /opt/so/saltstack/pillar/data/tmp.$TYPE.sls /opt/so/saltstack/pillar/data/$TYPE.sls
+} ' $local_salt_dir/pillar/data/$TYPE.sls > $local_salt_dir/pillar/data/tmp.$TYPE.sls
+mv $local_salt_dir/pillar/data/tmp.$TYPE.sls $local_salt_dir/pillar/data/$TYPE.sls
echo "Deleted $NAME from the tab. Now adding it in again with updated info"
fi
-echo " $NAME:" >> /opt/so/saltstack/pillar/data/$TYPE.sls
-echo " ip: $IPADDRESS" >> /opt/so/saltstack/pillar/data/$TYPE.sls
-echo " manint: $MANINT" >> /opt/so/saltstack/pillar/data/$TYPE.sls
-echo " totalcpus: $CPUS" >> /opt/so/saltstack/pillar/data/$TYPE.sls
-echo " guid: $GUID" >> /opt/so/saltstack/pillar/data/$TYPE.sls
-echo " rootfs: $ROOTFS" >> /opt/so/saltstack/pillar/data/$TYPE.sls
-echo " nsmfs: $NSM" >> /opt/so/saltstack/pillar/data/$TYPE.sls
+echo " $NAME:" >> $local_salt_dir/pillar/data/$TYPE.sls
+echo " ip: $IPADDRESS" >> $local_salt_dir/pillar/data/$TYPE.sls
+echo " manint: $MANINT" >> $local_salt_dir/pillar/data/$TYPE.sls
+echo " totalcpus: $CPUS" >> $local_salt_dir/pillar/data/$TYPE.sls
+echo " guid: $GUID" >> $local_salt_dir/pillar/data/$TYPE.sls
+echo " rootfs: $ROOTFS" >> $local_salt_dir/pillar/data/$TYPE.sls
+echo " nsmfs: $NSM" >> $local_salt_dir/pillar/data/$TYPE.sls
if [ $TYPE == 'sensorstab' ]; then
- echo " monint: $MONINT" >> /opt/so/saltstack/pillar/data/$TYPE.sls
- salt-call state.apply common queue=True
+ echo " monint: $MONINT" >> $local_salt_dir/pillar/data/$TYPE.sls
+ salt-call state.apply grafana queue=True
fi
if [ $TYPE == 'evaltab' ]; then
- echo " monint: $MONINT" >> /opt/so/saltstack/pillar/data/$TYPE.sls
- salt-call state.apply common queue=True
- salt-call state.apply utility queue=True
+ echo " monint: $MONINT" >> $local_salt_dir/pillar/data/$TYPE.sls
+    if [ -z "${10}" ]; then
+ salt-call state.apply grafana queue=True
+ salt-call state.apply utility queue=True
+ fi
fi
#if [ $TYPE == 'nodestab' ]; then
-# echo " nodetype: $NODETYPE" >> /opt/so/saltstack/pillar/data/$TYPE.sls
-# echo " hotname: $HOTNAME" >> /opt/so/saltstack/pillar/data/$TYPE.sls
+# echo " nodetype: $NODETYPE" >> $local_salt_dir/pillar/data/$TYPE.sls
+# echo " hotname: $HOTNAME" >> $local_salt_dir/pillar/data/$TYPE.sls
#fi
diff --git a/pillar/data/evaltab.sls b/pillar/data/evaltab.sls
deleted file mode 100644
index 496542c18..000000000
--- a/pillar/data/evaltab.sls
+++ /dev/null
@@ -1 +0,0 @@
-evaltab:
diff --git a/pillar/data/mastersearchtab.sls b/pillar/data/mastersearchtab.sls
deleted file mode 100644
index 7e48930ab..000000000
--- a/pillar/data/mastersearchtab.sls
+++ /dev/null
@@ -1 +0,0 @@
-mastersearchtab:
diff --git a/pillar/data/mastertab.sls b/pillar/data/mastertab.sls
deleted file mode 100644
index daf832a5f..000000000
--- a/pillar/data/mastertab.sls
+++ /dev/null
@@ -1 +0,0 @@
-mastertab:
diff --git a/pillar/data/nodestab.sls b/pillar/data/nodestab.sls
deleted file mode 100644
index b30173cca..000000000
--- a/pillar/data/nodestab.sls
+++ /dev/null
@@ -1 +0,0 @@
-nodestab:
diff --git a/pillar/data/sensorstab.sls b/pillar/data/sensorstab.sls
deleted file mode 100644
index 60032a938..000000000
--- a/pillar/data/sensorstab.sls
+++ /dev/null
@@ -1 +0,0 @@
-sensorstab:
diff --git a/pillar/firewall/addfirewall.sh b/pillar/firewall/addfirewall.sh
index fa1f1c617..c30451aa5 100644
--- a/pillar/firewall/addfirewall.sh
+++ b/pillar/firewall/addfirewall.sh
@@ -1,13 +1,13 @@
#!/usr/bin/env bash
# This script adds ip addresses to specific rule sets defined by the user
-
+local_salt_dir=/opt/so/saltstack/local
POLICY=$1
IPADDRESS=$2
-if grep -q $2 "/opt/so/saltstack/pillar/firewall/$1.sls"; then
+if grep -q $2 "$local_salt_dir/pillar/firewall/$1.sls"; then
echo "Firewall Rule Already There"
else
- echo " - $2" >> /opt/so/saltstack/pillar/firewall/$1.sls
+ echo " - $2" >> $local_salt_dir/pillar/firewall/$1.sls
salt-call state.apply firewall queue=True
fi
diff --git a/pillar/logstash/master.sls b/pillar/logstash/master.sls
index 61d1bb6d1..1ff41b43c 100644
--- a/pillar/logstash/master.sls
+++ b/pillar/logstash/master.sls
@@ -2,5 +2,6 @@ logstash:
pipelines:
master:
config:
+ - so/0009_input_beats.conf
- so/0010_input_hhbeats.conf
- so/9999_output_redis.conf.jinja
diff --git a/pillar/logstash/search.sls b/pillar/logstash/search.sls
index b4e42a8a3..6b3d0422e 100644
--- a/pillar/logstash/search.sls
+++ b/pillar/logstash/search.sls
@@ -5,12 +5,12 @@ logstash:
- so/0900_input_redis.conf.jinja
- so/9000_output_zeek.conf.jinja
- so/9002_output_import.conf.jinja
+ - so/9034_output_syslog.conf.jinja
- so/9100_output_osquery.conf.jinja
- so/9400_output_suricata.conf.jinja
- so/9500_output_beats.conf.jinja
- so/9600_output_ossec.conf.jinja
- so/9700_output_strelka.conf.jinja
templates:
- - so/so-beats-template.json
- so/so-common-template.json
- so/so-zeek-template.json
diff --git a/salt/common/init.sls b/salt/common/init.sls
index 09d71114b..0ecba198d 100644
--- a/salt/common/init.sls
+++ b/salt/common/init.sls
@@ -62,6 +62,7 @@ commonpkgs:
- python3-dateutil
- python3-m2crypto
- python3-mysqldb
+ - git
heldpackages:
pkg.installed:
- pkgs:
@@ -96,12 +97,13 @@ commonpkgs:
- device-mapper-persistent-data
- lvm2
- openssl
+ - git
heldpackages:
pkg.installed:
- pkgs:
- containerd.io: 1.2.13-3.2.el7
- - docker-ce: 3:19.03.9-3.el7
+ - docker-ce: 3:19.03.11-3.el7
- hold: True
- update_holds: True
{% endif %}
@@ -128,4 +130,4 @@ utilsyncscripts:
- group: 0
- file_mode: 755
- template: jinja
- - source: salt://common/tools/sbin
\ No newline at end of file
+ - source: salt://common/tools/sbin
diff --git a/salt/common/tools/sbin/so-allow b/salt/common/tools/sbin/so-allow
index bede282b3..9be770bed 100755
--- a/salt/common/tools/sbin/so-allow
+++ b/salt/common/tools/sbin/so-allow
@@ -17,6 +17,9 @@
. /usr/sbin/so-common
+default_salt_dir=/opt/so/saltstack/default
+local_salt_dir=/opt/so/saltstack/local
+
SKIP=0
while getopts "abowi:" OPTION
@@ -80,10 +83,10 @@ if [ "$SKIP" -eq 0 ]; then
fi
echo "Adding $IP to the $FULLROLE role. This can take a few seconds"
-/opt/so/saltstack/pillar/firewall/addfirewall.sh $FULLROLE $IP
+$default_salt_dir/pillar/firewall/addfirewall.sh $FULLROLE $IP
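+# addfirewall.sh runs from default/ but appends the new rule to the pillar file under local/.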
# Check if Wazuh enabled
-if grep -q -R "wazuh: 1" /opt/so/saltstack/pillar/*; then
+if grep -q -R "wazuh: 1" $local_salt_dir/pillar/*; then
# If analyst, add to Wazuh AR whitelist
if [ "$FULLROLE" == "analyst" ]; then
WAZUH_MGR_CFG="/opt/so/wazuh/etc/ossec.conf"
diff --git a/salt/common/tools/sbin/so-bro-logs b/salt/common/tools/sbin/so-bro-logs
index 1593ead81..173d23029 100755
--- a/salt/common/tools/sbin/so-bro-logs
+++ b/salt/common/tools/sbin/so-bro-logs
@@ -1,11 +1,12 @@
#!/bin/bash
+local_salt_dir=/opt/so/saltstack/local
bro_logs_enabled() {
- echo "brologs:" > /opt/so/saltstack/pillar/brologs.sls
- echo " enabled:" >> /opt/so/saltstack/pillar/brologs.sls
+ echo "brologs:" > $local_salt_dir/pillar/brologs.sls
+ echo " enabled:" >> $local_salt_dir/pillar/brologs.sls
for BLOG in ${BLOGS[@]}; do
- echo " - $BLOG" | tr -d '"' >> /opt/so/saltstack/pillar/brologs.sls
+ echo " - $BLOG" | tr -d '"' >> $local_salt_dir/pillar/brologs.sls
done
}
diff --git a/salt/common/tools/sbin/so-cortex-restart b/salt/common/tools/sbin/so-cortex-restart
index ef0e3e4fe..841ca1bb6 100755
--- a/salt/common/tools/sbin/so-cortex-restart
+++ b/salt/common/tools/sbin/so-cortex-restart
@@ -1,5 +1,5 @@
#!/bin/bash
-
+#
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
@@ -17,4 +17,5 @@
. /usr/sbin/so-common
-/usr/sbin/so-restart cortex $1
+/usr/sbin/so-stop cortex $1
+/usr/sbin/so-start thehive $1
diff --git a/salt/common/tools/sbin/so-cortex-start b/salt/common/tools/sbin/so-cortex-start
index a08969cab..92fe88bb5 100755
--- a/salt/common/tools/sbin/so-cortex-start
+++ b/salt/common/tools/sbin/so-cortex-start
@@ -17,4 +17,4 @@
. /usr/sbin/so-common
-/usr/sbin/so-start cortex $1
+/usr/sbin/so-start thehive $1
diff --git a/salt/common/tools/sbin/so-cortex-stop b/salt/common/tools/sbin/so-cortex-stop
index a13d1e2e3..727b2c7fa 100755
--- a/salt/common/tools/sbin/so-cortex-stop
+++ b/salt/common/tools/sbin/so-cortex-stop
@@ -1,5 +1,5 @@
#!/bin/bash
-
+#
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
diff --git a/salt/common/tools/sbin/so-docker-refresh b/salt/common/tools/sbin/so-docker-refresh
new file mode 100644
index 000000000..3c1a2e301
--- /dev/null
+++ b/salt/common/tools/sbin/so-docker-refresh
@@ -0,0 +1,112 @@
+#!/bin/bash
+
+# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+got_root(){
+ if [ "$(id -u)" -ne 0 ]; then
+ echo "This script must be run using sudo!"
+ exit 1
+ fi
+}
+
+master_check() {
+ # Check to see if this is a master
+  MASTERCHECK=$(awk '/role/ {print $2}' /etc/salt/grains)
+  if [ "$MASTERCHECK" == 'so-eval' ] || [ "$MASTERCHECK" == 'so-master' ] || [ "$MASTERCHECK" == 'so-mastersearch' ] || [ "$MASTERCHECK" == 'so-standalone' ] || [ "$MASTERCHECK" == 'so-helix' ]; then
+ echo "This is a master. We can proceed"
+ else
+ echo "Please run soup on the master. The master controls all updates."
+ exit 1
+ fi
+}
+
+update_docker_containers() {
+
+ # Download the containers from the interwebs
+ for i in "${TRUSTED_CONTAINERS[@]}"
+ do
+ # Pull down the trusted docker image
+ echo "Downloading $i"
+ docker pull --disable-content-trust=false docker.io/soshybridhunter/$i
+ # Tag it with the new registry destination
+ docker tag soshybridhunter/$i $HOSTNAME:5000/soshybridhunter/$i
+ docker push $HOSTNAME:5000/soshybridhunter/$i
+ done
+
+}
+
+version_check() {
+ if [ -f /etc/soversion ]; then
+ VERSION=$(cat /etc/soversion)
+ else
+ echo "Unable to detect version. I will now terminate."
+ exit 1
+ fi
+}
+got_root
+master_check
+version_check
+
+# Use the hostname
+HOSTNAME=$(hostname)
+BUILD=HH
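+# Image tags are the HH (Hybrid Hunter) prefix followed by the version, e.g. so-nginx:HH$VERSION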
+# List all the containers
+if [ "$MASTERCHECK" != 'so-helix' ]; then
+ TRUSTED_CONTAINERS=( \
+ "so-acng:$BUILD$VERSION" \
+ "so-thehive-cortex:$BUILD$VERSION" \
+ "so-curator:$BUILD$VERSION" \
+ "so-domainstats:$BUILD$VERSION" \
+ "so-elastalert:$BUILD$VERSION" \
+ "so-elasticsearch:$BUILD$VERSION" \
+ "so-filebeat:$BUILD$VERSION" \
+ "so-fleet:$BUILD$VERSION" \
+ "so-fleet-launcher:$BUILD$VERSION" \
+ "so-freqserver:$BUILD$VERSION" \
+ "so-grafana:$BUILD$VERSION" \
+ "so-idstools:$BUILD$VERSION" \
+ "so-influxdb:$BUILD$VERSION" \
+ "so-kibana:$BUILD$VERSION" \
+ "so-kratos:$BUILD$VERSION" \
+ "so-logstash:$BUILD$VERSION" \
+ "so-mysql:$BUILD$VERSION" \
+ "so-navigator:$BUILD$VERSION" \
+ "so-nginx:$BUILD$VERSION" \
+ "so-playbook:$BUILD$VERSION" \
+ "so-redis:$BUILD$VERSION" \
+ "so-soc:$BUILD$VERSION" \
+ "so-soctopus:$BUILD$VERSION" \
+ "so-steno:$BUILD$VERSION" \
+ "so-strelka:$BUILD$VERSION" \
+ "so-suricata:$BUILD$VERSION" \
+ "so-telegraf:$BUILD$VERSION" \
+ "so-thehive:$BUILD$VERSION" \
+ "so-thehive-es:$BUILD$VERSION" \
+ "so-wazuh:$BUILD$VERSION" \
+ "so-zeek:$BUILD$VERSION" )
+ else
+ TRUSTED_CONTAINERS=( \
+ "so-filebeat:$BUILD$VERSION" \
+ "so-idstools:$BUILD$VERSION" \
+ "so-logstash:$BUILD$VERSION" \
+ "so-nginx:$BUILD$VERSION" \
+ "so-redis:$BUILD$VERSION" \
+ "so-steno:$BUILD$VERSION" \
+ "so-suricata:$BUILD$VERSION" \
+ "so-telegraf:$BUILD$VERSION" \
+ "so-zeek:$BUILD$VERSION" )
+ fi
+
+update_docker_containers
\ No newline at end of file
diff --git a/salt/common/tools/sbin/so-elasticsearch-templates b/salt/common/tools/sbin/so-elasticsearch-templates
index efe5f8345..829e2a68d 100755
--- a/salt/common/tools/sbin/so-elasticsearch-templates
+++ b/salt/common/tools/sbin/so-elasticsearch-templates
@@ -15,12 +15,13 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+default_salt_dir=/opt/so/saltstack/default
ELASTICSEARCH_HOST="{{ MASTERIP}}"
ELASTICSEARCH_PORT=9200
#ELASTICSEARCH_AUTH=""
# Define a default directory to load pipelines from
-ELASTICSEARCH_TEMPLATES="/opt/so/saltstack/salt/logstash/pipelines/templates/so/"
+ELASTICSEARCH_TEMPLATES="$default_salt_dir/salt/logstash/pipelines/templates/so/"
# Wait for ElasticSearch to initialize
echo -n "Waiting for ElasticSearch..."
diff --git a/salt/common/tools/sbin/so-features-enable b/salt/common/tools/sbin/so-features-enable
index a37743960..6ba9252a9 100755
--- a/salt/common/tools/sbin/so-features-enable
+++ b/salt/common/tools/sbin/so-features-enable
@@ -15,10 +15,11 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
. /usr/sbin/so-common
+local_salt_dir=/opt/so/saltstack/local
-VERSION=$(grep soversion /opt/so/saltstack/pillar/static.sls | cut -d':' -f2|sed 's/ //g')
+VERSION=$(grep soversion $local_salt_dir/pillar/static.sls | cut -d':' -f2|sed 's/ //g')
# Modify static.sls to enable Features
-sed -i 's/features: False/features: True/' /opt/so/saltstack/pillar/static.sls
+sed -i 's/features: False/features: True/' $local_salt_dir/pillar/static.sls
SUFFIX="-features"
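+# The -features suffix selects the feature-enabled builds of the images below.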
TRUSTED_CONTAINERS=( \
"so-elasticsearch:$VERSION$SUFFIX" \
diff --git a/salt/common/tools/sbin/so-helix-apikey b/salt/common/tools/sbin/so-helix-apikey
index 529ab93e4..c58d2ad89 100755
--- a/salt/common/tools/sbin/so-helix-apikey
+++ b/salt/common/tools/sbin/so-helix-apikey
@@ -1,4 +1,7 @@
#!/bin/bash
+
+local_salt_dir=/opt/so/saltstack/local
+
got_root() {
# Make sure you are root
@@ -10,13 +13,13 @@ got_root() {
}
got_root
-if [ ! -f /opt/so/saltstack/pillar/fireeye/init.sls ]; then
+if [ ! -f $local_salt_dir/pillar/fireeye/init.sls ]; then
echo "This is nto configured for Helix Mode. Please re-install."
exit
else
echo "Enter your Helix API Key: "
read APIKEY
- sed -i "s/^ api_key.*/ api_key: $APIKEY/g" /opt/so/saltstack/pillar/fireeye/init.sls
+ sed -i "s/^ api_key.*/ api_key: $APIKEY/g" $local_salt_dir/pillar/fireeye/init.sls
docker stop so-logstash
docker rm so-logstash
echo "Restarting Logstash for updated key"
diff --git a/salt/common/tools/sbin/so-saltstack-update b/salt/common/tools/sbin/so-saltstack-update
new file mode 100644
index 000000000..d4e380b61
--- /dev/null
+++ b/salt/common/tools/sbin/so-saltstack-update
@@ -0,0 +1,57 @@
+#!/bin/bash
+
+# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+default_salt_dir=/opt/so/saltstack/default
+clone_to_tmp() {
+
+ # TODO Need to add a air gap option
+ # Make a temp location for the files
+ mkdir /tmp/sogh
+ cd /tmp/sogh
+ #git clone -b dev https://github.com/Security-Onion-Solutions/securityonion-saltstack.git
+ git clone https://github.com/Security-Onion-Solutions/securityonion-saltstack.git
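+  # The desired branch is checked out later in copy_new_files (git checkout $BRANCH)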
+ cd /tmp
+
+}
+
+copy_new_files() {
+
+ # Copy new files over to the salt dir
+ cd /tmp/sogh/securityonion-saltstack
+ git checkout $BRANCH
+ rsync -a --exclude-from 'exclude-list.txt' salt $default_salt_dir/
+ rsync -a --exclude-from 'exclude-list.txt' pillar $default_salt_dir/
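+  # Only the default/ tree is refreshed; site customizations under local/ are left untouched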
+ chown -R socore:socore $default_salt_dir/salt
+ chown -R socore:socore $default_salt_dir/pillar
+ chmod 755 $default_salt_dir/pillar/firewall/addfirewall.sh
+ rm -rf /tmp/sogh
+}
+
+got_root(){
+ if [ "$(id -u)" -ne 0 ]; then
+ echo "This script must be run using sudo!"
+ exit 1
+ fi
+}
+
+got_root
+if [ $# -ne 1 ] ; then
+ BRANCH=master
+else
+ BRANCH=$1
+fi
+clone_to_tmp
+copy_new_files
\ No newline at end of file
diff --git a/salt/common/tools/sbin/so-start b/salt/common/tools/sbin/so-start
index a198377a1..690950373 100755
--- a/salt/common/tools/sbin/so-start
+++ b/salt/common/tools/sbin/so-start
@@ -32,5 +32,5 @@ fi
case $1 in
"all") salt-call state.highstate queue=True;;
"steno") if docker ps | grep -q so-$1; then printf "\n$1 is already running!\n\n"; else docker rm so-$1 >/dev/null 2>&1 ; salt-call state.apply pcap queue=True; fi ;;
- *) if docker ps | grep -q so-$1; then printf "\n$1 is already running\n\n"; else docker rm so-$1 >/dev/null 2>&1 ; salt-call state.apply $1 queue=True; fi ;;
+    *) if docker ps --format "{{.Names}}" | grep -q "^so-$1$"; then printf "\n$1 is already running\n\n"; else docker rm so-$1 >/dev/null 2>&1 ; salt-call state.apply $1 queue=True; fi ;;
esac
diff --git a/salt/common/tools/sbin/so-thehive-es-restart b/salt/common/tools/sbin/so-thehive-es-restart
new file mode 100755
index 000000000..d58caecdc
--- /dev/null
+++ b/salt/common/tools/sbin/so-thehive-es-restart
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+. /usr/sbin/so-common
+
+/usr/sbin/so-stop thehive-es $1
+/usr/sbin/so-start thehive $1
diff --git a/salt/common/tools/sbin/so-thehive-es-start b/salt/common/tools/sbin/so-thehive-es-start
new file mode 100755
index 000000000..92fe88bb5
--- /dev/null
+++ b/salt/common/tools/sbin/so-thehive-es-start
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+. /usr/sbin/so-common
+
+/usr/sbin/so-start thehive $1
diff --git a/salt/common/tools/sbin/so-thehive-es-stop b/salt/common/tools/sbin/so-thehive-es-stop
new file mode 100755
index 000000000..cf9cc2310
--- /dev/null
+++ b/salt/common/tools/sbin/so-thehive-es-stop
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+. /usr/sbin/so-common
+
+/usr/sbin/so-stop thehive-es $1
diff --git a/salt/common/tools/sbin/so-thehive-restart b/salt/common/tools/sbin/so-thehive-restart
index 08cd8318e..4b28c0030 100755
--- a/salt/common/tools/sbin/so-thehive-restart
+++ b/salt/common/tools/sbin/so-thehive-restart
@@ -1,5 +1,5 @@
#!/bin/bash
-
+#
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
diff --git a/salt/common/tools/sbin/so-thehive-stop b/salt/common/tools/sbin/so-thehive-stop
index b326f699c..6c56e0473 100755
--- a/salt/common/tools/sbin/so-thehive-stop
+++ b/salt/common/tools/sbin/so-thehive-stop
@@ -1,5 +1,5 @@
#!/bin/bash
-
+#
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
diff --git a/salt/common/tools/sbin/so-zeek-stats b/salt/common/tools/sbin/so-zeek-stats
new file mode 100644
index 000000000..656da7f04
--- /dev/null
+++ b/salt/common/tools/sbin/so-zeek-stats
@@ -0,0 +1,39 @@
+#!/bin/bash
+
+# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+# Show Zeek stats (capstats, netstats)
+
+show_stats() {
+ echo '##############'
+ echo '# Zeek Stats #'
+ echo '##############'
+ echo
+ echo "Average throughput:"
+ echo
+ docker exec -it so-zeek /opt/zeek/bin/zeekctl capstats
+ echo
+ echo "Average packet loss:"
+ echo
+ docker exec -it so-zeek /opt/zeek/bin/zeekctl netstats
+ echo
+}
+
+if docker ps | grep -q zeek; then
+ show_stats
+else
+ echo "Zeek is not running! Try starting it with 'so-zeek-start'." && exit 1;
+fi
diff --git a/salt/curator/init.sls b/salt/curator/init.sls
index d064b9f20..37b4fac87 100644
--- a/salt/curator/init.sls
+++ b/salt/curator/init.sls
@@ -89,7 +89,7 @@ curdel:
so-curatorcloseddeletecron:
cron.present:
- - name: /usr/sbin/so-curator-closed-delete
+ - name: /usr/sbin/so-curator-closed-delete > /opt/so/log/curator/cron-closed-delete.log 2>&1
- user: root
- minute: '*'
- hour: '*'
@@ -99,7 +99,7 @@ so-curatorcloseddeletecron:
so-curatorclosecron:
cron.present:
- - name: /usr/sbin/so-curator-close
+ - name: /usr/sbin/so-curator-close > /opt/so/log/curator/cron-close.log 2>&1
- user: root
- minute: '*'
- hour: '*'
@@ -109,7 +109,7 @@ so-curatorclosecron:
so-curatordeletecron:
cron.present:
- - name: /usr/sbin/so-curator-delete
+ - name: /usr/sbin/so-curator-delete > /opt/so/log/curator/cron-delete.log 2>&1
- user: root
- minute: '*'
- hour: '*'
diff --git a/salt/elasticsearch/files/ingest/beats.common b/salt/elasticsearch/files/ingest/beats.common
new file mode 100644
index 000000000..0e93abb03
--- /dev/null
+++ b/salt/elasticsearch/files/ingest/beats.common
@@ -0,0 +1,35 @@
+{
+ "description" : "beats.common",
+ "processors" : [
+ {"community_id": {"if": "ctx.winlog.event_data?.Protocol != null", "field":["winlog.event_data.SourceIp","winlog.event_data.SourcePort","winlog.event_data.DestinationIp","winlog.event_data.DestinationPort","winlog.event_data.Protocol"],"target_field":"network.community_id"}},
+ { "set": { "if": "ctx.winlog?.channel != null", "field": "dataset", "value": "wel-{{winlog.channel}}", "override": true } },
+ { "set": { "if": "ctx.agent?.type != null", "field": "module", "value": "{{agent.type}}", "override": true } },
+ { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 3", "field": "event.category", "value": "host,process,network", "override": true } },
+ { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 1", "field": "event.category", "value": "host,process", "override": true } },
+ { "rename": { "field": "agent.hostname", "target_field": "agent.name", "ignore_missing": true } },
+ { "rename": { "field": "winlog.event_data.SubjectUserName", "target_field": "user.name", "ignore_missing": true } },
+ { "rename": { "field": "winlog.event_data.DestinationHostname", "target_field": "destination.hostname", "ignore_missing": true } },
+ { "rename": { "field": "winlog.event_data.DestinationIp", "target_field": "destination.ip", "ignore_missing": true } },
+ { "rename": { "field": "winlog.event_data.DestinationPort", "target_field": "destination.port", "ignore_missing": true } },
+ { "rename": { "field": "winlog.event_data.Image", "target_field": "process.executable", "ignore_missing": true } },
+ { "rename": { "field": "winlog.event_data.ProcessID", "target_field": "process.pid", "ignore_missing": true } },
+ { "rename": { "field": "winlog.event_data.ProcessGuid", "target_field": "process.entity_id", "ignore_missing": true } },
+ { "rename": { "field": "winlog.event_data.CommandLine", "target_field": "process.command_line", "ignore_missing": true } },
+ { "rename": { "field": "winlog.event_data.CurrentDirectory", "target_field": "process.working_directory", "ignore_missing": true } },
+ { "rename": { "field": "winlog.event_data.Description", "target_field": "process.pe.description", "ignore_missing": true } },
+ { "rename": { "field": "winlog.event_data.Product", "target_field": "process.pe.product", "ignore_missing": true } },
+ { "rename": { "field": "winlog.event_data.OriginalFileName", "target_field": "process.pe.original_file_name", "ignore_missing": true } },
+ { "rename": { "field": "winlog.event_data.FileVersion", "target_field": "process.pe.file_version", "ignore_missing": true } },
+ { "rename": { "field": "winlog.event_data.ParentCommandLine", "target_field": "process.parent.command_line", "ignore_missing": true } },
+ { "rename": { "field": "winlog.event_data.ParentImage", "target_field": "process.parent.executable", "ignore_missing": true } },
+ { "rename": { "field": "winlog.event_data.ParentProcessGuid", "target_field": "process.parent.entity_id", "ignore_missing": true } },
+ { "rename": { "field": "winlog.event_data.ParentProcessId", "target_field": "process.ppid", "ignore_missing": true } },
+ { "rename": { "field": "winlog.event_data.Protocol", "target_field": "network.transport", "ignore_missing": true } },
+ { "rename": { "field": "winlog.event_data.User", "target_field": "user.name", "ignore_missing": true } },
+ { "rename": { "field": "winlog.event_data.SourceHostname", "target_field": "source.hostname", "ignore_missing": true } },
+ { "rename": { "field": "winlog.event_data.SourceIp", "target_field": "source.ip", "ignore_missing": true } },
+ { "rename": { "field": "winlog.event_data.SourcePort", "target_field": "source.port", "ignore_missing": true } },
+ { "rename": { "field": "winlog.event_data.targetFilename", "target_field": "file.target", "ignore_missing": true } },
+ { "pipeline": { "name": "common" } }
+ ]
+}
\ No newline at end of file
diff --git a/salt/elasticsearch/files/ingest/syslog b/salt/elasticsearch/files/ingest/syslog
new file mode 100644
index 000000000..1af0bc1c8
--- /dev/null
+++ b/salt/elasticsearch/files/ingest/syslog
@@ -0,0 +1,17 @@
+{
+ "description" : "syslog",
+ "processors" : [
+ {
+ "dissect": {
+ "field": "message",
+ "pattern" : "%{message}",
+ "on_failure": [ { "drop" : { } } ]
+ },
+ "remove": {
+ "field": [ "type", "agent" ],
+ "ignore_failure": true
+ }
+ },
+ { "pipeline": { "name": "common" } }
+ ]
+}
diff --git a/salt/elasticsearch/files/ingest/zeek.conn b/salt/elasticsearch/files/ingest/zeek.conn
index 49d775291..5e3ae9c79 100644
--- a/salt/elasticsearch/files/ingest/zeek.conn
+++ b/salt/elasticsearch/files/ingest/zeek.conn
@@ -17,7 +17,7 @@
{ "rename": { "field": "message2.orig_ip_bytes", "target_field": "client.ip_bytes", "ignore_missing": true } },
{ "rename": { "field": "message2.resp_pkts", "target_field": "server.packets", "ignore_missing": true } },
{ "rename": { "field": "message2.resp_ip_bytes", "target_field": "server.ip_bytes", "ignore_missing": true } },
- { "rename": { "field": "message2.tunnel_parents", "target_field": "connection.tunnel_parents", "ignore_missing": true } },
+ { "rename": { "field": "message2.tunnel_parents", "target_field": "log.id.tunnel_parents", "ignore_missing": true } },
{ "rename": { "field": "message2.orig_cc", "target_field": "client.country_code","ignore_missing": true } },
{ "rename": { "field": "message2.resp_cc", "target_field": "server.country_code", "ignore_missing": true } },
{ "rename": { "field": "message2.sensorname", "target_field": "observer.name", "ignore_missing": true } },
diff --git a/salt/elasticsearch/files/ingest/zeek.radius b/salt/elasticsearch/files/ingest/zeek.radius
index c74330690..715f41478 100644
--- a/salt/elasticsearch/files/ingest/zeek.radius
+++ b/salt/elasticsearch/files/ingest/zeek.radius
@@ -5,7 +5,7 @@
{ "json": { "field": "message", "target_field": "message2", "ignore_failure": true } },
{ "rename": { "field": "message2.username", "target_field": "user.name", "ignore_missing": true } },
{ "rename": { "field": "message2.mac", "target_field": "host.mac", "ignore_missing": true } },
- { "rename": { "field": "message2.framed_addr", "target_field": "framed_addr", "ignore_missing": true } },
+ { "rename": { "field": "message2.framed_addr", "target_field": "radius.framed_address", "ignore_missing": true } },
{ "rename": { "field": "message2.remote_ip", "target_field": "destination.ip", "ignore_missing": true } },
{ "rename": { "field": "message2.connect_info", "target_field": "radius.connect_info", "ignore_missing": true } },
{ "rename": { "field": "message2.reply_msg", "target_field": "radius.reply_message", "ignore_missing": true } },
diff --git a/salt/elasticsearch/files/ingest/zeek.tunnels b/salt/elasticsearch/files/ingest/zeek.tunnels
index bcddb61df..4cc7c8d5e 100644
--- a/salt/elasticsearch/files/ingest/zeek.tunnels
+++ b/salt/elasticsearch/files/ingest/zeek.tunnels
@@ -3,7 +3,7 @@
"processors" : [
{ "remove": { "field": ["host"], "ignore_failure": true } },
{ "json": { "field": "message", "target_field": "message2", "ignore_failure": true } },
- { "rename": { "field": "message2.uid", "target_field": "uid", "ignore_missing": true } },
+ { "rename": { "field": "message2.uid", "target_field": "log.id.uid", "ignore_missing": true } },
{ "dot_expander": { "field": "id.orig_h", "path": "message2", "ignore_failure": true } },
{ "rename": { "field": "message2.id.orig_h", "target_field": "source.ip", "ignore_missing": true } },
{ "dot_expander": { "field": "id.orig_p", "path": "message2", "ignore_failure": true } },
diff --git a/salt/filebeat/etc/filebeat.yml b/salt/filebeat/etc/filebeat.yml
index 1c4bee013..76c26b51d 100644
--- a/salt/filebeat/etc/filebeat.yml
+++ b/salt/filebeat/etc/filebeat.yml
@@ -75,6 +75,32 @@ filebeat.modules:
filebeat.inputs:
#------------------------------ Log prospector --------------------------------
{%- if grains['role'] == 'so-sensor' or grains['role'] == "so-eval" or grains['role'] == "so-helix" or grains['role'] == "so-heavynode" or grains['role'] == "so-standalone" %}
+
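+  # Listen for syslog on 514/udp and 514/tcp; the module/dataset/pipeline fields route events through the syslog ingest pipeline into so-syslog-* indices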
+ - type: udp
+ enabled: true
+ host: "0.0.0.0:514"
+ fields:
+ module: syslog
+ dataset: syslog
+ pipeline: "syslog"
+ index: "so-syslog-%{+yyyy.MM.dd}"
+ processors:
+ - drop_fields:
+ fields: ["source", "prospector", "input", "offset", "beat"]
+ fields_under_root: true
+
+ - type: tcp
+ enabled: true
+ host: "0.0.0.0:514"
+ fields:
+ module: syslog
+ dataset: syslog
+ pipeline: "syslog"
+ index: "so-syslog-%{+yyyy.MM.dd}"
+ processors:
+ - drop_fields:
+ fields: ["source", "prospector", "input", "offset", "beat"]
+ fields_under_root: true
{%- if BROVER != 'SURICATA' %}
{%- for LOGNAME in salt['pillar.get']('brologs:enabled', '') %}
- type: log
diff --git a/salt/filebeat/init.sls b/salt/filebeat/init.sls
index 409594b2d..897bb3937 100644
--- a/salt/filebeat/init.sls
+++ b/salt/filebeat/init.sls
@@ -57,12 +57,14 @@ so-filebeat:
- /opt/so/conf/filebeat/etc/filebeat.yml:/usr/share/filebeat/filebeat.yml:ro
- /nsm/zeek:/nsm/zeek:ro
- /nsm/strelka/log:/nsm/strelka/log:ro
- - /opt/so/log/suricata:/suricata:ro
+ - /nsm/suricata:/suricata:ro
- /opt/so/wazuh/logs/alerts:/wazuh/alerts:ro
- /opt/so/wazuh/logs/archives:/wazuh/archives:ro
- /nsm/osquery/fleet/:/nsm/osquery/fleet:ro
- /opt/so/conf/filebeat/etc/pki/filebeat.crt:/usr/share/filebeat/filebeat.crt:ro
- /opt/so/conf/filebeat/etc/pki/filebeat.key:/usr/share/filebeat/filebeat.key:ro
- /etc/ssl/certs/intca.crt:/usr/share/filebeat/intraca.crt:ro
+ - port_bindings:
+ - 0.0.0.0:514:514/udp
- watch:
- file: /opt/so/conf/filebeat/etc/filebeat.yml
diff --git a/salt/firewall/init.sls b/salt/firewall/init.sls
index a91ea20d0..1f96df882 100644
--- a/salt/firewall/init.sls
+++ b/salt/firewall/init.sls
@@ -137,6 +137,18 @@ enable_wazuh_manager_1514_udp_{{ip}}:
- position: 1
- save: True
+# Allow syslog
+enable_syslog_514_{{ip}}:
+ iptables.insert:
+ - table: filter
+ - chain: DOCKER-USER
+ - jump: ACCEPT
+ - proto: tcp
+ - source: {{ ip }}
+ - dport: 514
+ - position: 1
+ - save: True
+
# Rules if you are a Master
{% if grains['role'] in ['so-master', 'so-eval', 'so-helix', 'so-mastersearch', 'so-standalone'] %}
#This should be more granular
diff --git a/salt/hive/thehive/scripts/hive_init b/salt/hive/thehive/scripts/hive_init
deleted file mode 100755
index b1ef62d68..000000000
--- a/salt/hive/thehive/scripts/hive_init
+++ /dev/null
@@ -1,64 +0,0 @@
-#!/bin/bash
-{% set MASTERIP = salt['pillar.get']('static:masterip', '') %}
-{%- set HIVEUSER = salt['pillar.get']('static:hiveuser', '') %}
-{%- set HIVEPASSWORD = salt['pillar.get']('static:hivepassword', '') %}
-{%- set HIVEKEY = salt['pillar.get']('static:hivekey', '') %}
-
-hive_init(){
- sleep 120
- HIVE_IP="{{MASTERIP}}"
- HIVE_USER="{{HIVEUSER}}"
- HIVE_PASSWORD="{{HIVEPASSWORD}}"
- HIVE_KEY="{{HIVEKEY}}"
- SOCTOPUS_CONFIG="/opt/so/saltstack/salt/soctopus/files/SOCtopus.conf"
-
- echo -n "Waiting for TheHive..."
- COUNT=0
- HIVE_CONNECTED="no"
- while [[ "$COUNT" -le 240 ]]; do
- curl --output /dev/null --silent --head --fail -k "https://$HIVE_IP/thehive"
- if [ $? -eq 0 ]; then
- HIVE_CONNECTED="yes"
- echo "connected!"
- break
- else
- ((COUNT+=1))
- sleep 1
- echo -n "."
- fi
- done
-
- if [ "$HIVE_CONNECTED" == "yes" ]; then
-
- # Migrate DB
- curl -v -k -XPOST "https://$HIVE_IP:/thehive/api/maintenance/migrate"
-
- # Create intial TheHive user
- curl -v -k "https://$HIVE_IP/thehive/api/user" -H "Content-Type: application/json" -d "{\"login\" : \"$HIVE_USER\",\"name\" : \"$HIVE_USER\",\"roles\" : [\"read\",\"alert\",\"write\",\"admin\"],\"preferences\" : \"{}\",\"password\" : \"$HIVE_PASSWORD\", \"key\": \"$HIVE_KEY\"}"
-
- # Pre-load custom fields
- #
- # reputation
- curl -v -k "https://$HIVE_IP/thehive/api/list/custom_fields" -H "Authorization: Bearer $HIVE_KEY" -H "Content-Type: application/json" -d "{\"value\":{\"name\": \"reputation\", \"reference\": \"reputation\", \"description\": \"This field provides an overall reputation status for an address/domain.\", \"type\": \"string\", \"options\": []}}"
-
-
- touch /opt/so/state/thehive.txt
- else
- echo "We experienced an issue connecting to TheHive!"
- fi
-}
-
-if [ -f /opt/so/state/thehive.txt ]; then
- exit 0
-else
- rm -f garbage_file
- while ! wget -O garbage_file {{MASTERIP}}:9400 2>/dev/null
- do
- echo "Waiting for Elasticsearch..."
- rm -f garbage_file
- sleep 1
- done
- rm -f garbage_file
- sleep 5
- hive_init
-fi
diff --git a/salt/idstools/init.sls b/salt/idstools/init.sls
index eba5cfd26..d145a4e15 100644
--- a/salt/idstools/init.sls
+++ b/salt/idstools/init.sls
@@ -39,7 +39,7 @@ idstoolsetcsync:
so-ruleupdatecron:
cron.present:
- - name: /usr/sbin/so-rule-update.sh > /opt/so/log/idstools/download.log
+ - name: /usr/sbin/so-rule-update > /opt/so/log/idstools/download.log 2>&1
- user: root
- minute: '1'
- hour: '7'
@@ -58,11 +58,6 @@ synclocalnidsrules:
- user: 939
- group: 939
-ruleslink:
- file.symlink:
- - name: /opt/so/saltstack/salt/suricata/rules
- - target: /opt/so/rules/nids
-
so-idstools:
docker_container.running:
- image: {{ MASTER }}:5000/soshybridhunter/so-idstools:{{ VERSION }}
diff --git a/salt/logstash/init.sls b/salt/logstash/init.sls
index ba0e015f4..1118b6807 100644
--- a/salt/logstash/init.sls
+++ b/salt/logstash/init.sls
@@ -198,7 +198,7 @@ so-logstash:
- /etc/pki/ca.crt:/usr/share/filebeat/ca.crt:ro
{%- if grains['role'] == 'so-eval' %}
- /nsm/zeek:/nsm/zeek:ro
- - /opt/so/log/suricata:/suricata:ro
+ - /nsm/suricata:/suricata:ro
- /opt/so/wazuh/logs/alerts:/wazuh/alerts:ro
- /opt/so/wazuh/logs/archives:/wazuh/archives:ro
- /opt/so/log/fleet/:/osquery/logs:ro
diff --git a/salt/logstash/pipelines/config/custom/place_custom_config_in_local b/salt/logstash/pipelines/config/custom/place_custom_config_in_local
new file mode 100644
index 000000000..55c386a67
--- /dev/null
+++ b/salt/logstash/pipelines/config/custom/place_custom_config_in_local
@@ -0,0 +1 @@
+# Custom logstash configs should be placed in /opt/so/saltstack/local/salt/logstash/pipelines/config/custom/
diff --git a/salt/logstash/pipelines/config/so/0009_input_beats.conf b/salt/logstash/pipelines/config/so/0009_input_beats.conf
new file mode 100644
index 000000000..a5c1d491c
--- /dev/null
+++ b/salt/logstash/pipelines/config/so/0009_input_beats.conf
@@ -0,0 +1,6 @@
+input {
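+  # Receive events from external Beats shippers; the beat-ext tag is matched by 9500_output_beats.conf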
+ beats {
+ port => "5044"
+ tags => [ "beat-ext" ]
+ }
+}
\ No newline at end of file
diff --git a/salt/logstash/pipelines/config/so/9034_output_syslog.conf.jinja b/salt/logstash/pipelines/config/so/9034_output_syslog.conf.jinja
index 35d3cf7dc..7b35af576 100644
--- a/salt/logstash/pipelines/config/so/9034_output_syslog.conf.jinja
+++ b/salt/logstash/pipelines/config/so/9034_output_syslog.conf.jinja
@@ -3,24 +3,21 @@
{%- else %}
{%- set ES = salt['pillar.get']('node:mainip', '') -%}
{%- endif %}
-# Author: Justin Henderson
-# SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics
-# Updated by: Doug Burks
-# Last Update: 5/15/2017
filter {
- if "syslog" in [tags] and "test_data" not in [tags] {
+ if [module] =~ "syslog" {
mutate {
- ##add_tag => [ "conf_file_9034"]
- }
+      ##add_tag => [ "conf_file_9034"]
+ }
}
}
output {
- if "syslog" in [tags] and "test_data" not in [tags] {
+ if [module] =~ "syslog" {
elasticsearch {
+ pipeline => "%{module}"
hosts => "{{ ES }}"
index => "so-syslog-%{+YYYY.MM.dd}"
- template_name => "logstash"
+ template_name => "so-common"
template => "/so-common-template.json"
template_overwrite => true
}
diff --git a/salt/logstash/pipelines/config/so/9500_output_beats.conf.jinja b/salt/logstash/pipelines/config/so/9500_output_beats.conf.jinja
index dcfefa852..e50c04eee 100644
--- a/salt/logstash/pipelines/config/so/9500_output_beats.conf.jinja
+++ b/salt/logstash/pipelines/config/so/9500_output_beats.conf.jinja
@@ -3,22 +3,15 @@
{%- else %}
{%- set ES = salt['pillar.get']('node:mainip', '') -%}
{%- endif %}
-# Author: Wes Lambert
-# Last Update: 09/14/2018
-filter {
- if "beat" in [tags] {
- mutate {
- ##add_tag => [ "conf_file_9500"]
- }
- }
-}
+
output {
- if "beat" in [tags] {
+ if "beat-ext" in [tags] {
elasticsearch {
+ pipeline => "beats.common"
hosts => "{{ ES }}"
index => "so-beats-%{+YYYY.MM.dd}"
- template_name => "so-beats"
- template => "/so-beats-template.json"
+ template_name => "so-common"
+ template => "/so-common-template.json"
template_overwrite => true
}
}
diff --git a/salt/logstash/pipelines/templates/custom/Drop.Your.Custom.Templates.Here.conf b/salt/logstash/pipelines/templates/custom/Drop.Your.Custom.Templates.Here.conf
deleted file mode 100644
index 9ee9e27b5..000000000
--- a/salt/logstash/pipelines/templates/custom/Drop.Your.Custom.Templates.Here.conf
+++ /dev/null
@@ -1,2 +0,0 @@
-# Reference /usr/share/logstash/pipeline.custom/templates/YOURTEMPLATE.json
-#
diff --git a/salt/logstash/pipelines/templates/custom/place_custom_template_in_local b/salt/logstash/pipelines/templates/custom/place_custom_template_in_local
new file mode 100644
index 000000000..af38c6107
--- /dev/null
+++ b/salt/logstash/pipelines/templates/custom/place_custom_template_in_local
@@ -0,0 +1,2 @@
+# Reference /usr/share/logstash/pipeline.custom/templates/YOURTEMPLATE.json
+# Custom logstash templates should be placed in /opt/so/saltstack/local/salt/logstash/pipelines/templates/custom/
diff --git a/salt/logstash/pipelines/templates/so/so-beats-template.json b/salt/logstash/pipelines/templates/so/so-beats-template.json
deleted file mode 100644
index 858f1d5e5..000000000
--- a/salt/logstash/pipelines/templates/so/so-beats-template.json
+++ /dev/null
@@ -1,1286 +0,0 @@
-{
- "index_patterns": [
- "so-beats-*"
- ],
- "mappings": {
- "_meta": {
- "version": "6.1.3"
- },
- "date_detection": false,
- "dynamic_templates": [
- {
- "fields": {
- "mapping": {
- "type": "keyword"
- },
- "match_mapping_type": "string",
- "path_match": "fields.*"
- }
- },
- {
- "docker.container.labels": {
- "mapping": {
- "type": "keyword"
- },
- "match_mapping_type": "string",
- "path_match": "docker.container.labels.*"
- }
- },
- {
- "strings_as_keyword": {
- "mapping": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "match_mapping_type": "string"
- }
- }
- ],
- "properties": {
- "@timestamp": {
- "type": "date"
- },
- "event_data": {
- "type":"object",
- "dynamic": true
- },
- "beat_host": {
- "type":"object",
- "dynamic": true
- },
- "activity_id": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "beat": {
- "properties": {
- "hostname": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "name": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "timezone": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "version": {
- "ignore_above": 1024,
- "type": "keyword"
- }
- }
- },
- "username":{
- "type":"text",
- "fields": {
- "keyword":{
- "type":"keyword"
- }
- }
- },
- "computer_name": {
- "type": "text",
- "fields":{
- "keyword":{
- "type":"keyword"
- }
- }
- },
- "docker": {
- "properties": {
- "container": {
- "properties": {
- "id": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "image": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "labels": {
- "type": "object"
- },
- "name": {
- "ignore_above": 1024,
- "type": "keyword"
- }
- }
- }
- }
- },
- "error": {
- "properties": {
- "code": {
- "type": "long"
- },
- "message": {
- "norms": false,
- "type": "text"
- },
- "type": {
- "ignore_above": 1024,
- "type": "keyword"
- }
- }
- },
- "event_id": {
- "type": "long"
- },
- "fields": {
- "type": "object"
- },
- "keywords": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "kubernetes": {
- "properties": {
- "annotations": {
- "type": "object"
- },
- "container": {
- "properties": {
- "image": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "name": {
- "ignore_above": 1024,
- "type": "keyword"
- }
- }
- },
- "labels": {
- "type": "object"
- },
- "namespace": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "pod": {
- "properties": {
- "name": {
- "ignore_above": 1024,
- "type": "keyword"
- }
- }
- }
- }
- },
- "level": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "log_name": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "message": {
- "norms": false,
- "type": "text"
- },
- "message_error": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "meta": {
- "properties": {
- "cloud": {
- "properties": {
- "availability_zone": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "instance_id": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "instance_name": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "machine_type": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "project_id": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "provider": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "region": {
- "ignore_above": 1024,
- "type": "keyword"
- }
- }
- }
- }
- },
- "opcode": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "process_id": {
- "type": "long"
- },
- "provider_guid": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "record_number": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "related_activity_id": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "source_name": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "tags": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "task": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "thread_id": {
- "type": "long"
- },
- "type": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "user": {
- "properties": {
- "domain": {
- "type": "keyword"
- },
- "identifier": {
- "type": "keyword"
- },
- "name": {
- "type": "keyword"
- },
- "type": {
- "type": "keyword"
- }
- }
- },
- "user_data": {
- "type": "object",
- "dynamic": "true"
- },
- "version": {
- "type": "keyword"
- },
- "xml": {
- "norms": false,
- "type": "text"
- },
- "apache2": {
- "properties": {
- "access": {
- "properties": {
- "agent": {
- "norms": false,
- "type": "text"
- },
- "body_sent": {
- "properties": {
- "bytes": {
- "type": "long"
- }
- }
- },
- "geoip": {
- "properties": {
- "city_name": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "continent_name": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "country_iso_code": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "location": {
- "type": "geo_point"
- },
- "region_name": {
- "ignore_above": 1024,
- "type": "keyword"
- }
- }
- },
- "http_version": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "method": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "referrer": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "remote_ip": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "response_code": {
- "type": "long"
- },
- "url": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "user_agent": {
- "properties": {
- "device": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "major": {
- "type": "long"
- },
- "minor": {
- "type": "long"
- },
- "name": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "os": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "os_major": {
- "type": "long"
- },
- "os_minor": {
- "type": "long"
- },
- "os_name": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "patch": {
- "ignore_above": 1024,
- "type": "keyword"
- }
- }
- },
- "user_name": {
- "ignore_above": 1024,
- "type": "keyword"
- }
- }
- },
- "error": {
- "properties": {
- "client": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "code": {
- "type": "long"
- },
- "level": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "message": {
- "norms": false,
- "type": "text"
- },
- "module": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "pid": {
- "type": "long"
- },
- "tid": {
- "type": "long"
- },
- "type": {
- "ignore_above": 1024,
- "type": "keyword"
- }
- }
- }
- }
- },
- "auditd": {
- "properties": {
- "log": {
- "properties": {
- "a0": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "acct": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "geoip": {
- "properties": {
- "city_name": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "continent_name": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "country_iso_code": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "location": {
- "type": "geo_point"
- },
- "region_name": {
- "ignore_above": 1024,
- "type": "keyword"
- }
- }
- },
- "item": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "items": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "new_auid": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "new_ses": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "old_auid": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "old_ses": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "pid": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "ppid": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "record_type": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "res": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "sequence": {
- "type": "long"
- }
- }
- }
- }
- },
- "fileset": {
- "properties": {
- "module": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "name": {
- "ignore_above": 1024,
- "type": "keyword"
- }
- }
- },
- "icinga": {
- "properties": {
- "debug": {
- "properties": {
- "facility": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "message": {
- "norms": false,
- "type": "text"
- },
- "severity": {
- "ignore_above": 1024,
- "type": "keyword"
- }
- }
- },
- "main": {
- "properties": {
- "facility": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "message": {
- "norms": false,
- "type": "text"
- },
- "severity": {
- "ignore_above": 1024,
- "type": "keyword"
- }
- }
- },
- "startup": {
- "properties": {
- "facility": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "message": {
- "norms": false,
- "type": "text"
- },
- "severity": {
- "ignore_above": 1024,
- "type": "keyword"
- }
- }
- }
- }
- },
- "kafka": {
- "properties": {
- "log": {
- "properties": {
- "class": {
- "norms": false,
- "type": "text"
- },
- "component": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "level": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "message": {
- "norms": false,
- "type": "text"
- },
- "timestamp": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "trace": {
- "properties": {
- "class": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "full": {
- "norms": false,
- "type": "text"
- },
- "message": {
- "norms": false,
- "type": "text"
- }
- }
- }
- }
- }
- }
- },
- "logstash": {
- "properties": {
- "log": {
- "properties": {
- "level": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "log_event": {
- "type": "object"
- },
- "message": {
- "norms": false,
- "type": "text"
- },
- "module": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "thread": {
- "norms": false,
- "type": "text"
- }
- }
- },
- "slowlog": {
- "properties": {
- "event": {
- "norms": false,
- "type": "text"
- },
- "level": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "message": {
- "norms": false,
- "type": "text"
- },
- "module": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "plugin_name": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "plugin_params": {
- "norms": false,
- "type": "text"
- },
- "plugin_params_object": {
- "type": "object"
- },
- "plugin_type": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "thread": {
- "norms": false,
- "type": "text"
- },
- "took_in_millis": {
- "type": "long"
- },
- "took_in_nanos": {
- "type": "long"
- }
- }
- }
- }
- },
- "mysql": {
- "properties": {
- "error": {
- "properties": {
- "level": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "message": {
- "norms": false,
- "type": "text"
- },
- "thread_id": {
- "type": "long"
- },
- "timestamp": {
- "ignore_above": 1024,
- "type": "keyword"
- }
- }
- },
- "slowlog": {
- "properties": {
- "host": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "id": {
- "type": "long"
- },
- "ip": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "lock_time": {
- "properties": {
- "sec": {
- "type": "float"
- }
- }
- },
- "query": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "query_time": {
- "properties": {
- "sec": {
- "type": "float"
- }
- }
- },
- "rows_examined": {
- "type": "long"
- },
- "rows_sent": {
- "type": "long"
- },
- "timestamp": {
- "type": "long"
- },
- "user": {
- "ignore_above": 1024,
- "type": "keyword"
- }
- }
- }
- }
- },
- "nginx": {
- "properties": {
- "access": {
- "properties": {
- "agent": {
- "norms": false,
- "type": "text"
- },
- "body_sent": {
- "properties": {
- "bytes": {
- "type": "long"
- }
- }
- },
- "geoip": {
- "properties": {
- "city_name": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "continent_name": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "country_iso_code": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "location": {
- "type": "geo_point"
- },
- "region_name": {
- "ignore_above": 1024,
- "type": "keyword"
- }
- }
- },
- "http_version": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "method": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "referrer": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "remote_ip": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "response_code": {
- "type": "long"
- },
- "url": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "user_agent": {
- "properties": {
- "device": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "major": {
- "type": "long"
- },
- "minor": {
- "type": "long"
- },
- "name": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "os": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "os_major": {
- "type": "long"
- },
- "os_minor": {
- "type": "long"
- },
- "os_name": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "patch": {
- "ignore_above": 1024,
- "type": "keyword"
- }
- }
- },
- "user_name": {
- "ignore_above": 1024,
- "type": "keyword"
- }
- }
- },
- "error": {
- "properties": {
- "connection_id": {
- "type": "long"
- },
- "level": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "message": {
- "norms": false,
- "type": "text"
- },
- "pid": {
- "type": "long"
- },
- "tid": {
- "type": "long"
- }
- }
- }
- }
- },
- "offset": {
- "type": "long"
- },
- "postgresql": {
- "properties": {
- "log": {
- "properties": {
- "database": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "duration": {
- "type": "float"
- },
- "level": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "message": {
- "norms": false,
- "type": "text"
- },
- "query": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "thread_id": {
- "type": "long"
- },
- "timestamp": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "timezone": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "user": {
- "ignore_above": 1024,
- "type": "keyword"
- }
- }
- }
- }
- },
- "prospector": {
- "properties": {
- "type": {
- "ignore_above": 1024,
- "type": "keyword"
- }
- }
- },
- "read_timestamp": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "redis": {
- "properties": {
- "log": {
- "properties": {
- "level": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "message": {
- "norms": false,
- "type": "text"
- },
- "pid": {
- "type": "long"
- },
- "role": {
- "ignore_above": 1024,
- "type": "keyword"
- }
- }
- },
- "slowlog": {
- "properties": {
- "args": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "cmd": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "duration": {
- "properties": {
- "us": {
- "type": "long"
- }
- }
- },
- "id": {
- "type": "long"
- },
- "key": {
- "ignore_above": 1024,
- "type": "keyword"
- }
- }
- }
- }
- },
- "stream": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "system": {
- "properties": {
- "auth": {
- "properties": {
- "groupadd": {
- "properties": {
- "gid": {
- "type": "long"
- },
- "name": {
- "ignore_above": 1024,
- "type": "keyword"
- }
- }
- },
- "hostname": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "message": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "pid": {
- "type": "long"
- },
- "program": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "ssh": {
- "properties": {
- "dropped_ip": {
- "type": "ip"
- },
- "event": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "geoip": {
- "properties": {
- "city_name": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "continent_name": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "country_iso_code": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "location": {
- "type": "geo_point"
- },
- "region_name": {
- "ignore_above": 1024,
- "type": "keyword"
- }
- }
- },
- "ip": {
- "type": "ip"
- },
- "method": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "port": {
- "type": "long"
- },
- "signature": {
- "ignore_above": 1024,
- "type": "keyword"
- }
- }
- },
- "sudo": {
- "properties": {
- "command": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "error": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "pwd": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "tty": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "user": {
- "ignore_above": 1024,
- "type": "keyword"
- }
- }
- },
- "timestamp": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "user": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "useradd": {
- "properties": {
- "gid": {
- "type": "long"
- },
- "home": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "name": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "shell": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "uid": {
- "type": "long"
- }
- }
- }
- }
- },
- "syslog": {
- "properties": {
- "hostname": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "message": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "pid": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "program": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "timestamp": {
- "ignore_above": 1024,
- "type": "keyword"
- }
- }
- }
- }
- },
- "traefik": {
- "properties": {
- "access": {
- "properties": {
- "agent": {
- "norms": false,
- "type": "text"
- },
- "backend_url": {
- "norms": false,
- "type": "text"
- },
- "body_sent": {
- "properties": {
- "bytes": {
- "type": "long"
- }
- }
- },
- "frontend_name": {
- "norms": false,
- "type": "text"
- },
- "geoip": {
- "properties": {
- "city_name": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "continent_name": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "country_iso_code": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "location": {
- "type": "geo_point"
- },
- "region_name": {
- "ignore_above": 1024,
- "type": "keyword"
- }
- }
- },
- "http_version": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "method": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "referrer": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "remote_ip": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "request_count": {
- "type": "long"
- },
- "response_code": {
- "type": "long"
- },
- "url": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "user_agent": {
- "properties": {
- "device": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "major": {
- "type": "long"
- },
- "minor": {
- "type": "long"
- },
- "name": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "os": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "os_major": {
- "type": "long"
- },
- "os_minor": {
- "type": "long"
- },
- "os_name": {
- "ignore_above": 1024,
- "type": "keyword"
- },
- "patch": {
- "ignore_above": 1024,
- "type": "keyword"
- }
- }
- },
- "user_name": {
- "ignore_above": 1024,
- "type": "keyword"
- }
- }
- }
- }
- }
- }
- },
- "order": 1,
- "settings": {
- "index": {
- "mapping": {
- "total_fields": {
- "limit": 10000
- }
- },
- "number_of_replicas": 0,
- "number_of_shards": 1,
- "refresh_interval": "30s"
- }
- }
-}
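
For context, the settings block removed above is a standard legacy index-template payload. A minimal sketch of applying the same settings by hand, assuming a reachable Elasticsearch on localhost:9200; the template name and index pattern are assumptions, not taken from this repo:

# Sketch only: re-applying the removed index settings through the
# legacy template API. Template name and index pattern are assumed.
import json
import requests

template = {
    "index_patterns": ["logstash-*"],      # assumed pattern
    "order": 1,
    "settings": {
        "index": {
            "mapping": {"total_fields": {"limit": 10000}},
            "number_of_replicas": 0,
            "number_of_shards": 1,
            "refresh_interval": "30s",
        }
    },
}

resp = requests.put(
    "http://localhost:9200/_template/example-template",
    headers={"Content-Type": "application/json"},
    data=json.dumps(template),
)
print(resp.status_code, resp.json())
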
diff --git a/salt/master/files/add_minion.sh b/salt/master/files/add_minion.sh
index 220317193..40d1c6adf 100755
--- a/salt/master/files/add_minion.sh
+++ b/salt/master/files/add_minion.sh
@@ -1,10 +1,10 @@
#!/usr/bin/env bash
# This script adds pillar and schedule files securely
-
+local_salt_dir=/opt/so/saltstack/local
MINION=$1
echo "Adding $1"
- cp /tmp/$MINION/pillar/$MINION.sls /opt/so/saltstack/pillar/minions/
- cp /tmp/$MINION/schedules/* /opt/so/saltstack/salt/patch/os/schedules/
+ cp /tmp/$MINION/pillar/$MINION.sls $local_salt_dir/pillar/minions/
+ cp /tmp/$MINION/schedules/* $local_salt_dir/salt/patch/os/schedules/
rm -rf /tmp/$MINION
\ No newline at end of file
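
A minimal Python sketch of the same import step, creating the destination directory explicitly (which plain cp does not do); the minion ID is a placeholder:

# Sketch of the minion-import copy; "example_minion" is a placeholder.
import glob
import os
import shutil

minion = "example_minion"
local_salt_dir = "/opt/so/saltstack/local"
schedules_dir = os.path.join(local_salt_dir, "salt/patch/os/schedules")

os.makedirs(schedules_dir, exist_ok=True)
shutil.copy(f"/tmp/{minion}/pillar/{minion}.sls",
            os.path.join(local_salt_dir, "pillar/minions"))
for schedule in glob.glob(f"/tmp/{minion}/schedules/*"):
    shutil.copy(schedule, schedules_dir)
shutil.rmtree(f"/tmp/{minion}", ignore_errors=True)
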
diff --git a/salt/master/init.sls b/salt/master/init.sls
index 4dab12a68..3c6b81e5e 100644
--- a/salt/master/init.sls
+++ b/salt/master/init.sls
@@ -61,6 +61,7 @@ so-aptcacherng:
docker_container.running:
- image: {{ MASTER }}:5000/soshybridhunter/so-acng:{{ VERSION }}
- hostname: so-acng
+ - restart_policy: always
- port_bindings:
- 0.0.0.0:3142:3142
- binds:
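
The added `restart_policy: always` maps onto Docker's native restart policies, so the container comes back after a crash, daemon restart, or reboot. A rough equivalent with the Docker SDK for Python; image, name, and port are placeholders rather than the real so-acng values:

# Rough SDK equivalent of `restart_policy: always`.
import docker

client = docker.from_env()
client.containers.run(
    "registry:2",                        # placeholder image
    name="so-example",
    detach=True,
    restart_policy={"Name": "always"},   # survive crashes and reboots
    ports={"5000/tcp": ("0.0.0.0", 5000)},
)
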
diff --git a/salt/nginx/etc/nginx.conf.so-eval b/salt/nginx/etc/nginx.conf.so-eval
index 336d27343..0d793f70a 100644
--- a/salt/nginx/etc/nginx.conf.so-eval
+++ b/salt/nginx/etc/nginx.conf.so-eval
@@ -134,7 +134,7 @@ http {
proxy_set_header Connection "Upgrade";
}
- location ~ ^/auth/.*?(whoami|login|logout) {
+ location ~ ^/auth/.*?(whoami|login|logout|settings) {
rewrite /auth/(.*) /$1 break;
proxy_pass http://{{ masterip }}:4433;
proxy_read_timeout 90;
diff --git a/salt/nginx/etc/nginx.conf.so-master b/salt/nginx/etc/nginx.conf.so-master
index 33edb9c3e..2178b6017 100644
--- a/salt/nginx/etc/nginx.conf.so-master
+++ b/salt/nginx/etc/nginx.conf.so-master
@@ -134,7 +134,7 @@ http {
proxy_set_header Connection "Upgrade";
}
- location ~ ^/auth/.*?(whoami|login|logout) {
+ location ~ ^/auth/.*?(whoami|login|logout|settings) {
rewrite /auth/(.*) /$1 break;
proxy_pass http://{{ masterip }}:4433;
proxy_read_timeout 90;
diff --git a/salt/nginx/etc/nginx.conf.so-mastersearch b/salt/nginx/etc/nginx.conf.so-mastersearch
index 33edb9c3e..2178b6017 100644
--- a/salt/nginx/etc/nginx.conf.so-mastersearch
+++ b/salt/nginx/etc/nginx.conf.so-mastersearch
@@ -134,7 +134,7 @@ http {
proxy_set_header Connection "Upgrade";
}
- location ~ ^/auth/.*?(whoami|login|logout) {
+ location ~ ^/auth/.*?(whoami|login|logout|settings) {
rewrite /auth/(.*) /$1 break;
proxy_pass http://{{ masterip }}:4433;
proxy_read_timeout 90;
diff --git a/salt/nginx/etc/nginx.conf.so-standalone b/salt/nginx/etc/nginx.conf.so-standalone
index 33edb9c3e..2178b6017 100644
--- a/salt/nginx/etc/nginx.conf.so-standalone
+++ b/salt/nginx/etc/nginx.conf.so-standalone
@@ -134,7 +134,7 @@ http {
proxy_set_header Connection "Upgrade";
}
- location ~ ^/auth/.*?(whoami|login|logout) {
+ location ~ ^/auth/.*?(whoami|login|logout|settings) {
rewrite /auth/(.*) /$1 break;
proxy_pass http://{{ masterip }}:4433;
proxy_read_timeout 90;
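
The same one-word regex change is applied to all four nginx configs so that Kratos settings requests are proxied like the other auth endpoints. A quick sanity check of the updated location pattern; the sample paths are illustrative, not taken from the actual Kratos routes:

# Quick check of the updated nginx location regex.
import re

location = re.compile(r"^/auth/.*?(whoami|login|logout|settings)")

for path in ("/auth/sessions/whoami",
             "/auth/self-service/settings",
             "/auth/self-service/recovery"):
    print(path, "->", "proxied" if location.search(path) else "not matched")
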
diff --git a/salt/nodered/files/nodered_load_flows b/salt/nodered/files/nodered_load_flows
index c48fcd692..5617b1022 100644
--- a/salt/nodered/files/nodered_load_flows
+++ b/salt/nodered/files/nodered_load_flows
@@ -1,5 +1,6 @@
{%- set ip = salt['pillar.get']('static:masterip', '') -%}
#!/bin/bash
+default_salt_dir=/opt/so/saltstack/default
echo "Waiting for connection"
until $(curl --output /dev/null --silent --head http://{{ ip }}:1880); do
@@ -7,5 +8,5 @@ until $(curl --output /dev/null --silent --head http://{{ ip }}:1880); do
sleep 1
done
echo "Loading flows..."
-curl -XPOST -v -H "Content-Type: application/json" -d @/opt/so/saltstack/salt/nodered/so_flows.json {{ ip }}:1880/flows
+curl -XPOST -v -H "Content-Type: application/json" -d @$default_salt_dir/salt/nodered/so_flows.json {{ ip }}:1880/flows
echo "Done loading..."
diff --git a/salt/nodered/init.sls b/salt/nodered/init.sls
index cb1068d30..c501445a2 100644
--- a/salt/nodered/init.sls
+++ b/salt/nodered/init.sls
@@ -36,7 +36,7 @@ nodered:
noderedflows:
file.recurse:
- - name: /opt/so/saltstack/salt/nodered/
+ - name: /opt/so/saltstack/default/salt/nodered/
- source: salt://nodered/files
- user: 947
- group: 939
diff --git a/salt/playbook/files/playbook_db_init.sh b/salt/playbook/files/playbook_db_init.sh
index c77b93df1..713575f97 100644
--- a/salt/playbook/files/playbook_db_init.sh
+++ b/salt/playbook/files/playbook_db_init.sh
@@ -1,5 +1,7 @@
{%- set MYSQLPASS = salt['pillar.get']('secrets:mysql', None) -%}
#!/bin/sh
-docker cp /opt/so/saltstack/salt/playbook/files/playbook_db_init.sql so-mysql:/tmp/playbook_db_init.sql
+default_salt_dir=/opt/so/saltstack/default
+
+docker cp $default_salt_dir/salt/playbook/files/playbook_db_init.sql so-mysql:/tmp/playbook_db_init.sql
docker exec so-mysql /bin/bash -c "/usr/bin/mysql -b -uroot -p{{MYSQLPASS}} < /tmp/playbook_db_init.sql"
\ No newline at end of file
diff --git a/salt/playbook/init.sls b/salt/playbook/init.sls
index a5242f561..fec93b71e 100644
--- a/salt/playbook/init.sls
+++ b/salt/playbook/init.sls
@@ -86,15 +86,22 @@ so-playbook:
{% endif %}
+playbooklogdir:
+ file.directory:
+ - name: /opt/so/log/playbook
+ - user: 939
+ - group: 939
+ - makedirs: True
+
so-playbooksynccron:
cron.present:
- - name: /usr/sbin/so-playbook-sync
+ - name: /usr/sbin/so-playbook-sync > /opt/so/log/playbook/sync.log 2>&1
- user: root
- minute: '*/5'
so-playbookruleupdatecron:
cron.present:
- - name: /usr/sbin/so-playbook-ruleupdate
+ - name: /usr/sbin/so-playbook-ruleupdate > /opt/so/log/playbook/update.log 2>&1
- user: root
- minute: '1'
- hour: '6'
\ No newline at end of file
diff --git a/salt/reactor/fleet.sls b/salt/reactor/fleet.sls
index c7bade3ab..9c8023a71 100644
--- a/salt/reactor/fleet.sls
+++ b/salt/reactor/fleet.sls
@@ -9,9 +9,9 @@ import subprocess
def run():
MINIONID = data['id']
ACTION = data['data']['action']
-
- STATICFILE = '/opt/so/saltstack/pillar/static.sls'
- SECRETSFILE = '/opt/so/saltstack/pillar/secrets.sls'
+ local_salt_dir = '/opt/so/saltstack/local'
+ STATICFILE = local_salt_dir + '/pillar/static.sls'
+ SECRETSFILE = local_salt_dir + '/pillar/secrets.sls'
if MINIONID.split('_')[-1] in ['master','eval','fleet','mastersearch','standalone']:
if ACTION == 'enablefleet':
@@ -58,7 +58,7 @@ def run():
PACKAGEVERSION += 1
# Run Docker container that will build the packages
- gen_packages = subprocess.run(["docker", "run","--rm", "--mount", "type=bind,source=/opt/so/saltstack/salt/fleet/packages,target=/output", \
+ gen_packages = subprocess.run(["docker", "run","--rm", "--mount", "type=bind,source=" + local_salt_dir + "/salt/fleet/packages,target=/output", \
"--mount", "type=bind,source=/etc/ssl/certs/intca.crt,target=/var/launcher/launcher.crt", f"{ MASTER }:5000/soshybridhunter/so-fleet-launcher:{ VERSION }", \
f"{ESECRET}", f"{PACKAGEHOSTNAME}:8090", f"{PACKAGEVERSION}.1.1"], stdout=subprocess.PIPE, encoding='ascii')
diff --git a/salt/registry/init.sls b/salt/registry/init.sls
index ed56d25ae..9ee44d1de 100644
--- a/salt/registry/init.sls
+++ b/salt/registry/init.sls
@@ -42,6 +42,7 @@ so-dockerregistry:
docker_container.running:
- image: registry:2
- hostname: so-registry
+ - restart_policy: always
- port_bindings:
- 0.0.0.0:5000:5000
- binds:
diff --git a/salt/sensoroni/files/sensoroni.json b/salt/sensoroni/files/sensoroni.json
deleted file mode 100644
index d53fe0a66..000000000
--- a/salt/sensoroni/files/sensoroni.json
+++ /dev/null
@@ -1,25 +0,0 @@
-{%- set MASTERIP = salt['pillar.get']('static:masterip', '') -%}
-{%- set SENSORONIKEY = salt['pillar.get']('static:sensoronikey', '') -%}
-{
- "logFilename": "/opt/sensoroni/logs/sensoroni-server.log",
- "server": {
- "bindAddress": "0.0.0.0:9822",
- "maxPacketCount": 5000,
- "htmlDir": "html",
- "modules": {
- "filedatastore": {
- "jobDir": "jobs"
- },
- "securityonion": {
- "elasticsearchHost": "http://{{ MASTERIP }}:9200",
- "elasticsearchUsername": "",
- "elasticsearchPassword": "",
- "elasticsearchVerifyCert": false
- },
- "statickeyauth": {
- "anonymousCidr": "172.17.0.0/24",
- "apiKey": "{{ SENSORONIKEY }}"
- }
- }
- }
-}
diff --git a/salt/sensoroni/init.sls b/salt/sensoroni/init.sls
deleted file mode 100644
index 441394df6..000000000
--- a/salt/sensoroni/init.sls
+++ /dev/null
@@ -1,45 +0,0 @@
-{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
-{% set MASTER = salt['grains.get']('master') %}
-
-sensoronidir:
- file.directory:
- - name: /opt/so/conf/sensoroni
- - user: 939
- - group: 939
- - makedirs: True
-
-sensoronidatadir:
- file.directory:
- - name: /nsm/sensoroni/jobs
- - user: 939
- - group: 939
- - makedirs: True
-
-sensoronilogdir:
- file.directory:
- - name: /opt/so/log/sensoroni
- - user: 939
- - group: 939
- - makedirs: True
-
-sensoronisync:
- file.recurse:
- - name: /opt/so/conf/sensoroni
- - source: salt://sensoroni/files
- - user: 939
- - group: 939
- - template: jinja
-
-so-sensoroni:
- docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-sensoroni:{{ VERSION }}
- - hostname: sensoroni
- - name: so-sensoroni
- - binds:
- - /nsm/sensoroni/jobs:/opt/sensoroni/jobs:rw
- - /opt/so/conf/sensoroni/sensoroni.json:/opt/sensoroni/sensoroni.json:ro
- - /opt/so/log/sensoroni/:/opt/sensoroni/logs/:rw
- - port_bindings:
- - 0.0.0.0:9822:9822
- - watch:
- - file: /opt/so/conf/sensoroni
diff --git a/salt/soc/files/kratos/kratos.yaml b/salt/soc/files/kratos/kratos.yaml
index e5a970557..7939ec35b 100644
--- a/salt/soc/files/kratos/kratos.yaml
+++ b/salt/soc/files/kratos/kratos.yaml
@@ -42,7 +42,7 @@ urls:
login_ui: https://{{ WEBACCESS }}/login/
registration_ui: https://{{ WEBACCESS }}/login/
error_ui: https://{{ WEBACCESS }}/login/
- settings_ui: https://{{ WEBACCESS }}/
+ settings_ui: https://{{ WEBACCESS }}/?r=/settings
verify_ui: https://{{ WEBACCESS }}/
mfa_ui: https://{{ WEBACCESS }}/
diff --git a/salt/soc/files/soc/soc.json b/salt/soc/files/soc/soc.json
index 76770e2bd..e668aa286 100644
--- a/salt/soc/files/soc/soc.json
+++ b/salt/soc/files/soc/soc.json
@@ -93,55 +93,55 @@
{ "name": "Wazuh/OSSEC Users", "description": "Show all Wazuh alerts grouped by username", "query": "event.module:ossec AND event.dataset:alert | groupby user.name"},
{ "name": "Sysmon Events", "description": "Show all Sysmon logs grouped by event_id", "query": "event_type:sysmon | groupby event_id"},
{ "name": "Sysmon Usernames", "description": "Show all Sysmon logs grouped by username", "query": "event_type:sysmon | groupby username"},
- { "name": "Zeek Notice", "description": "Show notices from Zeek", "query": "event.module:zeek AND event.dataset:notice | groupby notice.note notice.message"},
- { "name": "Connections", "description": "Connections grouped by IP and Port", "query": "event.module:zeek AND event.dataset:conn | groupby source.ip destination.ip network.protocol destination.port"},
- { "name": "Connections", "description": "Connections grouped by Service", "query": "event.module:zeek AND event.dataset:conn | groupby network.protocol destination.port"},
- { "name": "Connections", "description": "Connections grouped by destination country", "query": "event.module:zeek AND event.dataset:conn | groupby destination.geo.country_name"},
- { "name": "Connections", "description": "Connections grouped by source country", "query": "event.module:zeek AND event.dataset:conn | groupby source.geo.country_name"},
- { "name": "DCE_RPC", "description": "DCE_RPC grouped by operation", "query": "event.module:zeek AND event.dataset:dce_rpc | groupby dce_rpc.operation"},
- { "name": "DHCP", "description": "DHCP leases", "query": "event.module:zeek AND event.dataset:dhcp | groupby host.hostname host.domain dhcp.requested_address"},
- { "name": "DHCP", "description": "DHCP grouped by message type", "query": "event.module:zeek AND event.dataset:dhcp | groupby dhcp.message_types"},
- { "name": "DNP3", "description": "DNP3 grouped by reply", "query": "event.module:zeek AND event.dataset:dnp3 | groupby dnp3.fc_reply"},
- { "name": "DNS", "description": "DNS queries grouped by port ", "query": "event.module:zeek AND event.dataset:dns | groupby dns.query.name destination.port"},
- { "name": "DNS", "description": "DNS queries grouped by type", "query": "event.module:zeek AND event.dataset:dns | groupby dns.query.type_name destination.port"},
- { "name": "DNS", "description": "DNS highest registered domain", "query": "event.module:zeek AND event.dataset:dns | groupby dns.highest_registered_domain.keyword"},
- { "name": "DNS", "description": "DNS grouped by parent domain", "query": "event.module:zeek AND event.dataset:dns | groupby dns.parent_domain.keyword"},
- { "name": "Files", "description": "Files grouped by mimetype", "query": "event.module:zeek AND event.dataset:files | groupby file.mime_type source.ip"},
- { "name": "FTP", "description": "FTP grouped by argument", "query": "event.module:zeek AND event.dataset:ftp | groupby ftp.argument"},
- { "name": "FTP", "description": "FTP grouped by command", "query": "event.module:zeek AND event.dataset:ftp | groupby ftp.command"},
- { "name": "FTP", "description": "FTP grouped by username", "query": "event.module:zeek AND event.dataset:ftp | groupby ftp.user"},
- { "name": "HTTP", "description": "HTTP grouped by destination port", "query": "event.module:zeek AND event.dataset:http | groupby destination.port"},
- { "name": "HTTP", "description": "HTTP grouped by method", "query": "event.module:zeek AND event.dataset:http | groupby http.method"},
- { "name": "HTTP", "description": "HTTP grouped by status code", "query": "event.module:zeek AND event.dataset:http | groupby http.status_code"},
- { "name": "HTTP", "description": "HTTP grouped by status message", "query": "event.module:zeek AND event.dataset:http | groupby http.status_message"},
- { "name": "HTTP", "description": "HTTP grouped by user agent", "query": "event.module:zeek AND event.dataset:http | groupby http.useragent"},
- { "name": "HTTP", "description": "HTTP grouped by virtual host", "query": "event.module:zeek AND event.dataset:http | groupby http.virtual_host"},
- { "name": "HTTP", "description": "HTTP with exe downloads", "query": "event.module:zeek AND event.dataset:http AND file.resp_mime_types:dosexec | groupby http.virtual_host"},
- { "name": "Intel", "description": "Intel framework hits grouped by indicator", "query": "event.module:zeek AND event.dataset:intel | groupby intel.indicator"},
- { "name": "IRC", "description": "IRC grouped by command", "query": "event.module:zeek AND event.dataset:irc | groupby irc.command.type"},
- { "name": "KERBEROS", "description": "KERBEROS grouped by service", "query": "event.module:zeek AND event.dataset:kerberos | groupby kerberos.service"},
- { "name": "MODBUS", "description": "MODBUS grouped by function", "query": "event.module:zeek AND event.dataset:modbus | groupby modbus.function"},
- { "name": "MYSQL", "description": "MYSQL grouped by command", "query": "event.module:zeek AND event.dataset:mysql | groupby mysql.command"},
- { "name": "NOTICE", "description": "Zeek notice logs grouped by note", "query": "event.module:zeek AND event.dataset:notice | groupby notice.note"},
- { "name": "NOTICE", "description": "Zeek notice logs grouped by message", "query": "event.module:zeek AND event.dataset:notice | groupby notice.message"},
- { "name": "NTLM", "description": "NTLM grouped by computer name", "query": "event.module:zeek AND event.dataset:ntlm | groupby ntlm.server.dns.name"},
- { "name": "PE", "description": "PE files list", "query": "event.module:zeek AND event.dataset:pe | groupby file.machine file.os file.subsystem"},
- { "name": "RADIUS", "description": "RADIUS grouped by username", "query": "event.module:zeek AND event.dataset:radius | groupby user.name.keyword"},
- { "name": "RDP", "description": "RDP grouped by client name", "query": "event.module:zeek AND event.dataset:rdp | groupby client.name"},
- { "name": "RFB", "description": "RFB grouped by desktop name", "query": "event.module:zeek AND event.dataset:rfb | groupby rfb.desktop.name"},
- { "name": "Signatures", "description": "Zeek signatures grouped by signature id", "query": "event.module:zeek AND event.dataset:signatures | groupby signature_id"},
- { "name": "SIP", "description": "SIP grouped by user agent", "query": "event.module:zeek AND event.dataset:sip | groupby client.user_agent"},
- { "name": "SMB_Files", "description": "SMB files grouped by action", "query": "event.module:zeek AND event.dataset:smb_files | groupby file.action"},
- { "name": "SMB_Mapping", "description": "SMB mapping grouped by path", "query": "event.module:zeek AND event.dataset:smb_mapping | groupby smb.path"},
- { "name": "SMTP", "description": "SMTP grouped by subject", "query": "event.module:zeek AND event.dataset:smtp | groupby smtp.subject"},
- { "name": "SNMP", "description": "SNMP grouped by version and string", "query": "event.module:zeek AND event.dataset:snmp | groupby snmp.community snmp.version"},
- { "name": "Software", "description": "List of software seen on the network", "query": "event.module:zeek AND event.dataset:software | groupby software.type software.name"},
- { "name": "SSH", "description": "SSH grouped by version", "query": "event.module:zeek AND event.dataset:ssh | groupby ssh.version"},
- { "name": "SSL", "description": "SSL grouped by version and server name", "query": "event.module:zeek AND event.dataset:ssl | groupby ssl.version ssl.server_name"},
- { "name": "SYSLOG", "description": "SYSLOG grouped by severity and facility ", "query": "event.module:zeek AND event.dataset:syslog | groupby syslog.severity syslog.facility"},
- { "name": "Tunnels", "description": "Tunnels grouped by action", "query": "event.module:zeek AND event.dataset:tunnels | groupby event.action"},
- { "name": "Weird", "description": "Zeek weird log grouped by name", "query": "event.module:zeek AND event.dataset:weird | groupby weird.name"},
- { "name": "x509", "description": "x.509 grouped by key length", "query": "event.module:zeek AND event.dataset:x509 | groupby x509.certificate.key.length"},
+ { "name": "Zeek Notice", "description": "Show notices from Zeek", "query": "event.dataset:notice | groupby notice.note notice.message"},
+ { "name": "Connections", "description": "Connections grouped by IP and Port", "query": "event.dataset:conn | groupby source.ip destination.ip network.protocol destination.port"},
+ { "name": "Connections", "description": "Connections grouped by Service", "query": "event.dataset:conn | groupby network.protocol destination.port"},
+ { "name": "Connections", "description": "Connections grouped by destination country", "query": "event.dataset:conn | groupby destination.geo.country_name"},
+ { "name": "Connections", "description": "Connections grouped by source country", "query": "event.dataset:conn | groupby source.geo.country_name"},
+ { "name": "DCE_RPC", "description": "DCE_RPC grouped by operation", "query": "event.dataset:dce_rpc | groupby dce_rpc.operation"},
+ { "name": "DHCP", "description": "DHCP leases", "query": "event.dataset:dhcp | groupby host.hostname host.domain"},
+ { "name": "DHCP", "description": "DHCP grouped by message type", "query": "event.dataset:dhcp | groupby dhcp.message_types"},
+ { "name": "DNP3", "description": "DNP3 grouped by reply", "query": "event.dataset:dnp3 | groupby dnp3.fc_reply"},
+ { "name": "DNS", "description": "DNS queries grouped by port ", "query": "event.dataset:dns | groupby dns.query.name destination.port"},
+ { "name": "DNS", "description": "DNS queries grouped by type", "query": "event.dataset:dns | groupby dns.query.type_name destination.port"},
+ { "name": "DNS", "description": "DNS highest registered domain", "query": "event.dataset:dns | groupby dns.highest_registered_domain.keyword"},
+ { "name": "DNS", "description": "DNS grouped by parent domain", "query": "event.dataset:dns | groupby dns.parent_domain.keyword"},
+ { "name": "Files", "description": "Files grouped by mimetype", "query": "event.dataset:files | groupby file.mime_type source.ip"},
+ { "name": "FTP", "description": "FTP grouped by argument", "query": "event.dataset:ftp | groupby ftp.argument"},
+ { "name": "FTP", "description": "FTP grouped by command", "query": "event.dataset:ftp | groupby ftp.command"},
+ { "name": "FTP", "description": "FTP grouped by username", "query": "event.dataset:ftp | groupby ftp.user"},
+ { "name": "HTTP", "description": "HTTP grouped by destination port", "query": "event.dataset:http | groupby destination.port"},
+ { "name": "HTTP", "description": "HTTP grouped by method", "query": "event.dataset:http | groupby http.method"},
+ { "name": "HTTP", "description": "HTTP grouped by status code and message", "query": "event.dataset:http | groupby http.status_code http.status_message"},
+ { "name": "HTTP", "description": "HTTP grouped by user agent", "query": "event.dataset:http | groupby http.useragent"},
+ { "name": "HTTP", "description": "HTTP grouped by virtual host", "query": "event.dataset:http | groupby http.virtual_host"},
+ { "name": "HTTP", "description": "HTTP with exe downloads", "query": "event.dataset:http AND file.resp_mime_types:dosexec | groupby http.virtual_host"},
+ { "name": "Intel", "description": "Intel framework hits grouped by indicator", "query": "event.dataset:intel | groupby intel.indicator"},
+ { "name": "IRC", "description": "IRC grouped by command", "query": "event.dataset:irc | groupby irc.command.type"},
+ { "name": "KERBEROS", "description": "KERBEROS grouped by service", "query": "event.dataset:kerberos | groupby kerberos.service"},
+ { "name": "MODBUS", "description": "MODBUS grouped by function", "query": "event.dataset:modbus | groupby modbus.function"},
+ { "name": "MYSQL", "description": "MYSQL grouped by command", "query": "event.dataset:mysql | groupby mysql.command"},
+ { "name": "NOTICE", "description": "Zeek notice logs grouped by note and message", "query": "event.dataset:notice | groupby notice.note notice.message"},
+ { "name": "NTLM", "description": "NTLM grouped by computer name", "query": "event.dataset:ntlm | groupby ntlm.server.dns.name"},
+ { "name": "PE", "description": "PE files list", "query": "event.dataset:pe | groupby file.machine file.os file.subsystem"},
+ { "name": "RADIUS", "description": "RADIUS grouped by username", "query": "event.dataset:radius | groupby user.name.keyword"},
+ { "name": "RDP", "description": "RDP grouped by client name", "query": "event.dataset:rdp | groupby client.name"},
+ { "name": "RFB", "description": "RFB grouped by desktop name", "query": "event.dataset:rfb | groupby rfb.desktop.name"},
+ { "name": "Signatures", "description": "Zeek signatures grouped by signature id", "query": "event.dataset:signatures | groupby signature_id"},
+ { "name": "SIP", "description": "SIP grouped by user agent", "query": "event.dataset:sip | groupby client.user_agent"},
+ { "name": "SMB_Files", "description": "SMB files grouped by action", "query": "event.dataset:smb_files | groupby file.action"},
+ { "name": "SMB_Mapping", "description": "SMB mapping grouped by path", "query": "event.dataset:smb_mapping | groupby smb.path"},
+ { "name": "SMTP", "description": "SMTP grouped by subject", "query": "event.dataset:smtp | groupby smtp.subject"},
+ { "name": "SNMP", "description": "SNMP grouped by version and string", "query": "event.dataset:snmp | groupby snmp.community snmp.version"},
+ { "name": "Software", "description": "List of software seen on the network", "query": "event.dataset:software | groupby software.type software.name"},
+ { "name": "SSH", "description": "SSH grouped by version", "query": "event.dataset:ssh | groupby ssh.version"},
+ { "name": "SSL", "description": "SSL grouped by version and server name", "query": "event.dataset:ssl | groupby ssl.version ssl.server_name"},
+ { "name": "SYSLOG", "description": "SYSLOG grouped by severity and facility ", "query": "event.dataset:syslog | groupby syslog.severity syslog.facility"},
+ { "name": "Tunnels", "description": "Tunnels grouped by action", "query": "event.dataset:tunnels | groupby event.action"},
+ { "name": "Weird", "description": "Zeek weird log grouped by name", "query": "event.dataset:weird | groupby weird.name"},
+ { "name": "x509", "description": "x.509 grouped by key length", "query": "event.dataset:x509 | groupby x509.certificate.key.length"},
+ { "name": "x509", "description": "x.509 grouped by issuer", "query": "event.dataset:x509 | groupby x509.certificate.issuer"},
+ { "name": "x509", "description": "x.509 grouped by subject", "query": "event.dataset:x509 | groupby x509.certificate.subject"},
{ "name": "Firewall", "description": "Firewall events grouped by action", "query": "event_type:firewall | groupby action"}
]
}
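
Each `| groupby` clause in these queries corresponds conceptually to nested terms aggregations. A hand-written approximation of what one updated query asks Elasticsearch for; the exact DSL SOC generates is an assumption, not lifted from its source code:

# Rough translation of "event.dataset:conn | groupby network.protocol
# destination.port" into an Elasticsearch query body.
body = {
    "size": 0,
    "query": {"query_string": {"query": "event.dataset:conn"}},
    "aggs": {
        "network.protocol": {
            "terms": {"field": "network.protocol"},
            "aggs": {
                "destination.port": {"terms": {"field": "destination.port"}},
            },
        },
    },
}
print(body)
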
diff --git a/salt/soctopus/init.sls b/salt/soctopus/init.sls
index 330e727f0..ff30c3c1a 100644
--- a/salt/soctopus/init.sls
+++ b/salt/soctopus/init.sls
@@ -1,5 +1,7 @@
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
{% set MASTER = salt['grains.get']('master') %}
+{%- set MASTER_URL = salt['pillar.get']('master:url_base', '') %}
+{%- set MASTER_IP = salt['pillar.get']('static:masterip', '') %}
soctopusdir:
file.directory:
@@ -69,3 +71,5 @@ so-soctopus:
- /opt/so/conf/navigator/nav_layer_playbook.json:/etc/playbook/nav_layer_playbook.json:rw
- port_bindings:
- 0.0.0.0:7000:7000
+ - extra_hosts:
+ - {{MASTER_URL}}:{{MASTER_IP}}
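
`extra_hosts` writes a static /etc/hosts entry inside the SOCtopus container so the master's URL base resolves to the master IP without external DNS. An SDK-level sketch; the hostname and IP are placeholders for the rendered {{MASTER_URL}} and {{MASTER_IP}}:

# SDK sketch of the new extra_hosts entry.
import docker

client = docker.from_env()
output = client.containers.run(
    "alpine",                        # placeholder image
    "cat /etc/hosts",
    extra_hosts={"securityonion.example.com": "192.0.2.10"},
    remove=True,
)
print(output.decode())               # shows the injected hosts line
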
diff --git a/salt/ssl/init.sls b/salt/ssl/init.sls
index 897ab84d0..17ac6adf0 100644
--- a/salt/ssl/init.sls
+++ b/salt/ssl/init.sls
@@ -86,17 +86,17 @@ chownilogstashfilebeatp8:
# Create Symlinks to the keys so I can distribute it to all the things
filebeatdir:
file.directory:
- - name: /opt/so/saltstack/salt/filebeat/files
- - mkdirs: True
+ - name: /opt/so/saltstack/local/salt/filebeat/files
+ - makedirs: True
fbkeylink:
file.symlink:
- - name: /opt/so/saltstack/salt/filebeat/files/filebeat.p8
+ - name: /opt/so/saltstack/local/salt/filebeat/files/filebeat.p8
- target: /etc/pki/filebeat.p8
fbcrtlink:
file.symlink:
- - name: /opt/so/saltstack/salt/filebeat/files/filebeat.crt
+ - name: /opt/so/saltstack/local/salt/filebeat/files/filebeat.crt
- target: /etc/pki/filebeat.crt
# Create a cert for the docker registry
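
Besides the path move, this hunk fixes the misspelled `mkdirs` argument to `makedirs`. The equivalent directory-then-symlink setup in plain Python, using the paths from the state above:

# The same makedirs-then-symlink setup in plain Python.
import os

files_dir = "/opt/so/saltstack/local/salt/filebeat/files"
os.makedirs(files_dir, exist_ok=True)

for name, target in (("filebeat.p8", "/etc/pki/filebeat.p8"),
                     ("filebeat.crt", "/etc/pki/filebeat.crt")):
    link = os.path.join(files_dir, name)
    if not os.path.islink(link):
        os.symlink(target, link)
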
diff --git a/salt/suricata/files/suricata.yaml b/salt/suricata/files/suricata.yaml
index 5a0121b63..c87c75447 100644
--- a/salt/suricata/files/suricata.yaml
+++ b/salt/suricata/files/suricata.yaml
@@ -1,28 +1,28 @@
%YAML 1.1
---
-{%- set interface = salt['pillar.get']('sensor:interface', 'bond0') %}
-{%- if grains['role'] == 'so-eval' %}
-{%- set MTU = 1500 %}
-{%- elif grains['role'] == 'so-helix' %}
-{%- set MTU = 9000 %}
-{%- else %}
-{%- set MTU = salt['pillar.get']('sensor:mtu', '1500') %}
-{%- endif %}
-{%- if salt['pillar.get']('sensor:homenet') %}
- {%- set homenet = salt['pillar.get']('sensor:hnsensor', '') %}
-{%- else %}
- {%- set homenet = salt['pillar.get']('static:hnmaster', '') %}
-{%- endif %}
+ {%- set interface = salt['pillar.get']('sensor:interface', 'bond0') %}
+ {%- if grains['role'] == 'so-eval' %}
+ {%- set MTU = 1500 %}
+ {%- elif grains['role'] == 'so-helix' %}
+ {%- set MTU = 9000 %}
+ {%- else %}
+ {%- set MTU = salt['pillar.get']('sensor:mtu', '1500') %}
+ {%- endif %}
+ {%- if salt['pillar.get']('sensor:homenet') %}
+ {%- set homenet = salt['pillar.get']('sensor:hnsensor', '') %}
+ {%- else %}
+ {%- set homenet = salt['pillar.get']('static:hnmaster', '') %}
+ {%- endif %}
# Suricata configuration file. In addition to the comments describing all
# options in this file, full documentation can be found at:
-# https://redmine.openinfosecfoundation.org/projects/suricata/wiki/Suricatayaml
+# https://suricata.readthedocs.io/en/latest/configuration/suricata-yaml.html
##
## Step 1: inform Suricata about your network
##
vars:
- # more specifc is better for alert accuracy and performance
+ # more specific is better for alert accuracy and performance
address-groups:
HOME_NET: "[{{ homenet }}]"
#HOME_NET: "[192.168.0.0/16]"
@@ -39,6 +39,7 @@ vars:
DNS_SERVERS: "$HOME_NET"
TELNET_SERVERS: "$HOME_NET"
AIM_SERVERS: "$EXTERNAL_NET"
+ DC_SERVERS: "$HOME_NET"
DNP3_SERVER: "$HOME_NET"
DNP3_CLIENT: "$HOME_NET"
MODBUS_CLIENT: "$HOME_NET"
@@ -55,23 +56,11 @@ vars:
MODBUS_PORTS: 502
FILE_DATA_PORTS: "[$HTTP_PORTS,110,143]"
FTP_PORTS: 21
-
+ VXLAN_PORTS: 4789
+ TEREDO_PORTS: 3544
##
-## Step 2: select the rules to enable or disable
-##
-
-default-rule-path: /etc/suricata/rules
-rule-files:
- - all.rules
-
-classification-file: /etc/suricata/classification.config
-reference-config-file: /etc/suricata/reference.config
-# threshold-file: /usr/local/etc/suricata/threshold.config
-
-
-##
-## Step 3: select outputs to enable
+## Step 2: select outputs to enable
##
# The default logging directory. Any log or output file will be
@@ -85,6 +74,13 @@ stats:
# The interval field (in seconds) controls at what interval
# the loggers are invoked.
interval: 30
+ # Add decode events as stats.
+ #decoder-events: true
+ # Decoder event prefix in stats. Has been 'decoder' before, but that leads
+ # to missing events in the eve.stats records. See issue #2225.
+ #decoder-events-prefix: "decoder.event"
+ # Add stream events as stats.
+ #stream-events: false
# Configure the type of alert (and other) logging you would like.
outputs:
@@ -99,10 +95,9 @@ outputs:
- eve-log:
enabled: yes
filetype: regular #regular|syslog|unix_dgram|unix_stream|redis
- filename: eve.json
+ filename: /nsm/eve.json
rotate-interval: day
- community-id: true
- community-id-seed: 0
+
#prefix: "@cee: " # prefix to prepend to each log entry
# the following are valid when type: syslog above
#identity: "suricata"
@@ -124,63 +119,141 @@ outputs:
# pipelining:
# enabled: yes ## set enable to yes to enable query pipelining
# batch-size: 10 ## number of entry to keep in buffer
+
+ # Include top level metadata. Default yes.
+ #metadata: no
+
+ # include the name of the input pcap file in pcap file processing mode
+ pcap-file: false
+
+ # Community Flow ID
+ # Adds a 'community_id' field to EVE records. These are meant to give
+ # records a predictable flow id that can be used to match records to
+ # the output of other tools such as Bro.
+ #
+ # Takes a 'seed' that needs to be the same across sensors and tools
+ # to make the id less predictable.
+
+ # enable/disable the community id feature.
+ community-id: true
+ # Seed value for the ID output. Valid values are 0-65535.
+ community-id-seed: 0
+
+ # HTTP X-Forwarded-For support by adding an extra field or overwriting
+ # the source or destination IP address (depending on flow direction)
+ # with the one reported in the X-Forwarded-For HTTP header. This is
+ # helpful when reviewing alerts for traffic that is being reverse
+ # or forward proxied.
+ xff:
+ enabled: no
+ # Two operation modes are available, "extra-data" and "overwrite".
+ mode: extra-data
+ # Two proxy deployments are supported, "reverse" and "forward". In
+ # a "reverse" deployment the IP address used is the last one, in a
+ # "forward" deployment the first IP address is used.
+ deployment: reverse
+ # Header name where the actual IP address will be reported, if more
+ # than one IP address is present, the last IP address will be the
+ # one taken into consideration.
+ header: X-Forwarded-For
+
types:
- alert:
- # payload: yes # enable dumping payload in Base64
- # payload-buffer-size: 4kb # max size of payload buffer to output in eve-log
- # payload-printable: yes # enable dumping payload in printable (lossy) format
- # packet: yes # enable dumping of packet (without stream segments)
- # http-body: yes # enable dumping of http body in Base64
- # http-body-printable: yes # enable dumping of http body in printable format
- metadata:
- app-layer: false
- flow: false
- rule:
- metadata: true
- raw: true
+ payload: no # enable dumping payload in Base64
+ payload-buffer-size: 4kb # max size of payload buffer to output in eve-log
+ payload-printable: yes # enable dumping payload in printable (lossy) format
+ packet: yes # enable dumping of packet (without stream segments)
+ metadata:
+ app-layer: false
+ flow: false
+ rule:
+ metadata: true
+ raw: true
+
+ # http-body: yes # Requires metadata; enable dumping of http body in Base64
+ # http-body-printable: yes # Requires metadata; enable dumping of http body in printable format
# Enable the logging of tagged packets for rules using the
# "tag" keyword.
tagged-packets: no
-
- # HTTP X-Forwarded-For support by adding an extra field or overwriting
- # the source or destination IP address (depending on flow direction)
- # with the one reported in the X-Forwarded-For HTTP header. This is
- # helpful when reviewing alerts for traffic that is being reverse
- # or forward proxied.
- xff:
- enabled: no
- # Two operation modes are available, "extra-data" and "overwrite".
- mode: extra-data
- # Two proxy deployments are supported, "reverse" and "forward". In
- # a "reverse" deployment the IP address used is the last one, in a
- # "forward" deployment the first IP address is used.
- deployment: reverse
- # Header name where the actual IP address will be reported, if more
- # than one IP address is present, the last IP address will be the
- # one taken into consideration.
- header: X-Forwarded-For
+ #- anomaly:
+ # Anomaly log records describe unexpected conditions such
+ # as truncated packets, packets with invalid IP/UDP/TCP
+ # length values, and other events that render the packet
+ # invalid for further processing or describe unexpected
+ # behavior on an established stream. Networks which
+ # experience high occurrences of anomalies may experience
+ # packet processing degradation.
+ #
+ # Anomalies are reported for the following:
+ # 1. Decode: Values and conditions that are detected while
+ # decoding individual packets. This includes invalid or
+ # unexpected values for low-level protocol lengths as well
+ # as stream related events (TCP 3-way handshake issues,
+ # unexpected sequence number, etc).
+ # 2. Stream: This includes stream related events (TCP
+ # 3-way handshake issues, unexpected sequence number,
+ # etc).
+ # 3. Application layer: These denote application layer
+ # specific conditions that are unexpected, invalid or are
+ # unexpected given the application monitoring state.
+ #
+ # By default, anomaly logging is disabled. When anomaly
+ # logging is enabled, applayer anomaly reporting is
+ # enabled.
+ # enabled: no
+ #
+ # Choose one or more types of anomaly logging and whether to enable
+ # logging of the packet header for packet anomalies.
+ # types:
+ # decode: no
+ # stream: no
+ # applayer: yes
+ #packethdr: no
#- http:
- # extended: no # enable this for extended logging information
+ # extended: yes # enable this for extended logging information
# custom allows additional http fields to be included in eve-log
# the example below adds three additional fields when uncommented
#custom: [Accept-Encoding, Accept-Language, Authorization]
+ # set this value to one and only one among {both, request, response}
+ # to dump all http headers for every http request and/or response
+ # dump-all-headers: none
#- dns:
- # control logging of queries and answers
- # default yes, no to disable
- # query: no # enable logging of DNS queries
- # answer: no # enable logging of DNS answers
- # control which RR types are logged
- # all enabled if custom not specified
- #custom: [a, aaaa, cname, mx, ns, ptr, txt]
+ # This configuration uses the new DNS logging format,
+ # the old configuration is still available:
+ # https://suricata.readthedocs.io/en/latest/output/eve/eve-json-output.html#dns-v1-format
+
+ # As of Suricata 5.0, version 2 of the eve dns output
+ # format is the default.
+ #version: 2
+
+ # Enable/disable this logger. Default: enabled.
+ #enabled: yes
+
+ # Control logging of requests and responses:
+ # - requests: enable logging of DNS queries
+ # - responses: enable logging of DNS answers
+ # By default both requests and responses are logged.
+ #requests: no
+ #responses: no
+
+ # Format of answer logging:
+ # - detailed: array item per answer
+ # - grouped: answers aggregated by type
+ # Default: all
+ #formats: [detailed, grouped]
+
+ # Types to log, based on the query type.
+ # Default: all.
+ #types: [a, aaaa, cname, mx, ns, ptr, txt]
#- tls:
- # extended: no # enable this for extended logging information
+ # extended: yes # enable this for extended logging information
# output TLS transaction where the session is resumed using a
# session id
#session-resumption: no
# custom allows to control which tls fields that are included
# in eve-log
- #custom: [subject, issuer, session_resumed, serial, fingerprint, sni, version, not_before, not_after, certificate, chain]
+ #custom: [subject, issuer, session_resumed, serial, fingerprint, sni, version, not_before, not_after, certificate, chain, ja3, ja3s]
#- files:
# force-magic: no # force logging magic on all logged files
# force logging of checksums, available hash functions are md5,
@@ -204,60 +277,42 @@ outputs:
#md5: [body, subject]
#- dnp3
+ #- ftp
+ #- rdp
#- nfs
- #- ssh:
+ #- smb
+ #- tftp
+ #- ikev2
+ #- krb5
+ #- snmp
+ #- sip
+ #- dhcp:
+ # enabled: yes
+ # When extended mode is on, all DHCP messages are logged
+ # with full detail. When extended mode is off (the
+ # default), just enough information to map a MAC address
+ # to an IP address is logged.
+ # extended: no
+ #- ssh
#- stats:
# totals: yes # stats for all threads merged together
# threads: no # per thread stats
# deltas: no # include delta values
# bi-directional flows
- #- flow:
+ #- flow
# uni-directional flows
#- netflow
- # Vars log flowbits and other packet and flow vars
- #- vars
- # alert output for use with Barnyard2
+ # Metadata event type. Triggered whenever a pktvar is saved
+ # and will include the pktvars, flowvars, flowbits and
+ # flowints.
+ #- metadata
+
+ # deprecated - unified2 alert format for use with Barnyard2
- unified2-alert:
enabled: no
- filename: unified2.alert
-
- # File size limit. Can be specified in kb, mb, gb. Just a number
- # is parsed as bytes.
- #limit: 32mb
-
- # By default unified2 log files have the file creation time (in
- # unix epoch format) appended to the filename. Set this to yes to
- # disable this behaviour.
- #nostamp: no
-
- # Sensor ID field of unified2 alerts.
- #sensor-id: 0
-
- # Include payload of packets related to alerts. Defaults to true, set to
- # false if payload is not required.
- #payload: yes
-
- # HTTP X-Forwarded-For support by adding the unified2 extra header or
- # overwriting the source or destination IP address (depending on flow
- # direction) with the one reported in the X-Forwarded-For HTTP header.
- # This is helpful when reviewing alerts for traffic that is being reverse
- # or forward proxied.
- xff:
- enabled: no
- # Two operation modes are available, "extra-data" and "overwrite". Note
- # that in the "overwrite" mode, if the reported IP address in the HTTP
- # X-Forwarded-For header is of a different version of the packet
- # received, it will fall-back to "extra-data" mode.
- mode: extra-data
- # Two proxy deployments are supported, "reverse" and "forward". In
- # a "reverse" deployment the IP address used is the last one, in a
- # "forward" deployment the first IP address is used.
- deployment: reverse
- # Header name where the actual IP address will be reported, if more
- # than one IP address is present, the last IP address will be the
- # one taken into consideration.
- header: X-Forwarded-For
+ # for further options see:
+ # https://suricata.readthedocs.io/en/suricata-5.0.0/configuration/suricata-yaml.html#alert-output-for-use-with-barnyard2-unified2-alert
# a line based log of HTTP requests (no alerts)
- http-log:
@@ -266,7 +321,7 @@ outputs:
append: yes
#extended: yes # enable this for extended logging information
#custom: yes # enabled the custom logging format (defined by customformat)
-
+ #customformat: ""
#filetype: regular # 'regular', 'unix_stream' or 'unix_dgram'
# a line based log of TLS handshake parameters (no alerts)
@@ -276,6 +331,7 @@ outputs:
append: yes
#extended: yes # Log extended information like fingerprint
#custom: yes # enabled the custom logging format (defined by customformat)
+ #customformat: ""
#filetype: regular # 'regular', 'unix_stream' or 'unix_dgram'
# output TLS transaction where the session is resumed using a
# session id
@@ -286,13 +342,6 @@ outputs:
enabled: no
#certs-log-dir: certs # directory to store the certificates files
- # a line based log of DNS requests and/or replies (no alerts)
- - dns-log:
- enabled: no
- filename: dns.log
- append: yes
- #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram'
-
# Packet log... log packets in pcap format. 3 modes of operation: "normal"
# "multi" and "sguil".
#
@@ -334,6 +383,17 @@ outputs:
# If set to a value will enable ring buffer mode. Will keep Maximum of "max-files" of size "limit"
max-files: 2000
+ # Compression algorithm for pcap files. Possible values: none, lz4.
+ # Enabling compression is incompatible with the sguil mode. Note also
+ # that on Windows, enabling compression will *increase* disk I/O.
+ compression: none
+
+ # Further options for lz4 compression. The compression level can be set
+ # to a value between 0 and 16, where higher values result in higher
+ # compression.
+ #lz4-checksum: no
+ #lz4-level: 0
+
mode: normal # normal, multi or sguil.
# Directory to place pcap files. If not provided the default log
@@ -352,7 +412,7 @@ outputs:
append: yes
#filetype: regular # 'regular', 'unix_stream' or 'unix_dgram'
- # alert output to prelude (http://www.prelude-technologies.com/) only
+ # alert output to prelude (https://www.prelude-siem.org/) only
# available if Suricata has been compiled with --enable-prelude
- alert-prelude:
enabled: no
@@ -360,14 +420,14 @@ outputs:
log-packet-content: no
log-packet-header: yes
- # Stats.log contains data from various counters of the suricata engine.
+ # Stats.log contains data from various counters of the Suricata engine.
- stats:
enabled: yes
filename: stats.log
append: yes # append to file (yes) or overwrite it (no)
totals: yes # stats for all threads merged together
threads: no # per thread stats
- #null-values: yes # print counters that have value 0
+ null-values: yes # print counters that have value 0
# a line based alerts log similar to fast.log into syslog
- syslog:
@@ -379,60 +439,89 @@ outputs:
#level: Info ## possible levels: Emergency, Alert, Critical,
## Error, Warning, Notice, Info, Debug
- # a line based information for dropped packets in IPS mode
+ # deprecated a line based information for dropped packets in IPS mode
- drop:
enabled: no
- filename: drop.log
- append: yes
- #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram'
+ # further options documented at:
+ # https://suricata.readthedocs.io/en/suricata-5.0.0/configuration/suricata-yaml.html#drop-log-a-line-based-information-for-dropped-packets
- # output module to store extracted files to disk
+ # Output module for storing files on disk. Files are stored in
+ # directories named for the first 2 characters of the
+ # SHA256 of the file. Each file is given its SHA256 as a filename.
#
- # The files are stored to the log-dir in a format "file." where is
- # an incrementing number starting at 1. For each file "file." a meta
- # file "file..meta" is created.
+ # When a duplicate file is found, the existing file is touched to
+ # have its timestamps updated.
#
- # File extraction depends on a lot of things to be fully done:
- # - file-store stream-depth. For optimal results, set this to 0 (unlimited)
- # - http request / response body sizes. Again set to 0 for optimal results.
- # - rules that contain the "filestore" keyword.
+ # Unlike the older filestore, metadata is not written out by default
+ # as each file should already have a "fileinfo" record in the
+ # eve.log. If write-fileinfo is set to yes, each file will have
+ # an associated .json file that consists of the fileinfo
+ # record. A fileinfo file will be written for each occurrence of the
+ # file seen using a filename suffix to ensure uniqueness.
+ #
+ # To prune the filestore directory see the "suricatactl filestore
+ # prune" command which can delete files over a certain age.
- file-store:
- enabled: no # set to yes to enable
- log-dir: files # directory to store the files
- force-magic: no # force logging magic on all stored files
- # force logging of checksums, available hash functions are md5,
- # sha1 and sha256
- #force-hash: [md5]
- force-filestore: no # force storing of all files
- # override global stream-depth for sessions in which we want to
- # perform file extraction. Set to 0 for unlimited.
+ version: 2
+ enabled: no
+
+ # Set the directory for the filestore. If the path is not
+ # absolute, it will be relative to the default-log-dir.
+ #dir: filestore
+
+ # Write out a fileinfo record for each occurrence of a
+ # file. Disabled by default as each occurrence is already logged
+ # as a fileinfo record to the main eve-log.
+ #write-fileinfo: yes
+
+ # Force storing of all files. Default: no.
+ #force-filestore: yes
+
+ # Override the global stream-depth for sessions in which we want
+ # to perform file extraction. Set to 0 for unlimited.
#stream-depth: 0
- #waldo: file.waldo # waldo file to store the file_id across runs
- # uncomment to disable meta file writing
- #write-meta: no
- # uncomment the following variable to define how many files can
+
+ # Uncomment the following variable to define how many files can
# remain open for filestore by Suricata. Default value is 0 which
# means files get closed after each write
#max-open-files: 1000
- # output module to log files tracked in a easily parsable json format
- - file-log:
- enabled: no
- filename: files-json.log
- append: yes
- #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram'
+ # Force logging of checksums, available hash functions are md5,
+ # sha1 and sha256. Note that SHA256 is automatically forced by
+ # the use of this output module as it uses the SHA256 as the
+ # file naming scheme.
+ #force-hash: [sha1, md5]
+ # NOTE: X-Forwarded configuration is ignored if write-fileinfo is disabled
+ # HTTP X-Forwarded-For support by adding an extra field or overwriting
+ # the source or destination IP address (depending on flow direction)
+ # with the one reported in the X-Forwarded-For HTTP header. This is
+ # helpful when reviewing alerts for traffic that is being reverse
+ # or forward proxied.
+ xff:
+ enabled: no
+ # Two operation modes are available, "extra-data" and "overwrite".
+ mode: extra-data
+ # Two proxy deployments are supported, "reverse" and "forward". In
+ # a "reverse" deployment the IP address used is the last one, in a
+ # "forward" deployment the first IP address is used.
+ deployment: reverse
+ # Header name where the actual IP address will be reported, if more
+ # than one IP address is present, the last IP address will be the
+ # one taken into consideration.
+ header: X-Forwarded-For
- force-magic: no # force logging magic on all logged files
- # force logging of checksums, available hash functions are md5,
- # sha1 and sha256
- #force-hash: [md5]
+ # deprecated - file-store v1
+ - file-store:
+ enabled: no
+ # further options documented at:
+ # https://suricata.readthedocs.io/en/suricata-5.0.0/file-extraction/file-extraction.html#file-store-version-1
# Log TCP data after stream normalization
# 2 types: file or dir. File logs into a single logfile. Dir creates
# 2 files per TCP session and stores the raw TCP data into them.
# Using 'both' will enable both file and dir modes.
#
- # Note: limited by stream.depth
+ # Note: limited by stream.reassembly.depth
- tcp-data:
enabled: no
type: file
@@ -452,7 +541,7 @@ outputs:
# Lua Output Support - execute lua script to generate alert and event
# output.
# Documented at:
- # https://redmine.openinfosecfoundation.org/projects/suricata/wiki/Lua_Output
+ # https://suricata.readthedocs.io/en/latest/output/lua-output.html
- lua:
enabled: no
#scripts-dir: /etc/suricata/lua-output/
@@ -466,20 +555,20 @@ logging:
# Note that debug level logging will only be emitted if Suricata was
# compiled with the --enable-debug configure option.
#
- # This value is overriden by the SC_LOG_LEVEL env var.
+ # This value is overridden by the SC_LOG_LEVEL env var.
default-log-level: notice
# The default output format. Optional parameter, should default to
- # something reasonable if not provided. Can be overriden in an
+ # something reasonable if not provided. Can be overridden in an
# output section. You can leave this out to get the default.
#
- # This value is overriden by the SC_LOG_FORMAT env var.
+ # This value is overridden by the SC_LOG_FORMAT env var.
#default-log-format: "[%i] %t - (%f:%l) <%d> (%n) -- "
# A regex to filter output. Can be overridden in an output section.
# Defaults to empty (no filter).
#
- # This value is overriden by the SC_LOG_OP_FILTER env var.
+ # This value is overridden by the SC_LOG_OP_FILTER env var.
default-output-filter:
# Define your logging outputs. If none are defined, or they are all
@@ -491,11 +580,23 @@ logging:
- file:
enabled: yes
level: info
- filename: /var/log/suricata/suricata.log
+ filename: suricata.log
# type: json
- syslog:
enabled: no
+ facility: local5
+ format: "[%i] <%d> -- "
+ # type: json
+
+##
+## Step 4: configure common capture settings
+##
+## See "Advanced Capture Options" below for more options, including NETMAP
+## and PF_RING.
+##
+
+# Linux high speed capture support
af-packet:
- interface: {{ interface }}
# Number of receive threads. "auto" uses the number of cores
@@ -505,28 +606,21 @@ af-packet:
# Default AF_PACKET cluster type. AF_PACKET can load balance per flow or per hash.
# This is only supported for Linux kernel > 3.1
# possible value are:
- # * cluster_round_robin: round robin load balancing
# * cluster_flow: all packets of a given flow are sent to the same socket
# * cluster_cpu: all packets treated in kernel by a CPU are sent to the same socket
# * cluster_qm: all packets linked by network card to a RSS queue are sent to the same
# socket. Requires at least Linux 3.14.
- # * cluster_random: packets are sent randomly to sockets but with an equipartition.
- # Requires at least Linux 3.14.
- # * cluster_rollover: kernel rotates between sockets filling each socket before moving
- # to the next. Requires at least Linux 3.10.
+ # * cluster_ebpf: eBPF file load balancing. See doc/userguide/capture-hardware/ebpf-xdp.rst for
+ # more info.
# Recommended modes are cluster_flow on most boxes and cluster_cpu or cluster_qm on system
# with capture card using RSS (require cpu affinity tuning and system irq tuning)
cluster-type: cluster_flow
# In some fragmentation cases, the hash cannot be computed. If "defrag" is set
# to yes, the kernel will do the needed defragmentation before sending the packets.
defrag: yes
- # After Linux kernel 3.10 it is possible to activate the rollover option: if a socket is
- # full then kernel will send the packet on the next socket with room available. This option
- # can minimize packet drop and increase the treated bandwidth on single intensive flow.
- #rollover: yes
# To use the ring feature of AF_PACKET, set 'use-mmap' to yes
- #use-mmap: yes
- # Lock memory map to avoid it goes to swap. Be careful that over suscribing could lock
+ use-mmap: yes
+ # Lock memory map to keep it from going to swap. Be careful that over subscribing could lock
# your system
#mmap-locked: yes
# Use tpacket_v3 capture mode, only active if use-mmap is true
@@ -572,13 +666,14 @@ af-packet:
# will not be copied.
#copy-mode: ips
#copy-iface: eth1
+ # For eBPF and XDP setup including bypass, filter and load balancing, please
+ # see doc/userguide/capture-hardware/ebpf-xdp.rst for more info.
# Put default values here. These will be used for an interface that is not
# in the list above.
- interface: default
#threads: auto
#use-mmap: no
- #rollover: yes
#tpacket-v3: yes
# Cross platform libpcap capture support
@@ -595,7 +690,7 @@ pcap:
# Possible values are:
# - yes: checksum validation is forced
# - no: checksum validation is disabled
- # - auto: suricata uses a statistical approach to detect when
+ # - auto: Suricata uses a statistical approach to detect when
# checksum off-loading is used. (default)
# Warning: 'checksum-validation' must be set to yes to have any validation
#checksum-checks: auto
@@ -618,7 +713,7 @@ pcap-file:
# Possible values are:
# - yes: checksum validation is forced
# - no: checksum validation is disabled
- # - auto: suricata uses a statistical approach to detect when
+ # - auto: Suricata uses a statistical approach to detect when
# checksum off-loading is used. (default)
# Warning: 'checksum-validation' must be set to yes to have checksum tested
checksum-checks: auto
@@ -639,42 +734,66 @@ pcap-file:
# "detection-only" enables protocol detection only (parser disabled).
app-layer:
protocols:
+ krb5:
+ enabled: yes
+ snmp:
+ enabled: yes
+ ikev2:
+ enabled: yes
tls:
- enabled: detection-only
+ enabled: yes
detection-ports:
dp: 443
- # Completely stop processing TLS/SSL session after the handshake
- # completed. If bypass is enabled this will also trigger flow
- # bypass. If disabled (the default), TLS/SSL session is still
- # tracked for Heartbleed and other anomalies.
- #no-reassemble: yes
+ # Generate JA3 fingerprint from client hello. If not specified it
+ # will be disabled by default, but enabled if rules require it.
+ #ja3-fingerprints: auto
+
+ # What to do when the encrypted communications start:
+ # - default: keep tracking TLS session, check for protocol anomalies,
+ # inspect tls_* keywords. Disables inspection of unmodified
+ # 'content' signatures.
+ # - bypass: stop processing this flow as much as possible. No further
+ # TLS parsing and inspection. Offload flow bypass to kernel
+ # or hardware if possible.
+ # - full: keep tracking and inspection as normal. Unmodified content
+ # keyword signatures are inspected as well.
+ #
+ # For best performance, select 'bypass'.
+ #
+ #encryption-handling: default
+
dcerpc:
- enabled: detection-only
+ enabled: yes
ftp:
- enabled: detection-only
+ enabled: yes
+ # memcap: 64mb
+ # RDP, disabled by default.
+ rdp:
+ #enabled: no
ssh:
- enabled: detection-only
+ enabled: yes
smtp:
- enabled: detection-only
+ enabled: yes
+ raw-extraction: no
# Configure SMTP-MIME Decoder
mime:
# Decode MIME messages from SMTP transactions
# (may be resource intensive)
# This field supersedes all others because it turns the entire
# process on or off
- decode-mime: detection-only
+ decode-mime: yes
# Decode MIME entity bodies (ie. base64, quoted-printable, etc.)
- decode-base64: detection-only
- decode-quoted-printable: detection-only
+ decode-base64: yes
+ decode-quoted-printable: yes
# Maximum bytes per header data value stored in the data structure
# (default is 2000)
header-value-depth: 2000
# Extract URLs and save in state data structure
- extract-urls: detection-only
+ extract-urls: yes
# Set to yes to compute the md5 of the mail body. You will then
# be able to journalize it.
body-md5: no
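Pulling the TLS options above together, a hedged sketch of a configuration that forces JA3 computation and bypasses encrypted traffic after the handshake (values illustrative; the defaults in this patch leave both commented out):

    tls:
      enabled: yes
      detection-ports:
        dp: 443
      ja3-fingerprints: yes         # compute JA3 even if no loaded rule needs it
      encryption-handling: bypass   # stop inspection once the session is encrypted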
@@ -685,19 +804,18 @@ app-layer:
content-inspect-window: 4096
imap:
enabled: detection-only
- msn:
- enabled: detection-only
smb:
- enabled: detection-only
+ enabled: yes
detection-ports:
dp: 139, 445
- # smb2 detection is disabled internally inside the engine.
- #smb2:
- # enabled: yes
- # Note: NFS parser depends on Rust support: pass --enable-rust
- # to configure.
+
+ # Stream reassembly size for SMB streams. By default track it completely.
+ #stream-depth: 0
+
nfs:
- enabled: no
+ enabled: yes
+ tftp:
+ enabled: yes
dns:
# memcaps. Globally and per flow/state.
#global-memcap: 16mb
@@ -708,16 +826,17 @@ app-layer:
#request-flood: 500
tcp:
- enabled: detection-only
+ enabled: yes
detection-ports:
dp: 53
udp:
- enabled: detection-only
+ enabled: yes
detection-ports:
dp: 53
http:
- enabled: detection-only
- # memcap: 64mb
+ enabled: yes
+ # memcap: Maximum memory capacity for HTTP
+ # Default is unlimited; a value such as 64mb can be set
# default-config: Used when no server-config matches
# personality: List of personalities used by default
@@ -725,37 +844,15 @@ app-layer:
# by http_client_body & pcre /P option.
# response-body-limit: Limit reassembly of response body for inspection
# by file_data, http_server_body & pcre /Q option.
- # double-decode-path: Double decode path section of the URI
- # double-decode-query: Double decode query section of the URI
- # response-body-decompress-layer-limit:
- # Limit to how many layers of compression will be
- # decompressed. Defaults to 2.
#
+ # For advanced options, see the user guide
+
+
# server-config: List of server configurations to use if address matches
- # address: List of ip addresses or networks for this block
+ # address: List of IP addresses or networks for this block
# personality: List of personalities used by this block
- # request-body-limit: Limit reassembly of request body for inspection
- # by http_client_body & pcre /P option.
- # response-body-limit: Limit reassembly of response body for inspection
- # by file_data, http_server_body & pcre /Q option.
- # double-decode-path: Double decode path section of the URI
- # double-decode-query: Double decode query section of the URI
#
- # uri-include-all: Include all parts of the URI. By default the
- # 'scheme', username/password, hostname and port
- # are excluded. Setting this option to true adds
- # all of them to the normalized uri as inspected
- # by http_uri, urilen, pcre with /U and the other
- # keywords that inspect the normalized uri.
- # Note that this does not affect http_raw_uri.
- # Also, note that including all was the default in
- # 1.4 and 2.0beta1.
- #
- # meta-field-limit: Hard size limit for request and response size
- # limits. Applies to request line and headers,
- # response line and headers. Does not apply to
- # request or response bodies. Default is 18k.
- # If this limit is reached an event is raised.
+ # Then, all the fields from default-config can be overridden
#
# Currently Available Personalities:
# Minimal, Generic, IDS (default), IIS_4_0, IIS_5_0, IIS_5_1, IIS_6_0,
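As a sketch of the per-server overriding described above (block label, network, and values are hypothetical):

    server-config:
      - internal-iis:                  # hypothetical label
          address: [10.0.0.0/8]        # networks this block applies to
          personality: IIS_6_0         # one of the personalities listed above
          request-body-limit: 0        # overrides the default-config value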
@@ -781,6 +878,20 @@ app-layer:
# auto will use http-body-inline mode in IPS mode, yes or no set it statically
http-body-inline: auto
+ # Decompress SWF files.
+ # 2 types: 'deflate', 'lzma', 'both' will decompress deflate and lzma
+ # compress-depth:
+ # Specifies the maximum amount of data to decompress,
+ # set 0 for unlimited.
+ # decompress-depth:
+ # Specifies the maximum amount of decompressed data to obtain,
+ # set 0 for unlimited.
+ swf-decompression:
+ enabled: yes
+ type: both
+ compress-depth: 0
+ decompress-depth: 0
+
# Take a random value for inspection sizes around the specified value.
# This lowers the risk of some evasion techniques but could lead to
# detection changes between runs. It is set to 'yes' by default.
@@ -795,6 +906,15 @@ app-layer:
double-decode-path: no
double-decode-query: no
+ # Can disable LZMA decompression
+ #lzma-enabled: yes
+ # Memory limit usage for LZMA decompression dictionary
+ # Data is decompressed until dictionary reaches this size
+ #lzma-memlimit: 1mb
+ # Maximum decompressed size with a compression ratio
+ # above 2048 (only LZMA can reach this ratio, deflate cannot)
+ #compression-bomb-limit: 1mb
+
server-config:
#- apache:
@@ -854,10 +974,15 @@ app-layer:
dp: 44818
sp: 44818
- # Note: parser depends on experimental Rust support
- # with --enable-rust-experimental passed to configure
ntp:
- enabled: no
+ enabled: yes
+
+ dhcp:
+ enabled: yes
+
+ # SIP, disabled by default.
+ sip:
+ #enabled: no
# Limit for the maximum number of asn1 frames to decode (default 256)
asn1-max-frames: 256
@@ -885,13 +1010,18 @@ run-as:
# Default location of the pid file. The pid file is only used in
# daemon mode (start Suricata with -D). If not running in daemon mode
# the --pidfile command line option must be used to create a pid file.
-#pid-file: /usr/local/var/run/suricata.pid
+#pid-file: /var/run/suricata.pid
# Daemon working directory
# Suricata will change directory to this one if provided
# Default: "/"
#daemon-directory: "/"
+# Umask.
+# Suricata will use this umask if it is provided. By default it will use the
+# umask passed on by the shell.
+#umask: 022
+
# Suricata core dump configuration. Limits the size of the core dump file to
# approximately max-dump. The actual core dump size will be a multiple of the
# page size. Core dumps that would be larger than max-dump are truncated. On
@@ -904,7 +1034,7 @@ run-as:
coredump:
max-dump: unlimited
-# If suricata box is a router for the sniffed networks, set it to 'router'. If
+# If the Suricata box is a router for the sniffed networks, set it to 'router'. If
# it is a pure sniffing setup, set it to 'sniffer-only'.
# If set to auto, the variable is internally switched to 'router' in IPS mode
# and 'sniffer-only' in IDS mode.
@@ -914,36 +1044,29 @@ host-mode: auto
# Number of packets preallocated per thread. The default is 1024. A higher number
# will make sure each CPU will be more easily kept busy, but may negatively
# impact caching.
-#
-# If you are using the CUDA pattern matcher (mpm-algo: ac-cuda), different rules
-# apply. In that case try something like 60000 or more. This is because the CUDA
-# pattern matcher buffers and scans as many packets as possible in parallel.
-#max-pending-packets: 1024
+max-pending-packets: 5000
# Runmode the engine should use. Please check --list-runmodes to get the available
-# runmodes for each packet acquisition method. Defaults to "autofp" (auto flow pinned
-# load balancing).
+# runmodes for each packet acquisition method. Default depends on selected capture
+# method. 'workers' generally gives best performance.
runmode: workers
# Specifies the kind of flow load balancer used by the flow pinned autofp mode.
#
# Supported schedulers are:
#
-# round-robin - Flows assigned to threads in a round robin fashion.
-# active-packets - Flows assigned to threads that have the lowest number of
-# unprocessed packets (default).
-# hash - Flow alloted usihng the address hash. More of a random
-# technique. Was the default in Suricata 1.2.1 and older.
+# hash - Flow assigned to threads using the 5-7 tuple hash.
+# ippair - Flow assigned to threads using addresses only.
#
-#autofp-scheduler: active-packets
+#autofp-scheduler: hash
# Preallocated size for packet. Default is 1514 which is the classical
# size for pcap on ethernet. You should adjust this value to the highest
# packet size (MTU + hardware header) on your system.
default-packet-size: {{ MTU + 15 }}
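For reference, the Jinja expression above renders to a concrete number; e.g. with MTU set to the integer 1500 the emitted line would be:

    default-packet-size: 1515

The extra 15 bytes appear to cover the 14-byte Ethernet header with one byte to spare (an assumption, not stated in this patch).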
-# Unix command socket can be used to pass commands to suricata.
-# An external tool can then connect to get information from suricata
+# Unix command socket can be used to pass commands to Suricata.
+# An external tool can then connect to get information from Suricata
# or trigger some modifications of the engine. Set enabled to yes
# to activate the feature. In auto mode, the feature will only be
# activated in live capture mode. You can use the filename variable to set
@@ -956,6 +1079,10 @@ unix-command:
#magic-file: /usr/share/file/magic
#magic-file:
+# GeoIP2 database file. Specify path and filename of GeoIP2 database
+# if using rules with "geoip" rule option.
+#geoip-database: /usr/local/share/GeoLite2/GeoLite2-Country.mmdb
+
legacy:
uricontent: enabled
@@ -963,7 +1090,7 @@ legacy:
## Detection settings
##
-# Set the order of alerts bassed on actions
+# Set the order of alerts based on actions
# The default order is pass, drop, reject, alert
# action-order:
# - pass
@@ -972,8 +1099,8 @@ legacy:
# - alert
# IP Reputation
-#reputation-categories-file: /usr/local/etc/suricata/iprep/categories.txt
-#default-reputation-path: /usr/local/etc/suricata/iprep
+#reputation-categories-file: /etc/suricata/iprep/categories.txt
+#default-reputation-path: /etc/suricata/iprep
#reputation-files:
# - reputation.list
@@ -1051,10 +1178,10 @@ defrag:
# emergency-recovery is the percentage of flows that the engine need to
# prune before unsetting the emergency state. The emergency state is activated
# when the memcap limit is reached, allowing to create new flows, but
-# prunning them with the emergency timeouts (they are defined below).
+# pruning them with the emergency timeouts (they are defined below).
# If the memcap is reached, the engine will try to prune flows
-# with the default timeouts. If it doens't find a flow to prune, it will set
-# the emergency bit and it will try again with more agressive timeouts.
+# with the default timeouts. If it doesn't find a flow to prune, it will set
+# the emergency bit and it will try again with more aggressive timeouts.
# If that doesn't work, then it will try to kill the last time seen flows
# not in use.
# The memcap can be specified in kb, mb, gb. Just a number indicates it's
@@ -1077,7 +1204,7 @@ vlan:
# Specific timeouts for flows. Here you can specify the timeouts that the
# active flows will wait to transit from the current state to another, on each
-# protocol. The value of "new" determine the seconds to wait after a hanshake or
+# protocol. The value of "new" determines the seconds to wait after a handshake or
# stream startup before the engine frees the data of that flow if it doesn't
# change the state to established (usually if we don't receive more packets
# of that flow). The value of "established" is the amount of
@@ -1138,7 +1265,7 @@ flow-timeouts:
# # packet. If csum validation is specified as
# # "yes", then packet with invalid csum will not
# # be processed by the engine stream/app layer.
-# # Warning: locally generated trafic can be
+# # Warning: locally generated traffic can be
# # generated without checksum due to hardware offload
# # of checksum. You can control the handling of checksum
# # on a per-interface basis via the 'checksum-checks'
@@ -1149,7 +1276,9 @@ flow-timeouts:
# inline: no # stream inline mode
# drop-invalid: yes # in inline mode, drop packets that are invalid with regards to streaming engine
# max-synack-queued: 5 # Max different SYN/ACKs to queue
-# bypass: no # Bypass packets when stream.depth is reached
+# bypass: no # Bypass packets when stream.reassembly.depth is reached.
+# # Warning: first side to reach this triggers
+# # the bypass.
#
# reassembly:
# memcap: 64mb # Can be specified in kb, mb, gb. Just a number
@@ -1222,9 +1351,22 @@ host:
decoder:
# Teredo decoder is known to not be completely accurate
- # it will sometimes detect non-teredo as teredo.
+ # as it will sometimes detect non-teredo as teredo.
teredo:
enabled: true
+ # ports to look for Teredo. Max 4 ports. If no ports are given, or
+ # the value is set to 'any', Teredo detection runs on _all_ UDP packets.
+ ports: $TEREDO_PORTS # syntax: '[3544, 1234]' or '3533' or 'any'.
+
+ # VXLAN decoder is assigned to up to 4 UDP ports. By default only the
+ # IANA assigned port 4789 is enabled.
+ vxlan:
+ enabled: true
+ ports: $VXLAN_PORTS # syntax: '8472, 4789'
+ # ERSPAN Type I decode support
+ erspan:
+ typeI:
+ enabled: false
##
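The $TEREDO_PORTS and $VXLAN_PORTS references above resolve against the vars section; this patch adds matching entries further down (sketched here, assuming the usual port-groups placement):

    vars:
      port-groups:
        VXLAN_PORTS: 4789
        TEREDO_PORTS: 3544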
@@ -1292,7 +1434,6 @@ detect:
# The supported algorithms are:
# "ac" - Aho-Corasick, default implementation
# "ac-bs" - Aho-Corasick, reduced memory implementation
-# "ac-cuda" - Aho-Corasick, CUDA implementation
# "ac-ks" - Aho-Corasick, "Ken Steele" variant
# "hs" - Hyperscan, available when built with Hyperscan support
#
@@ -1305,10 +1446,6 @@ detect:
# to be set to "single", because of ac's memory requirements, unless the
# ruleset is small enough to fit in one's memory, in which case one can
# use "full" with "ac". Rest of the mpms can be run in "full" mode.
-#
-# There is also a CUDA pattern matcher (only available if Suricata was
-# compiled with --enable-cuda: b2g_cuda. Make sure to update your
-# max-pending-packets setting above as well if you use b2g_cuda.
mpm-algo: auto
@@ -1338,19 +1475,26 @@ threading:
{%- if salt['pillar.get']('sensor:suriprocs') %}
cpu-affinity:
- management-cpu-set:
- cpu: [ all ] # include only these cpus in affinity settings
+ cpu: [ all ] # include only these CPUs in affinity settings
- receive-cpu-set:
- cpu: [ all ] # include only these cpus in affinity settings
+ cpu: [ all ] # include only these CPUs in affinity settings
- worker-cpu-set:
cpu: [ "all" ]
mode: "exclusive"
# Use explicitly 3 threads and don't compute the number by using
# detect-thread-ratio variable:
+ # threads: 3
threads: {{ salt['pillar.get']('sensor:suriprocs') }}
prio:
+ low: [ 0 ]
+ medium: [ "1-2" ]
+ high: [ 3 ]
default: "high"
- {% endif %}
-
+ #- verdict-cpu-set:
+ # cpu: [ 0 ]
+ # prio:
+ # default: "high"
+ {%- endif -%}
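The threads value in this branch comes from the sensor:suriprocs pillar. A hypothetical pillar snippet that would activate it (key layout inferred from the pillar.get calls in this template):

    sensor:
      suriprocs: 4   # render four Suricata worker threads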
{%- if salt['pillar.get']('sensor:suripins') %}
cpu-affinity:
- management-cpu-set:
@@ -1367,10 +1511,6 @@ threading:
default: "high"
{% endif %}
- #- verdict-cpu-set:
- # cpu: [ 0 ]
- # prio:
- # default: "high"
#
# By default Suricata creates one "detect" thread per available CPU/CPU core.
# This setting allows controlling this behaviour. A ratio setting of 2 will
@@ -1425,6 +1565,11 @@ profiling:
filename: keyword_perf.log
append: yes
+ prefilter:
+ enabled: yes
+ filename: prefilter_perf.log
+ append: yes
+
# per rulegroup profiling
rulegroups:
enabled: yes
@@ -1466,7 +1611,7 @@ profiling:
# When running in NFQ inline mode, it is possible to use a simulated
# non-terminal NFQUEUE verdict.
-# This permit to do send all needed packet to suricata via this a rule:
+# This permits sending all needed packets to Suricata via a rule like:
# iptables -I FORWARD -m mark ! --mark $MARK/$MASK -j NFQUEUE
# And below, you can have your standard filtering ruleset. To activate
# this mode, you need to set mode to 'repeat'
@@ -1475,7 +1620,7 @@ profiling:
# On linux >= 3.1, you can set batchcount to a value > 1 to improve performance
# by processing several packets before sending a verdict (worker runmode only).
# On linux >= 3.6, you can set the fail-open option to yes to have the kernel
-# accept the packet if suricata is not able to keep pace.
+# accept the packet if Suricata is not able to keep pace.
# bypass mark and mask can be used to implement NFQ bypass. If bypass mark is
# set then the NFQ bypass is activated. Suricata will set the bypass mark/mask
# on packets of a flow that needs to be bypassed. The Netfilter ruleset has to
@@ -1513,17 +1658,17 @@ nflog:
# general settings affecting packet capture
capture:
- # disable NIC offloading. It's restored when Suricata exists.
- # Enabled by default
+ # disable NIC offloading. It's restored when Suricata exits.
+ # Enabled by default.
#disable-offloading: false
#
# disable checksum validation. Same as setting '-k none' on the
- # commandline
+ # commandline.
#checksum-validation: none
# Netmap support
#
-# Netmap operates with NIC directly in driver, so you need FreeBSD wich have
+# Netmap operates with the NIC directly in the driver, so you need FreeBSD 11+ which has
# built-in netmap support or compile and install netmap module and appropriate
# NIC driver on your Linux system.
# To reach maximum throughput disable all receive-, segmentation-,
@@ -1535,7 +1680,9 @@ capture:
netmap:
# To specify OS endpoint add plus sign at the end (e.g. "eth0+")
- interface: eth2
- # Number of receive threads. "auto" uses number of RSS queues on interface.
+ # Number of capture threads. "auto" uses the number of RSS queues on the interface.
+ # Warning: unless the RSS hashing is symmetrical, this will lead to
+ # accuracy issues.
#threads: auto
# You can use the following variables to activate netmap tap or IPS mode.
# If copy-mode is set to ips or tap, the traffic coming to the current
@@ -1558,7 +1705,7 @@ netmap:
# Possible values are:
# - yes: checksum validation is forced
# - no: checksum validation is disabled
- # - auto: suricata uses a statistical approach to detect when
+ # - auto: Suricata uses a statistical approach to detect when
# checksum off-loading is used.
# Warning: 'checksum-validation' must be set to yes to have any validation
#checksum-checks: auto
@@ -1575,9 +1722,9 @@ netmap:
# for more info see http://www.ntop.org/products/pf_ring/
pfring:
- interface: eth0
- # Number of receive threads (>1 will enable experimental flow pinned
- # runmode)
- threads: 1
+ # Number of receive threads. If set to 'auto' Suricata will first try
+ # to use CPU (core) count and otherwise RSS queue count.
+ threads: auto
# Default clusterid. PF_RING will load balance packets based on flow.
# All threads/processes that will participate need to have the same
@@ -1587,8 +1734,15 @@ pfring:
# Default PF_RING cluster type. PF_RING can load balance per flow.
# Possible values are cluster_flow or cluster_round_robin.
cluster-type: cluster_flow
+
# bpf filter for this interface
#bpf-filter: tcp
+
+ # If bypass is set then the PF_RING hw bypass is activated, when supported
+ # by the interface in use. Suricata will instruct the interface to bypass
+ # all future packets of a flow that needs to be bypassed.
+ #bypass: yes
+
# Choose checksum verification mode for the interface. At the moment
# of the capture, some packets may be with an invalid checksum due to
# offloading to the network card of the checksum computation.
@@ -1596,7 +1750,7 @@ pfring:
# - rxonly: only compute checksum for packets received by network card.
# - yes: checksum validation is forced
# - no: checksum validation is disabled
- # - auto: suricata uses a statistical approach to detect when
+ # - auto: Suricata uses a statistical approach to detect when
# checksum off-loading is used. (default)
# Warning: 'checksum-validation' must be set to yes to have any validation
#checksum-checks: auto
@@ -1641,80 +1795,83 @@ napatech:
# (-1 = OFF, 1 - 100 = percentage of the host buffer that can be held back)
# This may be enabled when sharing streams with another application.
# Otherwise, it should be turned off.
- hba: -1
+ #hba: -1
- # use_all_streams set to "yes" will query the Napatech service for all configured
- # streams and listen on all of them. When set to "no" the streams config array
- # will be used.
- use-all-streams: yes
+ # When use_all_streams is set to "yes" the initialization code will query
+ # the Napatech service for all configured streams and listen on all of them.
+ # When set to "no" the streams config array will be used.
+ #
+ # This option necessitates running the appropriate NTPL commands to create
+ # the desired streams prior to running Suricata.
+ #use-all-streams: no
- # The streams to listen on. This can be either:
- # a list of individual streams (e.g. streams: [0,1,2,3])
+ # The streams to listen on when auto-config is disabled or when threading
+ # cpu-affinity is disabled. This can be either:
+ # an individual stream (e.g. streams: [0])
# or
# a range of streams (e.g. streams: ["0-3"])
+ #
streams: ["0-3"]
-# Tilera mpipe configuration. for use on Tilera TILE-Gx.
-mpipe:
+ # When auto-config is enabled the streams will be created and assigned
+ # automatically to the NUMA node where the thread resides. If cpu-affinity
+ # is enabled in the threading section, then the streams will be created
+ # according to the number of worker threads specified in the worker cpu set.
+ # Otherwise, the streams array is used to define the streams.
+ #
+ # This option cannot be used simultaneously with "use-all-streams".
+ #
+ auto-config: yes
- # Load balancing modes: "static", "dynamic", "sticky", or "round-robin".
- load-balance: dynamic
+ # Ports indicates which Napatech ports are to be used in auto-config mode.
+ # These are the port IDs of the ports that will be merged prior to the
+ # traffic being distributed to the streams.
+ #
+ # This can be specified in any of the following ways:
+ #
+ # a list of individual ports (e.g. ports: [0,1,2,3])
+ #
+ # a range of ports (e.g. ports: [0-3])
+ #
+ # "all" to indicate that all ports are to be merged together
+ # (e.g. ports: [all])
+ #
+ # This has no effect if auto-config is disabled.
+ #
+ ports: [all]
- # Number of Packets in each ingress packet queue. Must be 128, 512, 2028 or 65536
- iqueue-packets: 2048
-
- # List of interfaces we will listen on.
- inputs:
- - interface: xgbe2
- - interface: xgbe3
- - interface: xgbe4
-
-
- # Relative weight of memory for packets of each mPipe buffer size.
- stack:
- size128: 0
- size256: 9
- size512: 0
- size1024: 0
- size1664: 7
- size4096: 0
- size10386: 0
- size16384: 0
+ # When auto-config is enabled the hashmode specifies the algorithm for
+ # determining to which stream a given packet is to be delivered.
+ # This can be any valid Napatech NTPL hashmode command.
+ #
+ # The most common hashmode commands are: hash2tuple, hash2tuplesorted,
+ # hash5tuple, hash5tuplesorted and roundrobin.
+ #
+ # See the Napatech NTPL documentation for other hashmodes and details on their use.
+ #
+ # This has no effect if auto-config is disabled.
+ #
+ hashmode: hash5tuplesorted
##
-## Hardware accelaration
+## Configure Suricata to load Suricata-Update managed rules.
+##
+## If this section is completely commented out move down to the "Advanced rule
+## file configuration".
##
-# Cuda configuration.
-cuda:
- # The "mpm" profile. On not specifying any of these parameters, the engine's
- # internal default values are used, which are same as the ones specified in
- # in the default conf file.
- mpm:
- # The minimum length required to buffer data to the gpu.
- # Anything below this is MPM'ed on the CPU.
- # Can be specified in kb, mb, gb. Just a number indicates it's in bytes.
- # A value of 0 indicates there's no limit.
- data-buffer-size-min-limit: 0
- # The maximum length for data that we would buffer to the gpu.
- # Anything over this is MPM'ed on the CPU.
- # Can be specified in kb, mb, gb. Just a number indicates it's in bytes.
- data-buffer-size-max-limit: 1500
- # The ring buffer size used by the CudaBuffer API to buffer data.
- cudabuffer-buffer-size: 500mb
- # The max chunk size that can be sent to the gpu in a single go.
- gpu-transfer-size: 50mb
- # The timeout limit for batching of packets in microseconds.
- batching-timeout: 2000
- # The device to use for the mpm. Currently we don't support load balancing
- # on multiple gpus. In case you have multiple devices on your system, you
- # can specify the device to use, using this conf. By default we hold 0, to
- # specify the first device cuda sees. To find out device-id associated with
- # the card(s) on the system run "suricata --list-cuda-cards".
- device-id: 0
- # No of Cuda streams used for asynchronous processing. All values > 0 are valid.
- # For this option you need a device with Compute Capability > 1.0.
- cuda-streams: 2
+default-rule-path: /etc/suricata/rules
+
+rule-files:
+ - all.rules
+
+##
+## Auxiliary configuration files.
+##
+
+classification-file: /etc/suricata/classification.config
+reference-config-file: /etc/suricata/reference.config
+# threshold-file: /etc/suricata/threshold.config
##
## Include other configs
diff --git a/salt/suricata/files/suricataMETA.yaml b/salt/suricata/files/suricataMETA.yaml
index 99a59c719..964d3fab7 100644
--- a/salt/suricata/files/suricataMETA.yaml
+++ b/salt/suricata/files/suricataMETA.yaml
@@ -1,16 +1,18 @@
%YAML 1.1
---
-{%- set interface = salt['pillar.get']('sensor:interface', 'bond0') %}
-{%- if grains['role'] == 'so-eval' %}
-{%- set MTU = 1500 %}
-{%- else %}
-{%- set MTU = salt['pillar.get']('sensor:mtu', '1500') %}
-{%- endif %}
-{%- if salt['pillar.get']('sensor:homenet') %}
- {%- set homenet = salt['pillar.get']('sensor:hnsensor', '') %}
-{%- else %}
- {%- set homenet = salt['pillar.get']('static:hnmaster', '') %}
-{%- endif %}
+ {%- set interface = salt['pillar.get']('sensor:interface', 'bond0') %}
+ {%- if grains['role'] == 'so-eval' %}
+ {%- set MTU = 1500 %}
+ {%- elif grains['role'] == 'so-helix' %}
+ {%- set MTU = 9000 %}
+ {%- else %}
+ {%- set MTU = salt['pillar.get']('sensor:mtu', '1500') %}
+ {%- endif %}
+ {%- if salt['pillar.get']('sensor:homenet') %}
+ {%- set homenet = salt['pillar.get']('sensor:hnsensor', '') %}
+ {%- else %}
+ {%- set homenet = salt['pillar.get']('static:hnmaster', '') %}
+ {%- endif %}
# Suricata configuration file. In addition to the comments describing all
# options in this file, full documentation can be found at:
# https://suricata.readthedocs.io/en/latest/configuration/suricata-yaml.html
@@ -23,6 +25,11 @@ vars:
# more specific is better for alert accuracy and performance
address-groups:
HOME_NET: "[{{ homenet }}]"
+ #HOME_NET: "[192.168.0.0/16]"
+ #HOME_NET: "[10.0.0.0/8]"
+ #HOME_NET: "[172.16.0.0/12]"
+ #HOME_NET: "any"
+
EXTERNAL_NET: "!$HOME_NET"
#EXTERNAL_NET: "any"
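As a worked example of the templating above: with sensor:homenet set and sensor:hnsensor holding the hypothetical value 10.0.0.0/8,192.168.0.0/16, the line renders as:

    HOME_NET: "[10.0.0.0/8,192.168.0.0/16]"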
@@ -49,6 +56,8 @@ vars:
MODBUS_PORTS: 502
FILE_DATA_PORTS: "[$HTTP_PORTS,110,143]"
FTP_PORTS: 21
+ VXLAN_PORTS: 4789
+ TEREDO_PORTS: 3544
##
## Step 2: select outputs to enable
@@ -64,9 +73,12 @@ stats:
enabled: yes
# The interval field (in seconds) controls at what interval
# the loggers are invoked.
- interval: 8
+ interval: 30
# Add decode events as stats.
#decoder-events: true
+ # Decoder event prefix in stats. Has been 'decoder' before, but that leads
+ # to missing events in the eve.stats records. See issue #2225.
+ #decoder-events-prefix: "decoder.event"
# Add stream events as stats.
#stream-events: false
@@ -83,18 +95,35 @@ outputs:
- eve-log:
enabled: yes
filetype: regular #regular|syslog|unix_dgram|unix_stream|redis
- filename: eve.json
+ filename: /nsm/eve.json
rotate-interval: hour
+
#prefix: "@cee: " # prefix to prepend to each log entry
# the following are valid when type: syslog above
#identity: "suricata"
#facility: local5
#level: Info ## possible levels: Emergency, Alert, Critical,
## Error, Warning, Notice, Info, Debug
+ #redis:
+ # server: 127.0.0.1
+ # port: 6379
+ # async: true ## if redis replies are read asynchronously
+ # mode: list ## possible values: list|lpush (default), rpush, channel|publish
+ # ## lpush and rpush are using a Redis list. "list" is an alias for lpush
+ # ## publish is using a Redis channel. "channel" is an alias for publish
+ # key: suricata ## key or channel to use (default to suricata)
+ # Redis pipelining setup. This enables sending a query only every
+ # 'batch-size' events. This should lower the latency induced by the network
+ # connection at the cost of some memory. There is no flushing implemented,
+ # so this setting should be reserved for high-traffic Suricata deployments.
+ # pipelining:
+ # enabled: yes ## set enable to yes to enable query pipelining
+ # batch-size: 10 ## number of entries to keep in buffer
# Include top level metadata. Default yes.
#metadata: no
+ # include the name of the input pcap file in pcap file processing mode
pcap-file: false
# Community Flow ID
@@ -106,7 +135,7 @@ outputs:
# to make the id less predictable.
# enable/disable the community id feature.
- community-id: false
+ community-id: true
# Seed value for the ID output. Valid values are 0-65535.
community-id-seed: 0
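Once enabled, every eve record carries a community_id field derived from the flow tuple and this seed; a seed of 0 matches the default used by other implementations of the spec, such as Zeek, so IDs correlate across tools. A sketch of the field (value illustrative):

    community_id: "1:LQU9qZlK+B5F3KDmev6m5PMibrg="   # "1:" version prefix, then base64 digest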
@@ -130,36 +159,76 @@ outputs:
types:
- alert:
- # payload: yes # enable dumping payload in Base64
- # payload-buffer-size: 4kb # max size of payload buffer to output in eve-log
- # payload-printable: yes # enable dumping payload in printable (lossy) format
- # packet: yes # enable dumping of packet (without stream segments)
- # http-body: yes # enable dumping of http body in Base64
- # http-body-printable: yes # enable dumping of http body in printable format
- # metadata: no # enable inclusion of app layer metadata with alert. Default yes
+ payload: no # enable dumping payload in Base64
+ payload-buffer-size: 4kb # max size of payload buffer to output in eve-log
+ payload-printable: yes # enable dumping payload in printable (lossy) format
+ packet: yes # enable dumping of packet (without stream segments)
+ metadata:
+ app-layer: false
+ flow: false
+ rule:
+ metadata: true
+ raw: true
+
+ # http-body: yes # Requires metadata; enable dumping of http body in Base64
+ # http-body-printable: yes # Requires metadata; enable dumping of http body in printable format
# Enable the logging of tagged packets for rules using the
# "tag" keyword.
tagged-packets: no
+ - anomaly:
+ # Anomaly log records describe unexpected conditions such
+ # as truncated packets, packets with invalid IP/UDP/TCP
+ # length values, and other events that render the packet
+ # invalid for further processing or describe unexpected
+ # behavior on an established stream. Networks which
+ # experience high occurrences of anomalies may experience
+ # packet processing degradation.
+ #
+ # Anomalies are reported for the following:
+ # 1. Decode: Values and conditions that are detected while
+ # decoding individual packets. This includes invalid or
+ # unexpected values for low-level protocol lengths as well
+ # as stream related events (TCP 3-way handshake issues,
+ # unexpected sequence number, etc).
+ # 2. Stream: This includes stream related events (TCP
+ # 3-way handshake issues, unexpected sequence number,
+ # etc).
+ # 3. Application layer: These denote application layer
+ # specific conditions that are unexpected, invalid or are
+ # unexpected given the application monitoring state.
+ #
+ # By default, anomaly logging is disabled. When anomaly
+ # logging is enabled, applayer anomaly reporting is
+ # enabled.
+ enabled: no
+ #
+ # Choose one or more types of anomaly logging and whether to enable
+ # logging of the packet header for packet anomalies.
+ types:
+ decode: no
+ stream: no
+ applayer: yes
+ packethdr: no
- http:
extended: yes # enable this for extended logging information
# custom allows additional http fields to be included in eve-log
# the example below adds three additional fields when uncommented
#custom: [Accept-Encoding, Accept-Language, Authorization]
+ # set this value to one and only one among {both, request, response}
+ # to dump all http headers for every http request and/or response
+ # dump-all-headers: none
- dns:
# This configuration uses the new DNS logging format,
# the old configuration is still available:
- # http://suricata.readthedocs.io/en/latest/configuration/suricata-yaml.html#eve-extensible-event-format
- # Use version 2 logging with the new format:
- # DNS answers will be logged in one single event
- # rather than an event for each of it.
- # Without setting a version the version
- # will fallback to 1 for backwards compatibility.
- # Note: version 1 is not available with rust enabled
+ # https://suricata.readthedocs.io/en/latest/output/eve/eve-json-output.html#dns-v1-format
+
+ # As of Suricata 5.0, version 2 of the eve dns output
+ # format is the default.
version: 2
# Enable/disable this logger. Default: enabled.
- #enabled: no
+ enabled: yes
# Control logging of requests and responses:
# - requests: enable logging of DNS queries
@@ -174,8 +243,8 @@ outputs:
# Default: all
#formats: [detailed, grouped]
- # Answer types to log.
- # Default: all
+ # Types to log, based on the query type.
+ # Default: all.
#types: [a, aaaa, cname, mx, ns, ptr, txt]
- tls:
extended: yes # enable this for extended logging information
@@ -184,7 +253,7 @@ outputs:
#session-resumption: no
# custom allows control over which tls fields are included
# in eve-log
- #custom: [subject, issuer, session_resumed, serial, fingerprint, sni, version, not_before, not_after, certificate, chain, ja3]
+ #custom: [subject, issuer, session_resumed, serial, fingerprint, sni, version, not_before, not_after, certificate, chain, ja3, ja3s]
- files:
force-magic: no # force logging magic on all logged files
# force logging of checksums, available hash functions are md5,
@@ -207,20 +276,23 @@ outputs:
# to yes
#md5: [body, subject]
- #- dnp3
+ - dnp3
+ - ftp
+ - rdp
- nfs
- smb
- tftp
- ikev2
- krb5
+ - snmp
+ - sip
- dhcp:
- # DHCP logging requires Rust.
enabled: yes
# When extended mode is on, all DHCP messages are logged
# with full detail. When extended mode is off (the
# default), just enough information to map a MAC address
# to an IP address is logged.
- extended: no
+ # extended: no
- ssh
#- stats:
# totals: yes # stats for all threads merged together
@@ -236,47 +308,11 @@ outputs:
# flowints.
#- metadata
- # alert output for use with Barnyard2
+ # deprecated - unified2 alert format for use with Barnyard2
- unified2-alert:
enabled: no
- filename: unified2.alert
-
- # File size limit. Can be specified in kb, mb, gb. Just a number
- # is parsed as bytes.
- #limit: 32mb
-
- # By default unified2 log files have the file creation time (in
- # unix epoch format) appended to the filename. Set this to yes to
- # disable this behaviour.
- #nostamp: no
-
- # Sensor ID field of unified2 alerts.
- #sensor-id: 0
-
- # Include payload of packets related to alerts. Defaults to true, set to
- # false if payload is not required.
- #payload: yes
-
- # HTTP X-Forwarded-For support by adding the unified2 extra header or
- # overwriting the source or destination IP address (depending on flow
- # direction) with the one reported in the X-Forwarded-For HTTP header.
- # This is helpful when reviewing alerts for traffic that is being reverse
- # or forward proxied.
- xff:
- enabled: no
- # Two operation modes are available, "extra-data" and "overwrite". Note
- # that in the "overwrite" mode, if the reported IP address in the HTTP
- # X-Forwarded-For header is of a different version of the packet
- # received, it will fall-back to "extra-data" mode.
- mode: extra-data
- # Two proxy deployments are supported, "reverse" and "forward". In
- # a "reverse" deployment the IP address used is the last one, in a
- # "forward" deployment the first IP address is used.
- deployment: reverse
- # Header name where the actual IP address will be reported, if more
- # than one IP address is present, the last IP address will be the
- # one taken into consideration.
- header: X-Forwarded-For
+ # for further options see:
+ # https://suricata.readthedocs.io/en/suricata-5.0.0/configuration/suricata-yaml.html#alert-output-for-use-with-barnyard2-unified2-alert
# a line based log of HTTP requests (no alerts)
- http-log:
@@ -285,6 +321,7 @@ outputs:
append: yes
#extended: yes # enable this for extended logging information
#custom: yes # enabled the custom logging format (defined by customformat)
+ #customformat: ""
#filetype: regular # 'regular', 'unix_stream' or 'unix_dgram'
# a line based log of TLS handshake parameters (no alerts)
@@ -294,6 +331,7 @@ outputs:
append: yes
#extended: yes # Log extended information like fingerprint
#custom: yes # enabled the custom logging format (defined by customformat)
+ #customformat: ""
#filetype: regular # 'regular', 'unix_stream' or 'unix_dgram'
# output TLS transaction where the session is resumed using a
# session id
@@ -304,14 +342,6 @@ outputs:
enabled: no
#certs-log-dir: certs # directory to store the certificates files
- # a line based log of DNS requests and/or replies (no alerts)
- # Note: not available when Rust is enabled (--enable-rust).
- - dns-log:
- enabled: no
- filename: dns.log
- append: yes
- #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram'
-
# Packet log... log packets in pcap format. 3 modes of operation: "normal"
# "multi" and "sguil".
#
@@ -382,7 +412,7 @@ outputs:
append: yes
#filetype: regular # 'regular', 'unix_stream' or 'unix_dgram'
- # alert output to prelude (http://www.prelude-technologies.com/) only
+ # alert output to prelude (https://www.prelude-siem.org/) only
# available if Suricata has been compiled with --enable-prelude
- alert-prelude:
enabled: no
@@ -397,7 +427,7 @@ outputs:
append: yes # append to file (yes) or overwrite it (no)
totals: yes # stats for all threads merged together
threads: no # per thread stats
- #null-values: yes # print counters that have value 0
+ null-values: yes # print counters that have value 0
# a line based alerts log similar to fast.log into syslog
- syslog:
@@ -409,12 +439,11 @@ outputs:
#level: Info ## possible levels: Emergency, Alert, Critical,
## Error, Warning, Notice, Info, Debug
- # a line based information for dropped packets in IPS mode
+ # deprecated - line based information for dropped packets in IPS mode
- drop:
enabled: no
- filename: drop.log
- append: yes
- #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram'
+ # further options documented at:
+ # https://suricata.readthedocs.io/en/suricata-5.0.0/configuration/suricata-yaml.html#drop-log-a-line-based-information-for-dropped-packets
# Output module for storing files on disk. Files are stored in a
# directory named with the first 2 characters of the
@@ -481,58 +510,18 @@ outputs:
# one taken into consideration.
header: X-Forwarded-For
- # output module to store extracted files to disk (old style, deprecated)
- #
- # The files are stored to the log-dir in a format "file." where is
- # an incrementing number starting at 1. For each file "file." a meta
- # file "file..meta" is created. Before they are finalized, they will
- # have a ".tmp" suffix to indicate that they are still being processed.
- #
- # If include-pid is yes, then the files are instead "file..", with
- # meta files named as "file...meta"
- #
- # File extraction depends on a lot of things to be fully done:
- # - file-store stream-depth. For optimal results, set this to 0 (unlimited)
- # - http request / response body sizes. Again set to 0 for optimal results.
- # - rules that contain the "filestore" keyword.
+ # deprecated - file-store v1
- file-store:
- enabled: no # set to yes to enable
- log-dir: files # directory to store the files
- force-magic: no # force logging magic on all stored files
- # force logging of checksums, available hash functions are md5,
- # sha1 and sha256
- #force-hash: [md5]
- force-filestore: no # force storing of all files
- # override global stream-depth for sessions in which we want to
- # perform file extraction. Set to 0 for unlimited.
- #stream-depth: 0
- #waldo: file.waldo # waldo file to store the file_id across runs
- # uncomment to disable meta file writing
- #write-meta: no
- # uncomment the following variable to define how many files can
- # remain open for filestore by Suricata. Default value is 0 which
- # means files get closed after each write
- #max-open-files: 1000
- include-pid: no # set to yes to include pid in file names
-
- # output module to log files tracked in a easily parsable JSON format
- - file-log:
enabled: no
- filename: files-json.log
- append: yes
- #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram'
-
- force-magic: no # force logging magic on all logged files
- # force logging of checksums, available hash functions are md5,
- # sha1 and sha256
- #force-hash: [md5]
+ # further options documented at:
+ # https://suricata.readthedocs.io/en/suricata-5.0.0/file-extraction/file-extraction.html#file-store-version-1
# Log TCP data after stream normalization
# 2 types: file or dir. File logs into a single logfile. Dir creates
# 2 files per TCP session and stores the raw TCP data into them.
# Using 'both' will enable both file and dir modes.
#
- # Note: limited by stream.depth
+ # Note: limited by stream.reassembly.depth
- tcp-data:
enabled: no
type: file
@@ -591,10 +580,14 @@ logging:
- file:
enabled: yes
level: info
- filename: /var/log/suricata/suricata.log
+ filename: suricata.log
# type: json
- syslog:
enabled: no
+ facility: local5
+ format: "[%i] <%d> -- "
+ # type: json
+
##
## Step 4: configure common capture settings
@@ -613,16 +606,11 @@ af-packet:
# Default AF_PACKET cluster type. AF_PACKET can load balance per flow or per hash.
# This is only supported for Linux kernel > 3.1
# possible values are:
- # * cluster_round_robin: round robin load balancing
# * cluster_flow: all packets of a given flow are sent to the same socket
# * cluster_cpu: all packets treated in kernel by a CPU are sent to the same socket
# * cluster_qm: all packets linked by network card to a RSS queue are sent to the same
# socket. Requires at least Linux 3.14.
- # * cluster_random: packets are sent randomly to sockets but with an equipartition.
- # Requires at least Linux 3.14.
- # * cluster_rollover: kernel rotates between sockets filling each socket before moving
- # to the next. Requires at least Linux 3.10.
- # * cluster_ebpf: eBPF file load balancing. See doc/userguide/capture/ebpf-xdt.rst for
+ # * cluster_ebpf: eBPF file load balancing. See doc/userguide/capture-hardware/ebpf-xdp.rst for
# more info.
# Recommended modes are cluster_flow on most boxes and cluster_cpu or cluster_qm on systems
# with capture cards using RSS (requires cpu affinity tuning and system irq tuning)
@@ -630,12 +618,8 @@ af-packet:
# In some fragmentation cases, the hash cannot be computed. If "defrag" is set
# to yes, the kernel will do the needed defragmentation before sending the packets.
defrag: yes
- # After Linux kernel 3.10 it is possible to activate the rollover option: if a socket is
- # full then kernel will send the packet on the next socket with room available. This option
- # can minimize packet drop and increase the treated bandwidth on single intensive flow.
- #rollover: yes
# To use the ring feature of AF_PACKET, set 'use-mmap' to yes
- #use-mmap: yes
+ use-mmap: yes
# Lock memory map to keep it from going to swap. Be careful that oversubscribing could lock
# your system
#mmap-locked: yes
@@ -683,14 +667,13 @@ af-packet:
#copy-mode: ips
#copy-iface: eth1
# For eBPF and XDP setup including bypass, filter and load balancing, please
- # see doc/userguide/capture/ebpf-xdt.rst for more info.
+ # see doc/userguide/capture-hardware/ebpf-xdp.rst for more info.
# Put default values here. These will be used for an interface that is not
# in the list above.
- interface: default
#threads: auto
#use-mmap: no
- #rollover: yes
#tpacket-v3: yes
# Cross platform libpcap capture support
@@ -753,6 +736,8 @@ app-layer:
protocols:
krb5:
enabled: yes
+ snmp:
+ enabled: yes
ikev2:
enabled: yes
tls:
@@ -760,8 +745,9 @@ app-layer:
detection-ports:
dp: 443
- # Generate JA3 fingerprint from client hello
- ja3-fingerprints: yes
+ # Generate JA3 fingerprint from client hello. If not specified it
+ # will be disabled by default, but enabled if rules require it.
+ #ja3-fingerprints: auto
# What to do when the encrypted communications start:
# - default: keep tracking TLS session, check for protocol anomalies,
@@ -775,17 +761,21 @@ app-layer:
#
# For best performance, select 'bypass'.
#
- #encrypt-handling: default
+ #encryption-handling: default
dcerpc:
enabled: yes
ftp:
enabled: yes
# memcap: 64mb
+ # RDP, disabled by default.
+ rdp:
+ #enabled: no
ssh:
enabled: yes
smtp:
enabled: yes
+ raw-extraction: no
# Configure SMTP-MIME Decoder
mime:
# Decode MIME messages from SMTP transactions
@@ -814,10 +804,6 @@ app-layer:
content-inspect-window: 4096
imap:
enabled: detection-only
- msn:
- enabled: detection-only
- # Note: --enable-rust is required for full SMB1/2 support. W/o rust
- # only minimal SMB1 support is available.
smb:
enabled: yes
detection-ports:
@@ -826,8 +812,6 @@ app-layer:
# Stream reassembly size for SMB streams. By default track it completely.
#stream-depth: 0
- # Note: NFS parser depends on Rust support: pass --enable-rust
- # to configure.
nfs:
enabled: yes
tftp:
@@ -851,7 +835,8 @@ app-layer:
dp: 53
http:
enabled: yes
- # memcap: 64mb
+ # memcap: Maximum memory capacity for HTTP
+ # Default is unlimited; a value such as 64mb can be set
# default-config: Used when no server-config matches
# personality: List of personalities used by default
@@ -859,37 +844,15 @@ app-layer:
# by http_client_body & pcre /P option.
# response-body-limit: Limit reassembly of response body for inspection
# by file_data, http_server_body & pcre /Q option.
- # double-decode-path: Double decode path section of the URI
- # double-decode-query: Double decode query section of the URI
- # response-body-decompress-layer-limit:
- # Limit to how many layers of compression will be
- # decompressed. Defaults to 2.
#
+ # For advanced options, see the user guide
+
+
# server-config: List of server configurations to use if address matches
# address: List of IP addresses or networks for this block
# personality: List of personalities used by this block
- # request-body-limit: Limit reassembly of request body for inspection
- # by http_client_body & pcre /P option.
- # response-body-limit: Limit reassembly of response body for inspection
- # by file_data, http_server_body & pcre /Q option.
- # double-decode-path: Double decode path section of the URI
- # double-decode-query: Double decode query section of the URI
#
- # uri-include-all: Include all parts of the URI. By default the
- # 'scheme', username/password, hostname and port
- # are excluded. Setting this option to true adds
- # all of them to the normalized uri as inspected
- # by http_uri, urilen, pcre with /U and the other
- # keywords that inspect the normalized uri.
- # Note that this does not affect http_raw_uri.
- # Also, note that including all was the default in
- # 1.4 and 2.0beta1.
- #
- # meta-field-limit: Hard size limit for request and response size
- # limits. Applies to request line and headers,
- # response line and headers. Does not apply to
- # request or response bodies. Default is 18k.
- # If this limit is reached an event is raised.
+ # Then, all the fields from default-config can be overridden
#
# Currently Available Personalities:
# Minimal, Generic, IDS (default), IIS_4_0, IIS_5_0, IIS_5_1, IIS_6_0,
@@ -943,6 +906,15 @@ app-layer:
double-decode-path: no
double-decode-query: no
+ # Can disable LZMA decompression
+ #lzma-enabled: yes
+ # Memory limit usage for LZMA decompression dictionary
+ # Data is decompressed until dictionary reaches this size
+ #lzma-memlimit: 1mb
+ # Maximum decompressed size with a compression ratio
+ # above 2048 (only LZMA can reach this ratio, deflate cannot)
+ #compression-bomb-limit: 1mb
+
server-config:
#- apache:
@@ -1002,13 +974,16 @@ app-layer:
dp: 44818
sp: 44818
- # Note: parser depends on Rust support
ntp:
enabled: yes
dhcp:
enabled: yes
+ # SIP, disabled by default.
+ sip:
+ #enabled: no
+
# Limit for the maximum number of asn1 frames to decode (default 256)
asn1-max-frames: 256
@@ -1024,9 +999,9 @@ asn1-max-frames: 256
##
# Run suricata as user and group.
-#run-as:
-# user: suri
-# group: suri
+run-as:
+ user: suricata
+ group: suricata
# Some logging module will use that name in event as identifier. The default
# value is the hostname
@@ -1069,29 +1044,26 @@ host-mode: auto
# Number of packets preallocated per thread. The default is 1024. A higher number
# will make sure each CPU will be more easily kept busy, but may negatively
# impact caching.
-#max-pending-packets: 1024
+max-pending-packets: 5000
# Runmode the engine should use. Please check --list-runmodes to get the available
-# runmodes for each packet acquisition method. Defaults to "autofp" (auto flow pinned
-# load balancing).
+# runmodes for each packet acquisition method. Default depends on selected capture
+# method. 'workers' generally gives best performance.
runmode: workers
# Specifies the kind of flow load balancer used by the flow pinned autofp mode.
#
# Supported schedulers are:
#
-# round-robin - Flows assigned to threads in a round robin fashion.
-# active-packets - Flows assigned to threads that have the lowest number of
-# unprocessed packets (default).
-# hash - Flow allocated using the address hash. More of a random
-# technique. Was the default in Suricata 1.2.1 and older.
+# hash - Flow assigned to threads using the 5-7 tuple hash.
+# ippair - Flow assigned to threads using addresses only.
#
-#autofp-scheduler: active-packets
+#autofp-scheduler: hash
# Preallocated size for packet. Default is 1514 which is the classical
# size for pcap on ethernet. You should adjust this value to the highest
# packet size (MTU + hardware header) on your system.
-#default-packet-size: 1514
+default-packet-size: {{ MTU + 15 }}
# Unix command socket can be used to pass commands to Suricata.
# An external tool can then connect to get information from Suricata
@@ -1107,6 +1079,10 @@ unix-command:
#magic-file: /usr/share/file/magic
#magic-file:
+# GeoIP2 database file. Specify path and filename of GeoIP2 database
+# if using rules with "geoip" rule option.
+#geoip-database: /usr/local/share/GeoLite2/GeoLite2-Country.mmdb
+
legacy:
uricontent: enabled
@@ -1300,7 +1276,9 @@ flow-timeouts:
# inline: no # stream inline mode
# drop-invalid: yes # in inline mode, drop packets that are invalid with regards to streaming engine
# max-synack-queued: 5 # Max different SYN/ACKs to queue
-# bypass: no # Bypass packets when stream.depth is reached
+# bypass: no # Bypass packets when stream.reassembly.depth is reached.
+# # Warning: first side to reach this triggers
+# # the bypass.
#
# reassembly:
# memcap: 64mb # Can be specified in kb, mb, gb. Just a number
@@ -1373,9 +1351,22 @@ host:
decoder:
# Teredo decoder is known to not be completely accurate
- # it will sometimes detect non-teredo as teredo.
+ # as it will sometimes detect non-teredo as teredo.
teredo:
enabled: true
+ # ports to look for Teredo. Max 4 ports. If no ports are given, or
+ # the value is set to 'any', Teredo detection runs on _all_ UDP packets.
+ ports: $TEREDO_PORTS # syntax: '[3544, 1234]' or '3533' or 'any'.
+
+ # VXLAN decoder is assigned to up to 4 UDP ports. By default only the
+ # IANA assigned port 4789 is enabled.
+ vxlan:
+ enabled: true
+ ports: $VXLAN_PORTS # syntax: '8472, 4789'
+ # ERSPAN Type I decode support
+ erspan:
+ typeI:
+ enabled: false
##
@@ -1484,19 +1475,26 @@ threading:
{%- if salt['pillar.get']('sensor:suriprocs') %}
cpu-affinity:
- management-cpu-set:
- cpu: [ all ] # include only these cpus in affinity settings
+ cpu: [ all ] # include only these CPUs in affinity settings
- receive-cpu-set:
- cpu: [ all ] # include only these cpus in affinity settings
+ cpu: [ all ] # include only these CPUs in affinity settings
- worker-cpu-set:
cpu: [ "all" ]
mode: "exclusive"
# Use explicitly 3 threads and don't compute the number by using
# detect-thread-ratio variable:
+ # threads: 3
threads: {{ salt['pillar.get']('sensor:suriprocs') }}
prio:
- default: "medium"
- {% endif %}
-
+ low: [ 0 ]
+ medium: [ "1-2" ]
+ high: [ 3 ]
+ default: "high"
+ #- verdict-cpu-set:
+ # cpu: [ 0 ]
+ # prio:
+ # default: "high"
+ {%- endif -%}
{%- if salt['pillar.get']('sensor:suripins') %}
cpu-affinity:
- management-cpu-set:
@@ -1512,6 +1510,8 @@ threading:
prio:
default: "high"
{% endif %}
+
+ #
# By default Suricata creates one "detect" thread per available CPU/CPU core.
# This setting allows controlling this behaviour. A ratio setting of 2 will
# create 2 detect threads for each CPU/CPU core. So for a dual core CPU this
@@ -1545,7 +1545,7 @@ profiling:
# Profiling can be disabled here, but it will still have a
# performance impact if compiled in.
- enabled: no
+ enabled: yes
filename: rule_perf.log
append: yes
@@ -1668,7 +1668,7 @@ capture:
# Netmap support
#
-# Netmap operates with NIC directly in driver, so you need FreeBSD which have
+# Netmap operates with the NIC directly in the driver, so you need FreeBSD 11+ which has
# built-in netmap support or compile and install netmap module and appropriate
# NIC driver on your Linux system.
# To reach maximum throughput disable all receive-, segmentation-,
@@ -1680,7 +1680,9 @@ capture:
netmap:
# To specify OS endpoint add plus sign at the end (e.g. "eth0+")
- interface: eth2
- # Number of receive threads. "auto" uses number of RSS queues on interface.
+ # Number of capture threads. "auto" uses the number of RSS queues on the interface.
+ # Warning: unless the RSS hashing is symmetrical, this will lead to
+ # accuracy issues.
#threads: auto
# You can use the following variables to activate netmap tap or IPS mode.
# If copy-mode is set to ips or tap, the traffic coming to the current
@@ -1793,45 +1795,63 @@ napatech:
# (-1 = OFF, 1 - 100 = percentage of the host buffer that can be held back)
# This may be enabled when sharing streams with another application.
# Otherwise, it should be turned off.
- hba: -1
+ #hba: -1
- # use_all_streams set to "yes" will query the Napatech service for all configured
- # streams and listen on all of them. When set to "no" the streams config array
- # will be used.
- use-all-streams: yes
+ # When use_all_streams is set to "yes" the initialization code will query
+ # the Napatech service for all configured streams and listen on all of them.
+ # When set to "no" the streams config array will be used.
+ #
+ # This option necessitates running the appropriate NTPL commands to create
+ # the desired streams prior to running Suricata.
+ #use-all-streams: no
- # The streams to listen on. This can be either:
- # a list of individual streams (e.g. streams: [0,1,2,3])
+ # The streams to listen on when auto-config is disabled or when threading
+ # cpu-affinity is disabled. This can be either:
+ # an individual stream (e.g. streams: [0])
# or
# a range of streams (e.g. streams: ["0-3"])
+ #
streams: ["0-3"]
-# Tilera mpipe configuration. for use on Tilera TILE-Gx.
-mpipe:
+ # When auto-config is enabled the streams will be created and assigned
+ # automatically to the NUMA node where the thread resides. If cpu-affinity
+ # is enabled in the threading section, then the streams will be created
+ # according to the number of worker threads specified in the worker cpu set.
+ # Otherwise, the streams array is used to define the streams.
+ #
+ # This option cannot be used simultaneously with "use-all-streams".
+ #
+ auto-config: yes
- # Load balancing modes: "static", "dynamic", "sticky", or "round-robin".
- load-balance: dynamic
+ # Ports indicates which Napatech ports are to be used in auto-config mode.
+ # These are the port IDs of the ports that will be merged prior to the
+ # traffic being distributed to the streams.
+ #
+ # This can be specified in any of the following ways:
+ #
+ # a list of individual ports (e.g. ports: [0,1,2,3])
+ #
+ # a range of ports (e.g. ports: [0-3])
+ #
+ # "all" to indicate that all ports are to be merged together
+ # (e.g. ports: [all])
+ #
+ # This has no effect if auto-config is disabled.
+ #
+ ports: [all]
- # Number of Packets in each ingress packet queue. Must be 128, 512, 2028 or 65536
- iqueue-packets: 2048
-
- # List of interfaces we will listen on.
- inputs:
- - interface: xgbe2
- - interface: xgbe3
- - interface: xgbe4
-
-
- # Relative weight of memory for packets of each mPipe buffer size.
- stack:
- size128: 0
- size256: 9
- size512: 0
- size1024: 0
- size1664: 7
- size4096: 0
- size10386: 0
- size16384: 0
+ # When auto-config is enabled the hashmode specifies the algorithm for
+ # determining to which stream a given packet is to be delivered.
+ # This can be any valid Napatech NTPL hashmode command.
+ #
+ # The most common hashmode commands are: hash2tuple, hash2tuplesorted,
+ # hash5tuple, hash5tuplesorted and roundrobin.
+ #
+ # See Napatech NTPL documentation for other hashmodes and details on their use.
+ #
+ # This has no effect if auto-config is disabled.
+ #
+ hashmode: hash5tuplesorted
##
## Configure Suricata to load Suricata-Update managed rules.
@@ -1841,77 +1861,9 @@ mpipe:
##
default-rule-path: /etc/suricata/rules
+
rule-files:
- - all.rules
-
-##
-## Advanced rule file configuration.
-##
-## If this section is completely commented out then your configuration
-## is setup for suricata-update as it was most likely bundled and
-## installed with Suricata.
-##
-
-#default-rule-path: /var/lib/suricata/rules
-
-#rule-files:
-# - botcc.rules
-# # - botcc.portgrouped.rules
-# - ciarmy.rules
-# - compromised.rules
-# - drop.rules
-# - dshield.rules
-## - emerging-activex.rules
-# - emerging-attack_response.rules
-# - emerging-chat.rules
-# - emerging-current_events.rules
-# - emerging-dns.rules
-# - emerging-dos.rules
-# - emerging-exploit.rules
-# - emerging-ftp.rules
-## - emerging-games.rules
-## - emerging-icmp_info.rules
-## - emerging-icmp.rules
-# - emerging-imap.rules
-## - emerging-inappropriate.rules
-## - emerging-info.rules
-# - emerging-malware.rules
-# - emerging-misc.rules
-# - emerging-mobile_malware.rules
-# - emerging-netbios.rules
-# - emerging-p2p.rules
-# - emerging-policy.rules
-# - emerging-pop3.rules
-# - emerging-rpc.rules
-## - emerging-scada.rules
-## - emerging-scada_special.rules
-# - emerging-scan.rules
-## - emerging-shellcode.rules
-# - emerging-smtp.rules
-# - emerging-snmp.rules
-# - emerging-sql.rules
-# - emerging-telnet.rules
-# - emerging-tftp.rules
-# - emerging-trojan.rules
-# - emerging-user_agents.rules
-# - emerging-voip.rules
-# - emerging-web_client.rules
-# - emerging-web_server.rules
-## - emerging-web_specific_apps.rules
-# - emerging-worm.rules
-# - tor.rules
-## - decoder-events.rules # available in suricata sources under rules dir
-## - stream-events.rules # available in suricata sources under rules dir
-# - http-events.rules # available in suricata sources under rules dir
-# - smtp-events.rules # available in suricata sources under rules dir
-# - dns-events.rules # available in suricata sources under rules dir
-# - tls-events.rules # available in suricata sources under rules dir
-## - modbus-events.rules # available in suricata sources under rules dir
-## - app-layer-events.rules # available in suricata sources under rules dir
-## - dnp3-events.rules # available in suricata sources under rules dir
-## - ntp-events.rules # available in suricata sources under rules dir
-## - ipsec-events.rules # available in suricata sources under rules dir
-## - kerberos-events.rules # available in suricata sources under rules dir
+ - all.rules
##
## Auxiliary configuration files.
diff --git a/salt/suricata/init.sls b/salt/suricata/init.sls
index 39f419ad0..0f3d49bc3 100644
--- a/salt/suricata/init.sls
+++ b/salt/suricata/init.sls
@@ -55,6 +55,12 @@ surilogdir:
- user: 940
- group: 939
+suridatadir:
+ file.directory:
+ - name: /nsm/suricata
+ - user: 940
+ - group: 939
+
surirulesync:
file.recurse:
- name: /opt/so/conf/suricata/rules/
@@ -119,6 +125,7 @@ so-suricata:
- /opt/so/conf/suricata/threshold.conf:/etc/suricata/threshold.conf:ro
- /opt/so/conf/suricata/rules:/etc/suricata/rules:ro
- /opt/so/log/suricata/:/var/log/suricata/:rw
+ - /nsm/suricata/:/nsm/:rw
- /opt/so/conf/suricata/bpf:/etc/suricata/bpf:ro
- network_mode: host
- watch:
diff --git a/salt/suricata/master.sls b/salt/suricata/master.sls
new file mode 100644
index 000000000..5998a484b
--- /dev/null
+++ b/salt/suricata/master.sls
@@ -0,0 +1,19 @@
+surilocaldir:
+ file.directory:
+ - name: /opt/so/saltstack/local/salt/suricata
+ - user: socore
+ - group: socore
+ - makedirs: True
+
+ruleslink:
+ file.symlink:
+ - name: /opt/so/saltstack/local/salt/suricata/rules
+ - user: socore
+ - group: socore
+ - target: /opt/so/rules/nids
+
+refresh_salt_master_fileserver_suricata_ruleslink:
+ salt.runner:
+ - name: fileserver.update
+ - onchanges:
+ - file: ruleslink
\ No newline at end of file
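For reference, the symlink created above exposes the idstools-managed NIDS rules through the local salt file roots. A minimal spot-check on the master (a sketch; assumes the state has already been applied):

    ls -l /opt/so/saltstack/local/salt/suricata/rules   # should point at /opt/so/rules/nids
    salt-run fileserver.update                          # the same refresh the onchanges runner performs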
diff --git a/salt/hive/thehive/etc/application.conf b/salt/thehive/etc/application.conf
similarity index 99%
rename from salt/hive/thehive/etc/application.conf
rename to salt/thehive/etc/application.conf
index 230d87d67..8630cb386 100644
--- a/salt/hive/thehive/etc/application.conf
+++ b/salt/thehive/etc/application.conf
@@ -12,7 +12,7 @@ search {
# Name of the index
index = the_hive
# Name of the Elasticsearch cluster
- cluster = hive
+ cluster = thehive
# Address of the Elasticsearch instance
host = ["{{ MASTERIP }}:9500"]
#search.uri = "http://{{ MASTERIP }}:9500"
diff --git a/salt/hive/thehive/etc/cortex-application.conf b/salt/thehive/etc/cortex-application.conf
similarity index 99%
rename from salt/hive/thehive/etc/cortex-application.conf
rename to salt/thehive/etc/cortex-application.conf
index 356bfd7b3..1a887cdb3 100644
--- a/salt/hive/thehive/etc/cortex-application.conf
+++ b/salt/thehive/etc/cortex-application.conf
@@ -12,7 +12,7 @@ search {
# Name of the index
index = cortex
# Name of the Elasticsearch cluster
- cluster = hive
+ cluster = thehive
# Address of the Elasticsearch instance
host = ["{{ MASTERIP }}:9500"]
# Scroll keepalive
diff --git a/salt/hive/thehive/etc/es/elasticsearch.yml b/salt/thehive/etc/es/elasticsearch.yml
similarity index 95%
rename from salt/hive/thehive/etc/es/elasticsearch.yml
rename to salt/thehive/etc/es/elasticsearch.yml
index d00c01d5d..7f268a671 100644
--- a/salt/hive/thehive/etc/es/elasticsearch.yml
+++ b/salt/thehive/etc/es/elasticsearch.yml
@@ -1,4 +1,4 @@
-cluster.name: "hive"
+cluster.name: "thehive"
network.host: 0.0.0.0
discovery.zen.minimum_master_nodes: 1
# This is a test -- if this is here, then the volume is mounted correctly.
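A quick way to confirm the renamed cluster is reachable (a sketch; the address is illustrative and port 9400 comes from the container's http.port setting):

    MASTERIP=10.0.0.10   # substitute the real master IP
    curl -s "http://$MASTERIP:9400/_cluster/health?pretty" | grep cluster_name
    # "cluster_name" : "thehive",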
diff --git a/salt/hive/thehive/etc/es/log4j2.properties b/salt/thehive/etc/es/log4j2.properties
similarity index 100%
rename from salt/hive/thehive/etc/es/log4j2.properties
rename to salt/thehive/etc/es/log4j2.properties
diff --git a/salt/hive/init.sls b/salt/thehive/init.sls
similarity index 70%
rename from salt/hive/init.sls
rename to salt/thehive/init.sls
index 2be2f7480..732fe4a77 100644
--- a/salt/hive/init.sls
+++ b/salt/thehive/init.sls
@@ -1,24 +1,24 @@
{% set MASTERIP = salt['pillar.get']('master:mainip', '') %}
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
{% set MASTER = salt['grains.get']('master') %}
-hiveconfdir:
+thehiveconfdir:
file.directory:
- - name: /opt/so/conf/hive/etc
+ - name: /opt/so/conf/thehive/etc
- makedirs: True
- user: 939
- group: 939
-hivelogdir:
+thehivelogdir:
file.directory:
- - name: /opt/so/log/hive
+ - name: /opt/so/log/thehive
- makedirs: True
- user: 939
- group: 939
-hiveconf:
+thehiveconf:
file.recurse:
- - name: /opt/so/conf/hive/etc
- - source: salt://hive/thehive/etc
+ - name: /opt/so/conf/thehive/etc
+ - source: salt://thehive/etc
- user: 939
- group: 939
- template: jinja
@@ -40,7 +40,7 @@ cortexlogdir:
cortexconf:
file.recurse:
- name: /opt/so/conf/cortex
- - source: salt://hive/thehive/etc
+ - source: salt://thehive/etc
- user: 939
- group: 939
- template: jinja
@@ -48,9 +48,9 @@ cortexconf:
# Install Elasticsearch
# Made directory for ES data to live in
-hiveesdata:
+thehiveesdata:
file.directory:
- - name: /nsm/hive/esdata
+ - name: /nsm/thehive/esdata
- makedirs: True
- user: 939
- group: 939
@@ -64,16 +64,16 @@ so-thehive-es:
- interactive: True
- tty: True
- binds:
- - /nsm/hive/esdata:/usr/share/elasticsearch/data:rw
- - /opt/so/conf/hive/etc/es/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro
- - /opt/so/conf/hive/etc/es/log4j2.properties:/usr/share/elasticsearch/config/log4j2.properties:ro
- - /opt/so/log/hive:/var/log/elasticsearch:rw
+ - /nsm/thehive/esdata:/usr/share/elasticsearch/data:rw
+ - /opt/so/conf/thehive/etc/es/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro
+ - /opt/so/conf/thehive/etc/es/log4j2.properties:/usr/share/elasticsearch/config/log4j2.properties:ro
+ - /opt/so/log/thehive:/var/log/elasticsearch:rw
- environment:
- http.host=0.0.0.0
- http.port=9400
- transport.tcp.port=9500
- transport.host=0.0.0.0
- - cluster.name=hive
+ - cluster.name=thehive
- thread_pool.index.queue_size=100000
- thread_pool.search.queue_size=100000
- thread_pool.bulk.queue_size=100000
@@ -90,13 +90,13 @@ so-cortex:
- name: so-cortex
- user: 939
- binds:
- - /opt/so/conf/hive/etc/cortex-application.conf:/opt/cortex/conf/application.conf:ro
+ - /opt/so/conf/thehive/etc/cortex-application.conf:/opt/cortex/conf/application.conf:ro
- port_bindings:
- 0.0.0.0:9001:9001
cortexscript:
cmd.script:
- - source: salt://hive/thehive/scripts/cortex_init
+ - source: salt://thehive/scripts/cortex_init
- cwd: /opt/so
- template: jinja
@@ -109,12 +109,12 @@ so-thehive:
- name: so-thehive
- user: 939
- binds:
- - /opt/so/conf/hive/etc/application.conf:/opt/thehive/conf/application.conf:ro
+ - /opt/so/conf/thehive/etc/application.conf:/opt/thehive/conf/application.conf:ro
- port_bindings:
- 0.0.0.0:9000:9000
-hivescript:
+thehivescript:
cmd.script:
- - source: salt://hive/thehive/scripts/hive_init
+ - source: salt://thehive/scripts/hive_init
- cwd: /opt/so
- template: jinja
diff --git a/salt/hive/thehive/scripts/cortex_init b/salt/thehive/scripts/cortex_init
similarity index 96%
rename from salt/hive/thehive/scripts/cortex_init
rename to salt/thehive/scripts/cortex_init
index 786039bf1..063ae498d 100644
--- a/salt/hive/thehive/scripts/cortex_init
+++ b/salt/thehive/scripts/cortex_init
@@ -7,6 +7,8 @@
{%- set CORTEXORGUSER = salt['pillar.get']('static:cortexorguser', '') %}
{%- set CORTEXORGUSERKEY = salt['pillar.get']('static:cortexorguserkey', '') %}
+default_salt_dir=/opt/so/saltstack/default
+
cortex_init(){
sleep 60
CORTEX_IP="{{MASTERIP}}"
@@ -17,7 +19,7 @@ cortex_init(){
CORTEX_ORG_DESC="{{CORTEXORGNAME}} organization created by Security Onion setup"
CORTEX_ORG_USER="{{CORTEXORGUSER}}"
CORTEX_ORG_USER_KEY="{{CORTEXORGUSERKEY}}"
- SOCTOPUS_CONFIG="/opt/so/saltstack/salt/soctopus/files/SOCtopus.conf"
+ SOCTOPUS_CONFIG="$default_salt_dir/salt/soctopus/files/SOCtopus.conf"
# Migrate DB
diff --git a/salt/thehive/scripts/hive_init b/salt/thehive/scripts/hive_init
new file mode 100755
index 000000000..296004e77
--- /dev/null
+++ b/salt/thehive/scripts/hive_init
@@ -0,0 +1,64 @@
+#!/bin/bash
+{% set MASTERIP = salt['pillar.get']('static:masterip', '') %}
+{%- set THEHIVEUSER = salt['pillar.get']('static:hiveuser', '') %}
+{%- set THEHIVEPASSWORD = salt['pillar.get']('static:hivepassword', '') %}
+{%- set THEHIVEKEY = salt['pillar.get']('static:hivekey', '') %}
+
+thehive_init(){
+ sleep 120
+ THEHIVE_IP="{{MASTERIP}}"
+ THEHIVE_USER="{{THEHIVEUSER}}"
+ THEHIVE_PASSWORD="{{THEHIVEPASSWORD}}"
+ THEHIVE_KEY="{{THEHIVEKEY}}"
+ SOCTOPUS_CONFIG="/opt/so/saltstack/default/salt/soctopus/files/SOCtopus.conf"
+
+ echo -n "Waiting for TheHive..."
+ COUNT=0
+ THEHIVE_CONNECTED="no"
+ while [[ "$COUNT" -le 240 ]]; do
+ curl --output /dev/null --silent --head --fail -k "https://$THEHIVE_IP/thehive"
+ if [ $? -eq 0 ]; then
+ THEHIVE_CONNECTED="yes"
+ echo "connected!"
+ break
+ else
+ ((COUNT+=1))
+ sleep 1
+ echo -n "."
+ fi
+ done
+
+ if [ "$THEHIVE_CONNECTED" == "yes" ]; then
+
+ # Migrate DB
+ curl -v -k -XPOST "https://$THEHIVE_IP/thehive/api/maintenance/migrate"
+
+ # Create initial TheHive user
+ curl -v -k "https://$THEHIVE_IP/thehive/api/user" -H "Content-Type: application/json" -d "{\"login\" : \"$THEHIVE_USER\",\"name\" : \"$THEHIVE_USER\",\"roles\" : [\"read\",\"alert\",\"write\",\"admin\"],\"preferences\" : \"{}\",\"password\" : \"$THEHIVE_PASSWORD\", \"key\": \"$THEHIVE_KEY\"}"
+
+ # Pre-load custom fields
+ #
+ # reputation
+ curl -v -k "https://$THEHIVE_IP/thehive/api/list/custom_fields" -H "Authorization: Bearer $THEHIVE_KEY" -H "Content-Type: application/json" -d "{\"value\":{\"name\": \"reputation\", \"reference\": \"reputation\", \"description\": \"This field provides an overall reputation status for an address/domain.\", \"type\": \"string\", \"options\": []}}"
+
+
+ touch /opt/so/state/thehive.txt
+ else
+ echo "We experienced an issue connecting to TheHive!"
+ fi
+}
+
+if [ -f /opt/so/state/thehive.txt ]; then
+ exit 0
+else
+ rm -f garbage_file
+ while ! wget -O garbage_file {{MASTERIP}}:9400 2>/dev/null
+ do
+ echo "Waiting for Elasticsearch..."
+ rm -f garbage_file
+ sleep 1
+ done
+ rm -f garbage_file
+ sleep 5
+ thehive_init
+fi
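The readiness loop above can be reproduced by hand when debugging (a sketch; the address is illustrative). Note the /opt/so/state/thehive.txt guard: remove that file to force hive_init to run again.

    MASTERIP=10.0.0.10   # substitute the real master IP
    curl --output /dev/null --silent --head --fail -k "https://$MASTERIP/thehive" \
      && echo "TheHive is up"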
diff --git a/salt/top.sls b/salt/top.sls
index 95acae1fd..52beeeae9 100644
--- a/salt/top.sls
+++ b/salt/top.sls
@@ -30,6 +30,7 @@ base:
- telegraf
- firewall
- idstools
+ - suricata.master
- pcap
- suricata
- zeek
@@ -73,6 +74,7 @@ base:
- soc
- firewall
- idstools
+ - suricata.master
- healthcheck
{%- if FLEETMASTER or FLEETNODE or PLAYBOOK != 0 %}
- mysql
@@ -100,7 +102,7 @@ base:
- schedule
- soctopus
{%- if THEHIVE != 0 %}
- - hive
+ - thehive
{%- endif %}
{%- if PLAYBOOK != 0 %}
- playbook
@@ -129,6 +131,7 @@ base:
- firewall
- master
- idstools
+ - suricata.master
- redis
{%- if FLEETMASTER or FLEETNODE or PLAYBOOK != 0 %}
- mysql
@@ -149,11 +152,14 @@ base:
{%- endif %}
- soctopus
{%- if THEHIVE != 0 %}
- - hive
+ - thehive
{%- endif %}
{%- if PLAYBOOK != 0 %}
- playbook
{%- endif %}
+ {%- if NAVIGATOR != 0 %}
+ - navigator
+ {%- endif %}
{%- if FREQSERVER != 0 %}
- freqserver
{%- endif %}
@@ -174,6 +180,7 @@ base:
- soc
- firewall
- idstools
+ - suricata.master
- healthcheck
- redis
{%- if FLEETMASTER or FLEETNODE or PLAYBOOK != 0 %}
@@ -203,7 +210,7 @@ base:
- schedule
- soctopus
{%- if THEHIVE != 0 %}
- - hive
+ - thehive
{%- endif %}
{%- if PLAYBOOK != 0 %}
- playbook
@@ -256,6 +263,7 @@ base:
- ca
- ssl
- common
+ - nginx
- telegraf
- firewall
{%- if WAZUH != 0 %}
@@ -297,6 +305,7 @@ base:
- firewall
- master
- idstools
+ - suricata.master
- redis
{%- if FLEETMASTER or FLEETNODE or PLAYBOOK != 0 %}
- mysql
@@ -318,7 +327,7 @@ base:
{%- endif %}
- soctopus
{%- if THEHIVE != 0 %}
- - hive
+ - thehive
{%- endif %}
{%- if PLAYBOOK != 0 %}
- playbook
diff --git a/salt/wazuh/files/wazuh-manager-whitelist b/salt/wazuh/files/wazuh-manager-whitelist
index ab4b15fd0..66dc13cd9 100755
--- a/salt/wazuh/files/wazuh-manager-whitelist
+++ b/salt/wazuh/files/wazuh-manager-whitelist
@@ -1,5 +1,7 @@
{%- set MASTERIP = salt['pillar.get']('static:masterip', '') %}
+{%- set WAZUH_ENABLED = salt['pillar.get']('static:wazuh', '0') %}
#!/bin/bash
+local_salt_dir=/opt/so/saltstack/local
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
@@ -17,7 +19,7 @@
# along with this program. If not, see .
# Check if Wazuh enabled
-if grep -q -R "wazuh: 1" /opt/so/saltstack/pillar/*; then
+if [ "{{ WAZUH_ENABLED }}" == "1" ]; then
WAZUH_MGR_CFG="/opt/so/wazuh/etc/ossec.conf"
if ! grep -q "{{ MASTERIP }}" $WAZUH_MGR_CFG ; then
DATE=`date`
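The grep across pillar files is replaced by a Jinja pillar lookup; what the template will see at render time can be checked with (a sketch):

    salt-call pillar.get static:wazuh   # 1 when Wazuh is enabled, else 0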
diff --git a/salt/wazuh/init.sls b/salt/wazuh/init.sls
index 54db40787..c483f07a0 100644
--- a/salt/wazuh/init.sls
+++ b/salt/wazuh/init.sls
@@ -80,11 +80,6 @@ wazuhmgrwhitelist:
- mode: 755
- template: jinja
-wazuhagentservice:
- service.running:
- - name: wazuh-agent
- - enable: True
-
so-wazuh:
docker_container.running:
- image: {{ MASTER }}:5000/soshybridhunter/so-wazuh:{{ VERSION }}
@@ -110,3 +105,8 @@ whitelistmanager:
cmd.run:
- name: /usr/sbin/wazuh-manager-whitelist
- cwd: /
+
+wazuhagentservice:
+ service.running:
+ - name: wazuh-agent
+ - enable: True
diff --git a/salt/zeek/cron/packetloss.sh b/salt/zeek/cron/packetloss.sh
index 51812edf5..c8750dd92 100755
--- a/salt/zeek/cron/packetloss.sh
+++ b/salt/zeek/cron/packetloss.sh
@@ -1,2 +1,2 @@
#!/bin/bash
-/usr/bin/docker exec so-zeek /opt/zeek/bin/zeekctl netstats | awk '{print $(NF-2),$(NF-1),$NF}' | awk -F '[ =]' '{RCVD += $2;DRP += $4;TTL += $6} END { print "rcvd: " RCVD, "dropped: " DRP, "total: " TTL}' >> /nsm/zeek/logs/packetloss.log
+/usr/bin/docker exec so-zeek /opt/zeek/bin/zeekctl netstats | awk '{print $(NF-2),$(NF-1),$NF}' | awk -F '[ =]' '{RCVD += $2;DRP += $4;TTL += $6} END { print "rcvd: " RCVD, "dropped: " DRP, "total: " TTL}' >> /nsm/zeek/logs/packetloss.log 2>&1
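The pipeline's aggregation can be verified against a synthetic netstats line (the line format below is illustrative):

    echo "worker-1-1: 1598000000.000000 recvd=1000 dropped=10 link=1010" \
      | awk '{print $(NF-2),$(NF-1),$NF}' \
      | awk -F '[ =]' '{RCVD += $2;DRP += $4;TTL += $6} END { print "rcvd: " RCVD, "dropped: " DRP, "total: " TTL}'
    # rcvd: 1000 dropped: 10 total: 1010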
diff --git a/setup/automation/pm_standalone_defaults b/setup/automation/pm_standalone_defaults
index b5a6258ff..ae4554a3f 100644
--- a/setup/automation/pm_standalone_defaults
+++ b/setup/automation/pm_standalone_defaults
@@ -21,6 +21,8 @@ address_type=DHCP
ADMINUSER=onionuser
ADMINPASS1=onionuser
ADMINPASS2=onionuser
+ALLOW_CIDR=0.0.0.0/0
+ALLOW_ROLE=a
BASICBRO=7
BASICSURI=7
# BLOGS=
@@ -65,6 +67,7 @@ PLAYBOOK=1
REDIRECTINFO=IP
RULESETUP=ETOPEN
# SHARDCOUNT=
+SKIP_REBOOT=1
SOREMOTEPASS1=onionuser
SOREMOTEPASS2=onionuser
STRELKA=1
diff --git a/setup/so-common-functions b/setup/so-common-functions
index 15cb3e686..fc380f85b 100644
--- a/setup/so-common-functions
+++ b/setup/so-common-functions
@@ -38,31 +38,3 @@ calculate_useable_cores() {
if [ "$lb_procs_round" -lt 1 ]; then lb_procs=1; else lb_procs=$lb_procs_round; fi
export lb_procs
}
-
-set_defaul_log_size() {
- local percentage
-
- case $INSTALLTYPE in
- EVAL | HEAVYNODE)
- percentage=50
- ;;
- *)
- percentage=80
- ;;
- esac
-
- local disk_dir="/"
- if [ -d /nsm ]; then
- disk_dir="/nsm"
- fi
- local disk_size_1k
- disk_size_1k=$(df $disk_dir | grep -v "^Filesystem" | awk '{print $2}')
-
- local ratio="1048576"
-
- local disk_size_gb
- disk_size_gb=$( echo "$disk_size_1k" "$ratio" | awk '{print($1/$2)}' )
-
- log_size_limit=$( echo "$disk_size_gb" "$percentage" | awk '{printf("%.0f", $1 * ($2/100))}')
- export log_size_limit
-}
diff --git a/setup/so-functions b/setup/so-functions
index 487b98f0b..fda0398a6 100755
--- a/setup/so-functions
+++ b/setup/so-functions
@@ -56,6 +56,19 @@ add_master_hostfile() {
whiptail_check_exitstatus $exitstatus
}
+addtotab_generate_templates() {
+
+ local addtotab_path=$local_salt_dir/pillar/data
+
+ for i in evaltab mastersearchtab mastertab nodestab sensorstab; do
+ printf '%s\n'\
+ "$i:"\
+ "" > "$addtotab_path"/$i.sls
+ echo "Added $i Template"
+ done
+
+}
+
# $5 => (optional) password variable
so_add_user() {
local username=$1
@@ -116,16 +129,16 @@ add_web_user() {
# Create an secrets pillar so that passwords survive re-install
secrets_pillar(){
- if [ ! -f /opt/so/saltstack/pillar/secrets.sls ]; then
+ if [ ! -f $local_salt_dir/pillar/secrets.sls ]; then
echo "Creating Secrets Pillar" >> "$setup_log" 2>&1
- mkdir -p /opt/so/saltstack/pillar
+ mkdir -p $local_salt_dir/pillar
printf '%s\n'\
"secrets:"\
" mysql: $MYSQLPASS"\
" playbook: $PLAYBOOKPASS"\
" fleet: $FLEETPASS"\
" fleet_jwt: $FLEETJWT"\
- " fleet_enroll-secret: False" > /opt/so/saltstack/pillar/secrets.sls
+ " fleet_enroll-secret: False" > $local_salt_dir/pillar/secrets.sls
fi
}
@@ -193,7 +206,7 @@ check_admin_pass() {
check_pass_match "$ADMINPASS1" "$ADMINPASS2" "APMATCH"
}
-check_hive_init_then_reboot() {
+check_hive_init() {
wait_for_file /opt/so/state/thehive.txt 20 5
local return_val=$?
@@ -203,7 +216,6 @@ check_hive_init_then_reboot() {
docker stop so-thehive
docker rm so-thehive
- shutdown -r now
}
check_network_manager_conf() {
@@ -261,7 +273,7 @@ clear_master() {
{
echo "Clearing old master key";
rm -f /etc/salt/pki/minion/minion_master.pub;
- sytemctl -q restart salt-minion;
+ systemctl -q restart salt-minion;
} >> "$setup_log" 2>&1
fi
@@ -355,10 +367,10 @@ configure_minion() {
"mysql.host: '$MAINIP'"\
"mysql.port: 3306"\
"mysql.user: 'root'" >> "$minion_config"
- if [ ! -f /opt/so/saltstack/pillar/secrets.sls ]; then
+ if [ ! -f $local_salt_dir/pillar/secrets.sls ]; then
echo "mysql.pass: '$MYSQLPASS'" >> "$minion_config"
else
- OLDPASS=$(grep "mysql" /opt/so/saltstack/pillar/secrets.sls | awk '{print $2}')
+ OLDPASS=$(grep "mysql" $local_salt_dir/pillar/secrets.sls | awk '{print $2}')
echo "mysql.pass: '$OLDPASS'" >> "$minion_config"
fi
;;
@@ -438,20 +450,20 @@ copy_master_config() {
copy_minion_tmp_files() {
case "$install_type" in
'MASTER' | 'EVAL' | 'HELIXSENSOR' | 'MASTERSEARCH' | 'STANDALONE')
- echo "Copying pillar and salt files in $temp_install_dir to /opt/so/saltstack"
- cp -Rv "$temp_install_dir"/pillar/ /opt/so/saltstack/ >> "$setup_log" 2>&1
+ echo "Copying pillar and salt files in $temp_install_dir to $local_salt_dir"
+ cp -Rv "$temp_install_dir"/pillar/ $local_salt_dir/ >> "$setup_log" 2>&1
if [ -d "$temp_install_dir"/salt ] ; then
- cp -Rv "$temp_install_dir"/salt/ /opt/so/saltstack/ >> "$setup_log" 2>&1
+ cp -Rv "$temp_install_dir"/salt/ $local_salt_dir/ >> "$setup_log" 2>&1
fi
;;
*)
{
- echo "scp pillar and salt files in $temp_install_dir to master /opt/so/saltstack";
+ echo "scp pillar and salt files in $temp_install_dir to master $local_salt_dir";
ssh -i /root/.ssh/so.key soremote@"$MSRV" mkdir -p /tmp/"$MINION_ID"/pillar;
ssh -i /root/.ssh/so.key soremote@"$MSRV" mkdir -p /tmp/"$MINION_ID"/schedules;
scp -prv -i /root/.ssh/so.key "$temp_install_dir"/pillar/minions/* soremote@"$MSRV":/tmp/"$MINION_ID"/pillar/;
scp -prv -i /root/.ssh/so.key "$temp_install_dir"/salt/patch/os/schedules/* soremote@"$MSRV":/tmp/"$MINION_ID"/schedules;
- ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo /opt/so/saltstack/salt/master/files/add_minion.sh "$MINION_ID";
+ ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/salt/master/files/add_minion.sh "$MINION_ID";
} >> "$setup_log" 2>&1
;;
esac
@@ -469,6 +481,20 @@ copy_ssh_key() {
ssh-copy-id -f -i /root/.ssh/so.key soremote@"$MSRV"
}
+create_local_directories() {
+ echo "Creating local pillar and salt directories"
+ PILLARSALTDIR=${SCRIPTDIR::-5}
+ for i in "pillar" "salt"; do
+ for d in $(find $PILLARSALTDIR/$i -type d); do
+ suffixdir=${d//$PILLARSALTDIR/}
+ if [ ! -d "$local_salt_dir/$suffixdir" ]; then
+ mkdir -v "$local_salt_dir$suffixdir" >> "$setup_log" 2>&1
+ fi
+ done
+ chown -R socore:socore "$local_salt_dir/$i"
+ done
+
+}
create_sensor_bond() {
echo "Setting up sensor bond" >> "$setup_log" 2>&1
@@ -588,14 +614,18 @@ disable_misc_network_features() {
filter_unused_nics
if [ ${#filtered_nics[@]} -ne 0 ]; then
for unused_nic in "${filtered_nics[@]}"; do
- # Disable DHCPv4/v6 and autoconnect
- nmcli con mod "$unused_nic" \
- ipv4.method disabled \
- ipv6.method ignore \
- connection.autoconnect "no" >> "$setup_log" 2>&1
+ if [ -n "$unused_nic" ]; then
+ echo "Disabling unused NIC: $unused_nic" >> "$setup_log" 2>&1
- # Flush any existing IPs
- ip addr flush "$unused_nic" >> "$setup_log" 2>&1
+ # Disable DHCPv4/v6 and autoconnect
+ nmcli con mod "$unused_nic" \
+ ipv4.method disabled \
+ ipv6.method ignore \
+ connection.autoconnect "no" >> "$setup_log" 2>&1
+
+ # Flush any existing IPs
+ ip addr flush "$unused_nic" >> "$setup_log" 2>&1
+ fi
done
fi
# Disable IPv6
@@ -612,9 +642,9 @@ docker_install() {
{
yum clean expire-cache;
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo;
- yum -y install docker-ce-19.03.9-3.el7 containerd.io-1.2.6-3.el7;
- yum versionlock docker-ce-19.03.9-3.el7;
- yum versionlock containerd.io-1.2.6-3.el7
+ yum -y install docker-ce-19.03.11-3.el7 containerd.io-1.2.13-3.2.el7;
+ yum versionlock docker-ce-19.03.11-3.el7;
+ yum versionlock containerd.io-1.2.13-3.2.el7
} >> "$setup_log" 2>&1
else
@@ -730,7 +760,7 @@ docker_seed_registry() {
fireeye_pillar() {
- local fireeye_pillar_path=/opt/so/saltstack/pillar/fireeye
+ local fireeye_pillar_path=$local_salt_dir/pillar/fireeye
mkdir -p "$fireeye_pillar_path"
printf '%s\n'\
@@ -744,7 +774,7 @@ fireeye_pillar() {
# Generate Firewall Templates
firewall_generate_templates() {
- local firewall_pillar_path=/opt/so/saltstack/pillar/firewall
+ local firewall_pillar_path=$local_salt_dir/pillar/firewall
mkdir -p "$firewall_pillar_path"
for i in analyst beats_endpoint forward_nodes masterfw minions osquery_endpoint search_nodes wazuh_endpoint
@@ -808,7 +838,7 @@ get_minion_type() {
'HELIXSENSOR')
minion_type='helix'
;;
- '*NODE')
+ *'NODE')
minion_type='node'
;;
esac
@@ -897,7 +927,7 @@ master_pillar() {
}
master_static() {
- local static_pillar="/opt/so/saltstack/pillar/static.sls"
+ local static_pillar="$local_salt_dir/pillar/static.sls"
# Create a static file for global values
printf '%s\n'\
@@ -995,54 +1025,6 @@ node_pillar() {
cat "$pillar_file" >> "$setup_log" 2>&1
}
-parse_options() {
- case "$1" in
- --turbo=*)
- local proxy
- proxy=$(echo "$1" | tr -d '"' | awk -F'--turbo=' '{print $2}')
- proxy_url="http://$proxy"
- TURBO="$proxy_url"
- ;;
- --proxy=*)
- local proxy
- proxy=$(echo "$1" | tr -d '"' | awk -F'--proxy=' '{print $2}')
-
- local proxy_protocol
- proxy_protocol=$(echo "$proxy" | awk 'match($0, /http|https/) { print substr($0, RSTART, RLENGTH) }')
-
- if [[ ! $proxy_protocol =~ ^(http|https)$ ]]; then
- echo "Invalid proxy protocol"
- echo "Ignoring proxy"
- return
- fi
-
- if [[ $2 == --proxy-user=* && $3 == --proxy-pass=* ]]; then
- local proxy_user
- local proxy_password
- proxy_user=$(echo "$2" | tr -d '"' | awk -F'--proxy-user=' '{print $2}')
- proxy_password=$(echo "$3" | tr -d '"' | awk -F'--proxy-pass=' '{print $2}')
-
- local proxy_addr
- proxy_addr=$(echo "$proxy" | awk -F'http\:\/\/|https\:\/\/' '{print $2}')
-
- export http_proxy="${proxy_protocol}://${proxy_user}:${proxy_password}@${proxy_addr}"
-
- elif [[ (-z $2 || -z $3) && (-n $2 || -n $3) || ( -n $2 && -n $3 && ($2 != --proxy-user=* || $3 != --proxy-pass=*) ) ]]; then
- echo "Invalid options passed for proxy. Order is --proxy-user= --proxy-pass="
- echo "Ignoring proxy"
- return
-
- else
- export http_proxy="$proxy"
- fi
-
- export {https,ftp,rsync,all}_proxy="$http_proxy"
- ;;
- *)
- echo "Invalid option"
- esac
-}
-
patch_pillar() {
local pillar_file=$temp_install_dir/pillar/minions/$MINION_ID.sls
@@ -1276,7 +1258,7 @@ salt_checkin() {
# Run a salt command to generate the minion key
salt_firstcheckin() {
- salt-call state.show_top >> /dev/null # send output to /dev/null because we don't actually care about the ouput
+ salt-call state.show_top >> /dev/null 2>&1 # send output to /dev/null because we don't actually care about the output
}
set_base_heapsizes() {
@@ -1290,16 +1272,18 @@ set_main_ip() {
setup_salt_master_dirs() {
# Create salt paster directories
- mkdir -p /opt/so/saltstack/salt
- mkdir -p /opt/so/saltstack/pillar
+ mkdir -p $default_salt_dir/pillar
+ mkdir -p $default_salt_dir/salt
+ mkdir -p $local_salt_dir/pillar
+ mkdir -p $local_salt_dir/salt
# Copy over the salt code and templates
if [ "$setup_type" = 'iso' ]; then
- rsync -avh --exclude 'TRANS.TBL' /home/onion/SecurityOnion/pillar/* /opt/so/saltstack/pillar/ >> "$setup_log" 2>&1
- rsync -avh --exclude 'TRANS.TBL' /home/onion/SecurityOnion/salt/* /opt/so/saltstack/salt/ >> "$setup_log" 2>&1
+ rsync -avh --exclude 'TRANS.TBL' /home/onion/SecurityOnion/pillar/* $default_salt_dir/pillar/ >> "$setup_log" 2>&1
+ rsync -avh --exclude 'TRANS.TBL' /home/onion/SecurityOnion/salt/* $default_salt_dir/salt/ >> "$setup_log" 2>&1
else
- cp -R ../pillar/* /opt/so/saltstack/pillar/ >> "$setup_log" 2>&1
- cp -R ../salt/* /opt/so/saltstack/salt/ >> "$setup_log" 2>&1
+ cp -R ../pillar/* $default_salt_dir/pillar/ >> "$setup_log" 2>&1
+ cp -R ../salt/* $default_salt_dir/salt/ >> "$setup_log" 2>&1
fi
echo "Chown the salt dirs on the master for socore" >> "$setup_log" 2>&1
@@ -1372,6 +1356,33 @@ sensor_pillar() {
cat "$pillar_file" >> "$setup_log" 2>&1
}
+set_default_log_size() {
+ local percentage
+
+ case $INSTALLTYPE in
+ EVAL | HEAVYNODE)
+ percentage=50
+ ;;
+ *)
+ percentage=80
+ ;;
+ esac
+
+ local disk_dir="/"
+ if [ -d /nsm ]; then
+ disk_dir="/nsm"
+ fi
+ local disk_size_1k
+ disk_size_1k=$(df $disk_dir | grep -v "^Filesystem" | awk '{print $2}')
+
+ local ratio="1048576"
+
+ local disk_size_gb
+ disk_size_gb=$( echo "$disk_size_1k" "$ratio" | awk '{print($1/$2)}' )
+
+ log_size_limit=$( echo "$disk_size_gb" "$percentage" | awk '{printf("%.0f", $1 * ($2/100))}')
+}
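Worked example of the math above (assuming df reports ~976 GB of 1K blocks on /nsm and an EVAL install, so percentage=50):

    echo "976762584 1048576" | awk '{print($1/$2)}'            # ≈ 931.47 GB
    echo "931.47 50" | awk '{printf("%.0f\n", $1 * ($2/100))}' # log_size_limit=466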
+
set_hostname() {
set_hostname_iso
@@ -1399,49 +1410,49 @@ set_initial_firewall_policy() {
set_main_ip
- if [ -f /opt/so/saltstack/pillar/data/addtotab.sh ]; then chmod +x /opt/so/saltstack/pillar/data/addtotab.sh; fi
- if [ -f /opt/so/saltstack/pillar/firewall/addfirewall.sh ]; then chmod +x /opt/so/saltstack/pillar/firewall/addfirewall.sh; fi
+ if [ -f $default_salt_dir/pillar/data/addtotab.sh ]; then chmod +x $default_salt_dir/pillar/data/addtotab.sh; fi
+ if [ -f $default_salt_dir/pillar/firewall/addfirewall.sh ]; then chmod +x $default_salt_dir/pillar/firewall/addfirewall.sh; fi
case "$install_type" in
'MASTER')
- printf " - %s\n" "$MAINIP" | tee -a /opt/so/saltstack/pillar/firewall/minions.sls /opt/so/saltstack/pillar/firewall/masterfw.sls
- /opt/so/saltstack/pillar/data/addtotab.sh mastertab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm"
+ printf " - %s\n" "$MAINIP" | tee -a $local_salt_dir/pillar/firewall/minions.sls $local_salt_dir/pillar/firewall/masterfw.sls
+ $default_salt_dir/pillar/data/addtotab.sh mastertab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm"
;;
'EVAL' | 'MASTERSEARCH')
- printf " - %s\n" "$MAINIP" | tee -a /opt/so/saltstack/pillar/firewall/minions.sls\
- /opt/so/saltstack/pillar/firewall/masterfw.sls\
- /opt/so/saltstack/pillar/firewall/forward_nodes.sls\
- /opt/so/saltstack/pillar/firewall/search_nodes.sls
+ printf " - %s\n" "$MAINIP" | tee -a $local_salt_dir/pillar/firewall/minions.sls\
+ $local_salt_dir/pillar/firewall/masterfw.sls\
+ $local_salt_dir/pillar/firewall/forward_nodes.sls\
+ $local_salt_dir/pillar/firewall/search_nodes.sls
case "$install_type" in
'EVAL')
- /opt/so/saltstack/pillar/data/addtotab.sh evaltab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm" bond0
+ $default_salt_dir/pillar/data/addtotab.sh evaltab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm" bond0 True
;;
'MASTERSEARCH')
- /opt/so/saltstack/pillar/data/addtotab.sh mastersearchtab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm"
+ $default_salt_dir/pillar/data/addtotab.sh mastersearchtab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm"
;;
esac
;;
'HELIXSENSOR')
- printf " - %s\n" "$MAINIP" | tee -a /opt/so/saltstack/pillar/firewall/minions.sls\
- /opt/so/saltstack/pillar/firewall/masterfw.sls\
- /opt/so/saltstack/pillar/firewall/forward_nodes.sls
+ printf " - %s\n" "$MAINIP" | tee -a $local_salt_dir/pillar/firewall/minions.sls\
+ $local_salt_dir/pillar/firewall/masterfw.sls\
+ $local_salt_dir/pillar/firewall/forward_nodes.sls
;;
'SENSOR' | 'SEARCHNODE' | 'HEAVYNODE' | 'FLEET')
- ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo /opt/so/saltstack/pillar/firewall/addfirewall.sh minions "$MAINIP"
+ ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/pillar/firewall/addfirewall.sh minions "$MAINIP"
case "$install_type" in
'SENSOR')
- ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo /opt/so/saltstack/pillar/firewall/addfirewall.sh forward_nodes "$MAINIP"
- ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo /opt/so/saltstack/pillar/data/addtotab.sh sensorstab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm" bond0
+ ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/pillar/firewall/addfirewall.sh forward_nodes "$MAINIP"
+ ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/pillar/data/addtotab.sh sensorstab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm" bond0
;;
'SEARCHNODE')
- ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo /opt/so/saltstack/pillar/firewall/addfirewall.sh search_nodes "$MAINIP"
- ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo /opt/so/saltstack/pillar/data/addtotab.sh nodestab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm"
+ ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/pillar/firewall/addfirewall.sh search_nodes "$MAINIP"
+ ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/pillar/data/addtotab.sh nodestab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm"
;;
'HEAVYNODE')
- ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo /opt/so/saltstack/pillar/firewall/addfirewall.sh forward_nodes "$MAINIP"
- ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo /opt/so/saltstack/pillar/firewall/addfirewall.sh search_nodes "$MAINIP"
- ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo /opt/so/saltstack/pillar/data/addtotab.sh sensorstab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm" bond0
- ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo /opt/so/saltstack/pillar/data/addtotab.sh nodestab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm"
+ ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/pillar/firewall/addfirewall.sh forward_nodes "$MAINIP"
+ ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/pillar/firewall/addfirewall.sh search_nodes "$MAINIP"
+ ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/pillar/data/addtotab.sh sensorstab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm" bond0
+ ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/pillar/data/addtotab.sh nodestab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm"
;;
esac
;;
@@ -1519,9 +1530,9 @@ update_sudoers() {
if ! grep -qE '^soremote\ ALL=\(ALL\)\ NOPASSWD:(\/usr\/bin\/salt\-key|\/opt\/so\/saltstack)' /etc/sudoers; then
# Update Sudoers so that soremote can accept keys without a password
echo "soremote ALL=(ALL) NOPASSWD:/usr/bin/salt-key" | tee -a /etc/sudoers
- echo "soremote ALL=(ALL) NOPASSWD:/opt/so/saltstack/pillar/firewall/addfirewall.sh" | tee -a /etc/sudoers
- echo "soremote ALL=(ALL) NOPASSWD:/opt/so/saltstack/pillar/data/addtotab.sh" | tee -a /etc/sudoers
- echo "soremote ALL=(ALL) NOPASSWD:/opt/so/saltstack/salt/master/files/add_minion.sh" | tee -a /etc/sudoers
+ echo "soremote ALL=(ALL) NOPASSWD:$default_salt_dir/pillar/firewall/addfirewall.sh" | tee -a /etc/sudoers
+ echo "soremote ALL=(ALL) NOPASSWD:$default_salt_dir/pillar/data/addtotab.sh" | tee -a /etc/sudoers
+ echo "soremote ALL=(ALL) NOPASSWD:$default_salt_dir/salt/master/files/add_minion.sh" | tee -a /etc/sudoers
else
echo "User soremote already granted sudo privileges" >> "$setup_log" 2>&1
fi
diff --git a/setup/so-setup b/setup/so-setup
index 92147f8f8..8e72a1b42 100755
--- a/setup/so-setup
+++ b/setup/so-setup
@@ -21,18 +21,40 @@ source ./so-common-functions
source ./so-whiptail
source ./so-variables
+# Parse command line arguments
setup_type=$1
-export setup_type
-
automation=$2
-automated=no
+while [[ $# -gt 0 ]]; do
+ arg="$1"
+ shift
+ case "$arg" in
+ "--turbo="* )
+ export TURBO="http://${arg#*=}";;
+ "--proxy="* )
+ export {http,https,ftp,rsync,all}_proxy="${arg#*=}";;
+ "--allow-role="* )
+ export ALLOW_ROLE="${arg#*=}";;
+ "--allow-cidr="* )
+ export ALLOW_CIDR="${arg#*=}";;
+ "--skip-reboot" )
+ export SKIP_REBOOT=1;;
+ * )
+ if [[ "$arg" == "--"* ]]; then
+ echo "Invalid option"
+ fi
+ esac
+done
+# Begin Installation pre-processing
echo "---- Starting setup at $(date -u) ----" >> $setup_log 2>&1
+automated=no
function progress() {
if [ $automated == no ]; then
whiptail --title "Security Onion Install" --gauge 'Please wait while installing' 6 60 0
+ else
+ cat >> $setup_log 2>&1
fi
}
@@ -41,7 +63,7 @@ if [[ -f automation/$automation && $(basename $automation) == $automation ]]; th
source automation/$automation
automated=yes
- echo "Checking network configuration" >> $setup_log 2>&1g
+ echo "Checking network configuration" >> $setup_log 2>&1
ip a >> $setup_log 2>&1
attempt=1
@@ -76,11 +98,6 @@ export PATH=$PATH:../salt/common/tools/sbin
got_root
-if [[ $# -gt 1 ]]; then
- set -- "${@:2}"
- parse_options "$@" >> $setup_log 2>&1
-fi
-
detect_os
if [ "$OS" == ubuntu ]; then
@@ -178,17 +195,21 @@ echo "MINION_ID = $MINION_ID" >> $setup_log 2>&1
minion_type=$(get_minion_type)
-# Set any constants needed
+# Set any variables needed
+set_default_log_size >> $setup_log 2>&1
+
if [[ $is_helix ]]; then
RULESETUP=ETOPEN
NSMSETUP=BASIC
HNSENSOR=inherit
MASTERUPDATES=0
fi
+
if [[ $is_helix || ( $is_master && $is_node ) ]]; then
RULESETUP=ETOPEN
NSMSETUP=BASIC
fi
+
if [[ $is_master && $is_node ]]; then
LSPIPELINEWORKERS=1
LSPIPELINEBATCH=125
@@ -197,6 +218,7 @@ if [[ $is_master && $is_node ]]; then
NIDS=Suricata
BROVERSION=ZEEK
fi
+
if [[ $is_node ]]; then
CURCLOSEDAYS=30
fi
@@ -339,22 +361,22 @@ fi
# Set initial percentage to 0
export percentage=0
- set_progress_str 1 'Updating packages'
+ if [[ $is_minion ]]; then
+ set_progress_str 1 'Configuring firewall'
+ set_initial_firewall_policy >> $setup_log 2>&1
+ fi
+
+ set_progress_str 2 'Updating packages'
update_packages >> $setup_log 2>&1
if [[ $is_sensor || $is_helix ]]; then
- set_progress_str 2 'Creating bond interface'
+ set_progress_str 3 'Creating bond interface'
create_sensor_bond >> $setup_log 2>&1
- set_progress_str 3 'Generating sensor pillar'
+ set_progress_str 4 'Generating sensor pillar'
sensor_pillar >> $setup_log 2>&1
fi
- if [[ $is_minion ]]; then
- set_progress_str 4 'Configuring firewall'
- set_initial_firewall_policy >> $setup_log 2>&1
- fi
-
set_progress_str 5 'Installing Salt and dependencies'
saltify 2>> $setup_log
@@ -370,6 +392,8 @@ fi
if [[ $is_master || $is_helix ]]; then
set_progress_str 10 'Configuring Salt master'
+ create_local_directories >> $setup_log 2>&1
+ addtotab_generate_templates >> $setup_log 2>&1
copy_master_config >> $setup_log 2>&1
setup_salt_master_dirs >> $setup_log 2>&1
firewall_generate_templates >> $setup_log 2>&1
@@ -430,12 +454,15 @@ fi
salt-call state.apply -l info registry >> $setup_log 2>&1
docker_seed_registry 2>> "$setup_log" # ~ 60% when finished
- set_progress_str 61 "$(print_salt_state_apply 'master')"
+ set_progress_str 60 "$(print_salt_state_apply 'master')"
salt-call state.apply -l info master >> $setup_log 2>&1
- set_progress_str 62 "$(print_salt_state_apply 'idstools')"
+ set_progress_str 61 "$(print_salt_state_apply 'idstools')"
salt-call state.apply -l info idstools >> $setup_log 2>&1
+ set_progress_str 61 "$(print_salt_state_apply 'suricata.master')"
+ salt-call state.apply -l info suricata.master >> $setup_log 2>&1
+
fi
set_progress_str 62 "$(print_salt_state_apply 'firewall')"
@@ -566,12 +593,17 @@ fi
success=$(tail -10 $setup_log | grep Failed | awk '{ print $2}')
if [[ "$success" = 0 ]]; then
whiptail_setup_complete
+ if [[ -n $ALLOW_ROLE && -n $ALLOW_CIDR ]]; then
+ export IP=$ALLOW_CIDR
+ so-allow -$ALLOW_ROLE >> $setup_log 2>&1
+ fi
if [[ $THEHIVE == 1 ]]; then
- check_hive_init_then_reboot
- else
- shutdown -r now
+ check_hive_init
fi
else
whiptail_setup_failed
+fi
+
+if [[ -z $SKIP_REBOOT ]]; then
shutdown -r now
fi
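An unattended run using the flags parsed above might look like this (a sketch; the proxy host and CIDR are illustrative):

    sudo ./so-setup iso automation/pm_standalone_defaults \
        --proxy=http://proxy.example.com:3128 \
        --allow-role=a --allow-cidr=192.168.1.0/24 --skip-reboot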
diff --git a/setup/so-variables b/setup/so-variables
index 786a4ca9b..e14a955ab 100644
--- a/setup/so-variables
+++ b/setup/so-variables
@@ -34,3 +34,8 @@ export temp_install_dir=/root/installtmp
export percentage_str='Getting started'
export DEBIAN_FRONTEND=noninteractive
+
+export default_salt_dir=/opt/so/saltstack/default
+export local_salt_dir=/opt/so/saltstack/local
+
+export SCRIPTDIR=$(cd "$(dirname "$0")" && pwd)
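For clarity, SCRIPTDIR feeds PILLARSALTDIR in create_local_directories via ${SCRIPTDIR::-5}, which strips the trailing "setup" (a sketch; the checkout path is hypothetical):

    SCRIPTDIR=/root/SecurityOnion/setup
    echo "${SCRIPTDIR::-5}"   # -> /root/SecurityOnion/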
diff --git a/setup/so-whiptail b/setup/so-whiptail
index dd6d1edac..693e53162 100755
--- a/setup/so-whiptail
+++ b/setup/so-whiptail
@@ -24,7 +24,7 @@ whiptail_basic_bro() {
[ -n "$TESTING" ] && return
BASICBRO=$(whiptail --title "Security Onion Setup" --inputbox \
- "Enter the number of bro processes:" 10 75 "$lb_procs" 3>&1 1>&2 2>&3)
+ "Enter the number of zeek processes:" 10 75 "$lb_procs" 3>&1 1>&2 2>&3)
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
@@ -51,7 +51,7 @@ whiptail_bro_pins() {
cpu_core_list_whiptail+=("$item" "OFF")
done
- BROPINS=$(whiptail --noitem --title "Pin Bro CPUS" --checklist "Please select $lb_procs cores to pin Bro to:" 20 75 12 "${cpu_core_list_whiptail[@]}" 3>&1 1>&2 2>&3 )
+ BROPINS=$(whiptail --noitem --title "Pin Zeek CPUS" --checklist "Please select $lb_procs cores to pin Zeek to:" 20 75 12 "${cpu_core_list_whiptail[@]}" 3>&1 1>&2 2>&3 )
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
@@ -458,7 +458,6 @@ whiptail_log_size_limit() {
[ -n "$TESTING" ] && return
- set_defaul_log_size
log_size_limit=$(whiptail --title "Security Onion Setup" --inputbox \
"Please specify the amount of disk space (in GB) you would like to allocate for Elasticsearch data storage. \
diff --git a/upgrade/so-update-functions b/upgrade/so-update-functions
index 5666fc2d6..dd4235902 100644
--- a/upgrade/so-update-functions
+++ b/upgrade/so-update-functions
@@ -95,9 +95,9 @@ copy_new_files() {
# Copy new files over to the salt dir
cd /tmp/sogh/securityonion-saltstack
- rsync -a --exclude-from 'exclude-list.txt' salt /opt/so/saltstack/
- chown -R socore:socore /opt/so/saltstack/salt
- chmod 755 /opt/so/saltstack/pillar/firewall/addfirewall.sh
+ rsync -a --exclude-from 'exclude-list.txt' salt $default_salt_dir/
+ chown -R socore:socore $default_salt_dir/salt
+ chmod 755 $default_salt_dir/pillar/firewall/addfirewall.sh
cd /tmp
}