Mirror of https://github.com/Security-Onion-Solutions/securityonion.git (synced 2026-05-06 03:17:53 +02:00)

Merge remote-tracking branch 'remotes/origin/dev' into feature/fleet-setup
@@ -62,6 +62,7 @@ commonpkgs:
       - python3-dateutil
       - python3-m2crypto
       - python3-mysqldb
+      - git
 heldpackages:
   pkg.installed:
     - pkgs:
@@ -96,12 +97,13 @@ commonpkgs:
       - device-mapper-persistent-data
       - lvm2
       - openssl
+      - git
 
 heldpackages:
   pkg.installed:
     - pkgs:
       - containerd.io: 1.2.13-3.2.el7
-      - docker-ce: 3:19.03.9-3.el7
+      - docker-ce: 3:19.03.11-3.el7
     - hold: True
     - update_holds: True
{% endif %}
@@ -128,4 +130,4 @@ utilsyncscripts:
     - group: 0
     - file_mode: 755
     - template: jinja
-    - source: salt://common/tools/sbin
+    - source: salt://common/tools/sbin
@@ -17,6 +17,9 @@
 
 . /usr/sbin/so-common
 
+default_salt_dir=/opt/so/saltstack/default
+local_salt_dir=/opt/so/saltstack/local
+
 SKIP=0
 
 while getopts "abowi:" OPTION
@@ -80,10 +83,10 @@ if [ "$SKIP" -eq 0 ]; then
 fi
 
 echo "Adding $IP to the $FULLROLE role. This can take a few seconds"
-/opt/so/saltstack/pillar/firewall/addfirewall.sh $FULLROLE $IP
+$default_salt_dir/pillar/firewall/addfirewall.sh $FULLROLE $IP
 
 # Check if Wazuh enabled
-if grep -q -R "wazuh: 1" /opt/so/saltstack/pillar/*; then
+if grep -q -R "wazuh: 1" $local_salt_dir/pillar/*; then
   # If analyst, add to Wazuh AR whitelist
   if [ "$FULLROLE" == "analyst" ]; then
     WAZUH_MGR_CFG="/opt/so/wazuh/etc/ossec.conf"
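
A quick smoke test for this script after the path change (invocation sketch; the getopts string "abowi:" above takes single-letter role flags plus -i with an address, and the assumption that -a selects the analyst role is not shown in this hunk):

    # Hypothetical example: allow an analyst workstation through the firewall
    sudo so-allow -a -i 192.0.2.50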
@@ -1,11 +1,12 @@
 #!/bin/bash
+local_salt_dir=/opt/so/saltstack/local
 
 bro_logs_enabled() {
 
-  echo "brologs:" > /opt/so/saltstack/pillar/brologs.sls
-  echo "  enabled:" >> /opt/so/saltstack/pillar/brologs.sls
+  echo "brologs:" > $local_salt_dir/pillar/brologs.sls
+  echo "  enabled:" >> $local_salt_dir/pillar/brologs.sls
   for BLOG in ${BLOGS[@]}; do
-    echo "    - $BLOG" | tr -d '"' >> /opt/so/saltstack/pillar/brologs.sls
+    echo "    - $BLOG" | tr -d '"' >> $local_salt_dir/pillar/brologs.sls
   done
 
 }
@@ -1,5 +1,5 @@
 #!/bin/bash
 
 #
 # Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
 #
 # This program is free software: you can redistribute it and/or modify
@@ -17,4 +17,5 @@
 
 . /usr/sbin/so-common
 
-/usr/sbin/so-restart cortex $1
+/usr/sbin/so-stop cortex $1
+/usr/sbin/so-start thehive $1
@@ -17,4 +17,4 @@
 
 . /usr/sbin/so-common
 
-/usr/sbin/so-start cortex $1
+/usr/sbin/so-start thehive $1
@@ -1,5 +1,5 @@
 #!/bin/bash
 
 #
 # Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
 #
 # This program is free software: you can redistribute it and/or modify
@@ -0,0 +1,112 @@
#!/bin/bash

# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

got_root(){
  if [ "$(id -u)" -ne 0 ]; then
    echo "This script must be run using sudo!"
    exit 1
  fi
}

master_check() {
  # Check to see if this is a master
  MASTERCHECK=$(grep role /etc/salt/grains | awk '{print $2}')
  if [ "$MASTERCHECK" == 'so-eval' ] || [ "$MASTERCHECK" == 'so-master' ] || [ "$MASTERCHECK" == 'so-mastersearch' ] || [ "$MASTERCHECK" == 'so-standalone' ] || [ "$MASTERCHECK" == 'so-helix' ]; then
    echo "This is a master. We can proceed."
  else
    echo "Please run soup on the master. The master controls all updates."
    exit 1
  fi
}

update_docker_containers() {

  # Download the containers from the interwebs
  for i in "${TRUSTED_CONTAINERS[@]}"
  do
    # Pull down the trusted docker image
    echo "Downloading $i"
    docker pull --disable-content-trust=false docker.io/soshybridhunter/$i
    # Tag it with the new registry destination
    docker tag soshybridhunter/$i $HOSTNAME:5000/soshybridhunter/$i
    docker push $HOSTNAME:5000/soshybridhunter/$i
  done

}

version_check() {
  if [ -f /etc/soversion ]; then
    VERSION=$(cat /etc/soversion)
  else
    echo "Unable to detect version. I will now terminate."
    exit 1
  fi
}

got_root
master_check
version_check

# Use the hostname
HOSTNAME=$(hostname)
BUILD=HH
# List all the containers
if [ "$MASTERCHECK" != 'so-helix' ]; then
  TRUSTED_CONTAINERS=( \
  "so-acng:$BUILD$VERSION" \
  "so-thehive-cortex:$BUILD$VERSION" \
  "so-curator:$BUILD$VERSION" \
  "so-domainstats:$BUILD$VERSION" \
  "so-elastalert:$BUILD$VERSION" \
  "so-elasticsearch:$BUILD$VERSION" \
  "so-filebeat:$BUILD$VERSION" \
  "so-fleet:$BUILD$VERSION" \
  "so-fleet-launcher:$BUILD$VERSION" \
  "so-freqserver:$BUILD$VERSION" \
  "so-grafana:$BUILD$VERSION" \
  "so-idstools:$BUILD$VERSION" \
  "so-influxdb:$BUILD$VERSION" \
  "so-kibana:$BUILD$VERSION" \
  "so-kratos:$BUILD$VERSION" \
  "so-logstash:$BUILD$VERSION" \
  "so-mysql:$BUILD$VERSION" \
  "so-navigator:$BUILD$VERSION" \
  "so-nginx:$BUILD$VERSION" \
  "so-playbook:$BUILD$VERSION" \
  "so-redis:$BUILD$VERSION" \
  "so-soc:$BUILD$VERSION" \
  "so-soctopus:$BUILD$VERSION" \
  "so-steno:$BUILD$VERSION" \
  "so-strelka:$BUILD$VERSION" \
  "so-suricata:$BUILD$VERSION" \
  "so-telegraf:$BUILD$VERSION" \
  "so-thehive:$BUILD$VERSION" \
  "so-thehive-es:$BUILD$VERSION" \
  "so-wazuh:$BUILD$VERSION" \
  "so-zeek:$BUILD$VERSION" )
else
  TRUSTED_CONTAINERS=( \
  "so-filebeat:$BUILD$VERSION" \
  "so-idstools:$BUILD$VERSION" \
  "so-logstash:$BUILD$VERSION" \
  "so-nginx:$BUILD$VERSION" \
  "so-redis:$BUILD$VERSION" \
  "so-steno:$BUILD$VERSION" \
  "so-suricata:$BUILD$VERSION" \
  "so-telegraf:$BUILD$VERSION" \
  "so-zeek:$BUILD$VERSION" )
fi

update_docker_containers
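
Since update_docker_containers re-tags every image against the local registry on port 5000, one way to confirm a push landed is the Docker Registry v2 HTTP API (a sketch; it assumes the registry answers plain HTTP — add https:// and -k if the registry cert created elsewhere in this commit applies):

    # List repositories held by the local registry
    curl -s http://"$(hostname)":5000/v2/_catalog
    # List tags for one image (so-nginx used as an example)
    curl -s http://"$(hostname)":5000/v2/soshybridhunter/so-nginx/tags/list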
@@ -15,12 +15,13 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
+default_salt_dir=/opt/so/saltstack/default
 ELASTICSEARCH_HOST="{{ MASTERIP }}"
 ELASTICSEARCH_PORT=9200
 #ELASTICSEARCH_AUTH=""
 
 # Define a default directory to load pipelines from
-ELASTICSEARCH_TEMPLATES="/opt/so/saltstack/salt/logstash/pipelines/templates/so/"
+ELASTICSEARCH_TEMPLATES="$default_salt_dir/salt/logstash/pipelines/templates/so/"
 
 # Wait for ElasticSearch to initialize
 echo -n "Waiting for ElasticSearch..."
@@ -15,10 +15,11 @@
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 . /usr/sbin/so-common
+local_salt_dir=/opt/so/saltstack/local
 
-VERSION=$(grep soversion /opt/so/saltstack/pillar/static.sls | cut -d':' -f2|sed 's/ //g')
+VERSION=$(grep soversion $local_salt_dir/pillar/static.sls | cut -d':' -f2|sed 's/ //g')
 # Modify static.sls to enable Features
-sed -i 's/features: False/features: True/' /opt/so/saltstack/pillar/static.sls
+sed -i 's/features: False/features: True/' $local_salt_dir/pillar/static.sls
 SUFFIX="-features"
 TRUSTED_CONTAINERS=( \
 "so-elasticsearch:$VERSION$SUFFIX" \
@@ -1,4 +1,7 @@
 #!/bin/bash
 
+local_salt_dir=/opt/so/saltstack/local
+
 got_root() {
 
   # Make sure you are root
@@ -10,13 +13,13 @@ got_root() {
 }
 
 got_root
-if [ ! -f /opt/so/saltstack/pillar/fireeye/init.sls ]; then
+if [ ! -f $local_salt_dir/pillar/fireeye/init.sls ]; then
   echo "This is not configured for Helix Mode. Please re-install."
   exit
 else
   echo "Enter your Helix API Key: "
   read APIKEY
-  sed -i "s/^  api_key.*/  api_key: $APIKEY/g" /opt/so/saltstack/pillar/fireeye/init.sls
+  sed -i "s/^  api_key.*/  api_key: $APIKEY/g" $local_salt_dir/pillar/fireeye/init.sls
   docker stop so-logstash
   docker rm so-logstash
   echo "Restarting Logstash for updated key"
@@ -0,0 +1,57 @@
#!/bin/bash

# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

default_salt_dir=/opt/so/saltstack/default

clone_to_tmp() {

  # TODO Need to add an air gap option
  # Make a temp location for the files
  mkdir /tmp/sogh
  cd /tmp/sogh
  #git clone -b dev https://github.com/Security-Onion-Solutions/securityonion-saltstack.git
  git clone https://github.com/Security-Onion-Solutions/securityonion-saltstack.git
  cd /tmp

}

copy_new_files() {

  # Copy new files over to the salt dir
  cd /tmp/sogh/securityonion-saltstack
  git checkout $BRANCH
  rsync -a --exclude-from 'exclude-list.txt' salt $default_salt_dir/
  rsync -a --exclude-from 'exclude-list.txt' pillar $default_salt_dir/
  chown -R socore:socore $default_salt_dir/salt
  chown -R socore:socore $default_salt_dir/pillar
  chmod 755 $default_salt_dir/pillar/firewall/addfirewall.sh
  rm -rf /tmp/sogh
}

got_root(){
  if [ "$(id -u)" -ne 0 ]; then
    echo "This script must be run using sudo!"
    exit 1
  fi
}

got_root
if [ $# -ne 1 ] ; then
  BRANCH=master
else
  BRANCH=$1
fi
clone_to_tmp
copy_new_files
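
Called with no argument the script tracks master; with one argument it checks out that branch before rsyncing into the default salt tree (invocation sketch; the installed name of this file is not shown in the hunk, so the name below is hypothetical):

    sudo ./update-saltstack.sh        # hypothetical name, defaults to the master branch
    sudo ./update-saltstack.sh dev    # sync from the dev branch instead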
@@ -32,5 +32,5 @@ fi
 case $1 in
   "all") salt-call state.highstate queue=True;;
   "steno") if docker ps | grep -q so-$1; then printf "\n$1 is already running!\n\n"; else docker rm so-$1 >/dev/null 2>&1 ; salt-call state.apply pcap queue=True; fi ;;
-  *) if docker ps | grep -q so-$1; then printf "\n$1 is already running\n\n"; else docker rm so-$1 >/dev/null 2>&1 ; salt-call state.apply $1 queue=True; fi ;;
+  *) if docker ps | grep -E -q "^so-$1$"; then printf "\n$1 is already running\n\n"; else docker rm so-$1 >/dev/null 2>&1 ; salt-call state.apply $1 queue=True; fi ;;
esac
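
One caveat with the anchored pattern: plain docker ps prints a whole status line per container, so an expression anchored with ^ and $ never matches it. Matching against the name column alone behaves as intended (a sketch using Docker's --format flag; not what this commit shipped):

    # Compare against the name column only, avoiding substring hits
    # such as so-thehive matching so-thehive-es
    if docker ps --format '{{.Names}}' | grep -E -q "^so-$1$"; then
      printf "\n$1 is already running\n\n"
    fi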
@@ -0,0 +1,21 @@
#!/bin/bash
#
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

. /usr/sbin/so-common

/usr/sbin/so-stop thehive-es $1
/usr/sbin/so-start thehive $1
@@ -0,0 +1,20 @@
#!/bin/bash

# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

. /usr/sbin/so-common

/usr/sbin/so-start thehive $1
@@ -0,0 +1,20 @@
#!/bin/bash
#
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

. /usr/sbin/so-common

/usr/sbin/so-stop thehive-es $1
@@ -1,5 +1,5 @@
 #!/bin/bash
 
 #
 # Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
 #
 # This program is free software: you can redistribute it and/or modify
@@ -1,5 +1,5 @@
 #!/bin/bash
 
 #
 # Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
 #
 # This program is free software: you can redistribute it and/or modify
@@ -0,0 +1,39 @@
#!/bin/bash

# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

# Show Zeek stats (capstats, netstats)

show_stats() {
  echo '##############'
  echo '# Zeek Stats #'
  echo '##############'
  echo
  echo "Average throughput:"
  echo
  docker exec -it so-zeek /opt/zeek/bin/zeekctl capstats
  echo
  echo "Average packet loss:"
  echo
  docker exec -it so-zeek /opt/zeek/bin/zeekctl netstats
  echo
}

if docker ps | grep -q zeek; then
  show_stats
else
  echo "Zeek is not running! Try starting it with 'so-zeek-start'." && exit 1;
fi
@@ -89,7 +89,7 @@ curdel:
 
 so-curatorcloseddeletecron:
   cron.present:
-    - name: /usr/sbin/so-curator-closed-delete
+    - name: /usr/sbin/so-curator-closed-delete > /opt/so/log/curator/cron-closed-delete.log 2>&1
     - user: root
     - minute: '*'
     - hour: '*'
@@ -99,7 +99,7 @@ so-curatorcloseddeletecron:
 
 so-curatorclosecron:
   cron.present:
-    - name: /usr/sbin/so-curator-close
+    - name: /usr/sbin/so-curator-close > /opt/so/log/curator/cron-close.log 2>&1
     - user: root
     - minute: '*'
     - hour: '*'
@@ -109,7 +109,7 @@ so-curatorclosecron:
 
 so-curatordeletecron:
   cron.present:
-    - name: /usr/sbin/so-curator-delete
+    - name: /usr/sbin/so-curator-delete > /opt/so/log/curator/cron-delete.log 2>&1
     - user: root
     - minute: '*'
     - hour: '*'
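
Each of these cron.present states renders to an ordinary root crontab entry; with minute and hour both '*', the first one comes out roughly as (illustrative rendering):

    * * * * * /usr/sbin/so-curator-closed-delete > /opt/so/log/curator/cron-closed-delete.log 2>&1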
@@ -0,0 +1,35 @@
{
  "description" : "beats.common",
  "processors" : [
    {"community_id": {"if": "ctx.winlog.event_data?.Protocol != null", "field":["winlog.event_data.SourceIp","winlog.event_data.SourcePort","winlog.event_data.DestinationIp","winlog.event_data.DestinationPort","winlog.event_data.Protocol"],"target_field":"network.community_id"}},
    { "set": { "if": "ctx.winlog?.channel != null", "field": "dataset", "value": "wel-{{winlog.channel}}", "override": true } },
    { "set": { "if": "ctx.agent?.type != null", "field": "module", "value": "{{agent.type}}", "override": true } },
    { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 3", "field": "event.category", "value": "host,process,network", "override": true } },
    { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 1", "field": "event.category", "value": "host,process", "override": true } },
    { "rename": { "field": "agent.hostname", "target_field": "agent.name", "ignore_missing": true } },
    { "rename": { "field": "winlog.event_data.SubjectUserName", "target_field": "user.name", "ignore_missing": true } },
    { "rename": { "field": "winlog.event_data.DestinationHostname", "target_field": "destination.hostname", "ignore_missing": true } },
    { "rename": { "field": "winlog.event_data.DestinationIp", "target_field": "destination.ip", "ignore_missing": true } },
    { "rename": { "field": "winlog.event_data.DestinationPort", "target_field": "destination.port", "ignore_missing": true } },
    { "rename": { "field": "winlog.event_data.Image", "target_field": "process.executable", "ignore_missing": true } },
    { "rename": { "field": "winlog.event_data.ProcessID", "target_field": "process.pid", "ignore_missing": true } },
    { "rename": { "field": "winlog.event_data.ProcessGuid", "target_field": "process.entity_id", "ignore_missing": true } },
    { "rename": { "field": "winlog.event_data.CommandLine", "target_field": "process.command_line", "ignore_missing": true } },
    { "rename": { "field": "winlog.event_data.CurrentDirectory", "target_field": "process.working_directory", "ignore_missing": true } },
    { "rename": { "field": "winlog.event_data.Description", "target_field": "process.pe.description", "ignore_missing": true } },
    { "rename": { "field": "winlog.event_data.Product", "target_field": "process.pe.product", "ignore_missing": true } },
    { "rename": { "field": "winlog.event_data.OriginalFileName", "target_field": "process.pe.original_file_name", "ignore_missing": true } },
    { "rename": { "field": "winlog.event_data.FileVersion", "target_field": "process.pe.file_version", "ignore_missing": true } },
    { "rename": { "field": "winlog.event_data.ParentCommandLine", "target_field": "process.parent.command_line", "ignore_missing": true } },
    { "rename": { "field": "winlog.event_data.ParentImage", "target_field": "process.parent.executable", "ignore_missing": true } },
    { "rename": { "field": "winlog.event_data.ParentProcessGuid", "target_field": "process.parent.entity_id", "ignore_missing": true } },
    { "rename": { "field": "winlog.event_data.ParentProcessId", "target_field": "process.ppid", "ignore_missing": true } },
    { "rename": { "field": "winlog.event_data.Protocol", "target_field": "network.transport", "ignore_missing": true } },
    { "rename": { "field": "winlog.event_data.User", "target_field": "user.name", "ignore_missing": true } },
    { "rename": { "field": "winlog.event_data.SourceHostname", "target_field": "source.hostname", "ignore_missing": true } },
    { "rename": { "field": "winlog.event_data.SourceIp", "target_field": "source.ip", "ignore_missing": true } },
    { "rename": { "field": "winlog.event_data.SourcePort", "target_field": "source.port", "ignore_missing": true } },
    { "rename": { "field": "winlog.event_data.targetFilename", "target_field": "file.target", "ignore_missing": true } },
    { "pipeline": { "name": "common" } }
  ]
}
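
Ingest pipelines like this can be exercised before indexing anything via Elasticsearch's simulate API (a sketch; <masterip> is a placeholder, the sample document is made up, and the chained "common" pipeline must already be loaded for the final processor to resolve):

    curl -s -XPOST "http://<masterip>:9200/_ingest/pipeline/beats.common/_simulate" \
      -H 'Content-Type: application/json' -d '
    {
      "docs": [
        { "_source": { "agent": { "type": "winlogbeat", "hostname": "dc01" },
                       "winlog": { "channel": "Security",
                                   "event_data": { "SubjectUserName": "jsmith" } } } }
      ]
    }'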
@@ -0,0 +1,17 @@
{
  "description" : "syslog",
  "processors" : [
    {
      "dissect": {
        "field": "message",
        "pattern" : "%{message}",
        "on_failure": [ { "drop" : { } } ]
      },
      "remove": {
        "field": [ "type", "agent" ],
        "ignore_failure": true
      }
    },
    { "pipeline": { "name": "common" } }
  ]
}
@@ -17,7 +17,7 @@
 { "rename": { "field": "message2.orig_ip_bytes", "target_field": "client.ip_bytes", "ignore_missing": true } },
 { "rename": { "field": "message2.resp_pkts", "target_field": "server.packets", "ignore_missing": true } },
 { "rename": { "field": "message2.resp_ip_bytes", "target_field": "server.ip_bytes", "ignore_missing": true } },
-{ "rename": { "field": "message2.tunnel_parents", "target_field": "connection.tunnel_parents", "ignore_missing": true } },
+{ "rename": { "field": "message2.tunnel_parents", "target_field": "log.id.tunnel_parents", "ignore_missing": true } },
 { "rename": { "field": "message2.orig_cc", "target_field": "client.country_code","ignore_missing": true } },
 { "rename": { "field": "message2.resp_cc", "target_field": "server.country_code", "ignore_missing": true } },
 { "rename": { "field": "message2.sensorname", "target_field": "observer.name", "ignore_missing": true } },
@@ -5,7 +5,7 @@
 { "json": { "field": "message", "target_field": "message2", "ignore_failure": true } },
 { "rename": { "field": "message2.username", "target_field": "user.name", "ignore_missing": true } },
 { "rename": { "field": "message2.mac", "target_field": "host.mac", "ignore_missing": true } },
-{ "rename": { "field": "message2.framed_addr", "target_field": "framed_addr", "ignore_missing": true } },
+{ "rename": { "field": "message2.framed_addr", "target_field": "radius.framed_address", "ignore_missing": true } },
 { "rename": { "field": "message2.remote_ip", "target_field": "destination.ip", "ignore_missing": true } },
 { "rename": { "field": "message2.connect_info", "target_field": "radius.connect_info", "ignore_missing": true } },
 { "rename": { "field": "message2.reply_msg", "target_field": "radius.reply_message", "ignore_missing": true } },
@@ -3,7 +3,7 @@
 "processors" : [
 { "remove": { "field": ["host"], "ignore_failure": true } },
 { "json": { "field": "message", "target_field": "message2", "ignore_failure": true } },
-{ "rename": { "field": "message2.uid", "target_field": "uid", "ignore_missing": true } },
+{ "rename": { "field": "message2.uid", "target_field": "log.id.uid", "ignore_missing": true } },
 { "dot_expander": { "field": "id.orig_h", "path": "message2", "ignore_failure": true } },
 { "rename": { "field": "message2.id.orig_h", "target_field": "source.ip", "ignore_missing": true } },
 { "dot_expander": { "field": "id.orig_p", "path": "message2", "ignore_failure": true } },
@@ -75,6 +75,32 @@ filebeat.modules:
 filebeat.inputs:
 #------------------------------ Log prospector --------------------------------
 {%- if grains['role'] == 'so-sensor' or grains['role'] == "so-eval" or grains['role'] == "so-helix" or grains['role'] == "so-heavynode" or grains['role'] == "so-standalone" %}
+
+- type: udp
+  enabled: true
+  host: "0.0.0.0:514"
+  fields:
+    module: syslog
+    dataset: syslog
+    pipeline: "syslog"
+    index: "so-syslog-%{+yyyy.MM.dd}"
+  processors:
+    - drop_fields:
+        fields: ["source", "prospector", "input", "offset", "beat"]
+  fields_under_root: true
+
+- type: tcp
+  enabled: true
+  host: "0.0.0.0:514"
+  fields:
+    module: syslog
+    dataset: syslog
+    pipeline: "syslog"
+    index: "so-syslog-%{+yyyy.MM.dd}"
+  processors:
+    - drop_fields:
+        fields: ["source", "prospector", "input", "offset", "beat"]
+  fields_under_root: true
 {%- if BROVER != 'SURICATA' %}
 {%- for LOGNAME in salt['pillar.get']('brologs:enabled', '') %}
 - type: log
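
With both listeners bound to 0.0.0.0:514, a quick end-to-end check from an allowed host is util-linux logger, shipping a test line first over UDP and then over TCP (the sensor hostname is a placeholder):

    logger -n sensor.example.com -P 514 -d "filebeat syslog smoke test (udp)"
    logger -n sensor.example.com -P 514 -T "filebeat syslog smoke test (tcp)"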
@@ -57,12 +57,14 @@ so-filebeat:
       - /opt/so/conf/filebeat/etc/filebeat.yml:/usr/share/filebeat/filebeat.yml:ro
       - /nsm/zeek:/nsm/zeek:ro
       - /nsm/strelka/log:/nsm/strelka/log:ro
-      - /opt/so/log/suricata:/suricata:ro
+      - /nsm/suricata:/suricata:ro
       - /opt/so/wazuh/logs/alerts:/wazuh/alerts:ro
       - /opt/so/wazuh/logs/archives:/wazuh/archives:ro
       - /nsm/osquery/fleet/:/nsm/osquery/fleet:ro
       - /opt/so/conf/filebeat/etc/pki/filebeat.crt:/usr/share/filebeat/filebeat.crt:ro
       - /opt/so/conf/filebeat/etc/pki/filebeat.key:/usr/share/filebeat/filebeat.key:ro
       - /etc/ssl/certs/intca.crt:/usr/share/filebeat/intraca.crt:ro
+    - port_bindings:
+      - 0.0.0.0:514:514/udp
     - watch:
       - file: /opt/so/conf/filebeat/etc/filebeat.yml
@@ -137,6 +137,18 @@ enable_wazuh_manager_1514_udp_{{ip}}:
     - position: 1
     - save: True
 
+# Allow syslog
+enable_syslog_514_{{ip}}:
+  iptables.insert:
+    - table: filter
+    - chain: DOCKER-USER
+    - jump: ACCEPT
+    - proto: tcp
+    - source: {{ ip }}
+    - dport: 514
+    - position: 1
+    - save: True
+
 # Rules if you are a Master
 {% if grains['role'] in ['so-master', 'so-eval', 'so-helix', 'so-mastersearch', 'so-standalone'] %}
 #This should be more granular
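
Note the inserted rule only opens TCP/514, while the filebeat state above publishes 514/udp; if UDP senders are expected, a matching rule would look like the following sketch in the same state style (not part of this commit):

    enable_syslog_514_udp_{{ip}}:
      iptables.insert:
        - table: filter
        - chain: DOCKER-USER
        - jump: ACCEPT
        - proto: udp
        - source: {{ ip }}
        - dport: 514
        - position: 1
        - save: True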
@@ -1,64 +0,0 @@
#!/bin/bash
{% set MASTERIP = salt['pillar.get']('static:masterip', '') %}
{%- set HIVEUSER = salt['pillar.get']('static:hiveuser', '') %}
{%- set HIVEPASSWORD = salt['pillar.get']('static:hivepassword', '') %}
{%- set HIVEKEY = salt['pillar.get']('static:hivekey', '') %}

hive_init(){
  sleep 120
  HIVE_IP="{{MASTERIP}}"
  HIVE_USER="{{HIVEUSER}}"
  HIVE_PASSWORD="{{HIVEPASSWORD}}"
  HIVE_KEY="{{HIVEKEY}}"
  SOCTOPUS_CONFIG="/opt/so/saltstack/salt/soctopus/files/SOCtopus.conf"

  echo -n "Waiting for TheHive..."
  COUNT=0
  HIVE_CONNECTED="no"
  while [[ "$COUNT" -le 240 ]]; do
    curl --output /dev/null --silent --head --fail -k "https://$HIVE_IP/thehive"
    if [ $? -eq 0 ]; then
      HIVE_CONNECTED="yes"
      echo "connected!"
      break
    else
      ((COUNT+=1))
      sleep 1
      echo -n "."
    fi
  done

  if [ "$HIVE_CONNECTED" == "yes" ]; then

    # Migrate DB
    curl -v -k -XPOST "https://$HIVE_IP:/thehive/api/maintenance/migrate"

    # Create initial TheHive user
    curl -v -k "https://$HIVE_IP/thehive/api/user" -H "Content-Type: application/json" -d "{\"login\" : \"$HIVE_USER\",\"name\" : \"$HIVE_USER\",\"roles\" : [\"read\",\"alert\",\"write\",\"admin\"],\"preferences\" : \"{}\",\"password\" : \"$HIVE_PASSWORD\", \"key\": \"$HIVE_KEY\"}"

    # Pre-load custom fields
    #
    # reputation
    curl -v -k "https://$HIVE_IP/thehive/api/list/custom_fields" -H "Authorization: Bearer $HIVE_KEY" -H "Content-Type: application/json" -d "{\"value\":{\"name\": \"reputation\", \"reference\": \"reputation\", \"description\": \"This field provides an overall reputation status for an address/domain.\", \"type\": \"string\", \"options\": []}}"

    touch /opt/so/state/thehive.txt
  else
    echo "We experienced an issue connecting to TheHive!"
  fi
}

if [ -f /opt/so/state/thehive.txt ]; then
  exit 0
else
  rm -f garbage_file
  while ! wget -O garbage_file {{MASTERIP}}:9400 2>/dev/null
  do
    echo "Waiting for Elasticsearch..."
    rm -f garbage_file
    sleep 1
  done
  rm -f garbage_file
  sleep 5
  hive_init
fi
@@ -39,7 +39,7 @@ idstoolsetcsync:
 
 so-ruleupdatecron:
   cron.present:
-    - name: /usr/sbin/so-rule-update.sh > /opt/so/log/idstools/download.log
+    - name: /usr/sbin/so-rule-update > /opt/so/log/idstools/download.log 2>&1
     - user: root
     - minute: '1'
     - hour: '7'
@@ -58,11 +58,6 @@ synclocalnidsrules:
     - user: 939
     - group: 939
 
-ruleslink:
-  file.symlink:
-    - name: /opt/so/saltstack/salt/suricata/rules
-    - target: /opt/so/rules/nids
-
 so-idstools:
   docker_container.running:
     - image: {{ MASTER }}:5000/soshybridhunter/so-idstools:{{ VERSION }}
@@ -198,7 +198,7 @@ so-logstash:
       - /etc/pki/ca.crt:/usr/share/filebeat/ca.crt:ro
 {%- if grains['role'] == 'so-eval' %}
       - /nsm/zeek:/nsm/zeek:ro
-      - /opt/so/log/suricata:/suricata:ro
+      - /nsm/suricata:/suricata:ro
       - /opt/so/wazuh/logs/alerts:/wazuh/alerts:ro
       - /opt/so/wazuh/logs/archives:/wazuh/archives:ro
       - /opt/so/log/fleet/:/osquery/logs:ro
@@ -0,0 +1 @@
# For custom logstash configs, they should be placed in /opt/so/saltstack/local/salt/logstash/pipelines/config/custom/
@@ -0,0 +1,6 @@
input {
  beats {
    port => "5044"
    tags => [ "beat-ext" ]
  }
}
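
Agents shipping to this input can verify reachability before debugging filebeat itself; 5044 is plain TCP here, with no ssl block (hostname is a placeholder, and `filebeat test output` is filebeat's own connectivity check):

    nc -zv master.example.com 5044
    filebeat test output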
@@ -3,24 +3,21 @@
 {%- else %}
 {%- set ES = salt['pillar.get']('node:mainip', '') -%}
 {%- endif %}
-# Author: Justin Henderson
-# SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics
-# Updated by: Doug Burks
-# Last Update: 5/15/2017
 
 filter {
-  if "syslog" in [tags] and "test_data" not in [tags] {
+  if [module] =~ "syslog" {
     mutate {
       ##add_tag => [ "conf_file_9034"]
     }
     ##add_tag => [ "conf_file_9000"]
   }
 }
 }
 output {
-  if "syslog" in [tags] and "test_data" not in [tags] {
+  if [module] =~ "syslog" {
     elasticsearch {
       pipeline => "%{module}"
       hosts => "{{ ES }}"
       index => "so-syslog-%{+YYYY.MM.dd}"
-      template_name => "logstash"
+      template_name => "so-common"
+      template => "/so-common-template.json"
       template_overwrite => true
     }
@@ -3,22 +3,15 @@
 {%- else %}
 {%- set ES = salt['pillar.get']('node:mainip', '') -%}
 {%- endif %}
-# Author: Wes Lambert
-# Last Update: 09/14/2018
-filter {
-  if "beat" in [tags] {
-    mutate {
-      ##add_tag => [ "conf_file_9500"]
-    }
-  }
-}
 
 output {
-  if "beat" in [tags] {
+  if "beat-ext" in [tags] {
     elasticsearch {
       pipeline => "beats.common"
       hosts => "{{ ES }}"
       index => "so-beats-%{+YYYY.MM.dd}"
-      template_name => "so-beats"
-      template => "/so-beats-template.json"
+      template_name => "so-common"
+      template => "/so-common-template.json"
       template_overwrite => true
     }
   }
@@ -1,2 +0,0 @@
# Reference /usr/share/logstash/pipeline.custom/templates/YOURTEMPLATE.json
#
@@ -0,0 +1,2 @@
# Reference /usr/share/logstash/pipeline.custom/templates/YOURTEMPLATE.json
# For custom logstash templates, they should be placed in /opt/so/saltstack/local/salt/logstash/pipelines/templates/custom/
File diff suppressed because it is too large.
@@ -1,10 +1,10 @@
 #!/usr/bin/env bash
 
 # This script adds pillar and schedule files securely
 
+local_salt_dir=/opt/so/saltstack/local
 MINION=$1
 
 echo "Adding $1"
-cp /tmp/$MINION/pillar/$MINION.sls /opt/so/saltstack/pillar/minions/
-cp /tmp/$MINION/schedules/* /opt/so/saltstack/salt/patch/os/schedules/
+cp /tmp/$MINION/pillar/$MINION.sls $local_salt_dir/pillar/minions/
+cp --parents /tmp/$MINION/schedules/* $local_salt_dir/salt/patch/os/schedules/
 rm -rf /tmp/$MINION
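
One thing to watch with cp --parents: it recreates the source path under the destination, so the schedule files land in $local_salt_dir/salt/patch/os/schedules/tmp/$MINION/schedules/ rather than directly in the target directory. If flat placement is the intent, the plain form from the removed line does that (a sketch, not what the commit shipped):

    cp /tmp/$MINION/schedules/* $local_salt_dir/salt/patch/os/schedules/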
@@ -61,6 +61,7 @@ so-aptcacherng:
   docker_container.running:
     - image: {{ MASTER }}:5000/soshybridhunter/so-acng:{{ VERSION }}
+    - hostname: so-acng
     - restart_policy: always
     - port_bindings:
       - 0.0.0.0:3142:3142
     - binds:
@@ -134,7 +134,7 @@ http {
             proxy_set_header Connection "Upgrade";
         }
 
-        location ~ ^/auth/.*?(whoami|login|logout) {
+        location ~ ^/auth/.*?(whoami|login|logout|settings) {
             rewrite /auth/(.*) /$1 break;
             proxy_pass http://{{ masterip }}:4433;
             proxy_read_timeout 90;
@@ -134,7 +134,7 @@ http {
             proxy_set_header Connection "Upgrade";
         }
 
-        location ~ ^/auth/.*?(whoami|login|logout) {
+        location ~ ^/auth/.*?(whoami|login|logout|settings) {
            rewrite /auth/(.*) /$1 break;
            proxy_pass http://{{ masterip }}:4433;
            proxy_read_timeout 90;
@@ -134,7 +134,7 @@ http {
             proxy_set_header Connection "Upgrade";
         }
 
-        location ~ ^/auth/.*?(whoami|login|logout) {
+        location ~ ^/auth/.*?(whoami|login|logout|settings) {
            rewrite /auth/(.*) /$1 break;
            proxy_pass http://{{ masterip }}:4433;
            proxy_read_timeout 90;
@@ -134,7 +134,7 @@ http {
             proxy_set_header Connection "Upgrade";
         }
 
-        location ~ ^/auth/.*?(whoami|login|logout) {
+        location ~ ^/auth/.*?(whoami|login|logout|settings) {
            rewrite /auth/(.*) /$1 break;
            proxy_pass http://{{ masterip }}:4433;
            proxy_read_timeout 90;
@@ -1,5 +1,6 @@
 {%- set ip = salt['pillar.get']('static:masterip', '') -%}
 #!/bin/bash
+default_salt_dir=/opt/so/saltstack/default
 
 echo "Waiting for connection"
 until $(curl --output /dev/null --silent --head http://{{ ip }}:1880); do
@@ -7,5 +8,5 @@ until $(curl --output /dev/null --silent --head http://{{ ip }}:1880); do
   sleep 1
 done
 echo "Loading flows..."
-curl -XPOST -v -H "Content-Type: application/json" -d @/opt/so/saltstack/salt/nodered/so_flows.json {{ ip }}:1880/flows
+curl -XPOST -v -H "Content-Type: application/json" -d @$default_salt_dir/salt/nodered/so_flows.json {{ ip }}:1880/flows
 echo "Done loading..."
@@ -36,7 +36,7 @@ nodered:
 
 noderedflows:
   file.recurse:
-    - name: /opt/so/saltstack/salt/nodered/
+    - name: /opt/so/saltstack/default/salt/nodered/
     - source: salt://nodered/files
     - user: 947
     - group: 939
@@ -1,5 +1,7 @@
 {%- set MYSQLPASS = salt['pillar.get']('secrets:mysql', None) -%}
 #!/bin/sh
 
-docker cp /opt/so/saltstack/salt/playbook/files/playbook_db_init.sql so-mysql:/tmp/playbook_db_init.sql
+default_salt_dir=/opt/so/saltstack/default
+
+docker cp $default_salt_dir/salt/playbook/files/playbook_db_init.sql so-mysql:/tmp/playbook_db_init.sql
 docker exec so-mysql /bin/bash -c "/usr/bin/mysql -b -uroot -p{{MYSQLPASS}} < /tmp/playbook_db_init.sql"
@@ -86,15 +86,22 @@ so-playbook:
 
 {% endif %}
 
+playbooklogdir:
+  file.directory:
+    - name: /opt/so/log/playbook
+    - user: 939
+    - group: 939
+    - makedirs: True
+
 so-playbooksynccron:
   cron.present:
-    - name: /usr/sbin/so-playbook-sync
+    - name: /usr/sbin/so-playbook-sync > /opt/so/log/playbook/sync.log 2>&1
     - user: root
     - minute: '*/5'
 
 so-playbookruleupdatecron:
   cron.present:
-    - name: /usr/sbin/so-playbook-ruleupdate
+    - name: /usr/sbin/so-playbook-ruleupdate > /opt/so/log/playbook/update.log 2>&1
     - user: root
     - minute: '1'
     - hour: '6'
@@ -9,9 +9,9 @@ import subprocess
 def run():
     MINIONID = data['id']
     ACTION = data['data']['action']
 
-    STATICFILE = '/opt/so/saltstack/pillar/static.sls'
-    SECRETSFILE = '/opt/so/saltstack/pillar/secrets.sls'
+    local_salt_dir = '/opt/so/saltstack/local'
+    STATICFILE = local_salt_dir + '/pillar/static.sls'
+    SECRETSFILE = local_salt_dir + '/pillar/secrets.sls'
 
     if MINIONID.split('_')[-1] in ['master','eval','fleet','mastersearch','standalone']:
         if ACTION == 'enablefleet':
@@ -58,7 +58,7 @@ def run():
             PACKAGEVERSION += 1
 
             # Run Docker container that will build the packages
-            gen_packages = subprocess.run(["docker", "run","--rm", "--mount", "type=bind,source=/opt/so/saltstack/salt/fleet/packages,target=/output", \
+            gen_packages = subprocess.run(["docker", "run","--rm", "--mount", "type=bind,source=" + local_salt_dir + "/salt/fleet/packages,target=/output", \
                 "--mount", "type=bind,source=/etc/ssl/certs/intca.crt,target=/var/launcher/launcher.crt", f"{ MASTER }:5000/soshybridhunter/so-fleet-launcher:{ VERSION }", \
                 f"{ESECRET}", f"{PACKAGEHOSTNAME}:8090", f"{PACKAGEVERSION}.1.1"], stdout=subprocess.PIPE, encoding='ascii')
@@ -42,6 +42,7 @@ so-dockerregistry:
   docker_container.running:
     - image: registry:2
+    - hostname: so-registry
     - restart_policy: always
     - port_bindings:
      - 0.0.0.0:5000:5000
     - binds:
@@ -1,25 +0,0 @@
{%- set MASTERIP = salt['pillar.get']('static:masterip', '') -%}
{%- set SENSORONIKEY = salt['pillar.get']('static:sensoronikey', '') -%}
{
  "logFilename": "/opt/sensoroni/logs/sensoroni-server.log",
  "server": {
    "bindAddress": "0.0.0.0:9822",
    "maxPacketCount": 5000,
    "htmlDir": "html",
    "modules": {
      "filedatastore": {
        "jobDir": "jobs"
      },
      "securityonion": {
        "elasticsearchHost": "http://{{ MASTERIP }}:9200",
        "elasticsearchUsername": "",
        "elasticsearchPassword": "",
        "elasticsearchVerifyCert": false
      },
      "statickeyauth": {
        "anonymousCidr": "172.17.0.0/24",
        "apiKey": "{{ SENSORONIKEY }}"
      }
    }
  }
}
@@ -1,45 +0,0 @@
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
{% set MASTER = salt['grains.get']('master') %}

sensoronidir:
  file.directory:
    - name: /opt/so/conf/sensoroni
    - user: 939
    - group: 939
    - makedirs: True

sensoronidatadir:
  file.directory:
    - name: /nsm/sensoroni/jobs
    - user: 939
    - group: 939
    - makedirs: True

sensoronilogdir:
  file.directory:
    - name: /opt/so/log/sensoroni
    - user: 939
    - group: 939
    - makedirs: True

sensoronisync:
  file.recurse:
    - name: /opt/so/conf/sensoroni
    - source: salt://sensoroni/files
    - user: 939
    - group: 939
    - template: jinja

so-sensoroni:
  docker_container.running:
    - image: {{ MASTER }}:5000/soshybridhunter/so-sensoroni:{{ VERSION }}
    - hostname: sensoroni
    - name: so-sensoroni
    - binds:
      - /nsm/sensoroni/jobs:/opt/sensoroni/jobs:rw
      - /opt/so/conf/sensoroni/sensoroni.json:/opt/sensoroni/sensoroni.json:ro
      - /opt/so/log/sensoroni/:/opt/sensoroni/logs/:rw
    - port_bindings:
      - 0.0.0.0:9822:9822
    - watch:
      - file: /opt/so/conf/sensoroni
@@ -42,7 +42,7 @@ urls:
   login_ui: https://{{ WEBACCESS }}/login/
   registration_ui: https://{{ WEBACCESS }}/login/
   error_ui: https://{{ WEBACCESS }}/login/
-  settings_ui: https://{{ WEBACCESS }}/
+  settings_ui: https://{{ WEBACCESS }}/?r=/settings
   verify_ui: https://{{ WEBACCESS }}/
   mfa_ui: https://{{ WEBACCESS }}/
+49
-49
@@ -93,55 +93,55 @@
|
||||
{ "name": "Wazuh/OSSEC Users", "description": "Show all Wazuh alerts grouped by username", "query": "event.module:ossec AND event.dataset:alert | groupby user.name"},
|
||||
{ "name": "Sysmon Events", "description": "Show all Sysmon logs grouped by event_id", "query": "event_type:sysmon | groupby event_id"},
|
||||
{ "name": "Sysmon Usernames", "description": "Show all Sysmon logs grouped by username", "query": "event_type:sysmon | groupby username"},
|
||||
{ "name": "Zeek Notice", "description": "Show notices from Zeek", "query": "event.module:zeek AND event.dataset:notice | groupby notice.note notice.message"},
|
||||
{ "name": "Connections", "description": "Connections grouped by IP and Port", "query": "event.module:zeek AND event.dataset:conn | groupby source.ip destination.ip network.protocol destination.port"},
|
||||
{ "name": "Connections", "description": "Connections grouped by Service", "query": "event.module:zeek AND event.dataset:conn | groupby network.protocol destination.port"},
|
||||
{ "name": "Connections", "description": "Connections grouped by destination country", "query": "event.module:zeek AND event.dataset:conn | groupby destination.geo.country_name"},
|
||||
{ "name": "Connections", "description": "Connections grouped by source country", "query": "event.module:zeek AND event.dataset:conn | groupby source.geo.country_name"},
|
||||
{ "name": "DCE_RPC", "description": "DCE_RPC grouped by operation", "query": "event.module:zeek AND event.dataset:dce_rpc | groupby dce_rpc.operation"},
|
||||
{ "name": "DHCP", "description": "DHCP leases", "query": "event.module:zeek AND event.dataset:dhcp | groupby host.hostname host.domain dhcp.requested_address"},
|
||||
{ "name": "DHCP", "description": "DHCP grouped by message type", "query": "event.module:zeek AND event.dataset:dhcp | groupby dhcp.message_types"},
|
||||
{ "name": "DNP3", "description": "DNP3 grouped by reply", "query": "event.module:zeek AND event.dataset:dnp3 | groupby dnp3.fc_reply"},
|
||||
{ "name": "DNS", "description": "DNS queries grouped by port ", "query": "event.module:zeek AND event.dataset:dns | groupby dns.query.name destination.port"},
|
||||
{ "name": "DNS", "description": "DNS queries grouped by type", "query": "event.module:zeek AND event.dataset:dns | groupby dns.query.type_name destination.port"},
|
||||
{ "name": "DNS", "description": "DNS highest registered domain", "query": "event.module:zeek AND event.dataset:dns | groupby dns.highest_registered_domain.keyword"},
|
||||
{ "name": "DNS", "description": "DNS grouped by parent domain", "query": "event.module:zeek AND event.dataset:dns | groupby dns.parent_domain.keyword"},
|
||||
{ "name": "Files", "description": "Files grouped by mimetype", "query": "event.module:zeek AND event.dataset:files | groupby file.mime_type source.ip"},
|
||||
{ "name": "FTP", "description": "FTP grouped by argument", "query": "event.module:zeek AND event.dataset:ftp | groupby ftp.argument"},
|
||||
{ "name": "FTP", "description": "FTP grouped by command", "query": "event.module:zeek AND event.dataset:ftp | groupby ftp.command"},
|
||||
{ "name": "FTP", "description": "FTP grouped by username", "query": "event.module:zeek AND event.dataset:ftp | groupby ftp.user"},
|
||||
{ "name": "HTTP", "description": "HTTP grouped by destination port", "query": "event.module:zeek AND event.dataset:http | groupby destination.port"},
|
||||
{ "name": "HTTP", "description": "HTTP grouped by method", "query": "event.module:zeek AND event.dataset:http | groupby http.method"},
|
||||
{ "name": "HTTP", "description": "HTTP grouped by status code", "query": "event.module:zeek AND event.dataset:http | groupby http.status_code"},
|
||||
{ "name": "HTTP", "description": "HTTP grouped by status message", "query": "event.module:zeek AND event.dataset:http | groupby http.status_message"},
|
||||
{ "name": "HTTP", "description": "HTTP grouped by user agent", "query": "event.module:zeek AND event.dataset:http | groupby http.useragent"},
|
||||
{ "name": "HTTP", "description": "HTTP grouped by virtual host", "query": "event.module:zeek AND event.dataset:http | groupby http.virtual_host"},
|
||||
{ "name": "HTTP", "description": "HTTP with exe downloads", "query": "event.module:zeek AND event.dataset:http AND file.resp_mime_types:dosexec | groupby http.virtual_host"},
|
||||
{ "name": "Intel", "description": "Intel framework hits grouped by indicator", "query": "event.module:zeek AND event.dataset:intel | groupby intel.indicator"},
|
||||
{ "name": "IRC", "description": "IRC grouped by command", "query": "event.module:zeek AND event.dataset:irc | groupby irc.command.type"},
|
||||
{ "name": "KERBEROS", "description": "KERBEROS grouped by service", "query": "event.module:zeek AND event.dataset:kerberos | groupby kerberos.service"},
|
||||
{ "name": "MODBUS", "description": "MODBUS grouped by function", "query": "event.module:zeek AND event.dataset:modbus | groupby modbus.function"},
|
||||
{ "name": "MYSQL", "description": "MYSQL grouped by command", "query": "event.module:zeek AND event.dataset:mysql | groupby mysql.command"},
|
||||
{ "name": "NOTICE", "description": "Zeek notice logs grouped by note", "query": "event.module:zeek AND event.dataset:notice | groupby notice.note"},
|
||||
{ "name": "NOTICE", "description": "Zeek notice logs grouped by message", "query": "event.module:zeek AND event.dataset:notice | groupby notice.message"},
|
||||
{ "name": "NTLM", "description": "NTLM grouped by computer name", "query": "event.module:zeek AND event.dataset:ntlm | groupby ntlm.server.dns.name"},
|
||||
{ "name": "PE", "description": "PE files list", "query": "event.module:zeek AND event.dataset:pe | groupby file.machine file.os file.subsystem"},
|
||||
{ "name": "RADIUS", "description": "RADIUS grouped by username", "query": "event.module:zeek AND event.dataset:radius | groupby user.name.keyword"},
|
||||
{ "name": "RDP", "description": "RDP grouped by client name", "query": "event.module:zeek AND event.dataset:rdp | groupby client.name"},
|
||||
{ "name": "RFB", "description": "RFB grouped by desktop name", "query": "event.module:zeek AND event.dataset:rfb | groupby rfb.desktop.name"},
|
||||
{ "name": "Signatures", "description": "Zeek signatures grouped by signature id", "query": "event.module:zeek AND event.dataset:signatures | groupby signature_id"},
|
||||
{ "name": "SIP", "description": "SIP grouped by user agent", "query": "event.module:zeek AND event.dataset:sip | groupby client.user_agent"},
|
||||
{ "name": "SMB_Files", "description": "SMB files grouped by action", "query": "event.module:zeek AND event.dataset:smb_files | groupby file.action"},
|
||||
{ "name": "SMB_Mapping", "description": "SMB mapping grouped by path", "query": "event.module:zeek AND event.dataset:smb_mapping | groupby smb.path"},
|
||||
{ "name": "SMTP", "description": "SMTP grouped by subject", "query": "event.module:zeek AND event.dataset:smtp | groupby smtp.subject"},
|
||||
{ "name": "SNMP", "description": "SNMP grouped by version and string", "query": "event.module:zeek AND event.dataset:snmp | groupby snmp.community snmp.version"},
|
||||
{ "name": "Software", "description": "List of software seen on the network", "query": "event.module:zeek AND event.dataset:software | groupby software.type software.name"},
|
||||
{ "name": "SSH", "description": "SSH grouped by version", "query": "event.module:zeek AND event.dataset:ssh | groupby ssh.version"},
|
||||
{ "name": "SSL", "description": "SSL grouped by version and server name", "query": "event.module:zeek AND event.dataset:ssl | groupby ssl.version ssl.server_name"},
|
||||
{ "name": "SYSLOG", "description": "SYSLOG grouped by severity and facility ", "query": "event.module:zeek AND event.dataset:syslog | groupby syslog.severity syslog.facility"},
|
||||
{ "name": "Tunnels", "description": "Tunnels grouped by action", "query": "event.module:zeek AND event.dataset:tunnels | groupby event.action"},
|
||||
{ "name": "Weird", "description": "Zeek weird log grouped by name", "query": "event.module:zeek AND event.dataset:weird | groupby weird.name"},
|
||||
{ "name": "x509", "description": "x.509 grouped by key length", "query": "event.module:zeek AND event.dataset:x509 | groupby x509.certificate.key.length"},
|
||||
{ "name": "Zeek Notice", "description": "Show notices from Zeek", "query": "event.dataset:notice | groupby notice.note notice.message"},
|
||||
{ "name": "Connections", "description": "Connections grouped by IP and Port", "query": "event.dataset:conn | groupby source.ip destination.ip network.protocol destination.port"},
|
||||
{ "name": "Connections", "description": "Connections grouped by Service", "query": "event.dataset:conn | groupby network.protocol destination.port"},
|
||||
{ "name": "Connections", "description": "Connections grouped by destination country", "query": "event.dataset:conn | groupby destination.geo.country_name"},
|
||||
{ "name": "Connections", "description": "Connections grouped by source country", "query": "event.dataset:conn | groupby source.geo.country_name"},
|
||||
{ "name": "DCE_RPC", "description": "DCE_RPC grouped by operation", "query": "event.dataset:dce_rpc | groupby dce_rpc.operation"},
|
||||
{ "name": "DHCP", "description": "DHCP leases", "query": "event.dataset:dhcp | groupby host.hostname host.domain"},
|
||||
{ "name": "DHCP", "description": "DHCP grouped by message type", "query": "event.dataset:dhcp | groupby dhcp.message_types"},
|
||||
{ "name": "DNP3", "description": "DNP3 grouped by reply", "query": "event.dataset:dnp3 | groupby dnp3.fc_reply"},
|
||||
{ "name": "DNS", "description": "DNS queries grouped by port ", "query": "event.dataset:dns | groupby dns.query.name destination.port"},
|
||||
{ "name": "DNS", "description": "DNS queries grouped by type", "query": "event.dataset:dns | groupby dns.query.type_name destination.port"},
|
||||
{ "name": "DNS", "description": "DNS highest registered domain", "query": "event.dataset:dns | groupby dns.highest_registered_domain.keyword"},
|
||||
{ "name": "DNS", "description": "DNS grouped by parent domain", "query": "event.dataset:dns | groupby dns.parent_domain.keyword"},
|
||||
{ "name": "Files", "description": "Files grouped by mimetype", "query": "event.dataset:files | groupby file.mime_type source.ip"},
|
||||
{ "name": "FTP", "description": "FTP grouped by argument", "query": "event.dataset:ftp | groupby ftp.argument"},
|
||||
{ "name": "FTP", "description": "FTP grouped by command", "query": "event.dataset:ftp | groupby ftp.command"},
|
||||
{ "name": "FTP", "description": "FTP grouped by username", "query": "event.dataset:ftp | groupby ftp.user"},
|
||||
{ "name": "HTTP", "description": "HTTP grouped by destination port", "query": "event.dataset:http | groupby destination.port"},
|
||||
{ "name": "HTTP", "description": "HTTP grouped by method", "query": "event.dataset:http | groupby http.method"},
|
||||
{ "name": "HTTP", "description": "HTTP grouped by status code and message", "query": "event.dataset:http | groupby http.status_code http.status_message"},
|
||||
{ "name": "HTTP", "description": "HTTP grouped by user agent", "query": "event.dataset:http | groupby http.useragent"},
|
||||
{ "name": "HTTP", "description": "HTTP grouped by virtual host", "query": "event.dataset:http | groupby http.virtual_host"},
|
||||
{ "name": "HTTP", "description": "HTTP with exe downloads", "query": "event.dataset:http AND file.resp_mime_types:dosexec | groupby http.virtual_host"},
{ "name": "Intel", "description": "Intel framework hits grouped by indicator", "query": "event.dataset:intel | groupby intel.indicator"},
{ "name": "IRC", "description": "IRC grouped by command", "query": "event.dataset:irc | groupby irc.command.type"},
{ "name": "KERBEROS", "description": "KERBEROS grouped by service", "query": "event.dataset:kerberos | groupby kerberos.service"},
{ "name": "MODBUS", "description": "MODBUS grouped by function", "query": "event.dataset:modbus | groupby modbus.function"},
{ "name": "MYSQL", "description": "MYSQL grouped by command", "query": "event.dataset:mysql | groupby mysql.command"},
{ "name": "NOTICE", "description": "Zeek notice logs grouped by note and message", "query": "event.dataset:notice | groupby notice.note notice.message"},
{ "name": "NTLM", "description": "NTLM grouped by computer name", "query": "event.dataset:ntlm | groupby ntlm.server.dns.name"},
{ "name": "PE", "description": "PE files list", "query": "event.dataset:pe | groupby file.machine file.os file.subsystem"},
{ "name": "RADIUS", "description": "RADIUS grouped by username", "query": "event.dataset:radius | groupby user.name.keyword"},
{ "name": "RDP", "description": "RDP grouped by client name", "query": "event.dataset:rdp | groupby client.name"},
{ "name": "RFB", "description": "RFB grouped by desktop name", "query": "event.dataset:rfb | groupby rfb.desktop.name"},
{ "name": "Signatures", "description": "Zeek signatures grouped by signature id", "query": "event.dataset:signatures | groupby signature_id"},
{ "name": "SIP", "description": "SIP grouped by user agent", "query": "event.dataset:sip | groupby client.user_agent"},
{ "name": "SMB_Files", "description": "SMB files grouped by action", "query": "event.dataset:smb_files | groupby file.action"},
{ "name": "SMB_Mapping", "description": "SMB mapping grouped by path", "query": "event.dataset:smb_mapping | groupby smb.path"},
{ "name": "SMTP", "description": "SMTP grouped by subject", "query": "event.dataset:smtp | groupby smtp.subject"},
{ "name": "SNMP", "description": "SNMP grouped by version and string", "query": "event.dataset:snmp | groupby snmp.community snmp.version"},
{ "name": "Software", "description": "List of software seen on the network", "query": "event.dataset:software | groupby software.type software.name"},
{ "name": "SSH", "description": "SSH grouped by version", "query": "event.dataset:ssh | groupby ssh.version"},
{ "name": "SSL", "description": "SSL grouped by version and server name", "query": "event.dataset:ssl | groupby ssl.version ssl.server_name"},
{ "name": "SYSLOG", "description": "SYSLOG grouped by severity and facility", "query": "event.dataset:syslog | groupby syslog.severity syslog.facility"},
{ "name": "Tunnels", "description": "Tunnels grouped by action", "query": "event.dataset:tunnels | groupby event.action"},
{ "name": "Weird", "description": "Zeek weird log grouped by name", "query": "event.dataset:weird | groupby weird.name"},
{ "name": "x509", "description": "x.509 grouped by key length", "query": "event.dataset:x509 | groupby x509.certificate.key.length"},
{ "name": "x509", "description": "x.509 grouped by issuer", "query": "event.dataset:x509 | groupby x509.certificate.issuer"},
{ "name": "x509", "description": "x.509 grouped by subject", "query": "event.dataset:x509 | groupby x509.certificate.subject"},
{ "name": "Firewall", "description": "Firewall events grouped by action", "query": "event_type:firewall | groupby action"}
]
}
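Every entry above follows the same two-part grammar: a Lucene-style filter before the pipe and a groupby clause after it, so extending the list means appending one more object. A hypothetical example (the name and the http.method field are assumptions for illustration, not part of this commit) narrowing the HTTP query to one virtual host:

{ "name": "HTTP-example", "description": "HTTP for one virtual host", "query": "event.dataset:http AND http.virtual_host:example.com | groupby http.method"}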
@@ -1,5 +1,7 @@
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
{% set MASTER = salt['grains.get']('master') %}
{%- set MASTER_URL = salt['pillar.get']('master:url_base', '') %}
{%- set MASTER_IP = salt['pillar.get']('static:masterip', '') %}

soctopusdir:
file.directory:
@@ -69,3 +71,5 @@ so-soctopus:
- /opt/so/conf/navigator/nav_layer_playbook.json:/etc/playbook/nav_layer_playbook.json:rw
- port_bindings:
- 0.0.0.0:7000:7000
- extra_hosts:
- {{MASTER_URL}}:{{MASTER_IP}}

@@ -86,17 +86,17 @@ chownilogstashfilebeatp8:
# Create Symlinks to the keys so I can distribute it to all the things
filebeatdir:
file.directory:
- name: /opt/so/saltstack/salt/filebeat/files
- mkdirs: True
- name: /opt/so/saltstack/local/salt/filebeat/files
- makedirs: True

fbkeylink:
file.symlink:
- name: /opt/so/saltstack/salt/filebeat/files/filebeat.p8
- name: /opt/so/saltstack/local/salt/filebeat/files/filebeat.p8
- target: /etc/pki/filebeat.p8

fbcrtlink:
file.symlink:
- name: /opt/so/saltstack/salt/filebeat/files/filebeat.crt
- name: /opt/so/saltstack/local/salt/filebeat/files/filebeat.crt
- target: /etc/pki/filebeat.crt

# Create a cert for the docker registry

File diff suppressed because it is too large
@@ -1,16 +1,18 @@
%YAML 1.1
---
{%- set interface = salt['pillar.get']('sensor:interface', 'bond0') %}
{%- if grains['role'] == 'so-eval' %}
{%- set MTU = 1500 %}
{%- else %}
{%- set MTU = salt['pillar.get']('sensor:mtu', '1500') %}
{%- endif %}
{%- if salt['pillar.get']('sensor:homenet') %}
{%- set homenet = salt['pillar.get']('sensor:hnsensor', '') %}
{%- else %}
{%- set homenet = salt['pillar.get']('static:hnmaster', '') %}
{%- endif %}
{%- set interface = salt['pillar.get']('sensor:interface', 'bond0') %}
{%- if grains['role'] == 'so-eval' %}
{%- set MTU = 1500 %}
{%- elif grains['role'] == 'so-helix' %}
{%- set MTU = 9000 %}
{%- else %}
{%- set MTU = salt['pillar.get']('sensor:mtu', '1500') %}
{%- endif %}
{%- if salt['pillar.get']('sensor:homenet') %}
{%- set homenet = salt['pillar.get']('sensor:hnsensor', '') %}
{%- else %}
{%- set homenet = salt['pillar.get']('static:hnmaster', '') %}
{%- endif %}
# Suricata configuration file. In addition to the comments describing all
# options in this file, full documentation can be found at:
# https://suricata.readthedocs.io/en/latest/configuration/suricata-yaml.html
@@ -23,6 +25,11 @@ vars:
# more specific is better for alert accuracy and performance
address-groups:
HOME_NET: "[{{ homenet }}]"
#HOME_NET: "[192.168.0.0/16]"
#HOME_NET: "[10.0.0.0/8]"
#HOME_NET: "[172.16.0.0/12]"
#HOME_NET: "any"

EXTERNAL_NET: "!$HOME_NET"
#EXTERNAL_NET: "any"

@@ -49,6 +56,8 @@ vars:
MODBUS_PORTS: 502
FILE_DATA_PORTS: "[$HTTP_PORTS,110,143]"
FTP_PORTS: 21
VXLAN_PORTS: 4789
TEREDO_PORTS: 3544

##
## Step 2: select outputs to enable
@@ -64,9 +73,12 @@ stats:
enabled: yes
# The interval field (in seconds) controls at what interval
# the loggers are invoked.
interval: 8
interval: 30
# Add decode events as stats.
#decoder-events: true
# Decoder event prefix in stats. Has been 'decoder' before, but that leads
# to missing events in the eve.stats records. See issue #2225.
#decoder-events-prefix: "decoder.event"
# Add stream events as stats.
#stream-events: false

@@ -83,18 +95,35 @@ outputs:
- eve-log:
enabled: yes
filetype: regular #regular|syslog|unix_dgram|unix_stream|redis
filename: eve.json
filename: /nsm/eve.json
rotate-interval: hour

#prefix: "@cee: " # prefix to prepend to each log entry
# the following are valid when type: syslog above
#identity: "suricata"
#facility: local5
#level: Info ## possible levels: Emergency, Alert, Critical,
## Error, Warning, Notice, Info, Debug
#redis:
# server: 127.0.0.1
# port: 6379
# async: true ## if redis replies are read asynchronously
# mode: list ## possible values: list|lpush (default), rpush, channel|publish
# ## lpush and rpush are using a Redis list. "list" is an alias for lpush
# ## publish is using a Redis channel. "channel" is an alias for publish
# key: suricata ## key or channel to use (default to suricata)
# Redis pipelining set up. This will enable to only do a query every
# 'batch-size' events. This should lower the latency induced by network
# connection at the cost of some memory. There is no flushing implemented
# so this setting has to be reserved to high traffic suricata.
# pipelining:
# enabled: yes ## set enable to yes to enable query pipelining
# batch-size: 10 ## number of entry to keep in buffer

# Include top level metadata. Default yes.
#metadata: no

# include the name of the input pcap file in pcap file processing mode
pcap-file: false

# Community Flow ID
@@ -106,7 +135,7 @@ outputs:
# to make the id less predictable.

# enable/disable the community id feature.
community-id: false
community-id: true
# Seed value for the ID output. Valid values are 0-65535.
community-id-seed: 0

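# With community-id enabled and the seed left at its default of 0, the value
# Suricata writes should line up with the community_id Zeek computes for the
# same flow, which is what makes pivoting between the two log sets possible.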
@@ -130,36 +159,76 @@ outputs:

types:
- alert:
# payload: yes # enable dumping payload in Base64
# payload-buffer-size: 4kb # max size of payload buffer to output in eve-log
# payload-printable: yes # enable dumping payload in printable (lossy) format
# packet: yes # enable dumping of packet (without stream segments)
# http-body: yes # enable dumping of http body in Base64
# http-body-printable: yes # enable dumping of http body in printable format
# metadata: no # enable inclusion of app layer metadata with alert. Default yes
payload: no # enable dumping payload in Base64
payload-buffer-size: 4kb # max size of payload buffer to output in eve-log
payload-printable: yes # enable dumping payload in printable (lossy) format
packet: yes # enable dumping of packet (without stream segments)
metadata:
app-layer: false
flow: false
rule:
metadata: true
raw: true

# http-body: yes # Requires metadata; enable dumping of http body in Base64
# http-body-printable: yes # Requires metadata; enable dumping of http body in printable format

# Enable the logging of tagged packets for rules using the
# "tag" keyword.
tagged-packets: no
- anomaly:
# Anomaly log records describe unexpected conditions such
# as truncated packets, packets with invalid IP/UDP/TCP
# length values, and other events that render the packet
# invalid for further processing or describe unexpected
# behavior on an established stream. Networks which
# experience high occurrences of anomalies may experience
# packet processing degradation.
#
# Anomalies are reported for the following:
# 1. Decode: Values and conditions that are detected while
# decoding individual packets. This includes invalid or
# unexpected values for low-level protocol lengths as well
# as stream related events (TCP 3-way handshake issues,
# unexpected sequence number, etc).
# 2. Stream: This includes stream related events (TCP
# 3-way handshake issues, unexpected sequence number,
# etc).
# 3. Application layer: These denote application layer
# specific conditions that are unexpected, invalid or are
# unexpected given the application monitoring state.
#
# By default, anomaly logging is disabled. When anomaly
# logging is enabled, applayer anomaly reporting is
# enabled.
enabled: no
#
# Choose one or more types of anomaly logging and whether to enable
# logging of the packet header for packet anomalies.
types:
decode: no
stream: no
applayer: yes
packethdr: no
- http:
extended: yes # enable this for extended logging information
# custom allows additional http fields to be included in eve-log
# the example below adds three additional fields when uncommented
#custom: [Accept-Encoding, Accept-Language, Authorization]
# set this value to one and only one among {both, request, response}
# to dump all http headers for every http request and/or response
# dump-all-headers: none
- dns:
# This configuration uses the new DNS logging format,
# the old configuration is still available:
# http://suricata.readthedocs.io/en/latest/configuration/suricata-yaml.html#eve-extensible-event-format
# Use version 2 logging with the new format:
# DNS answers will be logged in one single event
# rather than an event for each of them.
# Without setting a version the version
# will fall back to 1 for backwards compatibility.
# Note: version 1 is not available with rust enabled
# https://suricata.readthedocs.io/en/latest/output/eve/eve-json-output.html#dns-v1-format

# As of Suricata 5.0, version 2 of the eve dns output
# format is the default.
version: 2

# Enable/disable this logger. Default: enabled.
#enabled: no
enabled: yes

# Control logging of requests and responses:
# - requests: enable logging of DNS queries
@@ -174,8 +243,8 @@ outputs:
# Default: all
#formats: [detailed, grouped]

# Answer types to log.
# Default: all
# Types to log, based on the query type.
# Default: all.
#types: [a, aaaa, cname, mx, ns, ptr, txt]
- tls:
extended: yes # enable this for extended logging information
@@ -184,7 +253,7 @@ outputs:
#session-resumption: no
# custom allows you to control which tls fields are included
# in eve-log
#custom: [subject, issuer, session_resumed, serial, fingerprint, sni, version, not_before, not_after, certificate, chain, ja3]
#custom: [subject, issuer, session_resumed, serial, fingerprint, sni, version, not_before, not_after, certificate, chain, ja3, ja3s]
- files:
force-magic: no # force logging magic on all logged files
# force logging of checksums, available hash functions are md5,
@@ -207,20 +276,23 @@ outputs:
# to yes
#md5: [body, subject]

#- dnp3
- dnp3
- ftp
- rdp
- nfs
- smb
- tftp
- ikev2
- krb5
- snmp
- sip
- dhcp:
# DHCP logging requires Rust.
enabled: yes
# When extended mode is on, all DHCP messages are logged
# with full detail. When extended mode is off (the
# default), just enough information to map a MAC address
# to an IP address is logged.
extended: no
# extended: no
- ssh
#- stats:
# totals: yes # stats for all threads merged together
@@ -236,47 +308,11 @@ outputs:
# flowints.
#- metadata

# alert output for use with Barnyard2
# deprecated - unified2 alert format for use with Barnyard2
- unified2-alert:
enabled: no
filename: unified2.alert

# File size limit. Can be specified in kb, mb, gb. Just a number
# is parsed as bytes.
#limit: 32mb

# By default unified2 log files have the file creation time (in
# unix epoch format) appended to the filename. Set this to yes to
# disable this behaviour.
#nostamp: no

# Sensor ID field of unified2 alerts.
#sensor-id: 0

# Include payload of packets related to alerts. Defaults to true, set to
# false if payload is not required.
#payload: yes

# HTTP X-Forwarded-For support by adding the unified2 extra header or
# overwriting the source or destination IP address (depending on flow
# direction) with the one reported in the X-Forwarded-For HTTP header.
# This is helpful when reviewing alerts for traffic that is being reverse
# or forward proxied.
xff:
enabled: no
# Two operation modes are available, "extra-data" and "overwrite". Note
# that in the "overwrite" mode, if the reported IP address in the HTTP
# X-Forwarded-For header is of a different version of the packet
# received, it will fall-back to "extra-data" mode.
mode: extra-data
# Two proxy deployments are supported, "reverse" and "forward". In
# a "reverse" deployment the IP address used is the last one, in a
# "forward" deployment the first IP address is used.
deployment: reverse
# Header name where the actual IP address will be reported, if more
# than one IP address is present, the last IP address will be the
# one taken into consideration.
header: X-Forwarded-For
# for further options see:
# https://suricata.readthedocs.io/en/suricata-5.0.0/configuration/suricata-yaml.html#alert-output-for-use-with-barnyard2-unified2-alert

# a line based log of HTTP requests (no alerts)
- http-log:
@@ -285,6 +321,7 @@ outputs:
append: yes
#extended: yes # enable this for extended logging information
#custom: yes # enables the custom logging format (defined by customformat)
#customformat: ""
#filetype: regular # 'regular', 'unix_stream' or 'unix_dgram'

# a line based log of TLS handshake parameters (no alerts)
@@ -294,6 +331,7 @@ outputs:
append: yes
#extended: yes # Log extended information like fingerprint
#custom: yes # enables the custom logging format (defined by customformat)
#customformat: ""
#filetype: regular # 'regular', 'unix_stream' or 'unix_dgram'
# output TLS transaction where the session is resumed using a
# session id
@@ -304,14 +342,6 @@ outputs:
enabled: no
#certs-log-dir: certs # directory to store the certificates files

# a line based log of DNS requests and/or replies (no alerts)
# Note: not available when Rust is enabled (--enable-rust).
- dns-log:
enabled: no
filename: dns.log
append: yes
#filetype: regular # 'regular', 'unix_stream' or 'unix_dgram'

# Packet log... log packets in pcap format. 3 modes of operation: "normal"
# "multi" and "sguil".
#
@@ -382,7 +412,7 @@ outputs:
append: yes
#filetype: regular # 'regular', 'unix_stream' or 'unix_dgram'

# alert output to prelude (http://www.prelude-technologies.com/) only
# alert output to prelude (https://www.prelude-siem.org/) only
# available if Suricata has been compiled with --enable-prelude
- alert-prelude:
enabled: no
@@ -397,7 +427,7 @@ outputs:
append: yes # append to file (yes) or overwrite it (no)
totals: yes # stats for all threads merged together
threads: no # per thread stats
#null-values: yes # print counters that have value 0
null-values: yes # print counters that have value 0

# a line based alerts log similar to fast.log into syslog
- syslog:
@@ -409,12 +439,11 @@ outputs:
#level: Info ## possible levels: Emergency, Alert, Critical,
## Error, Warning, Notice, Info, Debug

# a line based information for dropped packets in IPS mode
# deprecated a line based information for dropped packets in IPS mode
- drop:
enabled: no
filename: drop.log
append: yes
#filetype: regular # 'regular', 'unix_stream' or 'unix_dgram'
# further options documented at:
# https://suricata.readthedocs.io/en/suricata-5.0.0/configuration/suricata-yaml.html#drop-log-a-line-based-information-for-dropped-packets

# Output module for storing files on disk. Files are stored in a
# directory names consisting of the first 2 characters of the
@@ -481,58 +510,18 @@ outputs:
# one taken into consideration.
header: X-Forwarded-For

# output module to store extracted files to disk (old style, deprecated)
#
# The files are stored to the log-dir in a format "file.<id>" where <id> is
# an incrementing number starting at 1. For each file "file.<id>" a meta
# file "file.<id>.meta" is created. Before they are finalized, they will
# have a ".tmp" suffix to indicate that they are still being processed.
#
# If include-pid is yes, then the files are instead "file.<pid>.<id>", with
# meta files named as "file.<pid>.<id>.meta"
#
# File extraction depends on a lot of things to be fully done:
# - file-store stream-depth. For optimal results, set this to 0 (unlimited)
# - http request / response body sizes. Again set to 0 for optimal results.
# - rules that contain the "filestore" keyword.
# deprecated - file-store v1
- file-store:
enabled: no # set to yes to enable
log-dir: files # directory to store the files
force-magic: no # force logging magic on all stored files
# force logging of checksums, available hash functions are md5,
# sha1 and sha256
#force-hash: [md5]
force-filestore: no # force storing of all files
# override global stream-depth for sessions in which we want to
# perform file extraction. Set to 0 for unlimited.
#stream-depth: 0
#waldo: file.waldo # waldo file to store the file_id across runs
# uncomment to disable meta file writing
#write-meta: no
# uncomment the following variable to define how many files can
# remain open for filestore by Suricata. Default value is 0 which
# means files get closed after each write
#max-open-files: 1000
include-pid: no # set to yes to include pid in file names

# output module to log files tracked in an easily parsable JSON format
- file-log:
enabled: no
filename: files-json.log
append: yes
#filetype: regular # 'regular', 'unix_stream' or 'unix_dgram'

force-magic: no # force logging magic on all logged files
# force logging of checksums, available hash functions are md5,
# sha1 and sha256
#force-hash: [md5]
# further options documented at:
# https://suricata.readthedocs.io/en/suricata-5.0.0/file-extraction/file-extraction.html#file-store-version-1

# Log TCP data after stream normalization
# 2 types: file or dir. File logs into a single logfile. Dir creates
# 2 files per TCP session and stores the raw TCP data into them.
# Using 'both' will enable both file and dir modes.
#
# Note: limited by stream.depth
# Note: limited by stream.reassembly.depth
- tcp-data:
enabled: no
type: file
@@ -591,10 +580,14 @@ logging:
- file:
enabled: yes
level: info
filename: /var/log/suricata/suricata.log
filename: suricata.log
# type: json
- syslog:
enabled: no
facility: local5
format: "[%i] <%d> -- "
# type: json


##
## Step 4: configure common capture settings
@@ -613,16 +606,11 @@ af-packet:
# Default AF_PACKET cluster type. AF_PACKET can load balance per flow or per hash.
# This is only supported for Linux kernel > 3.1
# possible value are:
# * cluster_round_robin: round robin load balancing
# * cluster_flow: all packets of a given flow are sent to the same socket
# * cluster_cpu: all packets treated in kernel by a CPU are sent to the same socket
# * cluster_qm: all packets linked by network card to a RSS queue are sent to the same
# socket. Requires at least Linux 3.14.
# * cluster_random: packets are sent randomly to sockets but with an equipartition.
# Requires at least Linux 3.14.
# * cluster_rollover: kernel rotates between sockets filling each socket before moving
# to the next. Requires at least Linux 3.10.
# * cluster_ebpf: eBPF file load balancing. See doc/userguide/capture/ebpf-xdt.rst for
# * cluster_ebpf: eBPF file load balancing. See doc/userguide/capture-hardware/ebpf-xdp.rst for
# more info.
# Recommended modes are cluster_flow on most boxes and cluster_cpu or cluster_qm on system
# with capture card using RSS (require cpu affinity tuning and system irq tuning)
@@ -630,12 +618,8 @@ af-packet:
# In some fragmentation cases, the hash can not be computed. If "defrag" is set
# to yes, the kernel will do the needed defragmentation before sending the packets.
defrag: yes
# After Linux kernel 3.10 it is possible to activate the rollover option: if a socket is
# full then kernel will send the packet on the next socket with room available. This option
# can minimize packet drop and increase the treated bandwidth on single intensive flow.
#rollover: yes
# To use the ring feature of AF_PACKET, set 'use-mmap' to yes
#use-mmap: yes
use-mmap: yes
# Lock memory map to avoid it going to swap. Be careful that over subscribing could lock
# your system
#mmap-locked: yes
@@ -683,14 +667,13 @@ af-packet:
#copy-mode: ips
#copy-iface: eth1
# For eBPF and XDP setup including bypass, filter and load balancing, please
# see doc/userguide/capture/ebpf-xdt.rst for more info.
# see doc/userguide/capture-hardware/ebpf-xdp.rst for more info.

# Put default values here. These will be used for an interface that is not
# in the list above.
- interface: default
#threads: auto
#use-mmap: no
#rollover: yes
#tpacket-v3: yes

# Cross platform libpcap capture support
@@ -753,6 +736,8 @@ app-layer:
protocols:
krb5:
enabled: yes
snmp:
enabled: yes
ikev2:
enabled: yes
tls:
@@ -760,8 +745,9 @@ app-layer:
detection-ports:
dp: 443

# Generate JA3 fingerprint from client hello
ja3-fingerprints: yes
# Generate JA3 fingerprint from client hello. If not specified it
# will be disabled by default, but enabled if rules require it.
#ja3-fingerprints: auto

# What to do when the encrypted communications start:
# - default: keep tracking TLS session, check for protocol anomalies,
@@ -775,17 +761,21 @@ app-layer:
#
# For best performance, select 'bypass'.
#
#encrypt-handling: default
#encryption-handling: default

dcerpc:
enabled: yes
ftp:
enabled: yes
# memcap: 64mb
# RDP, disabled by default.
rdp:
#enabled: no
ssh:
enabled: yes
smtp:
enabled: yes
raw-extraction: no
# Configure SMTP-MIME Decoder
mime:
# Decode MIME messages from SMTP transactions
@@ -814,10 +804,6 @@ app-layer:
content-inspect-window: 4096
imap:
enabled: detection-only
msn:
enabled: detection-only
# Note: --enable-rust is required for full SMB1/2 support. W/o rust
# only minimal SMB1 support is available.
smb:
enabled: yes
detection-ports:
@@ -826,8 +812,6 @@ app-layer:
# Stream reassembly size for SMB streams. By default track it completely.
#stream-depth: 0

# Note: NFS parser depends on Rust support: pass --enable-rust
# to configure.
nfs:
enabled: yes
tftp:
@@ -851,7 +835,8 @@ app-layer:
dp: 53
http:
enabled: yes
# memcap: 64mb
# memcap: Maximum memory capacity for http
# Default is unlimited, value can be such as 64mb

# default-config: Used when no server-config matches
# personality: List of personalities used by default
@@ -859,37 +844,15 @@ app-layer:
# by http_client_body & pcre /P option.
# response-body-limit: Limit reassembly of response body for inspection
# by file_data, http_server_body & pcre /Q option.
# double-decode-path: Double decode path section of the URI
# double-decode-query: Double decode query section of the URI
# response-body-decompress-layer-limit:
# Limit to how many layers of compression will be
# decompressed. Defaults to 2.
#
# For advanced options, see the user guide


# server-config: List of server configurations to use if address matches
# address: List of IP addresses or networks for this block
# personality: List of personalities used by this block
# request-body-limit: Limit reassembly of request body for inspection
# by http_client_body & pcre /P option.
# response-body-limit: Limit reassembly of response body for inspection
# by file_data, http_server_body & pcre /Q option.
# double-decode-path: Double decode path section of the URI
# double-decode-query: Double decode query section of the URI
#
# uri-include-all: Include all parts of the URI. By default the
# 'scheme', username/password, hostname and port
# are excluded. Setting this option to true adds
# all of them to the normalized uri as inspected
# by http_uri, urilen, pcre with /U and the other
# keywords that inspect the normalized uri.
# Note that this does not affect http_raw_uri.
# Also, note that including all was the default in
# 1.4 and 2.0beta1.
#
# meta-field-limit: Hard size limit for request and response size
# limits. Applies to request line and headers,
# response line and headers. Does not apply to
# request or response bodies. Default is 18k.
# If this limit is reached an event is raised.
# Then, all the fields from default-config can be overloaded
#
# Currently Available Personalities:
# Minimal, Generic, IDS (default), IIS_4_0, IIS_5_0, IIS_5_1, IIS_6_0,
@@ -943,6 +906,15 @@ app-layer:
double-decode-path: no
double-decode-query: no

# Can disable LZMA decompression
#lzma-enabled: yes
# Memory limit usage for LZMA decompression dictionary
# Data is decompressed until dictionary reaches this size
#lzma-memlimit: 1mb
# Maximum decompressed size with a compression ratio
# above 2048 (only LZMA can reach this ratio, deflate cannot)
#compression-bomb-limit: 1mb

server-config:

#- apache:
@@ -1002,13 +974,16 @@ app-layer:
dp: 44818
sp: 44818

# Note: parser depends on Rust support
ntp:
enabled: yes

dhcp:
enabled: yes

# SIP, disabled by default.
sip:
#enabled: no

# Limit for the maximum number of asn1 frames to decode (default 256)
asn1-max-frames: 256

@@ -1024,9 +999,9 @@ asn1-max-frames: 256
##

# Run suricata as user and group.
#run-as:
# user: suri
# group: suri
run-as:
user: suricata
group: suricata

# Some logging modules will use that name in the event as an identifier. The default
# value is the hostname
@@ -1069,29 +1044,26 @@ host-mode: auto
# Number of packets preallocated per thread. The default is 1024. A higher number
# will make sure each CPU will be more easily kept busy, but may negatively
# impact caching.
#max-pending-packets: 1024
max-pending-packets: 5000

# Runmode the engine should use. Please check --list-runmodes to get the available
# runmodes for each packet acquisition method. Defaults to "autofp" (auto flow pinned
# load balancing).
# runmodes for each packet acquisition method. Default depends on selected capture
# method. 'workers' generally gives best performance.
runmode: workers

# Specifies the kind of flow load balancer used by the flow pinned autofp mode.
#
# Supported schedulers are:
#
# round-robin - Flows assigned to threads in a round robin fashion.
# active-packets - Flows assigned to threads that have the lowest number of
# unprocessed packets (default).
# hash - Flow allocated using the address hash. More of a random
# technique. Was the default in Suricata 1.2.1 and older.
# hash - Flow assigned to threads using the 5-7 tuple hash.
# ippair - Flow assigned to threads using addresses only.
#
#autofp-scheduler: active-packets
#autofp-scheduler: hash

# Preallocated size for packet. Default is 1514 which is the classical
# size for pcap on ethernet. You should adjust this value to the highest
# packet size (MTU + hardware header) on your system.
#default-packet-size: 1514
default-packet-size: {{ MTU + 15 }}

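# Worked out with the template values set at the top of this file: MTU 1500
# renders the line above to default-packet-size: 1515 (presumably the 14-byte
# Ethernet header on top of the MTU, with one byte to spare), and the so-helix
# MTU of 9000 renders to 9015.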
# Unix command socket can be used to pass commands to Suricata.
# An external tool can then connect to get information from Suricata
@@ -1107,6 +1079,10 @@ unix-command:
#magic-file: /usr/share/file/magic
#magic-file:

# GeoIP2 database file. Specify path and filename of GeoIP2 database
# if using rules with "geoip" rule option.
#geoip-database: /usr/local/share/GeoLite2/GeoLite2-Country.mmdb

legacy:
uricontent: enabled

@@ -1300,7 +1276,9 @@ flow-timeouts:
# inline: no # stream inline mode
# drop-invalid: yes # in inline mode, drop packets that are invalid with regards to streaming engine
# max-synack-queued: 5 # Max different SYN/ACKs to queue
# bypass: no # Bypass packets when stream.depth is reached
# bypass: no # Bypass packets when stream.reassembly.depth is reached.
# # Warning: first side to reach this triggers
# # the bypass.
#
# reassembly:
# memcap: 64mb # Can be specified in kb, mb, gb. Just a number
@@ -1373,9 +1351,22 @@ host:

decoder:
# Teredo decoder is known to not be completely accurate
# it will sometimes detect non-teredo as teredo.
# as it will sometimes detect non-teredo as teredo.
teredo:
enabled: true
# ports to look for Teredo. Max 4 ports. If no ports are given, or
# the value is set to 'any', Teredo detection runs on _all_ UDP packets.
ports: $TEREDO_PORTS # syntax: '[3544, 1234]' or '3533' or 'any'.

# VXLAN decoder is assigned to up to 4 UDP ports. By default only the
# IANA assigned port 4789 is enabled.
vxlan:
enabled: true
ports: $VXLAN_PORTS # syntax: '8472, 4789'
# ERSPAN Type I decode support
erspan:
typeI:
enabled: false


##
@@ -1484,19 +1475,26 @@ threading:
{%- if salt['pillar.get']('sensor:suriprocs') %}
cpu-affinity:
- management-cpu-set:
cpu: [ all ] # include only these cpus in affinity settings
cpu: [ all ] # include only these CPUs in affinity settings
- receive-cpu-set:
cpu: [ all ] # include only these cpus in affinity settings
cpu: [ all ] # include only these CPUs in affinity settings
- worker-cpu-set:
cpu: [ "all" ]
mode: "exclusive"
# Use explicitly 3 threads and don't compute number by using
# detect-thread-ratio variable:
# threads: 3
threads: {{ salt['pillar.get']('sensor:suriprocs') }}
prio:
default: "medium"
{% endif %}

low: [ 0 ]
medium: [ "1-2" ]
high: [ 3 ]
default: "high"
#- verdict-cpu-set:
# cpu: [ 0 ]
# prio:
# default: "high"
{%- endif -%}
{%- if salt['pillar.get']('sensor:suripins') %}
cpu-affinity:
- management-cpu-set:
@@ -1512,6 +1510,8 @@ threading:
prio:
default: "high"
{% endif %}

#
# By default Suricata creates one "detect" thread per available CPU/CPU core.
# This setting allows controlling this behaviour. A ratio setting of 2 will
# create 2 detect threads for each CPU/CPU core. So for a dual core CPU this
@@ -1545,7 +1545,7 @@ profiling:

# Profiling can be disabled here, but it will still have a
# performance impact if compiled in.
enabled: no
enabled: yes
filename: rule_perf.log
append: yes

@@ -1668,7 +1668,7 @@ capture:

# Netmap support
#
# Netmap operates with NIC directly in driver, so you need FreeBSD which have
# Netmap operates with NIC directly in driver, so you need FreeBSD 11+ which have
# built-in netmap support or compile and install netmap module and appropriate
# NIC driver on your Linux system.
# To reach maximum throughput disable all receive-, segmentation-,
@@ -1680,7 +1680,9 @@ capture:
netmap:
# To specify OS endpoint add plus sign at the end (e.g. "eth0+")
- interface: eth2
# Number of receive threads. "auto" uses number of RSS queues on interface.
# Number of capture threads. "auto" uses number of RSS queues on interface.
# Warning: unless the RSS hashing is symmetrical, this will lead to
# accuracy issues.
#threads: auto
# You can use the following variables to activate netmap tap or IPS mode.
# If copy-mode is set to ips or tap, the traffic coming to the current
@@ -1793,45 +1795,63 @@ napatech:
# (-1 = OFF, 1 - 100 = percentage of the host buffer that can be held back)
# This may be enabled when sharing streams with another application.
# Otherwise, it should be turned off.
hba: -1
#hba: -1

# use_all_streams set to "yes" will query the Napatech service for all configured
# streams and listen on all of them. When set to "no" the streams config array
# will be used.
use-all-streams: yes
# When use_all_streams is set to "yes" the initialization code will query
# the Napatech service for all configured streams and listen on all of them.
# When set to "no" the streams config array will be used.
#
# This option necessitates running the appropriate NTPL commands to create
# the desired streams prior to running suricata.
#use-all-streams: no

# The streams to listen on. This can be either:
# a list of individual streams (e.g. streams: [0,1,2,3])
# The streams to listen on when auto-config is disabled or when threading
# cpu-affinity is disabled. This can be either:
# an individual stream (e.g. streams: [0])
# or
# a range of streams (e.g. streams: ["0-3"])
#
streams: ["0-3"]

# Tilera mpipe configuration. For use on Tilera TILE-Gx.
mpipe:
# When auto-config is enabled the streams will be created and assigned
# automatically to the NUMA node where the thread resides. If cpu-affinity
# is enabled in the threading section. Then the streams will be created
# according to the number of worker threads specified in the worker cpu set.
# Otherwise, the streams array is used to define the streams.
#
# This option cannot be used simultaneously with "use-all-streams".
#
auto-config: yes

# Load balancing modes: "static", "dynamic", "sticky", or "round-robin".
load-balance: dynamic
# Ports indicates which napatech ports are to be used in auto-config mode.
# These are the port IDs of the ports that will be merged prior to the
# traffic being distributed to the streams.
#
# This can be specified in any of the following ways:
#
# a list of individual ports (e.g. ports: [0,1,2,3])
#
# a range of ports (e.g. ports: [0-3])
#
# "all" to indicate that all ports are to be merged together
# (e.g. ports: [all])
#
# This has no effect if auto-config is disabled.
#
ports: [all]

# Number of packets in each ingress packet queue. Must be 128, 512, 2028 or 65536
iqueue-packets: 2048

# List of interfaces we will listen on.
inputs:
- interface: xgbe2
- interface: xgbe3
- interface: xgbe4


# Relative weight of memory for packets of each mPipe buffer size.
stack:
size128: 0
size256: 9
size512: 0
size1024: 0
size1664: 7
size4096: 0
size10386: 0
size16384: 0
# When auto-config is enabled the hashmode specifies the algorithm for
# determining to which stream a given packet is to be delivered.
# This can be any valid Napatech NTPL hashmode command.
#
# The most common hashmode commands are: hash2tuple, hash2tuplesorted,
# hash5tuple, hash5tuplesorted and roundrobin.
#
# See Napatech NTPL documentation for other hashmodes and details on their use.
#
# This has no effect if auto-config is disabled.
#
hashmode: hash5tuplesorted

##
## Configure Suricata to load Suricata-Update managed rules.
@@ -1841,77 +1861,9 @@ mpipe:
##

default-rule-path: /etc/suricata/rules

rule-files:
- all.rules

##
## Advanced rule file configuration.
##
## If this section is completely commented out then your configuration
## is setup for suricata-update as it was most likely bundled and
## installed with Suricata.
##

#default-rule-path: /var/lib/suricata/rules

#rule-files:
# - botcc.rules
# # - botcc.portgrouped.rules
# - ciarmy.rules
# - compromised.rules
# - drop.rules
# - dshield.rules
## - emerging-activex.rules
# - emerging-attack_response.rules
# - emerging-chat.rules
# - emerging-current_events.rules
# - emerging-dns.rules
# - emerging-dos.rules
# - emerging-exploit.rules
# - emerging-ftp.rules
## - emerging-games.rules
## - emerging-icmp_info.rules
## - emerging-icmp.rules
# - emerging-imap.rules
## - emerging-inappropriate.rules
## - emerging-info.rules
# - emerging-malware.rules
# - emerging-misc.rules
# - emerging-mobile_malware.rules
# - emerging-netbios.rules
# - emerging-p2p.rules
# - emerging-policy.rules
# - emerging-pop3.rules
# - emerging-rpc.rules
## - emerging-scada.rules
## - emerging-scada_special.rules
# - emerging-scan.rules
## - emerging-shellcode.rules
# - emerging-smtp.rules
# - emerging-snmp.rules
# - emerging-sql.rules
# - emerging-telnet.rules
# - emerging-tftp.rules
# - emerging-trojan.rules
# - emerging-user_agents.rules
# - emerging-voip.rules
# - emerging-web_client.rules
# - emerging-web_server.rules
## - emerging-web_specific_apps.rules
# - emerging-worm.rules
# - tor.rules
## - decoder-events.rules # available in suricata sources under rules dir
## - stream-events.rules # available in suricata sources under rules dir
# - http-events.rules # available in suricata sources under rules dir
# - smtp-events.rules # available in suricata sources under rules dir
# - dns-events.rules # available in suricata sources under rules dir
# - tls-events.rules # available in suricata sources under rules dir
## - modbus-events.rules # available in suricata sources under rules dir
## - app-layer-events.rules # available in suricata sources under rules dir
## - dnp3-events.rules # available in suricata sources under rules dir
## - ntp-events.rules # available in suricata sources under rules dir
## - ipsec-events.rules # available in suricata sources under rules dir
## - kerberos-events.rules # available in suricata sources under rules dir
- all.rules

##
## Auxiliary configuration files.

@@ -55,6 +55,12 @@ surilogdir:
- user: 940
- group: 939

suridatadir:
file.directory:
- name: /nsm/suricata
- user: 940
- group: 939

surirulesync:
file.recurse:
- name: /opt/so/conf/suricata/rules/
@@ -119,6 +125,7 @@ so-suricata:
- /opt/so/conf/suricata/threshold.conf:/etc/suricata/threshold.conf:ro
- /opt/so/conf/suricata/rules:/etc/suricata/rules:ro
- /opt/so/log/suricata/:/var/log/suricata/:rw
- /nsm/suricata/:/nsm/:rw
- /opt/so/conf/suricata/bpf:/etc/suricata/bpf:ro
- network_mode: host
- watch:

@@ -0,0 +1,19 @@
surilocaldir:
file.directory:
- name: /opt/so/saltstack/local/salt/suricata
- user: socore
- group: socore
- makedirs: True

ruleslink:
file.symlink:
- name: /opt/so/saltstack/local/salt/suricata/rules
- user: socore
- group: socore
- target: /opt/so/rules/nids

refresh_salt_master_fileserver_suricata_ruleslink:
salt.runner:
- name: fileserver.update
- onchanges:
- file: ruleslink
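The runner above only fires when the symlink changes; the same fileserver refresh can be triggered by hand on the master with the standard Salt runner (shown here as a usage sketch, not part of the commit):

salt-run fileserver.update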
@@ -12,7 +12,7 @@ search {
# Name of the index
index = the_hive
# Name of the Elasticsearch cluster
cluster = hive
cluster = thehive
# Address of the Elasticsearch instance
host = ["{{ MASTERIP }}:9500"]
#search.uri = "http://{{ MASTERIP }}:9500"
@@ -12,7 +12,7 @@ search {
# Name of the index
index = cortex
# Name of the Elasticsearch cluster
cluster = hive
cluster = thehive
# Address of the Elasticsearch instance
host = ["{{ MASTERIP }}:9500"]
# Scroll keepalive
@@ -1,4 +1,4 @@
cluster.name: "hive"
cluster.name: "thehive"
network.host: 0.0.0.0
discovery.zen.minimum_master_nodes: 1
# This is a test -- if this is here, then the volume is mounted correctly.
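The three renames above have to move together: TheHive and Cortex talk to Elasticsearch over the transport port (9500), and the transport client generally will not connect to a cluster whose name differs from its own `cluster` setting. After this commit the values line up as:

cluster.name: "thehive"     # elasticsearch.yml
cluster = thehive           # thehive application.conf
cluster = thehive           # cortex application.conf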
@@ -1,24 +1,24 @@
{% set MASTERIP = salt['pillar.get']('master:mainip', '') %}
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
{% set MASTER = salt['grains.get']('master') %}
hiveconfdir:
thehiveconfdir:
file.directory:
- name: /opt/so/conf/hive/etc
- name: /opt/so/conf/thehive/etc
- makedirs: True
- user: 939
- group: 939

hivelogdir:
thehivelogdir:
file.directory:
- name: /opt/so/log/hive
- name: /opt/so/log/thehive
- makedirs: True
- user: 939
- group: 939

hiveconf:
thehiveconf:
file.recurse:
- name: /opt/so/conf/hive/etc
- source: salt://hive/thehive/etc
- name: /opt/so/conf/thehive/etc
- source: salt://thehive/etc
- user: 939
- group: 939
- template: jinja
@@ -40,7 +40,7 @@ cortexlogdir:
cortexconf:
file.recurse:
- name: /opt/so/conf/cortex
- source: salt://hive/thehive/etc
- source: salt://thehive/etc
- user: 939
- group: 939
- template: jinja
@@ -48,9 +48,9 @@ cortexconf:
# Install Elasticsearch

# Make a directory for ES data to live in
hiveesdata:
thehiveesdata:
file.directory:
- name: /nsm/hive/esdata
- name: /nsm/thehive/esdata
- makedirs: True
- user: 939
- group: 939
@@ -64,16 +64,16 @@ so-thehive-es:
- interactive: True
- tty: True
- binds:
- /nsm/hive/esdata:/usr/share/elasticsearch/data:rw
- /opt/so/conf/hive/etc/es/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro
- /opt/so/conf/hive/etc/es/log4j2.properties:/usr/share/elasticsearch/config/log4j2.properties:ro
- /opt/so/log/hive:/var/log/elasticsearch:rw
- /nsm/thehive/esdata:/usr/share/elasticsearch/data:rw
- /opt/so/conf/thehive/etc/es/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro
- /opt/so/conf/thehive/etc/es/log4j2.properties:/usr/share/elasticsearch/config/log4j2.properties:ro
- /opt/so/log/thehive:/var/log/elasticsearch:rw
- environment:
- http.host=0.0.0.0
- http.port=9400
- transport.tcp.port=9500
- transport.host=0.0.0.0
- cluster.name=hive
- cluster.name=thehive
- thread_pool.index.queue_size=100000
- thread_pool.search.queue_size=100000
- thread_pool.bulk.queue_size=100000
@@ -90,13 +90,13 @@ so-cortex:
- name: so-cortex
- user: 939
- binds:
- /opt/so/conf/hive/etc/cortex-application.conf:/opt/cortex/conf/application.conf:ro
- /opt/so/conf/thehive/etc/cortex-application.conf:/opt/cortex/conf/application.conf:ro
- port_bindings:
- 0.0.0.0:9001:9001

cortexscript:
cmd.script:
- source: salt://hive/thehive/scripts/cortex_init
- source: salt://thehive/scripts/cortex_init
- cwd: /opt/so
- template: jinja

@@ -109,12 +109,12 @@ so-thehive:
- name: so-thehive
- user: 939
- binds:
- /opt/so/conf/hive/etc/application.conf:/opt/thehive/conf/application.conf:ro
- /opt/so/conf/thehive/etc/application.conf:/opt/thehive/conf/application.conf:ro
- port_bindings:
- 0.0.0.0:9000:9000

hivescript:
thehivescript:
cmd.script:
- source: salt://hive/thehive/scripts/hive_init
- source: salt://thehive/scripts/hive_init
- cwd: /opt/so
- template: jinja
@@ -7,6 +7,8 @@
{%- set CORTEXORGUSER = salt['pillar.get']('static:cortexorguser', '') %}
{%- set CORTEXORGUSERKEY = salt['pillar.get']('static:cortexorguserkey', '') %}

default_salt_dir=/opt/so/saltstack/default

cortex_init(){
sleep 60
CORTEX_IP="{{MASTERIP}}"
@@ -17,7 +19,7 @@ cortex_init(){
CORTEX_ORG_DESC="{{CORTEXORGNAME}} organization created by Security Onion setup"
CORTEX_ORG_USER="{{CORTEXORGUSER}}"
CORTEX_ORG_USER_KEY="{{CORTEXORGUSERKEY}}"
SOCTOPUS_CONFIG="/opt/so/saltstack/salt/soctopus/files/SOCtopus.conf"
SOCTOPUS_CONFIG="$default_salt_dir/salt/soctopus/files/SOCtopus.conf"


# Migrate DB
@@ -0,0 +1,64 @@
#!/bin/bash
{% set MASTERIP = salt['pillar.get']('static:masterip', '') %}
{%- set THEHIVEUSER = salt['pillar.get']('static:hiveuser', '') %}
{%- set THEHIVEPASSWORD = salt['pillar.get']('static:hivepassword', '') %}
{%- set THEHIVEKEY = salt['pillar.get']('static:hivekey', '') %}

thehive_init(){
sleep 120
THEHIVE_IP="{{MASTERIP}}"
THEHIVE_USER="{{THEHIVEUSER}}"
THEHIVE_PASSWORD="{{THEHIVEPASSWORD}}"
THEHIVE_KEY="{{THEHIVEKEY}}"
SOCTOPUS_CONFIG="/opt/so/saltstack/salt/soctopus/files/SOCtopus.conf"

echo -n "Waiting for TheHive..."
COUNT=0
THEHIVE_CONNECTED="no"
while [[ "$COUNT" -le 240 ]]; do
curl --output /dev/null --silent --head --fail -k "https://$THEHIVE_IP/thehive"
if [ $? -eq 0 ]; then
THEHIVE_CONNECTED="yes"
echo "connected!"
break
else
((COUNT+=1))
sleep 1
echo -n "."
fi
done

if [ "$THEHIVE_CONNECTED" == "yes" ]; then

# Migrate DB
curl -v -k -XPOST "https://$THEHIVE_IP/thehive/api/maintenance/migrate"

# Create initial TheHive user
curl -v -k "https://$THEHIVE_IP/thehive/api/user" -H "Content-Type: application/json" -d "{\"login\" : \"$THEHIVE_USER\",\"name\" : \"$THEHIVE_USER\",\"roles\" : [\"read\",\"alert\",\"write\",\"admin\"],\"preferences\" : \"{}\",\"password\" : \"$THEHIVE_PASSWORD\", \"key\": \"$THEHIVE_KEY\"}"

# Pre-load custom fields
#
# reputation
curl -v -k "https://$THEHIVE_IP/thehive/api/list/custom_fields" -H "Authorization: Bearer $THEHIVE_KEY" -H "Content-Type: application/json" -d "{\"value\":{\"name\": \"reputation\", \"reference\": \"reputation\", \"description\": \"This field provides an overall reputation status for an address/domain.\", \"type\": \"string\", \"options\": []}}"


touch /opt/so/state/thehive.txt
else
echo "We experienced an issue connecting to TheHive!"
fi
}

if [ -f /opt/so/state/thehive.txt ]; then
exit 0
else
rm -f garbage_file
while ! wget -O garbage_file {{MASTERIP}}:9400 2>/dev/null
do
echo "Waiting for Elasticsearch..."
rm -f garbage_file
sleep 1
done
rm -f garbage_file
sleep 5
thehive_init
fi
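The touch file is what makes this script idempotent: once /opt/so/state/thehive.txt exists, every later run exits immediately. A hypothetical operator sketch for forcing the initialization to run again (commands assumed, not part of this commit):

# Remove the state marker, then re-apply the thehive state from the top file
rm -f /opt/so/state/thehive.txt
salt-call state.apply thehive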
@@ -30,6 +30,7 @@ base:
- telegraf
- firewall
- idstools
- suricata.master
- pcap
- suricata
- zeek
@@ -73,6 +74,7 @@ base:
- soc
- firewall
- idstools
- suricata.master
- healthcheck
{%- if FLEETMASTER or FLEETNODE or PLAYBOOK != 0 %}
- mysql
@@ -100,7 +102,7 @@ base:
- schedule
- soctopus
{%- if THEHIVE != 0 %}
- hive
- thehive
{%- endif %}
{%- if PLAYBOOK != 0 %}
- playbook
@@ -129,6 +131,7 @@ base:
- firewall
- master
- idstools
- suricata.master
- redis
{%- if FLEETMASTER or FLEETNODE or PLAYBOOK != 0 %}
- mysql
@@ -149,11 +152,14 @@ base:
{%- endif %}
- soctopus
{%- if THEHIVE != 0 %}
- hive
- thehive
{%- endif %}
{%- if PLAYBOOK != 0 %}
- playbook
{%- endif %}
{%- if NAVIGATOR != 0 %}
- navigator
{%- endif %}
{%- if FREQSERVER != 0 %}
- freqserver
{%- endif %}
@@ -174,6 +180,7 @@ base:
- soc
- firewall
- idstools
- suricata.master
- healthcheck
- redis
{%- if FLEETMASTER or FLEETNODE or PLAYBOOK != 0 %}
@@ -203,7 +210,7 @@ base:
- schedule
- soctopus
{%- if THEHIVE != 0 %}
- hive
- thehive
{%- endif %}
{%- if PLAYBOOK != 0 %}
- playbook
@@ -256,6 +263,7 @@ base:
- ca
- ssl
- common
- nginx
- telegraf
- firewall
{%- if WAZUH != 0 %}
@@ -297,6 +305,7 @@ base:
- firewall
- master
- idstools
- suricata.master
- redis
{%- if FLEETMASTER or FLEETNODE or PLAYBOOK != 0 %}
- mysql
@@ -318,7 +327,7 @@ base:
{%- endif %}
- soctopus
{%- if THEHIVE != 0 %}
- hive
- thehive
{%- endif %}
{%- if PLAYBOOK != 0 %}
- playbook

@@ -1,5 +1,7 @@
{%- set MASTERIP = salt['pillar.get']('static:masterip', '') %}
{%- set WAZUH_ENABLED = salt['pillar.get']('static:wazuh', '0') %}
#!/bin/bash
local_salt_dir=/opt/so/saltstack/local

# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
@@ -17,7 +19,7 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.

# Check if Wazuh enabled
if grep -q -R "wazuh: 1" /opt/so/saltstack/pillar/*; then
if [ {{ WAZUH_ENABLED }} ]; then
WAZUH_MGR_CFG="/opt/so/wazuh/etc/ossec.conf"
if ! grep -q "<white_list>{{ MASTERIP }}</white_list>" $WAZUH_MGR_CFG ; then
DATE=`date`

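One thing to watch in the new test: after Jinja rendering, the condition becomes `if [ 0 ]` or `if [ 1 ]`, and a single-argument `[` is true for any non-empty string, so both values pass the check. A stricter sketch (an assumed fix for illustration, not what the commit ships) would compare the rendered value explicitly:

if [ "{{ WAZUH_ENABLED }}" = "1" ]; then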
@@ -80,11 +80,6 @@ wazuhmgrwhitelist:
- mode: 755
- template: jinja

wazuhagentservice:
service.running:
- name: wazuh-agent
- enable: True

so-wazuh:
docker_container.running:
- image: {{ MASTER }}:5000/soshybridhunter/so-wazuh:{{ VERSION }}
@@ -110,3 +105,8 @@ whitelistmanager:
cmd.run:
- name: /usr/sbin/wazuh-manager-whitelist
- cwd: /

wazuhagentservice:
service.running:
- name: wazuh-agent
- enable: True

@@ -1,2 +1,2 @@
#!/bin/bash
/usr/bin/docker exec so-zeek /opt/zeek/bin/zeekctl netstats | awk '{print $(NF-2),$(NF-1),$NF}' | awk -F '[ =]' '{RCVD += $2;DRP += $4;TTL += $6} END { print "rcvd: " RCVD, "dropped: " DRP, "total: " TTL}' >> /nsm/zeek/logs/packetloss.log
/usr/bin/docker exec so-zeek /opt/zeek/bin/zeekctl netstats | awk '{print $(NF-2),$(NF-1),$NF}' | awk -F '[ =]' '{RCVD += $2;DRP += $4;TTL += $6} END { print "rcvd: " RCVD, "dropped: " DRP, "total: " TTL}' >> /nsm/zeek/logs/packetloss.log 2>&1
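For readers tracing that pipeline: zeekctl netstats prints one line per worker whose last three fields are key=value counters, so the first awk keeps those fields and the second splits on spaces and '=' to sum the values across all workers. A sketch under that assumed line format (the sample values are illustrative, not from this commit):

# Assumed worker line:
#   worker-1: 1602161146.498120 recvd=14 dropped=0 link=14
# First awk  -> "recvd=14 dropped=0 link=14"
# Second awk -> sums fields 2, 4 and 6 across workers and appends, e.g.:
#   rcvd: 14 dropped: 0 total: 14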