Merge remote-tracking branch 'remotes/origin/dev' into feature/fleet-setup
@@ -37,7 +37,9 @@ log_file: /opt/so/log/salt/master
 #
 file_roots:
   base:
-    - /opt/so/saltstack/salt
+    - /opt/so/saltstack/local/salt
+    - /opt/so/saltstack/default/salt
 
 
 # The master_roots setting configures a master-only copy of the file_roots dictionary,
 # used by the state compiler.
@@ -53,7 +55,8 @@ file_roots:
 
 pillar_roots:
   base:
-    - /opt/so/saltstack/pillar
+    - /opt/so/saltstack/local/pillar
+    - /opt/so/saltstack/default/pillar
 
 peer:
   .*:
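Note: Salt searches file_roots and pillar_roots in order and serves the first match, so anything under /opt/so/saltstack/local shadows the same relative path under /opt/so/saltstack/default. A minimal sketch of that lookup order (file path invented for illustration):

    # salt://common/init.sls resolves to the first root that has it:
    for root in /opt/so/saltstack/local /opt/so/saltstack/default; do
      [ -f "$root/salt/common/init.sls" ] && echo "served from $root" && break
    done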
@@ -1,7 +1,8 @@
 #!/usr/bin/env bash
 
 # This script adds sensors/nodes/etc to the nodes tab
+default_salt_dir=/opt/so/saltstack/default
+local_salt_dir=/opt/so/saltstack/local
 TYPE=$1
 NAME=$2
 IPADDRESS=$3
@@ -15,7 +16,7 @@ MONINT=$9
 #HOTNAME=$11
 
 echo "Seeing if this host is already in here. If so delete it"
-if grep -q $NAME "/opt/so/saltstack/pillar/data/$TYPE.sls"; then
+if grep -q $NAME "$local_salt_dir/pillar/data/$TYPE.sls"; then
   echo "Node Already Present - Let's re-add it"
   awk -v blah=" $NAME:" 'BEGIN{ print_flag=1 }
   {
@@ -31,27 +32,29 @@ if grep -q $NAME "/opt/so/saltstack/pillar/data/$TYPE.sls"; then
   if ( print_flag == 1 )
     print $0
 
-  } ' /opt/so/saltstack/pillar/data/$TYPE.sls > /opt/so/saltstack/pillar/data/tmp.$TYPE.sls
-  mv /opt/so/saltstack/pillar/data/tmp.$TYPE.sls /opt/so/saltstack/pillar/data/$TYPE.sls
+  } ' $local_salt_dir/pillar/data/$TYPE.sls > $local_salt_dir/pillar/data/tmp.$TYPE.sls
+  mv $local_salt_dir/pillar/data/tmp.$TYPE.sls $local_salt_dir/pillar/data/$TYPE.sls
   echo "Deleted $NAME from the tab. Now adding it in again with updated info"
 fi
-echo " $NAME:" >> /opt/so/saltstack/pillar/data/$TYPE.sls
-echo " ip: $IPADDRESS" >> /opt/so/saltstack/pillar/data/$TYPE.sls
-echo " manint: $MANINT" >> /opt/so/saltstack/pillar/data/$TYPE.sls
-echo " totalcpus: $CPUS" >> /opt/so/saltstack/pillar/data/$TYPE.sls
-echo " guid: $GUID" >> /opt/so/saltstack/pillar/data/$TYPE.sls
-echo " rootfs: $ROOTFS" >> /opt/so/saltstack/pillar/data/$TYPE.sls
-echo " nsmfs: $NSM" >> /opt/so/saltstack/pillar/data/$TYPE.sls
+echo " $NAME:" >> $local_salt_dir/pillar/data/$TYPE.sls
+echo " ip: $IPADDRESS" >> $local_salt_dir/pillar/data/$TYPE.sls
+echo " manint: $MANINT" >> $local_salt_dir/pillar/data/$TYPE.sls
+echo " totalcpus: $CPUS" >> $local_salt_dir/pillar/data/$TYPE.sls
+echo " guid: $GUID" >> $local_salt_dir/pillar/data/$TYPE.sls
+echo " rootfs: $ROOTFS" >> $local_salt_dir/pillar/data/$TYPE.sls
+echo " nsmfs: $NSM" >> $local_salt_dir/pillar/data/$TYPE.sls
 if [ $TYPE == 'sensorstab' ]; then
-  echo " monint: $MONINT" >> /opt/so/saltstack/pillar/data/$TYPE.sls
-  salt-call state.apply common queue=True
+  echo " monint: $MONINT" >> $local_salt_dir/pillar/data/$TYPE.sls
+  salt-call state.apply grafana queue=True
 fi
 if [ $TYPE == 'evaltab' ]; then
-  echo " monint: $MONINT" >> /opt/so/saltstack/pillar/data/$TYPE.sls
-  salt-call state.apply common queue=True
+  echo " monint: $MONINT" >> $local_salt_dir/pillar/data/$TYPE.sls
+  if [ ! ${10} ]; then
+    salt-call state.apply grafana queue=True
     salt-call state.apply utility queue=True
   fi
+fi
 #if [ $TYPE == 'nodestab' ]; then
-# echo " nodetype: $NODETYPE" >> /opt/so/saltstack/pillar/data/$TYPE.sls
-# echo " hotname: $HOTNAME" >> /opt/so/saltstack/pillar/data/$TYPE.sls
+# echo " nodetype: $NODETYPE" >> $local_salt_dir/pillar/data/$TYPE.sls
+# echo " hotname: $HOTNAME" >> $local_salt_dir/pillar/data/$TYPE.sls
 #fi
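For reference, the echo block above appends one YAML entry per node to the pillar data file; a hypothetical run with TYPE=sensorstab and NAME=sensor1 (all values invented, indentation approximated) leaves roughly:

    # $local_salt_dir/pillar/data/sensorstab.sls
    # sensorstab:
    #   sensor1:
    #     ip: 10.1.2.3
    #     manint: bond0
    #     totalcpus: 16
    #     ...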
@@ -1 +0,0 @@
-evaltab:

@@ -1 +0,0 @@
-mastersearchtab:

@@ -1 +0,0 @@
-mastertab:

@@ -1 +0,0 @@
-nodestab:

@@ -1 +0,0 @@
-sensorstab:
@@ -1,13 +1,13 @@
 #!/usr/bin/env bash
 
 # This script adds ip addresses to specific rule sets defined by the user
+local_salt_dir=/opt/so/saltstack/local
 POLICY=$1
 IPADDRESS=$2
 
-if grep -q $2 "/opt/so/saltstack/pillar/firewall/$1.sls"; then
+if grep -q $2 "$local_salt_dir/pillar/firewall/$1.sls"; then
   echo "Firewall Rule Already There"
 else
-  echo " - $2" >> /opt/so/saltstack/pillar/firewall/$1.sls
+  echo " - $2" >> $local_salt_dir/pillar/firewall/$1.sls
   salt-call state.apply firewall queue=True
 fi
@@ -2,5 +2,6 @@ logstash:
   pipelines:
     master:
       config:
+        - so/0009_input_beats.conf
         - so/0010_input_hhbeats.conf
         - so/9999_output_redis.conf.jinja
@@ -5,12 +5,12 @@ logstash:
         - so/0900_input_redis.conf.jinja
         - so/9000_output_zeek.conf.jinja
         - so/9002_output_import.conf.jinja
+        - so/9034_output_syslog.conf.jinja
         - so/9100_output_osquery.conf.jinja
         - so/9400_output_suricata.conf.jinja
         - so/9500_output_beats.conf.jinja
         - so/9600_output_ossec.conf.jinja
         - so/9700_output_strelka.conf.jinja
       templates:
-        - so/so-beats-template.json
         - so/so-common-template.json
         - so/so-zeek-template.json
@@ -62,6 +62,7 @@ commonpkgs:
       - python3-dateutil
       - python3-m2crypto
       - python3-mysqldb
+      - git
 heldpackages:
   pkg.installed:
     - pkgs:
@@ -96,12 +97,13 @@ commonpkgs:
       - device-mapper-persistent-data
       - lvm2
       - openssl
+      - git
 
 heldpackages:
   pkg.installed:
     - pkgs:
       - containerd.io: 1.2.13-3.2.el7
-      - docker-ce: 3:19.03.9-3.el7
+      - docker-ce: 3:19.03.11-3.el7
       - hold: True
       - update_holds: True
 {% endif %}
@@ -17,6 +17,9 @@
 
 . /usr/sbin/so-common
 
+default_salt_dir=/opt/so/saltstack/default
+local_salt_dir=/opt/so/saltstack/local
+
 SKIP=0
 
 while getopts "abowi:" OPTION
@@ -80,10 +83,10 @@ if [ "$SKIP" -eq 0 ]; then
 fi
 
 echo "Adding $IP to the $FULLROLE role. This can take a few seconds"
-/opt/so/saltstack/pillar/firewall/addfirewall.sh $FULLROLE $IP
+$default_salt_dir/pillar/firewall/addfirewall.sh $FULLROLE $IP
 
 # Check if Wazuh enabled
-if grep -q -R "wazuh: 1" /opt/so/saltstack/pillar/*; then
+if grep -q -R "wazuh: 1" $local_salt_dir/pillar/*; then
   # If analyst, add to Wazuh AR whitelist
   if [ "$FULLROLE" == "analyst" ]; then
     WAZUH_MGR_CFG="/opt/so/wazuh/etc/ossec.conf"
@@ -1,11 +1,12 @@
 #!/bin/bash
+local_salt_dir=/opt/so/saltstack/local
 
 bro_logs_enabled() {
 
-  echo "brologs:" > /opt/so/saltstack/pillar/brologs.sls
-  echo " enabled:" >> /opt/so/saltstack/pillar/brologs.sls
+  echo "brologs:" > $local_salt_dir/pillar/brologs.sls
+  echo " enabled:" >> $local_salt_dir/pillar/brologs.sls
   for BLOG in ${BLOGS[@]}; do
-    echo " - $BLOG" | tr -d '"' >> /opt/so/saltstack/pillar/brologs.sls
+    echo " - $BLOG" | tr -d '"' >> $local_salt_dir/pillar/brologs.sls
   done
 
 }
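A hypothetical run with BLOGS=(conn dns http) would leave $local_salt_dir/pillar/brologs.sls looking roughly like (spacing approximated):

    # brologs:
    #   enabled:
    #     - conn
    #     - dns
    #     - http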
@@ -1,5 +1,5 @@
 #!/bin/bash
-
+#
 # Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
 #
 # This program is free software: you can redistribute it and/or modify
@@ -17,4 +17,5 @@
 
 . /usr/sbin/so-common
 
-/usr/sbin/so-restart cortex $1
+/usr/sbin/so-stop cortex $1
+/usr/sbin/so-start thehive $1

@@ -17,4 +17,4 @@
 
 . /usr/sbin/so-common
 
-/usr/sbin/so-start cortex $1
+/usr/sbin/so-start thehive $1

@@ -1,5 +1,5 @@
 #!/bin/bash
-
+#
 # Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
 #
 # This program is free software: you can redistribute it and/or modify
salt/common/tools/sbin/so-docker-refresh (new file, 112 lines)
@@ -0,0 +1,112 @@
+#!/bin/bash
+
+# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+got_root(){
+  if [ "$(id -u)" -ne 0 ]; then
+    echo "This script must be run using sudo!"
+    exit 1
+  fi
+}
+
+master_check() {
+  # Check to see if this is a master
+  MASTERCHECK=$(cat /etc/salt/grains | grep role | awk '{print $2}')
+  if [ $MASTERCHECK == 'so-eval' ] || [ $MASTERCHECK == 'so-master' ] || [ $MASTERCHECK == 'so-mastersearch' ] || [ $MASTERCHECK == 'so-standalone' ] || [ $MASTERCHECK == 'so-helix' ]; then
+    echo "This is a master. We can proceed"
+  else
+    echo "Please run soup on the master. The master controls all updates."
+    exit 1
+  fi
+}
+
+update_docker_containers() {
+
+  # Download the containers from the interwebs
+  for i in "${TRUSTED_CONTAINERS[@]}"
+  do
+    # Pull down the trusted docker image
+    echo "Downloading $i"
+    docker pull --disable-content-trust=false docker.io/soshybridhunter/$i
+    # Tag it with the new registry destination
+    docker tag soshybridhunter/$i $HOSTNAME:5000/soshybridhunter/$i
+    docker push $HOSTNAME:5000/soshybridhunter/$i
+  done
+
+}
+
+version_check() {
+  if [ -f /etc/soversion ]; then
+    VERSION=$(cat /etc/soversion)
+  else
+    echo "Unable to detect version. I will now terminate."
+    exit 1
+  fi
+}
+got_root
+master_check
+version_check
+
+# Use the hostname
+HOSTNAME=$(hostname)
+BUILD=HH
+# List all the containers
+if [ $MASTERCHECK != 'so-helix' ]; then
+  TRUSTED_CONTAINERS=( \
+    "so-acng:$BUILD$VERSION" \
+    "so-thehive-cortex:$BUILD$VERSION" \
+    "so-curator:$BUILD$VERSION" \
+    "so-domainstats:$BUILD$VERSION" \
+    "so-elastalert:$BUILD$VERSION" \
+    "so-elasticsearch:$BUILD$VERSION" \
+    "so-filebeat:$BUILD$VERSION" \
+    "so-fleet:$BUILD$VERSION" \
+    "so-fleet-launcher:$BUILD$VERSION" \
+    "so-freqserver:$BUILD$VERSION" \
+    "so-grafana:$BUILD$VERSION" \
+    "so-idstools:$BUILD$VERSION" \
+    "so-influxdb:$BUILD$VERSION" \
+    "so-kibana:$BUILD$VERSION" \
+    "so-kratos:$BUILD$VERSION" \
+    "so-logstash:$BUILD$VERSION" \
+    "so-mysql:$BUILD$VERSION" \
+    "so-navigator:$BUILD$VERSION" \
+    "so-nginx:$BUILD$VERSION" \
+    "so-playbook:$BUILD$VERSION" \
+    "so-redis:$BUILD$VERSION" \
+    "so-soc:$BUILD$VERSION" \
+    "so-soctopus:$BUILD$VERSION" \
+    "so-steno:$BUILD$VERSION" \
+    "so-strelka:$BUILD$VERSION" \
+    "so-suricata:$BUILD$VERSION" \
+    "so-telegraf:$BUILD$VERSION" \
+    "so-thehive:$BUILD$VERSION" \
+    "so-thehive-es:$BUILD$VERSION" \
+    "so-wazuh:$BUILD$VERSION" \
+    "so-zeek:$BUILD$VERSION" )
+else
+  TRUSTED_CONTAINERS=( \
+    "so-filebeat:$BUILD$VERSION" \
+    "so-idstools:$BUILD$VERSION" \
+    "so-logstash:$BUILD$VERSION" \
+    "so-nginx:$BUILD$VERSION" \
+    "so-redis:$BUILD$VERSION" \
+    "so-steno:$BUILD$VERSION" \
+    "so-suricata:$BUILD$VERSION" \
+    "so-telegraf:$BUILD$VERSION" \
+    "so-zeek:$BUILD$VERSION" )
+fi
+
+update_docker_containers
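Usage sketch for the new script (run on the master; BUILD=HH plus the version from /etc/soversion form the tag, 1.2.2 assumed here): each loop iteration is equivalent to the following for a single image:

    sudo so-docker-refresh
    # per-image equivalent, e.g. so-nginx with VERSION=1.2.2:
    docker pull --disable-content-trust=false docker.io/soshybridhunter/so-nginx:HH1.2.2
    docker tag soshybridhunter/so-nginx:HH1.2.2 "$(hostname):5000/soshybridhunter/so-nginx:HH1.2.2"
    docker push "$(hostname):5000/soshybridhunter/so-nginx:HH1.2.2"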
@@ -15,12 +15,13 @@
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 
+default_salt_dir=/opt/so/saltstack/default
 ELASTICSEARCH_HOST="{{ MASTERIP}}"
 ELASTICSEARCH_PORT=9200
 #ELASTICSEARCH_AUTH=""
 
 # Define a default directory to load pipelines from
-ELASTICSEARCH_TEMPLATES="/opt/so/saltstack/salt/logstash/pipelines/templates/so/"
+ELASTICSEARCH_TEMPLATES="$default_salt_dir/salt/logstash/pipelines/templates/so/"
 
 # Wait for ElasticSearch to initialize
 echo -n "Waiting for ElasticSearch..."
@@ -15,10 +15,11 @@
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 
 . /usr/sbin/so-common
+local_salt_dir=/opt/so/saltstack/local
 
-VERSION=$(grep soversion /opt/so/saltstack/pillar/static.sls | cut -d':' -f2|sed 's/ //g')
+VERSION=$(grep soversion $local_salt_dir/pillar/static.sls | cut -d':' -f2|sed 's/ //g')
 # Modify static.sls to enable Features
-sed -i 's/features: False/features: True/' /opt/so/saltstack/pillar/static.sls
+sed -i 's/features: False/features: True/' $local_salt_dir/pillar/static.sls
 SUFFIX="-features"
 TRUSTED_CONTAINERS=( \
   "so-elasticsearch:$VERSION$SUFFIX" \
@@ -1,4 +1,7 @@
 #!/bin/bash
 
+local_salt_dir=/opt/so/saltstack/local
+
 got_root() {
 
   # Make sure you are root
@@ -10,13 +13,13 @@ got_root() {
 }
 
 got_root
-if [ ! -f /opt/so/saltstack/pillar/fireeye/init.sls ]; then
+if [ ! -f $local_salt_dir/pillar/fireeye/init.sls ]; then
   echo "This is not configured for Helix Mode. Please re-install."
   exit
 else
   echo "Enter your Helix API Key: "
   read APIKEY
-  sed -i "s/^ api_key.*/ api_key: $APIKEY/g" /opt/so/saltstack/pillar/fireeye/init.sls
+  sed -i "s/^ api_key.*/ api_key: $APIKEY/g" $local_salt_dir/pillar/fireeye/init.sls
   docker stop so-logstash
   docker rm so-logstash
   echo "Restarting Logstash for updated key"
salt/common/tools/sbin/so-saltstack-update (new file, 57 lines)
@@ -0,0 +1,57 @@
+#!/bin/bash
+
+# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+default_salt_dir=/opt/so/saltstack/default
+clone_to_tmp() {
+
+  # TODO Need to add a air gap option
+  # Make a temp location for the files
+  mkdir /tmp/sogh
+  cd /tmp/sogh
+  #git clone -b dev https://github.com/Security-Onion-Solutions/securityonion-saltstack.git
+  git clone https://github.com/Security-Onion-Solutions/securityonion-saltstack.git
+  cd /tmp
+
+}
+
+copy_new_files() {
+
+  # Copy new files over to the salt dir
+  cd /tmp/sogh/securityonion-saltstack
+  git checkout $BRANCH
+  rsync -a --exclude-from 'exclude-list.txt' salt $default_salt_dir/
+  rsync -a --exclude-from 'exclude-list.txt' pillar $default_salt_dir/
+  chown -R socore:socore $default_salt_dir/salt
+  chown -R socore:socore $default_salt_dir/pillar
+  chmod 755 $default_salt_dir/pillar/firewall/addfirewall.sh
+  rm -rf /tmp/sogh
+}
+
+got_root(){
+  if [ "$(id -u)" -ne 0 ]; then
+    echo "This script must be run using sudo!"
+    exit 1
+  fi
+}
+
+got_root
+if [ $# -ne 1 ] ; then
+  BRANCH=master
+else
+  BRANCH=$1
+fi
+clone_to_tmp
+copy_new_files
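Usage sketch (the single optional argument selects the branch):

    sudo so-saltstack-update        # syncs the master branch into /opt/so/saltstack/default
    sudo so-saltstack-update dev    # syncs the dev branch instead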
@@ -32,5 +32,5 @@ fi
 case $1 in
   "all") salt-call state.highstate queue=True;;
   "steno") if docker ps | grep -q so-$1; then printf "\n$1 is already running!\n\n"; else docker rm so-$1 >/dev/null 2>&1 ; salt-call state.apply pcap queue=True; fi ;;
-  *) if docker ps | grep -q so-$1; then printf "\n$1 is already running\n\n"; else docker rm so-$1 >/dev/null 2>&1 ; salt-call state.apply $1 queue=True; fi ;;
+  *) if docker ps | grep -E -q "^so-$1$"; then printf "\n$1 is already running\n\n"; else docker rm so-$1 >/dev/null 2>&1 ; salt-call state.apply $1 queue=True; fi ;;
 esac
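The quoting is significant here: bash does not expand parameters inside single quotes, so the anchored pattern must be double-quoted for $1 to be substituted. A quick demonstration:

    set -- steno
    echo '^so-$1$'    # prints the literal string ^so-$1$
    echo "^so-$1$"    # prints ^so-steno$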
salt/common/tools/sbin/so-thehive-es-restart (new executable file, 21 lines)
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+. /usr/sbin/so-common
+
+/usr/sbin/so-stop thehive-es $1
+/usr/sbin/so-start thehive $1

salt/common/tools/sbin/so-thehive-es-start (new executable file, 20 lines)
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+. /usr/sbin/so-common
+
+/usr/sbin/so-start thehive $1

salt/common/tools/sbin/so-thehive-es-stop (new executable file, 20 lines)
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+. /usr/sbin/so-common
+
+/usr/sbin/so-stop thehive-es $1
@@ -1,5 +1,5 @@
 #!/bin/bash
-
+#
 # Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
 #
 # This program is free software: you can redistribute it and/or modify

@@ -1,5 +1,5 @@
 #!/bin/bash
-
+#
 # Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
 #
 # This program is free software: you can redistribute it and/or modify
salt/common/tools/sbin/so-zeek-stats (new file, 39 lines)
@@ -0,0 +1,39 @@
+#!/bin/bash
+
+# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+# Show Zeek stats (capstats, netstats)
+
+show_stats() {
+  echo '##############'
+  echo '# Zeek Stats #'
+  echo '##############'
+  echo
+  echo "Average throughput:"
+  echo
+  docker exec -it so-zeek /opt/zeek/bin/zeekctl capstats
+  echo
+  echo "Average packet loss:"
+  echo
+  docker exec -it so-zeek /opt/zeek/bin/zeekctl netstats
+  echo
+}
+
+if docker ps | grep -q zeek; then
+  show_stats
+else
+  echo "Zeek is not running! Try starting it with 'so-zeek-start'." && exit 1;
+fi
@@ -89,7 +89,7 @@ curdel:
 
 so-curatorcloseddeletecron:
   cron.present:
-    - name: /usr/sbin/so-curator-closed-delete
+    - name: /usr/sbin/so-curator-closed-delete > /opt/so/log/curator/cron-closed-delete.log 2>&1
     - user: root
     - minute: '*'
     - hour: '*'
@@ -99,7 +99,7 @@ so-curatorcloseddeletecron:
 
 so-curatorclosecron:
   cron.present:
-    - name: /usr/sbin/so-curator-close
+    - name: /usr/sbin/so-curator-close > /opt/so/log/curator/cron-close.log 2>&1
     - user: root
     - minute: '*'
     - hour: '*'
@@ -109,7 +109,7 @@ so-curatorclosecron:
 
 so-curatordeletecron:
   cron.present:
-    - name: /usr/sbin/so-curator-delete
+    - name: /usr/sbin/so-curator-delete > /opt/so/log/curator/cron-delete.log 2>&1
     - user: root
     - minute: '*'
     - hour: '*'
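With minute and hour both '*', each cron.present state above renders a crontab entry of this shape (first one shown):

    * * * * * /usr/sbin/so-curator-closed-delete > /opt/so/log/curator/cron-closed-delete.log 2>&1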
salt/elasticsearch/files/ingest/beats.common (new file, 35 lines)
@@ -0,0 +1,35 @@
+{
+  "description" : "beats.common",
+  "processors" : [
+    {"community_id": {"if": "ctx.winlog.event_data?.Protocol != null", "field":["winlog.event_data.SourceIp","winlog.event_data.SourcePort","winlog.event_data.DestinationIp","winlog.event_data.DestinationPort","winlog.event_data.Protocol"],"target_field":"network.community_id"}},
+    { "set": { "if": "ctx.winlog?.channel != null", "field": "dataset", "value": "wel-{{winlog.channel}}", "override": true } },
+    { "set": { "if": "ctx.agent?.type != null", "field": "module", "value": "{{agent.type}}", "override": true } },
+    { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 3", "field": "event.category", "value": "host,process,network", "override": true } },
+    { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 1", "field": "event.category", "value": "host,process", "override": true } },
+    { "rename": { "field": "agent.hostname", "target_field": "agent.name", "ignore_missing": true } },
+    { "rename": { "field": "winlog.event_data.SubjectUserName", "target_field": "user.name", "ignore_missing": true } },
+    { "rename": { "field": "winlog.event_data.DestinationHostname", "target_field": "destination.hostname", "ignore_missing": true } },
+    { "rename": { "field": "winlog.event_data.DestinationIp", "target_field": "destination.ip", "ignore_missing": true } },
+    { "rename": { "field": "winlog.event_data.DestinationPort", "target_field": "destination.port", "ignore_missing": true } },
+    { "rename": { "field": "winlog.event_data.Image", "target_field": "process.executable", "ignore_missing": true } },
+    { "rename": { "field": "winlog.event_data.ProcessID", "target_field": "process.pid", "ignore_missing": true } },
+    { "rename": { "field": "winlog.event_data.ProcessGuid", "target_field": "process.entity_id", "ignore_missing": true } },
+    { "rename": { "field": "winlog.event_data.CommandLine", "target_field": "process.command_line", "ignore_missing": true } },
+    { "rename": { "field": "winlog.event_data.CurrentDirectory", "target_field": "process.working_directory", "ignore_missing": true } },
+    { "rename": { "field": "winlog.event_data.Description", "target_field": "process.pe.description", "ignore_missing": true } },
+    { "rename": { "field": "winlog.event_data.Product", "target_field": "process.pe.product", "ignore_missing": true } },
+    { "rename": { "field": "winlog.event_data.OriginalFileName", "target_field": "process.pe.original_file_name", "ignore_missing": true } },
+    { "rename": { "field": "winlog.event_data.FileVersion", "target_field": "process.pe.file_version", "ignore_missing": true } },
+    { "rename": { "field": "winlog.event_data.ParentCommandLine", "target_field": "process.parent.command_line", "ignore_missing": true } },
+    { "rename": { "field": "winlog.event_data.ParentImage", "target_field": "process.parent.executable", "ignore_missing": true } },
+    { "rename": { "field": "winlog.event_data.ParentProcessGuid", "target_field": "process.parent.entity_id", "ignore_missing": true } },
+    { "rename": { "field": "winlog.event_data.ParentProcessId", "target_field": "process.ppid", "ignore_missing": true } },
+    { "rename": { "field": "winlog.event_data.Protocol", "target_field": "network.transport", "ignore_missing": true } },
+    { "rename": { "field": "winlog.event_data.User", "target_field": "user.name", "ignore_missing": true } },
+    { "rename": { "field": "winlog.event_data.SourceHostname", "target_field": "source.hostname", "ignore_missing": true } },
+    { "rename": { "field": "winlog.event_data.SourceIp", "target_field": "source.ip", "ignore_missing": true } },
+    { "rename": { "field": "winlog.event_data.SourcePort", "target_field": "source.port", "ignore_missing": true } },
+    { "rename": { "field": "winlog.event_data.targetFilename", "target_field": "file.target", "ignore_missing": true } },
+    { "pipeline": { "name": "common" } }
+  ]
+}
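A sketch of loading this pipeline by hand, assuming it is registered under the id beats.common and Elasticsearch answers locally on 9200:

    curl -s -XPUT "localhost:9200/_ingest/pipeline/beats.common" \
      -H 'Content-Type: application/json' \
      -d @salt/elasticsearch/files/ingest/beats.common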
salt/elasticsearch/files/ingest/syslog (new file, 17 lines)
@@ -0,0 +1,17 @@
+{
+  "description" : "syslog",
+  "processors" : [
+    {
+      "dissect": {
+        "field": "message",
+        "pattern" : "%{message}",
+        "on_failure": [ { "drop" : { } } ]
+      },
+      "remove": {
+        "field": [ "type", "agent" ],
+        "ignore_failure": true
+      }
+    },
+    { "pipeline": { "name": "common" } }
+  ]
+}
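The dissect pattern %{message} re-captures the whole line (dropping the event if dissection fails), the remove processor strips the type and agent fields, and the result is handed to the common pipeline. A sketch of exercising it with the _simulate API (host assumed, pipeline assumed loaded as "syslog"):

    curl -s -XPOST "localhost:9200/_ingest/pipeline/syslog/_simulate" \
      -H 'Content-Type: application/json' \
      -d '{"docs":[{"_source":{"message":"<34>Oct 11 22:14:15 host su: test","type":"syslog","agent":"fb"}}]}'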
@@ -17,7 +17,7 @@
 { "rename": { "field": "message2.orig_ip_bytes", "target_field": "client.ip_bytes", "ignore_missing": true } },
 { "rename": { "field": "message2.resp_pkts", "target_field": "server.packets", "ignore_missing": true } },
 { "rename": { "field": "message2.resp_ip_bytes", "target_field": "server.ip_bytes", "ignore_missing": true } },
-{ "rename": { "field": "message2.tunnel_parents", "target_field": "connection.tunnel_parents", "ignore_missing": true } },
+{ "rename": { "field": "message2.tunnel_parents", "target_field": "log.id.tunnel_parents", "ignore_missing": true } },
 { "rename": { "field": "message2.orig_cc", "target_field": "client.country_code","ignore_missing": true } },
 { "rename": { "field": "message2.resp_cc", "target_field": "server.country_code", "ignore_missing": true } },
 { "rename": { "field": "message2.sensorname", "target_field": "observer.name", "ignore_missing": true } },

@@ -5,7 +5,7 @@
 { "json": { "field": "message", "target_field": "message2", "ignore_failure": true } },
 { "rename": { "field": "message2.username", "target_field": "user.name", "ignore_missing": true } },
 { "rename": { "field": "message2.mac", "target_field": "host.mac", "ignore_missing": true } },
-{ "rename": { "field": "message2.framed_addr", "target_field": "framed_addr", "ignore_missing": true } },
+{ "rename": { "field": "message2.framed_addr", "target_field": "radius.framed_address", "ignore_missing": true } },
 { "rename": { "field": "message2.remote_ip", "target_field": "destination.ip", "ignore_missing": true } },
 { "rename": { "field": "message2.connect_info", "target_field": "radius.connect_info", "ignore_missing": true } },
 { "rename": { "field": "message2.reply_msg", "target_field": "radius.reply_message", "ignore_missing": true } },

@@ -3,7 +3,7 @@
 "processors" : [
 { "remove": { "field": ["host"], "ignore_failure": true } },
 { "json": { "field": "message", "target_field": "message2", "ignore_failure": true } },
-{ "rename": { "field": "message2.uid", "target_field": "uid", "ignore_missing": true } },
+{ "rename": { "field": "message2.uid", "target_field": "log.id.uid", "ignore_missing": true } },
 { "dot_expander": { "field": "id.orig_h", "path": "message2", "ignore_failure": true } },
 { "rename": { "field": "message2.id.orig_h", "target_field": "source.ip", "ignore_missing": true } },
 { "dot_expander": { "field": "id.orig_p", "path": "message2", "ignore_failure": true } },
@@ -75,6 +75,32 @@ filebeat.modules:
 filebeat.inputs:
 #------------------------------ Log prospector --------------------------------
 {%- if grains['role'] == 'so-sensor' or grains['role'] == "so-eval" or grains['role'] == "so-helix" or grains['role'] == "so-heavynode" or grains['role'] == "so-standalone" %}
+
+- type: udp
+  enabled: true
+  host: "0.0.0.0:514"
+  fields:
+    module: syslog
+    dataset: syslog
+  pipeline: "syslog"
+  index: "so-syslog-%{+yyyy.MM.dd}"
+  processors:
+    - drop_fields:
+        fields: ["source", "prospector", "input", "offset", "beat"]
+  fields_under_root: true
+
+- type: tcp
+  enabled: true
+  host: "0.0.0.0:514"
+  fields:
+    module: syslog
+    dataset: syslog
+  pipeline: "syslog"
+  index: "so-syslog-%{+yyyy.MM.dd}"
+  processors:
+    - drop_fields:
+        fields: ["source", "prospector", "input", "offset", "beat"]
+  fields_under_root: true
 {%- if BROVER != 'SURICATA' %}
 {%- for LOGNAME in salt['pillar.get']('brologs:enabled', '') %}
 - type: log
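Both inputs listen on port 514 and route events through the syslog ingest pipeline into daily so-syslog-* indices. A hypothetical smoke test from a permitted host (master IP assumed):

    logger -n 192.0.2.10 -P 514 -d "so-syslog input test"    # -d sends the message over UDP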
@@ -57,12 +57,14 @@ so-filebeat:
       - /opt/so/conf/filebeat/etc/filebeat.yml:/usr/share/filebeat/filebeat.yml:ro
       - /nsm/zeek:/nsm/zeek:ro
       - /nsm/strelka/log:/nsm/strelka/log:ro
-      - /opt/so/log/suricata:/suricata:ro
+      - /nsm/suricata:/suricata:ro
       - /opt/so/wazuh/logs/alerts:/wazuh/alerts:ro
       - /opt/so/wazuh/logs/archives:/wazuh/archives:ro
       - /nsm/osquery/fleet/:/nsm/osquery/fleet:ro
       - /opt/so/conf/filebeat/etc/pki/filebeat.crt:/usr/share/filebeat/filebeat.crt:ro
       - /opt/so/conf/filebeat/etc/pki/filebeat.key:/usr/share/filebeat/filebeat.key:ro
       - /etc/ssl/certs/intca.crt:/usr/share/filebeat/intraca.crt:ro
+    - port_bindings:
+      - 0.0.0.0:514:514/udp
     - watch:
       - file: /opt/so/conf/filebeat/etc/filebeat.yml
@@ -137,6 +137,18 @@ enable_wazuh_manager_1514_udp_{{ip}}:
     - position: 1
     - save: True
+
+# Allow syslog
+enable_syslog_514_{{ip}}:
+  iptables.insert:
+    - table: filter
+    - chain: DOCKER-USER
+    - jump: ACCEPT
+    - proto: tcp
+    - source: {{ ip }}
+    - dport: 514
+    - position: 1
+    - save: True
 
 # Rules if you are a Master
 {% if grains['role'] in ['so-master', 'so-eval', 'so-helix', 'so-mastersearch', 'so-standalone'] %}
 #This should be more granular
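The inserted rule can be checked on the master as below; note this state opens TCP/514 only, while the filebeat container also publishes UDP/514, so UDP senders may additionally need to be allowed:

    iptables -t filter -L DOCKER-USER -n --line-numbers | grep 514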
@@ -1,64 +0,0 @@
-#!/bin/bash
-{% set MASTERIP = salt['pillar.get']('static:masterip', '') %}
-{%- set HIVEUSER = salt['pillar.get']('static:hiveuser', '') %}
-{%- set HIVEPASSWORD = salt['pillar.get']('static:hivepassword', '') %}
-{%- set HIVEKEY = salt['pillar.get']('static:hivekey', '') %}
-
-hive_init(){
-  sleep 120
-  HIVE_IP="{{MASTERIP}}"
-  HIVE_USER="{{HIVEUSER}}"
-  HIVE_PASSWORD="{{HIVEPASSWORD}}"
-  HIVE_KEY="{{HIVEKEY}}"
-  SOCTOPUS_CONFIG="/opt/so/saltstack/salt/soctopus/files/SOCtopus.conf"
-
-  echo -n "Waiting for TheHive..."
-  COUNT=0
-  HIVE_CONNECTED="no"
-  while [[ "$COUNT" -le 240 ]]; do
-    curl --output /dev/null --silent --head --fail -k "https://$HIVE_IP/thehive"
-    if [ $? -eq 0 ]; then
-      HIVE_CONNECTED="yes"
-      echo "connected!"
-      break
-    else
-      ((COUNT+=1))
-      sleep 1
-      echo -n "."
-    fi
-  done
-
-  if [ "$HIVE_CONNECTED" == "yes" ]; then
-
-    # Migrate DB
-    curl -v -k -XPOST "https://$HIVE_IP:/thehive/api/maintenance/migrate"
-
-    # Create intial TheHive user
-    curl -v -k "https://$HIVE_IP/thehive/api/user" -H "Content-Type: application/json" -d "{\"login\" : \"$HIVE_USER\",\"name\" : \"$HIVE_USER\",\"roles\" : [\"read\",\"alert\",\"write\",\"admin\"],\"preferences\" : \"{}\",\"password\" : \"$HIVE_PASSWORD\", \"key\": \"$HIVE_KEY\"}"
-
-    # Pre-load custom fields
-    #
-    # reputation
-    curl -v -k "https://$HIVE_IP/thehive/api/list/custom_fields" -H "Authorization: Bearer $HIVE_KEY" -H "Content-Type: application/json" -d "{\"value\":{\"name\": \"reputation\", \"reference\": \"reputation\", \"description\": \"This field provides an overall reputation status for an address/domain.\", \"type\": \"string\", \"options\": []}}"
-
-
-    touch /opt/so/state/thehive.txt
-  else
-    echo "We experienced an issue connecting to TheHive!"
-  fi
-}
-
-if [ -f /opt/so/state/thehive.txt ]; then
-  exit 0
-else
-  rm -f garbage_file
-  while ! wget -O garbage_file {{MASTERIP}}:9400 2>/dev/null
-  do
-    echo "Waiting for Elasticsearch..."
-    rm -f garbage_file
-    sleep 1
-  done
-  rm -f garbage_file
-  sleep 5
-  hive_init
-fi
@@ -39,7 +39,7 @@ idstoolsetcsync:
 
 so-ruleupdatecron:
   cron.present:
-    - name: /usr/sbin/so-rule-update.sh > /opt/so/log/idstools/download.log
+    - name: /usr/sbin/so-rule-update > /opt/so/log/idstools/download.log 2>&1
     - user: root
     - minute: '1'
     - hour: '7'
@@ -58,11 +58,6 @@ synclocalnidsrules:
     - user: 939
     - group: 939
 
-ruleslink:
-  file.symlink:
-    - name: /opt/so/saltstack/salt/suricata/rules
-    - target: /opt/so/rules/nids
-
 so-idstools:
   docker_container.running:
     - image: {{ MASTER }}:5000/soshybridhunter/so-idstools:{{ VERSION }}
@@ -198,7 +198,7 @@ so-logstash:
       - /etc/pki/ca.crt:/usr/share/filebeat/ca.crt:ro
 {%- if grains['role'] == 'so-eval' %}
       - /nsm/zeek:/nsm/zeek:ro
-      - /opt/so/log/suricata:/suricata:ro
+      - /nsm/suricata:/suricata:ro
       - /opt/so/wazuh/logs/alerts:/wazuh/alerts:ro
       - /opt/so/wazuh/logs/archives:/wazuh/archives:ro
       - /opt/so/log/fleet/:/osquery/logs:ro
@@ -0,0 +1 @@
+# For custom logstash configs, they should be placed in /opt/so/saltstack/local/salt/logstash/pipelines/config/custom/

salt/logstash/pipelines/config/so/0009_input_beats.conf (new file, 6 lines)
@@ -0,0 +1,6 @@
+input {
+  beats {
+    port => "5044"
+    tags => [ "beat-ext" ]
+  }
+}
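An external Beat would point its output.logstash hosts at this listener on port 5044; events arriving here pick up the beat-ext tag. A quick connectivity check from the forwarding host (master IP assumed):

    nc -zv 192.0.2.10 5044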
@@ -3,24 +3,21 @@
 {%- else %}
 {%- set ES = salt['pillar.get']('node:mainip', '') -%}
 {%- endif %}
-# Author: Justin Henderson
-# SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics
-# Updated by: Doug Burks
-# Last Update: 5/15/2017
 
 filter {
-  if "syslog" in [tags] and "test_data" not in [tags] {
+  if [module] =~ "syslog" {
     mutate {
-      ##add_tag => [ "conf_file_9034"]
+      ##add_tag => [ "conf_file_9000"]
     }
   }
 }
 output {
-  if "syslog" in [tags] and "test_data" not in [tags] {
+  if [module] =~ "syslog" {
     elasticsearch {
+      pipeline => "%{module}"
       hosts => "{{ ES }}"
      index => "so-syslog-%{+YYYY.MM.dd}"
-      template_name => "logstash"
+      template_name => "so-common"
      template => "/so-common-template.json"
      template_overwrite => true
    }
@@ -3,22 +3,15 @@
 {%- else %}
 {%- set ES = salt['pillar.get']('node:mainip', '') -%}
 {%- endif %}
-# Author: Wes Lambert
-# Last Update: 09/14/2018
-filter {
-  if "beat" in [tags] {
-    mutate {
-      ##add_tag => [ "conf_file_9500"]
-    }
-  }
-}
 output {
-  if "beat" in [tags] {
+  if "beat-ext" in [tags] {
     elasticsearch {
+      pipeline => "beats.common"
       hosts => "{{ ES }}"
       index => "so-beats-%{+YYYY.MM.dd}"
-      template_name => "so-beats"
-      template => "/so-beats-template.json"
+      template_name => "so-common"
+      template => "/so-common-template.json"
       template_overwrite => true
     }
   }
@@ -1,2 +0,0 @@
-# Reference /usr/share/logstash/pipeline.custom/templates/YOURTEMPLATE.json
-#

@@ -0,0 +1,2 @@
+# Reference /usr/share/logstash/pipeline.custom/templates/YOURTEMPLATE.json
+# For custom logstash templates, they should be placed in /opt/so/saltstack/local/salt/logstash/pipelines/templates/custom/

(File diff suppressed because it is too large.)
@@ -1,10 +1,10 @@
 #!/usr/bin/env bash
 
 # This script adds pillar and schedule files securely
+local_salt_dir=/opt/so/saltstack/local
 MINION=$1
 
 echo "Adding $1"
-cp /tmp/$MINION/pillar/$MINION.sls /opt/so/saltstack/pillar/minions/
-cp /tmp/$MINION/schedules/* /opt/so/saltstack/salt/patch/os/schedules/
+cp /tmp/$MINION/pillar/$MINION.sls $local_salt_dir/pillar/minions/
+cp --parents /tmp/$MINION/schedules/* $local_salt_dir/salt/patch/os/schedules/
 rm -rf /tmp/$MINION
@@ -61,6 +61,7 @@ so-aptcacherng:
   docker_container.running:
     - image: {{ MASTER }}:5000/soshybridhunter/so-acng:{{ VERSION }}
     - hostname: so-acng
+    - restart_policy: always
     - port_bindings:
       - 0.0.0.0:3142:3142
     - binds:
@@ -134,7 +134,7 @@ http {
             proxy_set_header Connection "Upgrade";
         }
 
-        location ~ ^/auth/.*?(whoami|login|logout) {
+        location ~ ^/auth/.*?(whoami|login|logout|settings) {
             rewrite /auth/(.*) /$1 break;
             proxy_pass http://{{ masterip }}:4433;
             proxy_read_timeout 90;

@@ -134,7 +134,7 @@ http {
             proxy_set_header Connection "Upgrade";
         }
 
-        location ~ ^/auth/.*?(whoami|login|logout) {
+        location ~ ^/auth/.*?(whoami|login|logout|settings) {
            rewrite /auth/(.*) /$1 break;
             proxy_pass http://{{ masterip }}:4433;
             proxy_read_timeout 90;

@@ -134,7 +134,7 @@ http {
             proxy_set_header Connection "Upgrade";
         }
 
-        location ~ ^/auth/.*?(whoami|login|logout) {
+        location ~ ^/auth/.*?(whoami|login|logout|settings) {
             rewrite /auth/(.*) /$1 break;
             proxy_pass http://{{ masterip }}:4433;
             proxy_read_timeout 90;

@@ -134,7 +134,7 @@ http {
             proxy_set_header Connection "Upgrade";
         }
 
-        location ~ ^/auth/.*?(whoami|login|logout) {
+        location ~ ^/auth/.*?(whoami|login|logout|settings) {
             rewrite /auth/(.*) /$1 break;
             proxy_pass http://{{ masterip }}:4433;
             proxy_read_timeout 90;
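The added alternative extends the proxied auth endpoints to the settings flow; for example, /auth/self-service/settings now matches, is rewritten to /self-service/settings, and is proxied to the auth service on port 4433 (path assumed from the regex):

    curl -sk "https://192.0.2.10/auth/self-service/settings"    # master IP assumed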
@@ -1,5 +1,6 @@
 {%- set ip = salt['pillar.get']('static:masterip', '') -%}
 #!/bin/bash
+default_salt_dir=/opt/so/saltstack/default
 
 echo "Waiting for connection"
 until $(curl --output /dev/null --silent --head http://{{ ip }}:1880); do
@@ -7,5 +8,5 @@ until $(curl --output /dev/null --silent --head http://{{ ip }}:1880); do
   sleep 1
 done
 echo "Loading flows..."
-curl -XPOST -v -H "Content-Type: application/json" -d @/opt/so/saltstack/salt/nodered/so_flows.json {{ ip }}:1880/flows
+curl -XPOST -v -H "Content-Type: application/json" -d @$default_salt_dir/salt/nodered/so_flows.json {{ ip }}:1880/flows
 echo "Done loading..."
@@ -36,7 +36,7 @@ nodered:
 
 noderedflows:
   file.recurse:
-    - name: /opt/so/saltstack/salt/nodered/
+    - name: /opt/so/saltstack/default/salt/nodered/
     - source: salt://nodered/files
     - user: 947
     - group: 939
@@ -1,5 +1,7 @@
 {%- set MYSQLPASS = salt['pillar.get']('secrets:mysql', None) -%}
 #!/bin/sh
 
-docker cp /opt/so/saltstack/salt/playbook/files/playbook_db_init.sql so-mysql:/tmp/playbook_db_init.sql
+default_salt_dir=/opt/so/saltstack/default
+
+docker cp $default_salt_dir/salt/playbook/files/playbook_db_init.sql so-mysql:/tmp/playbook_db_init.sql
 docker exec so-mysql /bin/bash -c "/usr/bin/mysql -b -uroot -p{{MYSQLPASS}} < /tmp/playbook_db_init.sql"
@@ -86,15 +86,22 @@ so-playbook:
 
 {% endif %}
 
+playbooklogdir:
+  file.directory:
+    - name: /opt/so/log/playbook
+    - user: 939
+    - group: 939
+    - makedirs: True
+
 so-playbooksynccron:
   cron.present:
-    - name: /usr/sbin/so-playbook-sync
+    - name: /usr/sbin/so-playbook-sync > /opt/so/log/playbook/sync.log 2>&1
     - user: root
     - minute: '*/5'
 
 so-playbookruleupdatecron:
   cron.present:
-    - name: /usr/sbin/so-playbook-ruleupdate
+    - name: /usr/sbin/so-playbook-ruleupdate > /opt/so/log/playbook/update.log 2>&1
     - user: root
     - minute: '1'
     - hour: '6'
@@ -9,9 +9,9 @@ import subprocess
 def run():
     MINIONID = data['id']
     ACTION = data['data']['action']
+    local_salt_dir = '/opt/so/saltstack/local'
-    STATICFILE = '/opt/so/saltstack/pillar/static.sls'
-    SECRETSFILE = '/opt/so/saltstack/pillar/secrets.sls'
+    STATICFILE = local_salt_dir + '/pillar/static.sls'
+    SECRETSFILE = local_salt_dir + '/pillar/secrets.sls'
 
     if MINIONID.split('_')[-1] in ['master','eval','fleet','mastersearch','standalone']:
         if ACTION == 'enablefleet':
@@ -58,7 +58,7 @@ def run():
         PACKAGEVERSION += 1
 
         # Run Docker container that will build the packages
-        gen_packages = subprocess.run(["docker", "run","--rm", "--mount", "type=bind,source=/opt/so/saltstack/salt/fleet/packages,target=/output", \
+        gen_packages = subprocess.run(["docker", "run","--rm", "--mount", "type=bind,source=" + local_salt_dir + "/salt/fleet/packages,target=/output", \
         "--mount", "type=bind,source=/etc/ssl/certs/intca.crt,target=/var/launcher/launcher.crt", f"{ MASTER }:5000/soshybridhunter/so-fleet-launcher:{ VERSION }", \
         f"{ESECRET}", f"{PACKAGEHOSTNAME}:8090", f"{PACKAGEVERSION}.1.1"], stdout=subprocess.PIPE, encoding='ascii')
 
|
|||||||
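The bind-mount list above is easier to sanity-check as a standalone command. A sketch of the equivalent docker invocation with the local salt dir mounted at /output (registry host, version tag, and the three positional arguments are illustrative placeholders):

    docker run --rm \
      --mount type=bind,source=/opt/so/saltstack/local/salt/fleet/packages,target=/output \
      --mount type=bind,source=/etc/ssl/certs/intca.crt,target=/var/launcher/launcher.crt \
      <master>:5000/soshybridhunter/so-fleet-launcher:<version> \
      <enroll-secret> <package-hostname>:8090 <packageversion>.1.1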
@@ -42,6 +42,7 @@ so-dockerregistry:
  docker_container.running:
    - image: registry:2
    - hostname: so-registry
+    - restart_policy: always
    - port_bindings:
      - 0.0.0.0:5000:5000
    - binds:
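A restart policy only helps if it actually lands on the container. One way to confirm after a highstate (the container name so-registry is an assumption based on the hostname above):

    # Expect output: always
    docker inspect -f '{{.HostConfig.RestartPolicy.Name}}' so-registry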
@@ -1,25 +0,0 @@
-{%- set MASTERIP = salt['pillar.get']('static:masterip', '') -%}
-{%- set SENSORONIKEY = salt['pillar.get']('static:sensoronikey', '') -%}
-{
-  "logFilename": "/opt/sensoroni/logs/sensoroni-server.log",
-  "server": {
-    "bindAddress": "0.0.0.0:9822",
-    "maxPacketCount": 5000,
-    "htmlDir": "html",
-    "modules": {
-      "filedatastore": {
-        "jobDir": "jobs"
-      },
-      "securityonion": {
-        "elasticsearchHost": "http://{{ MASTERIP }}:9200",
-        "elasticsearchUsername": "",
-        "elasticsearchPassword": "",
-        "elasticsearchVerifyCert": false
-      },
-      "statickeyauth": {
-        "anonymousCidr": "172.17.0.0/24",
-        "apiKey": "{{ SENSORONIKEY }}"
-      }
-    }
-  }
-}
@@ -1,45 +0,0 @@
-{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
-{% set MASTER = salt['grains.get']('master') %}
-
-sensoronidir:
-  file.directory:
-    - name: /opt/so/conf/sensoroni
-    - user: 939
-    - group: 939
-    - makedirs: True
-
-sensoronidatadir:
-  file.directory:
-    - name: /nsm/sensoroni/jobs
-    - user: 939
-    - group: 939
-    - makedirs: True
-
-sensoronilogdir:
-  file.directory:
-    - name: /opt/so/log/sensoroni
-    - user: 939
-    - group: 939
-    - makedirs: True
-
-sensoronisync:
-  file.recurse:
-    - name: /opt/so/conf/sensoroni
-    - source: salt://sensoroni/files
-    - user: 939
-    - group: 939
-    - template: jinja
-
-so-sensoroni:
-  docker_container.running:
-    - image: {{ MASTER }}:5000/soshybridhunter/so-sensoroni:{{ VERSION }}
-    - hostname: sensoroni
-    - name: so-sensoroni
-    - binds:
-      - /nsm/sensoroni/jobs:/opt/sensoroni/jobs:rw
-      - /opt/so/conf/sensoroni/sensoroni.json:/opt/sensoroni/sensoroni.json:ro
-      - /opt/so/log/sensoroni/:/opt/sensoroni/logs/:rw
-    - port_bindings:
-      - 0.0.0.0:9822:9822
-    - watch:
-      - file: /opt/so/conf/sensoroni
@@ -42,7 +42,7 @@ urls:
   login_ui: https://{{ WEBACCESS }}/login/
   registration_ui: https://{{ WEBACCESS }}/login/
   error_ui: https://{{ WEBACCESS }}/login/
-  settings_ui: https://{{ WEBACCESS }}/
+  settings_ui: https://{{ WEBACCESS }}/?r=/settings
   verify_ui: https://{{ WEBACCESS }}/
   mfa_ui: https://{{ WEBACCESS }}/
@@ -93,55 +93,55 @@
 { "name": "Wazuh/OSSEC Users", "description": "Show all Wazuh alerts grouped by username", "query": "event.module:ossec AND event.dataset:alert | groupby user.name"},
 { "name": "Sysmon Events", "description": "Show all Sysmon logs grouped by event_id", "query": "event_type:sysmon | groupby event_id"},
 { "name": "Sysmon Usernames", "description": "Show all Sysmon logs grouped by username", "query": "event_type:sysmon | groupby username"},
-{ "name": "Zeek Notice", "description": "Show notices from Zeek", "query": "event.module:zeek AND event.dataset:notice | groupby notice.note notice.message"},
-{ "name": "Connections", "description": "Connections grouped by IP and Port", "query": "event.module:zeek AND event.dataset:conn | groupby source.ip destination.ip network.protocol destination.port"},
-{ "name": "Connections", "description": "Connections grouped by Service", "query": "event.module:zeek AND event.dataset:conn | groupby network.protocol destination.port"},
-{ "name": "Connections", "description": "Connections grouped by destination country", "query": "event.module:zeek AND event.dataset:conn | groupby destination.geo.country_name"},
-{ "name": "Connections", "description": "Connections grouped by source country", "query": "event.module:zeek AND event.dataset:conn | groupby source.geo.country_name"},
-{ "name": "DCE_RPC", "description": "DCE_RPC grouped by operation", "query": "event.module:zeek AND event.dataset:dce_rpc | groupby dce_rpc.operation"},
-{ "name": "DHCP", "description": "DHCP leases", "query": "event.module:zeek AND event.dataset:dhcp | groupby host.hostname host.domain dhcp.requested_address"},
-{ "name": "DHCP", "description": "DHCP grouped by message type", "query": "event.module:zeek AND event.dataset:dhcp | groupby dhcp.message_types"},
-{ "name": "DNP3", "description": "DNP3 grouped by reply", "query": "event.module:zeek AND event.dataset:dnp3 | groupby dnp3.fc_reply"},
-{ "name": "DNS", "description": "DNS queries grouped by port ", "query": "event.module:zeek AND event.dataset:dns | groupby dns.query.name destination.port"},
-{ "name": "DNS", "description": "DNS queries grouped by type", "query": "event.module:zeek AND event.dataset:dns | groupby dns.query.type_name destination.port"},
-{ "name": "DNS", "description": "DNS highest registered domain", "query": "event.module:zeek AND event.dataset:dns | groupby dns.highest_registered_domain.keyword"},
-{ "name": "DNS", "description": "DNS grouped by parent domain", "query": "event.module:zeek AND event.dataset:dns | groupby dns.parent_domain.keyword"},
-{ "name": "Files", "description": "Files grouped by mimetype", "query": "event.module:zeek AND event.dataset:files | groupby file.mime_type source.ip"},
-{ "name": "FTP", "description": "FTP grouped by argument", "query": "event.module:zeek AND event.dataset:ftp | groupby ftp.argument"},
-{ "name": "FTP", "description": "FTP grouped by command", "query": "event.module:zeek AND event.dataset:ftp | groupby ftp.command"},
-{ "name": "FTP", "description": "FTP grouped by username", "query": "event.module:zeek AND event.dataset:ftp | groupby ftp.user"},
-{ "name": "HTTP", "description": "HTTP grouped by destination port", "query": "event.module:zeek AND event.dataset:http | groupby destination.port"},
-{ "name": "HTTP", "description": "HTTP grouped by method", "query": "event.module:zeek AND event.dataset:http | groupby http.method"},
-{ "name": "HTTP", "description": "HTTP grouped by status code", "query": "event.module:zeek AND event.dataset:http | groupby http.status_code"},
-{ "name": "HTTP", "description": "HTTP grouped by status message", "query": "event.module:zeek AND event.dataset:http | groupby http.status_message"},
-{ "name": "HTTP", "description": "HTTP grouped by user agent", "query": "event.module:zeek AND event.dataset:http | groupby http.useragent"},
-{ "name": "HTTP", "description": "HTTP grouped by virtual host", "query": "event.module:zeek AND event.dataset:http | groupby http.virtual_host"},
-{ "name": "HTTP", "description": "HTTP with exe downloads", "query": "event.module:zeek AND event.dataset:http AND file.resp_mime_types:dosexec | groupby http.virtual_host"},
-{ "name": "Intel", "description": "Intel framework hits grouped by indicator", "query": "event.module:zeek AND event.dataset:intel | groupby intel.indicator"},
-{ "name": "IRC", "description": "IRC grouped by command", "query": "event.module:zeek AND event.dataset:irc | groupby irc.command.type"},
-{ "name": "KERBEROS", "description": "KERBEROS grouped by service", "query": "event.module:zeek AND event.dataset:kerberos | groupby kerberos.service"},
-{ "name": "MODBUS", "description": "MODBUS grouped by function", "query": "event.module:zeek AND event.dataset:modbus | groupby modbus.function"},
-{ "name": "MYSQL", "description": "MYSQL grouped by command", "query": "event.module:zeek AND event.dataset:mysql | groupby mysql.command"},
-{ "name": "NOTICE", "description": "Zeek notice logs grouped by note", "query": "event.module:zeek AND event.dataset:notice | groupby notice.note"},
-{ "name": "NOTICE", "description": "Zeek notice logs grouped by message", "query": "event.module:zeek AND event.dataset:notice | groupby notice.message"},
-{ "name": "NTLM", "description": "NTLM grouped by computer name", "query": "event.module:zeek AND event.dataset:ntlm | groupby ntlm.server.dns.name"},
-{ "name": "PE", "description": "PE files list", "query": "event.module:zeek AND event.dataset:pe | groupby file.machine file.os file.subsystem"},
-{ "name": "RADIUS", "description": "RADIUS grouped by username", "query": "event.module:zeek AND event.dataset:radius | groupby user.name.keyword"},
-{ "name": "RDP", "description": "RDP grouped by client name", "query": "event.module:zeek AND event.dataset:rdp | groupby client.name"},
-{ "name": "RFB", "description": "RFB grouped by desktop name", "query": "event.module:zeek AND event.dataset:rfb | groupby rfb.desktop.name"},
-{ "name": "Signatures", "description": "Zeek signatures grouped by signature id", "query": "event.module:zeek AND event.dataset:signatures | groupby signature_id"},
-{ "name": "SIP", "description": "SIP grouped by user agent", "query": "event.module:zeek AND event.dataset:sip | groupby client.user_agent"},
-{ "name": "SMB_Files", "description": "SMB files grouped by action", "query": "event.module:zeek AND event.dataset:smb_files | groupby file.action"},
-{ "name": "SMB_Mapping", "description": "SMB mapping grouped by path", "query": "event.module:zeek AND event.dataset:smb_mapping | groupby smb.path"},
-{ "name": "SMTP", "description": "SMTP grouped by subject", "query": "event.module:zeek AND event.dataset:smtp | groupby smtp.subject"},
-{ "name": "SNMP", "description": "SNMP grouped by version and string", "query": "event.module:zeek AND event.dataset:snmp | groupby snmp.community snmp.version"},
-{ "name": "Software", "description": "List of software seen on the network", "query": "event.module:zeek AND event.dataset:software | groupby software.type software.name"},
-{ "name": "SSH", "description": "SSH grouped by version", "query": "event.module:zeek AND event.dataset:ssh | groupby ssh.version"},
-{ "name": "SSL", "description": "SSL grouped by version and server name", "query": "event.module:zeek AND event.dataset:ssl | groupby ssl.version ssl.server_name"},
-{ "name": "SYSLOG", "description": "SYSLOG grouped by severity and facility ", "query": "event.module:zeek AND event.dataset:syslog | groupby syslog.severity syslog.facility"},
-{ "name": "Tunnels", "description": "Tunnels grouped by action", "query": "event.module:zeek AND event.dataset:tunnels | groupby event.action"},
-{ "name": "Weird", "description": "Zeek weird log grouped by name", "query": "event.module:zeek AND event.dataset:weird | groupby weird.name"},
-{ "name": "x509", "description": "x.509 grouped by key length", "query": "event.module:zeek AND event.dataset:x509 | groupby x509.certificate.key.length"},
+{ "name": "Zeek Notice", "description": "Show notices from Zeek", "query": "event.dataset:notice | groupby notice.note notice.message"},
+{ "name": "Connections", "description": "Connections grouped by IP and Port", "query": "event.dataset:conn | groupby source.ip destination.ip network.protocol destination.port"},
+{ "name": "Connections", "description": "Connections grouped by Service", "query": "event.dataset:conn | groupby network.protocol destination.port"},
+{ "name": "Connections", "description": "Connections grouped by destination country", "query": "event.dataset:conn | groupby destination.geo.country_name"},
+{ "name": "Connections", "description": "Connections grouped by source country", "query": "event.dataset:conn | groupby source.geo.country_name"},
+{ "name": "DCE_RPC", "description": "DCE_RPC grouped by operation", "query": "event.dataset:dce_rpc | groupby dce_rpc.operation"},
+{ "name": "DHCP", "description": "DHCP leases", "query": "event.dataset:dhcp | groupby host.hostname host.domain"},
+{ "name": "DHCP", "description": "DHCP grouped by message type", "query": "event.dataset:dhcp | groupby dhcp.message_types"},
+{ "name": "DNP3", "description": "DNP3 grouped by reply", "query": "event.dataset:dnp3 | groupby dnp3.fc_reply"},
+{ "name": "DNS", "description": "DNS queries grouped by port ", "query": "event.dataset:dns | groupby dns.query.name destination.port"},
+{ "name": "DNS", "description": "DNS queries grouped by type", "query": "event.dataset:dns | groupby dns.query.type_name destination.port"},
+{ "name": "DNS", "description": "DNS highest registered domain", "query": "event.dataset:dns | groupby dns.highest_registered_domain.keyword"},
+{ "name": "DNS", "description": "DNS grouped by parent domain", "query": "event.dataset:dns | groupby dns.parent_domain.keyword"},
+{ "name": "Files", "description": "Files grouped by mimetype", "query": "event.dataset:files | groupby file.mime_type source.ip"},
+{ "name": "FTP", "description": "FTP grouped by argument", "query": "event.dataset:ftp | groupby ftp.argument"},
+{ "name": "FTP", "description": "FTP grouped by command", "query": "event.dataset:ftp | groupby ftp.command"},
+{ "name": "FTP", "description": "FTP grouped by username", "query": "event.dataset:ftp | groupby ftp.user"},
+{ "name": "HTTP", "description": "HTTP grouped by destination port", "query": "event.dataset:http | groupby destination.port"},
+{ "name": "HTTP", "description": "HTTP grouped by method", "query": "event.dataset:http | groupby http.method"},
+{ "name": "HTTP", "description": "HTTP grouped by status code and message", "query": "event.dataset:http | groupby http.status_code http.status_message"},
+{ "name": "HTTP", "description": "HTTP grouped by user agent", "query": "event.dataset:http | groupby http.useragent"},
+{ "name": "HTTP", "description": "HTTP grouped by virtual host", "query": "event.dataset:http | groupby http.virtual_host"},
+{ "name": "HTTP", "description": "HTTP with exe downloads", "query": "event.dataset:http AND file.resp_mime_types:dosexec | groupby http.virtual_host"},
+{ "name": "Intel", "description": "Intel framework hits grouped by indicator", "query": "event.dataset:intel | groupby intel.indicator"},
+{ "name": "IRC", "description": "IRC grouped by command", "query": "event.dataset:irc | groupby irc.command.type"},
+{ "name": "KERBEROS", "description": "KERBEROS grouped by service", "query": "event.dataset:kerberos | groupby kerberos.service"},
+{ "name": "MODBUS", "description": "MODBUS grouped by function", "query": "event.dataset:modbus | groupby modbus.function"},
+{ "name": "MYSQL", "description": "MYSQL grouped by command", "query": "event.dataset:mysql | groupby mysql.command"},
+{ "name": "NOTICE", "description": "Zeek notice logs grouped by note and message", "query": "event.dataset:notice | groupby notice.note notice.message"},
+{ "name": "NTLM", "description": "NTLM grouped by computer name", "query": "event.dataset:ntlm | groupby ntlm.server.dns.name"},
+{ "name": "PE", "description": "PE files list", "query": "event.dataset:pe | groupby file.machine file.os file.subsystem"},
+{ "name": "RADIUS", "description": "RADIUS grouped by username", "query": "event.dataset:radius | groupby user.name.keyword"},
+{ "name": "RDP", "description": "RDP grouped by client name", "query": "event.dataset:rdp | groupby client.name"},
+{ "name": "RFB", "description": "RFB grouped by desktop name", "query": "event.dataset:rfb | groupby rfb.desktop.name"},
+{ "name": "Signatures", "description": "Zeek signatures grouped by signature id", "query": "event.dataset:signatures | groupby signature_id"},
+{ "name": "SIP", "description": "SIP grouped by user agent", "query": "event.dataset:sip | groupby client.user_agent"},
+{ "name": "SMB_Files", "description": "SMB files grouped by action", "query": "event.dataset:smb_files | groupby file.action"},
+{ "name": "SMB_Mapping", "description": "SMB mapping grouped by path", "query": "event.dataset:smb_mapping | groupby smb.path"},
+{ "name": "SMTP", "description": "SMTP grouped by subject", "query": "event.dataset:smtp | groupby smtp.subject"},
+{ "name": "SNMP", "description": "SNMP grouped by version and string", "query": "event.dataset:snmp | groupby snmp.community snmp.version"},
+{ "name": "Software", "description": "List of software seen on the network", "query": "event.dataset:software | groupby software.type software.name"},
+{ "name": "SSH", "description": "SSH grouped by version", "query": "event.dataset:ssh | groupby ssh.version"},
+{ "name": "SSL", "description": "SSL grouped by version and server name", "query": "event.dataset:ssl | groupby ssl.version ssl.server_name"},
+{ "name": "SYSLOG", "description": "SYSLOG grouped by severity and facility ", "query": "event.dataset:syslog | groupby syslog.severity syslog.facility"},
+{ "name": "Tunnels", "description": "Tunnels grouped by action", "query": "event.dataset:tunnels | groupby event.action"},
+{ "name": "Weird", "description": "Zeek weird log grouped by name", "query": "event.dataset:weird | groupby weird.name"},
+{ "name": "x509", "description": "x.509 grouped by key length", "query": "event.dataset:x509 | groupby x509.certificate.key.length"},
+{ "name": "x509", "description": "x.509 grouped by issuer", "query": "event.dataset:x509 | groupby x509.certificate.issuer"},
+{ "name": "x509", "description": "x.509 grouped by subject", "query": "event.dataset:x509 | groupby x509.certificate.subject"},
 { "name": "Firewall", "description": "Firewall events grouped by action", "query": "event_type:firewall | groupby action"}
 ]
 }
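A bracket dropped while editing a list this long fails quietly until the UI tries to load it, so a parse check is cheap insurance. A sketch, with the file path as a placeholder:

    # jq exits non-zero on a parse error and prints nothing on success.
    jq empty /path/to/queries.json && echo "queries JSON parses"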
@@ -1,5 +1,7 @@
 {% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
 {% set MASTER = salt['grains.get']('master') %}
+{%- set MASTER_URL = salt['pillar.get']('master:url_base', '') %}
+{%- set MASTER_IP = salt['pillar.get']('static:masterip', '') %}
 
 soctopusdir:
   file.directory:
@@ -69,3 +71,5 @@ so-soctopus:
       - /opt/so/conf/navigator/nav_layer_playbook.json:/etc/playbook/nav_layer_playbook.json:rw
     - port_bindings:
       - 0.0.0.0:7000:7000
+    - extra_hosts:
+      - {{MASTER_URL}}:{{MASTER_IP}}
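extra_hosts entries surface inside the container as ordinary /etc/hosts lines, which is what lets SOCtopus resolve the master's URL without external DNS. To confirm, assuming the running container is named so-soctopus:

    docker exec so-soctopus cat /etc/hosts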
@@ -86,17 +86,17 @@ chownilogstashfilebeatp8:
 # Create Symlinks to the keys so I can distribute it to all the things
 filebeatdir:
   file.directory:
-    - name: /opt/so/saltstack/salt/filebeat/files
+    - name: /opt/so/saltstack/local/salt/filebeat/files
-    - mkdirs: True
+    - makedirs: True
 
 fbkeylink:
   file.symlink:
-    - name: /opt/so/saltstack/salt/filebeat/files/filebeat.p8
+    - name: /opt/so/saltstack/local/salt/filebeat/files/filebeat.p8
     - target: /etc/pki/filebeat.p8
 
 fbcrtlink:
   file.symlink:
-    - name: /opt/so/saltstack/salt/filebeat/files/filebeat.crt
+    - name: /opt/so/saltstack/local/salt/filebeat/files/filebeat.crt
     - target: /etc/pki/filebeat.crt
 
 # Create a cert for the docker registry
File diff suppressed because it is too large
@@ -3,6 +3,8 @@
 {%- set interface = salt['pillar.get']('sensor:interface', 'bond0') %}
 {%- if grains['role'] == 'so-eval' %}
 {%- set MTU = 1500 %}
+{%- elif grains['role'] == 'so-helix' %}
+{%- set MTU = 9000 %}
 {%- else %}
 {%- set MTU = salt['pillar.get']('sensor:mtu', '1500') %}
 {%- endif %}
@@ -23,6 +25,11 @@ vars:
   # more specific is better for alert accuracy and performance
   address-groups:
     HOME_NET: "[{{ homenet }}]"
+    #HOME_NET: "[192.168.0.0/16]"
+    #HOME_NET: "[10.0.0.0/8]"
+    #HOME_NET: "[172.16.0.0/12]"
+    #HOME_NET: "any"
+
     EXTERNAL_NET: "!$HOME_NET"
     #EXTERNAL_NET: "any"
@@ -49,6 +56,8 @@ vars:
     MODBUS_PORTS: 502
     FILE_DATA_PORTS: "[$HTTP_PORTS,110,143]"
     FTP_PORTS: 21
+    VXLAN_PORTS: 4789
+    TEREDO_PORTS: 3544
 
 ##
 ## Step 2: select outputs to enable
@@ -64,9 +73,12 @@ stats:
   enabled: yes
   # The interval field (in seconds) controls at what interval
   # the loggers are invoked.
-  interval: 8
+  interval: 30
   # Add decode events as stats.
   #decoder-events: true
+  # Decoder event prefix in stats. Has been 'decoder' before, but that leads
+  # to missing events in the eve.stats records. See issue #2225.
+  #decoder-events-prefix: "decoder.event"
   # Add stream events as stats.
   #stream-events: false
@@ -83,18 +95,35 @@ outputs:
   - eve-log:
       enabled: yes
       filetype: regular #regular|syslog|unix_dgram|unix_stream|redis
-      filename: eve.json
+      filename: /nsm/eve.json
       rotate-interval: hour
 
       #prefix: "@cee: " # prefix to prepend to each log entry
       # the following are valid when type: syslog above
       #identity: "suricata"
       #facility: local5
       #level: Info ## possible levels: Emergency, Alert, Critical,
                    ## Error, Warning, Notice, Info, Debug
+      #redis:
+      #  server: 127.0.0.1
+      #  port: 6379
+      #  async: true ## if redis replies are read asynchronously
+      #  mode: list ## possible values: list|lpush (default), rpush, channel|publish
+      #             ## lpush and rpush are using a Redis list. "list" is an alias for lpush
+      #             ## publish is using a Redis channel. "channel" is an alias for publish
+      #  key: suricata ## key or channel to use (default to suricata)
+      #  # Redis pipelining set up. This will enable to only do a query every
+      #  # 'batch-size' events. This should lower the latency induced by network
+      #  # connection at the cost of some memory. There is no flushing implemented
+      #  # so this setting as to be reserved to high traffic suricata.
+      #  # pipelining:
+      #  #  enabled: yes ## set enable to yes to enable query pipelining
+      #  #  batch-size: 10 ## number of entry to keep in buffer
+
 
       # Include top level metadata. Default yes.
       #metadata: no
 
+      # include the name of the input pcap file in pcap file processing mode
       pcap-file: false
 
       # Community Flow ID
@@ -106,7 +135,7 @@ outputs:
       # to make the id less predictable.
 
       # enable/disable the community id feature.
-      community-id: false
+      community-id: true
       # Seed value for the ID output. Valid values are 0-65535.
       community-id-seed: 0
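With community-id enabled, Suricata stamps each eve record with the same flow hash Zeek emits, so alerts and Zeek logs can be joined on a single key. A quick look at the values, using the /nsm/eve.json path set above:

    # Count the most common community_id values seen in eve.json.
    jq -r 'select(.community_id != null) | .community_id' /nsm/eve.json | sort | uniq -c | sort -rn | head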
@@ -130,36 +159,76 @@ outputs:
 
       types:
         - alert:
-            # payload: yes             # enable dumping payload in Base64
-            # payload-buffer-size: 4kb # max size of payload buffer to output in eve-log
-            # payload-printable: yes   # enable dumping payload in printable (lossy) format
-            # packet: yes              # enable dumping of packet (without stream segments)
-            # http-body: yes           # enable dumping of http body in Base64
-            # http-body-printable: yes # enable dumping of http body in printable format
-            # metadata: no             # enable inclusion of app layer metadata with alert. Default yes
+            payload: no              # enable dumping payload in Base64
+            payload-buffer-size: 4kb # max size of payload buffer to output in eve-log
+            payload-printable: yes   # enable dumping payload in printable (lossy) format
+            packet: yes              # enable dumping of packet (without stream segments)
+            metadata:
+              app-layer: false
+              flow: false
+              rule:
+                metadata: true
+                raw: true
+
+            # http-body: yes           # Requires metadata; enable dumping of http body in Base64
+            # http-body-printable: yes # Requires metadata; enable dumping of http body in printable format
 
             # Enable the logging of tagged packets for rules using the
             # "tag" keyword.
             tagged-packets: no
+        - anomaly:
+            # Anomaly log records describe unexpected conditions such
+            # as truncated packets, packets with invalid IP/UDP/TCP
+            # length values, and other events that render the packet
+            # invalid for further processing or describe unexpected
+            # behavior on an established stream. Networks which
+            # experience high occurrences of anomalies may experience
+            # packet processing degradation.
+            #
+            # Anomalies are reported for the following:
+            # 1. Decode: Values and conditions that are detected while
+            # decoding individual packets. This includes invalid or
+            # unexpected values for low-level protocol lengths as well
+            # as stream related events (TCP 3-way handshake issues,
+            # unexpected sequence number, etc).
+            # 2. Stream: This includes stream related events (TCP
+            # 3-way handshake issues, unexpected sequence number,
+            # etc).
+            # 3. Application layer: These denote application layer
+            # specific conditions that are unexpected, invalid or are
+            # unexpected given the application monitoring state.
+            #
+            # By default, anomaly logging is disabled. When anomaly
+            # logging is enabled, applayer anomaly reporting is
+            # enabled.
+            enabled: no
+            #
+            # Choose one or more types of anomaly logging and whether to enable
+            # logging of the packet header for packet anomalies.
+            types:
+              decode: no
+              stream: no
+              applayer: yes
+            packethdr: no
         - http:
             extended: yes     # enable this for extended logging information
             # custom allows additional http fields to be included in eve-log
             # the example below adds three additional fields when uncommented
             #custom: [Accept-Encoding, Accept-Language, Authorization]
+            # set this value to one and only one among {both, request, response}
+            # to dump all http headers for every http request and/or response
+            # dump-all-headers: none
         - dns:
             # This configuration uses the new DNS logging format,
             # the old configuration is still available:
-            # http://suricata.readthedocs.io/en/latest/configuration/suricata-yaml.html#eve-extensible-event-format
-            # Use version 2 logging with the new format:
-            # DNS answers will be logged in one single event
-            # rather than an event for each of it.
-            # Without setting a version the version
-            # will fallback to 1 for backwards compatibility.
-            # Note: version 1 is not available with rust enabled
+            # https://suricata.readthedocs.io/en/latest/output/eve/eve-json-output.html#dns-v1-format
+
+            # As of Suricata 5.0, version 2 of the eve dns output
+            # format is the default.
+
             version: 2
 
             # Enable/disable this logger. Default: enabled.
-            #enabled: no
+            enabled: yes
 
             # Control logging of requests and responses:
             # - requests: enable logging of DNS queries
@@ -174,8 +243,8 @@ outputs:
             # Default: all
             #formats: [detailed, grouped]
 
-            # Answer types to log.
-            # Default: all
+            # Types to log, based on the query type.
+            # Default: all.
             #types: [a, aaaa, cname, mx, ns, ptr, txt]
         - tls:
             extended: yes     # enable this for extended logging information
@@ -184,7 +253,7 @@ outputs:
             #session-resumption: no
             # custom allows to control which tls fields that are included
             # in eve-log
-            #custom: [subject, issuer, session_resumed, serial, fingerprint, sni, version, not_before, not_after, certificate, chain, ja3]
+            #custom: [subject, issuer, session_resumed, serial, fingerprint, sni, version, not_before, not_after, certificate, chain, ja3, ja3s]
         - files:
             force-magic: no   # force logging magic on all logged files
             # force logging of checksums, available hash functions are md5,
@@ -207,20 +276,23 @@ outputs:
             # to yes
             #md5: [body, subject]
 
-        #- dnp3
+        - dnp3
+        - ftp
+        - rdp
         - nfs
         - smb
         - tftp
         - ikev2
         - krb5
+        - snmp
+        - sip
         - dhcp:
-            # DHCP logging requires Rust.
             enabled: yes
             # When extended mode is on, all DHCP messages are logged
             # with full detail. When extended mode is off (the
             # default), just enough information to map a MAC address
             # to an IP address is logged.
-            extended: no
+            # extended: no
         - ssh
         #- stats:
         #  totals: yes       # stats for all threads merged together
@@ -236,47 +308,11 @@ outputs:
       # flowints.
       #- metadata
 
-  # alert output for use with Barnyard2
+  # deprecated - unified2 alert format for use with Barnyard2
   - unified2-alert:
       enabled: no
-      filename: unified2.alert
-
-      # File size limit. Can be specified in kb, mb, gb. Just a number
-      # is parsed as bytes.
-      #limit: 32mb
-
-      # By default unified2 log files have the file creation time (in
-      # unix epoch format) appended to the filename. Set this to yes to
-      # disable this behaviour.
-      #nostamp: no
-
-      # Sensor ID field of unified2 alerts.
-      #sensor-id: 0
-
-      # Include payload of packets related to alerts. Defaults to true, set to
-      # false if payload is not required.
-      #payload: yes
-
-      # HTTP X-Forwarded-For support by adding the unified2 extra header or
-      # overwriting the source or destination IP address (depending on flow
-      # direction) with the one reported in the X-Forwarded-For HTTP header.
-      # This is helpful when reviewing alerts for traffic that is being reverse
-      # or forward proxied.
-      xff:
-        enabled: no
-        # Two operation modes are available, "extra-data" and "overwrite". Note
-        # that in the "overwrite" mode, if the reported IP address in the HTTP
-        # X-Forwarded-For header is of a different version of the packet
-        # received, it will fall-back to "extra-data" mode.
-        mode: extra-data
-        # Two proxy deployments are supported, "reverse" and "forward". In
-        # a "reverse" deployment the IP address used is the last one, in a
-        # "forward" deployment the first IP address is used.
-        deployment: reverse
-        # Header name where the actual IP address will be reported, if more
-        # than one IP address is present, the last IP address will be the
-        # one taken into consideration.
-        header: X-Forwarded-For
+      # for further options see:
+      # https://suricata.readthedocs.io/en/suricata-5.0.0/configuration/suricata-yaml.html#alert-output-for-use-with-barnyard2-unified2-alert
 
   # a line based log of HTTP requests (no alerts)
   - http-log:
@@ -285,6 +321,7 @@ outputs:
       append: yes
       #extended: yes     # enable this for extended logging information
       #custom: yes       # enabled the custom logging format (defined by customformat)
+      #customformat: ""
       #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram'
 
   # a line based log of TLS handshake parameters (no alerts)
@@ -294,6 +331,7 @@ outputs:
       append: yes
       #extended: yes     # Log extended information like fingerprint
       #custom: yes       # enabled the custom logging format (defined by customformat)
+      #customformat: ""
       #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram'
       # output TLS transaction where the session is resumed using a
       # session id
@@ -304,14 +342,6 @@ outputs:
       enabled: no
       #certs-log-dir: certs # directory to store the certificates files
 
-  # a line based log of DNS requests and/or replies (no alerts)
-  # Note: not available when Rust is enabled (--enable-rust).
-  - dns-log:
-      enabled: no
-      filename: dns.log
-      append: yes
-      #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram'
-
   # Packet log... log packets in pcap format. 3 modes of operation: "normal"
   # "multi" and "sguil".
   #
@@ -382,7 +412,7 @@ outputs:
       append: yes
       #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram'
 
-  # alert output to prelude (http://www.prelude-technologies.com/) only
+  # alert output to prelude (https://www.prelude-siem.org/) only
   # available if Suricata has been compiled with --enable-prelude
   - alert-prelude:
       enabled: no
@@ -397,7 +427,7 @@ outputs:
       append: yes       # append to file (yes) or overwrite it (no)
       totals: yes       # stats for all threads merged together
       threads: no       # per thread stats
-      #null-values: yes  # print counters that have value 0
+      null-values: yes  # print counters that have value 0
 
   # a line based alerts log similar to fast.log into syslog
   - syslog:
@@ -409,12 +439,11 @@ outputs:
       #level: Info     ## possible levels: Emergency, Alert, Critical,
                        ## Error, Warning, Notice, Info, Debug
 
-  # a line based information for dropped packets in IPS mode
+  # deprecated a line based information for dropped packets in IPS mode
   - drop:
       enabled: no
-      filename: drop.log
-      append: yes
-      #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram'
+      # further options documented at:
+      # https://suricata.readthedocs.io/en/suricata-5.0.0/configuration/suricata-yaml.html#drop-log-a-line-based-information-for-dropped-packets
 
   # Output module for storing files on disk. Files are stored in a
   # directory names consisting of the first 2 characters of the
@@ -481,58 +510,18 @@ outputs:
       # one taken into consideration.
       header: X-Forwarded-For
 
-  # output module to store extracted files to disk (old style, deprecated)
-  #
-  # The files are stored to the log-dir in a format "file.<id>" where <id> is
-  # an incrementing number starting at 1. For each file "file.<id>" a meta
-  # file "file.<id>.meta" is created. Before they are finalized, they will
-  # have a ".tmp" suffix to indicate that they are still being processed.
-  #
-  # If include-pid is yes, then the files are instead "file.<pid>.<id>", with
-  # meta files named as "file.<pid>.<id>.meta"
-  #
-  # File extraction depends on a lot of things to be fully done:
-  # - file-store stream-depth. For optimal results, set this to 0 (unlimited)
-  # - http request / response body sizes. Again set to 0 for optimal results.
-  # - rules that contain the "filestore" keyword.
+  # deprecated - file-store v1
   - file-store:
-      enabled: no       # set to yes to enable
-      log-dir: files    # directory to store the files
-      force-magic: no   # force logging magic on all stored files
-      # force logging of checksums, available hash functions are md5,
-      # sha1 and sha256
-      #force-hash: [md5]
-      force-filestore: no # force storing of all files
-      # override global stream-depth for sessions in which we want to
-      # perform file extraction. Set to 0 for unlimited.
-      #stream-depth: 0
-      #waldo: file.waldo # waldo file to store the file_id across runs
-      # uncomment to disable meta file writing
-      #write-meta: no
-      # uncomment the following variable to define how many files can
-      # remain open for filestore by Suricata. Default value is 0 which
-      # means files get closed after each write
-      #max-open-files: 1000
-      include-pid: no # set to yes to include pid in file names
-
-  # output module to log files tracked in a easily parsable JSON format
-  - file-log:
-      enabled: no
-      filename: files-json.log
-      append: yes
-      #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram'
-
-      force-magic: no   # force logging magic on all logged files
-      # force logging of checksums, available hash functions are md5,
-      # sha1 and sha256
-      #force-hash: [md5]
+      enabled: no
+      # further options documented at:
+      # https://suricata.readthedocs.io/en/suricata-5.0.0/file-extraction/file-extraction.html#file-store-version-1
 
   # Log TCP data after stream normalization
   # 2 types: file or dir. File logs into a single logfile. Dir creates
   # 2 files per TCP session and stores the raw TCP data into them.
   # Using 'both' will enable both file and dir modes.
   #
-  # Note: limited by stream.depth
+  # Note: limited by stream.reassembly.depth
   - tcp-data:
       enabled: no
       type: file
@@ -591,10 +580,14 @@ logging:
   - file:
       enabled: yes
      level: info
-      filename: /var/log/suricata/suricata.log
+      filename: suricata.log
      # type: json
   - syslog:
      enabled: no
+      facility: local5
+      format: "[%i] <%d> -- "
+      # type: json
+
 
 ##
 ## Step 4: configure common capture settings
@@ -613,16 +606,11 @@ af-packet:
   # Default AF_PACKET cluster type. AF_PACKET can load balance per flow or per hash.
   # This is only supported for Linux kernel > 3.1
   # possible value are:
-  #  * cluster_round_robin: round robin load balancing
   #  * cluster_flow: all packets of a given flow are send to the same socket
   #  * cluster_cpu: all packets treated in kernel by a CPU are send to the same socket
   #  * cluster_qm: all packets linked by network card to a RSS queue are sent to the same
   #  socket. Requires at least Linux 3.14.
-  #  * cluster_random: packets are sent randomly to sockets but with an equipartition.
-  #  Requires at least Linux 3.14.
-  #  * cluster_rollover: kernel rotates between sockets filling each socket before moving
-  #  to the next. Requires at least Linux 3.10.
-  #  * cluster_ebpf: eBPF file load balancing. See doc/userguide/capture/ebpf-xdt.rst for
+  #  * cluster_ebpf: eBPF file load balancing. See doc/userguide/capture-hardware/ebpf-xdp.rst for
   #  more info.
   # Recommended modes are cluster_flow on most boxes and cluster_cpu or cluster_qm on system
   # with capture card using RSS (require cpu affinity tuning and system irq tuning)
@@ -630,12 +618,8 @@ af-packet:
   # In some fragmentation case, the hash can not be computed. If "defrag" is set
   # to yes, the kernel will do the needed defragmentation before sending the packets.
   defrag: yes
-  # After Linux kernel 3.10 it is possible to activate the rollover option: if a socket is
-  # full then kernel will send the packet on the next socket with room available. This option
-  # can minimize packet drop and increase the treated bandwidth on single intensive flow.
-  #rollover: yes
   # To use the ring feature of AF_PACKET, set 'use-mmap' to yes
-  #use-mmap: yes
+  use-mmap: yes
   # Lock memory map to avoid it goes to swap. Be careful that over subscribing could lock
   # your system
   #mmap-locked: yes
@@ -683,14 +667,13 @@ af-packet:
     #copy-mode: ips
     #copy-iface: eth1
   # For eBPF and XDP setup including bypass, filter and load balancing, please
-  # see doc/userguide/capture/ebpf-xdt.rst for more info.
+  # see doc/userguide/capture-hardware/ebpf-xdp.rst for more info.
 
   # Put default values here. These will be used for an interface that is not
   # in the list above.
   - interface: default
     #threads: auto
     #use-mmap: no
-    #rollover: yes
     #tpacket-v3: yes
 
 # Cross platform libpcap capture support
@@ -753,6 +736,8 @@ app-layer:
   protocols:
     krb5:
       enabled: yes
+    snmp:
+      enabled: yes
     ikev2:
       enabled: yes
     tls:
@@ -760,8 +745,9 @@ app-layer:
       detection-ports:
         dp: 443
 
-      # Generate JA3 fingerprint from client hello
-      ja3-fingerprints: yes
+      # Generate JA3 fingerprint from client hello. If not specified it
+      # will be disabled by default, but enabled if rules require it.
+      #ja3-fingerprints: auto
 
       # What to do when the encrypted communications start:
       # - default: keep tracking TLS session, check for protocol anomalies,
@@ -775,17 +761,21 @@ app-layer:
       #
       # For best performance, select 'bypass'.
       #
-      #encrypt-handling: default
+      #encryption-handling: default
 
     dcerpc:
       enabled: yes
     ftp:
       enabled: yes
       # memcap: 64mb
+    # RDP, disabled by default.
+    rdp:
+      #enabled: no
     ssh:
       enabled: yes
     smtp:
       enabled: yes
+      raw-extraction: no
       # Configure SMTP-MIME Decoder
       mime:
         # Decode MIME messages from SMTP transactions
@@ -814,10 +804,6 @@ app-layer:
         content-inspect-window: 4096
     imap:
       enabled: detection-only
-    msn:
-      enabled: detection-only
-    # Note: --enable-rust is required for full SMB1/2 support. W/o rust
-    # only minimal SMB1 support is available.
     smb:
       enabled: yes
       detection-ports:
@@ -826,8 +812,6 @@ app-layer:
       # Stream reassembly size for SMB streams. By default track it completely.
       #stream-depth: 0
 
-    # Note: NFS parser depends on Rust support: pass --enable-rust
-    # to configure.
     nfs:
       enabled: yes
     tftp:
@@ -851,7 +835,8 @@ app-layer:
         dp: 53
     http:
       enabled: yes
-      # memcap: 64mb
+      # memcap: Maximum memory capacity for http
+      # Default is unlimited, value can be such as 64mb
 
       # default-config: Used when no server-config matches
       #   personality: List of personalities used by default
@@ -859,37 +844,15 @@ app-layer:
       #   by http_client_body & pcre /P option.
       #   response-body-limit: Limit reassembly of response body for inspection
       #   by file_data, http_server_body & pcre /Q option.
-      #   double-decode-path: Double decode path section of the URI
-      #   double-decode-query: Double decode query section of the URI
-      #   response-body-decompress-layer-limit:
-      #     Limit to how many layers of compression will be
-      #     decompressed. Defaults to 2.
       #
+      # For advanced options, see the user guide
+
+
       # server-config: List of server configurations to use if address matches
       #   address: List of IP addresses or networks for this block
       #   personalitiy: List of personalities used by this block
-      #   request-body-limit: Limit reassembly of request body for inspection
-      #   by http_client_body & pcre /P option.
-      #   response-body-limit: Limit reassembly of response body for inspection
-      #   by file_data, http_server_body & pcre /Q option.
-      #   double-decode-path: Double decode path section of the URI
-      #   double-decode-query: Double decode query section of the URI
       #
-      #   uri-include-all: Include all parts of the URI. By default the
-      #   'scheme', username/password, hostname and port
-      #   are excluded. Setting this option to true adds
-      #   all of them to the normalized uri as inspected
-      #   by http_uri, urilen, pcre with /U and the other
-      #   keywords that inspect the normalized uri.
-      #   Note that this does not affect http_raw_uri.
-      #   Also, note that including all was the default in
-      #   1.4 and 2.0beta1.
-      #
-      #   meta-field-limit: Hard size limit for request and response size
-      #   limits. Applies to request line and headers,
-      #   response line and headers. Does not apply to
-      #   request or response bodies. Default is 18k.
-      #   If this limit is reached an event is raised.
+      # Then, all the fields from default-config can be overloaded
       #
       # Currently Available Personalities:
       # Minimal, Generic, IDS (default), IIS_4_0, IIS_5_0, IIS_5_1, IIS_6_0,
@@ -943,6 +906,15 @@ app-layer:
             double-decode-path: no
             double-decode-query: no
 
+            # Can disable LZMA decompression
+            #lzma-enabled: yes
+            # Memory limit usage for LZMA decompression dictionary
+            # Data is decompressed until dictionary reaches this size
+            #lzma-memlimit: 1mb
+            # Maximum decompressed size with a compression ratio
+            # above 2048 (only LZMA can reach this ratio, deflate cannot)
+            #compression-bomb-limit: 1mb
+
     server-config:
 
       #- apache:
@@ -1002,13 +974,16 @@ app-layer:
 dp: 44818
 sp: 44818

+# Note: parser depends on Rust support
 ntp:
 enabled: yes

 dhcp:
 enabled: yes

+# SIP, disabled by default.
+sip:
+#enabled: no

 # Limit for the maximum number of asn1 frames to decode (default 256)
 asn1-max-frames: 256

@@ -1024,9 +999,9 @@ asn1-max-frames: 256
 ##

 # Run suricata as user and group.
-#run-as:
-# user: suri
-# group: suri
+run-as:
+user: suricata
+group: suricata

 # Some logging module will use that name in event as identifier. The default
 # value is the hostname
@@ -1069,29 +1044,26 @@ host-mode: auto
 # Number of packets preallocated per thread. The default is 1024. A higher number
 # will make sure each CPU will be more easily kept busy, but may negatively
 # impact caching.
-#max-pending-packets: 1024
+max-pending-packets: 5000

 # Runmode the engine should use. Please check --list-runmodes to get the available
-# runmodes for each packet acquisition method. Defaults to "autofp" (auto flow pinned
-# load balancing).
+# runmodes for each packet acquisition method. Default depends on selected capture
+# method. 'workers' generally gives best performance.
 runmode: workers

 # Specifies the kind of flow load balancer used by the flow pinned autofp mode.
 #
 # Supported schedulers are:
 #
-# round-robin - Flows assigned to threads in a round robin fashion.
-# active-packets - Flows assigned to threads that have the lowest number of
-# unprocessed packets (default).
-# hash - Flow allocated using the address hash. More of a random
-# technique. Was the default in Suricata 1.2.1 and older.
+# hash - Flow assigned to threads using the 5-7 tuple hash.
+# ippair - Flow assigned to threads using addresses only.
 #
-#autofp-scheduler: active-packets
+#autofp-scheduler: hash

 # Preallocated size for packet. Default is 1514 which is the classical
 # size for pcap on ethernet. You should adjust this value to the highest
 # packet size (MTU + hardware header) on your system.
-#default-packet-size: 1514
+default-packet-size: {{ MTU + 15 }}

 # Unix command socket can be used to pass commands to Suricata.
 # An external tool can then connect to get information from Suricata
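Worth noting: this suricata.yaml is rendered through Jinja, so the {{ MTU + 15 }} expression above is computed at deploy time. A minimal sketch of the arithmetic, assuming the template's MTU variable is 1500 (illustrative only):

    # With MTU=1500 the rendered line becomes
    # default-packet-size: 1515 (MTU plus room for the hardware header).
    echo "default-packet-size: $((1500 + 15))"
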
@@ -1107,6 +1079,10 @@ unix-command:
 #magic-file: /usr/share/file/magic
 #magic-file:

+# GeoIP2 database file. Specify path and filename of GeoIP2 database
+# if using rules with "geoip" rule option.
+#geoip-database: /usr/local/share/GeoLite2/GeoLite2-Country.mmdb
+
 legacy:
 uricontent: enabled

@@ -1300,7 +1276,9 @@ flow-timeouts:
 # inline: no # stream inline mode
 # drop-invalid: yes # in inline mode, drop packets that are invalid with regards to streaming engine
 # max-synack-queued: 5 # Max different SYN/ACKs to queue
-# bypass: no # Bypass packets when stream.depth is reached
+# bypass: no # Bypass packets when stream.reassembly.depth is reached.
+# # Warning: first side to reach this triggers
+# # the bypass.
 #
 # reassembly:
 # memcap: 64mb # Can be specified in kb, mb, gb. Just a number
@@ -1373,9 +1351,22 @@ host:

 decoder:
 # Teredo decoder is known to not be completely accurate
-# it will sometimes detect non-teredo as teredo.
+# as it will sometimes detect non-teredo as teredo.
 teredo:
 enabled: true
+# ports to look for Teredo. Max 4 ports. If no ports are given, or
+# the value is set to 'any', Teredo detection runs on _all_ UDP packets.
+ports: $TEREDO_PORTS # syntax: '[3544, 1234]' or '3533' or 'any'.
+
+# VXLAN decoder is assigned to up to 4 UDP ports. By default only the
+# IANA assigned port 4789 is enabled.
+vxlan:
+enabled: true
+ports: $VXLAN_PORTS # syntax: '8472, 4789'
+# ERSPAN Type I decode support
+erspan:
+typeI:
+enabled: false


 ##
@@ -1484,19 +1475,26 @@ threading:
 {%- if salt['pillar.get']('sensor:suriprocs') %}
 cpu-affinity:
 - management-cpu-set:
-cpu: [ all ] # include only these cpus in affinity settings
+cpu: [ all ] # include only these CPUs in affinity settings
 - receive-cpu-set:
-cpu: [ all ] # include only these cpus in affinity settings
+cpu: [ all ] # include only these CPUs in affinity settings
 - worker-cpu-set:
 cpu: [ "all" ]
 mode: "exclusive"
 # Use explicitely 3 threads and don't compute number by using
 # detect-thread-ratio variable:
+# threads: 3
 threads: {{ salt['pillar.get']('sensor:suriprocs') }}
 prio:
-default: "medium"
-{% endif %}
+low: [ 0 ]
+medium: [ "1-2" ]
+high: [ 3 ]
+default: "high"
+#- verdict-cpu-set:
+# cpu: [ 0 ]
+# prio:
+# default: "high"
+{%- endif -%}
 {%- if salt['pillar.get']('sensor:suripins') %}
 cpu-affinity:
 - management-cpu-set:
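The rendered worker thread count comes straight from the sensor:suriprocs pillar; a hedged check on a sensor minion (standard Salt command, pillar key taken from the template above):

    # If this prints 4, the block above renders "threads: 4".
    salt-call pillar.get sensor:suriprocs
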
@@ -1512,6 +1510,8 @@ threading:
 prio:
 default: "high"
 {% endif %}
+
+#
 # By default Suricata creates one "detect" thread per available CPU/CPU core.
 # This setting allows controlling this behaviour. A ratio setting of 2 will
 # create 2 detect threads for each CPU/CPU core. So for a dual core CPU this
@@ -1545,7 +1545,7 @@ profiling:

 # Profiling can be disabled here, but it will still have a
 # performance impact if compiled in.
-enabled: no
+enabled: yes
 filename: rule_perf.log
 append: yes

@@ -1668,7 +1668,7 @@ capture:

 # Netmap support
 #
-# Netmap operates with NIC directly in driver, so you need FreeBSD which have
+# Netmap operates with NIC directly in driver, so you need FreeBSD 11+ which have
 # built-in netmap support or compile and install netmap module and appropriate
 # NIC driver on your Linux system.
 # To reach maximum throughput disable all receive-, segmentation-,
@@ -1680,7 +1680,9 @@ capture:
 netmap:
 # To specify OS endpoint add plus sign at the end (e.g. "eth0+")
 - interface: eth2
-# Number of receive threads. "auto" uses number of RSS queues on interface.
+# Number of capture threads. "auto" uses number of RSS queues on interface.
+# Warning: unless the RSS hashing is symmetrical, this will lead to
+# accuracy issues.
 #threads: auto
 # You can use the following variables to activate netmap tap or IPS mode.
 # If copy-mode is set to ips or tap, the traffic coming to the current
@@ -1793,45 +1795,63 @@ napatech:
 # (-1 = OFF, 1 - 100 = percentage of the host buffer that can be held back)
 # This may be enabled when sharing streams with another application.
 # Otherwise, it should be turned off.
-hba: -1
+#hba: -1

-# use_all_streams set to "yes" will query the Napatech service for all configured
-# streams and listen on all of them. When set to "no" the streams config array
-# will be used.
-use-all-streams: yes
+# When use_all_streams is set to "yes" the initialization code will query
+# the Napatech service for all configured streams and listen on all of them.
+# When set to "no" the streams config array will be used.
+#
+# This option necessitates running the appropriate NTPL commands to create
+# the desired streams prior to running suricata.
+#use-all-streams: no

-# The streams to listen on. This can be either:
-# a list of individual streams (e.g. streams: [0,1,2,3])
+# The streams to listen on when auto-config is disabled or when and threading
+# cpu-affinity is disabled. This can be either:
+# an individual stream (e.g. streams: [0])
 # or
 # a range of streams (e.g. streams: ["0-3"])
+#
 streams: ["0-3"]

-# Tilera mpipe configuration. for use on Tilera TILE-Gx.
-mpipe:
+# When auto-config is enabled the streams will be created and assigned
+# automatically to the NUMA node where the thread resides. If cpu-affinity
+# is enabled in the threading section. Then the streams will be created
+# according to the number of worker threads specified in the worker cpu set.
+# Otherwise, the streams array is used to define the streams.
+#
+# This option cannot be used simultaneous with "use-all-streams".
+#
+auto-config: yes
+
-# Load balancing modes: "static", "dynamic", "sticky", or "round-robin".
-load-balance: dynamic
+# Ports indicates which napatech ports are to be used in auto-config mode.
+# these are the port ID's of the ports that will be merged prior to the
+# traffic being distributed to the streams.
+#
+# This can be specified in any of the following ways:
+#
+# a list of individual ports (e.g. ports: [0,1,2,3])
+#
+# a range of ports (e.g. ports: [0-3])
+#
+# "all" to indicate that all ports are to be merged together
+# (e.g. ports: [all])
+#
+# This has no effect if auto-config is disabled.
+#
+ports: [all]
+
-# Number of Packets in each ingress packet queue. Must be 128, 512, 2028 or 65536
-iqueue-packets: 2048
-# List of interfaces we will listen on.
-inputs:
-- interface: xgbe2
-- interface: xgbe3
-- interface: xgbe4
-# Relative weight of memory for packets of each mPipe buffer size.
-stack:
-size128: 0
-size256: 9
-size512: 0
-size1024: 0
-size1664: 7
-size4096: 0
-size10386: 0
-size16384: 0
+# When auto-config is enabled the hashmode specifies the algorithm for
+# determining to which stream a given packet is to be delivered.
+# This can be any valid Napatech NTPL hashmode command.
+#
+# The most common hashmode commands are: hash2tuple, hash2tuplesorted,
+# hash5tuple, hash5tuplesorted and roundrobin.
+#
+# See Napatech NTPL documentation other hashmodes and details on their use.
+#
+# This has no effect if auto-config is disabled.
+#
+hashmode: hash5tuplesorted

 ##
 ## Configure Suricata to load Suricata-Update managed rules.
@@ -1841,78 +1861,10 @@ mpipe:
 ##

 default-rule-path: /etc/suricata/rules

 rule-files:
 - all.rules

-##
-## Advanced rule file configuration.
-##
-## If this section is completely commented out then your configuration
-## is setup for suricata-update as it was most likely bundled and
-## installed with Suricata.
-##
-
-#default-rule-path: /var/lib/suricata/rules
-
-#rule-files:
-# - botcc.rules
-# # - botcc.portgrouped.rules
-# - ciarmy.rules
-# - compromised.rules
-# - drop.rules
-# - dshield.rules
-## - emerging-activex.rules
-# - emerging-attack_response.rules
-# - emerging-chat.rules
-# - emerging-current_events.rules
-# - emerging-dns.rules
-# - emerging-dos.rules
-# - emerging-exploit.rules
-# - emerging-ftp.rules
-## - emerging-games.rules
-## - emerging-icmp_info.rules
-## - emerging-icmp.rules
-# - emerging-imap.rules
-## - emerging-inappropriate.rules
-## - emerging-info.rules
-# - emerging-malware.rules
-# - emerging-misc.rules
-# - emerging-mobile_malware.rules
-# - emerging-netbios.rules
-# - emerging-p2p.rules
-# - emerging-policy.rules
-# - emerging-pop3.rules
-# - emerging-rpc.rules
-## - emerging-scada.rules
-## - emerging-scada_special.rules
-# - emerging-scan.rules
-## - emerging-shellcode.rules
-# - emerging-smtp.rules
-# - emerging-snmp.rules
-# - emerging-sql.rules
-# - emerging-telnet.rules
-# - emerging-tftp.rules
-# - emerging-trojan.rules
-# - emerging-user_agents.rules
-# - emerging-voip.rules
-# - emerging-web_client.rules
-# - emerging-web_server.rules
-## - emerging-web_specific_apps.rules
-# - emerging-worm.rules
-# - tor.rules
-## - decoder-events.rules # available in suricata sources under rules dir
-## - stream-events.rules # available in suricata sources under rules dir
-# - http-events.rules # available in suricata sources under rules dir
-# - smtp-events.rules # available in suricata sources under rules dir
-# - dns-events.rules # available in suricata sources under rules dir
-# - tls-events.rules # available in suricata sources under rules dir
-## - modbus-events.rules # available in suricata sources under rules dir
-## - app-layer-events.rules # available in suricata sources under rules dir
-## - dnp3-events.rules # available in suricata sources under rules dir
-## - ntp-events.rules # available in suricata sources under rules dir
-## - ipsec-events.rules # available in suricata sources under rules dir
-## - kerberos-events.rules # available in suricata sources under rules dir
-
 ##
 ## Auxiliary configuration files.
 ##
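After these suricata.yaml changes land on a sensor, the merged config can be smoke-tested with Suricata's built-in check; a hedged example, assuming the so-suricata container used by these states is running:

    # -T runs the config/rule test and exits without capturing traffic.
    docker exec so-suricata suricata -T -c /etc/suricata/suricata.yaml
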
@@ -55,6 +55,12 @@ surilogdir:
 - user: 940
 - group: 939

+suridatadir:
+file.directory:
+- name: /nsm/suricata
+- user: 940
+- group: 939
+
 surirulesync:
 file.recurse:
 - name: /opt/so/conf/suricata/rules/
@@ -119,6 +125,7 @@ so-suricata:
 - /opt/so/conf/suricata/threshold.conf:/etc/suricata/threshold.conf:ro
 - /opt/so/conf/suricata/rules:/etc/suricata/rules:ro
 - /opt/so/log/suricata/:/var/log/suricata/:rw
+- /nsm/suricata/:/nsm/:rw
 - /opt/so/conf/suricata/bpf:/etc/suricata/bpf:ro
 - network_mode: host
 - watch:
salt/suricata/master.sls (new file, 19 lines)
@@ -0,0 +1,19 @@
+surilocaldir:
+file.directory:
+- name: /opt/so/saltstack/local/salt/suricata
+- user: socore
+- group: socore
+- makedirs: True
+
+ruleslink:
+file.symlink:
+- name: /opt/so/saltstack/local/salt/suricata/rules
+- user: socore
+- group: socore
+- target: /opt/so/rules/nids
+
+refresh_salt_master_fileserver_suricata_ruleslink:
+salt.runner:
+- name: fileserver.update
+- onchanges:
+- file: ruleslink
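This new state symlinks the managed NIDS rules into the local salt tree and refreshes the master's fileserver whenever the link changes. A hedged usage sketch on the master (state name as assigned in salt/top.sls later in this diff):

    salt-call state.apply suricata.master
    # The salt.runner above is the automated form of:
    salt-run fileserver.update
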
@@ -12,7 +12,7 @@ search {
 # Name of the index
 index = the_hive
 # Name of the Elasticsearch cluster
-cluster = hive
+cluster = thehive
 # Address of the Elasticsearch instance
 host = ["{{ MASTERIP }}:9500"]
 #search.uri = "http://{{ MASTERIP }}:9500"
@@ -12,7 +12,7 @@ search {
 # Name of the index
 index = cortex
 # Name of the Elasticsearch cluster
-cluster = hive
+cluster = thehive
 # Address of the Elasticsearch instance
 host = ["{{ MASTERIP }}:9500"]
 # Scroll keepalive
@@ -1,4 +1,4 @@
-cluster.name: "hive"
+cluster.name: "thehive"
 network.host: 0.0.0.0
 discovery.zen.minimum_master_nodes: 1
 # This is a test -- if this is here, then the volume is mounted correctly.
@@ -1,24 +1,24 @@
 {% set MASTERIP = salt['pillar.get']('master:mainip', '') %}
 {% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
 {% set MASTER = salt['grains.get']('master') %}
-hiveconfdir:
+thehiveconfdir:
 file.directory:
-- name: /opt/so/conf/hive/etc
+- name: /opt/so/conf/thehive/etc
 - makedirs: True
 - user: 939
 - group: 939

-hivelogdir:
+thehivelogdir:
 file.directory:
-- name: /opt/so/log/hive
+- name: /opt/so/log/thehive
 - makedirs: True
 - user: 939
 - group: 939

-hiveconf:
+thehiveconf:
 file.recurse:
-- name: /opt/so/conf/hive/etc
-- source: salt://hive/thehive/etc
+- name: /opt/so/conf/thehive/etc
+- source: salt://thehive/etc
 - user: 939
 - group: 939
 - template: jinja
@@ -40,7 +40,7 @@ cortexlogdir:
 cortexconf:
 file.recurse:
 - name: /opt/so/conf/cortex
-- source: salt://hive/thehive/etc
+- source: salt://thehive/etc
 - user: 939
 - group: 939
 - template: jinja
@@ -48,9 +48,9 @@ cortexconf:
 # Install Elasticsearch

 # Made directory for ES data to live in
-hiveesdata:
+thehiveesdata:
 file.directory:
-- name: /nsm/hive/esdata
+- name: /nsm/thehive/esdata
 - makedirs: True
 - user: 939
 - group: 939
@@ -64,16 +64,16 @@ so-thehive-es:
 - interactive: True
 - tty: True
 - binds:
-- /nsm/hive/esdata:/usr/share/elasticsearch/data:rw
-- /opt/so/conf/hive/etc/es/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro
-- /opt/so/conf/hive/etc/es/log4j2.properties:/usr/share/elasticsearch/config/log4j2.properties:ro
-- /opt/so/log/hive:/var/log/elasticsearch:rw
+- /nsm/thehive/esdata:/usr/share/elasticsearch/data:rw
+- /opt/so/conf/thehive/etc/es/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro
+- /opt/so/conf/thehive/etc/es/log4j2.properties:/usr/share/elasticsearch/config/log4j2.properties:ro
+- /opt/so/log/thehive:/var/log/elasticsearch:rw
 - environment:
 - http.host=0.0.0.0
 - http.port=9400
 - transport.tcp.port=9500
 - transport.host=0.0.0.0
-- cluster.name=hive
+- cluster.name=thehive
 - thread_pool.index.queue_size=100000
 - thread_pool.search.queue_size=100000
 - thread_pool.bulk.queue_size=100000
@@ -90,13 +90,13 @@ so-cortex:
 - name: so-cortex
 - user: 939
 - binds:
-- /opt/so/conf/hive/etc/cortex-application.conf:/opt/cortex/conf/application.conf:ro
+- /opt/so/conf/thehive/etc/cortex-application.conf:/opt/cortex/conf/application.conf:ro
 - port_bindings:
 - 0.0.0.0:9001:9001

 cortexscript:
 cmd.script:
-- source: salt://hive/thehive/scripts/cortex_init
+- source: salt://thehive/scripts/cortex_init
 - cwd: /opt/so
 - template: jinja
@@ -109,12 +109,12 @@ so-thehive:
 - name: so-thehive
 - user: 939
 - binds:
-- /opt/so/conf/hive/etc/application.conf:/opt/thehive/conf/application.conf:ro
+- /opt/so/conf/thehive/etc/application.conf:/opt/thehive/conf/application.conf:ro
 - port_bindings:
 - 0.0.0.0:9000:9000

-hivescript:
+thehivescript:
 cmd.script:
-- source: salt://hive/thehive/scripts/hive_init
+- source: salt://thehive/scripts/hive_init
 - cwd: /opt/so
 - template: jinja
@@ -7,6 +7,8 @@
 {%- set CORTEXORGUSER = salt['pillar.get']('static:cortexorguser', '') %}
 {%- set CORTEXORGUSERKEY = salt['pillar.get']('static:cortexorguserkey', '') %}

+default_salt_dir=/opt/so/saltstack/default
+
 cortex_init(){
 sleep 60
 CORTEX_IP="{{MASTERIP}}"
@@ -17,7 +19,7 @@ cortex_init(){
 CORTEX_ORG_DESC="{{CORTEXORGNAME}} organization created by Security Onion setup"
 CORTEX_ORG_USER="{{CORTEXORGUSER}}"
 CORTEX_ORG_USER_KEY="{{CORTEXORGUSERKEY}}"
-SOCTOPUS_CONFIG="/opt/so/saltstack/salt/soctopus/files/SOCtopus.conf"
+SOCTOPUS_CONFIG="$default_salt_dir/salt/soctopus/files/SOCtopus.conf"


 # Migrate DB
salt/thehive/scripts/hive_init (new executable file, 64 lines)
@@ -0,0 +1,64 @@
+#!/bin/bash
+{% set MASTERIP = salt['pillar.get']('static:masterip', '') %}
+{%- set THEHIVEUSER = salt['pillar.get']('static:hiveuser', '') %}
+{%- set THEHIVEPASSWORD = salt['pillar.get']('static:hivepassword', '') %}
+{%- set THEHIVEKEY = salt['pillar.get']('static:hivekey', '') %}
+
+thehive_init(){
+sleep 120
+THEHIVE_IP="{{MASTERIP}}"
+THEHIVE_USER="{{THEHIVEUSER}}"
+THEHIVE_PASSWORD="{{THEHIVEPASSWORD}}"
+THEHIVE_KEY="{{THEHIVEKEY}}"
+SOCTOPUS_CONFIG="/opt/so/saltstack/salt/soctopus/files/SOCtopus.conf"
+
+echo -n "Waiting for TheHive..."
+COUNT=0
+THEHIVE_CONNECTED="no"
+while [[ "$COUNT" -le 240 ]]; do
+curl --output /dev/null --silent --head --fail -k "https://$THEHIVE_IP/thehive"
+if [ $? -eq 0 ]; then
+THEHIVE_CONNECTED="yes"
+echo "connected!"
+break
+else
+((COUNT+=1))
+sleep 1
+echo -n "."
+fi
+done
+
+if [ "$THEHIVE_CONNECTED" == "yes" ]; then
+
+# Migrate DB
+curl -v -k -XPOST "https://$THEHIVE_IP:/thehive/api/maintenance/migrate"
+
+# Create intial TheHive user
+curl -v -k "https://$THEHIVE_IP/thehive/api/user" -H "Content-Type: application/json" -d "{\"login\" : \"$THEHIVE_USER\",\"name\" : \"$THEHIVE_USER\",\"roles\" : [\"read\",\"alert\",\"write\",\"admin\"],\"preferences\" : \"{}\",\"password\" : \"$THEHIVE_PASSWORD\", \"key\": \"$THEHIVE_KEY\"}"
+
+# Pre-load custom fields
+#
+# reputation
+curl -v -k "https://$THEHIVE_IP/thehive/api/list/custom_fields" -H "Authorization: Bearer $THEHIVE_KEY" -H "Content-Type: application/json" -d "{\"value\":{\"name\": \"reputation\", \"reference\": \"reputation\", \"description\": \"This field provides an overall reputation status for an address/domain.\", \"type\": \"string\", \"options\": []}}"
+
+
+touch /opt/so/state/thehive.txt
+else
+echo "We experienced an issue connecting to TheHive!"
+fi
+}
+
+if [ -f /opt/so/state/thehive.txt ]; then
+exit 0
+else
+rm -f garbage_file
+while ! wget -O garbage_file {{MASTERIP}}:9400 2>/dev/null
+do
+echo "Waiting for Elasticsearch..."
+rm -f garbage_file
+sleep 1
+done
+rm -f garbage_file
+sleep 5
+thehive_init
+fi
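The script is guarded by a one-shot state file, so re-running it during troubleshooting means clearing the marker first; a hedged sketch using the paths from the script itself:

    rm -f /opt/so/state/thehive.txt
    salt-call state.apply thehive   # re-renders and re-executes the cmd.script
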
salt/top.sls (17 lines changed)
@@ -30,6 +30,7 @@ base:
 - telegraf
 - firewall
 - idstools
+- suricata.master
 - pcap
 - suricata
 - zeek
@@ -73,6 +74,7 @@ base:
 - soc
 - firewall
 - idstools
+- suricata.master
 - healthcheck
 {%- if FLEETMASTER or FLEETNODE or PLAYBOOK != 0 %}
 - mysql
@@ -100,7 +102,7 @@ base:
 - schedule
 - soctopus
 {%- if THEHIVE != 0 %}
-- hive
+- thehive
 {%- endif %}
 {%- if PLAYBOOK != 0 %}
 - playbook
@@ -129,6 +131,7 @@ base:
 - firewall
 - master
 - idstools
+- suricata.master
 - redis
 {%- if FLEETMASTER or FLEETNODE or PLAYBOOK != 0 %}
 - mysql
@@ -149,11 +152,14 @@ base:
 {%- endif %}
 - soctopus
 {%- if THEHIVE != 0 %}
-- hive
+- thehive
 {%- endif %}
 {%- if PLAYBOOK != 0 %}
 - playbook
 {%- endif %}
+{%- if NAVIGATOR != 0 %}
+- navigator
+{%- endif %}
 {%- if FREQSERVER != 0 %}
 - freqserver
 {%- endif %}
@@ -174,6 +180,7 @@ base:
 - soc
 - firewall
 - idstools
+- suricata.master
 - healthcheck
 - redis
 {%- if FLEETMASTER or FLEETNODE or PLAYBOOK != 0 %}
@@ -203,7 +210,7 @@ base:
 - schedule
 - soctopus
 {%- if THEHIVE != 0 %}
-- hive
+- thehive
 {%- endif %}
 {%- if PLAYBOOK != 0 %}
 - playbook
@@ -256,6 +263,7 @@ base:
 - ca
 - ssl
 - common
+- nginx
 - telegraf
 - firewall
 {%- if WAZUH != 0 %}
@@ -297,6 +305,7 @@ base:
 - firewall
 - master
 - idstools
+- suricata.master
 - redis
 {%- if FLEETMASTER or FLEETNODE or PLAYBOOK != 0 %}
 - mysql
@@ -318,7 +327,7 @@ base:
 {%- endif %}
 - soctopus
 {%- if THEHIVE != 0 %}
-- hive
+- thehive
 {%- endif %}
 {%- if PLAYBOOK != 0 %}
 - playbook
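With the top file updated, the new assignments can be confirmed per minion:

    # Lists the states this minion will apply; suricata.master and thehive
    # should now appear for master-type nodes.
    salt-call state.show_top
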
@@ -1,5 +1,7 @@
 {%- set MASTERIP = salt['pillar.get']('static:masterip', '') %}
+{%- set WAZUH_ENABLED = salt['pillar.get']('static:wazuh', '0') %}
 #!/bin/bash
+local_salt_dir=/opt/so/saltstack/local

 # Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
 #
@@ -17,7 +19,7 @@
 # along with this program. If not, see <http://www.gnu.org/licenses/>.

 # Check if Wazuh enabled
-if grep -q -R "wazuh: 1" /opt/so/saltstack/pillar/*; then
+if [ {{ WAZUH_ENABLED }} ]; then
 WAZUH_MGR_CFG="/opt/so/wazuh/etc/ossec.conf"
 if ! grep -q "<white_list>{{ MASTERIP }}</white_list>" $WAZUH_MGR_CFG ; then
 DATE=`date`
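One behavioral note on the new test, shown here as an illustrative sketch only: a single-argument [ ... ] is true for any non-empty string, so both rendered pillar values pass it.

    [ 1 ] && echo "runs"   # prints
    [ 0 ] && echo "runs"   # also prints; only an empty value would skip
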
@@ -80,11 +80,6 @@ wazuhmgrwhitelist:
 - mode: 755
 - template: jinja

-wazuhagentservice:
-service.running:
-- name: wazuh-agent
-- enable: True
-
 so-wazuh:
 docker_container.running:
 - image: {{ MASTER }}:5000/soshybridhunter/so-wazuh:{{ VERSION }}
@@ -110,3 +105,8 @@ whitelistmanager:
 cmd.run:
 - name: /usr/sbin/wazuh-manager-whitelist
 - cwd: /
+
+wazuhagentservice:
+service.running:
+- name: wazuh-agent
+- enable: True
@@ -1,2 +1,2 @@
 #!/bin/bash
-/usr/bin/docker exec so-zeek /opt/zeek/bin/zeekctl netstats | awk '{print $(NF-2),$(NF-1),$NF}' | awk -F '[ =]' '{RCVD += $2;DRP += $4;TTL += $6} END { print "rcvd: " RCVD, "dropped: " DRP, "total: " TTL}' >> /nsm/zeek/logs/packetloss.log
+/usr/bin/docker exec so-zeek /opt/zeek/bin/zeekctl netstats | awk '{print $(NF-2),$(NF-1),$NF}' | awk -F '[ =]' '{RCVD += $2;DRP += $4;TTL += $6} END { print "rcvd: " RCVD, "dropped: " DRP, "total: " TTL}' >> /nsm/zeek/logs/packetloss.log 2>&1
@@ -21,6 +21,8 @@ address_type=DHCP
 ADMINUSER=onionuser
 ADMINPASS1=onionuser
 ADMINPASS2=onionuser
+ALLOW_CIDR=0.0.0.0/0
+ALLOW_ROLE=a
 BASICBRO=7
 BASICSURI=7
 # BLOGS=
@@ -65,6 +67,7 @@ PLAYBOOK=1
 REDIRECTINFO=IP
 RULESETUP=ETOPEN
 # SHARDCOUNT=
+SKIP_REBOOT=1
 SOREMOTEPASS1=onionuser
 SOREMOTEPASS2=onionuser
 STRELKA=1
@@ -38,31 +38,3 @@ calculate_useable_cores() {
 if [ "$lb_procs_round" -lt 1 ]; then lb_procs=1; else lb_procs=$lb_procs_round; fi
 export lb_procs
 }
-
-set_defaul_log_size() {
-local percentage
-
-case $INSTALLTYPE in
-EVAL | HEAVYNODE)
-percentage=50
-;;
-*)
-percentage=80
-;;
-esac
-
-local disk_dir="/"
-if [ -d /nsm ]; then
-disk_dir="/nsm"
-fi
-local disk_size_1k
-disk_size_1k=$(df $disk_dir | grep -v "^Filesystem" | awk '{print $2}')
-
-local ratio="1048576"
-
-local disk_size_gb
-disk_size_gb=$( echo "$disk_size_1k" "$ratio" | awk '{print($1/$2)}' )
-
-log_size_limit=$( echo "$disk_size_gb" "$percentage" | awk '{printf("%.0f", $1 * ($2/100))}')
-export log_size_limit
-}
@@ -56,6 +56,19 @@ add_master_hostfile() {
 whiptail_check_exitstatus $exitstatus
 }

+addtotab_generate_templates() {
+
+local addtotab_path=$local_salt_dir/pillar/data
+
+for i in evaltab mastersearchtab mastertab nodestab sensorstab; do
+printf '%s\n'\
+"$i:"\
+"" > "$addtotab_path"/$i.sls
+echo "Added $i Template"
+done
+
+}
+
 # $5 => (optional) password variable
 so_add_user() {
 local username=$1
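Each generated tab file is just a YAML top-level key followed by a blank line; for one iteration of the loop the printf output is equivalent to this illustrative heredoc:

    cat <<'EOF' > /opt/so/saltstack/local/pillar/data/sensorstab.sls
    sensorstab:

    EOF
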
@@ -116,16 +129,16 @@ add_web_user() {

 # Create an secrets pillar so that passwords survive re-install
 secrets_pillar(){
-if [ ! -f /opt/so/saltstack/pillar/secrets.sls ]; then
+if [ ! -f $local_salt_dir/pillar/secrets.sls ]; then
 echo "Creating Secrets Pillar" >> "$setup_log" 2>&1
-mkdir -p /opt/so/saltstack/pillar
+mkdir -p $local_salt_dir/pillar
 printf '%s\n'\
 "secrets:"\
 " mysql: $MYSQLPASS"\
 " playbook: $PLAYBOOKPASS"\
 " fleet: $FLEETPASS"\
 " fleet_jwt: $FLEETJWT"\
-" fleet_enroll-secret: False" > /opt/so/saltstack/pillar/secrets.sls
+" fleet_enroll-secret: False" > $local_salt_dir/pillar/secrets.sls
 fi
 }
@@ -193,7 +206,7 @@ check_admin_pass() {
 check_pass_match "$ADMINPASS1" "$ADMINPASS2" "APMATCH"
 }

-check_hive_init_then_reboot() {
+check_hive_init() {

 wait_for_file /opt/so/state/thehive.txt 20 5
 local return_val=$?
@@ -203,7 +216,6 @@ check_hive_init_then_reboot() {

 docker stop so-thehive
 docker rm so-thehive
-shutdown -r now
 }

 check_network_manager_conf() {
@@ -261,7 +273,7 @@ clear_master() {
 {
 echo "Clearing old master key";
 rm -f /etc/salt/pki/minion/minion_master.pub;
-sytemctl -q restart salt-minion;
+systemctl -q restart salt-minion;
 } >> "$setup_log" 2>&1
 fi

@@ -355,10 +367,10 @@ configure_minion() {
 "mysql.host: '$MAINIP'"\
 "mysql.port: 3306"\
 "mysql.user: 'root'" >> "$minion_config"
-if [ ! -f /opt/so/saltstack/pillar/secrets.sls ]; then
+if [ ! -f $local_salt_dir/pillar/secrets.sls ]; then
 echo "mysql.pass: '$MYSQLPASS'" >> "$minion_config"
 else
-OLDPASS=$(grep "mysql" /opt/so/saltstack/pillar/secrets.sls | awk '{print $2}')
+OLDPASS=$(grep "mysql" $local_salt_dir/pillar/secrets.sls | awk '{print $2}')
 echo "mysql.pass: '$OLDPASS'" >> "$minion_config"
 fi
 ;;
@@ -438,20 +450,20 @@ copy_master_config() {
 copy_minion_tmp_files() {
 case "$install_type" in
 'MASTER' | 'EVAL' | 'HELIXSENSOR' | 'MASTERSEARCH' | 'STANDALONE')
-echo "Copying pillar and salt files in $temp_install_dir to /opt/so/saltstack"
-cp -Rv "$temp_install_dir"/pillar/ /opt/so/saltstack/ >> "$setup_log" 2>&1
+echo "Copying pillar and salt files in $temp_install_dir to $local_salt_dir"
+cp -Rv "$temp_install_dir"/pillar/ $local_salt_dir/ >> "$setup_log" 2>&1
 if [ -d "$temp_install_dir"/salt ] ; then
-cp -Rv "$temp_install_dir"/salt/ /opt/so/saltstack/ >> "$setup_log" 2>&1
+cp -Rv "$temp_install_dir"/salt/ $local_salt_dir/ >> "$setup_log" 2>&1
 fi
 ;;
 *)
 {
-echo "scp pillar and salt files in $temp_install_dir to master /opt/so/saltstack";
+echo "scp pillar and salt files in $temp_install_dir to master $local_salt_dir";
 ssh -i /root/.ssh/so.key soremote@"$MSRV" mkdir -p /tmp/"$MINION_ID"/pillar;
 ssh -i /root/.ssh/so.key soremote@"$MSRV" mkdir -p /tmp/"$MINION_ID"/schedules;
 scp -prv -i /root/.ssh/so.key "$temp_install_dir"/pillar/minions/* soremote@"$MSRV":/tmp/"$MINION_ID"/pillar/;
 scp -prv -i /root/.ssh/so.key "$temp_install_dir"/salt/patch/os/schedules/* soremote@"$MSRV":/tmp/"$MINION_ID"/schedules;
-ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo /opt/so/saltstack/salt/master/files/add_minion.sh "$MINION_ID";
+ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/salt/master/files/add_minion.sh "$MINION_ID";
 } >> "$setup_log" 2>&1
 ;;
 esac
@@ -469,6 +481,20 @@ copy_ssh_key() {
 ssh-copy-id -f -i /root/.ssh/so.key soremote@"$MSRV"
 }

+create_local_directories() {
+echo "Creating local pillar and salt directories"
+PILLARSALTDIR=${SCRIPTDIR::-5}
+for i in "pillar" "salt"; do
+for d in `find $PILLARSALTDIR/$i -type d`; do
+suffixdir=${d//$PILLARSALTDIR/}
+if [ ! -d "$local_salt_dir/$suffixdir" ]; then
+mkdir -v "$local_salt_dir$suffixdir" >> "$setup_log" 2>&1
+fi
+done
+chown -R socore:socore "$local_salt_dir/$i"
+done
+
+}

 create_sensor_bond() {
 echo "Setting up sensor bond" >> "$setup_log" 2>&1
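The ${SCRIPTDIR::-5} expansion trims the last five characters (the trailing "setup" directory) from the script path; a small illustration with a hypothetical checkout location:

    SCRIPTDIR=/root/SecurityOnion/setup   # hypothetical path
    echo "${SCRIPTDIR::-5}"               # -> /root/SecurityOnion/
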
@@ -588,6 +614,9 @@ disable_misc_network_features() {
 filter_unused_nics
 if [ ${#filtered_nics[@]} -ne 0 ]; then
 for unused_nic in "${filtered_nics[@]}"; do
+if [ -n "$unused_nic" ]; then
+echo "Disabling unused NIC: $unused_nic" >> "$setup_log" 2>&1
+
 # Disable DHCPv4/v6 and autoconnect
 nmcli con mod "$unused_nic" \
 ipv4.method disabled \
@@ -596,6 +625,7 @@ disable_misc_network_features() {

 # Flush any existing IPs
 ip addr flush "$unused_nic" >> "$setup_log" 2>&1
+fi
 done
 fi
 # Disable IPv6
@@ -612,9 +642,9 @@ docker_install() {
 {
 yum clean expire-cache;
 yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo;
-yum -y install docker-ce-19.03.9-3.el7 containerd.io-1.2.6-3.el7;
-yum versionlock docker-ce-19.03.9-3.el7;
-yum versionlock containerd.io-1.2.6-3.el7
+yum -y install docker-ce-19.03.11-3.el7 containerd.io-1.2.13-3.2.el7;
+yum versionlock docker-ce-19.03.11-3.el7;
+yum versionlock containerd.io-1.2.13-3.2.el7
 } >> "$setup_log" 2>&1

 else
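Because the pinned Docker/containerd versions changed here, an in-place upgrade on an existing box needs the stale locks cleared before the new install can pin; a hedged sketch (versionlock subcommands from yum-plugin-versionlock):

    yum versionlock list
    yum versionlock delete docker-ce-19.03.9-3.el7 containerd.io-1.2.6-3.el7
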
@@ -730,7 +760,7 @@ docker_seed_registry() {

 fireeye_pillar() {

-local fireeye_pillar_path=/opt/so/saltstack/pillar/fireeye
+local fireeye_pillar_path=$local_salt_dir/pillar/fireeye
 mkdir -p "$fireeye_pillar_path"

 printf '%s\n'\
@@ -744,7 +774,7 @@ fireeye_pillar() {
 # Generate Firewall Templates
 firewall_generate_templates() {

-local firewall_pillar_path=/opt/so/saltstack/pillar/firewall
+local firewall_pillar_path=$local_salt_dir/pillar/firewall
 mkdir -p "$firewall_pillar_path"

 for i in analyst beats_endpoint forward_nodes masterfw minions osquery_endpoint search_nodes wazuh_endpoint
@@ -808,7 +838,7 @@ get_minion_type() {
 'HELIXSENSOR')
 minion_type='helix'
 ;;
-'*NODE')
+*'NODE')
 minion_type='node'
 ;;
 esac
@@ -897,7 +927,7 @@ master_pillar() {
 }

 master_static() {
-local static_pillar="/opt/so/saltstack/pillar/static.sls"
+local static_pillar="$local_salt_dir/pillar/static.sls"

 # Create a static file for global values
 printf '%s\n'\
@@ -995,54 +1025,6 @@ node_pillar() {
 cat "$pillar_file" >> "$setup_log" 2>&1
 }

-parse_options() {
-case "$1" in
---turbo=*)
-local proxy
-proxy=$(echo "$1" | tr -d '"' | awk -F'--turbo=' '{print $2}')
-proxy_url="http://$proxy"
-TURBO="$proxy_url"
-;;
---proxy=*)
-local proxy
-proxy=$(echo "$1" | tr -d '"' | awk -F'--proxy=' '{print $2}')
-
-local proxy_protocol
-proxy_protocol=$(echo "$proxy" | awk 'match($0, /http|https/) { print substr($0, RSTART, RLENGTH) }')
-
-if [[ ! $proxy_protocol =~ ^(http|https)$ ]]; then
-echo "Invalid proxy protocol"
-echo "Ignoring proxy"
-return
-fi
-
-if [[ $2 == --proxy-user=* && $3 == --proxy-pass=* ]]; then
-local proxy_user
-local proxy_password
-proxy_user=$(echo "$2" | tr -d '"' | awk -F'--proxy-user=' '{print $2}')
-proxy_password=$(echo "$3" | tr -d '"' | awk -F'--proxy-pass=' '{print $2}')
-
-local proxy_addr
-proxy_addr=$(echo "$proxy" | awk -F'http\:\/\/|https\:\/\/' '{print $2}')
-
-export http_proxy="${proxy_protocol}://${proxy_user}:${proxy_password}@${proxy_addr}"
-
-elif [[ (-z $2 || -z $3) && (-n $2 || -n $3) || ( -n $2 && -n $3 && ($2 != --proxy-user=* || $3 != --proxy-pass=*) ) ]]; then
-echo "Invalid options passed for proxy. Order is --proxy-user=<user> --proxy-pass=<password>"
-echo "Ignoring proxy"
-return
-
-else
-export http_proxy="$proxy"
-fi
-
-export {https,ftp,rsync,all}_proxy="$http_proxy"
-;;
-*)
-echo "Invalid option"
-esac
-}
-
 patch_pillar() {

 local pillar_file=$temp_install_dir/pillar/minions/$MINION_ID.sls
@@ -1276,7 +1258,7 @@ salt_checkin() {

 # Run a salt command to generate the minion key
 salt_firstcheckin() {
-salt-call state.show_top >> /dev/null # send output to /dev/null because we don't actually care about the ouput
+salt-call state.show_top >> /dev/null 2>&1 # send output to /dev/null because we don't actually care about the ouput
 }

 set_base_heapsizes() {
@@ -1290,16 +1272,18 @@ set_main_ip() {

 setup_salt_master_dirs() {
 # Create salt paster directories
-mkdir -p /opt/so/saltstack/salt
-mkdir -p /opt/so/saltstack/pillar
+mkdir -p $default_salt_dir/pillar
+mkdir -p $default_salt_dir/salt
+mkdir -p $local_salt_dir/pillar
+mkdir -p $local_salt_dir/salt

 # Copy over the salt code and templates
 if [ "$setup_type" = 'iso' ]; then
-rsync -avh --exclude 'TRANS.TBL' /home/onion/SecurityOnion/pillar/* /opt/so/saltstack/pillar/ >> "$setup_log" 2>&1
-rsync -avh --exclude 'TRANS.TBL' /home/onion/SecurityOnion/salt/* /opt/so/saltstack/salt/ >> "$setup_log" 2>&1
+rsync -avh --exclude 'TRANS.TBL' /home/onion/SecurityOnion/pillar/* $default_salt_dir/pillar/ >> "$setup_log" 2>&1
+rsync -avh --exclude 'TRANS.TBL' /home/onion/SecurityOnion/salt/* $default_salt_dir/salt/ >> "$setup_log" 2>&1
 else
-cp -R ../pillar/* /opt/so/saltstack/pillar/ >> "$setup_log" 2>&1
-cp -R ../salt/* /opt/so/saltstack/salt/ >> "$setup_log" 2>&1
+cp -R ../pillar/* $default_salt_dir/pillar/ >> "$setup_log" 2>&1
+cp -R ../salt/* $default_salt_dir/salt/ >> "$setup_log" 2>&1
 fi

 echo "Chown the salt dirs on the master for socore" >> "$setup_log" 2>&1
@@ -1372,6 +1356,33 @@ sensor_pillar() {
 cat "$pillar_file" >> "$setup_log" 2>&1
 }

+set_default_log_size() {
+local percentage
+
+case $INSTALLTYPE in
+EVAL | HEAVYNODE)
+percentage=50
+;;
+*)
+percentage=80
+;;
+esac
+
+local disk_dir="/"
+if [ -d /nsm ]; then
+disk_dir="/nsm"
+fi
+local disk_size_1k
+disk_size_1k=$(df $disk_dir | grep -v "^Filesystem" | awk '{print $2}')
+
+local ratio="1048576"
+
+local disk_size_gb
+disk_size_gb=$( echo "$disk_size_1k" "$ratio" | awk '{print($1/$2)}' )
+
+log_size_limit=$( echo "$disk_size_gb" "$percentage" | awk '{printf("%.0f", $1 * ($2/100))}')
+}
+
 set_hostname() {

 set_hostname_iso
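A worked example of the sizing arithmetic above, assuming a hypothetical 1 TB /nsm volume on a non-EVAL install (80%):

    disk_size_1k=976762584   # ~931 GB reported by df in 1K blocks
    disk_size_gb=$(echo "$disk_size_1k" 1048576 | awk '{print($1/$2)}')
    echo "$disk_size_gb" 80 | awk '{printf("%.0f\n", $1 * ($2/100))}'   # -> 745
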
@@ -1399,49 +1410,49 @@ set_initial_firewall_policy() {

  set_main_ip

-  if [ -f /opt/so/saltstack/pillar/data/addtotab.sh ]; then chmod +x /opt/so/saltstack/pillar/data/addtotab.sh; fi
-  if [ -f /opt/so/saltstack/pillar/firewall/addfirewall.sh ]; then chmod +x /opt/so/saltstack/pillar/firewall/addfirewall.sh; fi
+  if [ -f $default_salt_dir/pillar/data/addtotab.sh ]; then chmod +x $default_salt_dir/pillar/data/addtotab.sh; fi
+  if [ -f $default_salt_dir/pillar/firewall/addfirewall.sh ]; then chmod +x $default_salt_dir/pillar/firewall/addfirewall.sh; fi

  case "$install_type" in
    'MASTER')
-      printf " - %s\n" "$MAINIP" | tee -a /opt/so/saltstack/pillar/firewall/minions.sls /opt/so/saltstack/pillar/firewall/masterfw.sls
-      /opt/so/saltstack/pillar/data/addtotab.sh mastertab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm"
+      printf " - %s\n" "$MAINIP" | tee -a $local_salt_dir/pillar/firewall/minions.sls $local_salt_dir/pillar/firewall/masterfw.sls
+      $default_salt_dir/pillar/data/addtotab.sh mastertab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm"
      ;;
    'EVAL' | 'MASTERSEARCH')
-      printf " - %s\n" "$MAINIP" | tee -a /opt/so/saltstack/pillar/firewall/minions.sls\
-      /opt/so/saltstack/pillar/firewall/masterfw.sls\
-      /opt/so/saltstack/pillar/firewall/forward_nodes.sls\
-      /opt/so/saltstack/pillar/firewall/search_nodes.sls
+      printf " - %s\n" "$MAINIP" | tee -a $local_salt_dir/pillar/firewall/minions.sls\
+      $local_salt_dir/pillar/firewall/masterfw.sls\
+      $local_salt_dir/pillar/firewall/forward_nodes.sls\
+      $local_salt_dir/pillar/firewall/search_nodes.sls
      case "$install_type" in
        'EVAL')
-          /opt/so/saltstack/pillar/data/addtotab.sh evaltab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm" bond0
+          $default_salt_dir/pillar/data/addtotab.sh evaltab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm" bond0 True
          ;;
        'MASTERSEARCH')
-          /opt/so/saltstack/pillar/data/addtotab.sh mastersearchtab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm"
+          $default_salt_dir/pillar/data/addtotab.sh mastersearchtab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm"
          ;;
      esac
      ;;
    'HELIXSENSOR')
-      printf " - %s\n" "$MAINIP" | tee -a /opt/so/saltstack/pillar/firewall/minions.sls\
-      /opt/so/saltstack/pillar/firewall/masterfw.sls\
-      /opt/so/saltstack/pillar/firewall/forward_nodes.sls
+      printf " - %s\n" "$MAINIP" | tee -a $local_salt_dir/pillar/firewall/minions.sls\
+      $local_salt_dir/pillar/firewall/masterfw.sls\
+      $local_salt_dir/pillar/firewall/forward_nodes.sls
      ;;
    'SENSOR' | 'SEARCHNODE' | 'HEAVYNODE' | 'FLEET')
-      ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo /opt/so/saltstack/pillar/firewall/addfirewall.sh minions "$MAINIP"
+      ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/pillar/firewall/addfirewall.sh minions "$MAINIP"
      case "$install_type" in
        'SENSOR')
-          ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo /opt/so/saltstack/pillar/firewall/addfirewall.sh forward_nodes "$MAINIP"
-          ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo /opt/so/saltstack/pillar/data/addtotab.sh sensorstab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm" bond0
+          ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/pillar/firewall/addfirewall.sh forward_nodes "$MAINIP"
+          ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/pillar/data/addtotab.sh sensorstab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm" bond0
          ;;
        'SEARCHNODE')
-          ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo /opt/so/saltstack/pillar/firewall/addfirewall.sh search_nodes "$MAINIP"
-          ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo /opt/so/saltstack/pillar/data/addtotab.sh nodestab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm"
+          ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/pillar/firewall/addfirewall.sh search_nodes "$MAINIP"
+          ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/pillar/data/addtotab.sh nodestab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm"
          ;;
        'HEAVYNODE')
-          ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo /opt/so/saltstack/pillar/firewall/addfirewall.sh forward_nodes "$MAINIP"
-          ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo /opt/so/saltstack/pillar/firewall/addfirewall.sh search_nodes "$MAINIP"
-          ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo /opt/so/saltstack/pillar/data/addtotab.sh sensorstab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm" bond0
-          ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo /opt/so/saltstack/pillar/data/addtotab.sh nodestab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm"
+          ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/pillar/firewall/addfirewall.sh forward_nodes "$MAINIP"
+          ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/pillar/firewall/addfirewall.sh search_nodes "$MAINIP"
+          ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/pillar/data/addtotab.sh sensorstab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm" bond0
+          ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/pillar/data/addtotab.sh nodestab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm"
          ;;
      esac
      ;;
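Every branch above records the node's management IP in the firewall pillar: master-type installs append it locally, while the remote node types invoke the whitelisted addfirewall.sh on the master over SSH as soremote (the sudo rules for that are granted in update_sudoers below). The appended entry itself is a one-line YAML list item; for example, with an illustrative $MAINIP:

    printf " - %s\n" "203.0.113.7"   # emits the list entry that tee -a appends:
    #  - 203.0.113.7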
@@ -1519,9 +1530,9 @@ update_sudoers() {
  if ! grep -qE '^soremote\ ALL=\(ALL\)\ NOPASSWD:(\/usr\/bin\/salt\-key|\/opt\/so\/saltstack)' /etc/sudoers; then
    # Update Sudoers so that soremote can accept keys without a password
    echo "soremote ALL=(ALL) NOPASSWD:/usr/bin/salt-key" | tee -a /etc/sudoers
-    echo "soremote ALL=(ALL) NOPASSWD:/opt/so/saltstack/pillar/firewall/addfirewall.sh" | tee -a /etc/sudoers
-    echo "soremote ALL=(ALL) NOPASSWD:/opt/so/saltstack/pillar/data/addtotab.sh" | tee -a /etc/sudoers
-    echo "soremote ALL=(ALL) NOPASSWD:/opt/so/saltstack/salt/master/files/add_minion.sh" | tee -a /etc/sudoers
+    echo "soremote ALL=(ALL) NOPASSWD:$default_salt_dir/pillar/firewall/addfirewall.sh" | tee -a /etc/sudoers
+    echo "soremote ALL=(ALL) NOPASSWD:$default_salt_dir/pillar/data/addtotab.sh" | tee -a /etc/sudoers
+    echo "soremote ALL=(ALL) NOPASSWD:$default_salt_dir/salt/master/files/add_minion.sh" | tee -a /etc/sudoers
  else
    echo "User soremote already granted sudo privileges" >> "$setup_log" 2>&1
  fi
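Since $default_salt_dir expands to /opt/so/saltstack/default (exported in so-variables below), a fresh install ends up with exactly these NOPASSWD entries; the grep guard matches either salt-key or any path under /opt/so/saltstack (old or new layout), so reruns do not append duplicates:

    soremote ALL=(ALL) NOPASSWD:/usr/bin/salt-key
    soremote ALL=(ALL) NOPASSWD:/opt/so/saltstack/default/pillar/firewall/addfirewall.sh
    soremote ALL=(ALL) NOPASSWD:/opt/so/saltstack/default/pillar/data/addtotab.sh
    soremote ALL=(ALL) NOPASSWD:/opt/so/saltstack/default/salt/master/files/add_minion.sh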
@@ -21,18 +21,40 @@ source ./so-common-functions
source ./so-whiptail
source ./so-variables

+# Parse command line arguments
setup_type=$1
-export setup_type

automation=$2

-automated=no
+while [[ $# -gt 0 ]]; do
+  arg="$1"
+  shift
+  case "$arg" in
+    "--turbo="* )
+      export TURBO="http://${arg#*=}";;
+    "--proxy="* )
+      export {http,https,ftp,rsync,all}_proxy="${arg#*=}";;
+    "--allow-role="* )
+      export ALLOW_ROLE="${arg#*=}";;
+    "--allow-cidr="* )
+      export ALLOW_CIDR="${arg#*=}";;
+    "--skip-reboot" )
+      export SKIP_REBOOT=1;;
+    * )
+      if [[ "$arg" == "--"* ]]; then
+        echo "Invalid option"
+      fi
+  esac
+done

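The loop scans every argument, so the flags may appear in any order after the two positional parameters, unrecognized --options only print a warning, and anything not starting with -- is silently skipped. A hypothetical unattended invocation (script name, setup type, and values are illustrative; --turbo= takes a bare host because the loop prepends http:// itself):

    sudo ./so-setup network myautomation \
        --turbo=192.0.2.10 \
        --proxy=http://proxy.example.com:3128 \
        --allow-role=a --allow-cidr=10.20.30.0/24 \
        --skip-reboot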
+# Begin Installation pre-processing
echo "---- Starting setup at $(date -u) ----" >> $setup_log 2>&1

+automated=no
function progress() {
  if [ $automated == no ]; then
    whiptail --title "Security Onion Install" --gauge 'Please wait while installing' 6 60 0
+  else
+    cat >> $setup_log 2>&1
  fi
}

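progress() acts as a sink for the percentage stream: interactively, whiptail's --gauge redraws as each integer arrives on stdin, while in automated runs the added else branch sends the same stream to the setup log instead of leaving it unconsumed. A minimal sketch of the calling pattern (the caller shown is illustrative):

    { echo 10; sleep 1; echo 50; sleep 1; echo 100; } | progress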
@@ -41,7 +63,7 @@ if [[ -f automation/$automation && $(basename $automation) == $automation ]]; then
  source automation/$automation
  automated=yes

-echo "Checking network configuration" >> $setup_log 2>&1g
+echo "Checking network configuration" >> $setup_log 2>&1
ip a >> $setup_log 2>&1

attempt=1
@@ -76,11 +98,6 @@ export PATH=$PATH:../salt/common/tools/sbin

got_root

-if [[ $# -gt 1 ]]; then
-  set -- "${@:2}"
-  parse_options "$@" >> $setup_log 2>&1
-fi
-
detect_os

if [ "$OS" == ubuntu ]; then
@@ -178,17 +195,21 @@ echo "MINION_ID = $MINION_ID" >> $setup_log 2>&1

minion_type=$(get_minion_type)

-# Set any constants needed
+# Set any variables needed
+set_default_log_size >> $setup_log 2>&1

if [[ $is_helix ]]; then
  RULESETUP=ETOPEN
  NSMSETUP=BASIC
  HNSENSOR=inherit
  MASTERUPDATES=0
fi

if [[ $is_helix || ( $is_master && $is_node ) ]]; then
  RULESETUP=ETOPEN
  NSMSETUP=BASIC
fi

if [[ $is_master && $is_node ]]; then
  LSPIPELINEWORKERS=1
  LSPIPELINEBATCH=125
@@ -197,6 +218,7 @@ if [[ $is_master && $is_node ]]; then
  NIDS=Suricata
  BROVERSION=ZEEK
fi

if [[ $is_node ]]; then
  CURCLOSEDAYS=30
fi
@@ -339,22 +361,22 @@ fi
# Set initial percentage to 0
export percentage=0

-set_progress_str 1 'Updating packages'
+if [[ $is_minion ]]; then
+  set_progress_str 1 'Configuring firewall'
+  set_initial_firewall_policy >> $setup_log 2>&1
+fi
+
+set_progress_str 2 'Updating packages'
update_packages >> $setup_log 2>&1

if [[ $is_sensor || $is_helix ]]; then
-  set_progress_str 2 'Creating bond interface'
+  set_progress_str 3 'Creating bond interface'
  create_sensor_bond >> $setup_log 2>&1

-  set_progress_str 3 'Generating sensor pillar'
+  set_progress_str 4 'Generating sensor pillar'
  sensor_pillar >> $setup_log 2>&1
fi

-if [[ $is_minion ]]; then
-  set_progress_str 4 'Configuring firewall'
-  set_initial_firewall_policy >> $setup_log 2>&1
-fi
-
set_progress_str 5 'Installing Salt and dependencies'
saltify 2>> $setup_log
@@ -370,6 +392,8 @@ fi

if [[ $is_master || $is_helix ]]; then
  set_progress_str 10 'Configuring Salt master'
+  create_local_directories >> $setup_log 2>&1
+  addtotab_generate_templates >> $setup_log 2>&1
  copy_master_config >> $setup_log 2>&1
  setup_salt_master_dirs >> $setup_log 2>&1
  firewall_generate_templates >> $setup_log 2>&1
@@ -430,12 +454,15 @@ fi
  salt-call state.apply -l info registry >> $setup_log 2>&1
  docker_seed_registry 2>> "$setup_log" # ~ 60% when finished

-  set_progress_str 61 "$(print_salt_state_apply 'master')"
+  set_progress_str 60 "$(print_salt_state_apply 'master')"
  salt-call state.apply -l info master >> $setup_log 2>&1

-  set_progress_str 62 "$(print_salt_state_apply 'idstools')"
+  set_progress_str 61 "$(print_salt_state_apply 'idstools')"
  salt-call state.apply -l info idstools >> $setup_log 2>&1

+  set_progress_str 61 "$(print_salt_state_apply 'suricata.master')"
+  salt-call state.apply -l info suricata.master >> $setup_log 2>&1
+
fi

set_progress_str 62 "$(print_salt_state_apply 'firewall')"
@@ -566,12 +593,17 @@ fi
success=$(tail -10 $setup_log | grep Failed | awk '{ print $2}')
if [[ "$success" = 0 ]]; then
  whiptail_setup_complete
+  if [[ -n $ALLOW_ROLE && -n $ALLOW_CIDR ]]; then
+    export IP=$ALLOW_CIDR
+    so-allow -$ALLOW_ROLE >> $setup_log 2>&1
+  fi
  if [[ $THEHIVE == 1 ]]; then
-    check_hive_init_then_reboot
-  else
-    shutdown -r now
+    check_hive_init
  fi
else
  whiptail_setup_failed
+fi
+
+if [[ -z $SKIP_REBOOT ]]; then
  shutdown -r now
fi
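Combined with the parsed flags, this block makes a fully hands-off install possible: --allow-role/--allow-cidr pre-authorize access through so-allow, which here reads the target network from the exported IP variable, and --skip-reboot leaves the box up for further scripting. A sketch, assuming a hypothetical analyst role letter a:

    export IP=10.20.30.0/24
    so-allow -a >> $setup_log 2>&1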
@@ -34,3 +34,8 @@ export temp_install_dir=/root/installtmp
export percentage_str='Getting started'

export DEBIAN_FRONTEND=noninteractive
+
+export default_salt_dir=/opt/so/saltstack/default
+export local_salt_dir=/opt/so/saltstack/local
+
+export SCRIPTDIR=$(cd `dirname $0` && pwd)
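These two roots encode the default/local split used throughout this change: default/ holds the files shipped with Security Onion and is overwritten on upgrade (see copy_new_files below), while local/ holds site-specific pillar data and overrides, which is why the node and firewall entries earlier in this change are appended under $local_salt_dir. Because Salt serves a requested file from the first configured root that contains it, a copy under local/ shadows its counterpart under default/. An illustrative layout:

    /opt/so/saltstack/default/salt/...     # shipped states, replaced by updates
    /opt/so/saltstack/local/pillar/...     # site data (node tabs, firewall lists), preserved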
@@ -24,7 +24,7 @@ whiptail_basic_bro() {
  [ -n "$TESTING" ] && return

  BASICBRO=$(whiptail --title "Security Onion Setup" --inputbox \
-  "Enter the number of bro processes:" 10 75 "$lb_procs" 3>&1 1>&2 2>&3)
+  "Enter the number of zeek processes:" 10 75 "$lb_procs" 3>&1 1>&2 2>&3)

  local exitstatus=$?
  whiptail_check_exitstatus $exitstatus
@@ -51,7 +51,7 @@ whiptail_bro_pins() {
    cpu_core_list_whiptail+=("$item" "OFF")
  done

-  BROPINS=$(whiptail --noitem --title "Pin Bro CPUS" --checklist "Please select $lb_procs cores to pin Bro to:" 20 75 12 "${cpu_core_list_whiptail[@]}" 3>&1 1>&2 2>&3 )
+  BROPINS=$(whiptail --noitem --title "Pin Zeek CPUS" --checklist "Please select $lb_procs cores to pin Zeek to:" 20 75 12 "${cpu_core_list_whiptail[@]}" 3>&1 1>&2 2>&3 )
  local exitstatus=$?
  whiptail_check_exitstatus $exitstatus

@@ -458,7 +458,6 @@ whiptail_log_size_limit() {

  [ -n "$TESTING" ] && return

-  set_defaul_log_size

  log_size_limit=$(whiptail --title "Security Onion Setup" --inputbox \
  "Please specify the amount of disk space (in GB) you would like to allocate for Elasticsearch data storage. \
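The removed call was a misspelling (set_defaul_log_size), and the dialog no longer needs it: the setup script now computes the default once, via the set_default_log_size call added after get_minion_type above, so the inputbox only has to present the precomputed $log_size_limit. Following the pattern of the other inputboxes in this file, the elided tail of the command presumably passes it as the default value, along the lines of:

    log_size_limit=$(whiptail --title "Security Onion Setup" --inputbox \
        "Please specify ... (in GB) ..." 10 75 "$log_size_limit" 3>&1 1>&2 2>&3)

(the prompt text and trailing arguments here are assumptions based on whiptail_basic_bro.)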
@@ -95,9 +95,9 @@ copy_new_files() {

  # Copy new files over to the salt dir
  cd /tmp/sogh/securityonion-saltstack
-  rsync -a --exclude-from 'exclude-list.txt' salt /opt/so/saltstack/
-  chown -R socore:socore /opt/so/saltstack/salt
-  chmod 755 /opt/so/saltstack/pillar/firewall/addfirewall.sh
+  rsync -a --exclude-from 'exclude-list.txt' salt $default_salt_dir/
+  chown -R socore:socore $default_salt_dir/salt
+  chmod 755 $default_salt_dir/pillar/firewall/addfirewall.sh
  cd /tmp
}