Merge branch 'dev' into feature/issue124

Mike Reeves committed 2020-01-21 16:48:26 -05:00 (via GitHub)
58 changed files with 5157 additions and 2937 deletions

salt/auth/init.sls (new file, 38 lines added)

@@ -0,0 +1,38 @@
so-auth-api-dir:
file.directory:
- name: /opt/so/conf/auth/api
- user: 939
- group: 939
- makedirs: True
so-auth-api-image:
cmd.run:
- name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-auth-api:HH1.1.4
so-auth-ui-image:
cmd.run:
- name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-auth-ui:HH1.1.4
so-auth-api:
docker_container.running:
- require:
- so-auth-api-image
- image: docker.io/soshybridhunter/so-auth-api:HH1.1.4
- hostname: so-auth-api
- name: so-auth-api
- environment:
- BASE_PATH: "/so-auth/api"
- binds:
- /opt/so/conf/auth/api:/data
- port_bindings:
- 0.0.0.0:5656:5656
so-auth-ui:
docker_container.running:
- require:
- so-auth-ui-image
- image: docker.io/soshybridhunter/so-auth-ui:HH1.1.4
- hostname: so-auth-ui
- name: so-auth-ui
- port_bindings:
- 0.0.0.0:4242:80
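
A quick sanity check once this state has been applied on the master; a minimal sketch, assuming the state is applied with salt-call and that the auth API rejects an unauthenticated request (the exact status code is an assumption, not stated in this diff):

# apply the new state and confirm both containers came up
salt-call state.apply auth
docker ps --filter name=so-auth --format '{{.Names}}: {{.Status}}'
# per the port_bindings above, the UI is published on 4242 and the API on 5656;
# an unauthenticated request to the API's auth endpoint should be rejected (status assumed)
curl -s -o /dev/null -w '%{http_code}\n' http://localhost:5656/auth/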


@@ -1,3 +1,5 @@
{% set VERSION = salt['pillar.get']('static:soversion', '1.1.4') %}
{% set MASTER = salt['grains.get']('master') %}
{%- set GRAFANA = salt['pillar.get']('master:grafana', '0') %}
# Add socore Group
socoregroup:
@@ -114,16 +116,9 @@ nginxtmp:
- group: 939
- makedirs: True
# Start the core docker
so-coreimage:
cmd.run:
- name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-core:HH1.1.3
so-core:
docker_container.running:
- require:
- so-coreimage
- image: docker.io/soshybridhunter/so-core:HH1.1.3
- image: {{ MASTER }}:5000/soshybridhunter/so-core:HH{{ VERSION }}
- hostname: so-core
- user: socore
- binds:
@@ -175,15 +170,9 @@ tgrafconf:
- template: jinja
- source: salt://common/telegraf/etc/telegraf.conf
so-telegrafimage:
cmd.run:
- name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-telegraf:HH1.1.0
so-telegraf:
docker_container.running:
- require:
- so-telegrafimage
- image: docker.io/soshybridhunter/so-telegraf:HH1.1.0
- image: {{ MASTER }}:5000/soshybridhunter/so-telegraf:HH{{ VERSION }}
- environment:
- HOST_PROC=/host/proc
- HOST_ETC=/host/etc
@@ -214,7 +203,7 @@ so-telegraf:
- /opt/so/conf/telegraf/etc/telegraf.conf
- /opt/so/conf/telegraf/scripts
# If its a master or eval lets install the back end for now
# If its a master or eval lets install the back end for now
{% if grains['role'] == 'so-master' or grains['role'] == 'so-eval' and GRAFANA == 1 %}
# Influx DB
@@ -236,15 +225,9 @@ influxdbconf:
- template: jinja
- source: salt://common/influxdb/etc/influxdb.conf
so-influximage:
cmd.run:
- name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-influxdb:HH1.1.0
so-influxdb:
docker_container.running:
- require:
- so-influximage
- image: docker.io/soshybridhunter/so-influxdb:HH1.1.0
- image: {{ MASTER }}:5000/soshybridhunter/so-influxdb:HH{{ VERSION }}
- hostname: influxdb
- environment:
- INFLUXDB_HTTP_LOG_ENABLED=false
@@ -303,7 +286,7 @@ grafanadashfndir:
grafanadashsndir:
file.directory:
- name: /opt/so/conf/grafana/grafana_dashboards/storage_nodes
- name: /opt/so/conf/grafana/grafana_dashboards/search_nodes
- user: 939
- group: 939
- makedirs: True
@@ -362,11 +345,11 @@ dashboard-{{ SN }}:
{%- for SN, SNDATA in salt['pillar.get']('nodestab', {}).items() %}
dashboard-{{ SN }}:
file.managed:
- name: /opt/so/conf/grafana/grafana_dashboards/storage_nodes/{{ SN }}-Node.json
- name: /opt/so/conf/grafana/grafana_dashboards/search_nodes/{{ SN }}-Node.json
- user: 939
- group: 939
- template: jinja
- source: salt://common/grafana/grafana_dashboards/storage_nodes/storage.json
- source: salt://common/grafana/grafana_dashboards/search_nodes/searchnode.json
- defaults:
SERVERNAME: {{ SN }}
MANINT: {{ SNDATA.manint }}
@@ -400,14 +383,9 @@ dashboard-{{ SN }}:
{% endfor %}
{% endif %}
# Install the docker. This needs to be behind nginx at some point
so-grafanaimage:
cmd.run:
- name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-grafana:HH1.1.0
so-grafana:
docker_container.running:
- image: docker.io/soshybridhunter/so-grafana:HH1.1.0
- image: {{ MASTER }}:5000/soshybridhunter/so-grafana:HH{{ VERSION }}
- hostname: grafana
- user: socore
- binds:
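
The recurring change in this file swaps hard-coded docker.io pulls for images served from the master's own registry, built from the VERSION and MASTER variables set at the top of the file. A hedged sketch of how to confirm what those variables render to on a given minion:

# VERSION comes from pillar, MASTER from the minion's master grain
salt-call pillar.get static:soversion
salt-call grains.get master
# the fully rendered state (including the {{ MASTER }}:5000/... image strings) can be inspected with
salt-call state.show_sls common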


@@ -58,9 +58,9 @@ http {
# }
#}
server {
listen 80 default_server;
server_name _;
return 301 https://$host$request_uri;
listen 80 default_server;
server_name _;
return 301 https://$host$request_uri;
}
@@ -88,8 +88,8 @@ http {
# }
location /grafana/ {
rewrite /grafana/(.*) /$1 break;
proxy_pass http://{{ masterip }}:3000/;
rewrite /grafana/(.*) /$1 break;
proxy_pass http://{{ masterip }}:3000/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -100,10 +100,9 @@ http {
}
location /kibana/ {
auth_basic "Security Onion";
auth_basic_user_file /opt/so/conf/nginx/.htpasswd;
rewrite /kibana/(.*) /$1 break;
proxy_pass http://{{ masterip }}:5601/;
auth_request /so-auth/api/auth/;
rewrite /kibana/(.*) /$1 break;
proxy_pass http://{{ masterip }}:5601/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -114,7 +113,7 @@ http {
}
location /playbook/ {
proxy_pass http://{{ masterip }}:3200/playbook/;
proxy_pass http://{{ masterip }}:3200/playbook/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -126,9 +125,8 @@ http {
location /navigator/ {
auth_basic "Security Onion";
auth_basic_user_file /opt/so/conf/nginx/.htpasswd;
proxy_pass http://{{ masterip }}:4200/navigator/;
auth_request /so-auth/api/auth/;
proxy_pass http://{{ masterip }}:4200/navigator/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -139,7 +137,7 @@ http {
}
location /api/ {
proxy_pass https://{{ masterip }}:8080/api/;
proxy_pass https://{{ masterip }}:8080/api/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Upgrade $http_upgrade;
@@ -152,7 +150,7 @@ http {
}
location /fleet/ {
proxy_pass https://{{ masterip }}:8080/fleet/;
proxy_pass https://{{ masterip }}:8080/fleet/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -163,10 +161,10 @@ http {
}
location /thehive/ {
proxy_pass http://{{ masterip }}:9000/thehive/;
proxy_pass http://{{ masterip }}:9000/thehive/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_http_version 1.1; # this is essential for chunked responses to work
proxy_http_version 1.1; # this is essential for chunked responses to work
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
@@ -175,10 +173,10 @@ http {
}
location /cortex/ {
proxy_pass http://{{ masterip }}:9001/cortex/;
proxy_pass http://{{ masterip }}:9001/cortex/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_http_version 1.1; # this is essential for chunked responses to work
proxy_http_version 1.1; # this is essential for chunked responses to work
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
@@ -186,20 +184,8 @@ http {
}
location /cyberchef/ {
proxy_pass http://{{ masterip }}:9080/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_http_version 1.1; # this is essential for chunked responses to work
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
}
location /soctopus/ {
proxy_pass http://{{ masterip }}:7000/;
proxy_pass http://{{ masterip }}:7000/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -210,17 +196,16 @@ http {
}
location /sensoroni/ {
auth_basic "Security Onion";
auth_basic_user_file /opt/so/conf/nginx/.htpasswd;
proxy_pass http://{{ masterip }}:9822/;
auth_request /so-auth/api/auth/;
proxy_pass http://{{ masterip }}:9822/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "Upgrade";
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "Upgrade";
}
@@ -237,15 +222,34 @@ http {
}
location /sensoroniagents/ {
proxy_pass http://{{ masterip }}:9822/;
proxy_pass http://{{ masterip }}:9822/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
}
location /so-auth/loginpage/ {
proxy_pass http://{{ masterip }}:4242/;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
}
location /so-auth/api/ {
proxy_pass http://{{ masterip }}:5656/;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Host $host;
}
error_page 401 = @error401;
location @error401 {
add_header Set-Cookie "NSREDIRECT=http://{{ masterip }}$request_uri;Domain={{ masterip }};Path=/";
return 302 http://{{ masterip }}/so-auth/loginpage/;
}
error_page 404 /404.html;
location = /40x.html {
}
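
The auth_request wiring added above can be exercised from a shell. A rough sketch, assuming the auth API answers the subrequest with a 401 for a client without a session, in which case the @error401 handler should respond with a redirect to the login page:

# <master-ip> is a placeholder for whatever {{ masterip }} renders to
curl -skI https://<master-ip>/kibana/ | grep -iE '^(HTTP|Location|Set-Cookie)'
# expected (assumption): a 302 pointing at /so-auth/loginpage/ plus the NSREDIRECT cookie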


@@ -188,18 +188,6 @@ http {
}
location /cyberchef/ {
proxy_pass http://{{ masterip }}:9080/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_http_version 1.1; # this is essential for chunked responses to work
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
}
location /soctopus/ {
proxy_pass http://{{ masterip }}:7000/;
proxy_read_timeout 90;


@@ -1,41 +1,82 @@
#!/bin/bash
got_root() {
#
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Make sure you are root
if [ "$(id -u)" -ne 0 ]; then
echo "This script must be run using sudo!"
exit 1
fi
. /usr/sbin/so-common
}
SKIP=0
got_root
while getopts "abowi:" OPTION
do
case $OPTION in
echo "This program allows you to add a firewall rule to allow connections from a new IP address."
echo ""
echo "Choose the role for the IP or Range you would like to add"
echo ""
echo "[a] - Analyst - ports 80/tcp and 443/tcp"
echo "[b] - Logstash Beat - port 5044/tcp"
echo "[o] - Osquery endpoint - port 8080/tcp"
echo "[w] - Wazuh endpoint - port 1514"
echo ""
echo "Please enter your selection (a - analyst, b - beats, o - osquery, w - wazuh):"
read ROLE
echo "Enter a single ip address or range to allow (example: 10.10.10.10 or 10.10.0.0/16):"
read IP
h)
usage
exit 0
;;
a)
FULLROLE="analyst"
SKIP=1
;;
b)
FULLROLE="beats_endpoint"
SKIP=1
;;
i) IP=$OPTARG
;;
o)
FULLROLE="osquery_endpoint"
SKIP=1
;;
w)
FULLROLE="wazuh_endpoint"
SKIP=1
;;
esac
done
if [ "$SKIP" -eq 0 ]; then
echo "This program allows you to add a firewall rule to allow connections from a new IP address."
echo ""
echo "Choose the role for the IP or Range you would like to add"
echo ""
echo "[a] - Analyst - ports 80/tcp and 443/tcp"
echo "[b] - Logstash Beat - port 5044/tcp"
echo "[o] - Osquery endpoint - port 8080/tcp"
echo "[w] - Wazuh endpoint - port 1514"
echo ""
echo "Please enter your selection (a - analyst, b - beats, o - osquery, w - wazuh):"
read ROLE
echo "Enter a single ip address or range to allow (example: 10.10.10.10 or 10.10.0.0/16):"
read IP
if [ "$ROLE" == "a" ]; then
FULLROLE=analyst
elif [ "$ROLE" == "b" ]; then
FULLROLE=beats_endpoint
elif [ "$ROLE" == "o" ]; then
FULLROLE=osquery_endpoint
elif [ "$ROLE" == "w" ]; then
FULLROLE=wazuh_endpoint
else
echo "I don't recognize that role"
exit 1
fi
if [ "$ROLE" == "a" ]; then
FULLROLE=analyst
elif [ "$ROLE" == "b" ]; then
FULLROLE=beats_endpoint
elif [ "$ROLE" == "o" ]; then
FULLROLE=osquery_endpoint
elif [ "$ROLE" == "w" ]; then
FULLROLE=wazuh_endpoint
else
echo "I don't recognize that role"
exit 1
fi
echo "Adding $IP to the $FULLROLE role. This can take a few seconds"

File diff suppressed because it is too large.


@@ -0,0 +1,142 @@
#!/bin/bash
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Originally written by Bryant Treacle
# https://raw.githubusercontent.com/bryant-treacle/so-elastalert-test-rule/master/so-elastalert-test
# Modified by Doug Burks and Wes Lambert
#
# Purpose: This script will allow you to test your elastalert rule without entering the Docker container.
. /usr/sbin/so-elastic-common
OPTIONS=""
SKIP=0
RESULTS_TO_LOG="n"
RULE_NAME=""
FILE_SAVE_LOCATION=""
usage()
{
cat <<EOF
Test Elastalert Rule
Options:
-h This message
-a Trigger real alerts instead of the debug alert
-l <path_to_file> Write results to specified log file
-o '<options>' Specify Elastalert options ( Ex. --schema-only , --count-only, --days N )
-r <rule_name> Specify path/name of rule to test
EOF
}
while getopts "hal:o:r:" OPTION
do
case $OPTION in
h)
usage
exit 0
;;
a)
OPTIONS="--alert"
;;
l)
RESULTS_TO_LOG="y"
FILE_SAVE_LOCATION=$OPTARG
;;
o)
OPTIONS=$OPTARG
;;
r)
RULE_NAME=$OPTARG
SKIP=1
;;
*)
usage
exit 0
;;
esac
done
docker_exec(){
if [ ${RESULTS_TO_LOG,,} = "y" ] ; then
docker exec -it so-elastalert bash -c "elastalert-test-rule $RULE_NAME $OPTIONS" > $FILE_SAVE_LOCATION
else
docker exec -it so-elastalert bash -c "elastalert-test-rule $RULE_NAME $OPTIONS"
fi
}
rule_prompt(){
CURRENT_RULES=$(find /opt/so/rules/elastalert -name "*.yaml")
echo
echo "This script will allow you to test an Elastalert rule."
echo
echo "Below is a list of active Elastalert rules:"
echo
echo "-----------------------------------"
echo
echo "$CURRENT_RULES"
echo
echo "-----------------------------------"
echo
echo "Note: To test a rule it must be accessible by the Elastalert Docker container."
echo
echo "Make sure to swap the local path (/opt/so/rules/elastalert/) for the docker path (/etc/elastalert/rules/)"
echo "Example: /opt/so/rules/elastalert/nids2hive.yaml would be /etc/elastalert/rules/nids2hive.yaml"
echo
while [ -z $RULE_NAME ]; do
echo "Please enter the file path and rule name you want to test."
read -e RULE_NAME
done
}
log_save_prompt(){
RESULTS_TO_LOG=""
while [ -z $RESULTS_TO_LOG ]; do
echo "The results can be rather long. Would you like to write the results to a file? (Y/N)"
read RESULTS_TO_LOG
done
}
log_path_prompt(){
while [ -z $FILE_SAVE_LOCATION ]; do
echo "Please enter the file path and file name."
read -e FILE_SAVE_LOCATION
done
echo "Depending on the rule this may take a while."
}
if [ $SKIP -eq 0 ]; then
rule_prompt
log_save_prompt
if [ ${RESULTS_TO_LOG,,} = "y" ] ; then
log_path_prompt
fi
fi
docker_exec
if [ $? -eq 0 ]; then
echo "Test completed successfully!"
else
echo "Something went wrong..."
fi
echo
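
Example invocations, assuming the script keeps the name used in its header comment (so-elastalert-test) and that the rule is referenced by its path inside the container, as the prompt text above explains:

# fully interactive: prompts for the rule and whether to log the output
sudo so-elastalert-test
# non-interactive: test one rule and write the (potentially long) output to a file
sudo so-elastalert-test -r /etc/elastalert/rules/nids2hive.yaml -l /tmp/elastalert-test.log
# pass options straight through to elastalert-test-rule, e.g. schema validation only
sudo so-elastalert-test -r /etc/elastalert/rules/nids2hive.yaml -o '--schema-only'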


@@ -0,0 +1,33 @@
#!/bin/bash
#
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Source common settings
. /usr/sbin/so-common
# Check for log files
for FILE in /opt/so/log/elasticsearch/*.log /opt/so/log/logstash/*.log /opt/so/log/kibana/*.log /opt/so/log/elastalert/*.log /opt/so/log/curator/*.log /opt/so/log/freqserver/*.log /opt/so/log/nginx/*.log; do
# If file exists, then look for errors or warnings
if [ -f $FILE ]; then
MESSAGE=`grep -i 'ERROR\|FAIL\|WARN' $FILE`
if [ ! -z "$MESSAGE" ]; then
header $FILE
echo $MESSAGE | sed 's/WARN/\nWARN/g' | sed 's/WARNING/\nWARNING/g' | sed 's/ERROR/\nERROR/g' | sort | uniq -c | sort -nr
echo
fi
fi
done


@@ -0,0 +1,46 @@
#!/bin/bash
MASTER=MASTER
VERSION="HH1.1.4"
TRUSTED_CONTAINERS=( \
"so-auth-api:$VERSION" \
"so-auth-ui:$VERSION" \
"so-core:$VERSION" \
"so-thehive-cortex:$VERSION" \
"so-curator:$VERSION" \
"so-domainstats:$VERSION" \
"so-elastalert:$VERSION" \
"so-elasticsearch:$VERSION" \
"so-filebeat:$VERSION" \
"so-fleet:$VERSION" \
"so-fleet-launcher:$VERSION" \
"so-freqserver:$VERSION" \
"so-grafana:$VERSION" \
"so-idstools:$VERSION" \
"so-influxdb:$VERSION" \
"so-kibana:$VERSION" \
"so-logstash:$VERSION" \
"so-mysql:$VERSION" \
"so-navigator:$VERSION" \
"so-playbook:$VERSION" \
"so-redis:$VERSION" \
"so-sensoroni:$VERSION" \
"so-soctopus:$VERSION" \
"so-steno:$VERSION" \
#"so-strelka:$VERSION" \
"so-suricata:$VERSION" \
"so-telegraf:$VERSION" \
"so-thehive:$VERSION" \
"so-thehive-es:$VERSION" \
"so-wazuh:$VERSION" \
"so-zeek:$VERSION" )
for i in "${TRUSTED_CONTAINERS[@]}"
do
# Pull down the trusted docker image
echo "Downloading $i"
docker pull --disable-content-trust=false docker.io/soshybridhunter/$i
# Tag it with the new registry destination
docker tag soshybridhunter/$i $MASTER:5000/soshybridhunter/$i
docker push $MASTER:5000/soshybridhunter/$i
docker rmi soshybridhunter/$i
done
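
After the loop finishes, the registry's standard v2 HTTP API can confirm what was pushed; a sketch, assuming the registry serves TLS with the certificate the registry state installs at /etc/pki/registry.crt and that $MASTER has been substituted with the real master name:

# repositories currently held by the local registry
curl -s --cacert /etc/pki/registry.crt https://$MASTER:5000/v2/_catalog
# tags available for one of the pushed images
curl -s --cacert /etc/pki/registry.crt https://$MASTER:5000/v2/soshybridhunter/so-core/tags/list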


@@ -0,0 +1,28 @@
#!/bin/bash
#
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
REPLAY_ENABLED=$(docker images | grep so-tcpreplay)
REPLAY_RUNNING=$(docker ps | grep so-tcpreplay)
if [ "$REPLAY_ENABLED" != "" ] && [ "$REPLAY_RUNNING" != "" ]; then
docker cp so-tcpreplay:/opt/samples /opt/samples
docker exec -it so-tcpreplay /usr/bin/tcpreplay -i bond0 -M10 $1
else
echo "Replay functionality not enabled! To enable, run `so-tcpreplay-start`"
echo
echo "Note that you will need internet access to download the appropiriate components"
fi
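
The wrapper above reduces to a single docker exec, so the same replay can be driven by hand once the container is running; note that the pcap path is resolved inside the so-tcpreplay container (the file name below is only a placeholder):

docker exec -it so-tcpreplay /usr/bin/tcpreplay -i bond0 -M10 /opt/samples/<some-sample>.pcap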


@@ -0,0 +1,21 @@
#!/bin/bash
#
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
. /usr/sbin/so-common
/usr/sbin/so-restart tcpreplay $1


@@ -0,0 +1,20 @@
#!/bin/bash
#
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
. /usr/sbin/so-common
/usr/sbin/so-start tcpreplay $1


@@ -0,0 +1,21 @@
#!/bin/bash
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
. /usr/sbin/so-common
/usr/sbin/so-stop tcpreplay $1


@@ -1,3 +1,5 @@
{% set VERSION = salt['pillar.get']('static:soversion', '1.1.4') %}
{% set MASTER = salt['grains.get']('master') %}
{% if grains['role'] == 'so-node' or grains['role'] == 'so-eval' %}
# Curator
# Create the group
@@ -112,15 +114,9 @@ curdel:
- month: '*'
- dayweek: '*'
so-curatorimage:
cmd.run:
- name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-curator:HH1.1.0
so-curator:
docker_container.running:
- require:
- so-curatorimage
- image: docker.io/soshybridhunter/so-curator:HH1.1.0
- image: {{ MASTER }}:5000/soshybridhunter/so-curator:HH{{ VERSION }}
- hostname: curator
- name: so-curator
- user: curator


@@ -12,7 +12,8 @@
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
{% set VERSION = salt['pillar.get']('static:soversion', '1.1.4') %}
{% set MASTER = salt['grains.get']('master') %}
{% if grains['role'] == 'so-master' %}
{% set esalert = salt['pillar.get']('master:elastalert', '1') %}
@@ -109,15 +110,9 @@ elastaconf:
- group: 939
- template: jinja
so-elastalertimage:
cmd.run:
- name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-elastalert:HH1.1.1
so-elastalert:
docker_container.running:
- require:
- so-elastalertimage
- image: docker.io/soshybridhunter/so-elastalert:HH1.1.1
- image: {{ MASTER }}:5000/soshybridhunter/so-elastalert:HH{{ VERSION }}
- hostname: elastalert
- name: so-elastalert
- user: elastalert


@@ -12,6 +12,8 @@
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
{% set VERSION = salt['pillar.get']('static:soversion', '1.1.4') %}
{% set MASTER = salt['grains.get']('master') %}
{% if grains['role'] == 'so-master' %}
{% set esclustername = salt['pillar.get']('master:esclustername', '') %}
@@ -98,15 +100,9 @@ eslogdir:
- group: 939
- makedirs: True
so-elasticsearchimage:
cmd.run:
- name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-elasticsearch:HH1.1.0
so-elasticsearch:
docker_container.running:
- require:
- so-elasticsearchimage
- image: docker.io/soshybridhunter/so-elasticsearch:HH1.1.0
- image: {{ MASTER }}:5000/soshybridhunter/so-elasticsearch:HH{{ VERSION }}
- hostname: elasticsearch
- name: so-elasticsearch
- user: elasticsearch
@@ -139,7 +135,4 @@ so-elasticsearch-pipelines-file:
so-elasticsearch-pipelines:
cmd.run:
- name: /opt/so/conf/elasticsearch/so-elasticsearch-pipelines {{ esclustername }}
# Tell the main cluster I am here
#curl -XPUT http://\$ELASTICSEARCH_HOST:\$ELASTICSEARCH_PORT/_cluster/settings -H'Content-Type: application/json' -d '{"persistent": {"search": {"remote": {"$HOSTNAME": {"skip_unavailable": "true", "seeds": ["$DOCKER_INTERFACE:$REVERSE_PORT"]}}}}}'
- name: /opt/so/conf/elasticsearch/so-elasticsearch-pipelines {{ esclustername }}


@@ -1,4 +1,4 @@
# Copyright 2014,2015,2016,2017,2018 Security Onion Solutions, LLC
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -12,7 +12,8 @@
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
{%- set MASTER = grains['master'] %}
{% set VERSION = salt['pillar.get']('static:soversion', '1.1.4') %}
{% set MASTER = salt['grains.get']('master') %}
{%- set MASTERIP = salt['pillar.get']('static:masterip', '') %}
# Filebeat Setup
@@ -46,25 +47,9 @@ filebeatconfsync:
- group: 0
- template: jinja
#filebeatcrt:
# file.managed:
# - name: /opt/so/conf/filebeat/etc/pki/filebeat.crt
# - source: salt://filebeat/files/filebeat.crt
#filebeatkey:
# file.managed:
# - name: /opt/so/conf/filebeat/etc/pki/filebeat.key
# - source: salt://filebeat/files/filebeat.key
so-filebeatimage:
cmd.run:
- name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-filebeat:HH1.1.1
so-filebeat:
docker_container.running:
- require:
- so-filebeatimage
- image: docker.io/soshybridhunter/so-filebeat:HH1.1.1
- image: {{ MASTER }}:5000/soshybridhunter/so-filebeat:HH{{ VERSION }}
- hostname: so-filebeat
- user: root
- extra_hosts: {{ MASTER }}:{{ MASTERIP }}


@@ -1,6 +1,8 @@
{%- set MYSQLPASS = salt['pillar.get']('auth:mysql', 'iwonttellyou') %}
{%- set FLEETPASS = salt['pillar.get']('auth:fleet', 'bazinga') -%}
{%- set MASTERIP = salt['pillar.get']('static:masterip', '') -%}
{% set VERSION = salt['pillar.get']('static:soversion', '1.1.4') %}
{% set MASTER = salt['grains.get']('master') %}
# Fleet Setup
fleetcdir:
@@ -59,15 +61,9 @@ fleetdbpriv:
- user: fleetdbuser
- host: 172.17.0.0/255.255.0.0
so-fleetimage:
cmd.run:
- name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-fleet:HH1.1.3
so-fleet:
docker_container.running:
- require:
- so-fleetimage
- image: docker.io/soshybridhunter/so-fleet:HH1.1.3
- image: {{ MASTER }}:5000/soshybridhunter/so-fleet:HH{{ VERSION }}
- hostname: so-fleet
- port_bindings:
- 0.0.0.0:8080:8080


@@ -1,4 +1,6 @@
{% set MASTERIP = salt['pillar.get']('master:mainip', '') %}
{% set VERSION = salt['pillar.get']('static:soversion', '1.1.4') %}
{% set MASTER = salt['grains.get']('master') %}
hiveconfdir:
file.directory:
- name: /opt/so/conf/hive/etc
@@ -53,15 +55,9 @@ hiveesdata:
- user: 939
- group: 939
so-thehive-esimage:
cmd.run:
- name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-thehive-es:HH1.1.1
so-thehive-es:
docker_container.running:
- require:
- so-thehive-esimage
- image: docker.io/soshybridhunter/so-thehive-es:HH1.1.1
- image: {{ MASTER }}:5000/soshybridhunter/so-thehive-es:HH{{ VERSION }}
- hostname: so-thehive-es
- name: so-thehive-es
- user: 939


@@ -12,7 +12,8 @@
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
{% set VERSION = salt['pillar.get']('static:soversion', '1.1.4') %}
{% set MASTER = salt['grains.get']('master') %}
# IDSTools Setup
idstoolsdir:
file.directory:
@@ -61,15 +62,9 @@ ruleslink:
- name: /opt/so/saltstack/salt/suricata/rules
- target: /opt/so/rules/nids
so-idstoolsimage:
cmd.run:
- name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-idstools:HH1.1.0
so-idstools:
docker_container.running:
- require:
- so-idstoolsimage
- image: docker.io/soshybridhunter/so-idstools:HH1.1.0
- image: {{ MASTER }}:5000/soshybridhunter/so-idstools:HH{{ VERSION }}
- hostname: so-idstools
- user: socore
- binds:


@@ -1,4 +1,5 @@
{% set master = salt['grains.get']('master') %}
{% set VERSION = salt['pillar.get']('static:soversion', '1.1.4') %}
{% set MASTER = salt['grains.get']('master') %}
# Add ES Group
kibanasearchgroup:
@@ -52,25 +53,17 @@ synckibanacustom:
- user: 932
- group: 939
# File.Recurse for custom saved dashboards
so-kibanaimage:
cmd.run:
- name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-kibana:HH1.1.1
# Start the kibana docker
so-kibana:
docker_container.running:
- require:
- so-kibanaimage
- image: docker.io/soshybridhunter/so-kibana:HH1.1.1
- image: {{ MASTER }}:5000/soshybridhunter/so-kibana:HH{{ VERSION }}
- hostname: kibana
- user: kibana
- environment:
- KIBANA_DEFAULTAPPID=dashboard/94b52620-342a-11e7-9d52-4f090484f59e
- ELASTICSEARCH_HOST={{ master }}
- ELASTICSEARCH_HOST={{ MASTER }}
- ELASTICSEARCH_PORT=9200
- MASTER={{ master }}
- MASTER={{ MASTER }}
- binds:
- /opt/so/conf/kibana/etc:/usr/share/kibana/config:rw
- /opt/so/log/kibana:/var/log/kibana:rw
@@ -78,11 +71,3 @@ so-kibana:
- /sys/fs/cgroup:/sys/fs/cgroup:ro
- port_bindings:
- 0.0.0.0:5601:5601
# Keep the setting correct
#KibanaHappy:
# cmd.script:
# - shell: /bin/bash
# - runas: socore
# - source: salt://kibana/bin/keepkibanahappy.sh
# - template: jinja


@@ -12,7 +12,8 @@
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
{% set VERSION = salt['pillar.get']('static:soversion', '1.1.4') %}
{% set MASTER = salt['grains.get']('master') %}
# Logstash Section - Decide which pillar to use
{% if grains['role'] == 'so-sensor' %}


@@ -12,7 +12,8 @@
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
{% set VERSION = salt['pillar.get']('static:soversion', '1.1.4') %}
{% set MASTER = salt['grains.get']('master') %}
{% set masterproxy = salt['pillar.get']('static:masterupdate', '0') %}
socore_own_saltstack:
@@ -24,8 +25,6 @@ socore_own_saltstack:
- user
- group
{% if masterproxy == 1 %}
# Create the directories for apt-cacher-ng
aptcacherconfdir:
file.directory:
@@ -55,16 +54,10 @@ acngcopyconf:
- name: /opt/so/conf/aptcacher-ng/etc/acng.conf
- source: salt://master/files/acng/acng.conf
so-acngimage:
cmd.run:
- name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-acng:HH1.1.0
# Install the apt-cacher-ng container
so-aptcacherng:
docker_container.running:
- require:
- so-acngimage
- image: docker.io/soshybridhunter/so-acng:HH1.1.0
- image: {{ MASTER }}:5000/soshybridhunter/so-acng:HH{{ VERSION }}
- hostname: so-acng
- port_bindings:
- 0.0.0.0:3142:3142
@@ -73,39 +66,4 @@ so-aptcacherng:
- /opt/so/log/aptcacher-ng:/var/log/apt-cacher-ng:rw
- /opt/so/conf/aptcacher-ng/etc/acng.conf:/etc/apt-cacher-ng/acng.conf:ro
# Create the config directory for the docker registry
dockerregistryconfdir:
file.directory:
- name: /opt/so/conf/docker-registry/etc
- user: 939
- group: 939
- makedirs: True
dockerregistrylogdir:
file.directory:
- name: /opt/so/log/docker-registry
- user: 939
- group: 939
- makedirs: true
# Copy the config
dockerregistryconf:
file.managed:
- name: /opt/so/conf/docker-registry/etc/config.yml
- source: salt://master/files/registry/config.yml
# Install the registry container
so-dockerregistry:
docker_container.running:
- image: registry:2
- hostname: so-registry
- port_bindings:
- 0.0.0.0:5000:5000
- binds:
- /opt/so/conf/docker-registry/etc/config.yml:/etc/docker/registry/config.yml:ro
- /opt/so/conf/docker-registry:/var/lib/registry:rw
- /etc/pki/registry.crt:/etc/pki/registry.crt:ro
- /etc/pki/registry.key:/etc/pki/registry.key:ro
{% endif %}


@@ -1,6 +1,8 @@
{%- set MYSQLPASS = salt['pillar.get']('auth:mysql', 'iwonttellyou') %}
{%- set FLEETPASS = salt['pillar.get']('auth:fleet', 'bazinga') %}
{%- set MASTERIP = salt['pillar.get']('static:masterip', '') %}
{% set VERSION = salt['pillar.get']('static:soversion', '1.1.4') %}
{% set MASTER = salt['grains.get']('master') %}
# MySQL Setup
mysqlpkgs:
pkg.installed:
@@ -48,15 +50,9 @@ mysqldatadir:
- group: 939
- makedirs: True
so-mysqlimage:
cmd.run:
- name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-mysql:HH1.1.0
so-mysql:
docker_container.running:
- require:
- so-mysqlimage
- image: docker.io/soshybridhunter/so-mysql:HH1.1.0
- image: {{ MASTER }}:5000/soshybridhunter/so-mysql:HH{{ VERSION }}
- hostname: so-mysql
- user: socore
- port_bindings:


@@ -1,4 +1,4 @@
# Copyright 2014,2015,2016,2017,2018 Security Onion Solutions, LLC
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -12,7 +12,8 @@
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
{% set VERSION = salt['pillar.get']('static:soversion', '1.1.4') %}
{% set MASTER = salt['grains.get']('master') %}
# PCAP Section
# Create the logstash group
@@ -94,15 +95,9 @@ stenolog:
- group: 941
- makedirs: True
so-stenoimage:
cmd.run:
- name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-steno:HH1.1.3
so-steno:
docker_container.running:
- require:
- so-stenoimage
- image: docker.io/soshybridhunter/so-steno:HH1.1.3
- image: {{ MASTER }}:5000/soshybridhunter/so-steno:HH{{ VERSION }}
- network_mode: host
- privileged: True
- port_bindings:

Binary file not shown.


@@ -1,4 +1,6 @@
{% set MASTERIP = salt['pillar.get']('master:mainip', '') %}
{% set VERSION = salt['pillar.get']('static:soversion', '1.1.4') %}
{% set MASTER = salt['grains.get']('master') %}
playbookdb:
file.managed:
@@ -24,15 +26,9 @@ navigatorconfig:
- makedirs: True
- template: jinja
so-playbookimage:
cmd.run:
- name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-playbook:HH1.1.3
so-playbook:
docker_container.running:
- require:
- so-playbookimage
- image: docker.io/soshybridhunter/so-playbook:HH1.1.3
- image: {{ MASTER }}:5000/soshybridhunter/so-playbook:HH{{ VERSION }}
- hostname: playbook
- name: so-playbook
- binds:
@@ -40,15 +36,9 @@ so-playbook:
- port_bindings:
- 0.0.0.0:3200:3000
so-navigatorimage:
cmd.run:
- name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-navigator:HH1.1.1
so-navigator:
docker_container.running:
- require:
- so-navigatorimage
- image: docker.io/soshybridhunter/so-navigator:HH1.1.1
- image: {{ MASTER }}:5000/soshybridhunter/so-navigator:HH{{ VERSION }}
- hostname: navigator
- name: so-navigator
- binds:
@@ -56,7 +46,7 @@ so-navigator:
- /opt/so/conf/playbook/nav_layer_playbook.json:/nav-app/src/assets/playbook.json:ro
- port_bindings:
- 0.0.0.0:4200:4200
/usr/sbin/so-playbook-sync:
cron.present:
- identifier: so-playbook-sync


@@ -1,4 +1,4 @@
# Copyright 2014,2015,2016,2017,2018 Security Onion Solutions, LLC
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -13,6 +13,8 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
{% set lsaccessip = salt['pillar.get']('master:lsaccessip', '') %}
{% set VERSION = salt['pillar.get']('static:soversion', '1.1.4') %}
{% set MASTER = salt['grains.get']('master') %}
# Redis Setup
redisconfdir:
@@ -44,15 +46,9 @@ redisconfsync:
- group: 939
- template: jinja
so-redisimage:
cmd.run:
- name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-redis:HH1.1.0
so-redis:
docker_container.running:
- require:
- so-redisimage
- image: docker.io/soshybridhunter/so-redis:HH1.1.0
- image: {{ MASTER }}:5000/soshybridhunter/so-redis:HH{{ VERSION }}
- hostname: so-redis
- user: socore
- port_bindings:


@@ -21,4 +21,3 @@ health:
threshold: 3
proxy:
remoteurl: https://registry-1.docker.io

salt/registry/init.sls (new file, 41 lines added)

@@ -0,0 +1,41 @@
# Create the config directory for the docker registry
dockerregistryconfdir:
file.directory:
- name: /opt/so/conf/docker-registry/etc
- user: 939
- group: 939
- makedirs: True
dockerregistrydir:
file.directory:
- name: /nsm/docker-registry/docker
- user: 939
- group: 939
- makedirs: True
dockerregistrylogdir:
file.directory:
- name: /opt/so/log/docker-registry
- user: 939
- group: 939
- makedirs: true
# Copy the config
dockerregistryconf:
file.managed:
- name: /opt/so/conf/docker-registry/etc/config.yml
- source: salt://registry/etc/config.yml
# Install the registry container
so-dockerregistry:
docker_container.running:
- image: registry:2
- hostname: so-registry
- port_bindings:
- 0.0.0.0:5000:5000
- binds:
- /opt/so/conf/docker-registry/etc/config.yml:/etc/docker/registry/config.yml:ro
- /opt/so/conf/docker-registry:/var/lib/registry:rw
- /nsm/docker-registry/docker:/var/lib/registry/docker:rw
- /etc/pki/registry.crt:/etc/pki/registry.crt:ro
- /etc/pki/registry.key:/etc/pki/registry.key:ro
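
A minimal way to bring the registry up on its own and check that it answers, assuming the TLS material referenced in the binds above is already in place:

# apply just this state on the master
salt-call state.apply registry
docker ps --filter name=so-registry --format '{{.Names}}: {{.Ports}}'
# the v2 ping endpoint should answer once the container is healthy
# (use the hostname the certificate was issued for; -k works for a quick liveness check)
curl -s -o /dev/null -w '%{http_code}\n' --cacert /etc/pki/registry.crt https://$(hostname):5000/v2/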


@@ -1,3 +1,6 @@
{% set VERSION = salt['pillar.get']('static:soversion', '1.1.4') %}
{% set MASTER = salt['grains.get']('master') %}
sensoronidir:
file.directory:
- name: /opt/so/conf/sensoroni
@@ -27,15 +30,9 @@ sensoronisync:
- group: 939
- template: jinja
so-sensoroniimage:
cmd.run:
- name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-sensoroni:HH1.1.3
so-sensoroni:
docker_container.running:
- require:
- so-sensoroniimage
- image: docker.io/soshybridhunter/so-sensoroni:HH1.1.3
- image: {{ MASTER }}:5000/soshybridhunter/so-sensoroni:HH{{ VERSION }}
- hostname: sensoroni
- name: so-sensoroni
- binds:


@@ -1,3 +1,6 @@
{% set VERSION = salt['pillar.get']('static:soversion', '1.1.4') %}
{% set MASTER = salt['grains.get']('master') %}
soctopusdir:
file.directory:
- name: /opt/so/conf/soctopus
@@ -44,15 +47,9 @@ navigatordefaultlayer:
- replace: False
- template: jinja
so-soctopusimage:
cmd.run:
- name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-soctopus:HH1.1.3
so-soctopus:
docker_container.running:
- require:
- so-soctopusimage
- image: docker.io/soshybridhunter/so-soctopus:HH1.1.3
- image: {{ MASTER }}:5000/soshybridhunter/so-soctopus:HH{{ VERSION }}
- hostname: soctopus
- name: so-soctopus
- binds:


@@ -14,7 +14,9 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
{% set interface = salt['pillar.get']('sensor:interface', 'bond0') %}
{%- set BROVER = salt['pillar.get']('static:broversion', 'COMMUNITY') %}
{% set BROVER = salt['pillar.get']('static:broversion', '') %}
{% set VERSION = salt['pillar.get']('static:soversion', '1.1.4') %}
{% set MASTER = salt['grains.get']('master') %}
# Suricata
@@ -77,16 +79,10 @@ surithresholding:
- user: 940
- group: 940
- template: jinja
so-suricataimage:
cmd.run:
- name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-suricata:HH1.1.1
so-suricata:
docker_container.running:
- require:
- so-suricataimage
- image: docker.io/soshybridhunter/so-suricata:HH1.1.1
- image: {{ MASTER }}:5000/soshybridhunter/so-suricata:HH{{ VERSION }}
- privileged: True
- environment:
- INTERFACE={{ interface }}

File diff suppressed because it is too large.


@@ -1,243 +0,0 @@
@version: 3.5
source s_syslog { unix-dgram("/dev/log"); };
source s_network {
tcp();
udp();
};
parser p_db {
db-parser(file("/opt/so/conf/syslog-ng/patterndb.xml"));
};
filter f_rewrite_cisco_program { match('^(%[A-Z]+\-\d\-[0-9A-Z]+): ([^\n]+)' value("MSGONLY") type("pcre") flags("store-matches" "nobackref")); };
filter f_rewrite_cisco_program_2 { match('^[\*\.]?(?:Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\s+\d{1,2}\s\d{1,2}:\d{1,2}:\d{1,2}(?:\.\d+)?(?: [A-Z]{3})?: (%[^:]+): ([^\n]+)' value("MSGONLY") type("pcre") flags("store-matches" "nobackref")); };
filter f_rewrite_cisco_program_3 { match('^\d+[ywdh]\d+[ywdh]: (%[^:]+): ([^\n]+)' value("MSGONLY") type("pcre") flags("store-matches" "nobackref")); };
filter f_snort { match('snort:' value("MSGHDR")); };
filter f_bro_headers { message("^#") };
rewrite r_cisco_program {
set("$1", value("PROGRAM") condition(filter(f_rewrite_cisco_program) or filter(f_rewrite_cisco_program_2) or filter(f_rewrite_cisco_program_3)));
set("$2", value("MESSAGE") condition(filter(f_rewrite_cisco_program) or filter(f_rewrite_cisco_program_2) or filter(f_rewrite_cisco_program_3)));
};
rewrite r_snare { subst("MSWinEventLog.+(Security|Application|System).+", "$1", value("PROGRAM") flags(global)); };
rewrite r_from_pipes { subst('\|', "%7C", value("MESSAGE") flags(global) condition(program("bro_*" type(glob)))); };
rewrite r_pipes { subst("\t", "|", value("MESSAGE") flags(global)); };
rewrite r_host { set("$SOURCEIP", value("HOST")); };
rewrite r_extracted_host { set("$pdb_extracted_sourceip", value("HOST") condition("$pdb_extracted_sourceip" != "")); };
template t_db_parsed { template("$R_UNIXTIME\t$HOST\t$PROGRAM\t${.classifier.class}\t$MSGONLY\t${i0}\t${i1}\t${i2}\t${i3}\t${i4}\t${i5}\t${s0}\t${s1}\t${s2}\t${s3}\t${s4}\t${s5}\n"); };
source s_bro_conn { file("/nsm/bro/logs/current/conn.log" flags(no-parse) program_override("bro_conn")); };
source s_bro_http {
file("/nsm/bro/logs/current/http_eth1.log" flags(no-parse) program_override("bro_http"));
};
source s_bro_dns { file("/nsm/bro/logs/current/dns.log" flags(no-parse) program_override("bro_dns")); };
source s_bro_files { file("/nsm/bro/logs/current/files.log" flags(no-parse) program_override("bro_files")); };
source s_bro_dhcp { file("/nsm/bro/logs/current/dhcp.log" flags(no-parse) program_override("bro_dhcp")); };
source s_bro_weird { file("/nsm/bro/logs/current/weird.log" flags(no-parse) program_override("bro_weird")); };
source s_bro_tunnels { file("/nsm/bro/logs/current/tunnel.log" flags(no-parse) program_override("bro_tunnels")); };
source s_bro_syslog { file("/nsm/bro/logs/current/syslog.log" flags(no-parse) program_override("bro_syslog")); };
source s_bro_ftp { file("/nsm/bro/logs/current/ftp.log" flags(no-parse) program_override("bro_ftp")); };
source s_bro_notice { file("/nsm/bro/logs/current/notice.log" flags(no-parse) program_override("bro_notice")); };
source s_bro_smtp { file("/nsm/bro/logs/current/smtp.log" flags(no-parse) program_override("bro_smtp")); };
source s_bro_smtp_entities { file("/nsm/bro/logs/current/smtp_entities.log" flags(no-parse) program_override("bro_smtp_entities")); };
source s_bro_ssl { file("/nsm/bro/logs/current/ssl.log" flags(no-parse) program_override("bro_ssl")); };
source s_ossec { file("/var/ossec/logs/archives/archives.log" program_override('ossec_archive') follow_freq(1) flags(no-parse)); };
source s_bro_software { file("/nsm/bro/logs/current/software.log" flags(no-parse) program_override("bro_software")); };
source s_bro_irc { file("/nsm/bro/logs/current/irc.log" flags(no-parse) program_override("bro_irc")); };
source s_bro_ssh { file("/nsm/bro/logs/current/ssh.log" flags(no-parse) program_override("bro_ssh")); };
source s_bro_intel { file("/nsm/bro/logs/current/intel.log" flags(no-parse) program_override("bro_intel")); };
source s_bro_x509 { file("/nsm/bro/logs/current/x509.log" flags(no-parse) program_override("bro_x509")); };
source s_bro_snmp { file("/nsm/bro/logs/current/snmp.log" flags(no-parse) program_override("bro_snmp")); };
source s_bro_radius { file("/nsm/bro/logs/current/radius.log" flags(no-parse) program_override("bro_radius")); };
source s_bro_mysql { file("/nsm/bro/logs/current/mysql.log" flags(no-parse) program_override("bro_mysql")); };
source s_bro_kerberos { file("/nsm/bro/logs/current/kerberos.log" flags(no-parse) program_override("bro_kerberos")); };
source s_bro_rdp { file("/nsm/bro/logs/current/rdp.log" flags(no-parse) program_override("bro_rdp")); };
source s_bro_pe { file("/nsm/bro/logs/current/pe.log" flags(no-parse) program_override("bro_pe")); };
source s_bro_sip { file("/nsm/bro/logs/current/sip.log" flags(no-parse) program_override("bro_sip")); };
source s_bro_smb_mapping { file("/nsm/bro/logs/current/smb_mapping.log" flags(no-parse) program_override("bro_smb_mapping")); };
source s_bro_smb_files { file("/nsm/bro/logs/current/smb_files.log" flags(no-parse) program_override("bro_smb_files")); };
source s_bro_ntlm { file("/nsm/bro/logs/current/ntlm.log" flags(no-parse) program_override("bro_ntlm")); };
source s_bro_dce_rpc { file("/nsm/bro/logs/current/dce_rpc.log" flags(no-parse) program_override("bro_dce_rpc")); };
source s_bro_modbus { file("/nsm/bro/logs/current/modbus.log" flags(no-parse) program_override("bro_modbus")); };
source s_bro_dnp3 { file("/nsm/bro/logs/current/dnp3.log" flags(no-parse) program_override("bro_dnp3")); };
source s_bro_rfb { file("/nsm/bro/logs/current/rfb.log" flags(no-parse) program_override("bro_rfb")); };
destination d_elsa { program("sh /opt/elsa/contrib/securityonion/contrib/securityonion-elsa-syslog-ng.sh" template(t_db_parsed)); };
destination d_logstash { tcp("logstash" port(6050) template("$(format-json --scope selected_macros --scope nv_pairs --exclude DATE --key ISODATE)\n")); };
log {
source(s_bro_conn);
source(s_bro_http);
source(s_bro_dns);
source(s_bro_weird);
source(s_bro_tunnels);
source(s_bro_syslog);
source(s_bro_ftp);
source(s_bro_files);
source(s_bro_dhcp);
source(s_bro_notice);
source(s_bro_smtp);
source(s_bro_smtp_entities);
source(s_bro_ssl);
source(s_bro_irc);
source(s_bro_software);
source(s_bro_ssh);
source(s_bro_smb_mapping);
source(s_bro_smb_files);
source(s_bro_ntlm);
source(s_bro_dce_rpc);
source(s_bro_intel);
source(s_bro_x509);
source(s_bro_snmp);
source(s_bro_radius);
source(s_bro_mysql);
source(s_bro_kerberos);
source(s_bro_rdp);
source(s_bro_pe);
source(s_bro_sip);
source(s_bro_modbus);
source(s_bro_dnp3);
source(s_bro_rfb);
source(s_ossec);
source(s_network);
source(s_syslog);
log { filter(f_bro_headers); flags(final); };
log { destination(d_logstash); };
};
# Bring it all back
#source s_src {
# system();
# internal();
#};
########################
# Destinations
########################
# First some standard logfile
#
destination d_auth { file("/var/log/auth.log"); };
destination d_cron { file("/var/log/cron.log"); };
destination d_daemon { file("/var/log/daemon.log"); };
destination d_kern { file("/var/log/kern.log"); };
destination d_lpr { file("/var/log/lpr.log"); };
destination d_mail { file("/var/log/mail.log"); };
destination d_syslog { file("/var/log/syslog"); };
destination d_user { file("/var/log/user.log"); };
destination d_uucp { file("/var/log/uucp.log"); };
# This files are the log come from the mail subsystem.
#
destination d_mailinfo { file("/var/log/mail/mail.info"); };
destination d_mailwarn { file("/var/log/mail/mail.warn"); };
destination d_mailerr { file("/var/log/mail/mail.err"); };
# Logging for INN news system
#
destination d_newscrit { file("/var/log/news/news.crit"); };
destination d_newserr { file("/var/log/news/news.err"); };
destination d_newsnotice { file("/var/log/news/news.notice"); };
# Some `catch-all' logfiles.
#
destination d_debug { file("/var/log/debug"); };
destination d_error { file("/var/log/error"); };
destination d_messages { file("/var/log/messages"); };
# The root's console.
#
destination d_console { usertty("root"); };
# Virtual console.
#
destination d_console_all { file("/dev/tty10"); };
# The named pipe /dev/xconsole is for the `xconsole' utility. To use it,
# you must invoke `xconsole' with the `-file' option:
#
# $ xconsole -file /dev/xconsole [...]
#
destination d_xconsole { pipe("/dev/xconsole"); };
# Send the messages to an other host
#
#destination d_net { tcp("127.0.0.1" port(1000) authentication(on) encrypt(on) log_fifo_size(1000)); };
# Debian only
destination d_ppp { file("/var/log/ppp.log"); };
########################
# Filters
########################
# Here's come the filter options. With this rules, we can set which
# message go where.
filter f_dbg { level(debug); };
filter f_info { level(info); };
filter f_notice { level(notice); };
filter f_warn { level(warn); };
filter f_err { level(err); };
filter f_crit { level(crit .. emerg); };
filter f_debug { level(debug) and not facility(auth, authpriv, news, mail); };
filter f_error { level(err .. emerg) and not filter(f_snort); };
filter f_messages { level(info,notice,warn) and not facility(auth,authpriv,cron,daemon,mail,news); };
filter f_auth { facility(auth, authpriv) and not filter(f_debug); };
filter f_cron { facility(cron) and not filter(f_debug); };
filter f_daemon { facility(daemon) and not filter(f_debug); };
filter f_kern { facility(kern) and not filter(f_debug); };
filter f_lpr { facility(lpr) and not filter(f_debug); };
filter f_local { facility(local0, local1, local3, local4, local5, local6, local7) and not filter(f_debug); };
filter f_mail { facility(mail) and not filter(f_debug); };
filter f_news { facility(news) and not filter(f_debug); };
filter f_syslog3 { not facility(auth, authpriv, mail) and not filter(f_debug) and not filter(f_snort); };
filter f_user { facility(user) and not filter(f_debug); };
filter f_uucp { facility(uucp) and not filter(f_debug); };
filter f_cnews { level(notice, err, crit) and facility(news); };
filter f_cother { level(debug, info, notice, warn) or facility(daemon, mail); };
filter f_ppp { facility(local2) and not filter(f_debug); };
filter f_console { level(warn .. emerg); };
########################
# Log paths
########################
log { source(s_syslog); filter(f_auth); destination(d_auth); };
log { source(s_syslog); filter(f_cron); destination(d_cron); };
log { source(s_syslog); filter(f_daemon); destination(d_daemon); };
log { source(s_syslog); filter(f_kern); destination(d_kern); };
log { source(s_syslog); filter(f_lpr); destination(d_lpr); };
log { source(s_syslog); filter(f_syslog3); destination(d_syslog); };
log { source(s_syslog); filter(f_user); destination(d_user); };
log { source(s_syslog); filter(f_uucp); destination(d_uucp); };
log { source(s_syslog); filter(f_mail); destination(d_mail); };
#log { source(s_syslog); filter(f_mail); filter(f_info); destination(d_mailinfo); };
#log { source(s_syslog); filter(f_mail); filter(f_warn); destination(d_mailwarn); };
#log { source(s_syslog); filter(f_mail); filter(f_err); destination(d_mailerr); };
log { source(s_syslog); filter(f_news); filter(f_crit); destination(d_newscrit); };
log { source(s_syslog); filter(f_news); filter(f_err); destination(d_newserr); };
log { source(s_syslog); filter(f_news); filter(f_notice); destination(d_newsnotice); };
#log { source(s_syslog); filter(f_cnews); destination(d_console_all); };
#log { source(s_syslog); filter(f_cother); destination(d_console_all); };
#log { source(s_syslog); filter(f_ppp); destination(d_ppp); };
log { source(s_syslog); filter(f_debug); destination(d_debug); };
log { source(s_syslog); filter(f_error); destination(d_error); };
log { source(s_syslog); filter(f_messages); destination(d_messages); };
log { source(s_syslog); filter(f_console); destination(d_console_all); destination(d_xconsole); };
log { source(s_syslog); filter(f_crit); destination(d_console); };
# All messages send to a remote site
#
#log { source(s_syslog); destination(d_net); };
###
# Include all config files in /etc/syslog-ng/conf.d/
###


@@ -1,18 +0,0 @@
# Sync the Files
file.directory:
- name: /opt/so/conf/syslog-ng
- user: 939
- group: 939
# Syslog-ng Docker
so-syslog-ng:
dockerng.running:
- image: pillaritem/so-logstash
- hostname: syslog-ng
- priviledged: true
- ports:
- 514/tcp
- 514/udp
- 601
- network_mode: so-elastic-net


@@ -1,18 +1,14 @@
{% if grains['role'] == 'so-sensor' or grains['role'] == 'so-eval' %}
so-tcpreplayimage:
cmd.run:
- name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-tcpreplay:HH1.1.4
{% set VERSION = salt['pillar.get']('static:soversion', '1.1.4') %}
{% set MASTER = salt['grains.get']('master') %}
so-tcpreplay:
docker_container.running:
- require:
- so-tcpreplayimage
- network_mode: "host"
- image: docker.io/soshybridhunter/so-tcpreplay:HH1.1.4
- image: {{ MASTER }}:5000/soshybridhunter/so-tcpreplay:HH{{ VERSION }}
- name: so-tcpreplay
- user: root
- interactive: True
- tty: True
{% endif %}


@@ -20,7 +20,7 @@ base:
- idstools
- pcap
- suricata
- bro
- zeek
- redis
- logstash
- filebeat
@@ -34,7 +34,7 @@ base:
- pcap
- suricata
{%- if BROVER != 'SURICATA' %}
- bro
- zeek
{%- endif %}
- wazuh
- filebeat
@@ -46,11 +46,13 @@ base:
'G@role:so-eval':
- ca
- ssl
- registry
- master
- common
- sensoroni
- firewall
- master
- idstools
- auth
{%- if OSQUERY != 0 %}
- mysql
{%- endif %}
@@ -59,7 +61,7 @@ base:
- kibana
- pcap
- suricata
- bro
- zeek
- curator
- elastalert
{%- if OSQUERY != 0 %}
@@ -91,6 +93,7 @@ base:
'G@role:so-master':
- ca
- ssl
- registry
- common
- sensoroni
- firewall
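
With registry and auth now in the match lists, the easiest way to preview exactly which states a minion will receive before running a highstate:

# show this minion's top-file matches, then apply as usual
salt-call state.show_top
salt-call state.highstate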


@@ -0,0 +1,33 @@
{%- set MASTERIP = salt['pillar.get']('static:masterip', '') %}
#!/bin/bash
#
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Check if Wazuh enabled
if grep -q -R "wazuh: 1" /opt/so/saltstack/pillar/*; then
WAZUH_MGR_CFG="/opt/so/wazuh/etc/ossec.conf"
if ! grep -q "<white_list>{{ MASTERIP }}</white_list>" $WAZUH_MGR_CFG ; then
DATE=`date`
sed -i 's/<\/ossec_config>//' $WAZUH_MGR_CFG
sed -i '/^$/N;/^\n$/D' $WAZUH_MGR_CFG
echo -e "<!--Address {{ MASTERIP }} added by setup on "$DATE"-->\n <global>\n <white_list>{{ MASTERIP }}</white_list>\n </global>\n</ossec_config>" >> $WAZUH_MGR_CFG
echo "Added whitelist entry for {{ MASTERIP }} in $WAZUH_MGR_CFG."
echo
echo "Restarting OSSEC Server..."
/usr/sbin/so-wazuh-restart
fi
fi
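
A small follow-up check after this script has run, using the same file and marker it edits:

# confirm the master IP landed in the Wazuh manager whitelist
grep '<white_list>' /opt/so/wazuh/etc/ossec.conf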


@@ -1,5 +1,6 @@
{%- set HOSTNAME = salt['grains.get']('host', '') %}
{% set VERSION = salt['pillar.get']('static:soversion', '1.1.4') %}
{% set MASTER = salt['grains.get']('master') %}
# Add ossec group
ossecgroup:
group.present:
@@ -62,15 +63,18 @@ wazuhagentregister:
- mode: 755
- template: jinja
so-wazuhimage:
cmd.run:
- name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-wazuh:HH1.1.3
wazuhmgrwhitelist:
file.managed:
- name: /usr/sbin/wazuh-manager-whitelist
- source: salt://wazuh/files/wazuh-manager-whitelist
- user: 0
- group: 0
- mode: 755
- template: jinja
so-wazuh:
docker_container.running:
- require:
- so-wazuhimage
- image: docker.io/soshybridhunter/so-wazuh:HH1.1.3
- image: {{ MASTER }}:5000/soshybridhunter/so-wazuh:HH{{ VERSION }}
- hostname: {{HOSTNAME}}-wazuh-manager
- name: so-wazuh
- detach: True
@@ -87,3 +91,9 @@ registertheagent:
- name: /usr/sbin/wazuh-register-agent
- cwd: /
#- stateful: True
# Whitelist manager IP
whitelistmanager:
cmd.run:
- name: /usr/sbin/wazuh-manager-whitelist
- cwd: /


@@ -0,0 +1,2 @@
#!/bin/bash
/usr/bin/docker exec so-zeek /opt/zeek/bin/zeekctl netstats | awk '{print $(NF-2),$(NF-1),$NF}' | awk -F '[ =]' '{RCVD += $2;DRP += $4;TTL += $6} END { print "rcvd: " RCVD, "dropped: " DRP, "total: " TTL}' >> /nsm/zeek/logs/packetloss.log
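
Each run appends a single aggregated line (received, dropped and total packets summed across all workers) to the log, so recent capture loss can be reviewed with a plain tail:

tail -n 5 /nsm/zeek/logs/packetloss.log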

salt/zeek/files/local.zeek (new file, 120 lines added)

@@ -0,0 +1,120 @@
##! Local site policy. Customize as appropriate.
##!
##! This file will not be overwritten when upgrading or reinstalling!
# This script logs which scripts were loaded during each run.
@load misc/loaded-scripts
# Apply the default tuning scripts for common tuning settings.
@load tuning/defaults
# Estimate and log capture loss.
@load misc/capture-loss
# Enable logging of memory, packet and lag statistics.
@load misc/stats
# Load the scan detection script. It's disabled by default because
# it often causes performance issues.
#@load misc/scan
# Detect traceroute being run on the network. This could possibly cause
# performance trouble when there are a lot of traceroutes on your network.
# Enable cautiously.
#@load misc/detect-traceroute
# Generate notices when vulnerable versions of software are discovered.
# The default is to only monitor software found in the address space defined
# as "local". Refer to the software framework's documentation for more
# information.
@load frameworks/software/vulnerable
# Detect software changing (e.g. attacker installing hacked SSHD).
@load frameworks/software/version-changes
# This adds signatures to detect cleartext forward and reverse windows shells.
@load-sigs frameworks/signatures/detect-windows-shells
# Load all of the scripts that detect software in various protocols.
@load protocols/ftp/software
@load protocols/smtp/software
@load protocols/ssh/software
@load protocols/http/software
# The detect-webapps script could possibly cause performance trouble when
# running on live traffic. Enable it cautiously.
#@load protocols/http/detect-webapps
# This script detects DNS results pointing toward your Site::local_nets
# where the name is not part of your local DNS zone and is being hosted
# externally. Requires that the Site::local_zones variable is defined.
@load protocols/dns/detect-external-names
# Script to detect various activity in FTP sessions.
@load protocols/ftp/detect
# Scripts that do asset tracking.
@load protocols/conn/known-hosts
@load protocols/conn/known-services
@load protocols/ssl/known-certs
# This script enables SSL/TLS certificate validation.
@load protocols/ssl/validate-certs
# This script prevents the logging of SSL CA certificates in x509.log
@load protocols/ssl/log-hostcerts-only
# Uncomment the following line to check each SSL certificate hash against the ICSI
# certificate notary service; see http://notary.icsi.berkeley.edu .
# @load protocols/ssl/notary
# If you have GeoIP support built in, do some geographic detections and
# logging for SSH traffic.
@load protocols/ssh/geo-data
# Detect hosts doing SSH bruteforce attacks.
@load protocols/ssh/detect-bruteforcing
# Detect logins using "interesting" hostnames.
@load protocols/ssh/interesting-hostnames
# Detect SQL injection attacks.
@load protocols/http/detect-sqli
#### Network File Handling ####
# Enable MD5 and SHA1 hashing for all files.
@load frameworks/files/hash-all-files
# Detect SHA1 sums in Team Cymru's Malware Hash Registry.
@load frameworks/files/detect-MHR
# Extend email alerting to include hostnames
@load policy/frameworks/notice/extend-email/hostnames
# Uncomment the following line to enable detection of the heartbleed attack. Enabling
# this might impact performance a bit.
# @load policy/protocols/ssl/heartbleed
# Uncomment the following line to enable logging of connection VLANs. Enabling
# this adds two VLAN fields to the conn.log file.
# @load policy/protocols/conn/vlan-logging
# Uncomment the following line to enable logging of link-layer addresses. Enabling
# this adds the link-layer address for each connection endpoint to the conn.log file.
# @load policy/protocols/conn/mac-logging
# JA3 - SSL Detection Goodness
@load policy/ja3
# HASSH
@load policy/hassh
# You can load your own intel into:
# /opt/so/saltstack/bro/policy/intel/ on the master (an example entry follows this file)
@load intel
# Load a custom Bro policy
# /opt/so/saltstack/bro/policy/custom/ on the master
#@load custom/somebropolicy.bro
# Write logs in JSON
redef LogAscii::use_json = T;
redef LogAscii::json_timestamps = JSON::TS_ISO8601;
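The intel directory referenced in the comments above uses the standard Zeek Intel framework input format: tab-separated columns with a #fields header. A minimal, purely illustrative entry for a custom file would look like the following (the indicator and description are made up, and the columns must be separated by tabs, shown here with spaces for readability):

#fields indicator indicator_type meta.source meta.desc meta.do_notice
evil.example.com Intel::DOMAIN local Known bad domain T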

47
salt/zeek/files/node.cfg Normal file

@@ -0,0 +1,47 @@
{%- set interface = salt['pillar.get']('sensor:interface', 'bond0') %}
{%- if salt['pillar.get']('sensor:bro_pins') or salt['pillar.get']('sensor:bro_lbprocs') %}
{%- if salt['pillar.get']('sensor:bro_proxies') %}
{%- set proxies = salt['pillar.get']('sensor:bro_proxies', '1') %}
{%- else %}
{%- if salt['pillar.get']('sensor:bro_pins') %}
{%- set proxies = (salt['pillar.get']('sensor:bro_pins')|length/10)|round(0, 'ceil')|int %}
{%- else %}
{%- set proxies = (salt['pillar.get']('sensor:bro_lbprocs')/10)|round(0, 'ceil')|int %}
{%- endif %}
{%- endif %}
[manager]
type=manager
host=localhost
[logger]
type=logger
host=localhost
[proxy]
type=proxy
host=localhost
[worker-1]
type=worker
host=localhost
interface=af_packet::{{ interface }}
lb_method=custom
{%- if salt['pillar.get']('sensor:bro_lbprocs') %}
lb_procs={{ salt['pillar.get']('sensor:bro_lbprocs', '1') }}
{%- else %}
lb_procs={{ salt['pillar.get']('sensor:bro_pins')|length }}
{%- endif %}
{%- if salt['pillar.get']('sensor:bro_pins') %}
pin_cpus={{ salt['pillar.get']('sensor:bro_pins')|join(", ") }}
{%- endif %}
af_packet_fanout_id=23
af_packet_fanout_mode=AF_Packet::FANOUT_HASH
af_packet_buffer_size=128*1024*1024
{%- else %}
[brosa]
type=standalone
host=localhost
interface={{ interface }}
{%- endif %}
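As a sketch of how this template resolves, assuming hypothetical pillar values of sensor:interface: eth1 and sensor:bro_pins: [1, 2, 3, 4] (and neither bro_lbprocs nor bro_proxies set), the rendered node.cfg would be roughly:

[manager]
type=manager
host=localhost
[logger]
type=logger
host=localhost
[proxy]
type=proxy
host=localhost
[worker-1]
type=worker
host=localhost
interface=af_packet::eth1
lb_method=custom
lb_procs=4
pin_cpus=1, 2, 3, 4
af_packet_fanout_id=23
af_packet_fanout_mode=AF_Packet::FANOUT_HASH
af_packet_buffer_size=128*1024*1024

Without any pinning or lb_procs pillar values, the template falls back to a single standalone [brosa] node on the default bond0 interface.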

118
salt/zeek/init.sls Normal file

@@ -0,0 +1,118 @@
{% set VERSION = salt['pillar.get']('static:soversion', '1.1.4') %}
{% set MASTER = salt['grains.get']('master') %}
# Zeek Salt State
# Add Zeek group
zeekgroup:
group.present:
- name: zeek
- gid: 937
# Add Zeek User
zeek:
user.present:
- uid: 937
- gid: 937
- home: /home/zeek
# Create some directories
zeekpolicydir:
file.directory:
- name: /opt/so/conf/zeek/policy
- user: 937
- group: 939
- makedirs: True
# Zeek Log Directory
zeeklogdir:
file.directory:
- name: /nsm/zeek/logs
- user: 937
- group: 939
- makedirs: True
# Zeek Spool Directory
zeekspooldir:
file.directory:
- name: /nsm/zeek/spool/manager
- user: 937
- makedirs: true
# Zeek extracted files directory
zeekextractdir:
file.directory:
- name: /nsm/zeek/extracted
- user: 937
- group: 939
- makedirs: True
zeeksfafincompletedir:
file.directory:
- name: /nsm/faf/files/incomplete
- user: 937
- makedirs: true
zeeksfafcompletedir:
file.directory:
- name: /nsm/faf/files/complete
- user: 937
- makedirs: true
# Sync the policies
zeekpolicysync:
file.recurse:
- name: /opt/so/conf/zeek/policy
- source: salt://zeek/policy
- user: 937
- group: 939
- template: jinja
# Sync node.cfg
nodecfgsync:
file.managed:
- name: /opt/so/conf/zeek/node.cfg
- source: salt://zeek/files/node.cfg
- user: 937
- group: 939
- template: jinja
plcronscript:
file.managed:
- name: /usr/local/bin/packetloss.sh
- source: salt://zeek/cron/packetloss.sh
- mode: 755
/usr/local/bin/packetloss.sh:
cron.present:
- user: root
- minute: '*/10'
- hour: '*'
- daymonth: '*'
- month: '*'
- dayweek: '*'
localzeeksync:
file.managed:
- name: /opt/so/conf/zeek/local.zeek
- source: salt://zeek/files/local.zeek
- user: 937
- group: 939
- template: jinja
so-zeek:
docker_container.running:
- image: {{ MASTER }}:5000/soshybridhunter/so-zeek:HH{{ VERSION }}
- privileged: True
- binds:
- /nsm/zeek/logs:/nsm/zeek/logs:rw
- /nsm/zeek/spool:/nsm/zeek/spool:rw
- /nsm/zeek/extracted:/nsm/zeek/extracted:rw
- /opt/so/conf/zeek/local.zeek:/opt/zeek/share/zeek/site/local.zeek:ro
- /opt/so/conf/zeek/node.cfg:/opt/zeek/etc/node.cfg:ro
- /opt/so/conf/zeek/policy/securityonion:/opt/zeek/share/zeek/policy/securityonion:ro
- /opt/so/conf/zeek/policy/custom:/opt/zeek/share/zeek/policy/custom:ro
- /opt/so/conf/zeek/policy/intel:/opt/zeek/share/zeek/policy/intel:rw
- network_mode: host
- watch:
- file: /opt/so/conf/zeek/local.zeek
- file: /opt/so/conf/zeek/node.cfg
- file: /opt/so/conf/zeek/policy


@@ -0,0 +1 @@
#Intel


@@ -0,0 +1,20 @@
{%- set interface = salt['pillar.get']('sensor:interface', '0') %}
global interface = "{{ interface }}";
event bro_init()
{
if ( ! reading_live_traffic() )
return;
Log::remove_default_filter(HTTP::LOG);
Log::add_filter(HTTP::LOG, [$name = "http-interfaces",
$path_func(id: Log::ID, path: string, rec: HTTP::Info) =
{
local peer = get_event_peer()$descr;
if ( peer in Cluster::nodes && Cluster::nodes[peer]?$interface )
return cat("http_", Cluster::nodes[peer]$interface);
else
return "http";
}
]);
}


@@ -0,0 +1,9 @@
@load frameworks/intel/seen
@load frameworks/intel/do_notice
@load frameworks/files/hash-all-files
redef Intel::read_files += {
fmt("%s/apt1-fqdn.dat", @DIR),
fmt("%s/apt1-md5.dat", @DIR),
fmt("%s/apt1-certs.dat", @DIR)
};


@@ -0,0 +1,26 @@
#fields indicator indicator_type meta.source meta.desc meta.do_notice
b054e26ef827fbbf5829f84a9bdbb697a5b042fc Intel::CERT_HASH Mandiant APT1 Report ALPHA T
7bc0cc2cf7c3a996c32dbe7e938993f7087105b4 Intel::CERT_HASH Mandiant APT1 Report AOL T
7855c132af1390413d4e4ff4ead321f8802d8243 Intel::CERT_HASH Mandiant APT1 Report AOL T
f3e3c590d7126bd227733e9d8313d2575c421243 Intel::CERT_HASH Mandiant APT1 Report AOL T
d4d4e896ce7d73b573f0a0006080a246aec61fe7 Intel::CERT_HASH Mandiant APT1 Report AOL T
bcdf4809c1886ac95478bbafde246d0603934298 Intel::CERT_HASH Mandiant APT1 Report AOL T
6b4855df8afc8d57a671fe5ed628f6d88852a922 Intel::CERT_HASH Mandiant APT1 Report AOL T
d50fdc82c328319ac60f256d3119b8708cd5717b Intel::CERT_HASH Mandiant APT1 Report AOL T
70b48d5177eebe9c762e9a37ecabebfd10e1b7e9 Intel::CERT_HASH Mandiant APT1 Report AOL T
3a6a299b764500ce1b6e58a32a257139d61a3543 Intel::CERT_HASH Mandiant APT1 Report AOL T
bf4f90e0029b2263af1141963ddf2a0c71a6b5fb Intel::CERT_HASH Mandiant APT1 Report AOL T
b21139583dec0dae344cca530690ec1f344acc79 Intel::CERT_HASH Mandiant APT1 Report AOL T
21971ffef58baf6f638df2f7e2cceb4c58b173c8 Intel::CERT_HASH Mandiant APT1 Report EMAIL T
04ecff66973c92a1c348666d5a4738557cce0cfc Intel::CERT_HASH Mandiant APT1 Report IBM T
f97d1a703aec44d0f53a3a294e33acda43a49de1 Intel::CERT_HASH Mandiant APT1 Report IBM T
c0d32301a7c96ecb0bc8e381ec19e6b4eaf5d2fe Intel::CERT_HASH Mandiant APT1 Report IBM T
1b27a897cda019da2c3a6dc838761871e8bf5b5d Intel::CERT_HASH Mandiant APT1 Report LAME T
d515996e8696612dc78fc6db39006466fc6550df Intel::CERT_HASH Mandiant APT1 Report MOON-NIGHT T
8f79315659e59c79f1301ef4aee67b18ae2d9f1c Intel::CERT_HASH Mandiant APT1 Report NONAME T
a57a84975e31e376e3512da7b05ad06ef6441f53 Intel::CERT_HASH Mandiant APT1 Report NS T
b3db37a0edde97b3c3c15da5f2d81d27af82f583 Intel::CERT_HASH Mandiant APT1 Report SERVER (PEM) T
6d8f1454f6392361fb2464b744d4fc09eee5fcfd Intel::CERT_HASH Mandiant APT1 Report SUR T
b66e230f404b2cc1c033ccacda5d0a14b74a2752 Intel::CERT_HASH Mandiant APT1 Report VIRTUALLYTHERE T
4acbadb86a91834493dde276736cdf8f7ef5d497 Intel::CERT_HASH Mandiant APT1 Report WEBMAIL T
86a48093d9b577955c4c9bd19e30536aae5543d4 Intel::CERT_HASH Mandiant APT1 Report YAHOO T

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -0,0 +1,10 @@
global sensorname = "{{ grains.host }}";
redef record Conn::Info += {
sensorname: string &log &optional;
};
event connection_state_remove(c: connection)
{
c$conn$sensorname = sensorname;
}
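With the JSON logging enabled in local.zeek, each conn.log record then carries the originating sensor's hostname as an extra field, e.g. a record ending in ...,"sensorname":"sensor1"} (hostname illustrative), which makes records from different sensors easy to tell apart downstream.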


@@ -0,0 +1 @@
@load ./extract


@@ -0,0 +1,21 @@
global ext_map: table[string] of string = {
["application/x-dosexec"] = "exe",
["text/plain"] = "txt",
["image/jpeg"] = "jpg",
["image/png"] = "png",
["text/html"] = "html",
} &default ="";
event file_sniff(f: fa_file, meta: fa_metadata)
{
# Only extract Windows executables; the other ext_map entries take effect only if this check is relaxed.
if ( ! meta?$mime_type || meta$mime_type != "application/x-dosexec" )
return;
local ext = "";
if ( meta?$mime_type )
ext = ext_map[meta$mime_type];
local fname = fmt("/nsm/bro/extracted/%s-%s.%s", f$source, f$id, ext);
Files::add_analyzer(f, Files::ANALYZER_EXTRACT, [$extract_filename=fname]);
}
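For orientation, a Windows executable carried over HTTP would be written to a path along the lines of /nsm/bro/extracted/HTTP-FakNcS1Jfe01uljb3.exe, where the middle component is Zeek's file ID (the ID shown is illustrative).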


@@ -0,0 +1,3 @@
@load tuning/json-logs
redef LogAscii::json_timestamps = JSON::TS_ISO8601;
redef LogAscii::use_json = T;


@@ -409,20 +409,24 @@ docker_install() {
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
yum -y update
yum -y install docker-ce python36-docker
if [ $INSTALLTYPE != 'EVALMODE' ]; then
if [ $INSTALLTYPE == 'MASTERONLY' ] || [ $INSTALLTYPE == 'EVALMODE' ]; then
docker_registry
echo "Restarting Docker" >> $SETUPLOG 2>&1
systemctl restart docker
systemctl enable docker
else
docker_registry
echo "Restarting Docker" >> $SETUPLOG 2>&1
systemctl restart docker
systemctl enable docker
fi
echo "Restarting Docker" >> $SETUPLOG 2>&1
systemctl restart docker
systemctl enable docker
else
if [ $INSTALLTYPE == 'MASTERONLY' ] || [ $INSTALLTYPE == 'EVALMODE' ]; then
apt-get update >> $SETUPLOG 2>&1
apt-get -y install docker-ce python3-docker >> $SETUPLOG 2>&1
if [ $INSTALLTYPE != 'EVALMODE' ]; then
docker_registry >> $SETUPLOG 2>&1
fi
docker_registry >> $SETUPLOG 2>&1
echo "Restarting Docker" >> $SETUPLOG 2>&1
systemctl restart docker >> $SETUPLOG 2>&1
else
@@ -450,6 +454,59 @@ docker_registry() {
}
docker_seed_registry() {
VERSION="HH1.1.4"
TRUSTED_CONTAINERS=( \
"so-auth-api:$VERSION" \
"so-auth-ui:$VERSION" \
"so-core:$VERSION" \
"so-thehive-cortex:$VERSION" \
"so-curator:$VERSION" \
"so-domainstats:$VERSION" \
"so-elastalert:$VERSION" \
"so-elasticsearch:$VERSION" \
"so-filebeat:$VERSION" \
"so-fleet:$VERSION" \
"so-fleet-launcher:$VERSION" \
"so-freqserver:$VERSION" \
"so-grafana:$VERSION" \
"so-idstools:$VERSION" \
"so-influxdb:$VERSION" \
"so-kibana:$VERSION" \
"so-logstash:$VERSION" \
"so-mysql:$VERSION" \
"so-navigator:$VERSION" \
"so-playbook:$VERSION" \
"so-redis:$VERSION" \
"so-sensoroni:$VERSION" \
"so-soctopus:$VERSION" \
"so-steno:$VERSION" \
#"so-strelka:$VERSION" \
"so-suricata:$VERSION" \
"so-telegraf:$VERSION" \
"so-thehive:$VERSION" \
"so-thehive-es:$VERSION" \
"so-wazuh:$VERSION" \
"so-zeek:$VERSION" )
for i in "${TRUSTED_CONTAINERS[@]}"
do
# Pull down the trusted docker image
echo "Downloading $i"
docker pull --disable-content-trust=false docker.io/soshybridhunter/$i
# Tag it with the new registry destination
docker tag soshybridhunter/$i $HOSTNAME:5000/soshybridhunter/$i
docker push $HOSTNAME:5000/soshybridhunter/$i
done
for i in "${TRUSTED_CONTAINERS[@]}"
do
echo "Removing $i locally"
docker rmi soshybridhunter/$i
done
}
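# Once seeded, the salt states reference these images through the master's
# registry instead of docker.io, so a minion-side pull resolves to something
# like the following (master hostname illustrative):
#   docker pull so-master:5000/soshybridhunter/so-zeek:HH1.1.4
# which matches the {{ MASTER }}:5000/soshybridhunter/...:HH{{ VERSION }}
# image lines used throughout the updated states.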
es_heapsize() {
# Determine ES Heap Size
@@ -650,6 +707,7 @@ master_static() {
touch /opt/so/saltstack/pillar/static.sls
echo "static:" > /opt/so/saltstack/pillar/static.sls
echo " soversion: 1.1.4" >> /opt/so/saltstack/pillar/static.sls
echo " hnmaster: $HNMASTER" >> /opt/so/saltstack/pillar/static.sls
echo " ntpserver: $NTPSERVER" >> /opt/so/saltstack/pillar/static.sls
echo " proxy: $PROXY" >> /opt/so/saltstack/pillar/static.sls


@@ -364,6 +364,10 @@ if (whiptail_you_sure) ; then
salt_checkin >> $SETUPLOG 2>&1
salt-call state.apply ca >> $SETUPLOG 2>&1
salt-call state.apply ssl >> $SETUPLOG 2>&1
salt-call state.apply firewall >> $SETUPLOG 2>&1
salt-call state.apply registry >> $SETUPLOG 2>&1
echo -e "XXX\n42\nDownloading Containers from the Internet... \nXXX"
docker_seed_registry >> $SETUPLOG 2>&1
echo -e "XXX\n43\nInstalling Common Components... \nXXX"
salt-call state.apply common >> $SETUPLOG 2>&1
echo -e "XXX\n45\nApplying firewall rules... \nXXX"
@@ -626,16 +630,18 @@ if (whiptail_you_sure) ; then
salt_checkin >> $SETUPLOG 2>&1
salt-call state.apply ca >> $SETUPLOG 2>&1
salt-call state.apply ssl >> $SETUPLOG 2>&1
salt-call state.apply firewall >> $SETUPLOG 2>&1
salt-call state.apply registry >> $SETUPLOG 2>&1
echo -e "XXX\n14\nDownloading Containers from the Internet... \nXXX"
docker_seed_registry >> $SETUPLOG 2>&1
salt-call state.apply master >> $SETUPLOG 2>&1
echo -e "XXX\n15\nInstalling core components... \nXXX"
salt-call state.apply common >> $SETUPLOG 2>&1
echo -e "XXX\n18\nInitializing firewall rules... \nXXX"
salt-call state.apply firewall >> $SETUPLOG 2>&1
echo -e "XXX\n25\nInstalling master components... \nXXX"
salt-call state.apply master >> $SETUPLOG 2>&1
if [ $INSTALLTYPE == 'EVALMODE' ]; then
salt-call state.apply idstools >> $SETUPLOG 2>&1
fi
salt-call state.apply idstools >> $SETUPLOG 2>&1
if [[ $OSQUERY == '1' ]]; then
salt-call state.apply mysql >> $SETUPLOG 2>&1
@@ -686,6 +692,7 @@ if (whiptail_you_sure) ; then
echo -e "XXX\n95\nSetting checkin to run on boot... \nXXX"
checkin_at_boot >> $SETUPLOG 2>&1
echo -e "XX\n97\nFinishing touches... \nXXX"
salt-call state.apply auth >> $SETUPLOG 2>&1
filter_unused_nics >> $SETUPLOG 2>&1
network_setup >> $SETUPLOG 2>&1
echo -e "XXX\n98\nVerifying Setup... \nXXX"


@@ -672,8 +672,8 @@ whiptail_set_hostname() {
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
while [[ "$HOSTNAME" == 'localhost' ]] ; do
whiptail --title "Security Onion Setup" --msgbox "Please choose a hostname that isn't localhost." 8 75
while [[ "$HOSTNAME" == *'localhost'* ]] ; do
whiptail --title "Security Onion Setup" --msgbox "Please choose a hostname that doesn't contain localhost." 8 75
HOSTNAME=$(whiptail --title "Security Onion Setup" --inputbox \
"Enter the Hostname you would like to set." 10 75 $HOSTNAME 3>&1 1>&2 2>&3)
local exitstatus=$?


@@ -11,4 +11,5 @@ chown -R socore:socore /opt/so/saltstack/salt
chmod 755 /opt/so/saltstack/pillar/firewall/addfirewall.sh
cd ~
rm -rf /tmp/sogh
# Run so-elastic-download here and call this soup with some magic
salt-call state.highstate