merge with dev and fix conflicts

m0duspwnens committed 2020-05-27 13:54:08 -04:00
23 changed files with 171 additions and 99 deletions

View File

@@ -1,5 +1,5 @@
#!/bin/bash
#
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
@@ -17,4 +17,5 @@
. /usr/sbin/so-common
/usr/sbin/so-restart cortex $1
/usr/sbin/so-stop cortex $1
/usr/sbin/so-start thehive $1

View File

@@ -17,4 +17,4 @@
. /usr/sbin/so-common
/usr/sbin/so-start cortex $1
/usr/sbin/so-start thehive $1

View File

@@ -1,5 +1,5 @@
#!/bin/bash
#
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify

View File

@@ -0,0 +1,21 @@
#!/bin/bash
#
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
. /usr/sbin/so-common
/usr/sbin/so-stop thehive-es $1
/usr/sbin/so-start thehive $1

View File

@@ -0,0 +1,20 @@
#!/bin/bash
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
. /usr/sbin/so-common
/usr/sbin/so-start thehive $1

View File

@@ -0,0 +1,20 @@
#!/bin/bash
#
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
. /usr/sbin/so-common
/usr/sbin/so-stop thehive-es $1

View File

@@ -1,5 +1,5 @@
#!/bin/bash
#
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify

View File

@@ -1,5 +1,5 @@
#!/bin/bash
#
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify

View File

@@ -57,7 +57,7 @@ so-filebeat:
- /opt/so/conf/filebeat/etc/filebeat.yml:/usr/share/filebeat/filebeat.yml:ro
- /nsm/zeek:/nsm/zeek:ro
- /nsm/strelka/log:/nsm/strelka/log:ro
- /opt/so/log/suricata:/suricata:ro
- /nsm/suricata:/suricata:ro
- /opt/so/wazuh/logs/alerts:/wazuh/alerts:ro
- /opt/so/wazuh/logs/archives:/wazuh/archives:ro
- /nsm/osquery/fleet/:/nsm/osquery/fleet:ro

View File

@@ -1,66 +0,0 @@
#!/bin/bash
{% set MASTERIP = salt['pillar.get']('static:masterip', '') %}
{%- set HIVEUSER = salt['pillar.get']('static:hiveuser', '') %}
{%- set HIVEPASSWORD = salt['pillar.get']('static:hivepassword', '') %}
{%- set HIVEKEY = salt['pillar.get']('static:hivekey', '') %}
default_salt_dir=/opt/so/saltstack/default
hive_init(){
sleep 120
HIVE_IP="{{MASTERIP}}"
HIVE_USER="{{HIVEUSER}}"
HIVE_PASSWORD="{{HIVEPASSWORD}}"
HIVE_KEY="{{HIVEKEY}}"
SOCTOPUS_CONFIG="$default_salt_dir/salt/soctopus/files/SOCtopus.conf"
echo -n "Waiting for TheHive..."
COUNT=0
HIVE_CONNECTED="no"
while [[ "$COUNT" -le 240 ]]; do
curl --output /dev/null --silent --head --fail -k "https://$HIVE_IP/thehive"
if [ $? -eq 0 ]; then
HIVE_CONNECTED="yes"
echo "connected!"
break
else
((COUNT+=1))
sleep 1
echo -n "."
fi
done
if [ "$HIVE_CONNECTED" == "yes" ]; then
# Migrate DB
curl -v -k -XPOST "https://$HIVE_IP/thehive/api/maintenance/migrate"
# Create initial TheHive user
curl -v -k "https://$HIVE_IP/thehive/api/user" -H "Content-Type: application/json" -d "{\"login\" : \"$HIVE_USER\",\"name\" : \"$HIVE_USER\",\"roles\" : [\"read\",\"alert\",\"write\",\"admin\"],\"preferences\" : \"{}\",\"password\" : \"$HIVE_PASSWORD\", \"key\": \"$HIVE_KEY\"}"
# Pre-load custom fields
#
# reputation
curl -v -k "https://$HIVE_IP/thehive/api/list/custom_fields" -H "Authorization: Bearer $HIVE_KEY" -H "Content-Type: application/json" -d "{\"value\":{\"name\": \"reputation\", \"reference\": \"reputation\", \"description\": \"This field provides an overall reputation status for an address/domain.\", \"type\": \"string\", \"options\": []}}"
touch /opt/so/state/thehive.txt
else
echo "We experienced an issue connecting to TheHive!"
fi
}
if [ -f /opt/so/state/thehive.txt ]; then
exit 0
else
rm -f garbage_file
while ! wget -O garbage_file {{MASTERIP}}:9400 2>/dev/null
do
echo "Waiting for Elasticsearch..."
rm -f garbage_file
sleep 1
done
rm -f garbage_file
sleep 5
hive_init
fi

View File

@@ -198,7 +198,7 @@ so-logstash:
- /etc/pki/ca.crt:/usr/share/filebeat/ca.crt:ro
{%- if grains['role'] == 'so-eval' %}
- /nsm/zeek:/nsm/zeek:ro
- /opt/so/log/suricata:/suricata:ro
- /nsm/suricata:/suricata:ro
- /opt/so/wazuh/logs/alerts:/wazuh/alerts:ro
- /opt/so/wazuh/logs/archives:/wazuh/archives:ro
- /opt/so/log/fleet/:/osquery/logs:ro

View File

@@ -99,7 +99,7 @@ outputs:
- eve-log:
enabled: yes
filetype: regular #regular|syslog|unix_dgram|unix_stream|redis
filename: eve.json
filename: /nsm/eve.json
rotate-interval: day
community-id: true
community-id-seed: 0
@@ -918,7 +918,7 @@ host-mode: auto
# If you are using the CUDA pattern matcher (mpm-algo: ac-cuda), different rules
# apply. In that case try something like 60000 or more. This is because the CUDA
# pattern matcher buffers and scans as many packets as possible in parallel.
#max-pending-packets: 1024
max-pending-packets: 5000
# Runmode the engine should use. Please check --list-runmodes to get the available
# runmodes for each packet acquisition method. Defaults to "autofp" (auto flow pinned

View File

@@ -60,6 +60,12 @@ ruleslink:
- name: /opt/so/saltstack/local/salt/suricata/rules
- target: /opt/so/rules/nids
suridatadir:
file.directory:
- name: /nsm/suricata
- user: 940
- group: 939
surirulesync:
file.recurse:
- name: /opt/so/conf/suricata/rules/
@@ -124,6 +130,7 @@ so-suricata:
- /opt/so/conf/suricata/threshold.conf:/etc/suricata/threshold.conf:ro
- /opt/so/conf/suricata/rules:/etc/suricata/rules:ro
- /opt/so/log/suricata/:/var/log/suricata/:rw
- /nsm/suricata/:/nsm/:rw
- /opt/so/conf/suricata/bpf:/etc/suricata/bpf:ro
- network_mode: host
- watch:

View File

@@ -12,7 +12,7 @@ search {
# Name of the index
index = the_hive
# Name of the Elasticsearch cluster
cluster = hive
cluster = thehive
# Address of the Elasticsearch instance
host = ["{{ MASTERIP }}:9500"]
#search.uri = "http://{{ MASTERIP }}:9500"

View File

@@ -12,7 +12,7 @@ search {
# Name of the index
index = cortex
# Name of the Elasticsearch cluster
cluster = hive
cluster = thehive
# Address of the Elasticsearch instance
host = ["{{ MASTERIP }}:9500"]
# Scroll keepalive

View File

@@ -1,4 +1,4 @@
cluster.name: "hive"
cluster.name: "thehive"
network.host: 0.0.0.0
discovery.zen.minimum_master_nodes: 1
# This is a test -- if this is here, then the volume is mounted correctly.

View File

@@ -1,24 +1,24 @@
{% set MASTERIP = salt['pillar.get']('master:mainip', '') %}
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
{% set MASTER = salt['grains.get']('master') %}
hiveconfdir:
thehiveconfdir:
file.directory:
- name: /opt/so/conf/hive/etc
- name: /opt/so/conf/thehive/etc
- makedirs: True
- user: 939
- group: 939
hivelogdir:
thehivelogdir:
file.directory:
- name: /opt/so/log/hive
- name: /opt/so/log/thehive
- makedirs: True
- user: 939
- group: 939
hiveconf:
thehiveconf:
file.recurse:
- name: /opt/so/conf/hive/etc
- source: salt://hive/thehive/etc
- name: /opt/so/conf/thehive/etc
- source: salt://thehive/etc
- user: 939
- group: 939
- template: jinja
@@ -40,7 +40,7 @@ cortexlogdir:
cortexconf:
file.recurse:
- name: /opt/so/conf/cortex
- source: salt://hive/thehive/etc
- source: salt://thehive/etc
- user: 939
- group: 939
- template: jinja
@@ -48,9 +48,9 @@ cortexconf:
# Install Elasticsearch
# Made directory for ES data to live in
hiveesdata:
thehiveesdata:
file.directory:
- name: /nsm/hive/esdata
- name: /nsm/thehive/esdata
- makedirs: True
- user: 939
- group: 939
@@ -64,16 +64,16 @@ so-thehive-es:
- interactive: True
- tty: True
- binds:
- /nsm/hive/esdata:/usr/share/elasticsearch/data:rw
- /opt/so/conf/hive/etc/es/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro
- /opt/so/conf/hive/etc/es/log4j2.properties:/usr/share/elasticsearch/config/log4j2.properties:ro
- /opt/so/log/hive:/var/log/elasticsearch:rw
- /nsm/thehive/esdata:/usr/share/elasticsearch/data:rw
- /opt/so/conf/thehive/etc/es/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro
- /opt/so/conf/thehive/etc/es/log4j2.properties:/usr/share/elasticsearch/config/log4j2.properties:ro
- /opt/so/log/thehive:/var/log/elasticsearch:rw
- environment:
- http.host=0.0.0.0
- http.port=9400
- transport.tcp.port=9500
- transport.host=0.0.0.0
- cluster.name=hive
- cluster.name=thehive
- thread_pool.index.queue_size=100000
- thread_pool.search.queue_size=100000
- thread_pool.bulk.queue_size=100000
@@ -90,13 +90,13 @@ so-cortex:
- name: so-cortex
- user: 939
- binds:
- /opt/so/conf/hive/etc/cortex-application.conf:/opt/cortex/conf/application.conf:ro
- /opt/so/conf/thehive/etc/cortex-application.conf:/opt/cortex/conf/application.conf:ro
- port_bindings:
- 0.0.0.0:9001:9001
cortexscript:
cmd.script:
- source: salt://hive/thehive/scripts/cortex_init
- source: salt://thehive/scripts/cortex_init
- cwd: /opt/so
- template: jinja
@@ -109,12 +109,12 @@ so-thehive:
- name: so-thehive
- user: 939
- binds:
- /opt/so/conf/hive/etc/application.conf:/opt/thehive/conf/application.conf:ro
- /opt/so/conf/thehive/etc/application.conf:/opt/thehive/conf/application.conf:ro
- port_bindings:
- 0.0.0.0:9000:9000
hivescript:
thehivescript:
cmd.script:
- source: salt://hive/thehive/scripts/hive_init
- source: salt://thehive/scripts/hive_init
- cwd: /opt/so
- template: jinja

salt/thehive/scripts/hive_init (new executable file, 64 lines)
View File

@@ -0,0 +1,64 @@
#!/bin/bash
{% set MASTERIP = salt['pillar.get']('static:masterip', '') %}
{%- set THEHIVEUSER = salt['pillar.get']('static:hiveuser', '') %}
{%- set THEHIVEPASSWORD = salt['pillar.get']('static:hivepassword', '') %}
{%- set THEHIVEKEY = salt['pillar.get']('static:hivekey', '') %}
thehive_init(){
sleep 120
THEHIVE_IP="{{MASTERIP}}"
THEHIVE_USER="{{THEHIVEUSER}}"
THEHIVE_PASSWORD="{{THEHIVEPASSWORD}}"
THEHIVE_KEY="{{THEHIVEKEY}}"
SOCTOPUS_CONFIG="/opt/so/saltstack/salt/soctopus/files/SOCtopus.conf"
echo -n "Waiting for TheHive..."
COUNT=0
THEHIVE_CONNECTED="no"
while [[ "$COUNT" -le 240 ]]; do
curl --output /dev/null --silent --head --fail -k "https://$THEHIVE_IP/thehive"
if [ $? -eq 0 ]; then
THEHIVE_CONNECTED="yes"
echo "connected!"
break
else
((COUNT+=1))
sleep 1
echo -n "."
fi
done
if [ "$THEHIVE_CONNECTED" == "yes" ]; then
# Migrate DB
curl -v -k -XPOST "https://$THEHIVE_IP/thehive/api/maintenance/migrate"
# Create initial TheHive user
curl -v -k "https://$THEHIVE_IP/thehive/api/user" -H "Content-Type: application/json" -d "{\"login\" : \"$THEHIVE_USER\",\"name\" : \"$THEHIVE_USER\",\"roles\" : [\"read\",\"alert\",\"write\",\"admin\"],\"preferences\" : \"{}\",\"password\" : \"$THEHIVE_PASSWORD\", \"key\": \"$THEHIVE_KEY\"}"
# Pre-load custom fields
#
# reputation
curl -v -k "https://$THEHIVE_IP/thehive/api/list/custom_fields" -H "Authorization: Bearer $THEHIVE_KEY" -H "Content-Type: application/json" -d "{\"value\":{\"name\": \"reputation\", \"reference\": \"reputation\", \"description\": \"This field provides an overall reputation status for an address/domain.\", \"type\": \"string\", \"options\": []}}"
touch /opt/so/state/thehive.txt
else
echo "We experienced an issue connecting to TheHive!"
fi
}
if [ -f /opt/so/state/thehive.txt ]; then
exit 0
else
rm -f garbage_file
while ! wget -O garbage_file {{MASTERIP}}:9400 2>/dev/null
do
echo "Waiting for Elasticsearch..."
rm -f garbage_file
sleep 1
done
rm -f garbage_file
sleep 5
thehive_init
fi

View File

@@ -34,6 +34,7 @@ HNMASTER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
HNSENSOR=inherit
HOSTNAME=standalone
install_type=STANDALONE
IP=192.168.0.0/16
# LSINPUTBATCHCOUNT=
# LSINPUTTHREADS=
# LSPIPELINEBATCH=

View File

@@ -1012,6 +1012,9 @@ parse_options() {
export {https,ftp,rsync,all}_proxy="$http_proxy"
;;
"--allow-analyst"|"--allow=a")
allow='a'
;;
*)
if [[ $1 = --* ]]; then
echo "Invalid option"

View File

@@ -541,6 +541,7 @@ fi
success=$(tail -10 $setup_log | grep Failed | awk '{ print $2}')
if [[ "$success" = 0 ]]; then
whiptail_setup_complete
if [[ -n $allow ]]; then so-allow -$allow; fi
if [[ $THEHIVE == 1 ]]; then
check_hive_init_then_reboot
else