diff --git a/salt/common/tools/sbin/so-cortex-restart b/salt/common/tools/sbin/so-cortex-restart
index ef0e3e4fe..841ca1bb6 100755
--- a/salt/common/tools/sbin/so-cortex-restart
+++ b/salt/common/tools/sbin/so-cortex-restart
@@ -1,5 +1,5 @@
#!/bin/bash
-
+#
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
@@ -17,4 +17,5 @@
. /usr/sbin/so-common
-/usr/sbin/so-restart cortex $1
+/usr/sbin/so-stop cortex $1
+/usr/sbin/so-start thehive $1
diff --git a/salt/common/tools/sbin/so-cortex-start b/salt/common/tools/sbin/so-cortex-start
index a08969cab..92fe88bb5 100755
--- a/salt/common/tools/sbin/so-cortex-start
+++ b/salt/common/tools/sbin/so-cortex-start
@@ -17,4 +17,4 @@
. /usr/sbin/so-common
-/usr/sbin/so-start cortex $1
+/usr/sbin/so-start thehive $1
diff --git a/salt/common/tools/sbin/so-cortex-stop b/salt/common/tools/sbin/so-cortex-stop
index a13d1e2e3..727b2c7fa 100755
--- a/salt/common/tools/sbin/so-cortex-stop
+++ b/salt/common/tools/sbin/so-cortex-stop
@@ -1,5 +1,5 @@
#!/bin/bash
-
+#
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
diff --git a/salt/common/tools/sbin/so-thehive-es-restart b/salt/common/tools/sbin/so-thehive-es-restart
new file mode 100755
index 000000000..d58caecdc
--- /dev/null
+++ b/salt/common/tools/sbin/so-thehive-es-restart
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+. /usr/sbin/so-common
+
+/usr/sbin/so-stop thehive-es $1
+/usr/sbin/so-start thehive $1
diff --git a/salt/common/tools/sbin/so-thehive-es-start b/salt/common/tools/sbin/so-thehive-es-start
new file mode 100755
index 000000000..92fe88bb5
--- /dev/null
+++ b/salt/common/tools/sbin/so-thehive-es-start
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+. /usr/sbin/so-common
+
+/usr/sbin/so-start thehive $1
diff --git a/salt/common/tools/sbin/so-thehive-es-stop b/salt/common/tools/sbin/so-thehive-es-stop
new file mode 100755
index 000000000..cf9cc2310
--- /dev/null
+++ b/salt/common/tools/sbin/so-thehive-es-stop
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+. /usr/sbin/so-common
+
+/usr/sbin/so-stop thehive-es $1
diff --git a/salt/common/tools/sbin/so-thehive-restart b/salt/common/tools/sbin/so-thehive-restart
index 08cd8318e..4b28c0030 100755
--- a/salt/common/tools/sbin/so-thehive-restart
+++ b/salt/common/tools/sbin/so-thehive-restart
@@ -1,5 +1,5 @@
#!/bin/bash
-
+#
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
diff --git a/salt/common/tools/sbin/so-thehive-stop b/salt/common/tools/sbin/so-thehive-stop
index b326f699c..6c56e0473 100755
--- a/salt/common/tools/sbin/so-thehive-stop
+++ b/salt/common/tools/sbin/so-thehive-stop
@@ -1,5 +1,5 @@
#!/bin/bash
-
+#
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
diff --git a/salt/elasticsearch/files/ingest/syslog b/salt/elasticsearch/files/ingest/syslog
new file mode 100644
index 000000000..d34e79d4a
--- /dev/null
+++ b/salt/elasticsearch/files/ingest/syslog
@@ -0,0 +1,13 @@
+{
+ "description" : "syslog",
+ "processors" : [
+ {
+ "dissect": {
+ "field": "message",
+ "pattern" : "%{message}",
+ "on_failure": [ { "drop" : { } } ]
+ }
+ },
+ { "pipeline": { "name": "common" } }
+ ]
+}
diff --git a/salt/filebeat/etc/filebeat.yml b/salt/filebeat/etc/filebeat.yml
index 1c4bee013..be04effb0 100644
--- a/salt/filebeat/etc/filebeat.yml
+++ b/salt/filebeat/etc/filebeat.yml
@@ -75,6 +75,19 @@ filebeat.modules:
filebeat.inputs:
#------------------------------ Log prospector --------------------------------
{%- if grains['role'] == 'so-sensor' or grains['role'] == "so-eval" or grains['role'] == "so-helix" or grains['role'] == "so-heavynode" or grains['role'] == "so-standalone" %}
+ - type: syslog
+ enabled: true
+ protocol.udp:
+ host: "0.0.0.0:514"
+ fields:
+ module: syslog
+ dataset: syslog
+ pipeline: "syslog"
+ index: "so-syslog-%{+yyyy.MM.dd}"
+ processors:
+ - drop_fields:
+ fields: ["source", "prospector", "input", "offset", "beat"]
+
{%- if BROVER != 'SURICATA' %}
{%- for LOGNAME in salt['pillar.get']('brologs:enabled', '') %}
- type: log
diff --git a/salt/filebeat/init.sls b/salt/filebeat/init.sls
index 409594b2d..8540faeb6 100644
--- a/salt/filebeat/init.sls
+++ b/salt/filebeat/init.sls
@@ -57,12 +57,14 @@ so-filebeat:
- /opt/so/conf/filebeat/etc/filebeat.yml:/usr/share/filebeat/filebeat.yml:ro
- /nsm/zeek:/nsm/zeek:ro
- /nsm/strelka/log:/nsm/strelka/log:ro
- - /opt/so/log/suricata:/suricata:ro
+ - /nsm/suricata:/suricata:ro
- /opt/so/wazuh/logs/alerts:/wazuh/alerts:ro
- /opt/so/wazuh/logs/archives:/wazuh/archives:ro
- /nsm/osquery/fleet/:/nsm/osquery/fleet:ro
- /opt/so/conf/filebeat/etc/pki/filebeat.crt:/usr/share/filebeat/filebeat.crt:ro
- /opt/so/conf/filebeat/etc/pki/filebeat.key:/usr/share/filebeat/filebeat.key:ro
- /etc/ssl/certs/intca.crt:/usr/share/filebeat/intraca.crt:ro
+ - port_bindings:
+ - 0.0.0.0:514:514/udp
- watch:
- file: /opt/so/conf/filebeat/etc/filebeat.yml
diff --git a/salt/hive/thehive/scripts/hive_init b/salt/hive/thehive/scripts/hive_init
deleted file mode 100755
index b1ef62d68..000000000
--- a/salt/hive/thehive/scripts/hive_init
+++ /dev/null
@@ -1,64 +0,0 @@
-#!/bin/bash
-{% set MASTERIP = salt['pillar.get']('static:masterip', '') %}
-{%- set HIVEUSER = salt['pillar.get']('static:hiveuser', '') %}
-{%- set HIVEPASSWORD = salt['pillar.get']('static:hivepassword', '') %}
-{%- set HIVEKEY = salt['pillar.get']('static:hivekey', '') %}
-
-hive_init(){
- sleep 120
- HIVE_IP="{{MASTERIP}}"
- HIVE_USER="{{HIVEUSER}}"
- HIVE_PASSWORD="{{HIVEPASSWORD}}"
- HIVE_KEY="{{HIVEKEY}}"
- SOCTOPUS_CONFIG="/opt/so/saltstack/salt/soctopus/files/SOCtopus.conf"
-
- echo -n "Waiting for TheHive..."
- COUNT=0
- HIVE_CONNECTED="no"
- while [[ "$COUNT" -le 240 ]]; do
- curl --output /dev/null --silent --head --fail -k "https://$HIVE_IP/thehive"
- if [ $? -eq 0 ]; then
- HIVE_CONNECTED="yes"
- echo "connected!"
- break
- else
- ((COUNT+=1))
- sleep 1
- echo -n "."
- fi
- done
-
- if [ "$HIVE_CONNECTED" == "yes" ]; then
-
- # Migrate DB
- curl -v -k -XPOST "https://$HIVE_IP:/thehive/api/maintenance/migrate"
-
- # Create intial TheHive user
- curl -v -k "https://$HIVE_IP/thehive/api/user" -H "Content-Type: application/json" -d "{\"login\" : \"$HIVE_USER\",\"name\" : \"$HIVE_USER\",\"roles\" : [\"read\",\"alert\",\"write\",\"admin\"],\"preferences\" : \"{}\",\"password\" : \"$HIVE_PASSWORD\", \"key\": \"$HIVE_KEY\"}"
-
- # Pre-load custom fields
- #
- # reputation
- curl -v -k "https://$HIVE_IP/thehive/api/list/custom_fields" -H "Authorization: Bearer $HIVE_KEY" -H "Content-Type: application/json" -d "{\"value\":{\"name\": \"reputation\", \"reference\": \"reputation\", \"description\": \"This field provides an overall reputation status for an address/domain.\", \"type\": \"string\", \"options\": []}}"
-
-
- touch /opt/so/state/thehive.txt
- else
- echo "We experienced an issue connecting to TheHive!"
- fi
-}
-
-if [ -f /opt/so/state/thehive.txt ]; then
- exit 0
-else
- rm -f garbage_file
- while ! wget -O garbage_file {{MASTERIP}}:9400 2>/dev/null
- do
- echo "Waiting for Elasticsearch..."
- rm -f garbage_file
- sleep 1
- done
- rm -f garbage_file
- sleep 5
- hive_init
-fi
diff --git a/salt/logstash/init.sls b/salt/logstash/init.sls
index ba0e015f4..1118b6807 100644
--- a/salt/logstash/init.sls
+++ b/salt/logstash/init.sls
@@ -198,7 +198,7 @@ so-logstash:
- /etc/pki/ca.crt:/usr/share/filebeat/ca.crt:ro
{%- if grains['role'] == 'so-eval' %}
- /nsm/zeek:/nsm/zeek:ro
- - /opt/so/log/suricata:/suricata:ro
+ - /nsm/suricata:/suricata:ro
- /opt/so/wazuh/logs/alerts:/wazuh/alerts:ro
- /opt/so/wazuh/logs/archives:/wazuh/archives:ro
- /opt/so/log/fleet/:/osquery/logs:ro
diff --git a/salt/soc/files/soc/soc.json b/salt/soc/files/soc/soc.json
index 76770e2bd..e98ee7bf7 100644
--- a/salt/soc/files/soc/soc.json
+++ b/salt/soc/files/soc/soc.json
@@ -99,7 +99,7 @@
{ "name": "Connections", "description": "Connections grouped by destination country", "query": "event.module:zeek AND event.dataset:conn | groupby destination.geo.country_name"},
{ "name": "Connections", "description": "Connections grouped by source country", "query": "event.module:zeek AND event.dataset:conn | groupby source.geo.country_name"},
{ "name": "DCE_RPC", "description": "DCE_RPC grouped by operation", "query": "event.module:zeek AND event.dataset:dce_rpc | groupby dce_rpc.operation"},
- { "name": "DHCP", "description": "DHCP leases", "query": "event.module:zeek AND event.dataset:dhcp | groupby host.hostname host.domain dhcp.requested_address"},
+ { "name": "DHCP", "description": "DHCP leases", "query": "event.module:zeek AND event.dataset:dhcp | groupby host.hostname host.domain"},
{ "name": "DHCP", "description": "DHCP grouped by message type", "query": "event.module:zeek AND event.dataset:dhcp | groupby dhcp.message_types"},
{ "name": "DNP3", "description": "DNP3 grouped by reply", "query": "event.module:zeek AND event.dataset:dnp3 | groupby dnp3.fc_reply"},
{ "name": "DNS", "description": "DNS queries grouped by port ", "query": "event.module:zeek AND event.dataset:dns | groupby dns.query.name destination.port"},
@@ -122,8 +122,7 @@
{ "name": "KERBEROS", "description": "KERBEROS grouped by service", "query": "event.module:zeek AND event.dataset:kerberos | groupby kerberos.service"},
{ "name": "MODBUS", "description": "MODBUS grouped by function", "query": "event.module:zeek AND event.dataset:modbus | groupby modbus.function"},
{ "name": "MYSQL", "description": "MYSQL grouped by command", "query": "event.module:zeek AND event.dataset:mysql | groupby mysql.command"},
- { "name": "NOTICE", "description": "Zeek notice logs grouped by note", "query": "event.module:zeek AND event.dataset:notice | groupby notice.note"},
- { "name": "NOTICE", "description": "Zeek notice logs grouped by message", "query": "event.module:zeek AND event.dataset:notice | groupby notice.message"},
+ { "name": "NOTICE", "description": "Zeek notice logs grouped by note and message", "query": "event.module:zeek AND event.dataset:notice | groupby notice.note notice.message"},
{ "name": "NTLM", "description": "NTLM grouped by computer name", "query": "event.module:zeek AND event.dataset:ntlm | groupby ntlm.server.dns.name"},
{ "name": "PE", "description": "PE files list", "query": "event.module:zeek AND event.dataset:pe | groupby file.machine file.os file.subsystem"},
{ "name": "RADIUS", "description": "RADIUS grouped by username", "query": "event.module:zeek AND event.dataset:radius | groupby user.name.keyword"},
diff --git a/salt/strelka/init.sls b/salt/strelka/init.sls
index a9842924d..5767531f4 100644
--- a/salt/strelka/init.sls
+++ b/salt/strelka/init.sls
@@ -112,5 +112,5 @@ strelka_filestream:
strelka_zeek_extracted_sync:
cron.present:
- user: root
- - name: [ -d /nsm/zeek/extracted/complete/ ] && mv /nsm/zeek/extracted/complete/* /nsm/strelka/ > /dev/null 2>&1
+ - name: '[ -d /nsm/zeek/extracted/complete/ ] && mv /nsm/zeek/extracted/complete/* /nsm/strelka/ > /dev/null 2>&1'
- minute: '*'
diff --git a/salt/suricata/files/suricata.yaml b/salt/suricata/files/suricata.yaml
index 8487ec032..d896167be 100644
--- a/salt/suricata/files/suricata.yaml
+++ b/salt/suricata/files/suricata.yaml
@@ -96,6 +96,8 @@ outputs:
enabled: yes
filetype: regular #regular|syslog|unix_dgram|unix_stream|redis
filename: eve.json
+ rotate-interval: day
+
#prefix: "@cee: " # prefix to prepend to each log entry
# the following are valid when type: syslog above
#identity: "suricata"
@@ -1042,7 +1044,7 @@ host-mode: auto
# Number of packets preallocated per thread. The default is 1024. A higher number
# will make sure each CPU will be more easily kept busy, but may negatively
# impact caching.
-#max-pending-packets: 1024
+max-pending-packets: 5000
# Runmode the engine should use. Please check --list-runmodes to get the available
# runmodes for each packet acquisition method. Default depends on selected capture
diff --git a/salt/suricata/init.sls b/salt/suricata/init.sls
index 39f419ad0..0f3d49bc3 100644
--- a/salt/suricata/init.sls
+++ b/salt/suricata/init.sls
@@ -55,6 +55,12 @@ surilogdir:
- user: 940
- group: 939
+suridatadir:
+ file.directory:
+ - name: /nsm/suricata
+ - user: 940
+ - group: 939
+
surirulesync:
file.recurse:
- name: /opt/so/conf/suricata/rules/
@@ -119,6 +125,7 @@ so-suricata:
- /opt/so/conf/suricata/threshold.conf:/etc/suricata/threshold.conf:ro
- /opt/so/conf/suricata/rules:/etc/suricata/rules:ro
- /opt/so/log/suricata/:/var/log/suricata/:rw
+ - /nsm/suricata/:/nsm/:rw
- /opt/so/conf/suricata/bpf:/etc/suricata/bpf:ro
- network_mode: host
- watch:
diff --git a/salt/hive/thehive/etc/application.conf b/salt/thehive/etc/application.conf
similarity index 99%
rename from salt/hive/thehive/etc/application.conf
rename to salt/thehive/etc/application.conf
index 230d87d67..8630cb386 100644
--- a/salt/hive/thehive/etc/application.conf
+++ b/salt/thehive/etc/application.conf
@@ -12,7 +12,7 @@ search {
# Name of the index
index = the_hive
# Name of the Elasticsearch cluster
- cluster = hive
+ cluster = thehive
# Address of the Elasticsearch instance
host = ["{{ MASTERIP }}:9500"]
#search.uri = "http://{{ MASTERIP }}:9500"
diff --git a/salt/hive/thehive/etc/cortex-application.conf b/salt/thehive/etc/cortex-application.conf
similarity index 99%
rename from salt/hive/thehive/etc/cortex-application.conf
rename to salt/thehive/etc/cortex-application.conf
index 356bfd7b3..1a887cdb3 100644
--- a/salt/hive/thehive/etc/cortex-application.conf
+++ b/salt/thehive/etc/cortex-application.conf
@@ -12,7 +12,7 @@ search {
# Name of the index
index = cortex
# Name of the Elasticsearch cluster
- cluster = hive
+ cluster = thehive
# Address of the Elasticsearch instance
host = ["{{ MASTERIP }}:9500"]
# Scroll keepalive
diff --git a/salt/hive/thehive/etc/es/elasticsearch.yml b/salt/thehive/etc/es/elasticsearch.yml
similarity index 95%
rename from salt/hive/thehive/etc/es/elasticsearch.yml
rename to salt/thehive/etc/es/elasticsearch.yml
index d00c01d5d..7f268a671 100644
--- a/salt/hive/thehive/etc/es/elasticsearch.yml
+++ b/salt/thehive/etc/es/elasticsearch.yml
@@ -1,4 +1,4 @@
-cluster.name: "hive"
+cluster.name: "thehive"
network.host: 0.0.0.0
discovery.zen.minimum_master_nodes: 1
# This is a test -- if this is here, then the volume is mounted correctly.
diff --git a/salt/hive/thehive/etc/es/log4j2.properties b/salt/thehive/etc/es/log4j2.properties
similarity index 100%
rename from salt/hive/thehive/etc/es/log4j2.properties
rename to salt/thehive/etc/es/log4j2.properties
diff --git a/salt/hive/init.sls b/salt/thehive/init.sls
similarity index 70%
rename from salt/hive/init.sls
rename to salt/thehive/init.sls
index 2be2f7480..732fe4a77 100644
--- a/salt/hive/init.sls
+++ b/salt/thehive/init.sls
@@ -1,24 +1,24 @@
{% set MASTERIP = salt['pillar.get']('master:mainip', '') %}
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
{% set MASTER = salt['grains.get']('master') %}
-hiveconfdir:
+thehiveconfdir:
file.directory:
- - name: /opt/so/conf/hive/etc
+ - name: /opt/so/conf/thehive/etc
- makedirs: True
- user: 939
- group: 939
-hivelogdir:
+thehivelogdir:
file.directory:
- - name: /opt/so/log/hive
+ - name: /opt/so/log/thehive
- makedirs: True
- user: 939
- group: 939
-hiveconf:
+thehiveconf:
file.recurse:
- - name: /opt/so/conf/hive/etc
- - source: salt://hive/thehive/etc
+ - name: /opt/so/conf/thehive/etc
+ - source: salt://thehive/etc
- user: 939
- group: 939
- template: jinja
@@ -40,7 +40,7 @@ cortexlogdir:
cortexconf:
file.recurse:
- name: /opt/so/conf/cortex
- - source: salt://hive/thehive/etc
+ - source: salt://thehive/etc
- user: 939
- group: 939
- template: jinja
@@ -48,9 +48,9 @@ cortexconf:
# Install Elasticsearch
# Made directory for ES data to live in
-hiveesdata:
+thehiveesdata:
file.directory:
- - name: /nsm/hive/esdata
+ - name: /nsm/thehive/esdata
- makedirs: True
- user: 939
- group: 939
@@ -64,16 +64,16 @@ so-thehive-es:
- interactive: True
- tty: True
- binds:
- - /nsm/hive/esdata:/usr/share/elasticsearch/data:rw
- - /opt/so/conf/hive/etc/es/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro
- - /opt/so/conf/hive/etc/es/log4j2.properties:/usr/share/elasticsearch/config/log4j2.properties:ro
- - /opt/so/log/hive:/var/log/elasticsearch:rw
+ - /nsm/thehive/esdata:/usr/share/elasticsearch/data:rw
+ - /opt/so/conf/thehive/etc/es/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro
+ - /opt/so/conf/thehive/etc/es/log4j2.properties:/usr/share/elasticsearch/config/log4j2.properties:ro
+ - /opt/so/log/thehive:/var/log/elasticsearch:rw
- environment:
- http.host=0.0.0.0
- http.port=9400
- transport.tcp.port=9500
- transport.host=0.0.0.0
- - cluster.name=hive
+ - cluster.name=thehive
- thread_pool.index.queue_size=100000
- thread_pool.search.queue_size=100000
- thread_pool.bulk.queue_size=100000
@@ -90,13 +90,13 @@ so-cortex:
- name: so-cortex
- user: 939
- binds:
- - /opt/so/conf/hive/etc/cortex-application.conf:/opt/cortex/conf/application.conf:ro
+ - /opt/so/conf/thehive/etc/cortex-application.conf:/opt/cortex/conf/application.conf:ro
- port_bindings:
- 0.0.0.0:9001:9001
cortexscript:
cmd.script:
- - source: salt://hive/thehive/scripts/cortex_init
+ - source: salt://thehive/scripts/cortex_init
- cwd: /opt/so
- template: jinja
@@ -109,12 +109,12 @@ so-thehive:
- name: so-thehive
- user: 939
- binds:
- - /opt/so/conf/hive/etc/application.conf:/opt/thehive/conf/application.conf:ro
+ - /opt/so/conf/thehive/etc/application.conf:/opt/thehive/conf/application.conf:ro
- port_bindings:
- 0.0.0.0:9000:9000
-hivescript:
+thehivescript:
cmd.script:
- - source: salt://hive/thehive/scripts/hive_init
+ - source: salt://thehive/scripts/hive_init
- cwd: /opt/so
- template: jinja
diff --git a/salt/hive/thehive/scripts/cortex_init b/salt/thehive/scripts/cortex_init
similarity index 100%
rename from salt/hive/thehive/scripts/cortex_init
rename to salt/thehive/scripts/cortex_init
diff --git a/salt/thehive/scripts/hive_init b/salt/thehive/scripts/hive_init
new file mode 100755
index 000000000..296004e77
--- /dev/null
+++ b/salt/thehive/scripts/hive_init
@@ -0,0 +1,64 @@
+#!/bin/bash
+{% set MASTERIP = salt['pillar.get']('static:masterip', '') %}
+{%- set THEHIVEUSER = salt['pillar.get']('static:hiveuser', '') %}
+{%- set THEHIVEPASSWORD = salt['pillar.get']('static:hivepassword', '') %}
+{%- set THEHIVEKEY = salt['pillar.get']('static:hivekey', '') %}
+
+thehive_init(){
+ sleep 120
+ THEHIVE_IP="{{MASTERIP}}"
+ THEHIVE_USER="{{THEHIVEUSER}}"
+ THEHIVE_PASSWORD="{{THEHIVEPASSWORD}}"
+ THEHIVE_KEY="{{THEHIVEKEY}}"
+ SOCTOPUS_CONFIG="/opt/so/saltstack/salt/soctopus/files/SOCtopus.conf"
+
+ echo -n "Waiting for TheHive..."
+ COUNT=0
+ THEHIVE_CONNECTED="no"
+ while [[ "$COUNT" -le 240 ]]; do
+ curl --output /dev/null --silent --head --fail -k "https://$THEHIVE_IP/thehive"
+ if [ $? -eq 0 ]; then
+ THEHIVE_CONNECTED="yes"
+ echo "connected!"
+ break
+ else
+ ((COUNT+=1))
+ sleep 1
+ echo -n "."
+ fi
+ done
+
+ if [ "$THEHIVE_CONNECTED" == "yes" ]; then
+
+ # Migrate DB
+ curl -v -k -XPOST "https://$THEHIVE_IP:/thehive/api/maintenance/migrate"
+
+  # Create initial TheHive user
+ curl -v -k "https://$THEHIVE_IP/thehive/api/user" -H "Content-Type: application/json" -d "{\"login\" : \"$THEHIVE_USER\",\"name\" : \"$THEHIVE_USER\",\"roles\" : [\"read\",\"alert\",\"write\",\"admin\"],\"preferences\" : \"{}\",\"password\" : \"$THEHIVE_PASSWORD\", \"key\": \"$THEHIVE_KEY\"}"
+
+ # Pre-load custom fields
+ #
+ # reputation
+ curl -v -k "https://$THEHIVE_IP/thehive/api/list/custom_fields" -H "Authorization: Bearer $THEHIVE_KEY" -H "Content-Type: application/json" -d "{\"value\":{\"name\": \"reputation\", \"reference\": \"reputation\", \"description\": \"This field provides an overall reputation status for an address/domain.\", \"type\": \"string\", \"options\": []}}"
+
+
+ touch /opt/so/state/thehive.txt
+ else
+ echo "We experienced an issue connecting to TheHive!"
+ fi
+}
+
+if [ -f /opt/so/state/thehive.txt ]; then
+ exit 0
+else
+ rm -f garbage_file
+ while ! wget -O garbage_file {{MASTERIP}}:9400 2>/dev/null
+ do
+ echo "Waiting for Elasticsearch..."
+ rm -f garbage_file
+ sleep 1
+ done
+ rm -f garbage_file
+ sleep 5
+ thehive_init
+fi
diff --git a/salt/top.sls b/salt/top.sls
index 95acae1fd..4d60b01c0 100644
--- a/salt/top.sls
+++ b/salt/top.sls
@@ -100,7 +100,7 @@ base:
- schedule
- soctopus
{%- if THEHIVE != 0 %}
- - hive
+ - thehive
{%- endif %}
{%- if PLAYBOOK != 0 %}
- playbook
@@ -149,7 +149,7 @@ base:
{%- endif %}
- soctopus
{%- if THEHIVE != 0 %}
- - hive
+ - thehive
{%- endif %}
{%- if PLAYBOOK != 0 %}
- playbook
@@ -203,7 +203,7 @@ base:
- schedule
- soctopus
{%- if THEHIVE != 0 %}
- - hive
+ - thehive
{%- endif %}
{%- if PLAYBOOK != 0 %}
- playbook
@@ -318,7 +318,7 @@ base:
{%- endif %}
- soctopus
{%- if THEHIVE != 0 %}
- - hive
+ - thehive
{%- endif %}
{%- if PLAYBOOK != 0 %}
- playbook
diff --git a/setup/automation/pm_standalone_defaults b/setup/automation/pm_standalone_defaults
index b5a6258ff..74ba8323f 100644
--- a/setup/automation/pm_standalone_defaults
+++ b/setup/automation/pm_standalone_defaults
@@ -21,6 +21,8 @@ address_type=DHCP
ADMINUSER=onionuser
ADMINPASS1=onionuser
ADMINPASS2=onionuser
+ALLOW_CIDR=0.0.0.0/0
+ALLOW_ROLE=a
BASICBRO=7
BASICSURI=7
# BLOGS=
diff --git a/setup/so-common-functions b/setup/so-common-functions
index 15cb3e686..fc380f85b 100644
--- a/setup/so-common-functions
+++ b/setup/so-common-functions
@@ -38,31 +38,3 @@ calculate_useable_cores() {
if [ "$lb_procs_round" -lt 1 ]; then lb_procs=1; else lb_procs=$lb_procs_round; fi
export lb_procs
}
-
-set_defaul_log_size() {
- local percentage
-
- case $INSTALLTYPE in
- EVAL | HEAVYNODE)
- percentage=50
- ;;
- *)
- percentage=80
- ;;
- esac
-
- local disk_dir="/"
- if [ -d /nsm ]; then
- disk_dir="/nsm"
- fi
- local disk_size_1k
- disk_size_1k=$(df $disk_dir | grep -v "^Filesystem" | awk '{print $2}')
-
- local ratio="1048576"
-
- local disk_size_gb
- disk_size_gb=$( echo "$disk_size_1k" "$ratio" | awk '{print($1/$2)}' )
-
- log_size_limit=$( echo "$disk_size_gb" "$percentage" | awk '{printf("%.0f", $1 * ($2/100))}')
- export log_size_limit
-}
diff --git a/setup/so-functions b/setup/so-functions
index bf6db26be..6707e6841 100755
--- a/setup/so-functions
+++ b/setup/so-functions
@@ -954,41 +954,6 @@ node_pillar() {
cat "$pillar_file" >> "$setup_log" 2>&1
}
-parse_options() {
- case "$1" in
- --turbo=*)
- if [[ $is_master || $is_helix ]]; then
- local proxy
- proxy=$(echo "$1" | tr -d '"' | awk -F'--turbo=' '{print $2}')
- proxy_addr="http://$proxy"
- use_proxy "$proxy_addr"
- TURBO="$proxy_addr"
- else
- echo "turbo is not supported on this install type" >> $setup_log 2>&1
- fi
- ;;
- --proxy=*)
- echo "Unimplimented"
- return
-
- if [[ $2 != --proxy-user=* ]] || [[ $3 != --proxy-pass=* ]]; then
- echo "Invalid options passed for proxy. Order is --proxy-user= --proxy-pass="
- else
- local proxy
- local proxy_user
- local proxy_password
- proxy=$(echo "$1" | tr -d '"' | awk -F'--proxy=' '{print $2}')
- proxy_user=$(echo "$2" | tr -d '"' | awk -F'--proxy-user=' '{print $2}')
- proxy_password=$(echo "$3" | tr -d '"' | awk -F'--proxy-pass=' '{print $2}')
-
- use_proxy "$proxy" "$proxy_user" "$proxy_password"
- fi
- ;;
- *)
- echo "Invalid option"
- esac
-}
-
patch_pillar() {
local pillar_file=$temp_install_dir/pillar/minions/$MINION_ID.sls
@@ -1268,8 +1233,6 @@ set_progress_str() {
'----'\
"$percentage% - ${progress_bar_text^^}"\
"----" >> "$setup_log" 2>&1
-
- sleep 5
}
sensor_pillar() {
@@ -1320,6 +1283,33 @@ sensor_pillar() {
cat "$pillar_file" >> "$setup_log" 2>&1
}
+set_default_log_size() {
+ local percentage
+
+ case $INSTALLTYPE in
+ EVAL | HEAVYNODE)
+ percentage=50
+ ;;
+ *)
+ percentage=80
+ ;;
+ esac
+
+ local disk_dir="/"
+ if [ -d /nsm ]; then
+ disk_dir="/nsm"
+ fi
+ local disk_size_1k
+ disk_size_1k=$(df $disk_dir | grep -v "^Filesystem" | awk '{print $2}')
+
+ local ratio="1048576"
+
+ local disk_size_gb
+ disk_size_gb=$( echo "$disk_size_1k" "$ratio" | awk '{print($1/$2)}' )
+
+ log_size_limit=$( echo "$disk_size_gb" "$percentage" | awk '{printf("%.0f", $1 * ($2/100))}')
+}
+
set_hostname() {
set_hostname_iso
@@ -1484,18 +1474,19 @@ update_packages() {
fi
}
-use_proxy() {
- local proxy_addr=$1
- #TODO: add options for username + pass
+use_turbo_proxy() {
+ if [[ ! $install_type =~ ^(MASTER|EVAL|HELIXSENSOR|MASTERSEARCH|STANDALONE)$ ]]; then
+ echo "turbo is not supported on this install type" >> $setup_log 2>&1
+ return
+ fi
if [[ $OS == 'centos' ]]; then
- printf '%s\n'\
- "proxy=${proxy_addr}:3142" >> /etc/yum.conf
+ printf '%s\n' "proxy=${TURBO}:3142" >> /etc/yum.conf
else
printf '%s\n'\
"Acquire {"\
- " HTTP::proxy \"${proxy_addr}:3142\";"\
- " HTTPS::proxy \"${proxy_addr}:3142\";"\
+ " HTTP::proxy \"${TURBO}:3142\";"\
+ " HTTPS::proxy \"${TURBO}:3142\";"\
"}" > /etc/apt/apt.conf.d/proxy.conf
fi
}
diff --git a/setup/so-setup b/setup/so-setup
index 69c5763f9..0d0022feb 100755
--- a/setup/so-setup
+++ b/setup/so-setup
@@ -21,23 +21,72 @@ source ./so-common-functions
source ./so-whiptail
source ./so-variables
+# Parse command line arguments
setup_type=$1
-export setup_type
-
automation=$2
+
+while [[ $# -gt 0 ]]; do
+ arg="$1"
+ shift
+ case "$arg" in
+ "--turbo="* )
+ export TURBO="http://${arg#*=}";;
+ "--proxy="* )
+ export {http,https,ftp,rsync,all}_proxy="${arg#*=}";;
+ "--allow-role="* )
+ export ALLOW_ROLE="${arg#*=}";;
+ "--allow-cidr="* )
+ export ALLOW_CIDR="${arg#*=}";;
+ * )
+ if [[ "$arg" == "--"* ]]; then
+ echo "Invalid option"
+ fi
+ esac
+done
+
+# Begin Installation pre-processing
+echo "---- Starting setup at $(date -u) ----" >> $setup_log 2>&1
+
+automated=no
+function progress() {
+ if [ $automated == no ]; then
+ whiptail --title "Security Onion Install" --gauge 'Please wait while installing' 6 60 0
+ else
+ cat >> $setup_log 2>&1
+ fi
+}
+
if [[ -f automation/$automation && $(basename $automation) == $automation ]]; then
- echo "Preselecting variable values based on automated setup: $automation"
- exit 1
+ echo "Preselecting variable values based on automated setup: $automation" >> $setup_log 2>&1
source automation/$automation
- sleep 30 # Re-implement with network availability probe
+ automated=yes
+
+ echo "Checking network configuration" >> $setup_log 2>&1
+ ip a >> $setup_log 2>&1
+
+ attempt=1
+ attempts=60
+ ip a | grep "$MNIC:" | grep "state UP" >> $setup_log 2>&1
+ while [ $? -ne 0 ]; do
+ ip a >> $setup_log 2>&1
+ if [ $attempt -gt $attempts ]; then
+ echo "Network unavailable - setup cannot continue" >> $setup_log 2>&1
+ exit 1
+ fi
+ echo "Waiting for network to come up (attempt $attempt of $attempts)" >> $setup_log 2>&1
+ attempt=$((attempt + 1))
+ sleep 10;
+ ip a | grep "$MNIC:" | grep "state UP" >> $setup_log 2>&1
+ done
+ echo "Network is up on $MNIC" >> $setup_log 2>&1
fi
case "$setup_type" in
iso | network) # Accepted values
- echo "Beginning Security Onion $setup_type install"
+ echo "Beginning Security Onion $setup_type install" >> $setup_log 2>&1
;;
*)
- echo "Invalid install type, must be 'iso' or 'network'"
+ echo "Invalid install type, must be 'iso' or 'network'" | tee $setup_log
exit 1
;;
esac
@@ -45,9 +94,8 @@ esac
# Allow execution of SO tools during setup
export PATH=$PATH:../salt/common/tools/sbin
-date -u > $setup_log 2>&1
-
got_root
+
detect_os
if [ "$OS" == ubuntu ]; then
@@ -59,7 +107,7 @@ setterm -blank 0
if [ "$setup_type" == 'iso' ] || (whiptail_you_sure); then
true
else
- echo "User cancelled setup." >> $setup_log 2>&1
+ echo "User cancelled setup." | tee $setup_log
whiptail_cancel
fi
@@ -142,17 +190,21 @@ echo "MINION_ID = $MINION_ID" >> $setup_log 2>&1
minion_type=$(get_minion_type)
-# Set any constants needed
+# Set any variables needed
+set_default_log_size >> $setup_log 2>&1
+
if [[ $is_helix ]]; then
RULESETUP=ETOPEN
NSMSETUP=BASIC
HNSENSOR=inherit
MASTERUPDATES=0
fi
+
if [[ $is_helix || ( $is_master && $is_node ) ]]; then
RULESETUP=ETOPEN
NSMSETUP=BASIC
fi
+
if [[ $is_master && $is_node ]]; then
LSPIPELINEWORKERS=1
LSPIPELINEBATCH=125
@@ -161,6 +213,7 @@ if [[ $is_master && $is_node ]]; then
NIDS=Suricata
BROVERSION=ZEEK
fi
+
if [[ $is_node ]]; then
CURCLOSEDAYS=30
fi
@@ -203,6 +256,9 @@ fi
if [[ $is_distmaster || ( $is_sensor || $is_node ) && ! $is_eval ]]; then
whiptail_master_updates
+ if [[ $setup_type == 'network' && $MASTERUPDATES == 1 ]]; then
+ whiptail_master_updates_warning
+ fi
fi
if [[ $is_minion ]]; then
@@ -249,9 +305,8 @@ fi
whiptail_make_changes
-if [[ $# -gt 1 ]]; then
- set -- "${@:2}"
- parse_options "$@" >> $setup_log 2>&1
+if [[ -n "$TURBO" ]]; then
+ use_turbo_proxy
fi
if [[ "$setup_type" == 'iso' ]]; then
@@ -498,11 +553,15 @@ fi
set_progress_str 95 'Verifying setup'
salt-call -l info state.highstate >> $setup_log 2>&1
-} | whiptail --title "Hybrid Hunter Install" --gauge "Please wait while installing" 6 60 0
+} | progress
success=$(tail -10 $setup_log | grep Failed | awk '{ print $2}')
if [[ "$success" = 0 ]]; then
whiptail_setup_complete
+ if [[ -n $ALLOW_ROLE && -n $ALLOW_CIDR ]]; then
+ export IP=$ALLOW_CIDR
+ so-allow -$ALLOW_ROLE >> $setup_log 2>&1
+ fi
if [[ $THEHIVE == 1 ]]; then
check_hive_init_then_reboot
else
diff --git a/setup/so-whiptail b/setup/so-whiptail
index 72455fc9e..9ba4ebc20 100755
--- a/setup/so-whiptail
+++ b/setup/so-whiptail
@@ -413,7 +413,6 @@ whiptail_log_size_limit() {
[ -n "$TESTING" ] && return
- set_defaul_log_size
log_size_limit=$(whiptail --title "Security Onion Setup" --inputbox \
"Please specify the amount of disk space (in GB) you would like to allocate for Elasticsearch data storage. \
@@ -1027,7 +1026,17 @@ whiptail_master_updates() {
;;
esac
+}
+whiptail_master_updates_warning() {
+ [ -n "$TESTING" ] && return
+
+ whiptail --title "Security Onion Setup"\
+ --msgbox "Updating through the master node requires the master to have internet access, press ENTER to continue"\
+ 8 75
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
}
whiptail_node_updates() {