Mirror of https://github.com/Security-Onion-Solutions/securityonion.git

Merge branch 'dev' into feature/suri5
@@ -1,5 +1,5 @@
 #!/bin/bash
 #
 # Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
 #
 # This program is free software: you can redistribute it and/or modify
@@ -17,4 +17,5 @@

 . /usr/sbin/so-common

-/usr/sbin/so-restart cortex $1
+/usr/sbin/so-stop cortex $1
+/usr/sbin/so-start thehive $1
@@ -17,4 +17,4 @@

 . /usr/sbin/so-common

-/usr/sbin/so-start cortex $1
+/usr/sbin/so-start thehive $1
@@ -1,5 +1,5 @@
 #!/bin/bash
 #
 # Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
 #
 # This program is free software: you can redistribute it and/or modify
salt/common/tools/sbin/so-thehive-es-restart (new executable file, 21 lines)

@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+. /usr/sbin/so-common
+
+/usr/sbin/so-stop thehive-es $1
+/usr/sbin/so-start thehive $1
salt/common/tools/sbin/so-thehive-es-start (new executable file, 20 lines)

@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+. /usr/sbin/so-common
+
+/usr/sbin/so-start thehive $1
salt/common/tools/sbin/so-thehive-es-stop (new executable file, 20 lines)

@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+. /usr/sbin/so-common
+
+/usr/sbin/so-stop thehive-es $1
@@ -1,5 +1,5 @@
 #!/bin/bash
 #
 # Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
 #
 # This program is free software: you can redistribute it and/or modify
@@ -1,5 +1,5 @@
 #!/bin/bash
 #
 # Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
 #
 # This program is free software: you can redistribute it and/or modify
salt/elasticsearch/files/ingest/syslog (new file, 13 lines)

@@ -0,0 +1,13 @@
+{
+  "description" : "syslog",
+  "processors" : [
+    {
+      "dissect": {
+        "field": "message",
+        "pattern" : "%{message}",
+        "on_failure": [ { "drop" : { } } ]
+      }
+    },
+    { "pipeline": { "name": "common" } }
+  ]
+}
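The pipeline above only dissects the raw syslog line into message and then hands off to the common pipeline; any event the dissect can't parse is dropped. A pipeline like this can be sanity-checked with Elasticsearch's _simulate API before Filebeat is pointed at it. A minimal sketch, assuming a local Elasticsearch on port 9200 and omitting the handoff processor (the common pipeline would have to already be loaded for it to resolve):

curl -s -XPOST 'http://localhost:9200/_ingest/pipeline/_simulate' \
  -H 'Content-Type: application/json' \
  -d '{
    "pipeline": {
      "description": "syslog",
      "processors": [ { "dissect": { "field": "message", "pattern": "%{message}" } } ]
    },
    "docs": [ { "_source": { "message": "<13>Oct 10 12:00:00 host app: hello" } } ]
  }'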
@@ -75,6 +75,19 @@ filebeat.modules:
 filebeat.inputs:
 #------------------------------ Log prospector --------------------------------
 {%- if grains['role'] == 'so-sensor' or grains['role'] == "so-eval" or grains['role'] == "so-helix" or grains['role'] == "so-heavynode" or grains['role'] == "so-standalone" %}
+- type: syslog
+  enabled: true
+  protocol.udp:
+    host: "0.0.0.0:514"
+  fields:
+    module: syslog
+    dataset: syslog
+  pipeline: "syslog"
+  index: "so-syslog-%{+yyyy.MM.dd}"
+  processors:
+    - drop_fields:
+        fields: ["source", "prospector", "input", "offset", "beat"]
+
 {%- if BROVER != 'SURICATA' %}
 {%- for LOGNAME in salt['pillar.get']('brologs:enabled', '') %}
 - type: log
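Once this input is rendered onto a sensor, any host allowed through the firewall can ship syslog to UDP/514 and land in the so-syslog-* index via the syslog ingest pipeline above. A quick smoke test from another machine, assuming util-linux logger and a placeholder sensor address:

logger --server 192.0.2.10 --port 514 --udp 'filebeat syslog input smoke test'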
@@ -57,12 +57,14 @@ so-filebeat:
     - /opt/so/conf/filebeat/etc/filebeat.yml:/usr/share/filebeat/filebeat.yml:ro
     - /nsm/zeek:/nsm/zeek:ro
     - /nsm/strelka/log:/nsm/strelka/log:ro
-    - /opt/so/log/suricata:/suricata:ro
+    - /nsm/suricata:/suricata:ro
     - /opt/so/wazuh/logs/alerts:/wazuh/alerts:ro
     - /opt/so/wazuh/logs/archives:/wazuh/archives:ro
     - /nsm/osquery/fleet/:/nsm/osquery/fleet:ro
     - /opt/so/conf/filebeat/etc/pki/filebeat.crt:/usr/share/filebeat/filebeat.crt:ro
     - /opt/so/conf/filebeat/etc/pki/filebeat.key:/usr/share/filebeat/filebeat.key:ro
     - /etc/ssl/certs/intca.crt:/usr/share/filebeat/intraca.crt:ro
+  - port_bindings:
+    - 0.0.0.0:514:514/udp
   - watch:
     - file: /opt/so/conf/filebeat/etc/filebeat.yml
@@ -1,64 +0,0 @@
-#!/bin/bash
-{% set MASTERIP = salt['pillar.get']('static:masterip', '') %}
-{%- set HIVEUSER = salt['pillar.get']('static:hiveuser', '') %}
-{%- set HIVEPASSWORD = salt['pillar.get']('static:hivepassword', '') %}
-{%- set HIVEKEY = salt['pillar.get']('static:hivekey', '') %}
-
-hive_init(){
-    sleep 120
-    HIVE_IP="{{MASTERIP}}"
-    HIVE_USER="{{HIVEUSER}}"
-    HIVE_PASSWORD="{{HIVEPASSWORD}}"
-    HIVE_KEY="{{HIVEKEY}}"
-    SOCTOPUS_CONFIG="/opt/so/saltstack/salt/soctopus/files/SOCtopus.conf"
-
-    echo -n "Waiting for TheHive..."
-    COUNT=0
-    HIVE_CONNECTED="no"
-    while [[ "$COUNT" -le 240 ]]; do
-        curl --output /dev/null --silent --head --fail -k "https://$HIVE_IP/thehive"
-        if [ $? -eq 0 ]; then
-            HIVE_CONNECTED="yes"
-            echo "connected!"
-            break
-        else
-            ((COUNT+=1))
-            sleep 1
-            echo -n "."
-        fi
-    done
-
-    if [ "$HIVE_CONNECTED" == "yes" ]; then
-
-        # Migrate DB
-        curl -v -k -XPOST "https://$HIVE_IP:/thehive/api/maintenance/migrate"
-
-        # Create intial TheHive user
-        curl -v -k "https://$HIVE_IP/thehive/api/user" -H "Content-Type: application/json" -d "{\"login\" : \"$HIVE_USER\",\"name\" : \"$HIVE_USER\",\"roles\" : [\"read\",\"alert\",\"write\",\"admin\"],\"preferences\" : \"{}\",\"password\" : \"$HIVE_PASSWORD\", \"key\": \"$HIVE_KEY\"}"
-
-        # Pre-load custom fields
-        #
-        # reputation
-        curl -v -k "https://$HIVE_IP/thehive/api/list/custom_fields" -H "Authorization: Bearer $HIVE_KEY" -H "Content-Type: application/json" -d "{\"value\":{\"name\": \"reputation\", \"reference\": \"reputation\", \"description\": \"This field provides an overall reputation status for an address/domain.\", \"type\": \"string\", \"options\": []}}"
-
-
-        touch /opt/so/state/thehive.txt
-    else
-        echo "We experienced an issue connecting to TheHive!"
-    fi
-}
-
-if [ -f /opt/so/state/thehive.txt ]; then
-    exit 0
-else
-    rm -f garbage_file
-    while ! wget -O garbage_file {{MASTERIP}}:9400 2>/dev/null
-    do
-        echo "Waiting for Elasticsearch..."
-        rm -f garbage_file
-        sleep 1
-    done
-    rm -f garbage_file
-    sleep 5
-    hive_init
-fi
@@ -198,7 +198,7 @@ so-logstash:
     - /etc/pki/ca.crt:/usr/share/filebeat/ca.crt:ro
 {%- if grains['role'] == 'so-eval' %}
     - /nsm/zeek:/nsm/zeek:ro
-    - /opt/so/log/suricata:/suricata:ro
+    - /nsm/suricata:/suricata:ro
     - /opt/so/wazuh/logs/alerts:/wazuh/alerts:ro
     - /opt/so/wazuh/logs/archives:/wazuh/archives:ro
    - /opt/so/log/fleet/:/osquery/logs:ro
@@ -99,7 +99,7 @@
 { "name": "Connections", "description": "Connections grouped by destination country", "query": "event.module:zeek AND event.dataset:conn | groupby destination.geo.country_name"},
 { "name": "Connections", "description": "Connections grouped by source country", "query": "event.module:zeek AND event.dataset:conn | groupby source.geo.country_name"},
 { "name": "DCE_RPC", "description": "DCE_RPC grouped by operation", "query": "event.module:zeek AND event.dataset:dce_rpc | groupby dce_rpc.operation"},
-{ "name": "DHCP", "description": "DHCP leases", "query": "event.module:zeek AND event.dataset:dhcp | groupby host.hostname host.domain dhcp.requested_address"},
+{ "name": "DHCP", "description": "DHCP leases", "query": "event.module:zeek AND event.dataset:dhcp | groupby host.hostname host.domain"},
 { "name": "DHCP", "description": "DHCP grouped by message type", "query": "event.module:zeek AND event.dataset:dhcp | groupby dhcp.message_types"},
 { "name": "DNP3", "description": "DNP3 grouped by reply", "query": "event.module:zeek AND event.dataset:dnp3 | groupby dnp3.fc_reply"},
 { "name": "DNS", "description": "DNS queries grouped by port ", "query": "event.module:zeek AND event.dataset:dns | groupby dns.query.name destination.port"},

@@ -122,8 +122,7 @@
 { "name": "KERBEROS", "description": "KERBEROS grouped by service", "query": "event.module:zeek AND event.dataset:kerberos | groupby kerberos.service"},
 { "name": "MODBUS", "description": "MODBUS grouped by function", "query": "event.module:zeek AND event.dataset:modbus | groupby modbus.function"},
 { "name": "MYSQL", "description": "MYSQL grouped by command", "query": "event.module:zeek AND event.dataset:mysql | groupby mysql.command"},
-{ "name": "NOTICE", "description": "Zeek notice logs grouped by note", "query": "event.module:zeek AND event.dataset:notice | groupby notice.note"},
-{ "name": "NOTICE", "description": "Zeek notice logs grouped by message", "query": "event.module:zeek AND event.dataset:notice | groupby notice.message"},
+{ "name": "NOTICE", "description": "Zeek notice logs grouped by note and message", "query": "event.module:zeek AND event.dataset:notice | groupby notice.note notice.message"},
 { "name": "NTLM", "description": "NTLM grouped by computer name", "query": "event.module:zeek AND event.dataset:ntlm | groupby ntlm.server.dns.name"},
 { "name": "PE", "description": "PE files list", "query": "event.module:zeek AND event.dataset:pe | groupby file.machine file.os file.subsystem"},
 { "name": "RADIUS", "description": "RADIUS grouped by username", "query": "event.module:zeek AND event.dataset:radius | groupby user.name.keyword"},
@@ -112,5 +112,5 @@ strelka_filestream:
 strelka_zeek_extracted_sync:
   cron.present:
     - user: root
-    - name: [ -d /nsm/zeek/extracted/complete/ ] && mv /nsm/zeek/extracted/complete/* /nsm/strelka/ > /dev/null 2>&1
+    - name: '[ -d /nsm/zeek/extracted/complete/ ] && mv /nsm/zeek/extracted/complete/* /nsm/strelka/ > /dev/null 2>&1'
    - minute: '*'
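The only change in this hunk is quoting. Unquoted, YAML reads the leading [ of [ -d ... ] as the start of a flow sequence, so the cron.present state fails to render; quoting turns the whole command into a plain string. A minimal sketch of the difference, assuming python3 with PyYAML is available (both illustrative, not part of the commit):

python3 - <<'EOF'
import yaml
try:
    yaml.safe_load("name: [ -d /tmp ] && mv a b")   # unquoted: '[' opens a flow sequence
except yaml.YAMLError as e:
    print("unquoted fails:", type(e).__name__)
print(yaml.safe_load("name: '[ -d /tmp ] && mv a b'"))  # quoted: parses as a plain string
EOF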
@@ -96,6 +96,8 @@ outputs:
 enabled: yes
 filetype: regular #regular|syslog|unix_dgram|unix_stream|redis
 filename: eve.json
+rotate-interval: day
+
 #prefix: "@cee: " # prefix to prepend to each log entry
 # the following are valid when type: syslog above
 #identity: "suricata"

@@ -1042,7 +1044,7 @@ host-mode: auto
 # Number of packets preallocated per thread. The default is 1024. A higher number
 # will make sure each CPU will be more easily kept busy, but may negatively
 # impact caching.
-#max-pending-packets: 1024
+max-pending-packets: 5000

 # Runmode the engine should use. Please check --list-runmodes to get the available
 # runmodes for each packet acquisition method. Default depends on selected capture
@@ -55,6 +55,12 @@ surilogdir:
     - user: 940
     - group: 939

+suridatadir:
+  file.directory:
+    - name: /nsm/suricata
+    - user: 940
+    - group: 939
+
 surirulesync:
   file.recurse:
     - name: /opt/so/conf/suricata/rules/

@@ -119,6 +125,7 @@ so-suricata:
     - /opt/so/conf/suricata/threshold.conf:/etc/suricata/threshold.conf:ro
     - /opt/so/conf/suricata/rules:/etc/suricata/rules:ro
     - /opt/so/log/suricata/:/var/log/suricata/:rw
+    - /nsm/suricata/:/nsm/:rw
     - /opt/so/conf/suricata/bpf:/etc/suricata/bpf:ro
   - network_mode: host
   - watch:
@@ -12,7 +12,7 @@ search {
 # Name of the index
 index = the_hive
 # Name of the Elasticsearch cluster
-cluster = hive
+cluster = thehive
 # Address of the Elasticsearch instance
 host = ["{{ MASTERIP }}:9500"]
 #search.uri = "http://{{ MASTERIP }}:9500"

@@ -12,7 +12,7 @@ search {
 # Name of the index
 index = cortex
 # Name of the Elasticsearch cluster
-cluster = hive
+cluster = thehive
 # Address of the Elasticsearch instance
 host = ["{{ MASTERIP }}:9500"]
 # Scroll keepalive
@@ -1,4 +1,4 @@
-cluster.name: "hive"
+cluster.name: "thehive"
 network.host: 0.0.0.0
 discovery.zen.minimum_master_nodes: 1
 # This is a test -- if this is here, then the volume is mounted correctly.
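The rename has to land in three places at once: TheHive's and Cortex's application.conf (the client side, above) and elasticsearch.yml (the server side, here), because TheHive 3 connects over the Elasticsearch transport port (9500) and the transport client rejects a server whose cluster.name doesn't match its configured cluster. A quick way to confirm the running cluster name, assuming the check runs on the master where the container's HTTP port (9400, per the environment further down) is published:

curl -s http://localhost:9400 | grep cluster_name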
@@ -1,24 +1,24 @@
 {% set MASTERIP = salt['pillar.get']('master:mainip', '') %}
 {% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
 {% set MASTER = salt['grains.get']('master') %}
-hiveconfdir:
+thehiveconfdir:
   file.directory:
-    - name: /opt/so/conf/hive/etc
+    - name: /opt/so/conf/thehive/etc
     - makedirs: True
     - user: 939
     - group: 939

-hivelogdir:
+thehivelogdir:
   file.directory:
-    - name: /opt/so/log/hive
+    - name: /opt/so/log/thehive
     - makedirs: True
     - user: 939
     - group: 939

-hiveconf:
+thehiveconf:
   file.recurse:
-    - name: /opt/so/conf/hive/etc
-    - source: salt://hive/thehive/etc
+    - name: /opt/so/conf/thehive/etc
+    - source: salt://thehive/etc
     - user: 939
     - group: 939
     - template: jinja
@@ -40,7 +40,7 @@ cortexlogdir:
 cortexconf:
   file.recurse:
     - name: /opt/so/conf/cortex
-    - source: salt://hive/thehive/etc
+    - source: salt://thehive/etc
     - user: 939
     - group: 939
     - template: jinja
@@ -48,9 +48,9 @@ cortexconf:
 # Install Elasticsearch

 # Made directory for ES data to live in
-hiveesdata:
+thehiveesdata:
   file.directory:
-    - name: /nsm/hive/esdata
+    - name: /nsm/thehive/esdata
     - makedirs: True
     - user: 939
     - group: 939
@@ -64,16 +64,16 @@ so-thehive-es:
   - interactive: True
   - tty: True
   - binds:
-    - /nsm/hive/esdata:/usr/share/elasticsearch/data:rw
-    - /opt/so/conf/hive/etc/es/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro
-    - /opt/so/conf/hive/etc/es/log4j2.properties:/usr/share/elasticsearch/config/log4j2.properties:ro
-    - /opt/so/log/hive:/var/log/elasticsearch:rw
+    - /nsm/thehive/esdata:/usr/share/elasticsearch/data:rw
+    - /opt/so/conf/thehive/etc/es/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro
+    - /opt/so/conf/thehive/etc/es/log4j2.properties:/usr/share/elasticsearch/config/log4j2.properties:ro
+    - /opt/so/log/thehive:/var/log/elasticsearch:rw
   - environment:
     - http.host=0.0.0.0
     - http.port=9400
     - transport.tcp.port=9500
     - transport.host=0.0.0.0
-    - cluster.name=hive
+    - cluster.name=thehive
     - thread_pool.index.queue_size=100000
     - thread_pool.search.queue_size=100000
    - thread_pool.bulk.queue_size=100000
@@ -90,13 +90,13 @@ so-cortex:
   - name: so-cortex
   - user: 939
   - binds:
-    - /opt/so/conf/hive/etc/cortex-application.conf:/opt/cortex/conf/application.conf:ro
+    - /opt/so/conf/thehive/etc/cortex-application.conf:/opt/cortex/conf/application.conf:ro
   - port_bindings:
     - 0.0.0.0:9001:9001

 cortexscript:
   cmd.script:
-    - source: salt://hive/thehive/scripts/cortex_init
+    - source: salt://thehive/scripts/cortex_init
     - cwd: /opt/so
     - template: jinja

@@ -109,12 +109,12 @@ so-thehive:
   - name: so-thehive
   - user: 939
   - binds:
-    - /opt/so/conf/hive/etc/application.conf:/opt/thehive/conf/application.conf:ro
+    - /opt/so/conf/thehive/etc/application.conf:/opt/thehive/conf/application.conf:ro
   - port_bindings:
     - 0.0.0.0:9000:9000

-hivescript:
+thehivescript:
   cmd.script:
-    - source: salt://hive/thehive/scripts/hive_init
+    - source: salt://thehive/scripts/hive_init
     - cwd: /opt/so
     - template: jinja
salt/thehive/scripts/hive_init (new executable file, 64 lines)

@@ -0,0 +1,64 @@
+#!/bin/bash
+{% set MASTERIP = salt['pillar.get']('static:masterip', '') %}
+{%- set THEHIVEUSER = salt['pillar.get']('static:hiveuser', '') %}
+{%- set THEHIVEPASSWORD = salt['pillar.get']('static:hivepassword', '') %}
+{%- set THEHIVEKEY = salt['pillar.get']('static:hivekey', '') %}
+
+thehive_init(){
+    sleep 120
+    THEHIVE_IP="{{MASTERIP}}"
+    THEHIVE_USER="{{THEHIVEUSER}}"
+    THEHIVE_PASSWORD="{{THEHIVEPASSWORD}}"
+    THEHIVE_KEY="{{THEHIVEKEY}}"
+    SOCTOPUS_CONFIG="/opt/so/saltstack/salt/soctopus/files/SOCtopus.conf"
+
+    echo -n "Waiting for TheHive..."
+    COUNT=0
+    THEHIVE_CONNECTED="no"
+    while [[ "$COUNT" -le 240 ]]; do
+        curl --output /dev/null --silent --head --fail -k "https://$THEHIVE_IP/thehive"
+        if [ $? -eq 0 ]; then
+            THEHIVE_CONNECTED="yes"
+            echo "connected!"
+            break
+        else
+            ((COUNT+=1))
+            sleep 1
+            echo -n "."
+        fi
+    done
+
+    if [ "$THEHIVE_CONNECTED" == "yes" ]; then
+
+        # Migrate DB
+        curl -v -k -XPOST "https://$THEHIVE_IP:/thehive/api/maintenance/migrate"
+
+        # Create intial TheHive user
+        curl -v -k "https://$THEHIVE_IP/thehive/api/user" -H "Content-Type: application/json" -d "{\"login\" : \"$THEHIVE_USER\",\"name\" : \"$THEHIVE_USER\",\"roles\" : [\"read\",\"alert\",\"write\",\"admin\"],\"preferences\" : \"{}\",\"password\" : \"$THEHIVE_PASSWORD\", \"key\": \"$THEHIVE_KEY\"}"
+
+        # Pre-load custom fields
+        #
+        # reputation
+        curl -v -k "https://$THEHIVE_IP/thehive/api/list/custom_fields" -H "Authorization: Bearer $THEHIVE_KEY" -H "Content-Type: application/json" -d "{\"value\":{\"name\": \"reputation\", \"reference\": \"reputation\", \"description\": \"This field provides an overall reputation status for an address/domain.\", \"type\": \"string\", \"options\": []}}"
+
+
+        touch /opt/so/state/thehive.txt
+    else
+        echo "We experienced an issue connecting to TheHive!"
+    fi
+}
+
+if [ -f /opt/so/state/thehive.txt ]; then
+    exit 0
+else
+    rm -f garbage_file
+    while ! wget -O garbage_file {{MASTERIP}}:9400 2>/dev/null
+    do
+        echo "Waiting for Elasticsearch..."
+        rm -f garbage_file
+        sleep 1
+    done
+    rm -f garbage_file
+    sleep 5
+    thehive_init
+fi
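Both the deleted and the new init script use the same sentinel-file pattern: poll a dependency until it answers, do the one-time initialization, then record success so later highstate runs exit immediately. The skeleton, reduced to its parts (the URL is a placeholder; the state path matches the script above):

#!/bin/bash
STATE=/opt/so/state/thehive.txt
[ -f "$STATE" ] && exit 0            # already initialized on a previous run
until curl -sk --head --fail "https://master/thehive" >/dev/null; do
    sleep 1                          # wait for the service to answer
done
# ...one-time API calls go here...
touch "$STATE"                       # mark done so the next run is a no-op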
@@ -100,7 +100,7 @@ base:
     - schedule
     - soctopus
 {%- if THEHIVE != 0 %}
-    - hive
+    - thehive
 {%- endif %}
 {%- if PLAYBOOK != 0 %}
     - playbook

@@ -149,7 +149,7 @@ base:
 {%- endif %}
     - soctopus
 {%- if THEHIVE != 0 %}
-    - hive
+    - thehive
 {%- endif %}
 {%- if PLAYBOOK != 0 %}
     - playbook

@@ -203,7 +203,7 @@ base:
     - schedule
     - soctopus
 {%- if THEHIVE != 0 %}
-    - hive
+    - thehive
 {%- endif %}
 {%- if PLAYBOOK != 0 %}
     - playbook

@@ -318,7 +318,7 @@ base:
 {%- endif %}
     - soctopus
 {%- if THEHIVE != 0 %}
-    - hive
+    - thehive
 {%- endif %}
 {%- if PLAYBOOK != 0 %}
     - playbook
@@ -21,6 +21,8 @@ address_type=DHCP
 ADMINUSER=onionuser
 ADMINPASS1=onionuser
 ADMINPASS2=onionuser
+ALLOW_CIDR=0.0.0.0/0
+ALLOW_ROLE=a
 BASICBRO=7
 BASICSURI=7
 # BLOGS=
@@ -38,31 +38,3 @@ calculate_useable_cores() {
 if [ "$lb_procs_round" -lt 1 ]; then lb_procs=1; else lb_procs=$lb_procs_round; fi
 export lb_procs
 }
-
-set_defaul_log_size() {
-    local percentage
-
-    case $INSTALLTYPE in
-        EVAL | HEAVYNODE)
-            percentage=50
-            ;;
-        *)
-            percentage=80
-            ;;
-    esac
-
-    local disk_dir="/"
-    if [ -d /nsm ]; then
-        disk_dir="/nsm"
-    fi
-    local disk_size_1k
-    disk_size_1k=$(df $disk_dir | grep -v "^Filesystem" | awk '{print $2}')
-
-    local ratio="1048576"
-
-    local disk_size_gb
-    disk_size_gb=$( echo "$disk_size_1k" "$ratio" | awk '{print($1/$2)}' )
-
-    log_size_limit=$( echo "$disk_size_gb" "$percentage" | awk '{printf("%.0f", $1 * ($2/100))}')
-    export log_size_limit
-}
@@ -954,41 +954,6 @@ node_pillar() {
 cat "$pillar_file" >> "$setup_log" 2>&1
 }

-parse_options() {
-    case "$1" in
-        --turbo=*)
-            if [[ $is_master || $is_helix ]]; then
-                local proxy
-                proxy=$(echo "$1" | tr -d '"' | awk -F'--turbo=' '{print $2}')
-                proxy_addr="http://$proxy"
-                use_proxy "$proxy_addr"
-                TURBO="$proxy_addr"
-            else
-                echo "turbo is not supported on this install type" >> $setup_log 2>&1
-            fi
-            ;;
-        --proxy=*)
-            echo "Unimplimented"
-            return
-
-            if [[ $2 != --proxy-user=* ]] || [[ $3 != --proxy-pass=* ]]; then
-                echo "Invalid options passed for proxy. Order is --proxy-user=<user> --proxy-pass=<password>"
-            else
-                local proxy
-                local proxy_user
-                local proxy_password
-                proxy=$(echo "$1" | tr -d '"' | awk -F'--proxy=' '{print $2}')
-                proxy_user=$(echo "$2" | tr -d '"' | awk -F'--proxy-user=' '{print $2}')
-                proxy_password=$(echo "$3" | tr -d '"' | awk -F'--proxy-pass=' '{print $2}')
-
-                use_proxy "$proxy" "$proxy_user" "$proxy_password"
-            fi
-            ;;
-        *)
-            echo "Invalid option"
-    esac
-}
-
 patch_pillar() {

 local pillar_file=$temp_install_dir/pillar/minions/$MINION_ID.sls
@@ -1268,8 +1233,6 @@ set_progress_str() {
 '----'\
 "$percentage% - ${progress_bar_text^^}"\
 "----" >> "$setup_log" 2>&1
-
-    sleep 5
 }

 sensor_pillar() {
@@ -1320,6 +1283,33 @@ sensor_pillar() {
 cat "$pillar_file" >> "$setup_log" 2>&1
 }

+set_default_log_size() {
+    local percentage
+
+    case $INSTALLTYPE in
+        EVAL | HEAVYNODE)
+            percentage=50
+            ;;
+        *)
+            percentage=80
+            ;;
+    esac
+
+    local disk_dir="/"
+    if [ -d /nsm ]; then
+        disk_dir="/nsm"
+    fi
+    local disk_size_1k
+    disk_size_1k=$(df $disk_dir | grep -v "^Filesystem" | awk '{print $2}')
+
+    local ratio="1048576"
+
+    local disk_size_gb
+    disk_size_gb=$( echo "$disk_size_1k" "$ratio" | awk '{print($1/$2)}' )
+
+    log_size_limit=$( echo "$disk_size_gb" "$percentage" | awk '{printf("%.0f", $1 * ($2/100))}')
+}
+
 set_hostname() {

 set_hostname_iso
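The sizing math in the relocated function (also fixing the old set_defaul_log_size typo): df reports 1K blocks, dividing by 1048576 (1024 squared) converts to GB, and the percentage decides how much of the disk Elasticsearch may use: 50% on EVAL/HEAVYNODE, where other services share the disk, 80% elsewhere. Worked through for an illustrative 1 TB /nsm volume:

echo "1073741824 1048576" | awk '{print($1/$2)}'            # 1024 GB
echo "1024 80" | awk '{printf("%.0f\n", $1 * ($2/100))}'    # log_size_limit=819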
@@ -1484,18 +1474,19 @@ update_packages() {
 fi
 }

-use_proxy() {
-    local proxy_addr=$1
-    #TODO: add options for username + pass
+use_turbo_proxy() {
+    if [[ ! $install_type =~ ^(MASTER|EVAL|HELIXSENSOR|MASTERSEARCH|STANDALONE)$ ]]; then
+        echo "turbo is not supported on this install type" >> $setup_log 2>&1
+        return
+    fi
+
     if [[ $OS == 'centos' ]]; then
-        printf '%s\n'\
-        "proxy=${proxy_addr}:3142" >> /etc/yum.conf
+        printf '%s\n' "proxy=${TURBO}:3142" >> /etc/yum.conf
     else
         printf '%s\n'\
         "Acquire {"\
-        " HTTP::proxy \"${proxy_addr}:3142\";"\
-        " HTTPS::proxy \"${proxy_addr}:3142\";"\
+        " HTTP::proxy \"${TURBO}:3142\";"\
+        " HTTPS::proxy \"${TURBO}:3142\";"\
         "}" > /etc/apt/apt.conf.d/proxy.conf
     fi
 }
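The function writes a package-manager proxy pointing at the turbo host on port 3142 (the apt-cacher-ng default, presumably what the turbo cache runs). With TURBO=http://10.0.0.5, an illustrative value, the results would be:

# appended to /etc/yum.conf on CentOS
proxy=http://10.0.0.5:3142

# written to /etc/apt/apt.conf.d/proxy.conf on Ubuntu
Acquire {
 HTTP::proxy "http://10.0.0.5:3142";
 HTTPS::proxy "http://10.0.0.5:3142";
}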
@@ -21,23 +21,72 @@ source ./so-common-functions
 source ./so-whiptail
 source ./so-variables

+# Parse command line arguments
 setup_type=$1
-export setup_type

 automation=$2

+while [[ $# -gt 0 ]]; do
+    arg="$1"
+    shift
+    case "$arg" in
+        "--turbo="* )
+            export TURBO="http://${arg#*=}";;
+        "--proxy="* )
+            export {http,https,ftp,rsync,all}_proxy="${arg#*=}";;
+        "--allow-role="* )
+            export ALLOW_ROLE="${arg#*=}";;
+        "--allow-cidr="* )
+            export ALLOW_CIDR="${arg#*=}";;
+        * )
+            if [[ "$arg" == "--"* ]]; then
+                echo "Invalid option"
+            fi
+    esac
+done
+
+# Begin Installation pre-processing
+echo "---- Starting setup at $(date -u) ----" >> $setup_log 2>&1
+
+automated=no
+function progress() {
+    if [ $automated == no ]; then
+        whiptail --title "Security Onion Install" --gauge 'Please wait while installing' 6 60 0
+    else
+        cat >> $setup_log 2>&1
+    fi
+}
+
 if [[ -f automation/$automation && $(basename $automation) == $automation ]]; then
-    echo "Preselecting variable values based on automated setup: $automation"
-    exit 1
+    echo "Preselecting variable values based on automated setup: $automation" >> $setup_log 2>&1
     source automation/$automation
-    sleep 30 # Re-implement with network availability probe
+    automated=yes
+
+    echo "Checking network configuration" >> $setup_log 2>&1
+    ip a >> $setup_log 2>&1
+
+    attempt=1
+    attempts=60
+    ip a | grep "$MNIC:" | grep "state UP" >> $setup_log 2>&1
+    while [ $? -ne 0 ]; do
+        ip a >> $setup_log 2>&1
+        if [ $attempt -gt $attempts ]; then
+            echo "Network unavailable - setup cannot continue" >> $setup_log 2>&1
+            exit 1
+        fi
+        echo "Waiting for network to come up (attempt $attempt of $attempts)" >> $setup_log 2>&1
+        attempt=$((attempt + 1))
+        sleep 10;
+        ip a | grep "$MNIC:" | grep "state UP" >> $setup_log 2>&1
+    done
+    echo "Network is up on $MNIC" >> $setup_log 2>&1
 fi

 case "$setup_type" in
 iso | network) # Accepted values
-    echo "Beginning Security Onion $setup_type install"
+    echo "Beginning Security Onion $setup_type install" >> $setup_log 2>&1
     ;;
 *)
-    echo "Invalid install type, must be 'iso' or 'network'"
+    echo "Invalid install type, must be 'iso' or 'network'" | tee $setup_log
     exit 1
     ;;
 esac
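The rewritten argument loop accepts the flags anywhere after the positional arguments, each value harvested with the ${arg#*=} strip-up-to-= parameter expansion, replacing the old positional parse_options(). An illustrative automated invocation (script name, automation file, and values are placeholders):

./so-setup iso eval-auto --turbo=10.0.0.5 --allow-role=a --allow-cidr=192.168.1.0/24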
@@ -45,9 +94,8 @@ esac
 # Allow execution of SO tools during setup
 export PATH=$PATH:../salt/common/tools/sbin

-date -u > $setup_log 2>&1

 got_root

 detect_os

 if [ "$OS" == ubuntu ]; then
@@ -59,7 +107,7 @@ setterm -blank 0
 if [ "$setup_type" == 'iso' ] || (whiptail_you_sure); then
     true
 else
-    echo "User cancelled setup." >> $setup_log 2>&1
+    echo "User cancelled setup." | tee $setup_log
     whiptail_cancel
 fi

@@ -142,17 +190,21 @@ echo "MINION_ID = $MINION_ID" >> $setup_log 2>&1

 minion_type=$(get_minion_type)

-# Set any constants needed
+# Set any variables needed
+set_default_log_size >> $setup_log 2>&1
+
 if [[ $is_helix ]]; then
     RULESETUP=ETOPEN
     NSMSETUP=BASIC
     HNSENSOR=inherit
     MASTERUPDATES=0
 fi

 if [[ $is_helix || ( $is_master && $is_node ) ]]; then
     RULESETUP=ETOPEN
     NSMSETUP=BASIC
 fi

 if [[ $is_master && $is_node ]]; then
     LSPIPELINEWORKERS=1
     LSPIPELINEBATCH=125
@@ -161,6 +213,7 @@ if [[ $is_master && $is_node ]]; then
     NIDS=Suricata
     BROVERSION=ZEEK
 fi

 if [[ $is_node ]]; then
     CURCLOSEDAYS=30
 fi
@@ -203,6 +256,9 @@ fi

 if [[ $is_distmaster || ( $is_sensor || $is_node ) && ! $is_eval ]]; then
     whiptail_master_updates
+    if [[ $setup_type == 'network' && $MASTERUPDATES == 1 ]]; then
+        whiptail_master_updates_warning
+    fi
 fi

 if [[ $is_minion ]]; then
@@ -249,9 +305,8 @@ fi

 whiptail_make_changes

-if [[ $# -gt 1 ]]; then
-    set -- "${@:2}"
-    parse_options "$@" >> $setup_log 2>&1
+if [[ -n "$TURBO" ]]; then
+    use_turbo_proxy
 fi

 if [[ "$setup_type" == 'iso' ]]; then
@@ -498,11 +553,15 @@ fi
 set_progress_str 95 'Verifying setup'
 salt-call -l info state.highstate >> $setup_log 2>&1

-} | whiptail --title "Hybrid Hunter Install" --gauge "Please wait while installing" 6 60 0
+} | progress

 success=$(tail -10 $setup_log | grep Failed | awk '{ print $2}')
 if [[ "$success" = 0 ]]; then
     whiptail_setup_complete
+    if [[ -n $ALLOW_ROLE && -n $ALLOW_CIDR ]]; then
+        export IP=$ALLOW_CIDR
+        so-allow -$ALLOW_ROLE >> $setup_log 2>&1
+    fi
     if [[ $THEHIVE == 1 ]]; then
         check_hive_init_then_reboot
     else
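The new block lets a fully automated install open the firewall without prompting: so-allow reads the pre-exported IP instead of asking for one. The manual equivalent, with an illustrative CIDR and role letter (ALLOW_ROLE=a matches the automation default added earlier):

IP=192.168.1.0/24 so-allow -a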
@@ -413,7 +413,6 @@ whiptail_log_size_limit() {

 [ -n "$TESTING" ] && return

-set_defaul_log_size

 log_size_limit=$(whiptail --title "Security Onion Setup" --inputbox \
 "Please specify the amount of disk space (in GB) you would like to allocate for Elasticsearch data storage. \
@@ -1027,7 +1026,17 @@ whiptail_master_updates() {
     ;;
 esac

+}
+
+whiptail_master_updates_warning() {
+    [ -n "$TESTING" ] && return
+
+    whiptail --title "Security Onion Setup"\
+    --msgbox "Updating through the master node requires the master to have internet access, press ENTER to continue"\
+    8 75
+
+    local exitstatus=$?
+    whiptail_check_exitstatus $exitstatus
 }

 whiptail_node_updates() {