m0duspwnens
2020-05-29 13:01:08 -04:00
52 changed files with 1108 additions and 546 deletions

View File

@@ -28,19 +28,82 @@ salttmp:
- group: 939
- makedirs: True
# Install packages needed for the sensor
sensorpkgs:
# Install epel
{% if grains['os'] == 'CentOS' %}
epel:
pkg.installed:
- skip_suggestions: False
- skip_suggestions: True
- pkgs:
- epel-release
{% endif %}
# Install common packages
{% if grains['os'] != 'CentOS' %}
commonpkgs:
pkg.installed:
- skip_suggestions: True
- pkgs:
- apache2-utils
- wget
- ntpdate
- jq
- python3-docker
- docker-ce
- curl
- ca-certificates
- software-properties-common
- apt-transport-https
- openssl
- netcat
- python3-mysqldb
- sqlite3
- argon2
- libssl-dev
- python3-dateutil
- python3-m2crypto
- python3-mysqldb
heldpackages:
pkg.installed:
- pkgs:
- containerd.io: 1.2.13-2
- docker-ce: 5:19.03.9~3-0~ubuntu-bionic
- hold: True
- update_holds: True
{% else %}
commonpkgs:
pkg.installed:
- skip_suggestions: True
- pkgs:
- wget
- ntpdate
- bind-utils
- jq
{% if grains['os'] != 'CentOS' %}
- apache2-utils
{% else %}
- net-tools
- tcpdump
- httpd-tools
- net-tools
- curl
- sqlite
- argon2
- mariadb-devel
- nmap-ncat
- python3
- python36-docker
- python36-dateutil
- python36-m2crypto
- python36-mysql
- yum-utils
- device-mapper-persistent-data
- lvm2
- openssl
heldpackages:
pkg.installed:
- pkgs:
- containerd.io: 1.2.13-3.2.el7
- docker-ce: 3:19.03.9-3.el7
- hold: True
- update_holds: True
{% endif %}
# Always keep these packages up to date
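Both heldpackages states above pin containerd.io and docker-ce to known-good versions. As a quick sanity check, sketched here assuming stock tooling on each OS, the holds can be verified after a highstate:
# Ubuntu: list packages apt will refuse to upgrade
apt-mark showhold
# CentOS 7: list versionlock entries (requires yum-plugin-versionlock)
yum versionlock list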

View File

@@ -1,5 +1,5 @@
#!/bin/bash
#
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
@@ -17,4 +17,5 @@
. /usr/sbin/so-common
/usr/sbin/so-restart cortex $1
/usr/sbin/so-stop cortex $1
/usr/sbin/so-start thehive $1

View File

@@ -17,4 +17,4 @@
. /usr/sbin/so-common
/usr/sbin/so-start cortex $1
/usr/sbin/so-start thehive $1

View File

@@ -1,5 +1,5 @@
#!/bin/bash
#
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify

View File

@@ -166,8 +166,7 @@ cat << EOF
What elasticsearch index do you want to use?
Below are the default Index Patterns used in Security Onion:
*:logstash-*
*:logstash-beats-*
*:so-ids-*
*:elastalert_status*
EOF

View File

@@ -32,5 +32,5 @@ fi
case $1 in
"all") salt-call state.highstate queue=True;;
"steno") if docker ps | grep -q so-$1; then printf "\n$1 is already running!\n\n"; else docker rm so-$1 >/dev/null 2>&1 ; salt-call state.apply pcap queue=True; fi ;;
*) if docker ps | grep -q so-$1; then printf "\n$1 is already running\n\n"; else docker rm so-$1 >/dev/null 2>&1 ; salt-call state.apply $1 queue=True; fi ;;
*) if docker ps | grep -E -q " so-$1$"; then printf "\n$1 is already running\n\n"; else docker rm so-$1 >/dev/null 2>&1 ; salt-call state.apply $1 queue=True; fi ;;
esac
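Illustrative invocations of this script (container names are examples, not an exhaustive list):
so-start all        # run a full highstate
so-start steno      # applies the pcap state if so-steno is not already running
so-start logstash   # any other argument applies the salt state of the same name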

View File

@@ -0,0 +1,21 @@
#!/bin/bash
#
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
. /usr/sbin/so-common
/usr/sbin/so-stop thehive-es $1
/usr/sbin/so-start thehive $1

View File

@@ -0,0 +1,20 @@
#!/bin/bash
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
. /usr/sbin/so-common
/usr/sbin/so-start thehive $1

View File

@@ -0,0 +1,20 @@
#!/bin/bash
#
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
. /usr/sbin/so-common
/usr/sbin/so-stop thehive-es $1

View File

@@ -1,5 +1,5 @@
#!/bin/bash
#
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify

View File

@@ -1,5 +1,5 @@
#!/bin/bash
#
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify

View File

@@ -0,0 +1,39 @@
#!/bin/bash
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Show Zeek stats (capstats, netstats)
show_stats() {
echo '##############'
echo '# Zeek Stats #'
echo '##############'
echo
echo "Average throughput:"
echo
docker exec -it so-zeek /opt/zeek/bin/zeekctl capstats
echo
echo "Average packet loss:"
echo
docker exec -it so-zeek /opt/zeek/bin/zeekctl netstats
echo
}
if docker ps | grep -q zeek; then
show_stats
else
echo "Zeek is not running! Try starting it with 'so-zeek-start'." && exit 1;
fi

View File

@@ -24,9 +24,8 @@ actions:
disable_action: False
filters:
- filtertype: pattern
kind: prefix
value: logstash-
exclude:
kind: regex
value: '^(logstash-.*|so-.*)$'
- filtertype: age
source: name
direction: older

View File

@@ -20,8 +20,8 @@ actions:
disable_action: False
filters:
- filtertype: pattern
kind: prefix
value: logstash-
kind: regex
value: '^(logstash-.*|so-.*)$'
- filtertype: space
source: creation_date
use_age: True

View File

@@ -33,17 +33,17 @@ LOG="/opt/so/log/curator/so-curator-closed-delete.log"
# Check for 2 conditions:
# 1. Are Elasticsearch indices using more disk space than LOG_SIZE_LIMIT?
# 2. Are there any closed logstash- indices that we can delete?
# 2. Are there any closed logstash- or so- indices that we can delete?
# If both conditions are true, keep on looping until one of the conditions is false.
while [[ $(du -hs --block-size=1GB /nsm/elasticsearch/nodes | awk '{print $1}' ) -gt "{{LOG_SIZE_LIMIT}}" ]] &&
curl -s {{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices | grep "^ close logstash-" > /dev/null; do
curl -s {{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices | grep -E "^ close (logstash-|so-)" > /dev/null; do
# We need to determine OLDEST_INDEX.
# First, get the list of closed indices that are prefixed with "logstash-".
# First, get the list of closed indices that are prefixed with "logstash-" or "so-".
# For example: logstash-ids-YYYY.MM.DD
# Then, sort by date by telling sort to use hyphen as delimiter and then sort on the third field.
# Finally, select the first entry in that sorted list.
OLDEST_INDEX=$(curl -s {{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices | grep "^ close logstash-" | awk '{print $2}' | sort -t- -k3 | head -1)
OLDEST_INDEX=$(curl -s {{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices | grep -E "^ close (logstash-|so-)" | awk '{print $2}' | sort -t- -k3 | head -1)
# Now that we've determined OLDEST_INDEX, ask Elasticsearch to delete it.
curl -XDELETE {{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/${OLDEST_INDEX}
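A minimal sketch of the index-selection logic described in the comments above, using made-up index names, showing why sort -t- -k3 surfaces the oldest date:
printf '%s\n' logstash-ids-2020.05.03 logstash-firewall-2020.04.28 so-zeek-2020.05.01 | sort -t- -k3 | head -1
# -> logstash-firewall-2020.04.28 (the third hyphen-delimited field is the date)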

View File

@@ -40,7 +40,7 @@ hive_alert_config:
title: '{match[rule][name]}'
type: 'NIDS'
source: 'SecurityOnion'
description: "`Hunting Pivot:` \n\n <https://{{MASTER}}/#/hunt?q=event.module%3A%20suricata%20AND%20rule.uuid%3A{match[rule][uuid]}%20%7C%20groupby%20source.ip%20destination.ip%20rule.name> \n\n `Kibana Dashboard:` \n\n <https://{{MASTER}}/kibana/app/kibana#/dashboard/ed6f7e20-e060-11e9-8f0c-2ddbf5ed9290?_g=(refreshInterval:(display:Off,pause:!f,value:0),time:(from:now-24h,mode:quick,to:now))&_a=(columns:!(_source),index:'*:logstash-*',interval:auto,query:(query_string:(analyze_wildcard:!t,query:'sid:')),sort:!('@timestamp',desc))> \n\n `IPs: `{match[source][ip]}:{match[source][port]} --> {match[destination][ip]}:{match[destination][port]} \n\n `Signature:`{match[rule][rule]}"
description: "`Hunting Pivot:` \n\n <https://{{MASTER}}/#/hunt?q=event.module%3A%20suricata%20AND%20rule.uuid%3A{match[rule][uuid]}%20%7C%20groupby%20source.ip%20destination.ip%20rule.name> \n\n `Kibana Dashboard - Signature Drilldown:` \n\n <https://{{MASTER}}/kibana/app/kibana#/dashboard/ed6f7e20-e060-11e9-8f0c-2ddbf5ed9290?_g=(refreshInterval:(display:Off,pause:!f,value:0),time:(from:now-24h,mode:quick,to:now))&_a=(columns:!(_source),index:'*:logstash-*',interval:auto,query:(query_string:(analyze_wildcard:!t,query:'sid:')),sort:!('@timestamp',desc))> \n\n `Kibana Dashboard - Community_ID:` \n\n <https://{{MASTER}}/kibana/app/kibana#/dashboard/30d0ac90-729f-11ea-8dd2-9d8795a1200b?_g=(filters:!(('$state':(store:globalState),meta:(alias:!n,disabled:!f,index:'*:so-*',key:network.community_id,negate:!f,params:(query:'{match[network][community_id]}'),type:phrase),query:(match_phrase:(network.community_id:'{match[network][community_id]}')))),refreshInterval:(pause:!t,value:0),time:(from:now-7d,to:now))> \n\n `IPs: `{match[source][ip]}:{match[source][port]} --> {match[destination][ip]}:{match[destination][port]} \n\n `Signature:`{match[rule][rule]}"
severity: 2
tags: ['{match[rule][uuid]}','{match[source][ip]}','{match[destination][ip]}']
tlp: 3

View File

@@ -22,3 +22,7 @@ transport.bind_host: 0.0.0.0
transport.publish_host: {{ nodeip }}
transport.publish_port: 9300
{%- endif %}
cluster.routing.allocation.disk.threshold_enabled: true
cluster.routing.allocation.disk.watermark.low: 95%
cluster.routing.allocation.disk.watermark.high: 98%
cluster.routing.allocation.disk.watermark.flood_stage: 98%
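A hedged way to confirm these thresholds once the node is up, assuming Elasticsearch answers on localhost:9200 (jq is already in the sensor package list):
# per-node disk usage that the watermarks are evaluated against
curl -s 'localhost:9200/_cat/allocation?v'
# verify the applied settings
curl -s 'localhost:9200/_cluster/settings?include_defaults=true' | jq '.defaults.cluster.routing.allocation.disk'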

View File

@@ -38,7 +38,7 @@
{ "rename": { "field": "module", "target_field": "event.module", "ignore_missing": true } },
{ "rename": { "field": "dataset", "target_field": "event.dataset", "ignore_missing": true } },
{ "rename": { "field": "category", "target_field": "event.category", "ignore_missing": true } },
{ "rename": { "field": "message2.community_id", "target_field": "network.community_id", "ignore_missing": true } },
{ "rename": { "field": "message2.community_id", "target_field": "network.community_id", "ignore_failure": true, "ignore_missing": true } },
{
"remove": {
"field": [ "index_name_prefix", "message2", "type" ],

View File

@@ -31,7 +31,7 @@
{ "rename": { "field": "message3.columns.remote_port", "target_field": "remote.port", "ignore_missing": true } },
{ "rename": { "field": "message3.columns.process_name", "target_field": "process.name", "ignore_missing": true } },
{ "rename": { "field": "message3.columns.eventid", "target_field": "event.code", "ignore_missing": true } },
{ "set": { "if": "ctx.message3.columns.?data != null", "field": "dataset", "value": "wel-{{message3.columns.source}}", "override": true } },
{ "set": { "if": "ctx.message3.columns?.data != null", "field": "dataset", "value": "wel-{{message3.columns.source}}", "override": true } },
{ "rename": { "field": "message3.columns.winlog.EventData.SubjectUserName", "target_field": "user.name", "ignore_missing": true } },
{ "rename": { "field": "message3.columns.winlog.EventData.destinationHostname", "target_field": "destination.hostname", "ignore_missing": true } },
{ "rename": { "field": "message3.columns.winlog.EventData.destinationIp", "target_field": "destination.ip", "ignore_missing": true } },

View File

@@ -0,0 +1,13 @@
{
"description" : "syslog",
"processors" : [
{
"dissect": {
"field": "message",
"pattern" : "%{message}",
"on_failure": [ { "drop" : { } } ]
}
},
{ "pipeline": { "name": "common" } }
]
}

View File

@@ -7,6 +7,7 @@
{ "dot_expander": { "field": "id.orig_p", "path": "message2", "ignore_failure": true } },
{ "dot_expander": { "field": "id.resp_h", "path": "message2", "ignore_failure": true } },
{ "dot_expander": { "field": "id.resp_p", "path": "message2", "ignore_failure": true } },
{"community_id": {"if": "ctx.network?.transport != null", "field":["message2.id.orig_h","message2.id.orig_p","message2.id.resp_h","message2.id.resp_p","network.transport"],"target_field":"network.community_id"}},
{ "rename": { "field": "message2.id.orig_h", "target_field": "source.ip", "ignore_missing": true } },
{ "rename": { "field": "message2.id.orig_p", "target_field": "source.port", "ignore_missing": true } },
{ "rename": { "field": "message2.id.resp_h", "target_field": "destination.ip", "ignore_missing": true } },
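The new community_id processor can be exercised in isolation with the _simulate endpoint; the sketch below fabricates a conn record and assumes the same community_id processor the pipeline above relies on is available (host and values are placeholders):
curl -s -XPOST 'localhost:9200/_ingest/pipeline/_simulate' -H 'Content-Type: application/json' -d '
{
  "pipeline": { "processors": [
    {"community_id": {"field":["message2.id.orig_h","message2.id.orig_p","message2.id.resp_h","message2.id.resp_p","network.transport"],"target_field":"network.community_id"}}
  ]},
  "docs": [ {"_source": {"network": {"transport": "tcp"}, "message2": {"id": {"orig_h": "10.0.0.1", "orig_p": 40000, "resp_h": "10.0.0.2", "resp_p": 80}}}} ]
}'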

View File

@@ -29,6 +29,7 @@
{ "script": { "lang": "painless", "source": "ctx.uri_length = ctx.uri.length()", "ignore_failure": true } },
{ "script": { "lang": "painless", "source": "ctx.useragent_length = ctx.useragent.length()", "ignore_failure": true } },
{ "script": { "lang": "painless", "source": "ctx.virtual_host_length = ctx.virtual_host.length()", "ignore_failure": true } },
{ "set": { "field": "network.transport", "value": "tcp" } },
{ "pipeline": { "name": "zeek.common" } }
]
}

View File

@@ -6,7 +6,7 @@
{ "rename": { "field": "message2.fuid", "target_field": "log.id.fuid", "ignore_missing": true } },
{ "rename": { "field": "message2.mime", "target_field": "file.mimetype", "ignore_missing": true } },
{ "rename": { "field": "message2.desc", "target_field": "file.description", "ignore_missing": true } },
{ "rename": { "field": "message2.proto", "target_field": "network.protocol", "ignore_missing": true } },
{ "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_missing": true } },
{ "rename": { "field": "message2.note", "target_field": "notice.note", "ignore_missing": true } },
{ "rename": { "field": "message2.msg", "target_field": "notice.message", "ignore_missing": true } },
{ "rename": { "field": "message2.sub", "target_field": "notice.sub_message", "ignore_missing": true } },

View File

@@ -5,7 +5,7 @@
{ "json": { "field": "message", "target_field": "message2", "ignore_failure": true } },
{ "rename": { "field": "message2.username", "target_field": "user.name", "ignore_missing": true } },
{ "rename": { "field": "message2.mac", "target_field": "host.mac", "ignore_missing": true } },
{ "rename": { "field": "message2.framed_addr", "target_field": "framed_addr", "ignore_missing": true } },
{ "rename": { "field": "message2.framed_addr", "target_field": "radius.framed_address", "ignore_missing": true } },
{ "rename": { "field": "message2.remote_ip", "target_field": "destination.ip", "ignore_missing": true } },
{ "rename": { "field": "message2.connect_info", "target_field": "radius.connect_info", "ignore_missing": true } },
{ "rename": { "field": "message2.reply_msg", "target_field": "radius.reply_message", "ignore_missing": true } },

View File

@@ -25,6 +25,7 @@
{ "rename": { "field": "message2.tls", "target_field": "smtp.tls", "ignore_missing": true } },
{ "rename": { "field": "message2.fuids", "target_field": "log.id.fuids", "ignore_missing": true } },
{ "rename": { "field": "message2.is_webmail", "target_field": "smtp.is_webmail", "ignore_missing": true } },
{ "set": { "field": "network.transport", "value": "tcp" } },
{ "pipeline": { "name": "zeek.common" } }
]
}

View File

@@ -15,6 +15,7 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
RETURN_CODE=0
ELASTICSEARCH_HOST=$1
ELASTICSEARCH_PORT=9200
@@ -46,7 +47,9 @@ fi
cd ${ELASTICSEARCH_INGEST_PIPELINES}
echo "Loading pipelines..."
for i in *; do echo $i; curl ${ELASTICSEARCH_AUTH} -XPUT http://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}/_ingest/pipeline/$i -H 'Content-Type: application/json' -d@$i 2>/dev/null; echo; done
for i in *; do echo $i; RESPONSE=$(curl ${ELASTICSEARCH_AUTH} -XPUT http://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}/_ingest/pipeline/$i -H 'Content-Type: application/json' -d@$i 2>/dev/null); echo $RESPONSE; if [[ "$RESPONSE" == *"error"* ]]; then RETURN_CODE=1; fi; done
echo
cd - >/dev/null
exit $RETURN_CODE

View File

@@ -75,6 +75,19 @@ filebeat.modules:
filebeat.inputs:
#------------------------------ Log prospector --------------------------------
{%- if grains['role'] == 'so-sensor' or grains['role'] == "so-eval" or grains['role'] == "so-helix" or grains['role'] == "so-heavynode" or grains['role'] == "so-standalone" %}
- type: syslog
enabled: true
protocol.udp:
host: "0.0.0.0:514"
fields:
module: syslog
dataset: syslog
pipeline: "syslog"
index: "so-syslog-%{+yyyy.MM.dd}"
processors:
- drop_fields:
fields: ["source", "prospector", "input", "offset", "beat"]
{%- if BROVER != 'SURICATA' %}
{%- for LOGNAME in salt['pillar.get']('brologs:enabled', '') %}
- type: log
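Once this state is applied, the new syslog input can be smoke-tested from an allowed host with util-linux logger (sensor-ip is a placeholder; using logger here is an assumption, not part of the commit):
logger --udp --server sensor-ip --port 514 'filebeat syslog input test'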

View File

@@ -57,12 +57,14 @@ so-filebeat:
- /opt/so/conf/filebeat/etc/filebeat.yml:/usr/share/filebeat/filebeat.yml:ro
- /nsm/zeek:/nsm/zeek:ro
- /nsm/strelka/log:/nsm/strelka/log:ro
- /opt/so/log/suricata:/suricata:ro
- /nsm/suricata:/suricata:ro
- /opt/so/wazuh/logs/alerts:/wazuh/alerts:ro
- /opt/so/wazuh/logs/archives:/wazuh/archives:ro
- /nsm/osquery/fleet/:/nsm/osquery/fleet:ro
- /opt/so/conf/filebeat/etc/pki/filebeat.crt:/usr/share/filebeat/filebeat.crt:ro
- /opt/so/conf/filebeat/etc/pki/filebeat.key:/usr/share/filebeat/filebeat.key:ro
- /etc/ssl/certs/intca.crt:/usr/share/filebeat/intraca.crt:ro
- port_bindings:
- 0.0.0.0:514:514/udp
- watch:
- file: /opt/so/conf/filebeat/etc/filebeat.yml

View File

@@ -136,6 +136,18 @@ enable_wazuh_manager_1514_udp_{{ip}}:
- position: 1
- save: True
# Allow syslog
enable_syslog_514_{{ip}}:
iptables.insert:
- table: filter
- chain: DOCKER-USER
- jump: ACCEPT
- proto: udp
- source: {{ ip }}
- dport: 514
- position: 1
- save: True
# Rules if you are a Master
{% if grains['role'] in ['so-master', 'so-eval', 'so-helix', 'so-mastersearch', 'so-standalone'] %}
#This should be more granular

View File

@@ -1,64 +0,0 @@
#!/bin/bash
{% set MASTERIP = salt['pillar.get']('static:masterip', '') %}
{%- set HIVEUSER = salt['pillar.get']('static:hiveuser', '') %}
{%- set HIVEPASSWORD = salt['pillar.get']('static:hivepassword', '') %}
{%- set HIVEKEY = salt['pillar.get']('static:hivekey', '') %}
hive_init(){
sleep 120
HIVE_IP="{{MASTERIP}}"
HIVE_USER="{{HIVEUSER}}"
HIVE_PASSWORD="{{HIVEPASSWORD}}"
HIVE_KEY="{{HIVEKEY}}"
SOCTOPUS_CONFIG="/opt/so/saltstack/salt/soctopus/files/SOCtopus.conf"
echo -n "Waiting for TheHive..."
COUNT=0
HIVE_CONNECTED="no"
while [[ "$COUNT" -le 240 ]]; do
curl --output /dev/null --silent --head --fail -k "https://$HIVE_IP/thehive"
if [ $? -eq 0 ]; then
HIVE_CONNECTED="yes"
echo "connected!"
break
else
((COUNT+=1))
sleep 1
echo -n "."
fi
done
if [ "$HIVE_CONNECTED" == "yes" ]; then
# Migrate DB
curl -v -k -XPOST "https://$HIVE_IP:/thehive/api/maintenance/migrate"
# Create initial TheHive user
curl -v -k "https://$HIVE_IP/thehive/api/user" -H "Content-Type: application/json" -d "{\"login\" : \"$HIVE_USER\",\"name\" : \"$HIVE_USER\",\"roles\" : [\"read\",\"alert\",\"write\",\"admin\"],\"preferences\" : \"{}\",\"password\" : \"$HIVE_PASSWORD\", \"key\": \"$HIVE_KEY\"}"
# Pre-load custom fields
#
# reputation
curl -v -k "https://$HIVE_IP/thehive/api/list/custom_fields" -H "Authorization: Bearer $HIVE_KEY" -H "Content-Type: application/json" -d "{\"value\":{\"name\": \"reputation\", \"reference\": \"reputation\", \"description\": \"This field provides an overall reputation status for an address/domain.\", \"type\": \"string\", \"options\": []}}"
touch /opt/so/state/thehive.txt
else
echo "We experienced an issue connecting to TheHive!"
fi
}
if [ -f /opt/so/state/thehive.txt ]; then
exit 0
else
rm -f garbage_file
while ! wget -O garbage_file {{MASTERIP}}:9400 2>/dev/null
do
echo "Waiting for Elasticsearch..."
rm -f garbage_file
sleep 1
done
rm -f garbage_file
sleep 5
hive_init
fi

View File

@@ -198,7 +198,7 @@ so-logstash:
- /etc/pki/ca.crt:/usr/share/filebeat/ca.crt:ro
{%- if grains['role'] == 'so-eval' %}
- /nsm/zeek:/nsm/zeek:ro
- /opt/so/log/suricata:/suricata:ro
- /nsm/suricata:/suricata:ro
- /opt/so/wazuh/logs/alerts:/wazuh/alerts:ro
- /opt/so/wazuh/logs/archives:/wazuh/archives:ro
- /opt/so/log/fleet/:/osquery/logs:ro

View File

@@ -32,7 +32,7 @@
"dateRangeMinutes": 1440,
"mostRecentlyUsedLimit": 5,
"eventFields": {
"default": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "log.id.uid" ],
"default": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "log.id.uid", "network.community_id" ],
"bro_conn": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "protocol", "service", "log.id.uid" ],
"bro_dce_rpc": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "endpoint", "named_pipe", "operation", "log.id.uid" ],
"bro_dhcp": ["soc_timestamp", "source.ip", "destination.ip", "domain_name", "hostname", "message_types", "log.id.uid" ],
@@ -99,7 +99,7 @@
{ "name": "Connections", "description": "Connections grouped by destination country", "query": "event.module:zeek AND event.dataset:conn | groupby destination.geo.country_name"},
{ "name": "Connections", "description": "Connections grouped by source country", "query": "event.module:zeek AND event.dataset:conn | groupby source.geo.country_name"},
{ "name": "DCE_RPC", "description": "DCE_RPC grouped by operation", "query": "event.module:zeek AND event.dataset:dce_rpc | groupby dce_rpc.operation"},
{ "name": "DHCP", "description": "DHCP leases", "query": "event.module:zeek AND event.dataset:dhcp | groupby host.hostname host.domain dhcp.requested_address"},
{ "name": "DHCP", "description": "DHCP leases", "query": "event.module:zeek AND event.dataset:dhcp | groupby host.hostname host.domain"},
{ "name": "DHCP", "description": "DHCP grouped by message type", "query": "event.module:zeek AND event.dataset:dhcp | groupby dhcp.message_types"},
{ "name": "DNP3", "description": "DNP3 grouped by reply", "query": "event.module:zeek AND event.dataset:dnp3 | groupby dnp3.fc_reply"},
{ "name": "DNS", "description": "DNS queries grouped by port ", "query": "event.module:zeek AND event.dataset:dns | groupby dns.query.name destination.port"},
@@ -122,8 +122,7 @@
{ "name": "KERBEROS", "description": "KERBEROS grouped by service", "query": "event.module:zeek AND event.dataset:kerberos | groupby kerberos.service"},
{ "name": "MODBUS", "description": "MODBUS grouped by function", "query": "event.module:zeek AND event.dataset:modbus | groupby modbus.function"},
{ "name": "MYSQL", "description": "MYSQL grouped by command", "query": "event.module:zeek AND event.dataset:mysql | groupby mysql.command"},
{ "name": "NOTICE", "description": "Zeek notice logs grouped by note", "query": "event.module:zeek AND event.dataset:notice | groupby notice.note"},
{ "name": "NOTICE", "description": "Zeek notice logs grouped by message", "query": "event.module:zeek AND event.dataset:notice | groupby notice.message"},
{ "name": "NOTICE", "description": "Zeek notice logs grouped by note and message", "query": "event.module:zeek AND event.dataset:notice | groupby notice.note notice.message"},
{ "name": "NTLM", "description": "NTLM grouped by computer name", "query": "event.module:zeek AND event.dataset:ntlm | groupby ntlm.server.dns.name"},
{ "name": "PE", "description": "PE files list", "query": "event.module:zeek AND event.dataset:pe | groupby file.machine file.os file.subsystem"},
{ "name": "RADIUS", "description": "RADIUS grouped by username", "query": "event.module:zeek AND event.dataset:radius | groupby user.name.keyword"},

View File

@@ -1,9 +1,9 @@
{%- set ip = salt['pillar.get']('static:masterip', '') %}
{%- set MASTER = salt['pillar.get']('master:url_base', '') %}
{%- set HIVEKEY = salt['pillar.get']('static:hivekey', '') %}
{%- set CORTEXKEY = salt['pillar.get']('static:cortexorguserkey', '') %}
[es]
es_url = http://{{ip}}:9200
es_url = http://{{MASTER}}:9200
es_user = YOURESUSER
es_pass = YOURESPASS
es_index_pattern = so-*
@@ -11,7 +11,7 @@ es_verifycert = no
[cortex]
auto_analyze_alerts = no
cortex_url = https://{{ip}}/cortex/
cortex_url = https://{{MASTER}}/cortex/
cortex_key = {{ CORTEXKEY }}
supported_analyzers = Urlscan_io_Search,CERTatPassiveDNS
@@ -32,7 +32,7 @@ grr_user = YOURGRRUSER
grr_pass = YOURGRRPASS
[hive]
hive_url = https://{{ip}}/thehive/
hive_url = https://{{MASTER}}/thehive/
hive_key = {{ HIVEKEY }}
hive_tlp = 3
hive_verifycert = no
@@ -59,7 +59,7 @@ slack_url = YOURSLACKWORKSPACE
slack_webhook = YOURSLACKWEBHOOK
[playbook]
playbook_url = https://{{ip}}/playbook
playbook_url = https://{{MASTER}}/playbook
playbook_key = de6639318502476f2fa5aa06f43f51fb389a3d7f
playbook_verifycert = no

View File

@@ -1,5 +1,7 @@
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
{% set MASTER = salt['grains.get']('master') %}
{%- set MASTER_URL = salt['pillar.get']('master:url_base', '') %}
{%- set MASTER_IP = salt['pillar.get']('static:masterip', '') %}
soctopusdir:
file.directory:
@@ -69,3 +71,5 @@ so-soctopus:
- /opt/so/conf/navigator/nav_layer_playbook.json:/etc/playbook/nav_layer_playbook.json:rw
- port_bindings:
- 0.0.0.0:7000:7000
- extra_hosts:
- {{MASTER_URL}}:{{MASTER_IP}}

View File

@@ -112,5 +112,5 @@ strelka_filestream:
strelka_zeek_extracted_sync:
cron.present:
- user: root
- name: mv /nsm/zeek/extracted/complete/* /nsm/strelka
- name: '[ -d /nsm/zeek/extracted/complete/ ] && mv /nsm/zeek/extracted/complete/* /nsm/strelka/ > /dev/null 2>&1'
- minute: '*'

File diff suppressed because it is too large

View File

@@ -55,6 +55,12 @@ surilogdir:
- user: 940
- group: 939
suridatadir:
file.directory:
- name: /nsm/suricata
- user: 940
- group: 939
surirulesync:
file.recurse:
- name: /opt/so/conf/suricata/rules/
@@ -119,6 +125,7 @@ so-suricata:
- /opt/so/conf/suricata/threshold.conf:/etc/suricata/threshold.conf:ro
- /opt/so/conf/suricata/rules:/etc/suricata/rules:ro
- /opt/so/log/suricata/:/var/log/suricata/:rw
- /nsm/suricata/:/nsm/:rw
- /opt/so/conf/suricata/bpf:/etc/suricata/bpf:ro
- network_mode: host
- watch:

View File

@@ -12,7 +12,7 @@ search {
# Name of the index
index = the_hive
# Name of the Elasticsearch cluster
cluster = hive
cluster = thehive
# Address of the Elasticsearch instance
host = ["{{ MASTERIP }}:9500"]
#search.uri = "http://{{ MASTERIP }}:9500"

View File

@@ -12,7 +12,7 @@ search {
# Name of the index
index = cortex
# Name of the Elasticsearch cluster
cluster = hive
cluster = thehive
# Address of the Elasticsearch instance
host = ["{{ MASTERIP }}:9500"]
# Scroll keepalive

View File

@@ -1,4 +1,4 @@
cluster.name: "hive"
cluster.name: "thehive"
network.host: 0.0.0.0
discovery.zen.minimum_master_nodes: 1
# This is a test -- if this is here, then the volume is mounted correctly.

View File

@@ -1,24 +1,24 @@
{% set MASTERIP = salt['pillar.get']('master:mainip', '') %}
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
{% set MASTER = salt['grains.get']('master') %}
hiveconfdir:
thehiveconfdir:
file.directory:
- name: /opt/so/conf/hive/etc
- name: /opt/so/conf/thehive/etc
- makedirs: True
- user: 939
- group: 939
hivelogdir:
thehivelogdir:
file.directory:
- name: /opt/so/log/hive
- name: /opt/so/log/thehive
- makedirs: True
- user: 939
- group: 939
hiveconf:
thehiveconf:
file.recurse:
- name: /opt/so/conf/hive/etc
- source: salt://hive/thehive/etc
- name: /opt/so/conf/thehive/etc
- source: salt://thehive/etc
- user: 939
- group: 939
- template: jinja
@@ -40,7 +40,7 @@ cortexlogdir:
cortexconf:
file.recurse:
- name: /opt/so/conf/cortex
- source: salt://hive/thehive/etc
- source: salt://thehive/etc
- user: 939
- group: 939
- template: jinja
@@ -48,9 +48,9 @@ cortexconf:
# Install Elasticsearch
# Made directory for ES data to live in
hiveesdata:
thehiveesdata:
file.directory:
- name: /nsm/hive/esdata
- name: /nsm/thehive/esdata
- makedirs: True
- user: 939
- group: 939
@@ -64,16 +64,16 @@ so-thehive-es:
- interactive: True
- tty: True
- binds:
- /nsm/hive/esdata:/usr/share/elasticsearch/data:rw
- /opt/so/conf/hive/etc/es/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro
- /opt/so/conf/hive/etc/es/log4j2.properties:/usr/share/elasticsearch/config/log4j2.properties:ro
- /opt/so/log/hive:/var/log/elasticsearch:rw
- /nsm/thehive/esdata:/usr/share/elasticsearch/data:rw
- /opt/so/conf/thehive/etc/es/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro
- /opt/so/conf/thehive/etc/es/log4j2.properties:/usr/share/elasticsearch/config/log4j2.properties:ro
- /opt/so/log/thehive:/var/log/elasticsearch:rw
- environment:
- http.host=0.0.0.0
- http.port=9400
- transport.tcp.port=9500
- transport.host=0.0.0.0
- cluster.name=hive
- cluster.name=thehive
- thread_pool.index.queue_size=100000
- thread_pool.search.queue_size=100000
- thread_pool.bulk.queue_size=100000
@@ -90,13 +90,13 @@ so-cortex:
- name: so-cortex
- user: 939
- binds:
- /opt/so/conf/hive/etc/cortex-application.conf:/opt/cortex/conf/application.conf:ro
- /opt/so/conf/thehive/etc/cortex-application.conf:/opt/cortex/conf/application.conf:ro
- port_bindings:
- 0.0.0.0:9001:9001
cortexscript:
cmd.script:
- source: salt://hive/thehive/scripts/cortex_init
- source: salt://thehive/scripts/cortex_init
- cwd: /opt/so
- template: jinja
@@ -109,12 +109,12 @@ so-thehive:
- name: so-thehive
- user: 939
- binds:
- /opt/so/conf/hive/etc/application.conf:/opt/thehive/conf/application.conf:ro
- /opt/so/conf/thehive/etc/application.conf:/opt/thehive/conf/application.conf:ro
- port_bindings:
- 0.0.0.0:9000:9000
hivescript:
thehivescript:
cmd.script:
- source: salt://hive/thehive/scripts/hive_init
- source: salt://thehive/scripts/hive_init
- cwd: /opt/so
- template: jinja

salt/thehive/scripts/hive_init Executable file
View File

@@ -0,0 +1,64 @@
#!/bin/bash
{% set MASTERIP = salt['pillar.get']('static:masterip', '') %}
{%- set THEHIVEUSER = salt['pillar.get']('static:hiveuser', '') %}
{%- set THEHIVEPASSWORD = salt['pillar.get']('static:hivepassword', '') %}
{%- set THEHIVEKEY = salt['pillar.get']('static:hivekey', '') %}
thehive_init(){
sleep 120
THEHIVE_IP="{{MASTERIP}}"
THEHIVE_USER="{{THEHIVEUSER}}"
THEHIVE_PASSWORD="{{THEHIVEPASSWORD}}"
THEHIVE_KEY="{{THEHIVEKEY}}"
SOCTOPUS_CONFIG="/opt/so/saltstack/salt/soctopus/files/SOCtopus.conf"
echo -n "Waiting for TheHive..."
COUNT=0
THEHIVE_CONNECTED="no"
while [[ "$COUNT" -le 240 ]]; do
curl --output /dev/null --silent --head --fail -k "https://$THEHIVE_IP/thehive"
if [ $? -eq 0 ]; then
THEHIVE_CONNECTED="yes"
echo "connected!"
break
else
((COUNT+=1))
sleep 1
echo -n "."
fi
done
if [ "$THEHIVE_CONNECTED" == "yes" ]; then
# Migrate DB
curl -v -k -XPOST "https://$THEHIVE_IP:/thehive/api/maintenance/migrate"
# Create initial TheHive user
curl -v -k "https://$THEHIVE_IP/thehive/api/user" -H "Content-Type: application/json" -d "{\"login\" : \"$THEHIVE_USER\",\"name\" : \"$THEHIVE_USER\",\"roles\" : [\"read\",\"alert\",\"write\",\"admin\"],\"preferences\" : \"{}\",\"password\" : \"$THEHIVE_PASSWORD\", \"key\": \"$THEHIVE_KEY\"}"
# Pre-load custom fields
#
# reputation
curl -v -k "https://$THEHIVE_IP/thehive/api/list/custom_fields" -H "Authorization: Bearer $THEHIVE_KEY" -H "Content-Type: application/json" -d "{\"value\":{\"name\": \"reputation\", \"reference\": \"reputation\", \"description\": \"This field provides an overall reputation status for an address/domain.\", \"type\": \"string\", \"options\": []}}"
touch /opt/so/state/thehive.txt
else
echo "We experienced an issue connecting to TheHive!"
fi
}
if [ -f /opt/so/state/thehive.txt ]; then
exit 0
else
rm -f garbage_file
while ! wget -O garbage_file {{MASTERIP}}:9400 2>/dev/null
do
echo "Waiting for Elasticsearch..."
rm -f garbage_file
sleep 1
done
rm -f garbage_file
sleep 5
thehive_init
fi

View File

@@ -100,7 +100,7 @@ base:
- schedule
- soctopus
{%- if THEHIVE != 0 %}
- hive
- thehive
{%- endif %}
{%- if PLAYBOOK != 0 %}
- playbook
@@ -149,7 +149,7 @@ base:
{%- endif %}
- soctopus
{%- if THEHIVE != 0 %}
- hive
- thehive
{%- endif %}
{%- if PLAYBOOK != 0 %}
- playbook
@@ -203,7 +203,7 @@ base:
- schedule
- soctopus
{%- if THEHIVE != 0 %}
- hive
- thehive
{%- endif %}
{%- if PLAYBOOK != 0 %}
- playbook
@@ -318,7 +318,7 @@ base:
{%- endif %}
- soctopus
{%- if THEHIVE != 0 %}
- hive
- thehive
{%- endif %}
{%- if PLAYBOOK != 0 %}
- playbook

View File

@@ -0,0 +1,78 @@
#!/bin/bash
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
TESTING=true
address_type=DHCP
ADMINUSER=onionuser
ADMINPASS1=onionuser
ADMINPASS2=onionuser
ALLOW_CIDR=0.0.0.0/0
ALLOW_ROLE=a
BASICBRO=7
BASICSURI=7
# BLOGS=
BNICS=eth1
BROVERSION=ZEEK
# CURCLOSEDAYS=
# EVALADVANCED=BASIC
GRAFANA=1
# HELIXAPIKEY=
HNMASTER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
HNSENSOR=inherit
HOSTNAME=standalone
install_type=STANDALONE
# LSINPUTBATCHCOUNT=
# LSINPUTTHREADS=
# LSPIPELINEBATCH=
# LSPIPELINEWORKERS=
MASTERADV=BASIC
MASTERUPDATES=1
# MDNS=
# MGATEWAY=
# MIP=
# MMASK=
MNIC=eth0
# MSEARCH=
# MSRV=
# MTU=
NAVIGATOR=1
NIDS=Suricata
# NODE_ES_HEAP_SIZE=
# NODE_LS_HEAP_SIZE=
NODESETUP=NODEBASIC
NSMSETUP=BASIC
NODEUPDATES=MASTER
# OINKCODE=
OSQUERY=1
# PATCHSCHEDULEDAYS=
# PATCHSCHEDULEHOURS=
PATCHSCHEDULENAME=auto
PLAYBOOK=1
# REDIRECTHOST=
REDIRECTINFO=IP
RULESETUP=ETOPEN
# SHARDCOUNT=
SKIP_REBOOT=1
SOREMOTEPASS1=onionuser
SOREMOTEPASS2=onionuser
STRELKA=1
THEHIVE=1
WAZUH=1
WEBUSER=onionuser@somewhere.invalid
WEBPASSWD1=onionuser
WEBPASSWD2=onionuser
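Judging from the argument handling added to so-setup below, a file like this is sourced by bare filename from setup/automation/; a hypothetical unattended run (path and name assumed) would look like:
cd setup
./so-setup iso standalone   # sources automation/standalone and logs progress instead of drawing whiptail gauges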

View File

@@ -0,0 +1,2 @@
[Service]
ExecStart=/usr/bin/dockerd -H fd:// --registry-mirror "$proxy_addr"

View File

@@ -38,31 +38,3 @@ calculate_useable_cores() {
if [ "$lb_procs_round" -lt 1 ]; then lb_procs=1; else lb_procs=$lb_procs_round; fi
export lb_procs
}
set_defaul_log_size() {
local percentage
case $INSTALLTYPE in
EVAL | HEAVYNODE)
percentage=50
;;
*)
percentage=80
;;
esac
local disk_dir="/"
if [ -d /nsm ]; then
disk_dir="/nsm"
fi
local disk_size_1k
disk_size_1k=$(df $disk_dir | grep -v "^Filesystem" | awk '{print $2}')
local ratio="1048576"
local disk_size_gb
disk_size_gb=$( echo "$disk_size_1k" "$ratio" | awk '{print($1/$2)}' )
log_size_limit=$( echo "$disk_size_gb" "$percentage" | awk '{printf("%.0f", $1 * ($2/100))}')
export log_size_limit
}

View File

@@ -19,7 +19,7 @@ source ./so-whiptail
source ./so-variables
source ./so-common-functions
SOVERSION=1.3.0
SOVERSION=1.4.0
accept_salt_key_remote() {
systemctl restart salt-minion
@@ -193,7 +193,7 @@ check_admin_pass() {
check_pass_match "$ADMINPASS1" "$ADMINPASS2" "APMATCH"
}
check_hive_init_then_reboot() {
check_hive_init() {
wait_for_file /opt/so/state/thehive.txt 20 5
local return_val=$?
@@ -203,7 +203,6 @@ check_hive_init_then_reboot() {
docker stop so-thehive
docker rm so-thehive
shutdown -r now
}
check_network_manager_conf() {
@@ -514,7 +513,7 @@ detect_os() {
# Install bind-utils so the host command exists
if ! command -v host > /dev/null 2>&1; then
echo "Installing required packages to run installer"
yum -y install bind-utils >> "$setup_log" 2>&1
yum -y install bind-utils yum-plugin-versionlock >> "$setup_log" 2>&1
fi
@@ -550,6 +549,9 @@ detect_os() {
disable_onion_user() {
# Disable the default account cause security.
usermod -L onion
# Remove the automated setup script from crontab, if it exists
crontab -u onion -r
}
disable_misc_network_features() {
@@ -580,7 +582,9 @@ docker_install() {
{
yum clean expire-cache;
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo;
yum -y install docker-ce;
yum -y install docker-ce-19.03.9-3.el7 containerd.io-1.2.6-3.el7;
yum versionlock docker-ce-19.03.9-3.el7;
yum versionlock containerd.io-1.2.6-3.el7
} >> "$setup_log" 2>&1
else
@@ -616,9 +620,10 @@ docker_registry() {
echo "Setting up Docker Registry" >> "$setup_log" 2>&1
mkdir -p /etc/docker >> "$setup_log" 2>&1
# Make the host use the master docker registry
if [ -n "$TURBO" ]; then local proxy="$TURBO"; else local proxy="https://$MSRV"; fi
printf '%s\n'\
"{"\
" \"registry-mirrors\": [\"https://$MSRV:5000\"]"\
" \"registry-mirrors\": [ \"$proxy:5000\" ]"\
"}" > /etc/docker/daemon.json
echo "Docker Registry Setup - Complete" >> "$setup_log" 2>&1
@@ -683,7 +688,7 @@ docker_seed_registry() {
# Tag it with the new registry destination
docker tag soshybridhunter/"$i" "$HOSTNAME":5000/soshybridhunter/"$i"
docker push "$HOSTNAME":5000/soshybridhunter/"$i"
docker rmi soshybridhunter/"$i"
#docker rmi soshybridhunter/"$i"
} >> "$setup_log" 2>&1
done
else
@@ -1068,7 +1073,7 @@ saltify() {
yum -y update exclude=salt*;
systemctl enable salt-minion;
} >> "$setup_log" 2>&1
echo "exclude=salt*" >> /etc/yum.conf
yum versionlock salt*
else
DEBIAN_FRONTEND=noninteractive apt-get -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" upgrade >> "$setup_log" 2>&1
@@ -1227,8 +1232,6 @@ set_progress_str() {
'----'\
"$percentage% - ${progress_bar_text^^}"\
"----" >> "$setup_log" 2>&1
sleep 5
}
sensor_pillar() {
@@ -1279,6 +1282,33 @@ sensor_pillar() {
cat "$pillar_file" >> "$setup_log" 2>&1
}
set_default_log_size() {
local percentage
case $INSTALLTYPE in
EVAL | HEAVYNODE)
percentage=50
;;
*)
percentage=80
;;
esac
local disk_dir="/"
if [ -d /nsm ]; then
disk_dir="/nsm"
fi
local disk_size_1k
disk_size_1k=$(df $disk_dir | grep -v "^Filesystem" | awk '{print $2}')
local ratio="1048576"
local disk_size_gb
disk_size_gb=$( echo "$disk_size_1k" "$ratio" | awk '{print($1/$2)}' )
log_size_limit=$( echo "$disk_size_gb" "$percentage" | awk '{printf("%.0f", $1 * ($2/100))}')
}
set_hostname() {
set_hostname_iso
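A worked example of set_default_log_size above, assuming a 2 TB /nsm volume on an EVAL install (percentage=50):
disk_size_1k=2147483648   # df reports a 2 TB filesystem as this many 1K blocks
echo "$disk_size_1k" 1048576 | awk '{print($1/$2)}'    # -> 2048 (GB)
echo 2048 50 | awk '{printf("%.0f", $1 * ($2/100))}'   # -> log_size_limit=1024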
@@ -1443,6 +1473,23 @@ update_packages() {
fi
}
use_turbo_proxy() {
if [[ ! $install_type =~ ^(MASTER|EVAL|HELIXSENSOR|MASTERSEARCH|STANDALONE)$ ]]; then
echo "turbo is not supported on this install type" >> $setup_log 2>&1
return
fi
if [[ $OS == 'centos' ]]; then
printf '%s\n' "proxy=${TURBO}:3142" >> /etc/yum.conf
else
printf '%s\n'\
"Acquire {"\
" HTTP::proxy \"${TURBO}:3142\";"\
" HTTPS::proxy \"${TURBO}:3142\";"\
"}" > /etc/apt/apt.conf.d/proxy.conf
fi
}
ls_heapsize() {
if [ "$total_mem" -ge 32000 ]; then

View File

@@ -21,15 +21,74 @@ source ./so-common-functions
source ./so-whiptail
source ./so-variables
# Parse command line arguments
setup_type=$1
export setup_type
automation=$2
while [[ $# -gt 0 ]]; do
arg="$1"
shift
case "$arg" in
"--turbo="* )
export TURBO="http://${arg#*=}";;
"--proxy="* )
export {http,https,ftp,rsync,all}_proxy="${arg#*=}";;
"--allow-role="* )
export ALLOW_ROLE="${arg#*=}";;
"--allow-cidr="* )
export ALLOW_CIDR="${arg#*=}";;
"--skip-reboot" )
export SKIP_REBOOT=1;;
* )
if [[ "$arg" == "--"* ]]; then
echo "Invalid option"
fi
esac
done
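Taken together, the new flags permit a fully unattended network install; an illustrative invocation with placeholder values:
./so-setup network --turbo=10.10.10.10 --proxy=http://proxy.example.invalid:3128 --allow-role=a --allow-cidr=192.168.0.0/16 --skip-reboot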
# Begin Installation pre-processing
echo "---- Starting setup at $(date -u) ----" >> $setup_log 2>&1
automated=no
function progress() {
if [ $automated == no ]; then
whiptail --title "Security Onion Install" --gauge 'Please wait while installing' 6 60 0
else
cat >> $setup_log 2>&1
fi
}
if [[ -f automation/$automation && $(basename $automation) == $automation ]]; then
echo "Preselecting variable values based on automated setup: $automation" >> $setup_log 2>&1
source automation/$automation
automated=yes
echo "Checking network configuration" >> $setup_log 2>&1
ip a >> $setup_log 2>&1
attempt=1
attempts=60
ip a | grep "$MNIC:" | grep "state UP" >> $setup_log 2>&1
while [ $? -ne 0 ]; do
ip a >> $setup_log 2>&1
if [ $attempt -gt $attempts ]; then
echo "Network unavailable - setup cannot continue" >> $setup_log 2>&1
exit 1
fi
echo "Waiting for network to come up (attempt $attempt of $attempts)" >> $setup_log 2>&1
attempt=$((attempt + 1))
sleep 10;
ip a | grep "$MNIC:" | grep "state UP" >> $setup_log 2>&1
done
echo "Network is up on $MNIC" >> $setup_log 2>&1
fi
case "$setup_type" in
iso | network) # Accepted values
echo "Beginning Security Onion $setup_type install"
echo "Beginning Security Onion $setup_type install" >> $setup_log 2>&1
;;
*)
echo "Invalid install type, must be 'iso' or 'network'"
echo "Invalid install type, must be 'iso' or 'network'" | tee $setup_log
exit 1
;;
esac
@@ -37,9 +96,8 @@ esac
# Allow execution of SO tools during setup
export PATH=$PATH:../salt/common/tools/sbin
date -u > $setup_log 2>&1
got_root
detect_os
if [ "$OS" == ubuntu ]; then
@@ -48,10 +106,10 @@ fi
setterm -blank 0
if (whiptail_you_sure); then
if [ "$setup_type" == 'iso' ] || (whiptail_you_sure); then
true
else
echo "User cancelled setup." >> $setup_log 2>&1
echo "User cancelled setup." | tee $setup_log
whiptail_cancel
fi
@@ -134,17 +192,21 @@ echo "MINION_ID = $MINION_ID" >> $setup_log 2>&1
minion_type=$(get_minion_type)
# Set any constants needed
# Set any variables needed
set_default_log_size >> $setup_log 2>&1
if [[ $is_helix ]]; then
RULESETUP=ETOPEN
NSMSETUP=BASIC
HNSENSOR=inherit
MASTERUPDATES=0
fi
if [[ $is_helix || ( $is_master && $is_node ) ]]; then
RULESETUP=ETOPEN
NSMSETUP=BASIC
fi
if [[ $is_master && $is_node ]]; then
LSPIPELINEWORKERS=1
LSPIPELINEBATCH=125
@@ -153,6 +215,7 @@ if [[ $is_master && $is_node ]]; then
NIDS=Suricata
BROVERSION=ZEEK
fi
if [[ $is_node ]]; then
CURCLOSEDAYS=30
fi
@@ -195,6 +258,9 @@ fi
if [[ $is_distmaster || ( $is_sensor || $is_node ) && ! $is_eval ]]; then
whiptail_master_updates
if [[ $setup_type == 'network' && $MASTERUPDATES == 1 ]]; then
whiptail_master_updates_warning
fi
fi
if [[ $is_minion ]]; then
@@ -241,6 +307,10 @@ fi
whiptail_make_changes
if [[ -n "$TURBO" ]]; then
use_turbo_proxy
fi
if [[ "$setup_type" == 'iso' ]]; then
# Init networking so rest of install works
set_hostname_iso
@@ -485,17 +555,22 @@ fi
set_progress_str 95 'Verifying setup'
salt-call -l info state.highstate >> $setup_log 2>&1
} | whiptail --title "Hybrid Hunter Install" --gauge "Please wait while installing" 6 60 0
} | progress
success=$(tail -10 $setup_log | grep Failed | awk '{ print $2}')
if [[ "$success" = 0 ]]; then
whiptail_setup_complete
if [[ -n $ALLOW_ROLE && -n $ALLOW_CIDR ]]; then
export IP=$ALLOW_CIDR
so-allow -$ALLOW_ROLE >> $setup_log 2>&1
fi
if [[ $THEHIVE == 1 ]]; then
check_hive_init_then_reboot
else
shutdown -r now
check_hive_init
fi
else
whiptail_setup_failed
fi
if [[ -z $SKIP_REBOOT ]]; then
shutdown -r now
fi

View File

@@ -413,7 +413,6 @@ whiptail_log_size_limit() {
[ -n "$TESTING" ] && return
set_defaul_log_size
log_size_limit=$(whiptail --title "Security Onion Setup" --inputbox \
"Please specify the amount of disk space (in GB) you would like to allocate for Elasticsearch data storage. \
@@ -958,7 +957,7 @@ whiptail_setup_complete() {
[ -n "$TESTING" ] && return
whiptail --title "Security Onion Setup" --msgbox "Finished $install_type install. Press ENTER to reboot." 8 75
whiptail --title "Security Onion Setup" --msgbox "Finished $install_type install. Press Ok to reboot." 8 75
install_cleanup >> $setup_log 2>&1
}
@@ -967,7 +966,7 @@ whiptail_setup_failed() {
[ -n "$TESTING" ] && return
whiptail --title "Security Onion Setup" --msgbox "Install had a problem. Please see $setup_log for details. Press ENTER to reboot." 8 75
whiptail --title "Security Onion Setup" --msgbox "Install had a problem. Please see $setup_log for details. Press Ok to reboot." 8 75
install_cleanup >> $setup_log 2>&1
}
@@ -1027,7 +1026,17 @@ whiptail_master_updates() {
;;
esac
}
whiptail_master_updates_warning() {
[ -n "$TESTING" ] && return
whiptail --title "Security Onion Setup"\
--msgbox "Updating through the master node requires the master to have internet access. Press Ok to continue"\
8 75
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
}
whiptail_node_updates() {
@@ -1048,7 +1057,7 @@ whiptail_you_sure() {
[ -n "$TESTING" ] && return
whiptail --title "Security Onion Setup" --yesno "Are you sure you want to install Security Onion over the internet?" 8 75
whiptail --title "Security Onion Setup" --yesno "Are you sure you want to continue a network install of Security Onion?" 8 75
local exitstatus=$?
return $exitstatus

View File

@@ -15,6 +15,6 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
cd setup
cd setup || exit
./so-setup network
./so-setup network "$@"