diff --git a/install_scripts/disable-checksum-offload.sh b/install_scripts/disable-checksum-offload.sh
index 9cc0b5d5b..32b8d46e6 100644
--- a/install_scripts/disable-checksum-offload.sh
+++ b/install_scripts/disable-checksum-offload.sh
@@ -6,4 +6,4 @@ if [ "$NM_DISPATCHER_ACTION" == "pre-up" ]; then
ethtool -K $DEVICE_IFACE $i off;
done
fi
-fi
\ No newline at end of file
+fi
diff --git a/salt/_modules/needs_restarting.py b/salt/_modules/needs_restarting.py
new file mode 100644
index 000000000..5afb6f02a
--- /dev/null
+++ b/salt/_modules/needs_restarting.py
@@ -0,0 +1,24 @@
+from os import path
+import subprocess
+
+def check():
+
+ os = __grains__['os']
+ retval = 'False'
+
+ if os == 'Ubuntu':
+ if path.exists('/var/run/reboot-required'):
+ retval = 'True'
+
+ elif os == 'CentOS':
+ cmd = 'needs-restarting -r > /dev/null 2>&1'
+
+ try:
+ needs_restarting = subprocess.check_call(cmd, shell=True)
+ except subprocess.CalledProcessError:
+ retval = 'True'
+
+ else:
+ retval = 'Unsupported OS: %s' % os
+
+ return retval
diff --git a/salt/ca/init.sls b/salt/ca/init.sls
index 27344fc7f..407516f6e 100644
--- a/salt/ca/init.sls
+++ b/salt/ca/init.sls
@@ -39,10 +39,10 @@ pki_private_key:
- require:
- file: /etc/pki
-mine.send:
+send_x509_pem_entries_to_mine:
module.run:
- - func: x509.get_pem_entries
- - kwargs:
- glob_path: /etc/pki/ca.crt
+ - mine.send:
+ - func: x509.get_pem_entries
+ - glob_path: /etc/pki/ca.crt
- onchanges:
- x509: /etc/pki/ca.crt
diff --git a/salt/common/init.sls b/salt/common/init.sls
index 1bba4c871..505289bc0 100644
--- a/salt/common/init.sls
+++ b/salt/common/init.sls
@@ -141,6 +141,8 @@ so-core:
- watch:
- file: /opt/so/conf/nginx/nginx.conf
+# If master or eval, install Grafana/Telegraf/Influx
+{% if (grains['role'] == 'so-master' or grains['role'] == 'so-eval') and GRAFANA == 1 %}
# Add Telegraf to monitor all the things.
tgraflogdir:
file.directory:
@@ -213,9 +215,6 @@ so-telegraf:
- /opt/so/conf/telegraf/etc/telegraf.conf
- /opt/so/conf/telegraf/scripts
-# If its a master or eval lets install the back end for now
-{% if grains['role'] == 'so-master' or grains['role'] == 'so-eval' and GRAFANA == 1 %}
-
# Influx DB
influxconfdir:
file.directory:
@@ -316,7 +315,7 @@ grafanaconf:
- source: salt://common/grafana/etc
{% if salt['pillar.get']('mastertab', False) %}
-{%- for SN, SNDATA in salt['pillar.get']('mastertab', {}).iteritems() %}
+{%- for SN, SNDATA in salt['pillar.get']('mastertab', {}).items() %}
dashboard-master:
file.managed:
- name: /opt/so/conf/grafana/grafana_dashboards/master/{{ SN }}-Master.json
@@ -337,7 +336,7 @@ dashboard-master:
{% endif %}
{% if salt['pillar.get']('sensorstab', False) %}
-{%- for SN, SNDATA in salt['pillar.get']('sensorstab', {}).iteritems() %}
+{%- for SN, SNDATA in salt['pillar.get']('sensorstab', {}).items() %}
dashboard-{{ SN }}:
file.managed:
- name: /opt/so/conf/grafana/grafana_dashboards/forward_nodes/{{ SN }}-Sensor.json
@@ -358,7 +357,7 @@ dashboard-{{ SN }}:
{% endif %}
{% if salt['pillar.get']('nodestab', False) %}
-{%- for SN, SNDATA in salt['pillar.get']('nodestab', {}).iteritems() %}
+{%- for SN, SNDATA in salt['pillar.get']('nodestab', {}).items() %}
dashboard-{{ SN }}:
file.managed:
- name: /opt/so/conf/grafana/grafana_dashboards/storage_nodes/{{ SN }}-Node.json
@@ -379,7 +378,7 @@ dashboard-{{ SN }}:
{% endif %}
{% if salt['pillar.get']('evaltab', False) %}
-{%- for SN, SNDATA in salt['pillar.get']('evaltab', {}).iteritems() %}
+{%- for SN, SNDATA in salt['pillar.get']('evaltab', {}).items() %}
dashboard-{{ SN }}:
file.managed:
- name: /opt/so/conf/grafana/grafana_dashboards/eval/{{ SN }}-Node.json
diff --git a/salt/common/nginx/nginx.conf.so-eval b/salt/common/nginx/nginx.conf.so-eval
index 41f455216..b5cf6ef5a 100644
--- a/salt/common/nginx/nginx.conf.so-eval
+++ b/salt/common/nginx/nginx.conf.so-eval
@@ -185,6 +185,18 @@ http {
proxy_set_header Proxy "";
}
+
+ location /cyberchef/ {
+ proxy_pass http://{{ masterip }}:9080/;
+ proxy_read_timeout 90;
+ proxy_connect_timeout 90;
+ proxy_http_version 1.1; # this is essential for chunked responses to work
+ proxy_set_header Host $host;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header Proxy "";
+
+ }
location /soctopus/ {
proxy_pass http://{{ masterip }}:7000/;
diff --git a/salt/common/nginx/nginx.conf.so-master b/salt/common/nginx/nginx.conf.so-master
index 964579a96..265413fa2 100644
--- a/salt/common/nginx/nginx.conf.so-master
+++ b/salt/common/nginx/nginx.conf.so-master
@@ -187,6 +187,18 @@ http {
proxy_set_header Proxy "";
}
+
+ location /cyberchef/ {
+ proxy_pass http://{{ masterip }}:9080/;
+ proxy_read_timeout 90;
+ proxy_connect_timeout 90;
+ proxy_http_version 1.1; # this is essential for chunked responses to work
+ proxy_set_header Host $host;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header Proxy "";
+
+ }
location /soctopus/ {
proxy_pass http://{{ masterip }}:7000/;
diff --git a/salt/cyberchef/init.sls b/salt/cyberchef/init.sls
new file mode 100644
index 000000000..202b15037
--- /dev/null
+++ b/salt/cyberchef/init.sls
@@ -0,0 +1,53 @@
+# Copyright 2014,2015,2016,2017,2018 Security Onion Solutions, LLC
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+# Create the cyberchef group
+cyberchefgroup:
+ group.present:
+ - name: cyberchef
+ - gid: 946
+
+# Add the cyberchef user
+cyberchef:
+ user.present:
+ - uid: 946
+ - gid: 946
+ - home: /opt/so/conf/cyberchef
+
+cyberchefconfdir:
+ file.directory:
+ - name: /opt/so/conf/cyberchef
+ - user: 946
+ - group: 939
+ - makedirs: True
+
+cybercheflog:
+ file.directory:
+ - name: /opt/so/log/cyberchef
+ - user: 946
+ - group: 946
+ - makedirs: True
+
+so-cyberchefimage:
+ cmd.run:
+ - name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-cyberchef:HH1.1.3
+
+so-cyberchef:
+ docker_container.running:
+ - require:
+ - so-cyberchefimage
+ - image: docker.io/soshybridhunter/so-cyberchef:HH1.1.3
+ - port_bindings:
+ - 0.0.0.0:9080:8080
diff --git a/salt/elastalert/files/elastalert_config.yaml b/salt/elastalert/files/elastalert_config.yaml
index 6a918093b..735ccb190 100644
--- a/salt/elastalert/files/elastalert_config.yaml
+++ b/salt/elastalert/files/elastalert_config.yaml
@@ -8,6 +8,11 @@ rules_folder: /etc/elastalert/rules/
# the rules directory - true or false
scan_subdirectories: true
+# Do not disable a rule when an uncaught exception is thrown -
+# This setting should be tweaked once the following issue has been fixed
+# https://github.com/Security-Onion-Solutions/securityonion-saltstack/issues/98
+disable_rules_on_error: false
+
# How often ElastAlert will query Elasticsearch
# The unit can be anything from weeks to seconds
run_every:
diff --git a/salt/elastalert/files/rules/so/nids2hive.yaml b/salt/elastalert/files/rules/so/nids2hive.yaml
index 92de99537..019a0844f 100644
--- a/salt/elastalert/files/rules/so/nids2hive.yaml
+++ b/salt/elastalert/files/rules/so/nids2hive.yaml
@@ -15,7 +15,7 @@ timeframe:
buffer_time:
minutes: 10
allow_buffer_time_overlap: true
-query_key: alert
+query_key: ["alert", "ips"]
realert:
days: 1
@@ -36,11 +36,11 @@ hive_proxies:
hive_alert_config:
title: '{match[alert]}'
- type: 'external'
+ type: 'NIDS'
source: 'SecurityOnion'
description: "`NIDS Dashboard:` \n\n \n\n `IPs: `{match[source_ip]}:{match[source_port]} --> {match[destination_ip]}:{match[destination_port]} \n\n `Signature:` {match[rule_signature]}"
severity: 2
- tags: ['elastalert', 'SecurityOnion', 'NIDS','{match[sid]}']
+ tags: ['{match[sid]}','{match[source_ip]}','{match[destination_ip]}']
tlp: 3
status: 'New'
follow: True
diff --git a/salt/firewall/init.sls b/salt/firewall/init.sls
index c0c1e6d82..b0ff81b00 100644
--- a/salt/firewall/init.sls
+++ b/salt/firewall/init.sls
@@ -276,6 +276,18 @@ enable_master_cortex_9001_{{ip}}:
- position: 1
- save: True
+enable_master_cyberchef_9080_{{ip}}:
+ iptables.insert:
+ - table: filter
+ - chain: DOCKER-USER
+ - jump: ACCEPT
+ - proto: tcp
+ - source: {{ ip }}
+ - dport: 9080
+ - position: 1
+ - save: True
+
+
{% endfor %}
# Make it so all the minions can talk to salt and update etc.
diff --git a/salt/fleet/init.sls b/salt/fleet/init.sls
index e633bef7f..917ee541e 100644
--- a/salt/fleet/init.sls
+++ b/salt/fleet/init.sls
@@ -61,13 +61,13 @@ fleetdbpriv:
so-fleetimage:
cmd.run:
- - name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-fleet:HH1.1.0
+ - name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-fleet:HH1.1.3
so-fleet:
docker_container.running:
- require:
- so-fleetimage
- - image: docker.io/soshybridhunter/so-fleet:HH1.1.0
+ - image: docker.io/soshybridhunter/so-fleet:HH1.1.3
- hostname: so-fleet
- port_bindings:
- 0.0.0.0:8080:8080
@@ -83,6 +83,7 @@ so-fleet:
- KOLIDE_AUTH_JWT_KEY=thisisatest
- KOLIDE_OSQUERY_STATUS_LOG_FILE=/var/log/osquery/status.log
- KOLIDE_OSQUERY_RESULT_LOG_FILE=/var/log/osquery/result.log
+ - KOLIDE_SERVER_URL_PREFIX=/fleet
- binds:
- /etc/pki/fleet.key:/ssl/server.key:ro
- /etc/pki/fleet.crt:/ssl/server.cert:ro
diff --git a/salt/hive/thehive/etc/application.conf b/salt/hive/thehive/etc/application.conf
index 6cc72813e..3b6c89637 100644
--- a/salt/hive/thehive/etc/application.conf
+++ b/salt/hive/thehive/etc/application.conf
@@ -1,5 +1,5 @@
{%- set MASTERIP = salt['pillar.get']('static:masterip', '') %}
-{%- set CORTEXKEY = salt['pillar.get']('static:cortexkey', '') %}
+{%- set CORTEXKEY = salt['pillar.get']('static:cortexorguserkey', '') %}
# Secret Key
# The secret key is used to secure cryptographic functions.
diff --git a/salt/hive/thehive/scripts/cortex_init.sh b/salt/hive/thehive/scripts/cortex_init.sh
index 5d4de730b..506b14be5 100644
--- a/salt/hive/thehive/scripts/cortex_init.sh
+++ b/salt/hive/thehive/scripts/cortex_init.sh
@@ -3,6 +3,9 @@
{%- set CORTEXUSER = salt['pillar.get']('static:cortexuser', '') %}
{%- set CORTEXPASSWORD = salt['pillar.get']('static:cortexpassword', '') %}
{%- set CORTEXKEY = salt['pillar.get']('static:cortexkey', '') %}
+{%- set CORTEXORGNAME = salt['pillar.get']('static:cortexorgname', '') %}
+{%- set CORTEXORGUSER = salt['pillar.get']('static:cortexorguser', '') %}
+{%- set CORTEXORGUSERKEY = salt['pillar.get']('static:cortexorguserkey', '') %}
cortex_init(){
sleep 60
@@ -10,17 +13,34 @@ cortex_init(){
CORTEX_USER="{{CORTEXUSER}}"
CORTEX_PASSWORD="{{CORTEXPASSWORD}}"
CORTEX_KEY="{{CORTEXKEY}}"
+ CORTEX_ORG_NAME="{{CORTEXORGNAME}}"
+ CORTEX_ORG_DESC="{{CORTEXORGNAME}} organization created by Security Onion setup"
+ CORTEX_ORG_USER="{{CORTEXORGUSER}}"
+ CORTEX_ORG_USER_KEY="{{CORTEXORGUSERKEY}}"
SOCTOPUS_CONFIG="/opt/so/saltstack/salt/soctopus/files/SOCtopus.conf"
+
# Migrate DB
curl -v -k -XPOST "https://$CORTEX_IP:/cortex/api/maintenance/migrate"
- # Create intial Cortex user
- curl -v -k "https://$CORTEX_IP/cortex/api/user" -H "Content-Type: application/json" -d "{\"login\" : \"$CORTEX_USER\",\"name\" : \"$CORTEX_USER\",\"roles\" : [\"read\",\"analyze\",\"orgadmin\"],\"preferences\" : \"{}\",\"password\" : \"$CORTEX_PASSWORD\", \"key\": \"$CORTEX_KEY\"}"
+ # Create initial Cortex superadmin
+ curl -v -k "https://$CORTEX_IP/cortex/api/user" -H "Content-Type: application/json" -d "{\"login\" : \"$CORTEX_USER\",\"name\" : \"$CORTEX_USER\",\"roles\" : [\"superadmin\"],\"preferences\" : \"{}\",\"password\" : \"$CORTEX_PASSWORD\", \"key\": \"$CORTEX_KEY\"}"
+
+ # Create user-supplied org
+ curl -k -XPOST -H "Authorization: Bearer $CORTEX_KEY" -H "Content-Type: application/json" "https://$CORTEX_IP/cortex/api/organization" -d "{ \"name\": \"$CORTEX_ORG_NAME\",\"description\": \"$CORTEX_ORG_DESC\",\"status\": \"Active\"}"
+
+ # Create user-supplied org user
+ curl -k -XPOST -H "Authorization: Bearer $CORTEX_KEY" -H "Content-Type: application/json" "https://$CORTEX_IP/cortex/api/user" -d "{\"name\": \"$CORTEX_ORG_USER\",\"roles\": [\"read\",\"analyze\",\"orgadmin\"],\"organization\": \"$CORTEX_ORG_NAME\",\"login\": \"$CORTEX_ORG_USER\",\"key\": \"$CORTEX_ORG_USER_KEY\" }"
# Enable URLScan.io Analyzer
- curl -v -k -XPOST -H "Authorization: Bearer $CORTEX_KEY" -H "Content-Type: application/json" "https://$CORTEX_IP/cortex/api/organization/analyzer/Urlscan_io_Search_0_1_0" -d '{"name":"Urlscan_io_Search_0_1_0","configuration":{"auto_extract_artifacts":false,"check_tlp":true,"max_tlp":2}}'
-
+ curl -v -k -XPOST -H "Authorization: Bearer $CORTEX_ORG_USER_KEY" -H "Content-Type: application/json" "https://$CORTEX_IP/cortex/api/organization/analyzer/Urlscan_io_Search_0_1_0" -d '{"name":"Urlscan_io_Search_0_1_0","configuration":{"auto_extract_artifacts":false,"check_tlp":true,"max_tlp":2}}'
+
+ # Enable Cert PassiveDNS Analyzer
+ curl -v -k -XPOST -H "Authorization: Bearer $CORTEX_ORG_USER_KEY" -H "Content-Type: application/json" "https://$CORTEX_IP/cortex/api/organization/analyzer/CERTatPassiveDNS_2_0" -d '{"name":"CERTatPassiveDNS_2_0","configuration":{"auto_extract_artifacts":false,"check_tlp":true,"max_tlp":2, "limit": 100}}'
+
+ # Revoke $CORTEX_USER key
+ curl -k -XDELETE -H "Authorization: Bearer $CORTEX_KEY" "https://$CORTEX_IP/cortex/api/user/$CORTEX_USER/key"
+
# Update SOCtopus config with apikey value
#sed -i "s/cortex_key = .*/cortex_key = $CORTEX_KEY/" $SOCTOPUS_CONFIG
diff --git a/salt/logstash/conf/conf.enabled.txt.so-eval b/salt/logstash/conf/conf.enabled.txt.so-eval
index dfc3ea421..d125fc829 100644
--- a/salt/logstash/conf/conf.enabled.txt.so-eval
+++ b/salt/logstash/conf/conf.enabled.txt.so-eval
@@ -13,7 +13,7 @@
#/usr/share/logstash/pipeline.so/0002_input_windows_json.conf
#/usr/share/logstash/pipeline.so/0003_input_syslog.conf
#/usr/share/logstash/pipeline.so/0005_input_suricata.conf
-/usr/share/logstash/pipeline.dynamic/0006_input_beats.conf
+#/usr/share/logstash/pipeline.dynamic/0006_input_beats.conf
/usr/share/logstash/pipeline.so/0007_input_import.conf
/usr/share/logstash/pipeline.dynamic/0010_input_hhbeats.conf
#/usr/share/logstash/pipeline.so/1000_preprocess_log_elapsed.conf
diff --git a/salt/logstash/files/dynamic/0006_input_beats.conf b/salt/logstash/files/dynamic/0006_input_beats.conf
index 1a6b66bbe..a7140f859 100644
--- a/salt/logstash/files/dynamic/0006_input_beats.conf
+++ b/salt/logstash/files/dynamic/0006_input_beats.conf
@@ -9,23 +9,6 @@ input {
}
}
filter {
- if [type] == "ids" or [type] =~ "bro" {
- mutate {
- rename => { "host" => "beat_host" }
- remove_tag => ["beat"]
- add_field => { "sensor_name" => "%{[beat][name]}" }
- add_field => { "syslog-host_from" => "%{[beat][name]}" }
- remove_field => [ "beat", "prospector", "input", "offset" ]
- }
- }
- if [type] =~ "ossec" {
- mutate {
- rename => { "host" => "beat_host" }
- remove_tag => ["beat"]
- add_field => { "syslog-host_from" => "%{[beat][name]}" }
- remove_field => [ "beat", "prospector", "input", "offset" ]
- }
- }
if [type] == "osquery" {
mutate {
rename => { "host" => "beat_host" }
diff --git a/salt/master/files/registry/scripts/so-docker-download.sh b/salt/master/files/registry/scripts/so-docker-download.sh
new file mode 100644
index 000000000..33b5065ae
--- /dev/null
+++ b/salt/master/files/registry/scripts/so-docker-download.sh
@@ -0,0 +1,48 @@
+#!/bin/bash
+
+MASTER={{ MASTER }}
+VERSION="HH1.1.3"
+TRUSTED_CONTAINERS=( \
+"so-core:$VERSION" \
+"so-cyberchef:$VERSION" \
+"so-acng:$VERSION" \
+"so-sensoroni:$VERSION" \
+"so-fleet:$VERSION" \
+"so-soctopus:$VERSION" \
+"so-steno:$VERSION" \
+"so-playbook:$VERSION" \
+"so-thehive-cortex:$VERSION" \
+"so-thehive:$VERSION" \
+"so-thehive-es:$VERSION" \
+"so-wazuh:$VERSION" \
+"so-kibana:$VERSION" \
+"so-auth-ui:$VERSION" \
+"so-auth-api:$VERSION" \
+"so-elastalert:$VERSION" \
+"so-navigator:$VERSION" \
+"so-filebeat:$VERSION" \
+"so-suricata:$VERSION" \
+"so-logstash:$VERSION" \
+"so-bro:$VERSION" \
+"so-idstools:$VERSION" \
+"so-fleet-launcher:$VERSION" \
+"so-freqserver:$VERSION" \
+"so-influxdb:$VERSION" \
+"so-grafana:$VERSION" \
+"so-telegraf:$VERSION" \
+"so-redis:$VERSION" \
+"so-mysql:$VERSION" \
+"so-curator:$VERSION" \
+"so-elasticsearch:$VERSION" \
+"so-domainstats:$VERSION" \
+"so-tcpreplay:$VERSION" \
+)
+
+for i in "${TRUSTED_CONTAINERS[@]}"
+do
+ # Pull down the trusted docker image
+ docker pull --disable-content-trust=false docker.io/soshybridhunter/$i
+ # Tag it with the new registry destination
+ docker tag soshybridhunter/$i $MASTER:5000/soshybridhunter/$i
+ docker push $MASTER:5000/soshybridhunter/$i
+done
diff --git a/salt/master/init.sls b/salt/master/init.sls
index 1a7efe744..c6e11279d 100644
--- a/salt/master/init.sls
+++ b/salt/master/init.sls
@@ -17,6 +17,15 @@
{% if masterproxy == 1 %}
+socore_own_saltstack:
+ file.directory:
+ - name: /opt/so/saltstack
+ - user: socore
+ - group: socore
+ - recurse:
+ - user
+ - group
+
# Create the directories for apt-cacher-ng
aptcacherconfdir:
file.directory:
diff --git a/salt/motd/files/package_update_reboot_required.jinja b/salt/motd/files/package_update_reboot_required.jinja
new file mode 100644
index 000000000..3a1fd1e9e
--- /dev/null
+++ b/salt/motd/files/package_update_reboot_required.jinja
@@ -0,0 +1,23 @@
+{% set needs_restarting_check = salt['mine.get']('*', 'needs_restarting.check', tgt_type='glob') -%}
+
+{%- if needs_restarting_check %}
+ {%- set minions_need_restarted = [] %}
+
+ {%- for minion, need_restarted in needs_restarting_check | dictsort() %}
+ {%- if need_restarted == 'True' %}
+ {% do minions_need_restarted.append(minion) %}
+ {%- endif %}
+ {%- endfor -%}
+
+ {%- if minions_need_restarted | length > 0 %}
+*****************************************************************************************
+* The following nodes in your Security Onion grid need restarted due to package updates *
+*****************************************************************************************
+
+ {% for minion in minions_need_restarted -%}
+ {{ minion }}
+ {% endfor -%}
+
+ {%- endif -%}
+
+{%- endif -%}
diff --git a/salt/motd/init.sls b/salt/motd/init.sls
new file mode 100644
index 000000000..4dae979bf
--- /dev/null
+++ b/salt/motd/init.sls
@@ -0,0 +1,5 @@
+package_update_reboot_required_motd:
+ file.managed:
+ - name: /etc/motd
+ - source: salt://motd/files/package_update_reboot_required.jinja
+ - template: jinja
diff --git a/salt/patch/needs_restarting.sls b/salt/patch/needs_restarting.sls
new file mode 100644
index 000000000..f60909d22
--- /dev/null
+++ b/salt/patch/needs_restarting.sls
@@ -0,0 +1,5 @@
+needs_restarting:
+ module.run:
+ - mine.send:
+ - func: needs_restarting.check
+ - order: last
diff --git a/salt/patch/os/init.sls b/salt/patch/os/init.sls
new file mode 100644
index 000000000..7f2adc65b
--- /dev/null
+++ b/salt/patch/os/init.sls
@@ -0,0 +1,10 @@
+include:
+ - patch.needs_restarting
+{% if grains.os == "CentOS" %}
+ - yum.packages
+{% endif %}
+
+patch_os:
+ pkg.uptodate:
+ - name: patch_os
+ - refresh: True
diff --git a/salt/patch/os/schedule.sls b/salt/patch/os/schedule.sls
new file mode 100644
index 000000000..a91e61dfe
--- /dev/null
+++ b/salt/patch/os/schedule.sls
@@ -0,0 +1,76 @@
+{% if salt['pillar.get']('patch:os:schedule_name') %}
+ {% set patch_os_pillar = salt['pillar.get']('patch:os') %}
+ {% set schedule_name = patch_os_pillar.schedule_name %}
+ {% set splay = patch_os_pillar.get('splay', 300) %}
+
+ {% if schedule_name != 'manual' and schedule_name != 'auto' %}
+ {% import_yaml "patch/os/schedules/"~schedule_name~".yml" as os_schedule %}
+
+ {% if patch_os_pillar.enabled %}
+
+patch_os_schedule:
+ schedule.present:
+ - function: state.sls
+ - job_args:
+ - patch.os
+ - when:
+ {% for days in os_schedule.patch.os.schedule %}
+ {% for day, times in days.items() %}
+ {% for time in times %}
+ - {{day}} {{time}}
+ {% endfor %}
+ {% endfor %}
+ {% endfor %}
+ - splay: {{splay}}
+ - return_job: True
+
+ {% else %}
+
+disable_patch_os_schedule:
+ schedule.disabled:
+ - name: patch_os_schedule
+
+ {% endif %}
+
+
+ {% elif schedule_name == 'auto' %}
+
+ {% if patch_os_pillar.enabled %}
+
+patch_os_schedule:
+ schedule.present:
+ - function: state.sls
+ - job_args:
+ - patch.os
+ - hours: 8
+ - splay: {{splay}}
+ - return_job: True
+
+ {% else %}
+
+disable_patch_os_schedule:
+ schedule.disabled:
+ - name: patch_os_schedule
+
+ {% endif %}
+
+ {% elif schedule_name == 'manual' %}
+
+remove_patch_os_schedule:
+ schedule.absent:
+ - name: patch_os_schedule
+
+ {% endif %}
+
+{% else %}
+
+no_patch_os_schedule_name_set:
+ test.fail_without_changes:
+ - name: "Set a pillar value for patch:os:schedule_name in this minion's .sls file. If an OS patch schedule is not listed as enabled in show_schedule output below, then OS patches will need to be applied manually until this is corrected."
+
+show_patch_os_schedule:
+ module.run:
+ - schedule.is_enabled:
+ - name: patch_os_schedule
+
+{% endif %}
diff --git a/salt/patch/os/schedules/example_schedule.yml b/salt/patch/os/schedules/example_schedule.yml
new file mode 100644
index 000000000..b2748ab09
--- /dev/null
+++ b/salt/patch/os/schedules/example_schedule.yml
@@ -0,0 +1,10 @@
+patch:
+ os:
+ schedule:
+ - Tuesday:
+ - '15:00'
+ - Thursday:
+ - '03:00'
+ - Saturday:
+ - '01:00'
+ - '15:00'
diff --git a/salt/pcap/init.sls b/salt/pcap/init.sls
index ed23cf308..a49dc00e3 100644
--- a/salt/pcap/init.sls
+++ b/salt/pcap/init.sls
@@ -96,13 +96,13 @@ stenolog:
so-stenoimage:
cmd.run:
- - name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-steno:HH1.1.1
+ - name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-steno:HH1.1.3
so-steno:
docker_container.running:
- require:
- so-stenoimage
- - image: docker.io/soshybridhunter/so-steno:HH1.1.1
+ - image: docker.io/soshybridhunter/so-steno:HH1.1.3
- network_mode: host
- privileged: True
- port_bindings:
diff --git a/salt/playbook/files/redmine.db b/salt/playbook/files/redmine.db
index fdf24eae4..7d84b5856 100644
Binary files a/salt/playbook/files/redmine.db and b/salt/playbook/files/redmine.db differ
diff --git a/salt/playbook/init.sls b/salt/playbook/init.sls
index ef66966f3..bc22b60d4 100644
--- a/salt/playbook/init.sls
+++ b/salt/playbook/init.sls
@@ -11,9 +11,9 @@ playbookdb:
playbookwebhook:
module.run:
- - name: sqlite3.modify
- - db: /opt/so/conf/playbook/redmine.db
- - sql: "update webhooks set url = 'http://{{MASTERIP}}:7000/playbook/webhook' where project_id = 1"
+ - sqlite3.modify:
+ - db: /opt/so/conf/playbook/redmine.db
+ - sql: "update webhooks set url = 'http://{{MASTERIP}}:7000/playbook/webhook' where project_id = 1"
navigatorconfig:
file.managed:
@@ -26,13 +26,13 @@ navigatorconfig:
so-playbookimage:
cmd.run:
- - name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-playbook:HH1.1.1
+ - name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-playbook:HH1.1.3
so-playbook:
docker_container.running:
- require:
- so-playbookimage
- - image: docker.io/soshybridhunter/so-playbook:HH1.1.1
+ - image: docker.io/soshybridhunter/so-playbook:HH1.1.3
- hostname: playbook
- name: so-playbook
- binds:
diff --git a/salt/sensoroni/init.sls b/salt/sensoroni/init.sls
index 245c34344..19fcd8b4a 100644
--- a/salt/sensoroni/init.sls
+++ b/salt/sensoroni/init.sls
@@ -29,19 +29,19 @@ sensoronisync:
so-sensoroniimage:
cmd.run:
- - name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-sensoroni:HH1.1.1
+ - name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-sensoroni:HH1.1.3
so-sensoroni:
docker_container.running:
- require:
- so-sensoroniimage
- - image: docker.io/soshybridhunter/so-sensoroni:HH1.1.1
+ - image: docker.io/soshybridhunter/so-sensoroni:HH1.1.3
- hostname: sensoroni
- name: so-sensoroni
- binds:
- /nsm/sensoroni/jobs:/opt/sensoroni/jobs:rw
- /opt/so/conf/sensoroni/sensoroni.json:/opt/sensoroni/sensoroni.json:ro
- - /opt/so/log/sensoroni/:/opt/sensoroni/log/:rw
+ - /opt/so/log/sensoroni/:/opt/sensoroni/logs/:rw
- port_bindings:
- 0.0.0.0:9822:9822
- watch:
diff --git a/salt/soctopus/files/SOCtopus.conf b/salt/soctopus/files/SOCtopus.conf
index f1d311602..dd32507ef 100644
--- a/salt/soctopus/files/SOCtopus.conf
+++ b/salt/soctopus/files/SOCtopus.conf
@@ -50,4 +50,4 @@ playbook_url = http://{{ip}}:3200/playbook
playbook_key = a4a34538782804adfcb8dfae96262514ad70c37c
[log]
-logfile = /tmp/soctopus.log
+logfile = /var/log/SOCtopus/soctopus.log
diff --git a/salt/soctopus/files/templates/generic.template b/salt/soctopus/files/templates/generic.template
index 992db3fa9..e278afa2c 100644
--- a/salt/soctopus/files/templates/generic.template
+++ b/salt/soctopus/files/templates/generic.template
@@ -1,23 +1,6 @@
{% set es = salt['pillar.get']('static:masterip', '') %}
{% set hivehost = salt['pillar.get']('static:masterip', '') %}
{% set hivekey = salt['pillar.get']('static:hivekey', '') %}
-es_host: {{es}}
-es_port: 9200
-name: Alert-Name
-type: frequency
-index: "*:logstash-*"
-num_events: 1
-timeframe:
- minutes: 10
-buffer_time:
- minutes: 10
-allow_buffer_time_overlap: true
-
-filter:
-- query:
- query_string:
- query: 'select from test'
-
alert: modules.so.thehive.TheHiveAlerter
hive_connection:
@@ -30,11 +13,11 @@ hive_proxies:
hive_alert_config:
title: '{rule[name]}'
- type: 'external'
+ type: 'playbook'
source: 'SecurityOnion'
- description: '`Data:` {match[message]}'
+ description: "`Play:` https://{{es}}/playbook/issues/6000 \n\n `View Event:` \n\n `Raw Data:` {match[message]}"
severity: 2
- tags: ['elastalert', 'SecurityOnion']
+ tags: ['playbook']
tlp: 3
status: 'New'
follow: True
diff --git a/salt/soctopus/files/templates/osquery.template b/salt/soctopus/files/templates/osquery.template
index 1e85a3182..5f1c6961a 100644
--- a/salt/soctopus/files/templates/osquery.template
+++ b/salt/soctopus/files/templates/osquery.template
@@ -1,23 +1,6 @@
{% set es = salt['pillar.get']('static:masterip', '') %}
{% set hivehost = salt['pillar.get']('static:masterip', '') %}
{% set hivekey = salt['pillar.get']('static:hivekey', '') %}
-es_host: {{es}}
-es_port: 9200
-name: Alert-Name
-type: frequency
-index: "*:logstash-*"
-num_events: 1
-timeframe:
- minutes: 10
-buffer_time:
- minutes: 10
-allow_buffer_time_overlap: true
-
-filter:
-- query:
- query_string:
- query: 'select from test'
-
alert: modules.so.thehive.TheHiveAlerter
hive_connection:
@@ -28,20 +11,22 @@ hive_proxies:
http: ''
https: ''
-hive_alert_config:
- title: '{rule[name]} -- {match[osquery][hostname]} -- {match[osquery][name]}'
- type: 'external'
- source: 'SecurityOnion'
- description: '`Hostname:` __{match[osquery][hostname]}__ `Live Query:`__[Pivot Link](https://{{es}}/fleet/queries/new?host_uuids={match[osquery][LiveQuery]})__ `Pack:` __{match[osquery][name]}__ `Data:` {match[osquery][columns]}'
- severity: 2
- tags: ['elastalert', 'SecurityOnion']
- tlp: 3
- status: 'New'
- follow: True
- caseTemplate: '5000'
-
hive_observable_data_mapping:
- ip: '{match[osquery][EndpointIP1]}'
- ip: '{match[osquery][EndpointIP2]}'
- other: '{match[osquery][hostIdentifier]}'
- other: '{match[osquery][hostname]}'
+
+hive_alert_config:
+ title: '{rule[name]} -- {match[osquery][hostname]} -- {match[osquery][name]}'
+ type: 'osquery'
+ source: 'SecurityOnion'
+ description: "`Play:` https://{{es}}/playbook/issues/6000 \n\n `View Event:` \n\n `Hostname:` __{match[osquery][hostname]}__ `Live Query:`__[Pivot Link](https://{{es}}/fleet/queries/new?host_uuids={match[osquery][LiveQuery]})__ `Pack:` __{match[osquery][name]}__ `Data:` {match[osquery][columns]}"
+ severity: 2
+ tags: ['playbook','osquery']
+ tlp: 3
+ status: 'New'
+ follow: True
+ caseTemplate: '5000'
+
+
diff --git a/salt/soctopus/init.sls b/salt/soctopus/init.sls
index ebfbe3224..578789a76 100644
--- a/salt/soctopus/init.sls
+++ b/salt/soctopus/init.sls
@@ -13,6 +13,12 @@ soctopussync:
- group: 939
- template: jinja
+soctopuslogdir:
+ file.directory:
+ - name: /opt/so/log/soctopus
+ - user: 939
+ - group: 939
+
playbookrulesdir:
file.directory:
- name: /opt/so/rules/elastalert/playbook
@@ -40,17 +46,18 @@ navigatordefaultlayer:
so-soctopusimage:
cmd.run:
- - name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-soctopus:HH1.1.1
+ - name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-soctopus:HH1.1.3
so-soctopus:
docker_container.running:
- require:
- so-soctopusimage
- - image: docker.io/soshybridhunter/so-soctopus:HH1.1.1
+ - image: docker.io/soshybridhunter/so-soctopus:HH1.1.3
- hostname: soctopus
- name: so-soctopus
- binds:
- /opt/so/conf/soctopus/SOCtopus.conf:/SOCtopus/SOCtopus.conf:ro
+ - /opt/so/log/soctopus/:/var/log/SOCtopus/:rw
- /opt/so/rules/elastalert/playbook:/etc/playbook-rules:rw
- /opt/so/conf/playbook/nav_layer_playbook.json:/etc/playbook/nav_layer_playbook.json:rw
- port_bindings:
diff --git a/salt/top.sls b/salt/top.sls
index cf5d47699..a2662a89b 100644
--- a/salt/top.sls
+++ b/salt/top.sls
@@ -5,6 +5,11 @@
{%- set THEHIVE = salt['pillar.get']('master:thehive', '0') -%}
{%- set PLAYBOOK = salt['pillar.get']('master:playbook', '0') -%}
base:
+ '*':
+ - patch.os.schedule
+ - patch.needs_restarting
+ - motd
+
'G@role:so-sensor':
- ca
- ssl
@@ -40,6 +45,7 @@ base:
- suricata
- bro
- curator
+ - cyberchef
- elastalert
{%- if OSQUERY != 0 %}
- fleet
@@ -66,6 +72,7 @@ base:
- ca
- ssl
- common
+ - cyberchef
- sensoroni
- firewall
- master
diff --git a/salt/utility/bin/crossthestreams.sh b/salt/utility/bin/crossthestreams.sh
index b9c8f6c1d..3cd8b005c 100644
--- a/salt/utility/bin/crossthestreams.sh
+++ b/salt/utility/bin/crossthestreams.sh
@@ -31,6 +31,6 @@ echo "Applying cross cluster search config..."
# Add all the storage nodes to cross cluster searching.
-{%- for SN, SNDATA in salt['pillar.get']('nodestab', {}).iteritems() %}
+{%- for SN, SNDATA in salt['pillar.get']('nodestab', {}).items() %}
curl -XPUT http://{{ ES }}:9200/_cluster/settings -H'Content-Type: application/json' -d '{"persistent": {"search": {"remote": {"{{ SN }}": {"skip_unavailable": "true", "seeds": ["{{ SNDATA.ip }}:9300"]}}}}}'
{%- endfor %}
diff --git a/salt/yum/packages.sls b/salt/yum/packages.sls
new file mode 100644
index 000000000..4c773d0e9
--- /dev/null
+++ b/salt/yum/packages.sls
@@ -0,0 +1,3 @@
+install_yum_utils:
+ pkg.installed:
+ - name: yum-utils
diff --git a/setup/functions.sh b/setup/functions.sh
new file mode 100644
index 000000000..184750d61
--- /dev/null
+++ b/setup/functions.sh
@@ -0,0 +1,1125 @@
+# Functions
+
+# Accept this host's own salt minion key on the local salt-master.
+# Globals read: SETUPLOG (log file path), MINION_ID (key name to accept).
+accept_salt_key_local() {
+ echo "Accept the key locally on the master" >> $SETUPLOG 2>&1
+ # Accept the key locally on the master
+ salt-key -ya $MINION_ID
+
+}
+
+# Re-register this minion's key with a remote master over ssh:
+# delete any stale key, apply the local ca state (regenerates certs/keys),
+# then accept the freshly submitted key on the master.
+# Globals read: SETUPLOG, MSRV (master host), MINION_ID.
+accept_salt_key_remote() {
+ echo "Accept the key remotely on the master" >> $SETUPLOG 2>&1
+ # Delete the key just in case.
+ ssh -i /root/.ssh/so.key socore@$MSRV sudo salt-key -d $MINION_ID -y
+ salt-call state.apply ca
+ ssh -i /root/.ssh/so.key socore@$MSRV sudo salt-key -a $MINION_ID -y
+
+}
+
+# Prompt the operator for the master server's IP address (stored in MSRVIP).
+# NOTE(review): despite the name and log message, no DNS-resolution check or
+# /etc/hosts write happens here — presumably that is done by a caller; confirm.
+add_master_hostfile() {
+ echo "Checking if I can resolve master. If not add to hosts file" >> $SETUPLOG 2>&1
+ # Pop up an input to get the IP address
+ MSRVIP=$(whiptail --title "Security Onion Setup" --inputbox \
+ "Enter your Master Server IP Address" 10 60 X.X.X.X 3>&1 1>&2 2>&3)
+
+}
+
+add_socore_user_master() {
+
+ echo "Add socore on the master" >>~/sosetup.log 2>&1
+ # Add user "socore" to the master. This will be for things like accepting keys.
+ if [ $OS == 'centos' ]; then
+ local ADDUSER=adduser
+ else
+ local ADDUSER=useradd
+ fi
+ groupadd --gid 939 socore
+ $ADDUSER --uid 939 --gid 939 --home-dir /opt/so socore
+ # Set the password for socore that we got during setup
+ echo socore:$COREPASS1 | chpasswd --crypt-method=SHA512
+
+}
+
+# Create the "socore" user/group (uid/gid 939) on a non-master node.
+# NOTE(review): relies on the global ADDUSER being set by saltify();
+# the ADDUSER in add_socore_user_master is 'local' and does NOT carry over.
+add_socore_user_notmaster() {
+ echo "Add socore user on non master" >> $SETUPLOG 2>&1
+ # Add socore user to the non master system. Probably not a bad idea to make system user
+ groupadd --gid 939 socore
+ $ADDUSER --uid 939 --gid 939 --home-dir /opt/so --no-create-home socore
+
+}
+
+# Create an auth pillar so that passwords survive re-install
+# Writes mysql/fleet passwords to /opt/so/saltstack/pillar/auth.sls,
+# but only on first install (file absent); an existing pillar is kept.
+auth_pillar(){
+
+ if [ ! -f /opt/so/saltstack/pillar/auth.sls ]; then
+ echo "Creating Auth Pillar" >> $SETUPLOG 2>&1
+ mkdir -p /opt/so/saltstack/pillar
+ echo "auth:" >> /opt/so/saltstack/pillar/auth.sls
+ echo " mysql: $MYSQLPASS" >> /opt/so/saltstack/pillar/auth.sls
+ echo " fleet: $FLEETPASS" >> /opt/so/saltstack/pillar/auth.sls
+ fi
+
+}
+
+# Enable Bro Logs
+# Write pillar/brologs.sls listing the Bro/Zeek logs to enable.
+# ADVANCED master setup uses the operator-chosen BLOGS array (quotes
+# stripped); otherwise the full default log list is written.
+bro_logs_enabled() {
+ echo "Enabling Bro Logs" >> $SETUPLOG 2>&1
+
+ echo "brologs:" > pillar/brologs.sls
+ echo " enabled:" >> pillar/brologs.sls
+
+ if [ $MASTERADV == 'ADVANCED' ]; then
+ for BLOG in ${BLOGS[@]}; do
+ echo " - $BLOG" | tr -d '"' >> pillar/brologs.sls
+ done
+ else
+ echo " - conn" >> pillar/brologs.sls
+ echo " - dce_rpc" >> pillar/brologs.sls
+ echo " - dhcp" >> pillar/brologs.sls
+ echo " - dhcpv6" >> pillar/brologs.sls
+ echo " - dnp3" >> pillar/brologs.sls
+ echo " - dns" >> pillar/brologs.sls
+ echo " - dpd" >> pillar/brologs.sls
+ echo " - files" >> pillar/brologs.sls
+ echo " - ftp" >> pillar/brologs.sls
+ echo " - http" >> pillar/brologs.sls
+ echo " - intel" >> pillar/brologs.sls
+ echo " - irc" >> pillar/brologs.sls
+ echo " - kerberos" >> pillar/brologs.sls
+ echo " - modbus" >> pillar/brologs.sls
+ echo " - mqtt" >> pillar/brologs.sls
+ echo " - notice" >> pillar/brologs.sls
+ echo " - ntlm" >> pillar/brologs.sls
+ echo " - openvpn" >> pillar/brologs.sls
+ echo " - pe" >> pillar/brologs.sls
+ echo " - radius" >> pillar/brologs.sls
+ echo " - rfb" >> pillar/brologs.sls
+ echo " - rdp" >> pillar/brologs.sls
+ echo " - signatures" >> pillar/brologs.sls
+ echo " - sip" >> pillar/brologs.sls
+ echo " - smb_files" >> pillar/brologs.sls
+ echo " - smb_mapping" >> pillar/brologs.sls
+ echo " - smtp" >> pillar/brologs.sls
+ echo " - snmp" >> pillar/brologs.sls
+ echo " - software" >> pillar/brologs.sls
+ echo " - ssh" >> pillar/brologs.sls
+ echo " - ssl" >> pillar/brologs.sls
+ echo " - syslog" >> pillar/brologs.sls
+ echo " - telnet" >> pillar/brologs.sls
+ echo " - tunnel" >> pillar/brologs.sls
+ echo " - weird" >> pillar/brologs.sls
+ echo " - mysql" >> pillar/brologs.sls
+ echo " - socks" >> pillar/brologs.sls
+ echo " - x509" >> pillar/brologs.sls
+ fi
+}
+
+# Derive LBPROCS (Bro/Zeek load-balancing worker count) from CPUCORES:
+# half the cores minus one, floored at 1.
+calculate_useable_cores() {
+
+ # Calculate reasonable core usage
+ local CORES4BRO=$(( $CPUCORES/2 - 1 ))
+ # NOTE(review): CORES4BRO is already an integer, so printf %.0f is a no-op
+ # formatting step here.
+ LBPROCSROUND=$(printf "%.0f\n" $CORES4BRO)
+ # We don't want it to be 0
+ if [ "$LBPROCSROUND" -lt 1 ]; then
+ LBPROCS=1
+ else
+ LBPROCS=$LBPROCSROUND
+ fi
+
+}
+
+# Configure the salt-minion to run a highstate automatically at service start.
+checkin_at_boot() {
+ echo "Enabling checkin at boot" >> $SETUPLOG 2>&1
+ echo "startup_states: highstate" >> /etc/salt/minion
+}
+
+# Poll (1s intervals, up to MAX_WAIT attempts) for TheHive's init marker file,
+# then stop/remove its container and reboot the host.
+# Exits with status 5 if the marker never appears.
+check_hive_init_then_reboot() {
+ WAIT_STEP=0
+ MAX_WAIT=100
+ until [ -f /opt/so/state/thehive.txt ] ; do
+ WAIT_STEP=$(( ${WAIT_STEP} + 1 ))
+ echo "Waiting on the_hive to init...Attempt #$WAIT_STEP"
+ if [ ${WAIT_STEP} -gt ${MAX_WAIT} ]; then
+ echo "ERROR: We waited ${MAX_WAIT} seconds but the_hive is not working."
+ exit 5
+ fi
+ sleep 1s;
+ done
+ docker stop so-thehive
+ docker rm so-thehive
+ shutdown -r now
+}
+
+# Verify the two socore password entries match; sets SCMATCH=yes on success,
+# otherwise shows the mismatch dialog (whiptail_passwords_dont_match).
+check_socore_pass() {
+
+ if [ $COREPASS1 == $COREPASS2 ]; then
+ SCMATCH=yes
+ else
+ whiptail_passwords_dont_match
+ fi
+
+}
+
+# Give the socore user ownership of the whole /opt/so tree on the master.
+chown_salt_master() {
+
+ echo "Chown the salt dirs on the master for socore" >> $SETUPLOG 2>&1
+ chown -R socore:socore /opt/so
+
+}
+
+# Remove a stale cached master public key (left by a previous install) and
+# restart the minion so it re-fetches the new master's key.
+clear_master() {
+ # Clear out the old master public key in case this is a re-install.
+ # This only happens if you re-install the master.
+ if [ -f /etc/salt/pki/minion/minion_master.pub ]; then
+ echo "Clearing old master key" >> $SETUPLOG 2>&1
+ rm /etc/salt/pki/minion/minion_master.pub
+ service salt-minion restart
+ fi
+
+}
+
+# Write /etc/salt/grains and /etc/salt/minion for this host, then restart
+# the minion. $1 is the node type ('master', 'eval', 'sensor', ...); master
+# and eval point at themselves and get mysql connection settings, everything
+# else points at $MSRV. A pre-existing auth pillar supplies the old mysql
+# password so re-installs keep working.
+configure_minion() {
+
+ # You have to pass the TYPE to this function so it knows if its a master or not
+ local TYPE=$1
+ echo "Configuring minion type as $TYPE" >> $SETUPLOG 2>&1
+ touch /etc/salt/grains
+ echo "role: so-$TYPE" > /etc/salt/grains
+ if [ $TYPE == 'master' ] || [ $TYPE == 'eval' ]; then
+ echo "master: $HOSTNAME" > /etc/salt/minion
+ echo "id: $MINION_ID" >> /etc/salt/minion
+ echo "mysql.host: '$MAINIP'" >> /etc/salt/minion
+ echo "mysql.port: 3306" >> /etc/salt/minion
+ echo "mysql.user: 'root'" >> /etc/salt/minion
+ if [ ! -f /opt/so/saltstack/pillar/auth.sls ]; then
+ echo "mysql.pass: '$MYSQLPASS'" >> /etc/salt/minion
+ else
+ # Re-install: reuse the mysql password recorded in the auth pillar.
+ OLDPASS=$(cat /opt/so/saltstack/pillar/auth.sls | grep mysql | awk {'print $2'})
+ echo "mysql.pass: '$OLDPASS'" >> /etc/salt/minion
+ fi
+ else
+ echo "master: $MSRV" > /etc/salt/minion
+ echo "id: $MINION_ID" >> /etc/salt/minion
+
+ fi
+
+ # Opt in to the new module.run syntax (silences deprecation behavior).
+ echo "use_superseded:" >> /etc/salt/minion
+ echo " - module.run" >> /etc/salt/minion
+
+ service salt-minion restart
+
+}
+
+# Install the bundled salt-master config template and restart the service.
+copy_master_config() {
+
+ # Copy the master config template to the proper directory
+ cp files/master /etc/salt/master
+ # Restart the service so it picks up the changes -TODO Enable service on CentOS
+ service salt-master restart
+
+}
+
+# Ship the staged saltstack files from $TMP to /opt/so/saltstack:
+# locally via rsync on master/eval installs, via scp to $MSRV otherwise.
+copy_minion_tmp_files() {
+
+ if [ $INSTALLTYPE == 'MASTERONLY' ] || [ $INSTALLTYPE == 'EVALMODE' ]; then
+ echo "rsyncing all files in $TMP to /opt/so/saltstack" >> $SETUPLOG 2>&1
+ rsync -a -v $TMP/ /opt/so/saltstack/ >> $SETUPLOG 2>&1
+ else
+ echo "scp all files in $TMP to master /opt/so/saltstack" >> $SETUPLOG 2>&1
+ scp -prv -i /root/.ssh/so.key $TMP/* socore@$MSRV:/opt/so/saltstack >> $SETUPLOG 2>&1
+ fi
+
+ }
+
+# Generate a dedicated passphrase-less ssh key (/root/.ssh/so.key) and
+# install it for the socore user on the master.
+# NOTE(review): chown to $SUDO_USER on /root/.ssh looks intentional for the
+# sudo-invoking user but is unusual — confirm.
+copy_ssh_key() {
+
+ # Generate SSH key
+ mkdir -p /root/.ssh
+ cat /dev/zero | ssh-keygen -f /root/.ssh/so.key -t rsa -q -N ""
+ chown -R $SUDO_USER:$SUDO_USER /root/.ssh
+ #Copy the key over to the master
+ ssh-copy-id -f -i /root/.ssh/so.key socore@$MSRV
+
+}
+
+# Build the capture bond (bond0) from the selected NICs (BNICS) with
+# NetworkManager: no IP on the bond, offloads disabled per slave, and a
+# dispatcher script installed to keep checksum offload off after reconnects.
+# Globals read: NSMSETUP, MTU (ADVANCED only), BNICS, MAININT, SETUPLOG.
+network_setup() {
+ echo "Setting up Bond" >> $SETUPLOG 2>&1
+
+ # Set the MTU
+ if [ "$NSMSETUP" != 'ADVANCED' ]; then
+ MTU=1500
+ fi
+
+ # Create the bond interface
+ nmcli con add ifname bond0 con-name "bond0" type bond mode 0 -- \
+ ipv4.method disabled \
+ ipv6.method link-local \
+ ethernet.mtu $MTU \
+ connection.autoconnect "yes" >> $SETUPLOG 2>&1
+
+ for BNIC in ${BNICS[@]}; do
+ # Strip the quotes from the NIC names
+ BONDNIC="$(echo -e "${BNIC}" | tr -d '"')"
+ # Turn off various offloading settings for the interface
+ for i in rx tx sg tso ufo gso gro lro; do
+ ethtool -K $BONDNIC $i off >> $SETUPLOG 2>&1
+ done
+ # Create the slave interface and assign it to the bond
+ nmcli con add type ethernet ifname $BONDNIC con-name "bond0-slave-$BONDNIC" master bond0 -- \
+ ethernet.mtu $MTU \
+ connection.autoconnect "yes" >> $SETUPLOG 2>&1
+ # Bring the slave interface up
+ nmcli con up bond0-slave-$BONDNIC >> $SETUPLOG 2>&1
+ done
+ # Replace the variable string in the network script
+ sed -i "s/\$MAININT/${MAININT}/g" ./install_scripts/disable-checksum-offload.sh >> $SETUPLOG 2>&1
+ # Copy the checksum offload script to prevent issues with packet capture
+ cp ./install_scripts/disable-checksum-offload.sh /etc/NetworkManager/dispatcher.d/disable-checksum-offload.sh >> $SETUPLOG 2>&1
+}
+
+# Set OS to 'centos' or 'ubuntu' and install that platform's prerequisites.
+# NOTE(review): any non-RedHat system with /etc/os-release is assumed to be
+# Ubuntu here — other distros are not actually rejected; confirm intent.
+detect_os() {
+
+ # Detect Base OS
+ echo "Detecting Base OS" >> $SETUPLOG 2>&1
+ if [ -f /etc/redhat-release ]; then
+ OS=centos
+ yum -y install bind-utils
+ elif [ -f /etc/os-release ]; then
+ OS=ubuntu
+ apt install -y network-manager
+ /bin/systemctl enable network-manager
+ /bin/systemctl start network-manager
+ else
+ echo "We were unable to determine if you are using a supported OS." >> $SETUPLOG 2>&1
+ exit
+ fi
+
+}
+
+# Install docker-ce (plus the python docker bindings salt needs) for the
+# detected OS/install type, point non-eval nodes at the master's registry
+# mirror, and (re)start the docker service.
+# Globals read: OS, INSTALLTYPE, TMP, SETUPLOG.
+docker_install() {
+
+ if [ $OS == 'centos' ]; then
+ yum clean expire-cache
+ yum -y install yum-utils device-mapper-persistent-data lvm2 openssl
+ yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
+ yum -y update
+ yum -y install docker-ce python36-docker
+ if [ $INSTALLTYPE != 'EVALMODE' ]; then
+ docker_registry
+ fi
+ echo "Restarting Docker" >> $SETUPLOG 2>&1
+ systemctl restart docker
+ systemctl enable docker
+
+ else
+ if [ $INSTALLTYPE == 'MASTERONLY' ] || [ $INSTALLTYPE == 'EVALMODE' ]; then
+ apt-get update >> $SETUPLOG 2>&1
+ apt-get -y install docker-ce >> $SETUPLOG 2>&1
+ if [ $INSTALLTYPE != 'EVALMODE' ]; then
+ docker_registry >> $SETUPLOG 2>&1
+ fi
+ echo "Restarting Docker" >> $SETUPLOG 2>&1
+ systemctl restart docker >> $SETUPLOG 2>&1
+ else
+ # Non-master Ubuntu node: docker repo key/source were staged in $TMP/gpg.
+ apt-key add $TMP/gpg/docker.pub >> $SETUPLOG 2>&1
+ add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" >> $SETUPLOG 2>&1
+ apt-get update >> $SETUPLOG 2>&1
+ apt-get -y install docker-ce >> $SETUPLOG 2>&1
+ docker_registry >> $SETUPLOG 2>&1
+ echo "Restarting Docker" >> $SETUPLOG 2>&1
+ systemctl restart docker >> $SETUPLOG 2>&1
+ fi
+ echo "Using pip3 to install docker-py for salt"
+ pip3 install docker
+ fi
+
+}
+
+# Write /etc/docker/daemon.json pointing this host's docker daemon at the
+# master's registry mirror (https://$MSRV:5000). Overwrites any existing file.
+docker_registry() {
+
+ echo "Setting up Docker Registry" >> $SETUPLOG 2>&1
+ mkdir -p /etc/docker >> $SETUPLOG 2>&1
+ # Make the host use the master docker registry
+ echo "{" > /etc/docker/daemon.json
+ echo " \"registry-mirrors\": [\"https://$MSRV:5000\"]" >> /etc/docker/daemon.json
+ echo "}" >> /etc/docker/daemon.json
+ echo "Docker Registry Setup - Complete" >> $SETUPLOG 2>&1
+
+}
+
+# Pick ES_HEAP_SIZE from TOTAL_MEM (assumed MB — TODO confirm units):
+# 600m below 8GB, 25% of RAM in between, capped at 25000m at/above 100GB.
+es_heapsize() {
+
+ # Determine ES Heap Size
+ if [ $TOTAL_MEM -lt 8000 ] ; then
+ ES_HEAP_SIZE="600m"
+ elif [ $TOTAL_MEM -ge 100000 ]; then
+ # Set a max of 25GB for heap size
+ # https://www.elastic.co/guide/en/elasticsearch/guide/current/heap-sizing.html
+ ES_HEAP_SIZE="25000m"
+ else
+ # Set heap size to 25% of available memory
+ ES_HEAP_SIZE=$(($TOTAL_MEM / 4))"m"
+ fi
+
+}
+
+# Eval mode: make the local hostname resolve to loopback via /etc/hosts.
+eval_mode_hostsfile() {
+
+ echo "127.0.0.1 $HOSTNAME" >> /etc/hosts
+
+}
+
+# Build FNICS: whiptail checklist entries for candidate capture NICs,
+# excluding the management NIC ($MNIC), loopback, virtual/veth/bridge/docker
+# and wireless interfaces.
+filter_nics() {
+
+ # Filter the NICs that we don't want to see in setup
+ FNICS=$(ip link | grep -vw $MNIC | awk -F: '$0 !~ "lo|vir|veth|br|docker|wl|^[^0-9]"{print $2 " \"" "Interface" "\"" " OFF"}')
+
+}
+
+# Generate 20-char alphanumeric random secrets for mysql, fleet, TheHive,
+# Cortex and Sensoroni from /dev/urandom.
+generate_passwords(){
+ # Generate Random Passwords for Things
+ MYSQLPASS=$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 20 | head -n 1)
+ FLEETPASS=$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 20 | head -n 1)
+ HIVEKEY=$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 20 | head -n 1)
+ CORTEXKEY=$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 20 | head -n 1)
+ SENSORONIKEY=$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 20 | head -n 1)
+}
+
+# FSNSM = size of the filesystem backing /nsm, in bytes (df 1K blocks * 1000).
+get_filesystem_nsm(){
+ FSNSM=$(df /nsm | awk '$3 ~ /[0-9]+/ { print $2 * 1000 }')
+}
+
+get_log_size_limit() {
+
+ DISK_DIR="/"
+ if [ -d /nsm ]; then
+ DISK_DIR="/nsm"
+ fi
+ DISK_SIZE_K=`df $DISK_DIR |grep -v "^Filesystem" | awk '{print $2}'`
+ PERCENTAGE=85
+ DISK_SIZE=DISK_SIZE_K*1000
+ PERCENTAGE_DISK_SPACE=`echo $(($DISK_SIZE*$PERCENTAGE/100))`
+ LOG_SIZE_LIMIT=$(($PERCENTAGE_DISK_SPACE/1000000000))
+
+}
+
+# FSROOT = size of the root filesystem, in bytes (df 1K blocks * 1000).
+get_filesystem_root(){
+ FSROOT=$(df / | awk '$3 ~ /[0-9]+/ { print $2 * 1000 }')
+}
+
+# Determine the primary IP (MAINIP) and interface (MAININT) from the route
+# the kernel would use to reach 1.0.0.1.
+get_main_ip() {
+
+ # Get the main IP address the box is using
+ MAINIP=$(ip route get 1 | awk '{print $NF;exit}')
+ MAININT=$(ip route get 1 | awk '{print $5;exit}')
+
+}
+
+# Abort with exit 1 unless running with effective uid 0 (e.g. under sudo).
+got_root() {
+
+ # Make sure you are root
+ if [ "$(id -u)" -ne 0 ]; then
+ echo "This script must be run using sudo!"
+ exit 1
+ fi
+
+}
+
+# Remove the temporary staging directory created by install_prep.
+install_cleanup() {
+
+ # Clean up after ourselves
+ rm -rf /root/installtmp
+
+}
+
+# Install python3 (+pip/gcc headers on Ubuntu, epel+python3 on CentOS).
+install_python3() {
+
+ echo "Installing Python3"
+
+ if [ $OS == 'ubuntu' ]; then
+ apt-get -y install python3-pip gcc python3-dev
+ elif [ $OS == 'centos' ]; then
+ yum -y install epel-release python3
+ fi
+
+}
+
+# Create the install staging directory and export its path as TMP.
+install_prep() {
+
+ # Create a tmp space that isn't in /tmp
+ mkdir /root/installtmp
+ TMP=/root/installtmp
+
+}
+
+# Install and configure the salt-master packages.
+# NOTE(review): the CentOS branch is currently commented out (packages are
+# installed elsewhere, presumably in saltify) — only Ubuntu installs here.
+install_master() {
+
+ # Install the salt master package
+ if [ $OS == 'centos' ]; then
+ #yum -y install wget salt-common salt-master python36-mysql python36-dateutil python36-m2crypto >> $SETUPLOG 2>&1
+ echo ""
+ # Create a place for the keys for Ubuntu minions
+ #mkdir -p /opt/so/gpg
+ #wget --inet4-only -O /opt/so/gpg/SALTSTACK-GPG-KEY.pub https://repo.saltstack.com/apt/ubuntu/16.04/amd64/latest/SALTSTACK-GPG-KEY.pub
+ #wget --inet4-only -O /opt/so/gpg/docker.pub https://download.docker.com/linux/ubuntu/gpg
+ #wget --inet4-only -O /opt/so/gpg/GPG-KEY-WAZUH https://packages.wazuh.com/key/GPG-KEY-WAZUH
+
+ else
+ # Pin salt at 2019.2.2 and hold it so apt upgrades don't move it.
+ apt-get install -y salt-common=2019.2.2+ds-1 salt-master=2019.2.2+ds-1 salt-minion=2019.2.2+ds-1
+ apt-mark hold salt-common salt-master salt-minion
+ echo -e "XXX\n11\nInstalling libssl-dev for M2Crypto... \nXXX"
+ apt-get -y install libssl-dev
+ echo -e "XXX\n12\nUsing pip3 to install M2Crypto for Salt... \nXXX"
+ pip3 install M2Crypto
+
+ fi
+
+ copy_master_config
+
+}
+
+# Pick LS_HEAP_SIZE (Logstash) from TOTAL_MEM: 1000m at/above 32GB, else 500m.
+ls_heapsize() {
+
+ # Determine LS Heap Size
+ if [ $TOTAL_MEM -ge 32000 ] ; then
+ LS_HEAP_SIZE="1000m"
+ else
+ # If minimal RAM, then set minimal heap
+ LS_HEAP_SIZE="500m"
+ fi
+
+}
+
+# Write the per-master pillar (/opt/so/saltstack/pillar/masters/$MINION_ID.sls)
+# from the globals collected during setup. Eval mode adds fixed single-box
+# logstash/mtu tuning; commented lines are retained intentionally.
+master_pillar() {
+
+ # Create the master pillar
+ touch /opt/so/saltstack/pillar/masters/$MINION_ID.sls
+ echo "master:" > /opt/so/saltstack/pillar/masters/$MINION_ID.sls
+ echo " mainip: $MAINIP" >> /opt/so/saltstack/pillar/masters/$MINION_ID.sls
+ echo " mainint: $MAININT" >> /opt/so/saltstack/pillar/masters/$MINION_ID.sls
+ echo " esheap: $ES_HEAP_SIZE" >> /opt/so/saltstack/pillar/masters/$MINION_ID.sls
+ echo " esclustername: {{ grains.host }}" >> /opt/so/saltstack/pillar/masters/$MINION_ID.sls
+ if [ $INSTALLTYPE == 'EVALMODE' ]; then
+ echo " freq: 0" >> /opt/so/saltstack/pillar/masters/$MINION_ID.sls
+ echo " domainstats: 0" >> /opt/so/saltstack/pillar/masters/$MINION_ID.sls
+ echo " ls_pipeline_batch_size: 125" >> /opt/so/saltstack/pillar/masters/$MINION_ID.sls
+ echo " ls_input_threads: 1" >> /opt/so/saltstack/pillar/masters/$MINION_ID.sls
+ echo " ls_batch_count: 125" >> /opt/so/saltstack/pillar/masters/$MINION_ID.sls
+ echo " mtu: 1500" >> /opt/so/saltstack/pillar/masters/$MINION_ID.sls
+
+ else
+ echo " freq: 0" >> /opt/so/saltstack/pillar/masters/$MINION_ID.sls
+ echo " domainstats: 0" >> /opt/so/saltstack/pillar/masters/$MINION_ID.sls
+ fi
+ echo " lsheap: $LS_HEAP_SIZE" >> /opt/so/saltstack/pillar/masters/$MINION_ID.sls
+ echo " lsaccessip: 127.0.0.1" >> /opt/so/saltstack/pillar/masters/$MINION_ID.sls
+ echo " elastalert: 1" >> /opt/so/saltstack/pillar/masters/$MINION_ID.sls
+ echo " ls_pipeline_workers: $CPUCORES" >> /opt/so/saltstack/pillar/masters/$MINION_ID.sls
+ echo " nids_rules: $RULESETUP" >> /opt/so/saltstack/pillar/masters/$MINION_ID.sls
+ echo " oinkcode: $OINKCODE" >> /opt/so/saltstack/pillar/masters/$MINION_ID.sls
+ #echo " access_key: $ACCESS_KEY" >> /opt/so/saltstack/pillar/masters/$MINION_ID.sls
+ #echo " access_secret: $ACCESS_SECRET" >> /opt/so/saltstack/pillar/masters/$MINION_ID.sls
+ echo " es_port: $NODE_ES_PORT" >> /opt/so/saltstack/pillar/masters/$MINION_ID.sls
+ echo " log_size_limit: $LOG_SIZE_LIMIT" >> /opt/so/saltstack/pillar/masters/$MINION_ID.sls
+ echo " cur_close_days: $CURCLOSEDAYS" >> /opt/so/saltstack/pillar/masters/$MINION_ID.sls
+ #echo " mysqlpass: $MYSQLPASS" >> /opt/so/saltstack/pillar/masters/$MINION_ID.sls
+ #echo " fleetpass: $FLEETPASS" >> /opt/so/saltstack/pillar/masters/$MINION_ID.sls
+ echo " grafana: $GRAFANA" >> /opt/so/saltstack/pillar/masters/$MINION_ID.sls
+ echo " osquery: $OSQUERY" >> /opt/so/saltstack/pillar/masters/$MINION_ID.sls
+ echo " wazuh: $WAZUH" >> /opt/so/saltstack/pillar/masters/$MINION_ID.sls
+ echo " thehive: $THEHIVE" >> /opt/so/saltstack/pillar/masters/$MINION_ID.sls
+ echo " playbook: $PLAYBOOK" >> /opt/so/saltstack/pillar/masters/$MINION_ID.sls
+ }
+
+# Write the global static pillar (/opt/so/saltstack/pillar/static.sls):
+# cluster-wide settings plus default TheHive/Cortex credentials (intended to
+# be changed post-install) and the masterupdate flag.
+master_static() {
+
+ # Create a static file for global values
+ touch /opt/so/saltstack/pillar/static.sls
+
+ echo "static:" > /opt/so/saltstack/pillar/static.sls
+ echo " hnmaster: $HNMASTER" >> /opt/so/saltstack/pillar/static.sls
+ echo " ntpserver: $NTPSERVER" >> /opt/so/saltstack/pillar/static.sls
+ echo " proxy: $PROXY" >> /opt/so/saltstack/pillar/static.sls
+ echo " broversion: $BROVERSION" >> /opt/so/saltstack/pillar/static.sls
+ echo " ids: $NIDS" >> /opt/so/saltstack/pillar/static.sls
+ echo " masterip: $MAINIP" >> /opt/so/saltstack/pillar/static.sls
+ echo " hiveuser: hiveadmin" >> /opt/so/saltstack/pillar/static.sls
+ echo " hivepassword: hivechangeme" >> /opt/so/saltstack/pillar/static.sls
+ echo " hivekey: $HIVEKEY" >> /opt/so/saltstack/pillar/static.sls
+ echo " cortexuser: cortexadmin" >> /opt/so/saltstack/pillar/static.sls
+ echo " cortexpassword: cortexchangeme" >> /opt/so/saltstack/pillar/static.sls
+ echo " cortexkey: $CORTEXKEY" >> /opt/so/saltstack/pillar/static.sls
+ echo " fleetsetup: 0" >> /opt/so/saltstack/pillar/static.sls
+ echo " sensoronikey: $SENSORONIKEY" >> /opt/so/saltstack/pillar/static.sls
+ if [[ $MASTERUPDATES == 'MASTER' ]]; then
+ echo " masterupdate: 1" >> /opt/so/saltstack/pillar/static.sls
+ else
+ echo " masterupdate: 0" >> /opt/so/saltstack/pillar/static.sls
+ fi
+}
+
+# Generate minio-style credentials from /dev/urandom: ACCESS_KEY (20 chars)
+# and ACCESS_SECRET (40 chars), printable characters minus quotes.
+minio_generate_keys() {
+
+ local charSet="[:graph:]"
+
+ ACCESS_KEY=$(cat /dev/urandom | tr -cd "$charSet" | tr -d \' | tr -d \" | head -c 20)
+ ACCESS_SECRET=$(cat /dev/urandom | tr -cd "$charSet" | tr -d \' | tr -d \" | head -c 40)
+
+}
+
+# Write the per-node pillar into the staging area ($TMP/pillar/nodes) so it
+# can be shipped to the master by copy_minion_tmp_files. Exports
+# NODEPILLARPATH for later use (e.g. patch_pillar).
+node_pillar() {
+
+ NODEPILLARPATH=$TMP/pillar/nodes
+ if [ ! -d $NODEPILLARPATH ]; then
+ mkdir -p $NODEPILLARPATH
+ fi
+
+ # Create the node pillar
+ touch $NODEPILLARPATH/$MINION_ID.sls
+ echo "node:" > $NODEPILLARPATH/$MINION_ID.sls
+ echo " mainip: $MAINIP" >> $NODEPILLARPATH/$MINION_ID.sls
+ echo " mainint: $MAININT" >> $NODEPILLARPATH/$MINION_ID.sls
+ echo " esheap: $NODE_ES_HEAP_SIZE" >> $NODEPILLARPATH/$MINION_ID.sls
+ echo " esclustername: {{ grains.host }}" >> $NODEPILLARPATH/$MINION_ID.sls
+ echo " lsheap: $NODE_LS_HEAP_SIZE" >> $NODEPILLARPATH/$MINION_ID.sls
+ echo " ls_pipeline_workers: $LSPIPELINEWORKERS" >> $NODEPILLARPATH/$MINION_ID.sls
+ echo " ls_pipeline_batch_size: $LSPIPELINEBATCH" >> $NODEPILLARPATH/$MINION_ID.sls
+ echo " ls_input_threads: $LSINPUTTHREADS" >> $NODEPILLARPATH/$MINION_ID.sls
+ echo " ls_batch_count: $LSINPUTBATCHCOUNT" >> $NODEPILLARPATH/$MINION_ID.sls
+ echo " es_shard_count: $SHARDCOUNT" >> $NODEPILLARPATH/$MINION_ID.sls
+ echo " node_type: $NODETYPE" >> $NODEPILLARPATH/$MINION_ID.sls
+ echo " es_port: $NODE_ES_PORT" >> $NODEPILLARPATH/$MINION_ID.sls
+ echo " log_size_limit: $LOG_SIZE_LIMIT" >> $NODEPILLARPATH/$MINION_ID.sls
+ echo " cur_close_days: $CURCLOSEDAYS" >> $NODEPILLARPATH/$MINION_ID.sls
+
+}
+
+# Append the OS-patching section to this minion's pillar file; the target
+# pillar directory depends on INSTALLTYPE (master/sensor/node paths).
+# Globals read: INSTALLTYPE, SENSORPILLARPATH/NODEPILLARPATH (set by the
+# corresponding pillar functions), MINION_ID, PATCHSCHEDULENAME.
+patch_pillar() {
+
+ case $INSTALLTYPE in
+ MASTERONLY | EVALMODE)
+ PATCHPILLARPATH=/opt/so/saltstack/pillar/masters
+ ;;
+ SENSORONLY)
+ PATCHPILLARPATH=$SENSORPILLARPATH
+ ;;
+ STORAGENODE | PARSINGNODE | HOTNODE | WARMNODE)
+ PATCHPILLARPATH=$NODEPILLARPATH
+ ;;
+ esac
+
+
+ echo "" >> $PATCHPILLARPATH/$MINION_ID.sls
+ echo "patch:" >> $PATCHPILLARPATH/$MINION_ID.sls
+ echo " os:" >> $PATCHPILLARPATH/$MINION_ID.sls
+ echo " schedule_name: $PATCHSCHEDULENAME" >> $PATCHPILLARPATH/$MINION_ID.sls
+ echo " enabled: True" >> $PATCHPILLARPATH/$MINION_ID.sls
+ echo " splay: 300" >> $PATCHPILLARPATH/$MINION_ID.sls
+
+
+}
+
+# Write a named OS patch-schedule YAML ($PATCHSCHEDULENAME.yml) in the
+# staging area: one entry per selected day (PATCHSCHEDULEDAYS) with the
+# selected hours (PATCHSCHEDULEHOURS); whiptail quotes are stripped.
+patch_schedule_os_new() {
+ OSPATCHSCHEDULEDIR="$TMP/salt/patch/os/schedules"
+ OSPATCHSCHEDULE="$OSPATCHSCHEDULEDIR/$PATCHSCHEDULENAME.yml"
+
+ if [ ! -d $OSPATCHSCHEDULEDIR ] ; then
+ mkdir -p $OSPATCHSCHEDULEDIR
+ fi
+
+ echo "patch:" > $OSPATCHSCHEDULE
+ echo " os:" >> $OSPATCHSCHEDULE
+ echo " schedule:" >> $OSPATCHSCHEDULE
+ for psd in "${PATCHSCHEDULEDAYS[@]}"
+ do
+ psd=$(echo $psd | sed 's/"//g')
+ echo " - $psd:" >> $OSPATCHSCHEDULE
+ for psh in "${PATCHSCHEDULEHOURS[@]}"
+ do
+ psh=$(echo $psh | sed 's/"//g')
+ echo " - '$psh'" >> $OSPATCHSCHEDULE
+ done
+ done
+
+}
+
+# Turn the whiptail COMPONENTS selection (space-separated, quoted names like
+# "GRAFANA OSQUERY") into per-component flags: default all to 0, then set
+# each selected name's variable to 1.
+process_components() {
+ CLEAN=${COMPONENTS//\"}
+ GRAFANA=0
+ OSQUERY=0
+ WAZUH=0
+ THEHIVE=0
+ PLAYBOOK=0
+
+ IFS=$' '
+ for item in $(echo "$CLEAN"); do
+ # NOTE(review): 'let $item=1' treats each token as a variable name, so
+ # any unexpected token in COMPONENTS would set an arbitrary shell
+ # variable (injection hazard if COMPONENTS were ever untrusted).
+ let $item=1
+ done
+ unset IFS
+}
+
+saltify() {
+
+ # Install updates and Salt
+ if [ $OS == 'centos' ]; then
+ ADDUSER=adduser
+
+ if [ $INSTALLTYPE == 'MASTERONLY' ] || [ $INSTALLTYPE == 'EVALMODE' ]; then
+ yum -y install wget https://repo.saltstack.com/py3/redhat/salt-py3-repo-latest-2.el7.noarch.rpm
+ cp /etc/yum.repos.d/salt-latest.repo /etc/yum.repos.d/salt-2019-2.repo
+ sed -i 's/latest/2019.2/g' /etc/yum.repos.d/salt-2019-2.repo
+ # Download Ubuntu Keys in case master updates = 1
+ mkdir -p /opt/so/gpg
+ wget --inet4-only -O /opt/so/gpg/SALTSTACK-GPG-KEY.pub https://repo.saltstack.com/apt/ubuntu/16.04/amd64/latest/SALTSTACK-GPG-KEY.pub
+ wget --inet4-only -O /opt/so/gpg/docker.pub https://download.docker.com/linux/ubuntu/gpg
+ wget --inet4-only -O /opt/so/gpg/GPG-KEY-WAZUH https://packages.wazuh.com/key/GPG-KEY-WAZUH
+ cat > /etc/yum.repos.d/wazuh.repo <<\EOF
+[wazuh_repo]
+gpgcheck=1
+gpgkey=https://packages.wazuh.com/key/GPG-KEY-WAZUH
+enabled=1
+name=Wazuh repository
+baseurl=https://packages.wazuh.com/3.x/yum/
+protect=1
+EOF
+
+ else
+
+ if [ $MASTERUPDATES == 'MASTER' ]; then
+
+ # Create the GPG Public Key for the Salt Repo
+ echo "-----BEGIN PGP PUBLIC KEY BLOCK-----" > /etc/pki/rpm-gpg/saltstack-signing-key
+ echo "Version: GnuPG v2.0.22 (GNU/Linux)" >> /etc/pki/rpm-gpg/saltstack-signing-key
+ echo "" >> /etc/pki/rpm-gpg/saltstack-signing-key
+ echo "mQENBFOpvpgBCADkP656H41i8fpplEEB8IeLhugyC2rTEwwSclb8tQNYtUiGdna9" >> /etc/pki/rpm-gpg/saltstack-signing-key
+ echo "m38kb0OS2DDrEdtdQb2hWCnswxaAkUunb2qq18vd3dBvlnI+C4/xu5ksZZkRj+fW" >> /etc/pki/rpm-gpg/saltstack-signing-key
+ echo "tArNR18V+2jkwcG26m8AxIrT+m4M6/bgnSfHTBtT5adNfVcTHqiT1JtCbQcXmwVw" >> /etc/pki/rpm-gpg/saltstack-signing-key
+ echo "WbqS6v/LhcsBE//SHne4uBCK/GHxZHhQ5jz5h+3vWeV4gvxS3Xu6v1IlIpLDwUts" >> /etc/pki/rpm-gpg/saltstack-signing-key
+ echo "kT1DumfynYnnZmWTGc6SYyIFXTPJLtnoWDb9OBdWgZxXfHEcBsKGha+bXO+m2tHA" >> /etc/pki/rpm-gpg/saltstack-signing-key
+ echo "gNneN9i5f8oNxo5njrL8jkCckOpNpng18BKXABEBAAG0MlNhbHRTdGFjayBQYWNr" >> /etc/pki/rpm-gpg/saltstack-signing-key
+ echo "YWdpbmcgVGVhbSA8cGFja2FnaW5nQHNhbHRzdGFjay5jb20+iQE4BBMBAgAiBQJT" >> /etc/pki/rpm-gpg/saltstack-signing-key
+ echo "qb6YAhsDBgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgAAKCRAOCKFJ3le/vhkqB/0Q" >> /etc/pki/rpm-gpg/saltstack-signing-key
+ echo "WzELZf4d87WApzolLG+zpsJKtt/ueXL1W1KA7JILhXB1uyvVORt8uA9FjmE083o1" >> /etc/pki/rpm-gpg/saltstack-signing-key
+ echo "yE66wCya7V8hjNn2lkLXboOUd1UTErlRg1GYbIt++VPscTxHxwpjDGxDB1/fiX2o" >> /etc/pki/rpm-gpg/saltstack-signing-key
+ echo "nK5SEpuj4IeIPJVE/uLNAwZyfX8DArLVJ5h8lknwiHlQLGlnOu9ulEAejwAKt9CU" >> /etc/pki/rpm-gpg/saltstack-signing-key
+ echo "4oYTszYM4xrbtjB/fR+mPnYh2fBoQO4d/NQiejIEyd9IEEMd/03AJQBuMux62tjA" >> /etc/pki/rpm-gpg/saltstack-signing-key
+ echo "/NwvQ9eqNgLw9NisFNHRWtP4jhAOsshv1WW+zPzu3ozoO+lLHixUIz7fqRk38q8Q" >> /etc/pki/rpm-gpg/saltstack-signing-key
+ echo "9oNR31KvrkSNrFbA3D89uQENBFOpvpgBCADJ79iH10AfAfpTBEQwa6vzUI3Eltqb" >> /etc/pki/rpm-gpg/saltstack-signing-key
+ echo "9aZ0xbZV8V/8pnuU7rqM7Z+nJgldibFk4gFG2bHCG1C5aEH/FmcOMvTKDhJSFQUx" >> /etc/pki/rpm-gpg/saltstack-signing-key
+ echo "uhgxttMArXm2c22OSy1hpsnVG68G32Nag/QFEJ++3hNnbyGZpHnPiYgej3FrerQJ" >> /etc/pki/rpm-gpg/saltstack-signing-key
+ echo "zv456wIsxRDMvJ1NZQB3twoCqwapC6FJE2hukSdWB5yCYpWlZJXBKzlYz/gwD/Fr" >> /etc/pki/rpm-gpg/saltstack-signing-key
+ echo "GL578WrLhKw3UvnJmlpqQaDKwmV2s7MsoZogC6wkHE92kGPG2GmoRD3ALjmCvN1E" >> /etc/pki/rpm-gpg/saltstack-signing-key
+ echo "PsIsQGnwpcXsRpYVCoW7e2nW4wUf7IkFZ94yOCmUq6WreWI4NggRcFC5ABEBAAGJ" >> /etc/pki/rpm-gpg/saltstack-signing-key
+ echo "AR8EGAECAAkFAlOpvpgCGwwACgkQDgihSd5Xv74/NggA08kEdBkiWWwJZUZEy7cK" >> /etc/pki/rpm-gpg/saltstack-signing-key
+ echo "WWcgjnRuOHd4rPeT+vQbOWGu6x4bxuVf9aTiYkf7ZjVF2lPn97EXOEGFWPZeZbH4" >> /etc/pki/rpm-gpg/saltstack-signing-key
+ echo "vdRFH9jMtP+rrLt6+3c9j0M8SIJYwBL1+CNpEC/BuHj/Ra/cmnG5ZNhYebm76h5f" >> /etc/pki/rpm-gpg/saltstack-signing-key
+ echo "T9iPW9fFww36FzFka4VPlvA4oB7ebBtquFg3sdQNU/MmTVV4jPFWXxh4oRDDR+8N" >> /etc/pki/rpm-gpg/saltstack-signing-key
+ echo "1bcPnbB11b5ary99F/mqr7RgQ+YFF0uKRE3SKa7a+6cIuHEZ7Za+zhPaQlzAOZlx" >> /etc/pki/rpm-gpg/saltstack-signing-key
+ echo "fuBmScum8uQTrEF5+Um5zkwC7EXTdH1co/+/V/fpOtxIg4XO4kcugZefVm5ERfVS" >> /etc/pki/rpm-gpg/saltstack-signing-key
+ echo "MA==" >> /etc/pki/rpm-gpg/saltstack-signing-key
+ echo "=dtMN" >> /etc/pki/rpm-gpg/saltstack-signing-key
+ echo "-----END PGP PUBLIC KEY BLOCK-----" >> /etc/pki/rpm-gpg/saltstack-signing-key
+
+ # Add the Wazuh Key
+ cat > /etc/pki/rpm-gpg/GPG-KEY-WAZUH <<\EOF
+-----BEGIN PGP PUBLIC KEY BLOCK-----
+Version: GnuPG v1
+
+mQINBFeeyYwBEACyf4VwV8c2++J5BmCl6ofLCtSIW3UoVrF4F+P19k/0ngnSfjWb
+8pSWB11HjZ3Mr4YQeiD7yY06UZkrCXk+KXDlUjMK3VOY7oNPkqzNaP6+8bDwj4UA
+hADMkaXBvWooGizhCoBtDb1bSbHKcAnQ3PTdiuaqF5bcyKk8hv939CHulL2xH+BP
+mmTBi+PM83pwvR+VRTOT7QSzf29lW1jD79v4rtXHJs4KCz/amT/nUm/tBpv3q0sT
+9M9rH7MTQPdqvzMl122JcZST75GzFJFl0XdSHd5PAh2mV8qYak5NYNnwA41UQVIa
++xqhSu44liSeZWUfRdhrQ/Nb01KV8lLAs11Sz787xkdF4ad25V/Rtg/s4UXt35K3
+klGOBwDnzPgHK/OK2PescI5Ve1z4x1C2bkGze+gk/3IcfGJwKZDfKzTtqkZ0MgpN
+7RGghjkH4wpFmuswFFZRyV+s7jXYpxAesElDSmPJ0O07O4lQXQMROE+a2OCcm0eF
+3+Cr6qxGtOp1oYMOVH0vOLYTpwOkAM12/qm7/fYuVPBQtVpTojjV5GDl2uGq7p0o
+h9hyWnLeNRbAha0px6rXcF9wLwU5n7mH75mq5clps3sP1q1/VtP/Fr84Lm7OGke4
+9eD+tPNCdRx78RNWzhkdQxHk/b22LCn1v6p1Q0qBco9vw6eawEkz1qwAjQARAQAB
+tDFXYXp1aC5jb20gKFdhenVoIFNpZ25pbmcgS2V5KSA8c3VwcG9ydEB3YXp1aC5j
+b20+iQI9BBMBCAAnBQJXnsmMAhsDBQkFo5qABQsJCAcDBRUKCQgLBRYCAwEAAh4B
+AheAAAoJEJaz7l8pERFFHEsQAIaslejcW2NgjgOZuvn1Bht4JFMbCIPOekg4Z5yF
+binRz0wmA7JNaawDHTBYa6L+A2Xneu/LmuRjFRMesqopUukVeGQgHBXbGMzY46eI
+rqq/xgvgWzHSbWweiOX0nn+exbEAM5IyW+efkWNz0e8xM1LcxdYZxkVOqFqkp3Wv
+J9QUKw6z9ifUOx++G8UO307O3hT2f+x4MUoGZeOF4q1fNy/VyBS2lMg2HF7GWy2y
+kjbSe0p2VOFGEZLuu2f5tpPNth9UJiTliZKmgSk/zbKYmSjiVY2eDqNJ4qjuqes0
+vhpUaBjA+DgkEWUrUVXG5yfQDzTiYIF84LknjSJBYSLZ4ABsMjNO+GApiFPcih+B
+Xc9Kx7E9RNsNTDqvx40y+xmxDOzVIssXeKqwO8r5IdG3K7dkt2Vkc/7oHOpcKwE5
+8uASMPiqqMo+t1RVa6Spckp3Zz8REILbotnnVwDIwo2HmgASirMGUcttEJzubaIa
+Mv43GKs8RUH9s5NenC02lfZG7D8WQCz5ZH7yEWrt5bCaQRNDXjhsYE17SZ/ToHi3
+OpWu050ECWOHdxlXNG3dOWIdFDdBJM7UfUNSSOe2Y5RLsWfwvMFGbfpdlgJcMSDV
+X+ienkrtXhBteTu0dwPu6HZTFOjSftvtAo0VIqGQrKMvKelkkdNGdDFLQw2mUDcw
+EQj6uQINBFeeyYwBEADD1Y3zW5OrnYZ6ghTd5PXDAMB8Z1ienmnb2IUzLM+i0yE2
+TpKSP/XYCTBhFa390rYgFO2lbLDVsiz7Txd94nHrdWXGEQfwrbxsvdlLLWk7iN8l
+Fb4B60OfRi3yoR96a/kIPNa0x26+n79LtDuWZ/DTq5JSHztdd9F1sr3h8i5zYmtv
+luj99ZorpwYejbBVUm0+gP0ioaXM37uO56UFVQk3po9GaS+GtLnlgoE5volgNYyO
+rkeIua4uZVsifREkHCKoLJip6P7S3kTyfrpiSLhouEZ7kV1lbMbFgvHXyjm+/AIx
+HIBy+H+e+HNt5gZzTKUJsuBjx44+4jYsOR67EjOdtPOpgiuJXhedzShEO6rbu/O4
+wM1rX45ZXDYa2FGblHCQ/VaS0ttFtztk91xwlWvjTR8vGvp5tIfCi+1GixPRQpbN
+Y/oq8Kv4A7vB3JlJscJCljvRgaX0gTBzlaF6Gq0FdcWEl5F1zvsWCSc/Fv5WrUPY
+5mG0m69YUTeVO6cZS1aiu9Qh3QAT/7NbUuGXIaAxKnu+kkjLSz+nTTlOyvbG7BVF
+a6sDmv48Wqicebkc/rCtO4g8lO7KoA2xC/K/6PAxDrLkVyw8WPsAendmezNfHU+V
+32pvWoQoQqu8ysoaEYc/j9fN4H3mEBCN3QUJYCugmHP0pu7VtpWwwMUqcGeUVwAR
+AQABiQIlBBgBCAAPBQJXnsmMAhsMBQkFo5qAAAoJEJaz7l8pERFFz8IP/jfBxJSB
+iOw+uML+C4aeYxuHSdxmSsrJclYjkw7Asha/fm4Kkve00YAW8TGxwH2kgS72ooNJ
+1Q7hUxNbVyrJjQDSMkRKwghmrPnUM3UyHmE0dq+G2NhaPdFo8rKifLOPgwaWAfSV
+wgMTK86o0kqRbGpXgVIG5eRwv2FcxM3xGfy7sub07J2VEz7Ba6rYQ3NTbPK42AtV
++wRJDXcgS7y6ios4XQtSbIB5f6GI56zVlwfRd3hovV9ZAIJQ6DKM31wD6Kt/pRun
+DjwMZu0/82JMoqmxX/00sNdDT1S13guCfl1WhBu7y1ja9MUX5OpUzyEKg5sxme+L
+iY2Rhs6CjmbTm8ER4Uj8ydKyVTy8zbumbB6T8IwCAbEMtPxm6pKh/tgLpoJ+Bj0y
+AsGjmhV7R6PKZSDXg7/qQI98iC6DtWc9ibC/QuHLcvm3hz40mBgXAemPJygpxGst
+mVtU7O3oHw9cIUpkbMuVqSxgPFmSSq5vEYkka1CYeg8bOz6aCTuO5J0GDlLrpjtx
+6lyImbZAF/8zKnW19aq5lshT2qJlTQlZRwwDZX5rONhA6T8IEUnUyD4rAIQFwfJ+
+gsXa4ojD/tA9NLdiNeyEcNfyX3FZwXWCtVLXflzdRN293FKamcdnMjVRjkCnp7iu
+7eO7nMgcRoWddeU+2aJFqCoQtKCp/5EKhFey
+=UIVm
+-----END PGP PUBLIC KEY BLOCK-----
+EOF
+
+ # Proxy is hating on me.. Lets just set it manually
+ echo "[salt-latest]" > /etc/yum.repos.d/salt-latest.repo
+ echo "name=SaltStack Latest Release Channel for RHEL/Centos \$releasever" >> /etc/yum.repos.d/salt-latest.repo
+ echo "baseurl=https://repo.saltstack.com/py3/redhat/7/\$basearch/latest" >> /etc/yum.repos.d/salt-latest.repo
+ echo "failovermethod=priority" >> /etc/yum.repos.d/salt-latest.repo
+ echo "enabled=1" >> /etc/yum.repos.d/salt-latest.repo
+ echo "gpgcheck=1" >> /etc/yum.repos.d/salt-latest.repo
+ echo "gpgkey=file:///etc/pki/rpm-gpg/saltstack-signing-key" >> /etc/yum.repos.d/salt-latest.repo
+
+ # Proxy is hating on me.. Lets just set it manually
+ echo "[salt-2019.2]" > /etc/yum.repos.d/salt-2019-2.repo
+ echo "name=SaltStack Latest Release Channel for RHEL/Centos \$releasever" >> /etc/yum.repos.d/salt-2019-2.repo
+ echo "baseurl=https://repo.saltstack.com/py3/redhat/7/\$basearch/2019.2" >> /etc/yum.repos.d/salt-2019-2.repo
+ echo "failovermethod=priority" >> /etc/yum.repos.d/salt-2019-2.repo
+ echo "enabled=1" >> /etc/yum.repos.d/salt-2019-2.repo
+ echo "gpgcheck=1" >> /etc/yum.repos.d/salt-2019-2.repo
+ echo "gpgkey=file:///etc/pki/rpm-gpg/saltstack-signing-key" >> /etc/yum.repos.d/salt-2019-2.repo
+
+ cat > /etc/yum.repos.d/wazuh.repo <<\EOF
+[wazuh_repo]
+gpgcheck=1
+gpgkey=file:///etc/pki/rpm-gpg/GPG-KEY-WAZUH
+enabled=1
+name=Wazuh repository
+baseurl=https://packages.wazuh.com/3.x/yum/
+protect=1
+EOF
+ else
+ yum -y install https://repo.saltstack.com/py3/redhat/salt-py3-repo-latest-2.el7.noarch.rpm
+ cp /etc/yum.repos.d/salt-latest.repo /etc/yum.repos.d/salt-2019-2.repo
+ sed -i 's/latest/2019.2/g' /etc/yum.repos.d/salt-2019-2.repo
+cat > /etc/yum.repos.d/wazuh.repo <<\EOF
+[wazuh_repo]
+gpgcheck=1
+gpgkey=file:///etc/pki/rpm-gpg/GPG-KEY-WAZUH
+enabled=1
+name=Wazuh repository
+baseurl=https://packages.wazuh.com/3.x/yum/
+protect=1
+EOF
+ fi
+ fi
+
+ yum clean expire-cache
+ yum -y install epel-release salt-minion-2019.2.2 yum-utils device-mapper-persistent-data lvm2 openssl
+ yum -y update exclude=salt*
+ systemctl enable salt-minion
+
+ if [ $INSTALLTYPE == 'MASTERONLY' ] || [ $INSTALLTYPE == 'EVALMODE' ]; then
+ yum -y install salt-master-2019.2.2 python3 python36-m2crypto salt-minion-2019.2.2 python36-dateutil python36-mysql python36-docker
+ systemctl enable salt-master
+ else
+ yum -y install salt-minion-2019.2.2 python3 python36-m2crypto python36-dateutil python36-docker
+ fi
+ echo "exclude=salt*" >> /etc/yum.conf
+
+ else
+ ADDUSER=useradd
+ DEBIAN_FRONTEND=noninteractive apt-get -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" upgrade
+
+ # Add the pre-requisites for installing docker-ce
+ apt-get -y install ca-certificates curl software-properties-common apt-transport-https openssl >> $SETUPLOG 2>&1
+
+ # Grab the version from the os-release file
+ UVER=$(grep VERSION_ID /etc/os-release | awk -F '[ "]' '{print $2}')
+
+ # Nasty hack but required for now
+ if [ $INSTALLTYPE == 'MASTERONLY' ] || [ $INSTALLTYPE == 'EVALMODE' ]; then
+
+ #echo "Using pip3 to install python-dateutil for salt"
+ #pip3 install python-dateutil
+ # Install the repo for salt
+ wget --inet4-only -O - https://repo.saltstack.com/apt/ubuntu/$UVER/amd64/latest/SALTSTACK-GPG-KEY.pub | apt-key add -
+ wget --inet4-only -O - https://repo.saltstack.com/apt/ubuntu/$UVER/amd64/2019.2/SALTSTACK-GPG-KEY.pub | apt-key add -
+ echo "deb http://repo.saltstack.com/py3/ubuntu/$UVER/amd64/latest xenial main" > /etc/apt/sources.list.d/saltstack.list
+ echo "deb http://repo.saltstack.com/py3/ubuntu/$UVER/amd64/2019.2 xenial main" > /etc/apt/sources.list.d/saltstack2019.list
+
+ # Lets get the docker repo added
+ curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
+ add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
+
+ # Create a place for the keys
+ mkdir -p /opt/so/gpg
+ wget --inet4-only -O /opt/so/gpg/SALTSTACK-GPG-KEY.pub https://repo.saltstack.com/apt/ubuntu/$UVER/amd64/latest/SALTSTACK-GPG-KEY.pub
+ wget --inet4-only -O /opt/so/gpg/docker.pub https://download.docker.com/linux/ubuntu/gpg
+ wget --inet4-only -O /opt/so/gpg/GPG-KEY-WAZUH https://packages.wazuh.com/key/GPG-KEY-WAZUH
+
+ # Get key and install wazuh
+ curl -s https://packages.wazuh.com/key/GPG-KEY-WAZUH | apt-key add -
+ # Add repo
+ echo "deb https://packages.wazuh.com/3.x/apt/ stable main" | tee /etc/apt/sources.list.d/wazuh.list
+
+ # Initialize the new repos
+ apt-get update >> $SETUPLOG 2>&1
+ # Need to add python packages here
+ apt-get -y install salt-minion=2019.2.2+ds-1 salt-common=2019.2.2+ds-1 python3-dateutil >> $SETUPLOG 2>&1
+ apt-mark hold salt-minion salt-common
+
+ else
+
+ # Copy down the gpg keys and install them from the master
+ mkdir $TMP/gpg
+ scp socore@$MSRV:/opt/so/gpg/* $TMP/gpg
+ apt-key add $TMP/gpg/SALTSTACK-GPG-KEY.pub
+ apt-key add $TMP/gpg/GPG-KEY-WAZUH
+ echo "deb http://repo.saltstack.com/apt/ubuntu/$UVER/amd64/latest xenial main" > /etc/apt/sources.list.d/saltstack.list
+ echo "deb https://packages.wazuh.com/3.x/apt/ stable main" | tee /etc/apt/sources.list.d/wazuh.list
+ # Initialize the new repos
+ apt-get update >> $SETUPLOG 2>&1
+ # Need to add python dateutil here
+ apt-get -y install salt-minion=2019.2.2+ds-1 salt-common=2019.2.2+ds-1 >> $SETUPLOG 2>&1
+ apt-mark hold salt-minion salt-common
+
+ fi
+
+ fi
+
+}
+
+salt_checkin() {
+ # Apply CA/SSL states; on master/eval also restart salt and prime the mine.
+ if [ $INSTALLTYPE == 'MASTERONLY' ] || [ $INSTALLTYPE == 'EVALMODE' ]; then
+ echo "Building Certificate Authority"
+ salt-call state.apply ca >> $SETUPLOG 2>&1
+ echo " *** Restarting Salt to fix any SSL errors. ***"
+ service salt-master restart >> $SETUPLOG 2>&1
+ sleep 5
+ service salt-minion restart >> $SETUPLOG 2>&1
+ sleep 15
+ echo " Applyng a mine hack "
+ sudo salt '*' mine.send x509.get_pem_entries glob_path=/etc/pki/ca.crt >> $SETUPLOG 2>&1
+ echo " Applying SSL state "
+ salt-call state.apply ssl >> $SETUPLOG 2>&1
+ echo "Still Working... Hang in there"
+ #salt-call state.highstate
+ # NOTE(review): mine.send is forced here because the ca.crt mine entry is not populated on first run.
+ else
+
+ # Non-master nodes only need the CA and SSL states applied here.
+ salt-call state.apply ca >> $SETUPLOG 2>&1
+ salt-call state.apply ssl >> $SETUPLOG 2>&1
+ #salt-call state.highstate >> $SETUPLOG 2>&1
+
+ fi
+
+}
+
+salt_checkin_message() {
+ # Print a banner warning the user that the upcoming salt runs take a while.
+ # Warn the user that this might take a while
+ echo "####################################################"
+ echo "## ##"
+ echo "## Applying and Installing everything ##"
+ echo "## (This will take a while) ##"
+ echo "## ##"
+ echo "####################################################"
+
+}
+
+salt_firstcheckin() {
+
+ # First check-in: run highstate; this also submits the minion key to the master.
+ salt-call state.highstate >> $SETUPLOG 2>&1
+
+}
+
+salt_master_directories() {
+ # Stage the salt state and pillar trees under /opt/so/saltstack.
+ # Create salt master directories
+ mkdir -p /opt/so/saltstack/salt
+ mkdir -p /opt/so/saltstack/pillar
+
+ # Copy over the salt code and templates. NOTE(review): relative 'pillar/' and
+ # 'salt/' sources assume the cwd is the setup checkout root -- confirm caller.
+ cp -R pillar/* /opt/so/saltstack/pillar/
+ chmod +x /opt/so/saltstack/pillar/firewall/addfirewall.sh
+ chmod +x /opt/so/saltstack/pillar/data/addtotab.sh
+ cp -R salt/* /opt/so/saltstack/salt/
+
+}
+
+salt_install_mysql_deps() {
+ # Install the MySQL client libraries salt's mysql modules need on this OS.
+ if [ $OS == 'centos' ]; then
+ yum -y install mariadb-devel
+ elif [ $OS == 'ubuntu' ]; then
+ apt-get -y install libmysqlclient-dev python3-mysqldb
+ fi
+
+}
+
+sensor_pillar() {
+
+ SENSORPILLARPATH=$TMP/pillar/sensors
+ if [ ! -d $SENSORPILLARPATH ]; then
+ mkdir -p $SENSORPILLARPATH
+ fi
+
+ # Create the sensor pillar
+ touch $SENSORPILLARPATH/$MINION_ID.sls
+ echo "sensor:" > $SENSORPILLARPATH/$MINION_ID.sls
+ echo " interface: bond0" >> $SENSORPILLARPATH/$MINION_ID.sls
+ echo " mainip: $MAINIP" >> $SENSORPILLARPATH/$MINION_ID.sls
+ echo " mainint: $MAININT" >> $SENSORPILLARPATH/$MINION_ID.sls
+ if [ $NSMSETUP == 'ADVANCED' ]; then
+ echo " bro_pins:" >> $SENSORPILLARPATH/$MINION_ID.sls
+ for PIN in $BROPINS; do
+ PIN=$(echo $PIN | cut -d\" -f2)
+ echo " - $PIN" >> $SENSORPILLARPATH/$MINION_ID.sls
+ done
+ echo " suripins:" >> $SENSORPILLARPATH/$MINION_ID.sls
+ for SPIN in $SURIPINS; do
+ SPIN=$(echo $SPIN | cut -d\" -f2)
+ echo " - $SPIN" >> $SENSORPILLARPATH/$MINION_ID.sls
+ done
+ else
+ echo " bro_lbprocs: $BASICBRO" >> $SENSORPILLARPATH/$MINION_ID.sls
+ echo " suriprocs: $BASICSURI" >> $SENSORPILLARPATH/$MINION_ID.sls
+ fi
+ echo " brobpf:" >> $SENSORPILLARPATH/$MINION_ID.sls
+ echo " pcapbpf:" >> $SENSORPILLARPATH/$MINION_ID.sls
+ echo " nidsbpf:" >> $SENSORPILLARPATH/$MINION_ID.sls
+ echo " master: $MSRV" >> $SENSORPILLARPATH/$MINION_ID.sls
+ echo " mtu: $MTU" >> $SENSORPILLARPATH/$MINION_ID.sls
+ if [ $HNSENSOR != 'inherit' ]; then
+ echo " hnsensor: $HNSENSOR" >> $SENSORPILLARPATH/$MINION_ID.sls
+ fi
+ echo " access_key: $ACCESS_KEY" >> $SENSORPILLARPATH/$MINION_ID.sls
+ echo " access_secret: $ACCESS_SECRET" >> $SENSORPILLARPATH/$MINION_ID.sls
+
+}
+
+set_environment_var() {
+ # $1 holds a NAME=value pair; export it now and persist it for future logins.
+ echo "Setting environment variable: $1"
+ # NOTE(review): appends unconditionally -- re-running setup can duplicate entries.
+ export "$1"
+ echo "$1" >> /etc/environment
+
+}
+
+set_hostname() {
+ # Set the static hostname and rebuild /etc/hosts for local name resolution.
+ hostnamectl set-hostname --static $HOSTNAME
+ echo "127.0.0.1 $HOSTNAME $HOSTNAME.localdomain localhost localhost.localdomain localhost4 localhost4.localdomain" > /etc/hosts
+ echo "::1 localhost localhost.localdomain localhost6 localhost6.localdomain6" >> /etc/hosts
+ echo $HOSTNAME > /etc/hostname
+ # BUGFIX: was '||' (always true, so this ran on masters too); '&&' limits
+ # the master-IP fallback entry to non-master, non-eval nodes as intended.
+ if [ $INSTALLTYPE != 'MASTERONLY' ] && [ $INSTALLTYPE != 'EVALMODE' ]; then
+ if [[ $TESTHOST = *"not found"* ]] || [[ $TESTHOST = *"connection timed out"* ]]; then
+ if ! grep -q $MSRVIP /etc/hosts; then
+ echo "$MSRVIP $MSRV" >> /etc/hosts
+ fi
+ fi
+ fi
+}
+
+set_initial_firewall_policy() {
+
+ get_main_ip
+ if [ $INSTALLTYPE == 'MASTERONLY' ]; then
+ printf " - $MAINIP\n" >> /opt/so/saltstack/pillar/firewall/minions.sls
+ printf " - $MAINIP\n" >> /opt/so/saltstack/pillar/firewall/masterfw.sls
+ /opt/so/saltstack/pillar/data/addtotab.sh mastertab $MINION_ID $MAINIP $CPUCORES $RANDOMUID $MAININT $FSROOT $FSNSM
+ fi
+
+ if [ $INSTALLTYPE == 'EVALMODE' ]; then
+ printf " - $MAINIP\n" >> /opt/so/saltstack/pillar/firewall/minions.sls
+ printf " - $MAINIP\n" >> /opt/so/saltstack/pillar/firewall/masterfw.sls
+ printf " - $MAINIP\n" >> /opt/so/saltstack/pillar/firewall/forward_nodes.sls
+ printf " - $MAINIP\n" >> /opt/so/saltstack/pillar/firewall/storage_nodes.sls
+ /opt/so/saltstack/pillar/data/addtotab.sh evaltab $MINION_ID $MAINIP $CPUCORES $RANDOMUID $MAININT $FSROOT $FSNSM bond0
+ fi
+
+ if [ $INSTALLTYPE == 'SENSORONLY' ]; then
+ ssh -i /root/.ssh/so.key socore@$MSRV sudo /opt/so/saltstack/pillar/firewall/addfirewall.sh minions $MAINIP
+ ssh -i /root/.ssh/so.key socore@$MSRV sudo /opt/so/saltstack/pillar/firewall/addfirewall.sh forward_nodes $MAINIP
+ ssh -i /root/.ssh/so.key socore@$MSRV sudo /opt/so/saltstack/pillar/data/addtotab.sh sensorstab $MINION_ID $MAINIP $CPUCORES $RANDOMUID $MAININT $FSROOT $FSNSM bond0
+ fi
+
+ if [ $INSTALLTYPE == 'STORAGENODE' ]; then
+ ssh -i /root/.ssh/so.key socore@$MSRV sudo /opt/so/saltstack/pillar/firewall/addfirewall.sh minions $MAINIP
+ ssh -i /root/.ssh/so.key socore@$MSRV sudo /opt/so/saltstack/pillar/firewall/addfirewall.sh storage_nodes $MAINIP
+ ssh -i /root/.ssh/so.key socore@$MSRV sudo /opt/so/saltstack/pillar/data/addtotab.sh nodestab $MINION_ID $MAINIP $CPUCORES $RANDOMUID $MAININT $FSROOT $FSNSM
+ fi
+
+ if [ $INSTALLTYPE == 'PARSINGNODE' ]; then
+ echo "blah"
+ fi
+
+ if [ $INSTALLTYPE == 'HOTNODE' ]; then
+ echo "blah"
+ fi
+
+ if [ $INSTALLTYPE == 'WARMNODE' ]; then
+ echo "blah"
+ fi
+
+}
+
+set_node_type() {
+ # Map $INSTALLTYPE to the NODETYPE value consumed by node_pillar.
+ # Determine the node type based on the whiptail (whiplash.sh) choice
+ if [ $INSTALLTYPE == 'STORAGENODE' ] || [ $INSTALLTYPE == 'EVALMODE' ]; then
+ NODETYPE='storage'
+ fi
+ if [ $INSTALLTYPE == 'PARSINGNODE' ]; then
+ NODETYPE='parser'
+ fi
+ if [ $INSTALLTYPE == 'HOTNODE' ]; then
+ NODETYPE='hot'
+ fi
+ if [ $INSTALLTYPE == 'WARMNODE' ]; then
+ NODETYPE='warm'
+ fi
+
+}
+
+set_updates() {
+ echo "MASTERUPDATES is $MASTERUPDATES"
+ if [ $MASTERUPDATES == 'MASTER' ]; then
+ if [ $OS == 'centos' ]; then
+ if ! grep -q $MSRV /etc/yum.conf; then
+ echo "proxy=http://$MSRV:3142" >> /etc/yum.conf
+ fi
+ # port 3142 is presumably an apt-cacher-ng cache on the master -- TODO confirm
+ else
+
+ # Set it up so the updates roll through the master
+ echo "Acquire::http::Proxy \"http://$MSRV:3142\";" > /etc/apt/apt.conf.d/00Proxy
+ echo "Acquire::https::Proxy \"http://$MSRV:3142\";" >> /etc/apt/apt.conf.d/00Proxy
+ # NOTE(review): unlike the yum branch, this overwrites 00Proxy unconditionally.
+ fi
+ fi
+}
+
+update_sudoers() {
+ # Idempotently grant socore passwordless sudo for key acceptance and the firewall/tab helper scripts.
+ if ! grep -qE '^socore\ ALL=\(ALL\)\ NOPASSWD:(\/usr\/bin\/salt\-key|\/opt\/so\/saltstack)' /etc/sudoers; then
+ # Update Sudoers so that socore can accept keys without a password
+ echo "socore ALL=(ALL) NOPASSWD:/usr/bin/salt-key" | sudo tee -a /etc/sudoers
+ echo "socore ALL=(ALL) NOPASSWD:/opt/so/saltstack/pillar/firewall/addfirewall.sh" | sudo tee -a /etc/sudoers
+ echo "socore ALL=(ALL) NOPASSWD:/opt/so/saltstack/pillar/data/addtotab.sh" | sudo tee -a /etc/sudoers
+ else
+ echo "User socore already granted sudo privileges"
+ fi
+ # NOTE(review): appends without visudo syntax validation -- a malformed line breaks sudo.
+}
diff --git a/setup/so-setup.sh b/setup/so-setup.sh
new file mode 100644
index 000000000..837cf6d6e
--- /dev/null
+++ b/setup/so-setup.sh
@@ -0,0 +1,627 @@
+#!/bin/bash
+
+# Copyright 2014,2015,2016,2017,2018,2019 Security Onion Solutions, LLC
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+
+# Source the other pieces of the setup
+source functions.sh
+source whiplash.sh
+
+# Global Variables
+HOSTNAME=$(cat /etc/hostname)
+MINION_ID=$(echo $HOSTNAME | awk -F. {'print $1'})
+TOTAL_MEM=`grep MemTotal /proc/meminfo | awk '{print $2}' | sed -r 's/.{3}$//'`
+NICS=$(ip link | awk -F: '$0 !~ "lo|vir|veth|br|docker|wl|^[^0-9]"{print $2 " \"" "Interface" "\"" " OFF"}')
+CPUCORES=$(cat /proc/cpuinfo | grep processor | wc -l)
+LISTCORES=$(cat /proc/cpuinfo | grep processor | awk '{print $3 " \"" "core" "\""}')
+RANDOMUID=$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 16 | head -n 1)
+NODE_ES_PORT="9200"
+SETUPLOG="/root/sosetup.log"
+# End Global Variables
+
+# Reset the Install Log
+date -u >$SETUPLOG 2>&1
+
+# Check for prerequisites
+got_root
+detect_os
+
+if [ "$OS" == 'ubuntu' ]; then
+ # Override the Ubuntu whiptail color palette (quoted $OS: '[' errors out on an empty operand)
+ update-alternatives --set newt-palette /etc/newt/palette.original
+fi
+
+# Question Time
+if (whiptail_you_sure); then
+
+ # Create a temp dir to get started
+ install_prep
+
+ # Determine if this is a network install or ISO install
+
+ # Let folks know they need their management interface already set up.
+ whiptail_network_notice
+
+ # Set the hostname to reduce errors
+ whiptail_set_hostname
+
+ # Go ahead and gen the keys so we can use them for any sensor type - Disabled for now
+ #minio_generate_keys
+
+ # What kind of install are we doing?
+ whiptail_install_type
+
+ # How do we want to handle OS patching? manual, auto or scheduled days and hours
+ whiptail_patch_schedule
+ case $PATCHSCHEDULE in
+ 'New Schedule')
+ whiptail_patch_schedule_select_days
+ whiptail_patch_schedule_select_hours
+ whiptail_patch_name_new_schedule
+ patch_schedule_os_new
+ ;;
+ 'Import Schedule')
+ whiptail_patch_schedule_import
+ ;;
+ Automatic)
+ PATCHSCHEDULENAME=auto
+ ;;
+ Manual)
+ PATCHSCHEDULENAME=manual
+ ;;
+ esac
+
+ ####################
+ ## Master ##
+ ####################
+
+ if [ $INSTALLTYPE == 'MASTERONLY' ]; then
+
+ # Would you like to do an advanced install?
+ whiptail_master_adv
+
+ # Pick the Management NIC
+ whiptail_management_nic
+
+ # Choose Zeek or Community NSM
+ whiptail_bro_version
+
+ # Select Snort or Suricata
+ whiptail_nids
+
+ # Snag the HOME_NET
+ whiptail_homenet_master
+
+ # Pick your Ruleset
+ whiptail_rule_setup
+
+ # Get the code if it isn't ET Open
+ if [ $RULESETUP != 'ETOPEN' ]; then
+ # Get the code
+ whiptail_oinkcode
+ fi
+
+ # Find out how to handle updates
+ whiptail_master_updates
+ whiptail_enable_components
+ process_components
+
+ # Do Advanced Setup if they chose it
+ if [ $MASTERADV == 'ADVANCED' ]; then
+ # Ask which bro logs to enable - Need to add Suricata check
+ if [ $BROVERSION != 'SURICATA' ]; then
+ whiptail_master_adv_service_brologs
+ fi
+ fi
+
+ whiptail_create_socore_user
+ SCMATCH=no
+ while [ $SCMATCH != yes ]; do
+ whiptail_create_socore_user_password1
+ whiptail_create_socore_user_password2
+ check_socore_pass
+ done
+
+ # Last Chance to back out
+ whiptail_make_changes
+ set_hostname
+ generate_passwords
+ auth_pillar
+ clear_master
+ mkdir -p /nsm
+ get_filesystem_root
+ get_filesystem_nsm
+ # Enable Bro Logs
+ bro_logs_enabled
+
+ # Figure out the main IP address
+ get_main_ip
+
+ # Add the user so we can sit back and relax
+ #echo ""
+ #echo "**** Please set a password for socore. You will use this password when setting up other Nodes/Sensors"
+ #echo ""
+ add_socore_user_master
+
+ # Install salt and dependencies
+ {
+ sleep 0.5
+ #install_pip3 >> $SETUPLOG 2>&1
+ echo -e "XXX\n1\nInstalling and configuring Salt... \nXXX"
+ echo " ** Installing Salt and Dependencies **" >> $SETUPLOG
+ salt_install_mysql_deps >> $SETUPLOG 2>&1
+ saltify >> $SETUPLOG 2>&1
+ echo -e "XXX\n5\nInstalling Docker... \nXXX"
+ docker_install >> $SETUPLOG 2>&1
+ echo -e "XXX\n10\nConfiguring Salt Master... \nXXX"
+ echo " ** Configuring Minion **" >> $SETUPLOG
+ configure_minion master >> $SETUPLOG 2>&1
+ echo " ** Installing Salt Master **" >> $SETUPLOG
+ install_master >> $SETUPLOG 2>&1
+ salt_master_directories >> $SETUPLOG 2>&1
+ update_sudoers >> $SETUPLOG 2>&1
+ chown_salt_master >> $SETUPLOG 2>&1
+ es_heapsize >> $SETUPLOG 2>&1
+ ls_heapsize >> $SETUPLOG 2>&1
+ echo -e "XXX\n25\nConfiguring Default Pillars... \nXXX"
+ master_static >> $SETUPLOG 2>&1
+ echo "** Generating the master pillar **" >> $SETUPLOG
+ master_pillar >> $SETUPLOG 2>&1
+ echo "** Generating the patch pillar **" >> $SETUPLOG
+ patch_pillar >> $SETUPLOG 2>&1
+ echo -e "XXX\n30\nAccepting Salt Keys... \nXXX"
+ echo -e "XXX\n24\nCopying Minion Pillars to Master... \nXXX"
+ copy_minion_tmp_files >> $SETUPLOG 2>&1
+ # Do a checkin to push the key up
+ echo "** Pushing the key up to Master **" >> $SETUPLOG
+ salt_firstcheckin >> $SETUPLOG 2>&1
+ # Accept the Master Key
+ echo "** Accepting the key on the master **" >> $SETUPLOG
+ accept_salt_key_local >> $SETUPLOG 2>&1
+ echo -e "XXX\n35\nConfiguring Firewall... \nXXX"
+ # Open the firewall
+ echo "** Setting the initial firewall policy **" >> $SETUPLOG
+ set_initial_firewall_policy >> $SETUPLOG 2>&1
+ # Do the big checkin but first let them know it will take a bit.
+ echo -e "XXX\n40\nGenerating CA... \nXXX"
+ salt_checkin >> $SETUPLOG 2>&1
+ salt-call state.apply ca >> $SETUPLOG 2>&1
+ salt-call state.apply ssl >> $SETUPLOG 2>&1
+ echo -e "XXX\n43\nInstalling Common Components... \nXXX"
+ salt-call state.apply common >> $SETUPLOG 2>&1
+ echo -e "XXX\n45\nApplying firewall rules... \nXXX"
+ salt-call state.apply firewall >> $SETUPLOG 2>&1
+ salt-call state.apply master >> $SETUPLOG 2>&1
+ salt-call state.apply idstools >> $SETUPLOG 2>&1
+ echo -e "XXX\n40\nInstalling Redis... \nXXX"
+ salt-call state.apply redis >> $SETUPLOG 2>&1
+ if [[ $OSQUERY == '1' ]]; then
+ echo -e "XXX\n41\nInstalling MySQL... \nXXX"
+ salt-call state.apply mysql >> $SETUPLOG 2>&1
+ fi
+ echo -e "XXX\n45\nInstalling Elastic Components... \nXXX"
+ salt-call state.apply elasticsearch >> $SETUPLOG 2>&1
+ salt-call state.apply logstash >> $SETUPLOG 2>&1
+ salt-call state.apply kibana >> $SETUPLOG 2>&1
+ salt-call state.apply elastalert >> $SETUPLOG 2>&1
+ if [[ $WAZUH == '1' ]]; then
+ echo -e "XXX\n68\nInstalling Wazuh... \nXXX"
+ salt-call state.apply wazuh >> $SETUPLOG 2>&1
+ fi
+ echo -e "XXX\n75\nInstalling Filebeat... \nXXX"
+ salt-call state.apply filebeat >> $SETUPLOG 2>&1
+ salt-call state.apply utility >> $SETUPLOG 2>&1
+ salt-call state.apply schedule >> $SETUPLOG 2>&1
+ if [[ $OSQUERY == '1' ]]; then
+ echo -e "XXX\n79\nInstalling Fleet... \nXXX"
+ salt-call state.apply fleet >> $SETUPLOG 2>&1
+ salt-call state.apply launcher >> $SETUPLOG 2>&1
+ fi
+ echo -e "XXX\n85\nConfiguring SOctopus... \nXXX"
+ salt-call state.apply soctopus >> $SETUPLOG 2>&1
+ if [[ $THEHIVE == '1' ]]; then
+ echo -e "XXX\n87\nInstalling TheHive... \nXXX"
+ salt-call state.apply hive >> $SETUPLOG 2>&1
+ fi
+ if [[ $PLAYBOOK == '1' ]]; then
+ echo -e "XXX\n89\nInstalling Playbook... \nXXX"
+ salt-call state.apply playbook >> $SETUPLOG 2>&1
+ fi
+ echo -e "XXX\n75\nEnabling Checking at Boot... \nXXX"
+ checkin_at_boot >> $SETUPLOG 2>&1
+ echo -e "XXX\n95\nVerifying Install... \nXXX"
+ salt-call state.highstate >> $SETUPLOG 2>&1
+
+ } |whiptail --title "Hybrid Hunter Install" --gauge "Please wait while installing" 6 60 0
+ GOODSETUP=$(tail -10 $SETUPLOG | grep Failed | awk '{ print $2}')
+ if [[ $GOODSETUP == '0' ]]; then
+ whiptail_setup_complete
+ if [[ $THEHIVE == '1' ]]; then
+ check_hive_init_then_reboot
+ else
+ shutdown -r now
+ fi
+ else
+ whiptail_setup_failed
+ shutdown -r now
+ fi
+
+ fi
+
+ ####################
+ ## Sensor ##
+ ####################
+
+ if [ $INSTALLTYPE == 'SENSORONLY' ]; then
+ whiptail_management_nic
+ filter_nics
+ whiptail_bond_nics
+ whiptail_management_server
+ whiptail_master_updates
+ set_updates
+ whiptail_homenet_sensor
+ whiptail_sensor_config
+ # Calculate lbprocs so we can call it in the prompts
+ calculate_useable_cores
+ if [ $NSMSETUP == 'ADVANCED' ]; then
+ whiptail_bro_pins
+ whiptail_suricata_pins
+ whiptail_bond_nics_mtu
+ else
+ whiptail_basic_bro
+ whiptail_basic_suri
+ fi
+ whiptail_make_changes
+ set_hostname
+ clear_master
+ mkdir -p /nsm
+ get_filesystem_root
+ get_filesystem_nsm
+ copy_ssh_key
+ {
+ sleep 0.5
+ echo -e "XXX\n0\nSetting Initial Firewall Policy... \nXXX"
+ set_initial_firewall_policy >> $SETUPLOG 2>&1
+ #echo -e "XXX\n1\nInstalling pip3... \nXXX"
+ #install_pip3 >> $SETUPLOG 2>&1
+ echo -e "XXX\n3\nCreating Bond Interface... \nXXX"
+ network_setup >> $SETUPLOG 2>&1
+ echo -e "XXX\n4\nGenerating Sensor Pillar... \nXXX"
+ sensor_pillar >> $SETUPLOG 2>&1
+ echo "** Generating the patch pillar **" >> $SETUPLOG
+ patch_pillar >> $SETUPLOG 2>&1
+ echo -e "XXX\n5\nInstalling Salt Components... \nXXX"
+ saltify >> $SETUPLOG 2>&1
+ echo -e "XXX\n20\nInstalling Docker... \nXXX"
+ docker_install >> $SETUPLOG 2>&1
+ echo -e "XXX\n22\nConfiguring Salt Minion... \nXXX"
+ configure_minion sensor >> $SETUPLOG 2>&1
+ echo -e "XXX\n24\nCopying Minion Pillars to Master... \nXXX"
+ copy_minion_tmp_files >> $SETUPLOG 2>&1
+ echo -e "XXX\n25\nSending Salt Key to Master... \nXXX"
+ salt_firstcheckin >> $SETUPLOG 2>&1
+ echo -e "XXX\n26\nTelling the Master to Accept Key... \nXXX"
+ # Accept the Salt Key
+ accept_salt_key_remote >> $SETUPLOG 2>&1
+ echo -e "XXX\n27\nApplying SSL Certificates... \nXXX"
+ salt-call state.apply ca >> $SETUPLOG 2>&1
+ salt-call state.apply ssl >> $SETUPLOG 2>&1
+ echo -e "XXX\n35\nInstalling Core Components... \nXXX"
+ salt-call state.apply common >> $SETUPLOG 2>&1
+ salt-call state.apply firewall >> $SETUPLOG 2>&1
+ echo -e "XXX\n50\nInstalling PCAP... \nXXX"
+ salt-call state.apply pcap >> $SETUPLOG 2>&1
+ echo -e "XXX\n60\nInstalling IDS components... \nXXX"
+ salt-call state.apply suricata >> $SETUPLOG 2>&1
+ echo -e "XXX\n80\nVerifying Install... \nXXX"
+ salt-call state.highstate >> $SETUPLOG 2>&1
+ checkin_at_boot >> $SETUPLOG 2>&1
+ } |whiptail --title "Hybrid Hunter Install" --gauge "Please wait while installing" 6 60 0
+ GOODSETUP=$(tail -10 $SETUPLOG | grep Failed | awk '{ print $2}')
+ if [[ $GOODSETUP == '0' ]]; then
+ whiptail_setup_complete
+ shutdown -r now
+ else
+ whiptail_setup_failed
+ shutdown -r now
+ fi
+ fi
+
+ #######################
+ ## Eval Mode ##
+ #######################
+
+ if [ $INSTALLTYPE == 'EVALMODE' ]; then
+ # Select the management NIC
+ whiptail_management_nic
+
+ # Filter out the management NIC
+ filter_nics
+
+ # Select which NICs are in the bond
+ whiptail_bond_nics
+
+ # Snag the HOME_NET
+ whiptail_homenet_master
+ whiptail_eval_adv_warning
+ whiptail_enable_components
+
+ # Set a bunch of stuff since this is eval
+ es_heapsize
+ ls_heapsize
+ NODE_ES_HEAP_SIZE="600m"
+ NODE_LS_HEAP_SIZE="500m"
+ LSPIPELINEWORKERS=1
+ LSPIPELINEBATCH=125
+ LSINPUTTHREADS=1
+ LSINPUTBATCHCOUNT=125
+ RULESETUP=ETOPEN
+ NSMSETUP=BASIC
+ NIDS=Suricata
+ BROVERSION=ZEEK
+ CURCLOSEDAYS=30
+ process_components
+ whiptail_create_socore_user
+ SCMATCH=no
+ while [ $SCMATCH != yes ]; do
+ whiptail_create_socore_user_password1
+ whiptail_create_socore_user_password2
+ check_socore_pass
+ done
+ whiptail_make_changes
+ set_hostname
+ generate_passwords
+ auth_pillar
+ clear_master
+ mkdir -p /nsm
+ get_filesystem_root
+ get_filesystem_nsm
+ get_log_size_limit
+ get_main_ip
+ # Add the user so we can sit back and relax
+ add_socore_user_master
+ {
+ sleep 0.5
+ echo -e "XXX\n0\nCreating Bond Interface... \nXXX"
+ network_setup >> $SETUPLOG 2>&1
+ #install_pip3 >> $SETUPLOG 2>&1
+ echo -e "XXX\n1\nInstalling mysql dependencies for saltstack... \nXXX"
+ salt_install_mysql_deps >> $SETUPLOG 2>&1
+ echo -e "XXX\n1\nInstalling saltstack... \nXXX"
+ saltify >> $SETUPLOG 2>&1
+ echo -e "XXX\n3\nInstalling docker... \nXXX"
+ docker_install >> $SETUPLOG 2>&1
+ echo -e "XXX\n5\nInstalling master code... \nXXX"
+ install_master >> $SETUPLOG 2>&1
+ echo -e "XXX\n6\nCopying salt code... \nXXX"
+ salt_master_directories >> $SETUPLOG 2>&1
+ echo -e "XXX\n6\nupdating suduers... \nXXX"
+ update_sudoers >> $SETUPLOG 2>&1
+ echo -e "XXX\n7\nFixing some permissions... \nXXX"
+ chown_salt_master >> $SETUPLOG 2>&1
+ echo -e "XXX\n7\nCreating the static pillar... \nXXX"
+ # Set the static values
+ master_static >> $SETUPLOG 2>&1
+ echo -e "XXX\n7\nCreating the master pillar... \nXXX"
+ master_pillar >> $SETUPLOG 2>&1
+ echo "** Generating the patch pillar **" >> $SETUPLOG
+ patch_pillar >> $SETUPLOG 2>&1
+ echo -e "XXX\n7\nConfiguring minion... \nXXX"
+ configure_minion eval >> $SETUPLOG 2>&1
+ echo -e "XXX\n7\nSetting the node type to eval... \nXXX"
+ set_node_type >> $SETUPLOG 2>&1
+ echo -e "XXX\n7\nStorage node pillar... \nXXX"
+ node_pillar >> $SETUPLOG 2>&1
+ echo -e "XXX\n8\nCreating firewall policies... \nXXX"
+ set_initial_firewall_policy >> $SETUPLOG 2>&1
+ echo -e "XXX\n24\nCopying Minion Pillars to Master... \nXXX"
+ copy_minion_tmp_files >> $SETUPLOG 2>&1
+ echo -e "XXX\n10\nRegistering agent... \nXXX"
+ salt_firstcheckin >> $SETUPLOG 2>&1
+ echo -e "XXX\n11\nAccepting Agent... \nXXX"
+ accept_salt_key_local >> $SETUPLOG 2>&1
+ echo -e "XXX\n12\nRunning the SSL states... \nXXX"
+ salt_checkin >> $SETUPLOG 2>&1
+ salt-call state.apply ca >> $SETUPLOG 2>&1
+ salt-call state.apply ssl >> $SETUPLOG 2>&1
+ echo -e "XXX\n15\nInstalling core components... \nXXX"
+ salt-call state.apply common >> $SETUPLOG 2>&1
+ echo -e "XXX\n18\nInitializing firewall rules... \nXXX"
+ salt-call state.apply firewall >> $SETUPLOG 2>&1
+ echo -e "XXX\n25\nInstalling master components... \nXXX"
+ salt-call state.apply master >> $SETUPLOG 2>&1
+ salt-call state.apply idstools >> $SETUPLOG 2>&1
+ if [[ $OSQUERY == '1' ]]; then
+ salt-call state.apply mysql >> $SETUPLOG 2>&1
+ fi
+ echo -e "XXX\n35\nInstalling ElasticSearch... \nXXX"
+ salt-call state.apply elasticsearch >> $SETUPLOG 2>&1
+ echo -e "XXX\n40\nInstalling Logstash... \nXXX"
+ salt-call state.apply logstash >> $SETUPLOG 2>&1
+ echo -e "XXX\n45\nInstalling Kibana... \nXXX"
+ salt-call state.apply kibana >> $SETUPLOG 2>&1
+ echo -e "XXX\n50\nInstalling pcap... \nXXX"
+ salt-call state.apply pcap >> $SETUPLOG 2>&1
+ echo -e "XXX\n52\nInstalling Suricata... \nXXX"
+ salt-call state.apply suricata >> $SETUPLOG 2>&1
+ echo -e "XXX\n54\nInstalling Zeek... \nXXX"
+ salt-call state.apply bro >> $SETUPLOG 2>&1
+ echo -e "XXX\n56\nInstalling curator... \nXXX"
+ salt-call state.apply curator >> $SETUPLOG 2>&1
+ echo -e "XXX\n58\nInstalling elastalert... \nXXX"
+ salt-call state.apply elastalert >> $SETUPLOG 2>&1
+ if [[ $OSQUERY == '1' ]]; then
+ echo -e "XXX\n60\nInstalling fleet... \nXXX"
+ salt-call state.apply fleet >> $SETUPLOG 2>&1
+ salt-call state.apply redis >> $SETUPLOG 2>&1
+ fi
+ if [[ $WAZUH == '1' ]]; then
+ echo -e "XXX\n65\nInstalling Wazuh components... \nXXX"
+ salt-call state.apply wazuh >> $SETUPLOG 2>&1
+ fi
+ echo -e "XXX\n85\nInstalling filebeat... \nXXX"
+ salt-call state.apply filebeat >> $SETUPLOG 2>&1
+ salt-call state.apply utility >> $SETUPLOG 2>&1
+ echo -e "XXX\n95\nInstalling misc components... \nXXX"
+ salt-call state.apply schedule >> $SETUPLOG 2>&1
+ salt-call state.apply soctopus >> $SETUPLOG 2>&1
+ if [[ $THEHIVE == '1' ]]; then
+ echo -e "XXX\n96\nInstalling The Hive... \nXXX"
+ salt-call state.apply hive >> $SETUPLOG 2>&1
+ fi
+ if [[ $PLAYBOOK == '1' ]]; then
+ echo -e "XXX\n97\nInstalling Playbook... \nXXX"
+ salt-call state.apply playbook >> $SETUPLOG 2>&1
+ fi
+ echo -e "XXX\n98\nSetting checkin to run on boot... \nXXX"
+ checkin_at_boot >> $SETUPLOG 2>&1
+ echo -e "XXX\n99\nVerifying Setup... \nXXX"
+ salt-call state.highstate >> $SETUPLOG 2>&1
+
+ } |whiptail --title "Hybrid Hunter Install" --gauge "Please wait while installing" 6 60 0
+ GOODSETUP=$(tail -10 $SETUPLOG | grep Failed | awk '{ print $2}')
+ if [ $OS == 'centos' ]; then
+ if [[ $GOODSETUP == '1' ]]; then
+ whiptail_setup_complete
+ if [[ $THEHIVE == '1' ]]; then
+ check_hive_init_then_reboot
+ else
+ shutdown -r now
+ fi
+ else
+ whiptail_setup_failed
+ shutdown -r now
+ fi
+ else
+ if [[ $GOODSETUP == '0' ]]; then
+ whiptail_setup_complete
+ if [[ $THEHIVE == '1' ]]; then
+ check_hive_init_then_reboot
+ else
+ shutdown -r now
+ fi
+ else
+ whiptail_setup_failed
+ shutdown -r now
+ fi
+ fi
+ fi
+
+ ###################
+ ## Nodes ##
+ ###################
+
+ if [ $INSTALLTYPE == 'STORAGENODE' ] || [ $INSTALLTYPE == 'PARSINGNODE' ] || [ $INSTALLTYPE == 'HOTNODE' ] || [ $INSTALLTYPE == 'WARMNODE' ]; then
+ whiptail_management_nic
+ whiptail_management_server
+ whiptail_master_updates
+ set_updates
+ get_log_size_limit
+ CURCLOSEDAYS=30
+ es_heapsize
+ ls_heapsize
+ whiptail_node_advanced
+ if [ $NODESETUP == 'NODEADVANCED' ]; then
+ whiptail_node_es_heap
+ whiptail_node_ls_heap
+ whiptail_node_ls_pipeline_worker
+ whiptail_node_ls_pipline_batchsize
+ whiptail_node_ls_input_threads
+ whiptail_node_ls_input_batch_count
+ whiptail_cur_close_days
+ whiptail_log_size_limit
+ else
+ NODE_ES_HEAP_SIZE=$ES_HEAP_SIZE
+ NODE_LS_HEAP_SIZE=$LS_HEAP_SIZE
+ LSPIPELINEWORKERS=$CPUCORES
+ LSPIPELINEBATCH=125
+ LSINPUTTHREADS=1
+ LSINPUTBATCHCOUNT=125
+ fi
+ whiptail_make_changes
+ set_hostname
+ clear_master
+ mkdir -p /nsm
+ get_filesystem_root
+ get_filesystem_nsm
+ copy_ssh_key
+ {
+ sleep 0.5
+ echo -e "XXX\n0\nSetting Initial Firewall Policy... \nXXX"
+ set_initial_firewall_policy >> $SETUPLOG 2>&1
+ #echo -e "XXX\n1\nInstalling pip3... \nXXX"
+ #install_pip3 >> $SETUPLOG 2>&1
+ echo -e "XXX\n5\nInstalling Salt Packages... \nXXX"
+ saltify >> $SETUPLOG 2>&1
+ echo -e "XXX\n20\nInstalling Docker... \nXXX"
+ docker_install >> $SETUPLOG 2>&1
+ echo -e "XXX\n30\nInitializing Minion... \nXXX"
+ configure_minion node >> $SETUPLOG 2>&1
+ set_node_type >> $SETUPLOG 2>&1
+ node_pillar >> $SETUPLOG 2>&1
+ echo "** Generating the patch pillar **" >> $SETUPLOG
+ patch_pillar >> $SETUPLOG 2>&1
+ echo -e "XXX\n24\nCopying Minion Pillars to Master... \nXXX"
+ copy_minion_tmp_files >> $SETUPLOG 2>&1
+ echo -e "XXX\n35\nSending and Accepting Salt Key... \nXXX"
+ salt_firstcheckin >> $SETUPLOG 2>&1
+ # Accept the Salt Key
+ accept_salt_key_remote >> $SETUPLOG 2>&1
+ echo -e "XXX\n40\nApplying SSL Certificates... \nXXX"
+ salt-call state.apply ca >> $SETUPLOG 2>&1
+ salt-call state.apply ssl >> $SETUPLOG 2>&1
+ echo -e "XXX\n50\nConfiguring Firewall... \nXXX"
+ salt-call state.apply common >> $SETUPLOG 2>&1
+ salt-call state.apply firewall >> $SETUPLOG 2>&1
+ echo -e "XXX\n70\nInstalling Elastic Components... \nXXX"
+ salt-call state.apply logstash >> $SETUPLOG 2>&1
+ salt-call state.apply elasticsearch >> $SETUPLOG 2>&1
+ salt-call state.apply curator >> $SETUPLOG 2>&1
+ salt-call state.apply filebeat >> $SETUPLOG 2>&1
+ echo -e "XXX\n90\nVerifying Install... \nXXX"
+ salt-call state.highstate >> $SETUPLOG 2>&1
+ checkin_at_boot >> $SETUPLOG 2>&1
+
+ } |whiptail --title "Hybrid Hunter Install" --gauge "Please wait while installing" 6 60 0
+ GOODSETUP=$(tail -10 $SETUPLOG | grep Failed | awk '{ print $2}')
+ if [[ $GOODSETUP == '0' ]]; then
+ whiptail_setup_complete
+ shutdown -r now
+ else
+ whiptail_setup_failed
+ shutdown -r now
+ fi
+
+ #set_initial_firewall_policy
+ #saltify
+ #docker_install
+ #configure_minion node
+ #set_node_type
+ #node_pillar
+ #copy_minion_pillar nodes
+ #salt_checkin
+ # Accept the Salt Key
+ #accept_salt_key_remote
+ # Do the big checkin but first let them know it will take a bit.
+ #salt_checkin_message
+ #salt_checkin
+ #checkin_at_boot
+
+ #whiptail_setup_complete
+ fi
+
+else
+ exit
+fi
diff --git a/setup/whiplash.sh b/setup/whiplash.sh
new file mode 100644
index 000000000..1806588ae
--- /dev/null
+++ b/setup/whiplash.sh
@@ -0,0 +1,611 @@
+###########################################
+## ##
+## Whiptail Menu Section ##
+## ##
+###########################################
+
+whiptail_basic_bro() {
+
+ BASICBRO=$(whiptail --title "Security Onion Setup" --inputbox \
+ "Enter the number of bro processes:" 10 60 $LBPROCS 3>&1 1>&2 2>&3)
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+}
+
+whiptail_basic_suri() {
+
+ BASICSURI=$(whiptail --title "Security Onion Setup" --inputbox \
+ "Enter the number of Suricata Processes:" 10 60 $LBPROCS 3>&1 1>&2 2>&3)
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+}
+
+whiptail_bro_pins() {
+
+ BROPINS=$(whiptail --noitem --title "Pin Bro CPUS" --checklist "Please Select $LBPROCS cores to pin Bro to:" 20 78 12 ${LISTCORES[@]} 3>&1 1>&2 2>&3 )
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+
+}
+
+whiptail_bro_version() {
+
+ BROVERSION=$(whiptail --title "Security Onion Setup" --radiolist "What tool would you like to use to generate meta data?" 20 78 4 "ZEEK" "Install Zeek (aka Bro)" ON \
+ "COMMUNITY" "Install Community NSM" OFF "SURICATA" "SUPER EXPERIMENTAL" OFF 3>&1 1>&2 2>&3)
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+}
+
+whiptail_bond_nics() {
+
+ BNICS=$(whiptail --title "NIC Setup" --checklist "Please add NICs to the Monitor Interface" 20 78 12 ${FNICS[@]} 3>&1 1>&2 2>&3 )
+
+ while [ -z "$BNICS" ]
+ do
+ BNICS=$(whiptail --title "NIC Setup" --checklist "Please add NICs to the Monitor Interface" 20 78 12 ${FNICS[@]} 3>&1 1>&2 2>&3 )
+ done
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+}
+
+whiptail_bond_nics_mtu() {
+
+ # Set the MTU on the monitor interface
+ MTU=$(whiptail --title "Security Onion Setup" --inputbox \
+ "Enter the MTU for the monitor NICs" 10 60 1500 3>&1 1>&2 2>&3)
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+}
+
+whiptail_cancel() {
+
+ whiptail --title "Security Onion Setup" --msgbox "Cancelling Setup. No changes have been made." 8 78
+ install_cleanup
+ exit
+
+}
+
+whiptail_check_exitstatus() {
+
+ if [ "$1" == '1' ]; then
+ echo "They hit cancel"
+ whiptail_cancel
+ fi
+
+}
+
+whiptail_create_socore_user() {
+
+ whiptail --title "Security Onion Setup" --msgbox "Set a password for the socore user. This account is used for adding sensors remotely." 8 78
+
+}
+
+whiptail_create_socore_user_password1() {
+
+ COREPASS1=$(whiptail --title "Security Onion Install" --passwordbox \
+ "Enter a password for user socore" 10 60 3>&1 1>&2 2>&3)
+
+}
+
+whiptail_create_socore_user_password2() {
+
+ COREPASS2=$(whiptail --title "Security Onion Install" --passwordbox \
+ "Re-enter a password for user socore" 10 60 3>&1 1>&2 2>&3)
+
+}
+
+whiptail_cur_close_days() {
+
+ CURCLOSEDAYS=$(whiptail --title "Security Onion Setup" --inputbox \
+ "Please specify the threshold (in days) at which Elasticsearch indices will be closed" 10 60 $CURCLOSEDAYS 3>&1 1>&2 2>&3)
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+}
+whiptail_enable_components() {
+ COMPONENTS=$(whiptail --title "Security Onion Setup" --checklist \
+ "Select Components to install" 20 78 8 \
+ "GRAFANA" "Enable Grafana for system monitoring" ON \
+ "OSQUERY" "Enable Fleet with osquery" ON \
+ "WAZUH" "Enable Wazuh" ON \
+ "THEHIVE" "Enable TheHive" ON \
+ "PLAYBOOK" "Enable Playbook" ON 3>&1 1>&2 2>&3 )
+}
+
+whiptail_eval_adv() {
+ EVALADVANCED=$(whiptail --title "Security Onion Setup" --radiolist \
+ "Choose your eval install:" 20 78 4 \
+ "BASIC" "Install basic components for evaluation" ON \
+ "ADVANCED" "Choose additional components to be installed" OFF 3>&1 1>&2 2>&3 )
+}
+
+whiptail_eval_adv_warning() {
+ whiptail --title "Security Onion Setup" --msgbox "Please keep in mind the more services that you enable the more RAM that is required." 8 78
+}
+
+whiptail_homenet_master() {
+
+ # Ask for the HOME_NET on the master
+ HNMASTER=$(whiptail --title "Security Onion Setup" --inputbox \
+ "Enter your HOME_NET separated by ," 10 60 10.0.0.0/8,192.168.0.0/16,172.16.0.0/12 3>&1 1>&2 2>&3)
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+}
+
+whiptail_homenet_sensor() {
+
+ # Ask to inherit from master
+ whiptail --title "Security Onion Setup" --yesno "Do you want to inherit the HOME_NET from the Master?" 8 78
+
+ local exitstatus=$?
+ if [ $exitstatus == 0 ]; then
+ HNSENSOR=inherit
+ else
+ HNSENSOR=$(whiptail --title "Security Onion Setup" --inputbox \
+ "Enter your HOME_NET separated by ," 10 60 10.0.0.0/8,192.168.0.0/16,172.16.0.0/12 3>&1 1>&2 2>&3)
+ fi
+
+}
+
+whiptail_install_type() {
+
+ # What kind of install are we doing?
+ INSTALLTYPE=$(whiptail --title "Security Onion Setup" --radiolist \
+ "Choose Install Type:" 20 78 14 \
+ "SENSORONLY" "Create a forward only sensor" ON \
+ "STORAGENODE" "Add a Storage Hot Node with parsing" OFF \
+ "MASTERONLY" "Start a new grid" OFF \
+ "PARSINGNODE" "TODO Add a dedicated Parsing Node" OFF \
+ "HOTNODE" "TODO Add a Hot Node (Storage Node without Parsing)" OFF \
+ "WARMNODE" "TODO Add a Warm Node to an existing Hot or Storage node" OFF \
+ "EVALMODE" "Evaluate all the things" OFF \
+ "WAZUH" "TODO Stand Alone Wazuh Node" OFF \
+ "STRELKA" "TODO Stand Alone Strelka Node" OFF \
+ "FLEET" "TODO Stand Alone Fleet OSQuery Node" OFF 3>&1 1>&2 2>&3 )
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+}
+
+whiptail_log_size_limit() {
+
+ LOG_SIZE_LIMIT=$(whiptail --title "Security Onion Setup" --inputbox \
+ "Please specify the amount of disk space (in GB) you would like to allocate for Elasticsearch data storage. \
+ By default, this is set to 85% of the disk space allotted for /nsm." 10 60 $LOG_SIZE_LIMIT 3>&1 1>&2 2>&3)
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+}
+
+
+whiptail_management_nic() {
+
+ MNIC=$(whiptail --title "NIC Setup" --radiolist "Please select your management NIC" 20 78 12 ${NICS[@]} 3>&1 1>&2 2>&3 )
+
+ while [ -z "$MNIC" ]
+ do
+ MNIC=$(whiptail --title "NIC Setup" --radiolist "Please select your management NIC" 20 78 12 ${NICS[@]} 3>&1 1>&2 2>&3 )
+ done
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+}
+
+whiptail_nids() {
+
+ NIDS=$(whiptail --title "Security Onion Setup" --radiolist \
+ "Choose which IDS to run:" 20 78 4 \
+ "Suricata" "Suricata 4.X" ON \
+ "Snort" "Snort 3.0 Beta" OFF 3>&1 1>&2 2>&3 )
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+}
+
+whiptail_oinkcode() {
+
+ OINKCODE=$(whiptail --title "Security Onion Setup" --inputbox \
+ "Enter your oinkcode" 10 60 XXXXXXX 3>&1 1>&2 2>&3)
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+}
+
+whiptail_make_changes() {
+
+ whiptail --title "Security Onion Setup" --yesno "We are going to set this machine up as a $INSTALLTYPE. Please hit YES to make changes or NO to cancel." 8 78
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+}
+
+whiptail_management_server() {
+
+ MSRV=$(whiptail --title "Security Onion Setup" --inputbox \
+ "Enter your Master Server HOSTNAME. It is CASE SENSITIVE!" 10 60 XXXX 3>&1 1>&2 2>&3)
+
+ # See if it resolves. Otherwise prompt to add to host file
+ TESTHOST=$(host $MSRV)
+
+ if [[ $TESTHOST = *"not found"* ]] || [[ $TESTHOST = *"connection timed out"* ]]; then
+ add_master_hostfile
+ fi
+
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+}
+
+# Ask if you want to do advanced setup of the Master
+whiptail_master_adv() {
+ MASTERADV=$(whiptail --title "Security Onion Setup" --radiolist \
+ "Choose what type of master install:" 20 78 4 \
+ "BASIC" "Install master with recommended settings" ON \
+ "ADVANCED" "Do additional configuration to the master" OFF 3>&1 1>&2 2>&3 )
+}
+
+# Ask which additional components to install
+whiptail_master_adv_service_brologs() {
+
+ BLOGS=$(whiptail --title "Security Onion Setup" --checklist "Please Select Logs to Send:" 24 78 12 \
+ "conn" "Connection Logging" ON \
+ "dce_rpc" "RPC Logs" ON \
+ "dhcp" "DHCP Logs" ON \
+ "dhcpv6" "DHCP IPv6 Logs" ON \
+ "dnp3" "DNP3 Logs" ON \
+ "dns" "DNS Logs" ON \
+ "dpd" "DPD Logs" ON \
+ "files" "Files Logs" ON \
+ "ftp" "FTP Logs" ON \
+ "http" "HTTP Logs" ON \
+ "intel" "Intel Hits Logs" ON \
+ "irc" "IRC Chat Logs" ON \
+ "kerberos" "Kerberos Logs" ON \
+ "modbus" "MODBUS Logs" ON \
+ "mqtt" "MQTT Logs" ON \
+ "notice" "Zeek Notice Logs" ON \
+ "ntlm" "NTLM Logs" ON \
+ "openvpn" "OPENVPN Logs" ON \
+ "pe" "PE Logs" ON \
+ "radius" "Radius Logs" ON \
+ "rfb" "RFB Logs" ON \
+ "rdp" "RDP Logs" ON \
+ "signatures" "Signatures Logs" ON \
+ "sip" "SIP Logs" ON \
+ "smb_files" "SMB Files Logs" ON \
+ "smb_mapping" "SMB Mapping Logs" ON \
+ "smtp" "SMTP Logs" ON \
+ "snmp" "SNMP Logs" ON \
+ "software" "Software Logs" ON \
+ "ssh" "SSH Logs" ON \
+ "ssl" "SSL Logs" ON \
+ "syslog" "Syslog Logs" ON \
+ "telnet" "Telnet Logs" ON \
+ "tunnel" "Tunnel Logs" ON \
+ "weird" "Zeek Weird Logs" ON \
+ "mysql" "MySQL Logs" ON \
+ "socks" "SOCKS Logs" ON \
+ "x509" "x.509 Logs" ON 3>&1 1>&2 2>&3 )
+}
+
+whiptail_network_notice() {
+
+ whiptail --title "Security Onion Setup" --yesno "Since this is a network install we assume the management interface, DNS, Hostname, etc are already set up. Hit YES to continue." 8 78
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+}
+
+whiptail_node_advanced() {
+
+ NODESETUP=$(whiptail --title "Security Onion Setup" --radiolist \
+ "What type of config would you like to use?:" 20 78 4 \
+ "NODEBASIC" "Install Storage Node with recommended settings" ON \
+ "NODEADVANCED" "Advanced Node Setup" OFF 3>&1 1>&2 2>&3 )
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+}
+
+whiptail_node_es_heap() {
+
+ es_heapsize
+ NODE_ES_HEAP_SIZE=$(whiptail --title "Security Onion Setup" --inputbox \
+ "\nEnter ES Heap Size: \n \n(Recommended value is pre-populated)" 10 60 $ES_HEAP_SIZE 3>&1 1>&2 2>&3)
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+}
+
+whiptail_node_ls_heap() {
+
+ ls_heapsize
+ NODE_LS_HEAP_SIZE=$(whiptail --title "Security Onion Setup" --inputbox \
+ "\nEnter LogStash Heap Size: \n \n(Recommended value is pre-populated)" 10 60 $LS_HEAP_SIZE 3>&1 1>&2 2>&3)
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+}
+
+whiptail_node_ls_pipeline_worker() {
+
+ LSPIPELINEWORKERS=$(whiptail --title "Security Onion Setup" --inputbox \
+ "\nEnter LogStash Pipeline Workers: \n \n(Recommended value is pre-populated)" 10 60 $CPUCORES 3>&1 1>&2 2>&3)
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+}
+
+whiptail_node_ls_pipline_batchsize() {
+
+ LSPIPELINEBATCH=$(whiptail --title "Security Onion Setup" --inputbox \
+ "\nEnter LogStash Pipeline Batch Size: \n \n(Default value is pre-populated)" 10 60 125 3>&1 1>&2 2>&3)
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+}
+
+whiptail_node_ls_input_threads() {
+
+ LSINPUTTHREADS=$(whiptail --title "Security Onion Setup" --inputbox \
+ "\nEnter LogStash Input Threads: \n \n(Default value is pre-populated)" 10 60 1 3>&1 1>&2 2>&3)
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+}
+
+whiptail_node_ls_input_batch_count() {
+
+ LSINPUTBATCHCOUNT=$(whiptail --title "Security Onion Setup" --inputbox \
+ "\nEnter LogStash Input Batch Count: \n \n(Default value is pre-populated)" 10 60 125 3>&1 1>&2 2>&3)
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+}
+
+whiptail_passwords_dont_match() {
+
+ whiptail --title "Security Onion Setup" --msgbox "Passwords don't match. Please re-enter." 8 78
+
+}
+
+whiptail_patch_name_new_schedule() {
+
+ PATCHSCHEDULENAME=$(whiptail --title "Security Onion Setup" --inputbox \
+ "What name do you want to give this OS patch schedule? This schedule needs to be named uniquely. Available schedules can be found on the master under /opt/so/salt/patch/os/schedules/*.yml" 10 105 3>&1 1>&2 2>&3)
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+ while [[ -z "$PATCHSCHEDULENAME" ]]; do
+ whiptail --title "Security Onion Setup" --msgbox "Please enter a name for this OS patch schedule." 8 65
+ PATCHSCHEDULENAME=$(whiptail --title "Security Onion Setup" --inputbox \
+ "What name do you want to give this OS patch schedule? This schedule needs to be named uniquely. Available schedules can be found on the master under /opt/so/salt/patch/os/schedules/*.yml" 10 105 3>&1 1>&2 2>&3)
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+ done
+
+
+}
+
+whiptail_patch_schedule() {
+
+ # What kind of patch schedule are we doing?
+ PATCHSCHEDULE=$(whiptail --title "Security Onion Setup" --radiolist \
+ "Choose OS patch schedule. This will NOT update Security Onion related tools such as Zeek, Elasticsearch, Kibana, SaltStack, etc." 25 115 5 \
+ "Automatic" "Package updates will be installed automatically every 8 hours if available" ON \
+ "Manual" "Package updates will need to be installed manually" OFF \
+ "Import Schedule" "Enter the name of an existing schedule on the following screen and inherit it" OFF \
+ "New Schedule" "Configure and name a new schedule on the following screen" OFF 3>&1 1>&2 2>&3 )
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+}
+
+whiptail_patch_schedule_import() {
+
+ unset PATCHSCHEDULENAME
+ PATCHSCHEDULENAME=$(whiptail --title "Security Onion Setup" --inputbox \
+ "Enter the name of the OS patch schedule you want to inherit. Available schedules can be found on the master under /opt/so/salt/patch/os/schedules/*.yml" 10 60 3>&1 1>&2 2>&3)
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+ while [[ -z "$PATCHSCHEDULENAME" ]]; do
+ whiptail --title "Security Onion Setup" --msgbox "Please enter a name for the OS patch schedule you want to inherit." 8 65
+ PATCHSCHEDULENAME=$(whiptail --title "Security Onion Setup" --inputbox \
+ "Enter the name of the OS patch schedule you want to inherit. Available schedules can be found on the master under /opt/so/salt/patch/os/schedules/*.yml" 10 60 3>&1 1>&2 2>&3)
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+ done
+
+}
+
+whiptail_patch_schedule_select_days() {
+ # Select the days to patch
+ PATCHSCHEDULEDAYS=($(whiptail --title "Security Onion Setup" --checklist \
+ "Which days do you want to apply OS patches?" 20 55 9 \
+ "Monday" "" OFF \
+ "Tuesday" "" ON \
+ "Wednesday" "" OFF \
+ "Thursday" "" OFF \
+ "Friday" "" OFF \
+ "Saturday" "" OFF \
+ "Sunday" "" OFF 3>&1 1>&2 2>&3 ))
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+}
+
+whiptail_patch_schedule_select_hours() {
+ # Select the hours to patch
+ PATCHSCHEDULEHOURS=($(whiptail --title "Security Onion Setup" --checklist \
+ "At which time, UTC, do you want to apply OS patches on the selected days?" 35 55 26 \
+ "00:00" "" OFF \
+ "01:00" "" OFF \
+ "02:00" "" OFF \
+ "03:00" "" OFF \
+ "04:00" "" OFF \
+ "05:00" "" OFF \
+ "06:00" "" OFF \
+ "07:00" "" OFF \
+ "08:00" "" OFF \
+ "09:00" "" OFF \
+ "10:00" "" OFF \
+ "11:00" "" OFF \
+ "12:00" "" OFF \
+ "13:00" "" OFF \
+ "14:00" "" OFF \
+ "15:00" "" ON \
+ "16:00" "" OFF \
+ "17:00" "" OFF \
+ "18:00" "" OFF \
+ "19:00" "" OFF \
+ "20:00" "" OFF \
+ "21:00" "" OFF \
+ "22:00" "" OFF \
+ "23:00" "" OFF 3>&1 1>&2 2>&3 ))
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+}
+
+whiptail_rule_setup() {
+
+ # Get pulled pork info
+ RULESETUP=$(whiptail --title "Security Onion Setup" --radiolist \
+ "What IDS rules to use?:" 20 140 4 \
+ "ETOPEN" "Emerging Threats Open - no oinkcode required" ON \
+ "ETPRO" "Emerging Threats PRO - requires ETPRO oinkcode" OFF \
+ "TALOSET" "Snort Subscriber (Talos) ruleset and Emerging Threats NoGPL ruleset - requires Snort Subscriber oinkcode" OFF \
+ "TALOS" "Snort Subscriber (Talos) ruleset only and set a Snort Subscriber policy - requires Snort Subscriber oinkcode" OFF 3>&1 1>&2 2>&3 )
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+}
+
+whiptail_sensor_config() {
+
+ NSMSETUP=$(whiptail --title "Security Onion Setup" --radiolist \
+ "What type of configuration would you like to use?:" 20 78 4 \
+ "BASIC" "Install NSM components with recommended settings" ON \
+ "ADVANCED" "Configure each component individually" OFF 3>&1 1>&2 2>&3 )
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+}
+
+whiptail_set_hostname() {
+
+ HOSTNAME=$(whiptail --title "Security Onion Setup" --inputbox \
+ "Enter the Hostname you would like to set." 10 60 $HOSTNAME 3>&1 1>&2 2>&3)
+
+ while [[ "$HOSTNAME" == 'localhost' ]] ; do
+ whiptail --title "Security Onion Setup" --msgbox "Please choose a hostname that isn't localhost." 8 65
+ HOSTNAME=$(whiptail --title "Security Onion Setup" --inputbox \
+ "Enter the Hostname you would like to set." 10 60 $HOSTNAME 3>&1 1>&2 2>&3)
+ done
+
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+}
+
+whiptail_setup_complete() {
+
+ whiptail --title "Security Onion Setup" --msgbox "Finished installing this as a $INSTALLTYPE. Press Enter to reboot." 8 78
+ install_cleanup
+
+}
+
+whiptail_setup_failed() {
+
+ whiptail --title "Security Onion Setup" --msgbox "Install had a problem. Please see $SETUPLOG for details. Press Enter to reboot." 8 78
+ install_cleanup
+
+}
+
+whiptail_shard_count() {
+
+ SHARDCOUNT=$(whiptail --title "Security Onion Setup" --inputbox \
+ "\nEnter ES Shard Count: \n \n(Default value is pre-populated)" 10 60 125 3>&1 1>&2 2>&3)
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+}
+
+whiptail_suricata_pins() {
+
+ FILTEREDCORES=$(echo ${LISTCORES[@]} ${BROPINS[@]} | tr -d '"' | tr ' ' '\n' | sort | uniq -u | awk '{print $1 " \"" "core" "\""}')
+ SURIPINS=$(whiptail --noitem --title "Pin Suricata CPUS" --checklist "Please Select $LBPROCS cores to pin Suricata to:" 20 78 12 ${FILTEREDCORES[@]} 3>&1 1>&2 2>&3 )
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+}
+
+whiptail_master_updates() {
+
+ MASTERUPDATES=$(whiptail --title "Security Onion Setup" --radiolist \
+ "How would you like to download updates for your grid?:" 20 78 4 \
+ "MASTER" "Have the master node act as a proxy for OS/Docker updates." ON \
+ "OPEN" "Have each node connect to the Internet for updates" OFF 3>&1 1>&2 2>&3 )
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+}
+
+whiptail_node_updates() {
+
+ NODEUPDATES=$(whiptail --title "Security Onion Setup" --radiolist \
+ "How would you like to download updates for this node?:" 20 78 4 \
+ "MASTER" "Download OS/Docker updates from the Master." ON \
+ "OPEN" "Download updates directly from the Internet" OFF 3>&1 1>&2 2>&3 )
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+}
+
+whiptail_you_sure() {
+
+ whiptail --title "Security Onion Setup" --yesno "Are you sure you want to install Security Onion over the internet?" 8 78
+
+}
diff --git a/so-setup-network.sh b/so-setup-network.sh
index 6e54b8ce4..c32635574 100644
--- a/so-setup-network.sh
+++ b/so-setup-network.sh
@@ -55,10 +55,6 @@ add_master_hostfile() {
MSRVIP=$(whiptail --title "Security Onion Setup" --inputbox \
"Enter your Master Server IP Address" 10 60 X.X.X.X 3>&1 1>&2 2>&3)
- # Add the master to the host file if it doesn't resolve
- #if ! grep -q $MSRVIP /etc/hosts; then
- # echo "$MSRVIP $MSRV" >> /etc/hosts
- #fi
}
add_socore_user_master() {
@@ -77,21 +73,6 @@ add_socore_user_master() {
}
-#add_socore_user_master() {
-# echo "Add socore on the master" >> $SETUPLOG 2>&1
-# if [ $OS == 'centos' ]; then
-# local ADDUSER=adduser
-# else
-# local ADDUSER=useradd
-# fi
-# # Add user "socore" to the master. This will be for things like accepting keys.
-# groupadd --gid 939 socore
-# $ADDUSER --uid 939 --gid 939 --home-dir /opt/so socore
-# # Prompt the user to set a password for the user
-# passwd socore
-
-#}
-
add_socore_user_notmaster() {
echo "Add socore user on non master" >> $SETUPLOG 2>&1
# Add socore user to the non master system. Probably not a bad idea to make system user
@@ -255,6 +236,9 @@ configure_minion() {
fi
+ echo "use_superseded:" >> /etc/salt/minion
+ echo " - module.run" >> /etc/salt/minion
+
service salt-minion restart
}
@@ -268,14 +252,15 @@ copy_master_config() {
}
-copy_minion_pillar() {
+copy_minion_tmp_files() {
- # Pass the type so it knows where to copy the pillar
- local TYPE=$1
-
- # Copy over the pillar
- echo "Copying the pillar over" >> $SETUPLOG 2>&1
- scp -v -i /root/.ssh/so.key $TMP/$MINION_ID.sls socore@$MSRV:/opt/so/saltstack/pillar/$TYPE/$MINION_ID.sls
+ if [ $INSTALLTYPE == 'MASTERONLY' ] || [ $INSTALLTYPE == 'EVALMODE' ]; then
+ echo "rsyncing all files in $TMP to /opt/so/saltstack" >> $SETUPLOG 2>&1
+ rsync -a -v $TMP/ /opt/so/saltstack/ >> $SETUPLOG 2>&1
+ else
+ echo "scp all files in $TMP to master /opt/so/saltstack" >> $SETUPLOG 2>&1
+ scp -prv -i /root/.ssh/so.key $TMP/* socore@$MSRV:/opt/so/saltstack >> $SETUPLOG 2>&1
+ fi
}
@@ -351,7 +336,7 @@ docker_install() {
yum -y install yum-utils device-mapper-persistent-data lvm2 openssl
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
yum -y update
- yum -y install docker-ce docker-python python-docker
+ yum -y install docker-ce python36-docker
if [ $INSTALLTYPE != 'EVALMODE' ]; then
docker_registry
fi
@@ -377,6 +362,8 @@ docker_install() {
echo "Restarting Docker" >> $SETUPLOG 2>&1
systemctl restart docker >> $SETUPLOG 2>&1
fi
+ echo "Using pip3 to install docker-py for salt"
+ pip3 install docker
fi
}
@@ -428,6 +415,7 @@ generate_passwords(){
FLEETPASS=$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 20 | head -n 1)
HIVEKEY=$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 20 | head -n 1)
CORTEXKEY=$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 20 | head -n 1)
+ CORTEXORGUSERKEY=$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 20 | head -n 1)
SENSORONIKEY=$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 20 | head -n 1)
}
@@ -478,6 +466,18 @@ install_cleanup() {
}
+install_python3() {
+
+ echo "Installing Python3"
+
+ if [ $OS == 'ubuntu' ]; then
+ apt-get -y install python3-pip gcc python3-dev
+ elif [ $OS == 'centos' ]; then
+ yum -y install epel-release python3
+ fi
+
+}
+
install_prep() {
# Create a tmp space that isn't in /tmp
@@ -490,18 +490,22 @@ install_master() {
# Install the salt master package
if [ $OS == 'centos' ]; then
- yum -y install wget salt-common salt-master >> $SETUPLOG 2>&1
-
+ #yum -y install wget salt-common salt-master python36-mysql python36-dateutil python36-m2crypto >> $SETUPLOG 2>&1
+ echo ""
# Create a place for the keys for Ubuntu minions
- mkdir -p /opt/so/gpg
- wget --inet4-only -O /opt/so/gpg/SALTSTACK-GPG-KEY.pub https://repo.saltstack.com/apt/ubuntu/16.04/amd64/latest/SALTSTACK-GPG-KEY.pub
- wget --inet4-only -O /opt/so/gpg/docker.pub https://download.docker.com/linux/ubuntu/gpg
- wget --inet4-only -O /opt/so/gpg/GPG-KEY-WAZUH https://packages.wazuh.com/key/GPG-KEY-WAZUH
+ #mkdir -p /opt/so/gpg
+ #wget --inet4-only -O /opt/so/gpg/SALTSTACK-GPG-KEY.pub https://repo.saltstack.com/apt/ubuntu/16.04/amd64/latest/SALTSTACK-GPG-KEY.pub
+ #wget --inet4-only -O /opt/so/gpg/docker.pub https://download.docker.com/linux/ubuntu/gpg
+ #wget --inet4-only -O /opt/so/gpg/GPG-KEY-WAZUH https://packages.wazuh.com/key/GPG-KEY-WAZUH
else
- apt-get install -y salt-common=2018.3.4+ds-1 salt-master=2018.3.4+ds-1 salt-minion=2018.3.4+ds-1 python-m2crypto
+ apt-get install -y salt-common=2019.2.2+ds-1 salt-master=2019.2.2+ds-1 salt-minion=2019.2.2+ds-1
apt-mark hold salt-common salt-master salt-minion
- apt-get install -y python-m2crypto
+ echo -e "XXX\n11\nInstalling libssl-dev for M2Crypto... \nXXX"
+ apt-get -y install libssl-dev
+ echo -e "XXX\n12\nUsing pip3 to install M2Crypto for Salt... \nXXX"
+ pip3 install M2Crypto
+
fi
copy_master_config
@@ -579,6 +583,9 @@ master_static() {
echo " cortexuser: cortexadmin" >> /opt/so/saltstack/pillar/static.sls
echo " cortexpassword: cortexchangeme" >> /opt/so/saltstack/pillar/static.sls
echo " cortexkey: $CORTEXKEY" >> /opt/so/saltstack/pillar/static.sls
+ echo " cortexorgname: SecurityOnion" >> /opt/so/saltstack/pillar/static.sls
+ echo " cortexorguser: soadmin" >> /opt/so/saltstack/pillar/static.sls
+ echo " cortexorguserkey: $CORTEXORGUSERKEY" >> /opt/so/saltstack/pillar/static.sls
echo " fleetsetup: 0" >> /opt/so/saltstack/pillar/static.sls
echo " sensoronikey: $SENSORONIKEY" >> /opt/so/saltstack/pillar/static.sls
if [[ $MASTERUPDATES == 'MASTER' ]]; then
@@ -599,23 +606,77 @@ minio_generate_keys() {
node_pillar() {
+ NODEPILLARPATH=$TMP/pillar/nodes
+ if [ ! -d $NODEPILLARPATH ]; then
+ mkdir -p $NODEPILLARPATH
+ fi
+
# Create the node pillar
- touch $TMP/$MINION_ID.sls
- echo "node:" > $TMP/$MINION_ID.sls
- echo " mainip: $MAINIP" >> $TMP/$MINION_ID.sls
- echo " mainint: $MAININT" >> $TMP/$MINION_ID.sls
- echo " esheap: $NODE_ES_HEAP_SIZE" >> $TMP/$MINION_ID.sls
- echo " esclustername: {{ grains.host }}" >> $TMP/$MINION_ID.sls
- echo " lsheap: $NODE_LS_HEAP_SIZE" >> $TMP/$MINION_ID.sls
- echo " ls_pipeline_workers: $LSPIPELINEWORKERS" >> $TMP/$MINION_ID.sls
- echo " ls_pipeline_batch_size: $LSPIPELINEBATCH" >> $TMP/$MINION_ID.sls
- echo " ls_input_threads: $LSINPUTTHREADS" >> $TMP/$MINION_ID.sls
- echo " ls_batch_count: $LSINPUTBATCHCOUNT" >> $TMP/$MINION_ID.sls
- echo " es_shard_count: $SHARDCOUNT" >> $TMP/$MINION_ID.sls
- echo " node_type: $NODETYPE" >> $TMP/$MINION_ID.sls
- echo " es_port: $NODE_ES_PORT" >> $TMP/$MINION_ID.sls
- echo " log_size_limit: $LOG_SIZE_LIMIT" >> $TMP/$MINION_ID.sls
- echo " cur_close_days: $CURCLOSEDAYS" >> $TMP/$MINION_ID.sls
+ touch $NODEPILLARPATH/$MINION_ID.sls
+ echo "node:" > $NODEPILLARPATH/$MINION_ID.sls
+ echo " mainip: $MAINIP" >> $NODEPILLARPATH/$MINION_ID.sls
+ echo " mainint: $MAININT" >> $NODEPILLARPATH/$MINION_ID.sls
+ echo " esheap: $NODE_ES_HEAP_SIZE" >> $NODEPILLARPATH/$MINION_ID.sls
+ echo " esclustername: {{ grains.host }}" >> $NODEPILLARPATH/$MINION_ID.sls
+ echo " lsheap: $NODE_LS_HEAP_SIZE" >> $NODEPILLARPATH/$MINION_ID.sls
+ echo " ls_pipeline_workers: $LSPIPELINEWORKERS" >> $NODEPILLARPATH/$MINION_ID.sls
+ echo " ls_pipeline_batch_size: $LSPIPELINEBATCH" >> $NODEPILLARPATH/$MINION_ID.sls
+ echo " ls_input_threads: $LSINPUTTHREADS" >> $NODEPILLARPATH/$MINION_ID.sls
+ echo " ls_batch_count: $LSINPUTBATCHCOUNT" >> $NODEPILLARPATH/$MINION_ID.sls
+ echo " es_shard_count: $SHARDCOUNT" >> $NODEPILLARPATH/$MINION_ID.sls
+ echo " node_type: $NODETYPE" >> $NODEPILLARPATH/$MINION_ID.sls
+ echo " es_port: $NODE_ES_PORT" >> $NODEPILLARPATH/$MINION_ID.sls
+ echo " log_size_limit: $LOG_SIZE_LIMIT" >> $NODEPILLARPATH/$MINION_ID.sls
+ echo " cur_close_days: $CURCLOSEDAYS" >> $NODEPILLARPATH/$MINION_ID.sls
+
+}
+
+patch_pillar() {
+
+ case $INSTALLTYPE in
+ MASTERONLY | EVALMODE)
+ PATCHPILLARPATH=/opt/so/saltstack/pillar/masters
+ ;;
+ SENSORONLY)
+ PATCHPILLARPATH=$SENSORPILLARPATH
+ ;;
+ STORAGENODE | PARSINGNODE | HOTNODE | WARMNODE)
+ PATCHPILLARPATH=$NODEPILLARPATH
+ ;;
+ esac
+
+
+ echo "" >> $PATCHPILLARPATH/$MINION_ID.sls
+ echo "patch:" >> $PATCHPILLARPATH/$MINION_ID.sls
+ echo " os:" >> $PATCHPILLARPATH/$MINION_ID.sls
+ echo " schedule_name: $PATCHSCHEDULENAME" >> $PATCHPILLARPATH/$MINION_ID.sls
+ echo " enabled: True" >> $PATCHPILLARPATH/$MINION_ID.sls
+ echo " splay: 300" >> $PATCHPILLARPATH/$MINION_ID.sls
+
+
+}
+
+patch_schedule_os_new() {
+ OSPATCHSCHEDULEDIR="$TMP/salt/patch/os/schedules"
+ OSPATCHSCHEDULE="$OSPATCHSCHEDULEDIR/$PATCHSCHEDULENAME.yml"
+
+ if [ ! -d $OSPATCHSCHEDULEDIR ] ; then
+ mkdir -p $OSPATCHSCHEDULEDIR
+ fi
+
+ echo "patch:" > $OSPATCHSCHEDULE
+ echo " os:" >> $OSPATCHSCHEDULE
+ echo " schedule:" >> $OSPATCHSCHEDULE
+ for psd in "${PATCHSCHEDULEDAYS[@]}"
+ do
+ psd=$(echo $psd | sed 's/"//g')
+ echo " - $psd:" >> $OSPATCHSCHEDULE
+ for psh in "${PATCHSCHEDULEHOURS[@]}"
+ do
+ psh=$(echo $psh | sed 's/"//g')
+ echo " - '$psh'" >> $OSPATCHSCHEDULE
+ done
+ done
}
@@ -641,9 +702,14 @@ saltify() {
ADDUSER=adduser
if [ $INSTALLTYPE == 'MASTERONLY' ] || [ $INSTALLTYPE == 'EVALMODE' ]; then
- yum -y install https://repo.saltstack.com/yum/redhat/salt-repo-latest-2.el7.noarch.rpm
- cp /etc/yum.repos.d/salt-latest.repo /etc/yum.repos.d/salt-2018-3.repo
- sed -i 's/latest/2018.3/g' /etc/yum.repos.d/salt-2018-3.repo
+ yum -y install wget https://repo.saltstack.com/py3/redhat/salt-py3-repo-latest-2.el7.noarch.rpm
+ cp /etc/yum.repos.d/salt-latest.repo /etc/yum.repos.d/salt-2019-2.repo
+ sed -i 's/latest/2019.2/g' /etc/yum.repos.d/salt-2019-2.repo
+ # Download Ubuntu Keys in case master updates = 1
+ mkdir -p /opt/so/gpg
+ wget --inet4-only -O /opt/so/gpg/SALTSTACK-GPG-KEY.pub https://repo.saltstack.com/apt/ubuntu/16.04/amd64/latest/SALTSTACK-GPG-KEY.pub
+ wget --inet4-only -O /opt/so/gpg/docker.pub https://download.docker.com/linux/ubuntu/gpg
+ wget --inet4-only -O /opt/so/gpg/GPG-KEY-WAZUH https://packages.wazuh.com/key/GPG-KEY-WAZUH
cat > /etc/yum.repos.d/wazuh.repo <<\EOF
[wazuh_repo]
gpgcheck=1
@@ -750,20 +816,20 @@ EOF
# Proxy is hating on me.. Lets just set it manually
echo "[salt-latest]" > /etc/yum.repos.d/salt-latest.repo
echo "name=SaltStack Latest Release Channel for RHEL/Centos \$releasever" >> /etc/yum.repos.d/salt-latest.repo
- echo "baseurl=https://repo.saltstack.com/yum/redhat/7/\$basearch/latest" >> /etc/yum.repos.d/salt-latest.repo
+ echo "baseurl=https://repo.saltstack.com/py3/redhat/7/\$basearch/latest" >> /etc/yum.repos.d/salt-latest.repo
echo "failovermethod=priority" >> /etc/yum.repos.d/salt-latest.repo
echo "enabled=1" >> /etc/yum.repos.d/salt-latest.repo
echo "gpgcheck=1" >> /etc/yum.repos.d/salt-latest.repo
echo "gpgkey=file:///etc/pki/rpm-gpg/saltstack-signing-key" >> /etc/yum.repos.d/salt-latest.repo
# Proxy is hating on me.. Lets just set it manually
- echo "[salt-2018.3]" > /etc/yum.repos.d/salt-2018-3.repo
- echo "name=SaltStack Latest Release Channel for RHEL/Centos \$releasever" >> /etc/yum.repos.d/salt-2018-3.repo
- echo "baseurl=https://repo.saltstack.com/yum/redhat/7/\$basearch/2018.3" >> /etc/yum.repos.d/salt-2018-3.repo
- echo "failovermethod=priority" >> /etc/yum.repos.d/salt-2018-3.repo
- echo "enabled=1" >> /etc/yum.repos.d/salt-2018-3.repo
- echo "gpgcheck=1" >> /etc/yum.repos.d/salt-2018-3.repo
- echo "gpgkey=file:///etc/pki/rpm-gpg/saltstack-signing-key" >> /etc/yum.repos.d/salt-2018-3.repo
+ echo "[salt-2019.2]" > /etc/yum.repos.d/salt-2019-2.repo
+ echo "name=SaltStack Latest Release Channel for RHEL/Centos \$releasever" >> /etc/yum.repos.d/salt-2019-2.repo
+ echo "baseurl=https://repo.saltstack.com/py3/redhat/7/\$basearch/2019.2" >> /etc/yum.repos.d/salt-2019-2.repo
+ echo "failovermethod=priority" >> /etc/yum.repos.d/salt-2019-2.repo
+ echo "enabled=1" >> /etc/yum.repos.d/salt-2019-2.repo
+ echo "gpgcheck=1" >> /etc/yum.repos.d/salt-2019-2.repo
+ echo "gpgkey=file:///etc/pki/rpm-gpg/saltstack-signing-key" >> /etc/yum.repos.d/salt-2019-2.repo
cat > /etc/yum.repos.d/wazuh.repo <<\EOF
[wazuh_repo]
@@ -775,9 +841,9 @@ baseurl=https://packages.wazuh.com/3.x/yum/
protect=1
EOF
else
- yum -y install https://repo.saltstack.com/yum/redhat/salt-repo-latest-2.el7.noarch.rpm
- cp /etc/yum.repos.d/salt-latest.repo /etc/yum.repos.d/salt-2018-3.repo
- sed -i 's/latest/2018.3/g' /etc/yum.repos.d/salt-2018-3.repo
+ yum -y install https://repo.saltstack.com/py3/redhat/salt-py3-repo-latest-2.el7.noarch.rpm
+ cp /etc/yum.repos.d/salt-latest.repo /etc/yum.repos.d/salt-2019-2.repo
+ sed -i 's/latest/2019.2/g' /etc/yum.repos.d/salt-2019-2.repo
cat > /etc/yum.repos.d/wazuh.repo <<\EOF
[wazuh_repo]
gpgcheck=1
@@ -791,16 +857,15 @@ EOF
fi
yum clean expire-cache
- yum -y install salt-minion-2018.3.4 yum-utils device-mapper-persistent-data lvm2 openssl
+ yum -y install epel-release salt-minion-2019.2.2 yum-utils device-mapper-persistent-data lvm2 openssl
yum -y update exclude=salt*
systemctl enable salt-minion
- # Nasty hack but required for now
if [ $INSTALLTYPE == 'MASTERONLY' ] || [ $INSTALLTYPE == 'EVALMODE' ]; then
- yum -y install salt-master-2018.3.4 python-m2crypto salt-minion-2018.3.4 m2crypto
+ yum -y install salt-master-2019.2.2 python3 python36-m2crypto salt-minion-2019.2.2 python36-dateutil python36-mysql python36-docker
systemctl enable salt-master
else
- yum -y install salt-minion-2018.3.4 python-m2m2crypto m2crypto
+ yum -y install salt-minion-2019.2.2 python3 python36-m2crypto python36-dateutil python36-docker
fi
echo "exclude=salt*" >> /etc/yum.conf
@@ -817,11 +882,13 @@ EOF
# Nasty hack but required for now
if [ $INSTALLTYPE == 'MASTERONLY' ] || [ $INSTALLTYPE == 'EVALMODE' ]; then
+ #echo "Using pip3 to install python-dateutil for salt"
+ #pip3 install python-dateutil
# Install the repo for salt
wget --inet4-only -O - https://repo.saltstack.com/apt/ubuntu/$UVER/amd64/latest/SALTSTACK-GPG-KEY.pub | apt-key add -
- wget --inet4-only -O - https://repo.saltstack.com/apt/ubuntu/$UVER/amd64/2018.3/SALTSTACK-GPG-KEY.pub | apt-key add -
- echo "deb http://repo.saltstack.com/apt/ubuntu/$UVER/amd64/latest xenial main" > /etc/apt/sources.list.d/saltstack.list
- echo "deb http://repo.saltstack.com/apt/ubuntu/$UVER/amd64/2018.3 xenial main" > /etc/apt/sources.list.d/saltstack2018.list
+ wget --inet4-only -O - https://repo.saltstack.com/apt/ubuntu/$UVER/amd64/2019.2/SALTSTACK-GPG-KEY.pub | apt-key add -
+ echo "deb http://repo.saltstack.com/py3/ubuntu/$UVER/amd64/latest xenial main" > /etc/apt/sources.list.d/saltstack.list
+ echo "deb http://repo.saltstack.com/py3/ubuntu/$UVER/amd64/2019.2 xenial main" > /etc/apt/sources.list.d/saltstack2019.list
# Lets get the docker repo added
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
@@ -840,7 +907,8 @@ EOF
# Initialize the new repos
apt-get update >> $SETUPLOG 2>&1
- apt-get -y install salt-minion=2018.3.4+ds-1 salt-common=2018.3.4+ds-1 python-m2crypto >> $SETUPLOG 2>&1
+ # Need to add python packages here
+ apt-get -y install salt-minion=2019.2.2+ds-1 salt-common=2019.2.2+ds-1 python3-dateutil >> $SETUPLOG 2>&1
apt-mark hold salt-minion salt-common
else
@@ -854,7 +922,8 @@ EOF
echo "deb https://packages.wazuh.com/3.x/apt/ stable main" | tee /etc/apt/sources.list.d/wazuh.list
# Initialize the new repos
apt-get update >> $SETUPLOG 2>&1
- apt-get -y install salt-minion=2018.3.4+ds-1 salt-common=2018.3.4+ds-1 python-m2crypto >> $SETUPLOG 2>&1
+ # Need to add python dateutil here
+ apt-get -y install salt-minion=2019.2.2+ds-1 salt-common=2019.2.2+ds-1 >> $SETUPLOG 2>&1
apt-mark hold salt-minion salt-common
fi
@@ -924,39 +993,63 @@ salt_master_directories() {
}
+salt_install_mysql_deps() {
+
+ if [ $OS == 'centos' ]; then
+ yum -y install mariadb-devel
+ elif [ $OS == 'ubuntu' ]; then
+ apt-get -y install libmysqlclient-dev python3-mysqldb
+ fi
+
+}
+
sensor_pillar() {
+ SENSORPILLARPATH=$TMP/pillar/sensors
+ if [ ! -d $SENSORPILLARPATH ]; then
+ mkdir -p $SENSORPILLARPATH
+ fi
+
# Create the sensor pillar
- touch $TMP/$MINION_ID.sls
- echo "sensor:" > $TMP/$MINION_ID.sls
- echo " interface: bond0" >> $TMP/$MINION_ID.sls
- echo " mainip: $MAINIP" >> $TMP/$MINION_ID.sls
- echo " mainint: $MAININT" >> $TMP/$MINION_ID.sls
+ touch $SENSORPILLARPATH/$MINION_ID.sls
+ echo "sensor:" > $SENSORPILLARPATH/$MINION_ID.sls
+ echo " interface: bond0" >> $SENSORPILLARPATH/$MINION_ID.sls
+ echo " mainip: $MAINIP" >> $SENSORPILLARPATH/$MINION_ID.sls
+ echo " mainint: $MAININT" >> $SENSORPILLARPATH/$MINION_ID.sls
if [ $NSMSETUP == 'ADVANCED' ]; then
- echo " bro_pins:" >> $TMP/$MINION_ID.sls
+ echo " bro_pins:" >> $SENSORPILLARPATH/$MINION_ID.sls
for PIN in $BROPINS; do
PIN=$(echo $PIN | cut -d\" -f2)
- echo " - $PIN" >> $TMP/$MINION_ID.sls
+ echo " - $PIN" >> $SENSORPILLARPATH/$MINION_ID.sls
done
- echo " suripins:" >> $TMP/$MINION_ID.sls
+ echo " suripins:" >> $SENSORPILLARPATH/$MINION_ID.sls
for SPIN in $SURIPINS; do
SPIN=$(echo $SPIN | cut -d\" -f2)
- echo " - $SPIN" >> $TMP/$MINION_ID.sls
+ echo " - $SPIN" >> $SENSORPILLARPATH/$MINION_ID.sls
done
else
- echo " bro_lbprocs: $BASICBRO" >> $TMP/$MINION_ID.sls
- echo " suriprocs: $BASICSURI" >> $TMP/$MINION_ID.sls
+ echo " bro_lbprocs: $BASICBRO" >> $SENSORPILLARPATH/$MINION_ID.sls
+ echo " suriprocs: $BASICSURI" >> $SENSORPILLARPATH/$MINION_ID.sls
fi
- echo " brobpf:" >> $TMP/$MINION_ID.sls
- echo " pcapbpf:" >> $TMP/$MINION_ID.sls
- echo " nidsbpf:" >> $TMP/$MINION_ID.sls
- echo " master: $MSRV" >> $TMP/$MINION_ID.sls
- echo " mtu: $MTU" >> $TMP/$MINION_ID.sls
+ echo " brobpf:" >> $SENSORPILLARPATH/$MINION_ID.sls
+ echo " pcapbpf:" >> $SENSORPILLARPATH/$MINION_ID.sls
+ echo " nidsbpf:" >> $SENSORPILLARPATH/$MINION_ID.sls
+ echo " master: $MSRV" >> $SENSORPILLARPATH/$MINION_ID.sls
+ echo " mtu: $MTU" >> $SENSORPILLARPATH/$MINION_ID.sls
if [ $HNSENSOR != 'inherit' ]; then
- echo " hnsensor: $HNSENSOR" >> $TMP/$MINION_ID.sls
+ echo " hnsensor: $HNSENSOR" >> $SENSORPILLARPATH/$MINION_ID.sls
fi
- echo " access_key: $ACCESS_KEY" >> $TMP/$MINION_ID.sls
- echo " access_secret: $ACCESS_SECRET" >> $TMP/$MINION_ID.sls
+ echo " access_key: $ACCESS_KEY" >> $SENSORPILLARPATH/$MINION_ID.sls
+ echo " access_secret: $ACCESS_SECRET" >> $SENSORPILLARPATH/$MINION_ID.sls
+
+}
+
+set_environment_var() {
+
+ echo "Setting environment variable: $1"
+
+ export "$1"
+ echo "$1" >> /etc/environment
}
@@ -1469,6 +1562,109 @@ whiptail_passwords_dont_match() {
}
+whiptail_patch_name_new_schedule() {
+
+ PATCHSCHEDULENAME=$(whiptail --title "Security Onion Setup" --inputbox \
+    "What name do you want to give this OS patch schedule? This schedule needs to be named uniquely. Available schedules can be found on the master under /opt/so/salt/patch/os/schedules/<name>.yml" 10 105 3>&1 1>&2 2>&3)
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+ while [[ -z "$PATCHSCHEDULENAME" ]]; do
+ whiptail --title "Security Onion Setup" --msgbox "Please enter a name for this OS patch schedule." 8 65
+ PATCHSCHEDULENAME=$(whiptail --title "Security Onion Setup" --inputbox \
+    "What name do you want to give this OS patch schedule? This schedule needs to be named uniquely. Available schedules can be found on the master under /opt/so/salt/patch/os/schedules/<name>.yml" 10 105 3>&1 1>&2 2>&3)
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+ done
+
+
+}
+
+whiptail_patch_schedule() {
+
+ # What kind of patch schedule are we doing?
+ PATCHSCHEDULE=$(whiptail --title "Security Onion Setup" --radiolist \
+ "Choose OS patch schedule. This will NOT update Security Onion related tools such as Zeek, Elasticsearch, Kibana, SaltStack, etc." 25 115 5 \
+ "Automatic" "Package updates will be installed automatically every 8 hours if available" ON \
+ "Manual" "Package updates will need to be installed manually" OFF \
+ "Import Schedule" "Enter the name of an existing schedule on the following screen and inherit it" OFF \
+ "New Schedule" "Configure and name a new schedule on the following screen" OFF 3>&1 1>&2 2>&3 )
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+}
+
+whiptail_patch_schedule_import() {
+
+ unset PATCHSCHEDULENAME
+ PATCHSCHEDULENAME=$(whiptail --title "Security Onion Setup" --inputbox \
+    "Enter the name of the OS patch schedule you want to inherit. Available schedules can be found on the master under /opt/so/salt/patch/os/schedules/<name>.yml" 10 60 3>&1 1>&2 2>&3)
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+ while [[ -z "$PATCHSCHEDULENAME" ]]; do
+ whiptail --title "Security Onion Setup" --msgbox "Please enter a name for the OS patch schedule you want to inherit." 8 65
+ PATCHSCHEDULENAME=$(whiptail --title "Security Onion Setup" --inputbox \
+    "Enter the name of the OS patch schedule you want to inherit. Available schedules can be found on the master under /opt/so/salt/patch/os/schedules/<name>.yml" 10 60 3>&1 1>&2 2>&3)
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+ done
+
+}
+
+whiptail_patch_schedule_select_days() {
+ # Select the days to patch
+ PATCHSCHEDULEDAYS=($(whiptail --title "Security Onion Setup" --checklist \
+ "Which days do you want to apply OS patches?" 20 55 9 \
+ "Monday" "" OFF \
+ "Tuesday" "" ON \
+ "Wednesday" "" OFF \
+ "Thursday" "" OFF \
+ "Friday" "" OFF \
+ "Saturday" "" OFF \
+ "Sunday" "" OFF 3>&1 1>&2 2>&3 ))
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+}
+
+whiptail_patch_schedule_select_hours() {
+ # Select the hours to patch
+ PATCHSCHEDULEHOURS=($(whiptail --title "Security Onion Setup" --checklist \
+    "At which time (UTC) do you want to apply OS patches on the selected days?" 35 55 26 \
+ "00:00" "" OFF \
+ "01:00" "" OFF \
+ "02:00" "" OFF \
+ "03:00" "" OFF \
+ "04:00" "" OFF \
+ "05:00" "" OFF \
+ "06:00" "" OFF \
+ "07:00" "" OFF \
+ "08:00" "" OFF \
+ "09:00" "" OFF \
+ "10:00" "" OFF \
+ "11:00" "" OFF \
+ "12:00" "" OFF \
+ "13:00" "" OFF \
+ "14:00" "" OFF \
+ "15:00" "" ON \
+ "16:00" "" OFF \
+ "17:00" "" OFF \
+ "18:00" "" OFF \
+ "19:00" "" OFF \
+ "20:00" "" OFF \
+ "21:00" "" OFF \
+ "22:00" "" OFF \
+ "23:00" "" OFF 3>&1 1>&2 2>&3 ))
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+}
+
whiptail_rule_setup() {
# Get pulled pork info
@@ -1501,6 +1697,13 @@ whiptail_set_hostname() {
HOSTNAME=$(whiptail --title "Security Onion Setup" --inputbox \
"Enter the Hostname you would like to set." 10 60 $HOSTNAME 3>&1 1>&2 2>&3)
+ while [[ "$HOSTNAME" == 'localhost' ]] ; do
+ whiptail --title "Security Onion Setup" --msgbox "Please choose a hostname that isn't localhost." 8 65
+ HOSTNAME=$(whiptail --title "Security Onion Setup" --inputbox \
+ "Enter the Hostname you would like to set." 10 60 $HOSTNAME 3>&1 1>&2 2>&3)
+ done
+
+
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
@@ -1609,6 +1812,26 @@ if (whiptail_you_sure); then
# What kind of install are we doing?
whiptail_install_type
+ # How do we want to handle OS patching? manual, auto or scheduled days and hours
+ whiptail_patch_schedule
+ case $PATCHSCHEDULE in
+ 'New Schedule')
+ whiptail_patch_schedule_select_days
+ whiptail_patch_schedule_select_hours
+ whiptail_patch_name_new_schedule
+ patch_schedule_os_new
+ ;;
+ 'Import Schedule')
+ whiptail_patch_schedule_import
+ ;;
+ Automatic)
+ PATCHSCHEDULENAME=auto
+ ;;
+ Manual)
+ PATCHSCHEDULENAME=manual
+ ;;
+ esac
+
####################
## Master ##
####################
@@ -1684,8 +1907,10 @@ if (whiptail_you_sure); then
# Install salt and dependencies
{
sleep 0.5
- echo -e "XXX\n0\nInstalling and configuring Salt... \nXXX"
+ #install_pip3 >> $SETUPLOG 2>&1
+ echo -e "XXX\n1\nInstalling and configuring Salt... \nXXX"
echo " ** Installing Salt and Dependencies **" >> $SETUPLOG
+ salt_install_mysql_deps >> $SETUPLOG 2>&1
saltify >> $SETUPLOG 2>&1
echo -e "XXX\n5\nInstalling Docker... \nXXX"
docker_install >> $SETUPLOG 2>&1
@@ -1703,7 +1928,11 @@ if (whiptail_you_sure); then
master_static >> $SETUPLOG 2>&1
echo "** Generating the master pillar **" >> $SETUPLOG
master_pillar >> $SETUPLOG 2>&1
+ echo "** Generating the patch pillar **" >> $SETUPLOG
+ patch_pillar >> $SETUPLOG 2>&1
echo -e "XXX\n30\nAccepting Salt Keys... \nXXX"
+ echo -e "XXX\n24\nCopying Minion Pillars to Master... \nXXX"
+ copy_minion_tmp_files >> $SETUPLOG 2>&1
# Do a checkin to push the key up
echo "** Pushing the key up to Master **" >> $SETUPLOG
salt_firstcheckin >> $SETUPLOG 2>&1
@@ -1814,18 +2043,22 @@ if (whiptail_you_sure); then
sleep 0.5
echo -e "XXX\n0\nSetting Initial Firewall Policy... \nXXX"
set_initial_firewall_policy >> $SETUPLOG 2>&1
+ #echo -e "XXX\n1\nInstalling pip3... \nXXX"
+ #install_pip3 >> $SETUPLOG 2>&1
echo -e "XXX\n3\nCreating Bond Interface... \nXXX"
network_setup >> $SETUPLOG 2>&1
echo -e "XXX\n4\nGenerating Sensor Pillar... \nXXX"
sensor_pillar >> $SETUPLOG 2>&1
+ echo "** Generating the patch pillar **" >> $SETUPLOG
+ patch_pillar >> $SETUPLOG 2>&1
echo -e "XXX\n5\nInstalling Salt Components... \nXXX"
saltify >> $SETUPLOG 2>&1
echo -e "XXX\n20\nInstalling Docker... \nXXX"
docker_install >> $SETUPLOG 2>&1
echo -e "XXX\n22\nConfiguring Salt Minion... \nXXX"
configure_minion sensor >> $SETUPLOG 2>&1
- echo -e "XXX\n24\nCopying Sensor Pillar to Master... \nXXX"
- copy_minion_pillar sensors >> $SETUPLOG 2>&1
+ echo -e "XXX\n24\nCopying Minion Pillars to Master... \nXXX"
+ copy_minion_tmp_files >> $SETUPLOG 2>&1
echo -e "XXX\n25\nSending Salt Key to Master... \nXXX"
salt_firstcheckin >> $SETUPLOG 2>&1
echo -e "XXX\n26\nTelling the Master to Accept Key... \nXXX"
@@ -1912,6 +2145,9 @@ if (whiptail_you_sure); then
sleep 0.5
echo -e "XXX\n0\nCreating Bond Interface... \nXXX"
network_setup >> $SETUPLOG 2>&1
+ #install_pip3 >> $SETUPLOG 2>&1
+ echo -e "XXX\n1\nInstalling mysql dependencies for saltstack... \nXXX"
+ salt_install_mysql_deps >> $SETUPLOG 2>&1
echo -e "XXX\n1\nInstalling saltstack... \nXXX"
saltify >> $SETUPLOG 2>&1
echo -e "XXX\n3\nInstalling docker... \nXXX"
@@ -1929,6 +2165,8 @@ if (whiptail_you_sure); then
master_static >> $SETUPLOG 2>&1
echo -e "XXX\n7\nCreating the master pillar... \nXXX"
master_pillar >> $SETUPLOG 2>&1
+ echo "** Generating the patch pillar **" >> $SETUPLOG
+ patch_pillar >> $SETUPLOG 2>&1
echo -e "XXX\n7\nConfiguring minion... \nXXX"
configure_minion eval >> $SETUPLOG 2>&1
echo -e "XXX\n7\nSetting the node type to eval... \nXXX"
@@ -1937,6 +2175,8 @@ if (whiptail_you_sure); then
node_pillar >> $SETUPLOG 2>&1
echo -e "XXX\n8\nCreating firewall policies... \nXXX"
set_initial_firewall_policy >> $SETUPLOG 2>&1
+ echo -e "XXX\n24\nCopying Minion Pillars to Master... \nXXX"
+ copy_minion_tmp_files >> $SETUPLOG 2>&1
echo -e "XXX\n10\nRegistering agent... \nXXX"
salt_firstcheckin >> $SETUPLOG 2>&1
echo -e "XXX\n11\nAccepting Agent... \nXXX"
@@ -2070,6 +2310,8 @@ if (whiptail_you_sure); then
sleep 0.5
echo -e "XXX\n0\nSetting Initial Firewall Policy... \nXXX"
set_initial_firewall_policy >> $SETUPLOG 2>&1
+ #echo -e "XXX\n1\nInstalling pip3... \nXXX"
+ #install_pip3 >> $SETUPLOG 2>&1
echo -e "XXX\n5\nInstalling Salt Packages... \nXXX"
saltify >> $SETUPLOG 2>&1
echo -e "XXX\n20\nInstalling Docker... \nXXX"
@@ -2078,7 +2320,10 @@ if (whiptail_you_sure); then
configure_minion node >> $SETUPLOG 2>&1
set_node_type >> $SETUPLOG 2>&1
node_pillar >> $SETUPLOG 2>&1
- copy_minion_pillar nodes >> $SETUPLOG 2>&1
+ echo "** Generating the patch pillar **" >> $SETUPLOG
+ patch_pillar >> $SETUPLOG 2>&1
+ echo -e "XXX\n24\nCopying Minion Pillars to Master... \nXXX"
+ copy_minion_tmp_files >> $SETUPLOG 2>&1
echo -e "XXX\n35\nSending and Accepting Salt Key... \nXXX"
salt_firstcheckin >> $SETUPLOG 2>&1
# Accept the Salt Key