diff --git a/README.md b/README.md
index c13c1741c..3b6188b5c 100644
--- a/README.md
+++ b/README.md
@@ -1,32 +1,35 @@
-## Hybrid Hunter Alpha 1.1.2
+## Hybrid Hunter Alpha 1.1.3
-- Quick firewall fix to address latest docker version.
-- Added the option to install playbook from the initial install.
-- Fixed an issue with multiple monitor interfaces not working properly.
+### ISO Download:
-ISO Download: [HH 1.1.2-2](https://github.com/Security-Onion-Solutions/securityonion-hh-iso/releases/download/HH1.1.2/HH-1.1.2-2.iso)
-MD5 (HH-1.1.2-2.iso) = abbbae7b40a50623546ed3d7f8cda0ec
+[HH1.1.3-20.iso](https://github.com/Security-Onion-Solutions/securityonion-hh-iso/releases/download/HH1.1.3/HH-1.1.3-20.iso)
+MD5: 5A97980365A2A63EBFABB8C1DEB32BB6
+SHA1: 2A780B41903D907CED91D944569FD24FC131281F
+SHA256: 56FA65EB5957903B967C16E792B17386848101CD058E0289878373110446C4B2
-
-## Hybrid Hunter Alpha 1.1.1
+```
+Default Username: onion
+Default Password: V@daL1aZ
+```
### Changes:
-- Alpha 2 is here!
-- Suricata 4.1.5.
-- Bro/Zeek 2.6.4.
-- TheHive 3.4.0 (Includes ES 6.8.3 for TheHive only).
-- Fixed Bro/Zeek packet loss calculation for Grafana.
-- Updated to latest Sensoroni which includes websockets support for job status updates without having to refresh the page.
-- NIDS and HIDS dashboard updates.
-- Playbook and ATT&CK Navigator features are now included.
-- Filebeat now logs to a file, instead of stdout.
-- Elastalert has been updated to use Python 3 and allow for use of custom alerters.
-- Moved Bro/Zeek log parsing from Logstash to Elasticsearch Ingest for higher performance and lower memory usage!
-- Several changes to the setup script have been made to improve stability of the setup process:
- - Setup now modifies your hosts file so that the install works better in environments without DNS.
- - You are now prompted for setting a password for the socore user.
- - The install now forces a reboot at the end of the install. This fixes an issue with some of the Docker containers being in the wrong state from a manual reboot. Manual reboots are fine after the initial reboot.
+- Overhaul of the setup script to support both ISO and network-based setups.
+- ISO will now boot properly from a USB stick.
+- Python 3 is now the default.
+- Fixed Filebeat restarting on every check-in due to an x509 refresh issue.
+- Cortex is now installed and integrated with TheHive.
+- Switched to vanilla Kolide Fleet and upgraded to the latest version (2.4).
+- Playbook changes:
+ - Now preloaded with Plays generated from Sysmon Sigma signatures in the [Sigma community repo](https://github.com/Neo23x0/sigma/tree/master/rules/windows/sysmon).
+ - New update script that pulls in new Sigma signatures from the community repo.
+ - Bulk enable/disable Plays from the web UI.
+ - Updated sigmac mapping template & configuration (backend is now `elastalert`).
+ - Updated TheHive alert formatting.
+- OS patch scheduling:
+ - During setup, choose an auto, manual, or scheduled OS patch interval.
+ - For scheduled, create a new named schedule or import an existing one.
+
### Warnings and Disclaimers
diff --git a/pillar/patch/needs_restarting.sls b/pillar/patch/needs_restarting.sls
new file mode 100644
index 000000000..f77dd2269
--- /dev/null
+++ b/pillar/patch/needs_restarting.sls
@@ -0,0 +1,2 @@
+mine_functions:
+ needs_restarting.check: []
diff --git a/pillar/top.sls b/pillar/top.sls
index bc68aa644..17bf33e02 100644
--- a/pillar/top.sls
+++ b/pillar/top.sls
@@ -1,19 +1,22 @@
base:
+ '*':
+ - patch.needs_restarting
+
'G@role:so-sensor':
- - sensors.{{ grains.host }}
+ - sensors.{{ grains.id }}
- static
- firewall.*
- brologs
'G@role:so-master':
- - masters.{{ grains.host }}
+ - masters.{{ grains.id }}
- static
- firewall.*
- data.*
- auth
'G@role:so-eval':
- - masters.{{ grains.host }}
+ - masters.{{ grains.id }}
- static
- firewall.*
- data.*
@@ -21,6 +24,15 @@ base:
- auth
'G@role:so-node':
- - nodes.{{ grains.host }}
+ - nodes.{{ grains.id }}
- static
- firewall.*
+
+ 'G@role:so-helix':
+ - masters.{{ grains.id }}
+ - sensors.{{ grains.id }}
+ - static
+ - firewall.*
+ - fireeye
+ - static
+ - brologs
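The pillar targeting above now keys off `grains.id` instead of `grains.host`, so the files under `pillar/masters/`, `pillar/sensors/`, and `pillar/nodes/` must be named after each minion's ID. A minimal sketch of verifying this from the master (the minion ID in the second command is only a placeholder):

```bash
# Show the ID grain each minion substitutes into the pillar paths above,
# then preview the pillar data one minion actually receives.
sudo salt '*' grains.get id
sudo salt 'sensor1' pillar.items --out=yaml   # 'sensor1' is a placeholder minion ID
```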
diff --git a/salt/_modules/needs_restarting.py b/salt/_modules/needs_restarting.py
new file mode 100644
index 000000000..5afb6f02a
--- /dev/null
+++ b/salt/_modules/needs_restarting.py
@@ -0,0 +1,24 @@
+from os import path
+import subprocess
+
+def check():
+
+ os = __grains__['os']
+ retval = 'False'
+
+ if os == 'Ubuntu':
+ if path.exists('/var/run/reboot-required'):
+ retval = 'True'
+
+ elif os == 'CentOS':
+ cmd = 'needs-restarting -r > /dev/null 2>&1'
+
+ try:
+ needs_restarting = subprocess.check_call(cmd, shell=True)
+ except subprocess.CalledProcessError:
+ retval = 'True'
+
+ else:
+ retval = 'Unsupported OS: %s' % os
+
+ return retval
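The `needs_restarting.check` function above returns the strings 'True' or 'False' (or an unsupported-OS message), and the new `pillar/patch/needs_restarting.sls` publishes that result to the Salt mine for the MOTD template added later in this patch. A quick way to exercise the same path by hand from the master, assuming a standard Salt CLI:

```bash
# Re-run every minion's mine functions (including needs_restarting.check),
# then read the published values back out of the mine.
sudo salt '*' mine.update
sudo salt '*' mine.get '*' needs_restarting.check
```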
diff --git a/salt/bro/init.sls b/salt/bro/init.sls
index f406558be..2e6f10f3c 100644
--- a/salt/bro/init.sls
+++ b/salt/bro/init.sls
@@ -92,13 +92,13 @@ localbrosync:
so-communitybroimage:
cmd.run:
- - name: docker pull --disable-content-trust=false soshybridhunter/so-communitybro:HH1.0.3
+ - name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-communitybro:HH1.0.3
so-bro:
docker_container.running:
- require:
- so-communitybroimage
- - image: soshybridhunter/so-communitybro:HH1.0.3
+ - image: docker.io/soshybridhunter/so-communitybro:HH1.0.3
- privileged: True
- binds:
- /nsm/bro/logs:/nsm/bro/logs:rw
@@ -125,13 +125,13 @@ localbrosync:
so-broimage:
cmd.run:
- - name: docker pull --disable-content-trust=false soshybridhunter/so-bro:HH1.1.1
+ - name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-bro:HH1.1.1
so-bro:
docker_container.running:
- require:
- so-broimage
- - image: soshybridhunter/so-bro:HH1.1.1
+ - image: docker.io/soshybridhunter/so-bro:HH1.1.1
- privileged: True
- binds:
- /nsm/bro/logs:/nsm/bro/logs:rw
diff --git a/salt/ca/files/signing_policies.conf b/salt/ca/files/signing_policies.conf
index a6ecdd4c3..e253f8911 100644
--- a/salt/ca/files/signing_policies.conf
+++ b/salt/ca/files/signing_policies.conf
@@ -10,7 +10,7 @@ x509_signing_policies:
- keyUsage: "digitalSignature, nonRepudiation"
- subjectKeyIdentifier: hash
- authorityKeyIdentifier: keyid,issuer:always
- - days_valid: 3000
+ - days_valid: 820
- copypath: /etc/pki/issued_certs/
registry:
- minions: '*'
@@ -23,7 +23,8 @@ x509_signing_policies:
- keyUsage: "critical keyEncipherment"
- subjectKeyIdentifier: hash
- authorityKeyIdentifier: keyid,issuer:always
- - days_valid: 3000
+ - extendedKeyUsage: serverAuth
+ - days_valid: 820
- copypath: /etc/pki/issued_certs/
masterssl:
- minions: '*'
@@ -36,7 +37,8 @@ x509_signing_policies:
- keyUsage: "critical keyEncipherment"
- subjectKeyIdentifier: hash
- authorityKeyIdentifier: keyid,issuer:always
- - days_valid: 3000
+ - extendedKeyUsage: serverAuth
+ - days_valid: 820
- copypath: /etc/pki/issued_certs/
influxdb:
- minions: '*'
@@ -49,7 +51,8 @@ x509_signing_policies:
- keyUsage: "critical keyEncipherment"
- subjectKeyIdentifier: hash
- authorityKeyIdentifier: keyid,issuer:always
- - days_valid: 3000
+ - extendedKeyUsage: serverAuth
+ - days_valid: 820
- copypath: /etc/pki/issued_certs/
fleet:
- minions: '*'
@@ -62,5 +65,6 @@ x509_signing_policies:
- keyUsage: "critical keyEncipherment"
- subjectKeyIdentifier: hash
- authorityKeyIdentifier: keyid,issuer:always
- - days_valid: 3000
+ - extendedKeyUsage: serverAuth
+ - days_valid: 820
- copypath: /etc/pki/issued_certs/
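These policies shorten certificate lifetimes from 3000 to 820 days (keeping them under the roughly 825-day maximum many TLS clients accept) and add a `serverAuth` extended key usage to the TLS-facing policies. A hedged way to confirm an issued certificate picked up both changes (the file name is illustrative):

```bash
# Inspect the validity window and allowed purposes of a certificate
# issued under the updated signing policies.
openssl x509 -in /etc/pki/issued_certs/example.crt -noout -dates -purpose
```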
diff --git a/salt/ca/init.sls b/salt/ca/init.sls
index 27344fc7f..407516f6e 100644
--- a/salt/ca/init.sls
+++ b/salt/ca/init.sls
@@ -39,10 +39,10 @@ pki_private_key:
- require:
- file: /etc/pki
-mine.send:
+send_x509_pem_entries_to_mine:
module.run:
- - func: x509.get_pem_entries
- - kwargs:
- glob_path: /etc/pki/ca.crt
+ - mine.send:
+ - func: x509.get_pem_entries
+ - glob_path: /etc/pki/ca.crt
- onchanges:
- x509: /etc/pki/ca.crt
diff --git a/salt/common/init.sls b/salt/common/init.sls
index 9d34c4a20..e34431a46 100644
--- a/salt/common/init.sls
+++ b/salt/common/init.sls
@@ -38,6 +38,7 @@ sensorpkgs:
- pkgs:
- docker-ce
- wget
+ - jq
{% if grains['os'] != 'CentOS' %}
- python-docker
- python-m2crypto
@@ -116,13 +117,13 @@ nginxtmp:
# Start the core docker
so-coreimage:
cmd.run:
- - name: docker pull --disable-content-trust=false soshybridhunter/so-core:HH1.1.2
+ - name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-core:HH1.1.3
so-core:
docker_container.running:
- require:
- so-coreimage
- - image: soshybridhunter/so-core:HH1.1.2
+ - image: docker.io/soshybridhunter/so-core:HH1.1.3
- hostname: so-core
- user: socore
- binds:
@@ -176,13 +177,13 @@ tgrafconf:
so-telegrafimage:
cmd.run:
- - name: docker pull --disable-content-trust=false soshybridhunter/so-telegraf:HH1.1.0
+ - name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-telegraf:HH1.1.0
so-telegraf:
docker_container.running:
- require:
- so-telegrafimage
- - image: soshybridhunter/so-telegraf:HH1.1.0
+ - image: docker.io/soshybridhunter/so-telegraf:HH1.1.0
- environment:
- HOST_PROC=/host/proc
- HOST_ETC=/host/etc
@@ -213,7 +214,7 @@ so-telegraf:
- /opt/so/conf/telegraf/etc/telegraf.conf
- /opt/so/conf/telegraf/scripts
-# If its a master or eval lets install the back end for now
+# If it's a master or eval, let's install the back end for now
{% if grains['role'] == 'so-master' or grains['role'] == 'so-eval' and GRAFANA == 1 %}
# Influx DB
@@ -237,13 +238,13 @@ influxdbconf:
so-influximage:
cmd.run:
- - name: docker pull --disable-content-trust=false soshybridhunter/so-influxdb:HH1.1.0
+ - name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-influxdb:HH1.1.0
so-influxdb:
docker_container.running:
- require:
- so-influximage
- - image: soshybridhunter/so-influxdb:HH1.1.0
+ - image: docker.io/soshybridhunter/so-influxdb:HH1.1.0
- hostname: influxdb
- environment:
- INFLUXDB_HTTP_LOG_ENABLED=false
@@ -316,7 +317,7 @@ grafanaconf:
- source: salt://common/grafana/etc
{% if salt['pillar.get']('mastertab', False) %}
-{%- for SN, SNDATA in salt['pillar.get']('mastertab', {}).iteritems() %}
+{%- for SN, SNDATA in salt['pillar.get']('mastertab', {}).items() %}
dashboard-master:
file.managed:
- name: /opt/so/conf/grafana/grafana_dashboards/master/{{ SN }}-Master.json
@@ -337,7 +338,7 @@ dashboard-master:
{% endif %}
{% if salt['pillar.get']('sensorstab', False) %}
-{%- for SN, SNDATA in salt['pillar.get']('sensorstab', {}).iteritems() %}
+{%- for SN, SNDATA in salt['pillar.get']('sensorstab', {}).items() %}
dashboard-{{ SN }}:
file.managed:
- name: /opt/so/conf/grafana/grafana_dashboards/forward_nodes/{{ SN }}-Sensor.json
@@ -358,7 +359,7 @@ dashboard-{{ SN }}:
{% endif %}
{% if salt['pillar.get']('nodestab', False) %}
-{%- for SN, SNDATA in salt['pillar.get']('nodestab', {}).iteritems() %}
+{%- for SN, SNDATA in salt['pillar.get']('nodestab', {}).items() %}
dashboard-{{ SN }}:
file.managed:
- name: /opt/so/conf/grafana/grafana_dashboards/storage_nodes/{{ SN }}-Node.json
@@ -379,7 +380,7 @@ dashboard-{{ SN }}:
{% endif %}
{% if salt['pillar.get']('evaltab', False) %}
-{%- for SN, SNDATA in salt['pillar.get']('evaltab', {}).iteritems() %}
+{%- for SN, SNDATA in salt['pillar.get']('evaltab', {}).items() %}
dashboard-{{ SN }}:
file.managed:
- name: /opt/so/conf/grafana/grafana_dashboards/eval/{{ SN }}-Node.json
@@ -402,11 +403,11 @@ dashboard-{{ SN }}:
# Install the docker. This needs to be behind nginx at some point
so-grafanaimage:
cmd.run:
- - name: docker pull --disable-content-trust=false soshybridhunter/so-grafana:HH1.1.0
+ - name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-grafana:HH1.1.0
so-grafana:
docker_container.running:
- - image: soshybridhunter/so-grafana:HH1.1.0
+ - image: docker.io/soshybridhunter/so-grafana:HH1.1.0
- hostname: grafana
- user: socore
- binds:
diff --git a/salt/common/nginx/nginx.conf.so-eval b/salt/common/nginx/nginx.conf.so-eval
index 344ca4aed..b5cf6ef5a 100644
--- a/salt/common/nginx/nginx.conf.so-eval
+++ b/salt/common/nginx/nginx.conf.so-eval
@@ -152,10 +152,7 @@ http {
}
location /fleet/ {
- auth_basic "Security Onion";
- auth_basic_user_file /opt/so/conf/nginx/.htpasswd;
- rewrite /fleet/(.*) /$1 break;
- proxy_pass https://{{ masterip }}:8080/;
+ proxy_pass https://{{ masterip }}:8080/fleet/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -177,6 +174,30 @@ http {
}
+ location /cortex/ {
+ proxy_pass http://{{ masterip }}:9001/cortex/;
+ proxy_read_timeout 90;
+ proxy_connect_timeout 90;
+ proxy_http_version 1.1; # this is essential for chunked responses to work
+ proxy_set_header Host $host;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header Proxy "";
+
+ }
+
+ location /cyberchef/ {
+ proxy_pass http://{{ masterip }}:9080/;
+ proxy_read_timeout 90;
+ proxy_connect_timeout 90;
+ proxy_http_version 1.1; # this is essential for chunked responses to work
+ proxy_set_header Host $host;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header Proxy "";
+
+ }
+
location /soctopus/ {
proxy_pass http://{{ masterip }}:7000/;
proxy_read_timeout 90;
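The two locations added above proxy Cortex (port 9001, served under its `/cortex/` context) and CyberChef (port 9080) through nginx on the master. A rough smoke test once the containers are running, with a placeholder master address:

```bash
# Expect an HTTP status code from each service via the nginx reverse proxy.
for app in cortex cyberchef; do
  curl -ks "https://192.0.2.10/${app}/" -o /dev/null -w "${app}: %{http_code}\n"
done
```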
diff --git a/salt/common/nginx/nginx.conf.so-helix b/salt/common/nginx/nginx.conf.so-helix
new file mode 100644
index 000000000..39688f3df
--- /dev/null
+++ b/salt/common/nginx/nginx.conf.so-helix
@@ -0,0 +1,89 @@
+# For more information on configuration, see:
+# * Official English Documentation: http://nginx.org/en/docs/
+# * Official Russian Documentation: http://nginx.org/ru/docs/
+
+user nginx;
+worker_processes auto;
+error_log /var/log/nginx/error.log;
+pid /run/nginx.pid;
+
+# Load dynamic modules. See /usr/share/nginx/README.dynamic.
+include /usr/share/nginx/modules/*.conf;
+
+events {
+ worker_connections 1024;
+}
+
+http {
+ log_format main '$remote_addr - $remote_user [$time_local] "$request" '
+ '$status $body_bytes_sent "$http_referer" '
+ '"$http_user_agent" "$http_x_forwarded_for"';
+
+ access_log /var/log/nginx/access.log main;
+
+ sendfile on;
+ tcp_nopush on;
+ tcp_nodelay on;
+ keepalive_timeout 65;
+ types_hash_max_size 2048;
+
+ include /etc/nginx/mime.types;
+ default_type application/octet-stream;
+
+ # Load modular configuration files from the /etc/nginx/conf.d directory.
+ # See http://nginx.org/en/docs/ngx_core_module.html#include
+ # for more information.
+ include /etc/nginx/conf.d/*.conf;
+
+ server {
+ listen 80 default_server;
+ listen [::]:80 default_server;
+ server_name _;
+ root /usr/share/nginx/html;
+
+ # Load configuration files for the default server block.
+ include /etc/nginx/default.d/*.conf;
+
+ location / {
+ }
+
+ error_page 404 /404.html;
+ location = /40x.html {
+ }
+
+ error_page 500 502 503 504 /50x.html;
+ location = /50x.html {
+ }
+ }
+
+# Settings for a TLS enabled server.
+#
+# server {
+# listen 443 ssl http2 default_server;
+# listen [::]:443 ssl http2 default_server;
+# server_name _;
+# root /usr/share/nginx/html;
+#
+# ssl_certificate "/etc/pki/nginx/server.crt";
+# ssl_certificate_key "/etc/pki/nginx/private/server.key";
+# ssl_session_cache shared:SSL:1m;
+# ssl_session_timeout 10m;
+# ssl_ciphers HIGH:!aNULL:!MD5;
+# ssl_prefer_server_ciphers on;
+#
+# # Load configuration files for the default server block.
+# include /etc/nginx/default.d/*.conf;
+#
+# location / {
+# }
+#
+# error_page 404 /404.html;
+# location = /40x.html {
+# }
+#
+# error_page 500 502 503 504 /50x.html;
+# location = /50x.html {
+# }
+# }
+
+}
diff --git a/salt/common/nginx/nginx.conf.so-master b/salt/common/nginx/nginx.conf.so-master
index dcedcafaf..265413fa2 100644
--- a/salt/common/nginx/nginx.conf.so-master
+++ b/salt/common/nginx/nginx.conf.so-master
@@ -176,6 +176,30 @@ http {
}
+ location /cortex/ {
+ proxy_pass http://{{ masterip }}:9001/cortex/;
+ proxy_read_timeout 90;
+ proxy_connect_timeout 90;
+ proxy_http_version 1.1; # this is essential for chunked responses to work
+ proxy_set_header Host $host;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header Proxy "";
+
+ }
+
+ location /cyberchef/ {
+ proxy_pass http://{{ masterip }}:9080/;
+ proxy_read_timeout 90;
+ proxy_connect_timeout 90;
+ proxy_http_version 1.1; # this is essential for chunked responses to work
+ proxy_set_header Host $host;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header Proxy "";
+
+ }
+
location /soctopus/ {
proxy_pass http://{{ masterip }}:7000/;
proxy_read_timeout 90;
diff --git a/salt/common/telegraf/etc/telegraf.conf b/salt/common/telegraf/etc/telegraf.conf
index cf12f89bf..af9941bfa 100644
--- a/salt/common/telegraf/etc/telegraf.conf
+++ b/salt/common/telegraf/etc/telegraf.conf
@@ -76,7 +76,7 @@
logfile = "/var/log/telegraf/telegraf.log"
## Override default hostname, if empty use os.Hostname()
- hostname = "{{ grains.host }}"
+ hostname = "{{ grains.id }}"
## If set to true, do no set the "host" tag in the telegraf agent.
omit_hostname = false
diff --git a/salt/common/tools/sbin/so-helix-apikey b/salt/common/tools/sbin/so-helix-apikey
new file mode 100644
index 000000000..529ab93e4
--- /dev/null
+++ b/salt/common/tools/sbin/so-helix-apikey
@@ -0,0 +1,24 @@
+#!/bin/bash
+got_root() {
+
+ # Make sure you are root
+ if [ "$(id -u)" -ne 0 ]; then
+ echo "This script must be run using sudo!"
+ exit 1
+ fi
+
+}
+
+got_root
+if [ ! -f /opt/so/saltstack/pillar/fireeye/init.sls ]; then
+ echo "This is nto configured for Helix Mode. Please re-install."
+ exit
+else
+ echo "Enter your Helix API Key: "
+ read APIKEY
+ sed -i "s/^ api_key.*/ api_key: $APIKEY/g" /opt/so/saltstack/pillar/fireeye/init.sls
+ docker stop so-logstash
+ docker rm so-logstash
+ echo "Restarting Logstash for updated key"
+ salt-call state.apply logstash queue=True
+fi
diff --git a/salt/common/tools/sbin/so-playbook-ruleupdate b/salt/common/tools/sbin/so-playbook-ruleupdate
new file mode 100644
index 000000000..6e2d16f5d
--- /dev/null
+++ b/salt/common/tools/sbin/so-playbook-ruleupdate
@@ -0,0 +1 @@
+sudo docker exec so-soctopus python3 playbook_bulk-update.py
diff --git a/salt/common/tools/sbin/so-playbook-sync b/salt/common/tools/sbin/so-playbook-sync
new file mode 100644
index 000000000..3fc13c199
--- /dev/null
+++ b/salt/common/tools/sbin/so-playbook-sync
@@ -0,0 +1 @@
+sudo docker exec so-soctopus python3 playbook_play-sync.py
diff --git a/salt/common/tools/sbin/so-redis-count b/salt/common/tools/sbin/so-redis-count
new file mode 100644
index 000000000..5b299e494
--- /dev/null
+++ b/salt/common/tools/sbin/so-redis-count
@@ -0,0 +1 @@
+sudo docker exec -it so-redis redis-cli llen logstash:unparsed
diff --git a/salt/curator/init.sls b/salt/curator/init.sls
index 5c788b891..74dd47a99 100644
--- a/salt/curator/init.sls
+++ b/salt/curator/init.sls
@@ -114,13 +114,13 @@ curdel:
so-curatorimage:
cmd.run:
- - name: docker pull --disable-content-trust=false soshybridhunter/so-curator:HH1.1.0
+ - name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-curator:HH1.1.0
so-curator:
docker_container.running:
- require:
- so-curatorimage
- - image: soshybridhunter/so-curator:HH1.1.0
+ - image: docker.io/soshybridhunter/so-curator:HH1.1.0
- hostname: curator
- name: so-curator
- user: curator
diff --git a/salt/cyberchef/init.sls b/salt/cyberchef/init.sls
new file mode 100644
index 000000000..202b15037
--- /dev/null
+++ b/salt/cyberchef/init.sls
@@ -0,0 +1,53 @@
+# Copyright 2014,2015,2016,2017,2018 Security Onion Solutions, LLC
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+# Create the cyberchef group
+cyberchefgroup:
+ group.present:
+ - name: cyberchef
+ - gid: 946
+
+# Add the cyberchef user
+cyberchef:
+ user.present:
+ - uid: 946
+ - gid: 946
+ - home: /opt/so/conf/cyberchef
+
+cyberchefconfdir:
+ file.directory:
+ - name: /opt/so/conf/cyberchef
+ - user: 946
+ - group: 939
+ - makedirs: True
+
+cybercheflog:
+ file.directory:
+ - name: /opt/so/log/cyberchef
+ - user: 946
+ - group: 946
+ - makedirs: True
+
+so-cyberchefimage:
+ cmd.run:
+ - name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-cyberchef:HH1.1.3
+
+so-cyberchef:
+ docker_container.running:
+ - require:
+ - so-cyberchefimage
+ - image: docker.io/soshybridhunter/so-cyberchef:HH1.1.3
+ - port_bindings:
+ - 0.0.0.0:9080:8080
diff --git a/salt/elastalert/files/elastalert_config.yaml b/salt/elastalert/files/elastalert_config.yaml
index 6a918093b..735ccb190 100644
--- a/salt/elastalert/files/elastalert_config.yaml
+++ b/salt/elastalert/files/elastalert_config.yaml
@@ -8,6 +8,11 @@ rules_folder: /etc/elastalert/rules/
# the rules directory - true or false
scan_subdirectories: true
+# Do not disable a rule when an uncaught exception is thrown -
+# This setting should be tweaked once the following issue has been fixed
+# https://github.com/Security-Onion-Solutions/securityonion-saltstack/issues/98
+disable_rules_on_error: false
+
# How often ElastAlert will query Elasticsearch
# The unit can be anything from weeks to seconds
run_every:
diff --git a/salt/elastalert/files/rules/so/nids2hive.yaml b/salt/elastalert/files/rules/so/nids2hive.yaml
index 7d55b4675..019a0844f 100644
--- a/salt/elastalert/files/rules/so/nids2hive.yaml
+++ b/salt/elastalert/files/rules/so/nids2hive.yaml
@@ -15,7 +15,7 @@ timeframe:
buffer_time:
minutes: 10
allow_buffer_time_overlap: true
-query_key: alert
+query_key: ["alert", "ips"]
realert:
days: 1
@@ -36,11 +36,11 @@ hive_proxies:
hive_alert_config:
title: '{match[alert]}'
- type: 'external'
+ type: 'NIDS'
source: 'SecurityOnion'
description: "`NIDS Dashboard:` \n\n \n\n `IPs: `{match[source_ip]}:{match[source_port]} --> {match[destination_ip]}:{match[destination_port]} \n\n `Signature:` {match[rule_signature]}"
severity: 2
- tags: ['elastalert', 'SecurityOnion', 'NIDS']
+ tags: ['{match[sid]}','{match[source_ip]}','{match[destination_ip]}']
tlp: 3
status: 'New'
follow: True
diff --git a/salt/elastalert/init.sls b/salt/elastalert/init.sls
index 8e8b32ae6..999bbbd91 100644
--- a/salt/elastalert/init.sls
+++ b/salt/elastalert/init.sls
@@ -111,13 +111,13 @@ elastaconf:
so-elastalertimage:
cmd.run:
- - name: docker pull --disable-content-trust=false soshybridhunter/so-elastalert:HH1.1.1
+ - name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-elastalert:HH1.1.1
so-elastalert:
docker_container.running:
- require:
- so-elastalertimage
- - image: soshybridhunter/so-elastalert:HH1.1.1
+ - image: docker.io/soshybridhunter/so-elastalert:HH1.1.1
- hostname: elastalert
- name: so-elastalert
- user: elastalert
diff --git a/salt/elasticsearch/files/so-elasticsearch-pipelines b/salt/elasticsearch/files/so-elasticsearch-pipelines
index c0dd44aa9..b1b6db158 100755
--- a/salt/elasticsearch/files/so-elasticsearch-pipelines
+++ b/salt/elasticsearch/files/so-elasticsearch-pipelines
@@ -19,7 +19,7 @@ ELASTICSEARCH_HOST=$1
ELASTICSEARCH_PORT=9200
# Define a default directory to load pipelines from
-ELASTICSEARCH_INGEST_PIPELINES="/opt/so/saltstack/salt/elasticsearch/files/ingest/"
+ELASTICSEARCH_INGEST_PIPELINES="/opt/so/conf/elasticsearch/ingest/"
# Wait for ElasticSearch to initialize
echo -n "Waiting for ElasticSearch..."
@@ -39,7 +39,7 @@ while [[ "$COUNT" -le 240 ]]; do
done
if [ "$ELASTICSEARCH_CONNECTED" == "no" ]; then
echo
- echo -e "Connection attempt timed out. Unable to connect to ElasticSearch. \nPlease try: \n -checking log(s) in /var/log/elasticsearch/\n -running 'sudo docker ps' \n -running 'sudo so-elastic-restart'"
+ echo -e "Connection attempt timed out. Unable to connect to ElasticSearch. \nPlease try: \n -checking log(s) in /var/log/elasticsearch/\n -running 'sudo docker ps' \n -running 'sudo so-elastic-restart'"
echo
fi
diff --git a/salt/elasticsearch/init.sls b/salt/elasticsearch/init.sls
index 1eb3a19e4..6036d5da8 100644
--- a/salt/elasticsearch/init.sls
+++ b/salt/elasticsearch/init.sls
@@ -60,6 +60,20 @@ esconfdir:
- group: 939
- makedirs: True
+esingestdir:
+ file.directory:
+ - name: /opt/so/conf/elasticsearch/ingest
+ - user: 930
+ - group: 939
+ - makedirs: True
+
+esingestconf:
+ file.recurse:
+ - name: /opt/so/conf/elasticsearch/ingest
+ - source: salt://elasticsearch/files/ingest
+ - user: 930
+ - group: 939
+
eslog4jfile:
file.managed:
- name: /opt/so/conf/elasticsearch/log4j2.properties
@@ -92,13 +106,13 @@ eslogdir:
so-elasticsearchimage:
cmd.run:
- - name: docker pull --disable-content-trust=false soshybridhunter/so-elasticsearch:HH1.1.0
+ - name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-elasticsearch:HH1.1.0
so-elasticsearch:
docker_container.running:
- require:
- so-elasticsearchimage
- - image: soshybridhunter/so-elasticsearch:HH1.1.0
+ - image: docker.io/soshybridhunter/so-elasticsearch:HH1.1.0
- hostname: elasticsearch
- name: so-elasticsearch
- user: elasticsearch
@@ -121,9 +135,17 @@ so-elasticsearch:
- /nsm/elasticsearch:/usr/share/elasticsearch/data:rw
- /opt/so/log/elasticsearch:/var/log/elasticsearch:rw
+so-elasticsearch-pipelines-file:
+ file.managed:
+ - name: /opt/so/conf/elasticsearch/so-elasticsearch-pipelines
+ - source: salt://elasticsearch/files/so-elasticsearch-pipelines
+ - user: 930
+ - group: 939
+ - mode: 754
+
so-elasticsearch-pipelines:
cmd.run:
- - name: /opt/so/saltstack/salt/elasticsearch/files/so-elasticsearch-pipelines {{ esclustername }}
+ - name: /opt/so/conf/elasticsearch/so-elasticsearch-pipelines {{ esclustername }}
# Tell the main cluster I am here
#curl -XPUT http://\$ELASTICSEARCH_HOST:\$ELASTICSEARCH_PORT/_cluster/settings -H'Content-Type: application/json' -d '{"persistent": {"search": {"remote": {"$HOSTNAME": {"skip_unavailable": "true", "seeds": ["$DOCKER_INTERFACE:$REVERSE_PORT"]}}}}}'
@@ -155,13 +177,13 @@ freqlogdir:
so-freqimage:
cmd.run:
- - name: docker pull --disable-content-trust=false soshybridhunter/so-freqserver:HH1.0.3
+ - name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-freqserver:HH1.0.3
so-freq:
docker_container.running:
- require:
- so-freqimage
- - image: soshybridhunter/so-freqserver:HH1.0.3
+ - image: docker.io/soshybridhunter/so-freqserver:HH1.0.3
- hostname: freqserver
- name: so-freqserver
- user: freqserver
@@ -197,13 +219,13 @@ dstatslogdir:
so-domainstatsimage:
cmd.run:
- - name: docker pull --disable-content-trust=false soshybridhunter/so-domainstats:HH1.0.3
+ - name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-domainstats:HH1.0.3
so-domainstats:
docker_container.running:
- require:
- so-domainstatsimage
- - image: soshybridhunter/so-domainstats:HH1.0.3
+ - image: docker.io/soshybridhunter/so-domainstats:HH1.0.3
- hostname: domainstats
- name: so-domainstats
- user: domainstats
diff --git a/salt/filebeat/etc/filebeat.yml b/salt/filebeat/etc/filebeat.yml
index f0d3a8587..0da9b68bc 100644
--- a/salt/filebeat/etc/filebeat.yml
+++ b/salt/filebeat/etc/filebeat.yml
@@ -12,7 +12,7 @@ name: {{ HOSTNAME }}
# Sets log level. The default log level is info.
# Available log levels are: error, warning, info, debug
-logging.level: debug
+logging.level: error
# Enable debug output for selected components. To enable all selectors use ["*"]
# Other available selectors are "beat", "publish", "service"
@@ -66,7 +66,7 @@ filebeat.modules:
# List of prospectors to fetch data.
filebeat.prospectors:
#------------------------------ Log prospector --------------------------------
-{%- if grains['role'] == 'so-sensor' or grains['role'] == "so-eval" %}
+{%- if grains['role'] == 'so-sensor' or grains['role'] == "so-eval" or grains['role'] == "so-helix" %}
{%- if BROVER != 'SURICATA' %}
{%- for LOGNAME in salt['pillar.get']('brologs:enabled', '') %}
- type: log
diff --git a/salt/filebeat/init.sls b/salt/filebeat/init.sls
index b92899ef0..fce1c6b38 100644
--- a/salt/filebeat/init.sls
+++ b/salt/filebeat/init.sls
@@ -39,9 +39,9 @@ filebeatpkidir:
# This needs to be owned by root
filebeatconfsync:
- file.recurse:
- - name: /opt/so/conf/filebeat/etc
- - source: salt://filebeat/etc
+ file.managed:
+ - name: /opt/so/conf/filebeat/etc/filebeat.yml
+ - source: salt://filebeat/etc/filebeat.yml
- user: 0
- group: 0
- template: jinja
@@ -58,13 +58,13 @@ filebeatconfsync:
so-filebeatimage:
cmd.run:
- - name: docker pull --disable-content-trust=false soshybridhunter/so-filebeat:HH1.1.1
+ - name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-filebeat:HH1.1.1
so-filebeat:
docker_container.running:
- require:
- so-filebeatimage
- - image: soshybridhunter/so-filebeat:HH1.1.1
+ - image: docker.io/soshybridhunter/so-filebeat:HH1.1.1
- hostname: so-filebeat
- user: root
- extra_hosts: {{ MASTER }}:{{ MASTERIP }}
@@ -85,4 +85,4 @@ so-filebeat:
{%- endif %}
- /etc/ssl/certs/intca.crt:/usr/share/filebeat/intraca.crt:ro
- watch:
- - file: /opt/so/conf/filebeat/etc
+ - file: /opt/so/conf/filebeat/etc/filebeat.yml
diff --git a/salt/firewall/init.sls b/salt/firewall/init.sls
index 68d1f66cd..8a6d41f0f 100644
--- a/salt/firewall/init.sls
+++ b/salt/firewall/init.sls
@@ -1,5 +1,5 @@
# Firewall Magic for the grid
-{%- if grains['role'] == 'so-master' or grains['role'] == 'so-eval' %}
+{%- if grains['role'] in ['so-eval','so-master','so-helix'] %}
{%- set ip = salt['pillar.get']('static:masterip', '') %}
{%- elif grains['role'] == 'so-node' %}
{%- set ip = salt['pillar.get']('node:mainip', '') %}
@@ -20,7 +20,7 @@ iptables_fix_fwd:
- jump: ACCEPT
- position: 1
- target: DOCKER-USER
-
+
# Keep localhost in the game
iptables_allow_localhost:
iptables.append:
@@ -131,7 +131,7 @@ enable_wazuh_manager_1514_udp_{{ip}}:
- save: True
# Rules if you are a Master
-{% if grains['role'] == 'so-master' or grains['role'] == 'so-eval' %}
+{% if grains['role'] == 'so-master' or grains['role'] == 'so-eval' or grains['role'] == 'so-helix'%}
#This should be more granular
iptables_allow_master_docker:
iptables.insert:
@@ -265,6 +265,29 @@ enable_master_navigator_4200_{{ip}}:
- position: 1
- save: True
+enable_master_cortex_9001_{{ip}}:
+ iptables.insert:
+ - table: filter
+ - chain: DOCKER-USER
+ - jump: ACCEPT
+ - proto: tcp
+ - source: {{ ip }}
+ - dport: 9001
+ - position: 1
+ - save: True
+
+enable_master_cyberchef_9080_{{ip}}:
+ iptables.insert:
+ - table: filter
+ - chain: DOCKER-USER
+ - jump: ACCEPT
+ - proto: tcp
+ - source: {{ ip }}
+ - dport: 9080
+ - position: 1
+ - save: True
+
+
{% endfor %}
# Make it so all the minions can talk to salt and update etc.
diff --git a/salt/fleet/init.sls b/salt/fleet/init.sls
index 83c019880..917ee541e 100644
--- a/salt/fleet/init.sls
+++ b/salt/fleet/init.sls
@@ -61,13 +61,13 @@ fleetdbpriv:
so-fleetimage:
cmd.run:
- - name: docker pull --disable-content-trust=false soshybridhunter/so-fleet:HH1.1.0
+ - name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-fleet:HH1.1.3
so-fleet:
docker_container.running:
- require:
- so-fleetimage
- - image: soshybridhunter/so-fleet:HH1.1.0
+ - image: docker.io/soshybridhunter/so-fleet:HH1.1.3
- hostname: so-fleet
- port_bindings:
- 0.0.0.0:8080:8080
@@ -83,6 +83,7 @@ so-fleet:
- KOLIDE_AUTH_JWT_KEY=thisisatest
- KOLIDE_OSQUERY_STATUS_LOG_FILE=/var/log/osquery/status.log
- KOLIDE_OSQUERY_RESULT_LOG_FILE=/var/log/osquery/result.log
+ - KOLIDE_SERVER_URL_PREFIX=/fleet
- binds:
- /etc/pki/fleet.key:/ssl/server.key:ro
- /etc/pki/fleet.crt:/ssl/server.cert:ro
diff --git a/salt/fleet/so-fleet-setup.sh b/salt/fleet/so-fleet-setup.sh
index 7691b1eb2..32bbddbe7 100644
--- a/salt/fleet/so-fleet-setup.sh
+++ b/salt/fleet/so-fleet-setup.sh
@@ -7,7 +7,7 @@ fi
initpw=$(date +%s | sha256sum | base64 | head -c 16 ; echo)
-docker exec so-fleet fleetctl config set --address https://$1:443 --tls-skip-verify
+docker exec so-fleet fleetctl config set --address https://$1:443 --tls-skip-verify --url-prefix /fleet
docker exec so-fleet fleetctl setup --email $2 --password $initpw
docker exec so-fleet fleetctl apply -f /packs/palantir/Fleet/Endpoints/options.yaml
@@ -29,7 +29,7 @@ docker run \
--rm \
--mount type=bind,source=/opt/so/conf/fleet/packages,target=/output \
--mount type=bind,source=/etc/pki/launcher.crt,target=/var/launcher/launcher.crt \
- soshybridhunter/so-fleet-launcher:HH1.1.0 "$esecret" "$1":8080
+ docker.io/soshybridhunter/so-fleet-launcher:HH1.1.0 "$esecret" "$1":8080
cp /opt/so/conf/fleet/packages/launcher.* /opt/so/saltstack/salt/launcher/packages/
#Update timestamp on packages webpage
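With `KOLIDE_SERVER_URL_PREFIX=/fleet` set on the container and `--url-prefix /fleet` passed to `fleetctl config set`, Fleet now serves everything under the `/fleet` prefix, matching the simplified nginx `proxy_pass` earlier in this patch. A rough reachability check with a placeholder master address:

```bash
# Fleet should answer on the /fleet prefix both directly (8080) and through nginx (443).
curl -ks "https://192.0.2.10:8080/fleet/" -o /dev/null -w 'direct: %{http_code}\n'
curl -ks "https://192.0.2.10/fleet/" -o /dev/null -w 'via nginx: %{http_code}\n'
```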
diff --git a/salt/hive/init.sls b/salt/hive/init.sls
index 5897f6a93..73b29b501 100644
--- a/salt/hive/init.sls
+++ b/salt/hive/init.sls
@@ -21,6 +21,28 @@ hiveconf:
- group: 939
- template: jinja
+cortexconfdir:
+ file.directory:
+ - name: /opt/so/conf/cortex
+ - makedirs: True
+ - user: 939
+ - group: 939
+
+cortexlogdir:
+ file.directory:
+ - name: /opt/so/log/cortex
+ - makedirs: True
+ - user: 939
+ - group: 939
+
+cortexconf:
+ file.recurse:
+ - name: /opt/so/conf/cortex
+ - source: salt://hive/thehive/etc
+ - user: 939
+ - group: 939
+ - template: jinja
+
# Install Elasticsearch
# Made directory for ES data to live in
@@ -33,13 +55,13 @@ hiveesdata:
so-thehive-esimage:
cmd.run:
- - name: docker pull --disable-content-trust=false soshybridhunter/so-thehive-es:HH1.1.1
+ - name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-thehive-es:HH1.1.1
so-thehive-es:
docker_container.running:
- require:
- so-thehive-esimage
- - image: soshybridhunter/so-thehive-es:HH1.1.1
+ - image: docker.io/soshybridhunter/so-thehive-es:HH1.1.1
- hostname: so-thehive-es
- name: so-thehive-es
- user: 939
@@ -66,27 +88,38 @@ so-thehive-es:
# Install Cortex
-#so-corteximage:
-# cmd.run:
-# - name: docker pull --disable-content-trust=false soshybridhunter/so-cortex:HH1.0.3
+so-corteximage:
+ cmd.run:
+ - name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-thehive-cortex:HH1.1.3
-#so-cortex:
-# docker_container.running:
-# - image: thehiveproject/cortex:latest
-# - hostname: so-cortex
-# - name: so-cortex
-# - port_bindings:
-# - 0.0.0.0:9001:9001
+so-cortex:
+ docker_container.running:
+ - require:
+ - so-corteximage
+ - image: docker.io/soshybridhunter/so-thehive-cortex:HH1.1.3
+ - hostname: so-cortex
+ - name: so-cortex
+ - user: 939
+ - binds:
+ - /opt/so/conf/hive/etc/cortex-application.conf:/opt/cortex/conf/application.conf:ro
+ - port_bindings:
+ - 0.0.0.0:9001:9001
+
+cortexscript:
+ cmd.script:
+ - source: salt://hive/thehive/scripts/cortex_init.sh
+ - cwd: /opt/so
+ - template: jinja
so-thehiveimage:
cmd.run:
- - name: docker pull --disable-content-trust=false soshybridhunter/so-thehive:HH1.1.1
+ - name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-thehive:HH1.1.1
so-thehive:
docker_container.running:
- require:
- so-thehiveimage
- - image: soshybridhunter/so-thehive:HH1.1.1
+ - image: docker.io/soshybridhunter/so-thehive:HH1.1.1
- environment:
- ELASTICSEARCH_HOST={{ MASTERIP }}
- hostname: so-thehive
diff --git a/salt/hive/thehive/etc/application.conf b/salt/hive/thehive/etc/application.conf
index e4dd1e2b2..14a635e54 100644
--- a/salt/hive/thehive/etc/application.conf
+++ b/salt/hive/thehive/etc/application.conf
@@ -1,4 +1,5 @@
{%- set MASTERIP = salt['pillar.get']('static:masterip', '') %}
+{%- set CORTEXKEY = salt['pillar.get']('static:cortexorguserkey', '') %}
# Secret Key
# The secret key is used to secure cryptographic functions.
@@ -130,15 +131,15 @@ play.http.parser.maxDiskBuffer = 1G
#
# In order to use Cortex, first you need to enable the Cortex module by uncommenting the next line
-#play.modules.enabled += connectors.cortex.CortexConnector
+play.modules.enabled += connectors.cortex.CortexConnector
cortex {
- #"CORTEX-SERVER-ID" {
- # url = ""
- # key = ""
+ "CORTEX-SERVER-ID" {
+ url = "http://{{ MASTERIP }}:9001/cortex/"
+ key = "{{ CORTEXKEY }}"
# # HTTP client configuration (SSL and proxy)
# ws {}
- #}
+ }
}
# MISP
@@ -207,3 +208,8 @@ misp {
# purpose = ImportAndExport
#} ## <-- Uncomment to complete the configuration
}
+webhooks {
+ SOCtopusWebHook {
+ url = "http://{{ MASTERIP }}:7000/enrich"
+ }
+}
diff --git a/salt/hive/thehive/etc/cortex-application.conf b/salt/hive/thehive/etc/cortex-application.conf
new file mode 100644
index 000000000..543a2a3e9
--- /dev/null
+++ b/salt/hive/thehive/etc/cortex-application.conf
@@ -0,0 +1,130 @@
+{%- set MASTERIP = salt['pillar.get']('static:masterip', '') %}
+
+# Secret Key
+# The secret key is used to secure cryptographic functions.
+# WARNING: If you deploy your application on several servers, make sure to use the same key.
+play.http.secret.key="letsdewdis"
+play.http.context=/cortex/
+search.uri = "http://{{ MASTERIP }}:9400"
+
+# Elasticsearch
+search {
+ # Name of the index
+ index = cortex
+ # Name of the Elasticsearch cluster
+ cluster = hive
+ # Address of the Elasticsearch instance
+ host = ["{{ MASTERIP }}:9500"]
+ # Scroll keepalive
+ keepalive = 1m
+ # Size of the page for scroll
+ pagesize = 50
+ # Number of shards
+ nbshards = 5
+ # Number of replicas
+ nbreplicas = 1
+ # Arbitrary settings
+ settings {
+ # Maximum number of nested fields
+ mapping.nested_fields.limit = 100
+ }
+
+ ## Authentication configuration
+ #search.username = ""
+ #search.password = ""
+
+ ## SSL configuration
+ #search.keyStore {
+ # path = "/path/to/keystore"
+ # type = "JKS" # or PKCS12
+ # password = "keystore-password"
+ #}
+ #search.trustStore {
+ # path = "/path/to/trustStore"
+ # type = "JKS" # or PKCS12
+ # password = "trustStore-password"
+ #}
+}
+
+## Cache
+#
+# If an analyzer is executed against the same observable, the previous report can be returned without re-executing the
+# analyzer. The cache is used only if the second job occurs within cache.job (the default is 10 minutes).
+cache.job = 10 minutes
+
+## Authentication
+auth {
+ # "provider" parameter contains the authentication provider(s). It can be multi-valued, which is useful
+ # for migration.
+ # The available auth types are:
+ # - services.LocalAuthSrv : passwords are stored in the user entity within ElasticSearch). No
+ # configuration are required.
+ # - ad : use ActiveDirectory to authenticate users. The associated configuration shall be done in
+ # the "ad" section below.
+ # - ldap : use LDAP to authenticate users. The associated configuration shall be done in the
+ # "ldap" section below.
+ provider = [local]
+
+ ad {
+ # The Windows domain name in DNS format. This parameter is required if you do not use
+ # 'serverNames' below.
+ #domainFQDN = "mydomain.local"
+
+ # Optionally you can specify the host names of the domain controllers instead of using 'domainFQDN
+ # above. If this parameter is not set, TheHive uses 'domainFQDN'.
+ #serverNames = [ad1.mydomain.local, ad2.mydomain.local]
+
+ # The Windows domain name using short format. This parameter is required.
+ #domainName = "MYDOMAIN"
+
+ # If 'true', use SSL to connect to the domain controller.
+ #useSSL = true
+ }
+
+ ldap {
+ # The LDAP server name or address. The port can be specified using the 'host:port'
+ # syntax. This parameter is required if you don't use 'serverNames' below.
+ #serverName = "ldap.mydomain.local:389"
+
+ # If you have multiple LDAP servers, use the multi-valued setting 'serverNames' instead.
+ #serverNames = [ldap1.mydomain.local, ldap2.mydomain.local]
+
+ # Account to use to bind to the LDAP server. This parameter is required.
+ #bindDN = "cn=thehive,ou=services,dc=mydomain,dc=local"
+
+ # Password of the binding account. This parameter is required.
+ #bindPW = "***secret*password***"
+
+ # Base DN to search users. This parameter is required.
+ #baseDN = "ou=users,dc=mydomain,dc=local"
+
+ # Filter to search user in the directory server. Please note that {0} is replaced
+ # by the actual user name. This parameter is required.
+ #filter = "(cn={0})"
+
+ # If 'true', use SSL to connect to the LDAP directory server.
+ #useSSL = true
+ }
+}
+
+## ANALYZERS
+#
+analyzer {
+ # Absolute path where you have pulled the Cortex-Analyzers repository.
+ path = ["/Cortex-Analyzers/analyzers"]
+
+ # Sane defaults. Do not change unless you know what you are doing.
+ fork-join-executor {
+
+ # Min number of threads available for analysis.
+ parallelism-min = 2
+
+ # Parallelism (threads) ... ceil(available processors * factor).
+ parallelism-factor = 2.0
+
+ # Max number of threads available for analysis.
+ parallelism-max = 4
+ }
+}
+
+# It's the end my friend. Happy hunting!
diff --git a/salt/hive/thehive/scripts/cortex_init.sh b/salt/hive/thehive/scripts/cortex_init.sh
new file mode 100644
index 000000000..506b14be5
--- /dev/null
+++ b/salt/hive/thehive/scripts/cortex_init.sh
@@ -0,0 +1,64 @@
+#!/bin/bash
+{%- set MASTERIP = salt['pillar.get']('static:masterip', '') %}
+{%- set CORTEXUSER = salt['pillar.get']('static:cortexuser', '') %}
+{%- set CORTEXPASSWORD = salt['pillar.get']('static:cortexpassword', '') %}
+{%- set CORTEXKEY = salt['pillar.get']('static:cortexkey', '') %}
+{%- set CORTEXORGNAME = salt['pillar.get']('static:cortexorgname', '') %}
+{%- set CORTEXORGUSER = salt['pillar.get']('static:cortexorguser', '') %}
+{%- set CORTEXORGUSERKEY = salt['pillar.get']('static:cortexorguserkey', '') %}
+
+cortex_init(){
+ sleep 60
+ CORTEX_IP="{{MASTERIP}}"
+ CORTEX_USER="{{CORTEXUSER}}"
+ CORTEX_PASSWORD="{{CORTEXPASSWORD}}"
+ CORTEX_KEY="{{CORTEXKEY}}"
+ CORTEX_ORG_NAME="{{CORTEXORGNAME}}"
+ CORTEX_ORG_DESC="{{CORTEXORGNAME}} organization created by Security Onion setup"
+ CORTEX_ORG_USER="{{CORTEXORGUSER}}"
+ CORTEX_ORG_USER_KEY="{{CORTEXORGUSERKEY}}"
+ SOCTOPUS_CONFIG="/opt/so/saltstack/salt/soctopus/files/SOCtopus.conf"
+
+
+ # Migrate DB
+ curl -v -k -XPOST "https://$CORTEX_IP/cortex/api/maintenance/migrate"
+
+ # Create initial Cortex superadmin
+ curl -v -k "https://$CORTEX_IP/cortex/api/user" -H "Content-Type: application/json" -d "{\"login\" : \"$CORTEX_USER\",\"name\" : \"$CORTEX_USER\",\"roles\" : [\"superadmin\"],\"preferences\" : \"{}\",\"password\" : \"$CORTEX_PASSWORD\", \"key\": \"$CORTEX_KEY\"}"
+
+ # Create user-supplied org
+ curl -k -XPOST -H "Authorization: Bearer $CORTEX_KEY" -H "Content-Type: application/json" "https://$CORTEX_IP/cortex/api/organization" -d "{ \"name\": \"$CORTEX_ORG_NAME\",\"description\": \"$CORTEX_ORG_DESC\",\"status\": \"Active\"}"
+
+ # Create user-supplied org user
+ curl -k -XPOST -H "Authorization: Bearer $CORTEX_KEY" -H "Content-Type: application/json" "https://$CORTEX_IP/cortex/api/user" -d "{\"name\": \"$CORTEX_ORG_USER\",\"roles\": [\"read\",\"analyze\",\"orgadmin\"],\"organization\": \"$CORTEX_ORG_NAME\",\"login\": \"$CORTEX_ORG_USER\",\"key\": \"$CORTEX_ORG_USER_KEY\" }"
+
+ # Enable URLScan.io Analyzer
+ curl -v -k -XPOST -H "Authorization: Bearer $CORTEX_ORG_USER_KEY" -H "Content-Type: application/json" "https://$CORTEX_IP/cortex/api/organization/analyzer/Urlscan_io_Search_0_1_0" -d '{"name":"Urlscan_io_Search_0_1_0","configuration":{"auto_extract_artifacts":false,"check_tlp":true,"max_tlp":2}}'
+
+ # Enable Cert PassiveDNS Analyzer
+ curl -v -k -XPOST -H "Authorization: Bearer $CORTEX_ORG_USER_KEY" -H "Content-Type: application/json" "https://$CORTEX_IP/cortex/api/organization/analyzer/CERTatPassiveDNS_2_0" -d '{"name":"CERTatPassiveDNS_2_0","configuration":{"auto_extract_artifacts":false,"check_tlp":true,"max_tlp":2, "limit": 100}}'
+
+ # Revoke $CORTEX_USER key
+ curl -k -XDELETE -H "Authorization: Bearer $CORTEX_KEY" "https://$CORTEX_IP/cortex/api/user/$CORTEX_USER/key"
+
+ # Update SOCtopus config with apikey value
+ #sed -i "s/cortex_key = .*/cortex_key = $CORTEX_KEY/" $SOCTOPUS_CONFIG
+
+ touch /opt/so/state/cortex.txt
+
+}
+
+if [ -f /opt/so/state/cortex.txt ]; then
+ exit 0
+else
+ rm -f garbage_file
+ while ! wget -O garbage_file {{MASTERIP}}:9500 2>/dev/null
+ do
+ echo "Waiting for Elasticsearch..."
+ rm -f garbage_file
+ sleep 1
+ done
+ rm -f garbage_file
+ sleep 5
+ cortex_init
+fi
diff --git a/salt/hive/thehive/scripts/hive_init.sh b/salt/hive/thehive/scripts/hive_init.sh
index 4e121e078..2215d4e44 100755
--- a/salt/hive/thehive/scripts/hive_init.sh
+++ b/salt/hive/thehive/scripts/hive_init.sh
@@ -19,7 +19,13 @@ hive_init(){
# Create intial TheHive user
curl -v -k "https://$HIVE_IP/thehive/api/user" -H "Content-Type: application/json" -d "{\"login\" : \"$HIVE_USER\",\"name\" : \"$HIVE_USER\",\"roles\" : [\"read\",\"alert\",\"write\",\"admin\"],\"preferences\" : \"{}\",\"password\" : \"$HIVE_PASSWORD\", \"key\": \"$HIVE_KEY\"}"
+
+ # Pre-load custom fields
+ #
+ # reputation
+ curl -v -k "https://$HIVE_IP/thehive/api/list/custom_fields" -H "Authorization: Bearer $HIVE_KEY" -H "Content-Type: application/json" -d "{\"value\":{\"name\": \"reputation\", \"reference\": \"reputation\", \"description\": \"This field provides an overall reputation status for an address/domain.\", \"type\": \"string\", \"options\": []}}"
+
# Update SOCtopus config with apikey value
#sed -i "s/hive_key = .*/hive_key = $HIVE_KEY/" $SOCTOPUS_CONFIG
diff --git a/salt/idstools/init.sls b/salt/idstools/init.sls
index cabd0ee73..9ec6f53f7 100644
--- a/salt/idstools/init.sls
+++ b/salt/idstools/init.sls
@@ -63,13 +63,13 @@ ruleslink:
so-idstoolsimage:
cmd.run:
- - name: docker pull --disable-content-trust=false soshybridhunter/so-idstools:HH1.1.0
+ - name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-idstools:HH1.1.0
so-idstools:
docker_container.running:
- require:
- so-idstoolsimage
- - image: soshybridhunter/so-idstools:HH1.1.0
+ - image: docker.io/soshybridhunter/so-idstools:HH1.1.0
- hostname: so-idstools
- user: socore
- binds:
diff --git a/salt/kibana/init.sls b/salt/kibana/init.sls
index 26910b5b0..0d6262600 100644
--- a/salt/kibana/init.sls
+++ b/salt/kibana/init.sls
@@ -56,14 +56,14 @@ synckibanacustom:
so-kibanaimage:
cmd.run:
- - name: docker pull --disable-content-trust=false soshybridhunter/so-kibana:HH1.1.1
+ - name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-kibana:HH1.1.1
# Start the kibana docker
so-kibana:
docker_container.running:
- require:
- so-kibanaimage
- - image: soshybridhunter/so-kibana:HH1.1.1
+ - image: docker.io/soshybridhunter/so-kibana:HH1.1.1
- hostname: kibana
- user: kibana
- environment:
diff --git a/salt/logstash/conf/conf.enabled.txt.so-helix b/salt/logstash/conf/conf.enabled.txt.so-helix
new file mode 100644
index 000000000..ec07b5a90
--- /dev/null
+++ b/salt/logstash/conf/conf.enabled.txt.so-helix
@@ -0,0 +1,47 @@
+# This is where you can specify which LogStash configs get loaded.
+#
+# The custom folder on the master gets automatically synced to each logstash
+# node.
+#
+# To enable a custom configuration see the following example and uncomment:
+# /usr/share/logstash/pipeline.custom/1234_input_custom.conf
+##
+# All of the defaults are loaded.
+/usr/share/logstash/pipeline.dynamic/0010_input_hhbeats.conf
+/usr/share/logstash/pipeline.so/1033_preprocess_snort.conf
+/usr/share/logstash/pipeline.so/1100_preprocess_bro_conn.conf
+/usr/share/logstash/pipeline.so/1101_preprocess_bro_dhcp.conf
+/usr/share/logstash/pipeline.so/1102_preprocess_bro_dns.conf
+/usr/share/logstash/pipeline.so/1103_preprocess_bro_dpd.conf
+/usr/share/logstash/pipeline.so/1104_preprocess_bro_files.conf
+/usr/share/logstash/pipeline.so/1105_preprocess_bro_ftp.conf
+/usr/share/logstash/pipeline.so/1106_preprocess_bro_http.conf
+/usr/share/logstash/pipeline.so/1107_preprocess_bro_irc.conf
+/usr/share/logstash/pipeline.so/1108_preprocess_bro_kerberos.conf
+/usr/share/logstash/pipeline.so/1109_preprocess_bro_notice.conf
+/usr/share/logstash/pipeline.so/1110_preprocess_bro_rdp.conf
+/usr/share/logstash/pipeline.so/1111_preprocess_bro_signatures.conf
+/usr/share/logstash/pipeline.so/1112_preprocess_bro_smtp.conf
+/usr/share/logstash/pipeline.so/1113_preprocess_bro_snmp.conf
+/usr/share/logstash/pipeline.so/1114_preprocess_bro_software.conf
+/usr/share/logstash/pipeline.so/1115_preprocess_bro_ssh.conf
+/usr/share/logstash/pipeline.so/1116_preprocess_bro_ssl.conf
+/usr/share/logstash/pipeline.so/1117_preprocess_bro_syslog.conf
+/usr/share/logstash/pipeline.so/1118_preprocess_bro_tunnel.conf
+/usr/share/logstash/pipeline.so/1119_preprocess_bro_weird.conf
+/usr/share/logstash/pipeline.so/1121_preprocess_bro_mysql.conf
+/usr/share/logstash/pipeline.so/1122_preprocess_bro_socks.conf
+/usr/share/logstash/pipeline.so/1123_preprocess_bro_x509.conf
+/usr/share/logstash/pipeline.so/1124_preprocess_bro_intel.conf
+/usr/share/logstash/pipeline.so/1125_preprocess_bro_modbus.conf
+/usr/share/logstash/pipeline.so/1126_preprocess_bro_sip.conf
+/usr/share/logstash/pipeline.so/1127_preprocess_bro_radius.conf
+/usr/share/logstash/pipeline.so/1128_preprocess_bro_pe.conf
+/usr/share/logstash/pipeline.so/1129_preprocess_bro_rfb.conf
+/usr/share/logstash/pipeline.so/1130_preprocess_bro_dnp3.conf
+/usr/share/logstash/pipeline.so/1131_preprocess_bro_smb_files.conf
+/usr/share/logstash/pipeline.so/1132_preprocess_bro_smb_mapping.conf
+/usr/share/logstash/pipeline.so/1133_preprocess_bro_ntlm.conf
+/usr/share/logstash/pipeline.so/1134_preprocess_bro_dce_rpc.conf
+/usr/share/logstash/pipeline.so/8001_postprocess_common_ip_augmentation.conf
+/usr/share/logstash/pipeline.dynamic/9997_output_helix.conf
diff --git a/salt/logstash/files/dynamic/0006_input_beats.conf b/salt/logstash/files/dynamic/0006_input_beats.conf
index df590b6a1..a7140f859 100644
--- a/salt/logstash/files/dynamic/0006_input_beats.conf
+++ b/salt/logstash/files/dynamic/0006_input_beats.conf
@@ -1,7 +1,7 @@
input {
beats {
port => "5044"
- ssl => true
+ ssl => false
ssl_certificate_authorities => ["/usr/share/filebeat/ca.crt"]
ssl_certificate => "/usr/share/logstash/filebeat.crt"
ssl_key => "/usr/share/logstash/filebeat.key"
@@ -9,23 +9,6 @@ input {
}
}
filter {
- if [type] == "ids" or [type] =~ "bro" {
- mutate {
- rename => { "host" => "beat_host" }
- remove_tag => ["beat"]
- add_field => { "sensor_name" => "%{[beat][name]}" }
- add_field => { "syslog-host_from" => "%{[beat][name]}" }
- remove_field => [ "beat", "prospector", "input", "offset" ]
- }
- }
- if [type] =~ "ossec" {
- mutate {
- rename => { "host" => "beat_host" }
- remove_tag => ["beat"]
- add_field => { "syslog-host_from" => "%{[beat][name]}" }
- remove_field => [ "beat", "prospector", "input", "offset" ]
- }
- }
if [type] == "osquery" {
mutate {
rename => { "host" => "beat_host" }
diff --git a/salt/logstash/files/dynamic/9997_output_helix.conf b/salt/logstash/files/dynamic/9997_output_helix.conf
new file mode 100644
index 000000000..5dd0036fe
--- /dev/null
+++ b/salt/logstash/files/dynamic/9997_output_helix.conf
@@ -0,0 +1,142 @@
+{% set HELIX_API_KEY = salt['pillar.get']('fireeye:helix:api_key', '') %}
+
+filter {
+ if [type] =~ /^bro_conn|bro_dns|bro_http|bro_files|bro_ssl|bro_dhcp|bro_x509$/ {
+ grok {
+ match => [
+ "source_ip", "^%{IPV4:srcipv4}$",
+ "source_ip", "(?^([0-9A-Fa-f]{0,4}:){2,7}([0-9A-Fa-f]{1,4}$|((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\.|$)){4})$)"
+ ]
+ }
+ grok {
+ match => [
+ "destination_ip", "(?^([0-9A-Fa-f]{0,4}:){2,7}([0-9A-Fa-f]{1,4}$|((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\.|$)){4})$)",
+ "destination_ip", "^%{IPV4:dstipv4}$"
+ ]
+ }
+
+ geoip {
+ source => "[source_ip]"
+ target => "source_geo"
+ }
+ geoip {
+ source => "[destination_ip]"
+ target => "destination_geo"
+ }
+ mutate {
+ #rename => { "%{[source_geo][country_code]}" => "srccountrycode" }
+ #rename => { "%{[destination_geo][country_code]}" => "dstcountrycode" }
+ rename => { "[beat_host][name]" => "sensor" }
+ copy => { "sensor" => "rawmsghostname" }
+ rename => { "message" => "rawmsg" }
+ #rename => { "event_type" => "program" }
+ copy => { "type" => "class" }
+ copy => { "class" => "program"}
+ rename => { "source_port" => "srcport" }
+ rename => { "destination_port" => "dstport" }
+ remove_field => ["source_ip", "destination_ip"]
+ remove_field => ["sensorname", "sensor_name", "service", "source", "tags", "syslog-host"]
+ remove_field => ["sensor_name", "source_ips", "ips", "destination_ips", "syslog-priority", "syslog-file_name", "syslog-facility"]
+ }
+ if "bro_conn" in [class] {
+ mutate {
+ #add_field => { "metaclass" => "connection" }
+ rename => { "original_bytes" => "sentbytes" }
+ rename => { "respond_bytes" => "rcvdbytes" }
+ rename => { "connection_state" => "connstate" }
+ rename => { "uid" => "connectionid" }
+ rename => { "respond_packets" => "rcvdpackets" }
+ rename => { "original_packets" => "sentpackets" }
+ rename => { "respond_ip_bytes" => "rcvdipbytes" }
+ rename => { "original_ip_bytes" => "sentipbytes" }
+ rename => { "local_respond" => "local_resp" }
+ rename => { "local_orig" => "localorig" }
+ rename => { "missed_bytes" => "missingbytes" }
+ }
+ }
+ if "bro_dns" in [class] {
+ mutate{
+ #add_field = { "metaclass" => "dns"}
+ rename => { "answers" => "answer" }
+ rename => { "query" => "domain" }
+ rename => { "query_class" => "queryclass" }
+ rename => { "query_class_name" => "queryclassname" }
+ rename => { "query_type" => "querytype" }
+ rename => { "query_type_name" => "querytypename" }
+ rename => { "ra" => "recursionavailable" }
+ rename => { "rd" => "recursiondesired" }
+ }
+ }
+ if "bro_dhcp" in [class] {
+ mutate{
+ #add_field = { "metaclass" => "dhcp"}
+ rename => { "message_types" => "direction" }
+ rename => { "lease_time" => "duration" }
+ }
+ }
+ if "bro_files" in [class] {
+ mutate{
+ #add_field = { "metaclass" => "dns"}
+ rename => { "missing_bytes" => "missingbytes" }
+ rename => { "fuid" => "fileid" }
+ rename => { "uid" => "connectionid" }
+ }
+ }
+ if "bro_http" in [class] {
+ mutate{
+ #add_field = { "metaclass" => "dns"}
+ rename => { "virtual_host" => "hostname" }
+ rename => { "status_code" => "statuscode" }
+ rename => { "status_message" => "statusmsg" }
+ rename => { "resp_mime_types" => "rcvdmimetype" }
+ rename => { "resp_fuids" => "rcvdfileid" }
+ rename => { "response_body_len" => "rcvdbodybytes" }
+ rename => { "request_body_len" => "sentbodybytes" }
+ rename => { "uid" => "connectionid" }
+ rename => { "ts"=> "eventtime" }
+ rename => { "@timestamp"=> "eventtime" }
+ }
+ }
+ if "bro_ssl" in [class] {
+ mutate{
+ #add_field = { "metaclass" => "dns"}
+ rename => { "status_code" => "statuscode" }
+ rename => { "status_message" => "statusmsg" }
+ rename => { "resp_mime_types" => "rcvdmimetype" }
+ rename => { "resp_fuids" => "rcvdfileid" }
+ rename => { "response_body_len" => "rcvdbodybytes" }
+ rename => { "request_body_len" => "sentbodybytes" }
+ }
+ }
+ if "bro_weird" in [class] {
+ mutate{
+ #add_field = { "metaclass" => "dns"}
+ rename => { "name" => "eventname" }
+ }
+ }
+ if "bro_x509" in [class] {
+ mutate{
+ #add_field = { "metaclass" => "dns"}
+ rename => { "certificate_common_name" => "certname" }
+ rename => { "certificate_subject" => "certsubject" }
+ rename => { "issuer_common_name" => "issuer" }
+ rename => { "certificate_issuer" => "issuersubject" }
+ rename => { "certificate_not_valid_before" => "issuetime" }
+ rename => { "certificate_key_type" => "cert_type" }
+ }
+ }
+ }
+}
+
+output {
+ if [type] =~ /^bro_conn|bro_dns|bro_http|bro_files|bro_ssl|bro_dhcp|bro_x509$/ {
+ http {
+ url => "https://helix-integrations.cloud.aws.apps.fireeye.com/api/upload"
+ http_method => post
+ http_compression => true
+ socket_timeout => 60
+ headers => ["Authorization","{{ HELIX_API_KEY }}"]
+ format => json_batch
+ }
+ }
+}
diff --git a/salt/logstash/init.sls b/salt/logstash/init.sls
index 589d22d1c..c2b80346f 100644
--- a/salt/logstash/init.sls
+++ b/salt/logstash/init.sls
@@ -30,6 +30,13 @@
{% set dstats = salt['pillar.get']('master:domainstats', '0') %}
{% set nodetype = salt['grains.get']('role', '') %}
+{% elif grains['role'] == 'so-helix' %}
+
+{% set lsheap = salt['pillar.get']('master:lsheap', '') %}
+{% set freq = salt['pillar.get']('master:freq', '0') %}
+{% set dstats = salt['pillar.get']('master:domainstats', '0') %}
+{% set nodetype = salt['grains.get']('role', '') %}
+
{% elif grains['role'] == 'so-eval' %}
{% set lsheap = salt['pillar.get']('master:lsheap', '') %}
@@ -148,13 +155,13 @@ lslogdir:
# Add the container
so-logstashimage:
cmd.run:
- - name: docker pull --disable-content-trust=false soshybridhunter/so-logstash:HH1.1.1
+ - name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-logstash:HH1.1.1
so-logstash:
docker_container.running:
- require:
- so-logstashimage
- - image: soshybridhunter/so-logstash:HH1.1.1
+ - image: docker.io/soshybridhunter/so-logstash:HH1.1.1
- hostname: so-logstash
- name: so-logstash
- user: logstash
diff --git a/salt/master/files/registry/scripts/so-docker-download.sh b/salt/master/files/registry/scripts/so-docker-download.sh
new file mode 100644
index 000000000..33b5065ae
--- /dev/null
+++ b/salt/master/files/registry/scripts/so-docker-download.sh
@@ -0,0 +1,48 @@
+#!/bin/bash
+
+MASTER={{ MASTER }}
+VERSION="HH1.1.3"
+TRUSTED_CONTAINERS=( \
+"so-core:$VERSION" \
+"so-cyberchef:$VERSION" \
+"so-acng:$VERSION" \
+"so-sensoroni:$VERSION" \
+"so-fleet:$VERSION" \
+"so-soctopus:$VERSION" \
+"so-steno:$VERSION" \
+"so-playbook:$VERSION" \
+"so-thehive-cortex:$VERSION" \
+"so-thehive:$VERSION" \
+"so-thehive-es:$VERSION" \
+"so-wazuh:$VERSION" \
+"so-kibana:$VERSION" \
+"so-auth-ui:$VERSION" \
+"so-auth-api:$VERSION" \
+"so-elastalert:$VERSION" \
+"so-navigator:$VERSION" \
+"so-filebeat:$VERSION" \
+"so-suricata:$VERSION" \
+"so-logstash:$VERSION" \
+"so-bro:$VERSION" \
+"so-idstools:$VERSION" \
+"so-fleet-launcher:$VERSION" \
+"so-freqserver:$VERSION" \
+"so-influxdb:$VERSION" \
+"so-grafana:$VERSION" \
+"so-telegraf:$VERSION" \
+"so-redis:$VERSION" \
+"so-mysql:$VERSION" \
+"so-curtor:$VERSION" \
+"so-elasticsearch:$VERSION" \
+"so-domainstats:$VERSION" \
+"so-tcpreplay:$VERSION" \
+)
+
+for i in "${TRUSTED_CONTAINERS[@]}"
+do
+ # Pull down the trusted docker image
+ docker pull --disable-content-trust=false docker.io/soshybridhunter/$i
+ # Tag it with the new registry destination
+ docker tag soshybridhunter/$i $MASTER:5000/soshybridhunter/$i
+ docker push $MASTER:5000/soshybridhunter/$i
+done
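
so-docker-download.sh pulls each trusted image from Docker Hub, re-tags it for the master's registry on port 5000, and pushes it there. A quick sanity check, assuming the registry exposes the standard Docker Registry v2 API (hostname and image name below are examples):

```
# List repositories known to the private registry and the tags for one image.
# -k skips TLS verification; drop it once the registry cert is trusted.
curl -sk "https://$MASTER:5000/v2/_catalog"
curl -sk "https://$MASTER:5000/v2/soshybridhunter/so-logstash/tags/list"
```
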
diff --git a/salt/master/init.sls b/salt/master/init.sls
index 8b68a3cf1..c6e11279d 100644
--- a/salt/master/init.sls
+++ b/salt/master/init.sls
@@ -17,6 +17,15 @@
{% if masterproxy == 1 %}
+socore_own_saltstack:
+ file.directory:
+ - name: /opt/so/saltstack
+ - user: socore
+ - group: socore
+ - recurse:
+ - user
+ - group
+
# Create the directories for apt-cacher-ng
aptcacherconfdir:
file.directory:
@@ -48,14 +57,14 @@ acngcopyconf:
so-acngimage:
cmd.run:
- - name: docker pull --disable-content-trust=false soshybridhunter/so-acng:HH1.1.0
+ - name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-acng:HH1.1.0
# Install the apt-cacher-ng container
so-aptcacherng:
docker_container.running:
- require:
- so-acngimage
- - image: soshybridhunter/so-acng:HH1.1.0
+ - image: docker.io/soshybridhunter/so-acng:HH1.1.0
- hostname: so-acng
- port_bindings:
- 0.0.0.0:3142:3142
diff --git a/salt/motd/files/package_update_reboot_required.jinja b/salt/motd/files/package_update_reboot_required.jinja
new file mode 100644
index 000000000..6d94fc613
--- /dev/null
+++ b/salt/motd/files/package_update_reboot_required.jinja
@@ -0,0 +1,26 @@
+{% set needs_restarting_check = salt['mine.get']('*', 'needs_restarting.check', tgt_type='glob') -%}
+
+{%- if needs_restarting_check %}
+ {%- set minions_need_restarted = [] %}
+
+ {%- for minion, need_restarted in needs_restarting_check | dictsort() %}
+ {%- if need_restarted == 'True' %}
+ {% do minions_need_restarted.append(minion) %}
+ {%- endif %}
+ {%- endfor -%}
+
+ {%- if minions_need_restarted | length > 0 %}
+****************************************************************************************************
+* The following nodes in your Security Onion grid may need to be restarted due to package updates. *
+* If a node has already been patched and restarted, but has been up for less than 15 minutes, it   *
+* may not have updated its restart_needed status yet. This will cause it to be listed below even   *
+* though it has already been restarted. This feature will be improved in the future.               *
+****************************************************************************************************
+
+ {% for minion in minions_need_restarted -%}
+ {{ minion }}
+ {% endfor -%}
+
+ {%- endif -%}
+
+{%- endif -%}
diff --git a/salt/motd/init.sls b/salt/motd/init.sls
new file mode 100644
index 000000000..4dae979bf
--- /dev/null
+++ b/salt/motd/init.sls
@@ -0,0 +1,5 @@
+package_update_reboot_required_motd:
+ file.managed:
+ - name: /etc/motd
+ - source: salt://motd/files/package_update_reboot_required.jinja
+ - template: jinja
diff --git a/salt/mysql/init.sls b/salt/mysql/init.sls
index 4ade15f33..a0136ae9b 100644
--- a/salt/mysql/init.sls
+++ b/salt/mysql/init.sls
@@ -50,13 +50,13 @@ mysqldatadir:
so-mysqlimage:
cmd.run:
- - name: docker pull --disable-content-trust=false soshybridhunter/so-mysql:HH1.1.0
+ - name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-mysql:HH1.1.0
so-mysql:
docker_container.running:
- require:
- so-mysqlimage
- - image: soshybridhunter/so-mysql:HH1.1.0
+ - image: docker.io/soshybridhunter/so-mysql:HH1.1.0
- hostname: so-mysql
- user: socore
- port_bindings:
diff --git a/salt/patch/needs_restarting.sls b/salt/patch/needs_restarting.sls
new file mode 100644
index 000000000..f60909d22
--- /dev/null
+++ b/salt/patch/needs_restarting.sls
@@ -0,0 +1,5 @@
+needs_restarting:
+ module.run:
+ - mine.send:
+ - func: needs_restarting.check
+ - order: last
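
The state above publishes each minion's needs_restarting.check result to the Salt mine, which the new MOTD template reads when it builds the reboot banner. Two hedged ways to inspect that data by hand, assuming the stock Salt CLI on the master and yum-utils on a CentOS minion:

```
# Query the mine data that the MOTD template renders (run on the master).
sudo salt '*' mine.get '*' needs_restarting.check
# On a CentOS minion, check directly with yum-utils; exit code 1 means a reboot is needed.
sudo needs-restarting -r
```
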
diff --git a/salt/patch/os/init.sls b/salt/patch/os/init.sls
new file mode 100644
index 000000000..ade35294a
--- /dev/null
+++ b/salt/patch/os/init.sls
@@ -0,0 +1,10 @@
+include:
+{% if grains.os == "CentOS" %}
+ - yum.packages
+{% endif %}
+ - patch.needs_restarting
+
+patch_os:
+ pkg.uptodate:
+ - name: patch_os
+ - refresh: True
diff --git a/salt/patch/os/schedule.sls b/salt/patch/os/schedule.sls
new file mode 100644
index 000000000..a91e61dfe
--- /dev/null
+++ b/salt/patch/os/schedule.sls
@@ -0,0 +1,76 @@
+{% if salt['pillar.get']('patch:os:schedule_name') %}
+ {% set patch_os_pillar = salt['pillar.get']('patch:os') %}
+ {% set schedule_name = patch_os_pillar.schedule_name %}
+ {% set splay = patch_os_pillar.get('splay', 300) %}
+
+ {% if schedule_name != 'manual' and schedule_name != 'auto' %}
+ {% import_yaml "patch/os/schedules/"~schedule_name~".yml" as os_schedule %}
+
+ {% if patch_os_pillar.enabled %}
+
+patch_os_schedule:
+ schedule.present:
+ - function: state.sls
+ - job_args:
+ - patch.os
+ - when:
+ {% for days in os_schedule.patch.os.schedule %}
+ {% for day, times in days.items() %}
+ {% for time in times %}
+ - {{day}} {{time}}
+ {% endfor %}
+ {% endfor %}
+ {% endfor %}
+ - splay: {{splay}}
+ - return_job: True
+
+ {% else %}
+
+disable_patch_os_schedule:
+ schedule.disabled:
+ - name: patch_os_schedule
+
+ {% endif %}
+
+
+ {% elif schedule_name == 'auto' %}
+
+ {% if patch_os_pillar.enabled %}
+
+patch_os_schedule:
+ schedule.present:
+ - function: state.sls
+ - job_args:
+ - patch.os
+ - hours: 8
+ - splay: {{splay}}
+ - return_job: True
+
+ {% else %}
+
+disable_patch_os_schedule:
+ schedule.disabled:
+ - name: patch_os_schedule
+
+ {% endif %}
+
+ {% elif schedule_name == 'manual' %}
+
+remove_patch_os_schedule:
+ schedule.absent:
+ - name: patch_os_schedule
+
+ {% endif %}
+
+{% else %}
+
+no_patch_os_schedule_name_set:
+ test.fail_without_changes:
+ - name: "Set a pillar value for patch:os:schedule_name in this minion's .sls file. If an OS patch schedule is not listed as enabled in show_schedule output below, then OS patches will need to be applied manually until this is corrected."
+
+show_patch_os_schedule:
+ module.run:
+ - schedule.is_enabled:
+ - name: patch_os_schedule
+
+{% endif %}
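
Depending on the patch:os pillar, the schedule state above creates, disables, or removes the patch_os_schedule job. To confirm what actually landed on a minion (a sketch using the standard Salt schedule module; the target glob is an example):

```
# Show all schedule entries on the targeted minions and whether the patch job is enabled.
sudo salt 'sensor*' schedule.list
sudo salt 'sensor*' schedule.is_enabled name=patch_os_schedule
```
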
diff --git a/salt/patch/os/schedules/example_schedule.yml b/salt/patch/os/schedules/example_schedule.yml
new file mode 100644
index 000000000..b2748ab09
--- /dev/null
+++ b/salt/patch/os/schedules/example_schedule.yml
@@ -0,0 +1,10 @@
+patch:
+ os:
+ schedule:
+ - Tuesday:
+ - '15:00'
+ - Thursday:
+ - '03:00'
+ - Saturday:
+ - '01:00'
+ - '15:00'
diff --git a/salt/pcap/init.sls b/salt/pcap/init.sls
index 86408c0e3..a49dc00e3 100644
--- a/salt/pcap/init.sls
+++ b/salt/pcap/init.sls
@@ -96,13 +96,13 @@ stenolog:
so-stenoimage:
cmd.run:
- - name: docker pull --disable-content-trust=false soshybridhunter/so-steno:HH1.1.1
+ - name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-steno:HH1.1.3
so-steno:
docker_container.running:
- require:
- so-stenoimage
- - image: soshybridhunter/so-steno:HH1.1.1
+ - image: docker.io/soshybridhunter/so-steno:HH1.1.3
- network_mode: host
- privileged: True
- port_bindings:
diff --git a/salt/playbook/files/redmine.db b/salt/playbook/files/redmine.db
index fdf24eae4..7d84b5856 100644
Binary files a/salt/playbook/files/redmine.db and b/salt/playbook/files/redmine.db differ
diff --git a/salt/playbook/init.sls b/salt/playbook/init.sls
index a72514fe9..770316ab9 100644
--- a/salt/playbook/init.sls
+++ b/salt/playbook/init.sls
@@ -11,9 +11,9 @@ playbookdb:
playbookwebhook:
module.run:
- - name: sqlite3.modify
- - db: /opt/so/conf/playbook/redmine.db
- - sql: "update webhooks set url = 'http://{{MASTERIP}}:7000/playbook/webhook' where project_id = 1"
+ - sqlite3.modify:
+ - db: /opt/so/conf/playbook/redmine.db
+ - sql: "update webhooks set url = 'http://{{MASTERIP}}:7000/playbook/webhook' where project_id = 1"
navigatorconfig:
file.managed:
@@ -26,13 +26,13 @@ navigatorconfig:
so-playbookimage:
cmd.run:
- - name: docker pull --disable-content-trust=false soshybridhunter/so-playbook:HH1.1.1
+ - name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-playbook:HH1.1.3
so-playbook:
docker_container.running:
- require:
- so-playbookimage
- - image: soshybridhunter/so-playbook:HH1.1.1
+ - image: docker.io/soshybridhunter/so-playbook:HH1.1.3
- hostname: playbook
- name: so-playbook
- binds:
@@ -42,13 +42,13 @@ so-playbook:
so-navigatorimage:
cmd.run:
- - name: docker pull --disable-content-trust=false soshybridhunter/so-navigator:HH1.1.1
+ - name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-navigator:HH1.1.1
so-navigator:
docker_container.running:
- require:
- so-navigatorimage
- - image: soshybridhunter/so-navigator:HH1.1.1
+ - image: docker.io/soshybridhunter/so-navigator:HH1.1.1
- hostname: navigator
- name: so-navigator
- binds:
@@ -56,3 +56,9 @@ so-navigator:
- /opt/so/conf/playbook/nav_layer_playbook.json:/nav-app/src/assets/playbook.json:ro
- port_bindings:
- 0.0.0.0:4200:4200
+
+/usr/sbin/so-playbook-sync:
+ cron.present:
+ - identifier: so-playbook-sync
+ - user: root
+ - minute: '*/5'
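
The new cron.present state runs /usr/sbin/so-playbook-sync as root every five minutes. A minimal check that the managed entry was written to root's crontab (a sketch; -B1 should also show the identifier comment Salt adds above the entry):

```
# Confirm the Salt-managed cron entry landed in root's crontab.
sudo crontab -l -u root | grep -B1 so-playbook-sync
```
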
diff --git a/salt/redis/init.sls b/salt/redis/init.sls
index 174c1725b..18178ce3b 100644
--- a/salt/redis/init.sls
+++ b/salt/redis/init.sls
@@ -46,13 +46,13 @@ redisconfsync:
so-redisimage:
cmd.run:
- - name: docker pull --disable-content-trust=false soshybridhunter/so-redis:HH1.1.0
+ - name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-redis:HH1.1.0
so-redis:
docker_container.running:
- require:
- so-redisimage
- - image: soshybridhunter/so-redis:HH1.1.0
+ - image: docker.io/soshybridhunter/so-redis:HH1.1.0
- hostname: so-redis
- user: socore
- port_bindings:
diff --git a/salt/sensoroni/init.sls b/salt/sensoroni/init.sls
index 2c06ba7a8..19fcd8b4a 100644
--- a/salt/sensoroni/init.sls
+++ b/salt/sensoroni/init.sls
@@ -29,19 +29,19 @@ sensoronisync:
so-sensoroniimage:
cmd.run:
- - name: docker pull --disable-content-trust=false soshybridhunter/so-sensoroni:HH1.1.1
+ - name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-sensoroni:HH1.1.3
so-sensoroni:
docker_container.running:
- require:
- so-sensoroniimage
- - image: soshybridhunter/so-sensoroni:HH1.1.1
+ - image: docker.io/soshybridhunter/so-sensoroni:HH1.1.3
- hostname: sensoroni
- name: so-sensoroni
- binds:
- /nsm/sensoroni/jobs:/opt/sensoroni/jobs:rw
- /opt/so/conf/sensoroni/sensoroni.json:/opt/sensoroni/sensoroni.json:ro
- - /opt/so/log/sensoroni/:/opt/sensoroni/log/:rw
+ - /opt/so/log/sensoroni/:/opt/sensoroni/logs/:rw
- port_bindings:
- 0.0.0.0:9822:9822
- watch:
diff --git a/salt/soctopus/files/SOCtopus.conf b/salt/soctopus/files/SOCtopus.conf
index f1d311602..37ec0172d 100644
--- a/salt/soctopus/files/SOCtopus.conf
+++ b/salt/soctopus/files/SOCtopus.conf
@@ -1,9 +1,16 @@
{%- set ip = salt['pillar.get']('static:masterip', '') %}
{%- set HIVEKEY = salt['pillar.get']('static:hivekey', '') %}
+{%- set CORTEXKEY = salt['pillar.get']('static:cortexorguserkey', '') %}
[es]
es_url = http://{{ip}}:9200
+[cortex]
+auto_analyze_alerts = no
+cortex_url = https://{{ip}}/cortex/
+cortex_key = {{ CORTEXKEY }}
+supported_analyzers = Urlscan_io_Search,CERTatPassiveDNS
+
[fir]
fir_url = YOURFIRURL
fir_token = YOURFIRTOKEN
@@ -50,4 +57,4 @@ playbook_url = http://{{ip}}:3200/playbook
playbook_key = a4a34538782804adfcb8dfae96262514ad70c37c
[log]
-logfile = /tmp/soctopus.log
+logfile = /var/log/SOCtopus/soctopus.log
diff --git a/salt/soctopus/files/templates/generic.template b/salt/soctopus/files/templates/generic.template
index 992db3fa9..80dd1a762 100644
--- a/salt/soctopus/files/templates/generic.template
+++ b/salt/soctopus/files/templates/generic.template
@@ -1,23 +1,6 @@
{% set es = salt['pillar.get']('static:masterip', '') %}
{% set hivehost = salt['pillar.get']('static:masterip', '') %}
{% set hivekey = salt['pillar.get']('static:hivekey', '') %}
-es_host: {{es}}
-es_port: 9200
-name: Alert-Name
-type: frequency
-index: "*:logstash-*"
-num_events: 1
-timeframe:
- minutes: 10
-buffer_time:
- minutes: 10
-allow_buffer_time_overlap: true
-
-filter:
-- query:
- query_string:
- query: 'select from test'
-
alert: modules.so.thehive.TheHiveAlerter
hive_connection:
@@ -29,12 +12,12 @@ hive_proxies:
https: ''
hive_alert_config:
- title: '{rule[name]}'
- type: 'external'
+ title: '{rule[name]} - '
+ type: 'playbook'
source: 'SecurityOnion'
- description: '`Data:` {match[message]}'
+ description: "`Play:` https://{{es}}/playbook/issues/6000 \n\n `View Event:` \n\n `Raw Data:` {match[message]}"
severity: 2
- tags: ['elastalert', 'SecurityOnion']
+ tags: ['playbook']
tlp: 3
status: 'New'
follow: True
diff --git a/salt/soctopus/files/templates/osquery.template b/salt/soctopus/files/templates/osquery.template
index 1e85a3182..5f1c6961a 100644
--- a/salt/soctopus/files/templates/osquery.template
+++ b/salt/soctopus/files/templates/osquery.template
@@ -1,23 +1,6 @@
{% set es = salt['pillar.get']('static:masterip', '') %}
{% set hivehost = salt['pillar.get']('static:masterip', '') %}
{% set hivekey = salt['pillar.get']('static:hivekey', '') %}
-es_host: {{es}}
-es_port: 9200
-name: Alert-Name
-type: frequency
-index: "*:logstash-*"
-num_events: 1
-timeframe:
- minutes: 10
-buffer_time:
- minutes: 10
-allow_buffer_time_overlap: true
-
-filter:
-- query:
- query_string:
- query: 'select from test'
-
alert: modules.so.thehive.TheHiveAlerter
hive_connection:
@@ -28,20 +11,22 @@ hive_proxies:
http: ''
https: ''
-hive_alert_config:
- title: '{rule[name]} -- {match[osquery][hostname]} -- {match[osquery][name]}'
- type: 'external'
- source: 'SecurityOnion'
- description: '`Hostname:` __{match[osquery][hostname]}__ `Live Query:`__[Pivot Link](https://{{es}}/fleet/queries/new?host_uuids={match[osquery][LiveQuery]})__ `Pack:` __{match[osquery][name]}__ `Data:` {match[osquery][columns]}'
- severity: 2
- tags: ['elastalert', 'SecurityOnion']
- tlp: 3
- status: 'New'
- follow: True
- caseTemplate: '5000'
-
hive_observable_data_mapping:
- ip: '{match[osquery][EndpointIP1]}'
- ip: '{match[osquery][EndpointIP2]}'
- other: '{match[osquery][hostIdentifier]}'
- other: '{match[osquery][hostname]}'
+
+hive_alert_config:
+ title: '{rule[name]} -- {match[osquery][hostname]} -- {match[osquery][name]}'
+ type: 'osquery'
+ source: 'SecurityOnion'
+ description: "`Play:` https://{{es}}/playbook/issues/6000 \n\n `View Event:` \n\n `Hostname:` __{match[osquery][hostname]}__ `Live Query:`__[Pivot Link](https://{{es}}/fleet/queries/new?host_uuids={match[osquery][LiveQuery]})__ `Pack:` __{match[osquery][name]}__ `Data:` {match[osquery][columns]}"
+ severity: 2
+ tags: ['playbook','osquery']
+ tlp: 3
+ status: 'New'
+ follow: True
+ caseTemplate: '5000'
+
+
diff --git a/salt/soctopus/init.sls b/salt/soctopus/init.sls
index 98a9a4158..578789a76 100644
--- a/salt/soctopus/init.sls
+++ b/salt/soctopus/init.sls
@@ -13,6 +13,12 @@ soctopussync:
- group: 939
- template: jinja
+soctopuslogdir:
+ file.directory:
+ - name: /opt/so/log/soctopus
+ - user: 939
+ - group: 939
+
playbookrulesdir:
file.directory:
- name: /opt/so/rules/elastalert/playbook
@@ -40,17 +46,18 @@ navigatordefaultlayer:
so-soctopusimage:
cmd.run:
- - name: docker pull --disable-content-trust=false soshybridhunter/so-soctopus:HH1.1.1
+ - name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-soctopus:HH1.1.3
so-soctopus:
docker_container.running:
- require:
- so-soctopusimage
- - image: soshybridhunter/so-soctopus:HH1.1.1
+ - image: docker.io/soshybridhunter/so-soctopus:HH1.1.3
- hostname: soctopus
- name: so-soctopus
- binds:
- /opt/so/conf/soctopus/SOCtopus.conf:/SOCtopus/SOCtopus.conf:ro
+ - /opt/so/log/soctopus/:/var/log/SOCtopus/:rw
- /opt/so/rules/elastalert/playbook:/etc/playbook-rules:rw
- /opt/so/conf/playbook/nav_layer_playbook.json:/etc/playbook/nav_layer_playbook.json:rw
- port_bindings:
diff --git a/salt/ssl/init.sls b/salt/ssl/init.sls
index 841fc32ff..0dff9b855 100644
--- a/salt/ssl/init.sls
+++ b/salt/ssl/init.sls
@@ -1,15 +1,24 @@
{% set master = salt['grains.get']('master') %}
+{% set master_minion_id = master.split(".")[0] %}
{%- set masterip = salt['pillar.get']('static:masterip', '') -%}
+{% if grains['role'] == 'so-master' or grains['role'] == 'so-eval' %}
+ {% set trusttheca_text = salt['mine.get'](grains.id, 'x509.get_pem_entries')[grains.id]['/etc/pki/ca.crt']|replace('\n', '') %}
+ {% set ca_server = grains.id %}
+{% else %}
+ {% set trusttheca_text = salt['mine.get'](master_minion_id, 'x509.get_pem_entries')[master_minion_id]['/etc/pki/ca.crt']|replace('\n', '') %}
+ {% set ca_server = master_minion_id %}
+{% endif %}
+
# Trust the CA
trusttheca:
x509.pem_managed:
- name: /etc/ssl/certs/intca.crt
- - text: {{ salt['mine.get'](master, 'x509.get_pem_entries')[master]['/etc/pki/ca.crt']|replace('\n', '') }}
+ - text: {{ trusttheca_text }}
-# Install packages needed for the sensor
{% if grains['os'] != 'CentOS' %}
+# Install packages needed for the sensor
m2cryptopkgs:
pkg.installed:
- skip_suggestions: False
@@ -20,29 +29,29 @@ m2cryptopkgs:
# Create a cert for the talking to influxdb
/etc/pki/influxdb.crt:
x509.certificate_managed:
- - ca_server: {{ master }}
+ - ca_server: {{ ca_server }}
- signing_policy: influxdb
- public_key: /etc/pki/influxdb.key
- CN: {{ master }}
- days_remaining: 0
- - days_valid: 3650
+ - days_valid: 820
- backup: True
- managed_private_key:
name: /etc/pki/influxdb.key
bits: 4096
backup: True
-{% if grains['role'] == 'so-master' or grains['role'] == 'so-eval' %}
+{% if grains['role'] == 'so-master' or grains['role'] == 'so-eval' or grains['role'] == 'so-helix' %}
# Request a cert and drop it where it needs to go to be distributed
/etc/pki/filebeat.crt:
x509.certificate_managed:
- - ca_server: {{ master }}
+ - ca_server: {{ ca_server }}
- signing_policy: filebeat
- public_key: /etc/pki/filebeat.key
- CN: {{ master }}
- days_remaining: 0
- - days_valid: 3650
+ - days_valid: 820
- backup: True
- managed_private_key:
name: /etc/pki/filebeat.key
@@ -70,12 +79,12 @@ fbcrtlink:
# Create a cert for the docker registry
/etc/pki/registry.crt:
x509.certificate_managed:
- - ca_server: {{ master }}
+ - ca_server: {{ ca_server }}
- signing_policy: registry
- public_key: /etc/pki/registry.key
- CN: {{ master }}
- days_remaining: 0
- - days_valid: 3650
+ - days_valid: 820
- backup: True
- managed_private_key:
name: /etc/pki/registry.key
@@ -85,12 +94,12 @@ fbcrtlink:
# Create a cert for the reverse proxy
/etc/pki/masterssl.crt:
x509.certificate_managed:
- - ca_server: {{ master }}
+ - ca_server: {{ ca_server }}
- signing_policy: masterssl
- public_key: /etc/pki/masterssl.key
- CN: {{ master }}
- days_remaining: 0
- - days_valid: 3650
+ - days_valid: 820
- backup: True
- managed_private_key:
name: /etc/pki/masterssl.key
@@ -103,7 +112,7 @@ fbcrtlink:
- CN: {{ master }}
- bits: 4096
- days_remaining: 0
- - days_valid: 3650
+ - days_valid: 820
- backup: True
/etc/pki/fleet.crt:
@@ -112,7 +121,7 @@ fbcrtlink:
- CN: {{ master }}
- subjectAltName: DNS:{{ master }},IP:{{ masterip }}
- days_remaining: 0
- - days_valid: 3650
+ - days_valid: 820
- backup: True
- managed_private_key:
name: /etc/pki/fleet.key
@@ -120,7 +129,7 @@ fbcrtlink:
backup: True
{% endif %}
-{% if grains['role'] == 'so-sensor' or grains['role'] == 'so-node' or grains['role'] == 'so-eval' %}
+{% if grains['role'] == 'so-sensor' or grains['role'] == 'so-node' or grains['role'] == 'so-eval' or grains['role'] == 'so-helix' %}
fbcertdir:
file.directory:
@@ -130,12 +139,12 @@ fbcertdir:
# Request a cert and drop it where it needs to go to be distributed
/opt/so/conf/filebeat/etc/pki/filebeat.crt:
x509.certificate_managed:
- - ca_server: {{ master }}
+ - ca_server: {{ ca_server }}
- signing_policy: filebeat
- public_key: /opt/so/conf/filebeat/etc/pki/filebeat.key
- CN: {{ master }}
- days_remaining: 0
- - days_valid: 3650
+ - days_valid: 820
- backup: True
- managed_private_key:
name: /opt/so/conf/filebeat/etc/pki/filebeat.key
diff --git a/salt/suricata/files/suricata.yaml b/salt/suricata/files/suricata.yaml
index 1acce5b96..093612335 100644
--- a/salt/suricata/files/suricata.yaml
+++ b/salt/suricata/files/suricata.yaml
@@ -3,6 +3,8 @@
{%- set interface = salt['pillar.get']('sensor:interface', 'bond0') %}
{%- if grains['role'] == 'so-eval' %}
{%- set MTU = 1500 %}
+{%- elif grains['role'] == 'so-helix' %}
+{%- set MTU = 9000 %}
{%- else %}
{%- set MTU = salt['pillar.get']('sensor:mtu', '1500') %}
{%- endif %}
diff --git a/salt/suricata/init.sls b/salt/suricata/init.sls
index 2c3b1aba8..a30010d5e 100644
--- a/salt/suricata/init.sls
+++ b/salt/suricata/init.sls
@@ -72,13 +72,13 @@ suriconfigsync:
so-suricataimage:
cmd.run:
- - name: docker pull --disable-content-trust=false soshybridhunter/so-suricata:HH1.1.1
+ - name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-suricata:HH1.1.1
so-suricata:
docker_container.running:
- require:
- so-suricataimage
- - image: soshybridhunter/so-suricata:HH1.1.1
+ - image: docker.io/soshybridhunter/so-suricata:HH1.1.1
- privileged: True
- environment:
- INTERFACE={{ interface }}
diff --git a/salt/tcpreplay/init.sls b/salt/tcpreplay/init.sls
new file mode 100644
index 000000000..a6cc62c32
--- /dev/null
+++ b/salt/tcpreplay/init.sls
@@ -0,0 +1,18 @@
+{% if grains['role'] == 'so-sensor' or grains['role'] == 'so-eval' %}
+
+so-tcpreplayimage:
+ cmd.run:
+ - name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-tcpreplay:HH1.1.4
+
+so-tcpreplay:
+ docker_container.running:
+ - require:
+      - so-tcpreplayimage
+ - network_mode: "host"
+ - image: docker.io/soshybridhunter/so-tcpreplay:HH1.1.4
+ - name: so-tcpreplay
+ - user: root
+ - interactive: True
+ - tty: True
+
+{% endif %}
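
This new state keeps a so-tcpreplay container running on sensor and eval nodes with host networking. Assuming the image bundles the stock tcpreplay CLI (an assumption, not something the state above declares), replaying a sample capture onto the sniffing bond might look like:

```
# Assumes the image ships the standard tcpreplay binary; interface and pcap path are examples only.
docker exec -it so-tcpreplay tcpreplay --intf1=bond0 --mbps=10 /opt/samples/sample.pcap
```
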
diff --git a/salt/top.sls b/salt/top.sls
index cf5d47699..7a6d5b99b 100644
--- a/salt/top.sls
+++ b/salt/top.sls
@@ -5,6 +5,24 @@
{%- set THEHIVE = salt['pillar.get']('master:thehive', '0') -%}
{%- set PLAYBOOK = salt['pillar.get']('master:playbook', '0') -%}
base:
+ '*':
+ - patch.os.schedule
+ - motd
+
+ 'G@role:so-helix':
+ - ca
+ - ssl
+ - common
+ - firewall
+ - idstools
+ - pcap
+ - suricata
+ - bro
+ - redis
+ - logstash
+ - filebeat
+ - schedule
+
'G@role:so-sensor':
- ca
- ssl
@@ -40,6 +58,7 @@ base:
- suricata
- bro
- curator
+ - cyberchef
- elastalert
{%- if OSQUERY != 0 %}
- fleet
@@ -66,6 +85,7 @@ base:
- ca
- ssl
- common
+ - cyberchef
- sensoroni
- firewall
- master
@@ -95,7 +115,7 @@ base:
{%- if PLAYBOOK != 0 %}
- playbook
{%- endif %}
-
+
# Storage node logic
diff --git a/salt/utility/bin/crossthestreams.sh b/salt/utility/bin/crossthestreams.sh
index b9c8f6c1d..3cd8b005c 100644
--- a/salt/utility/bin/crossthestreams.sh
+++ b/salt/utility/bin/crossthestreams.sh
@@ -31,6 +31,6 @@ echo "Applying cross cluster search config..."
# Add all the storage nodes to cross cluster searching.
-{%- for SN, SNDATA in salt['pillar.get']('nodestab', {}).iteritems() %}
+{%- for SN, SNDATA in salt['pillar.get']('nodestab', {}).items() %}
curl -XPUT http://{{ ES }}:9200/_cluster/settings -H'Content-Type: application/json' -d '{"persistent": {"search": {"remote": {"{{ SN }}": {"skip_unavailable": "true", "seeds": ["{{ SNDATA.ip }}:9300"]}}}}}'
{%- endfor %}
diff --git a/salt/utility/bin/eval.sh b/salt/utility/bin/eval.sh
index effbdfd33..03eceef56 100644
--- a/salt/utility/bin/eval.sh
+++ b/salt/utility/bin/eval.sh
@@ -1,5 +1,4 @@
{%- set ES = salt['pillar.get']('master:mainip', '') -%}
-{%- set MASTER = grains['master'] %}
 # Wait for ElasticSearch to come up, so that we can query for version information
echo -n "Waiting for ElasticSearch..."
COUNT=0
@@ -27,4 +26,4 @@ fi
echo "Applying cross cluster search config..."
curl -s -XPUT http://{{ ES }}:9200/_cluster/settings \
-H 'Content-Type: application/json' \
- -d "{\"persistent\": {\"search\": {\"remote\": {\"{{ MASTER }}\": {\"seeds\": [\"127.0.0.1:9300\"]}}}}}"
+ -d "{\"persistent\": {\"search\": {\"remote\": {\"{{ grains.id }}\": {\"seeds\": [\"127.0.0.1:9300\"]}}}}}"
diff --git a/salt/wazuh/init.sls b/salt/wazuh/init.sls
index b83240dfa..a59a1d215 100644
--- a/salt/wazuh/init.sls
+++ b/salt/wazuh/init.sls
@@ -5,7 +5,7 @@ ossecgroup:
group.present:
- name: ossec
- gid: 945
-
+
# Add ossecm user
ossecm:
user.present:
@@ -41,7 +41,7 @@ wazuhpkgs:
pkg.installed:
- skip_suggestions: False
- pkgs:
- - wazuh-agent
+ - wazuh-agent: 3.10.2-1
# Add Wazuh agent conf
wazuhagentconf:
@@ -64,13 +64,13 @@ wazuhagentregister:
so-wazuhimage:
cmd.run:
- - name: docker pull --disable-content-trust=false soshybridhunter/so-wazuh:HH1.1.0
+ - name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-wazuh:HH1.1.3
so-wazuh:
docker_container.running:
- require:
- so-wazuhimage
- - image: soshybridhunter/so-wazuh:HH1.1.0
+ - image: docker.io/soshybridhunter/so-wazuh:HH1.1.3
- hostname: {{HOSTNAME}}-wazuh-manager
- name: so-wazuh
- detach: True
diff --git a/salt/yum/packages.sls b/salt/yum/packages.sls
new file mode 100644
index 000000000..4c773d0e9
--- /dev/null
+++ b/salt/yum/packages.sls
@@ -0,0 +1,3 @@
+install_yum_utils:
+ pkg.installed:
+ - name: yum-utils
diff --git a/setup/functions.sh b/setup/functions.sh
new file mode 100644
index 000000000..e0145c7a1
--- /dev/null
+++ b/setup/functions.sh
@@ -0,0 +1,1304 @@
+#!/bin/bash
+
+# Copyright 2014,2015,2016,2017,2018,2019 Security Onion Solutions, LLC
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+SCRIPTDIR=$(dirname "$0")
+source $SCRIPTDIR/whiptail.sh
+
+accept_salt_key_local() {
+ echo "Accept the key locally on the master" >> $SETUPLOG 2>&1
+ # Accept the key locally on the master
+ salt-key -ya $MINION_ID
+
+}
+
+accept_salt_key_remote() {
+ echo "Accept the key remotely on the master" >> $SETUPLOG 2>&1
+ # Delete the key just in case.
+ ssh -i /root/.ssh/so.key socore@$MSRV sudo salt-key -d $MINION_ID -y
+ salt-call state.apply ca
+ ssh -i /root/.ssh/so.key socore@$MSRV sudo salt-key -a $MINION_ID -y
+
+}
+
+add_admin_user() {
+
+ # Add an admin user with full sudo rights if this is an ISO install.
+ useradd $ADMINUSER && echo $ADMINUSER:$ADMINPASS1 | chpasswd --crypt-method=SHA512
+ usermod -aG wheel $ADMINUSER
+
+}
+
+add_master_hostfile() {
+ echo "Checking if I can resolve master. If not add to hosts file" >> $SETUPLOG 2>&1
+ # Pop up an input to get the IP address
+ MSRVIP=$(whiptail --title "Security Onion Setup" --inputbox \
+ "Enter your Master Server IP Address" 10 60 X.X.X.X 3>&1 1>&2 2>&3)
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+}
+
+add_socore_user_master() {
+
+ echo "Add socore on the master" >>~/sosetup.log 2>&1
+ # Add user "socore" to the master. This will be for things like accepting keys.
+ if [ $OS == 'centos' ]; then
+ local ADDUSER=adduser
+ else
+ local ADDUSER=useradd
+ fi
+ groupadd --gid 939 socore
+ $ADDUSER --uid 939 --gid 939 --home-dir /opt/so socore
+ # Set the password for socore that we got during setup
+ echo socore:$COREPASS1 | chpasswd --crypt-method=SHA512
+
+}
+
+add_socore_user_notmaster() {
+ echo "Add socore user on non master" >> $SETUPLOG 2>&1
+  # Add socore user to the non-master system. Probably not a bad idea to make it a system user.
+ groupadd --gid 939 socore
+ $ADDUSER --uid 939 --gid 939 --home-dir /opt/so --no-create-home socore
+
+}
+
+# Create an auth pillar so that passwords survive re-install
+auth_pillar(){
+
+ if [ ! -f /opt/so/saltstack/pillar/auth.sls ]; then
+ echo "Creating Auth Pillar" >> $SETUPLOG 2>&1
+ mkdir -p /opt/so/saltstack/pillar
+ echo "auth:" >> /opt/so/saltstack/pillar/auth.sls
+ echo " mysql: $MYSQLPASS" >> /opt/so/saltstack/pillar/auth.sls
+ echo " fleet: $FLEETPASS" >> /opt/so/saltstack/pillar/auth.sls
+ fi
+
+}
+
+# Enable Bro Logs
+bro_logs_enabled() {
+ echo "Enabling Bro Logs" >> $SETUPLOG 2>&1
+
+ echo "brologs:" > pillar/brologs.sls
+ echo " enabled:" >> pillar/brologs.sls
+
+ if [ $MASTERADV == 'ADVANCED' ]; then
+ for BLOG in ${BLOGS[@]}; do
+ echo " - $BLOG" | tr -d '"' >> pillar/brologs.sls
+ done
+ else
+ echo " - conn" >> pillar/brologs.sls
+ echo " - dce_rpc" >> pillar/brologs.sls
+ echo " - dhcp" >> pillar/brologs.sls
+ echo " - dhcpv6" >> pillar/brologs.sls
+ echo " - dnp3" >> pillar/brologs.sls
+ echo " - dns" >> pillar/brologs.sls
+ echo " - dpd" >> pillar/brologs.sls
+ echo " - files" >> pillar/brologs.sls
+ echo " - ftp" >> pillar/brologs.sls
+ echo " - http" >> pillar/brologs.sls
+ echo " - intel" >> pillar/brologs.sls
+ echo " - irc" >> pillar/brologs.sls
+ echo " - kerberos" >> pillar/brologs.sls
+ echo " - modbus" >> pillar/brologs.sls
+ echo " - mqtt" >> pillar/brologs.sls
+ echo " - notice" >> pillar/brologs.sls
+ echo " - ntlm" >> pillar/brologs.sls
+ echo " - openvpn" >> pillar/brologs.sls
+ echo " - pe" >> pillar/brologs.sls
+ echo " - radius" >> pillar/brologs.sls
+ echo " - rfb" >> pillar/brologs.sls
+ echo " - rdp" >> pillar/brologs.sls
+ echo " - signatures" >> pillar/brologs.sls
+ echo " - sip" >> pillar/brologs.sls
+ echo " - smb_files" >> pillar/brologs.sls
+ echo " - smb_mapping" >> pillar/brologs.sls
+ echo " - smtp" >> pillar/brologs.sls
+ echo " - snmp" >> pillar/brologs.sls
+ echo " - software" >> pillar/brologs.sls
+ echo " - ssh" >> pillar/brologs.sls
+ echo " - ssl" >> pillar/brologs.sls
+ echo " - syslog" >> pillar/brologs.sls
+ echo " - telnet" >> pillar/brologs.sls
+ echo " - tunnel" >> pillar/brologs.sls
+ echo " - weird" >> pillar/brologs.sls
+ echo " - mysql" >> pillar/brologs.sls
+ echo " - socks" >> pillar/brologs.sls
+ echo " - x509" >> pillar/brologs.sls
+ fi
+}
+
+calculate_useable_cores() {
+
+ # Calculate reasonable core usage
+ local CORES4BRO=$(( $CPUCORES/2 - 1 ))
+ LBPROCSROUND=$(printf "%.0f\n" $CORES4BRO)
+ # We don't want it to be 0
+ if [ "$LBPROCSROUND" -lt 1 ]; then
+ LBPROCS=1
+ else
+ LBPROCS=$LBPROCSROUND
+ fi
+
+}
+
+check_admin_pass() {
+
+ if [ $ADMINPASS1 == $ADMINPASS2 ]; then
+ APMATCH=yes
+ else
+ whiptail_passwords_dont_match
+ fi
+
+}
+
+check_hive_init_then_reboot() {
+ WAIT_STEP=0
+ MAX_WAIT=100
+ until [ -f /opt/so/state/thehive.txt ] ; do
+ WAIT_STEP=$(( ${WAIT_STEP} + 1 ))
+ echo "Waiting on the_hive to init...Attempt #$WAIT_STEP"
+ if [ ${WAIT_STEP} -gt ${MAX_WAIT} ]; then
+ echo "ERROR: We waited ${MAX_WAIT} seconds but the_hive is not working."
+ exit 5
+ fi
+ sleep 1s;
+ done
+ docker stop so-thehive
+ docker rm so-thehive
+ shutdown -r now
+}
+
+check_socore_pass() {
+
+ if [ $COREPASS1 == $COREPASS2 ]; then
+ SCMATCH=yes
+ else
+ whiptail_passwords_dont_match
+ fi
+
+}
+
+checkin_at_boot() {
+ echo "Enabling checkin at boot" >> $SETUPLOG 2>&1
+ echo "startup_states: highstate" >> /etc/salt/minion
+}
+
+chown_salt_master() {
+
+ echo "Chown the salt dirs on the master for socore" >> $SETUPLOG 2>&1
+ chown -R socore:socore /opt/so
+
+}
+
+clear_master() {
+ # Clear out the old master public key in case this is a re-install.
+ # This only happens if you re-install the master.
+ if [ -f /etc/salt/pki/minion/minion_master.pub ]; then
+ echo "Clearing old master key" >> $SETUPLOG 2>&1
+ rm /etc/salt/pki/minion/minion_master.pub
+ service salt-minion restart
+ fi
+
+}
+
+configure_minion() {
+
+  # You have to pass the TYPE to this function so it knows if it's a master or not
+ local TYPE=$1
+ echo "Configuring minion type as $TYPE" >> $SETUPLOG 2>&1
+ touch /etc/salt/grains
+ echo "role: so-$TYPE" > /etc/salt/grains
+ if [ $TYPE == 'master' ] || [ $TYPE == 'eval' ]; then
+ echo "master: $HOSTNAME" > /etc/salt/minion
+ echo "id: $MINION_ID" >> /etc/salt/minion
+ echo "mysql.host: '$MAINIP'" >> /etc/salt/minion
+ echo "mysql.port: 3306" >> /etc/salt/minion
+ echo "mysql.user: 'root'" >> /etc/salt/minion
+ if [ ! -f /opt/so/saltstack/pillar/auth.sls ]; then
+ echo "mysql.pass: '$MYSQLPASS'" >> /etc/salt/minion
+ else
+ OLDPASS=$(cat /opt/so/saltstack/pillar/auth.sls | grep mysql | awk {'print $2'})
+ echo "mysql.pass: '$OLDPASS'" >> /etc/salt/minion
+ fi
+ elif [ $TYPE == 'helix' ]; then
+ echo "master: $HOSTNAME" > /etc/salt/minion
+ echo "id: $MINION_ID" >> /etc/salt/minion
+ else
+ echo "master: $MSRV" > /etc/salt/minion
+ echo "id: $MINION_ID" >> /etc/salt/minion
+
+ fi
+
+ echo "use_superseded:" >> /etc/salt/minion
+ echo " - module.run" >> /etc/salt/minion
+
+ service salt-minion restart
+
+}
+
+copy_master_config() {
+
+ # Copy the master config template to the proper directory
+ if [ $INSTALLMETHOD == 'iso' ]; then
+ cp /root/SecurityOnion/files/master /etc/salt/master
+ else
+ cp $SCRIPTDIR/../files/master /etc/salt/master
+ fi
+
+  # Restart the service so it picks up the changes. TODO: enable the service on CentOS.
+ service salt-master restart
+
+}
+
+copy_minion_tmp_files() {
+
+ if [ $INSTALLTYPE == 'MASTERONLY' ] || [ $INSTALLTYPE == 'EVALMODE' ] || [ $INSTALLTYPE == 'HELIXSENSOR' ]; then
+ echo "Copying pillar and salt files in $TMP to /opt/so/saltstack"
+ cp -Rv $TMP/pillar/ /opt/so/saltstack/pillar/ >> $SETUPLOG 2>&1
+ if [ -d $TMP/salt ] ; then
+ cp -Rv $TMP/salt/ /opt/so/saltstack/salt/ >> $SETUPLOG 2>&1
+ fi
+ else
+ echo "scp pillar and salt files in $TMP to master /opt/so/saltstack"
+ scp -prv -i /root/.ssh/so.key $TMP/pillar/* socore@$MSRV:/opt/so/saltstack/pillar >> $SETUPLOG 2>&1
+ scp -prv -i /root/.ssh/so.key $TMP/salt/* socore@$MSRV:/opt/so/saltstack/salt >> $SETUPLOG 2>&1
+ fi
+
+ }
+
+copy_ssh_key() {
+
+ echo "Generating SSH key"
+ # Generate SSH key
+ mkdir -p /root/.ssh
+ cat /dev/zero | ssh-keygen -f /root/.ssh/so.key -t rsa -q -N ""
+ chown -R $SUDO_USER:$SUDO_USER /root/.ssh
+ echo "Copying the SSH key to the master"
+ #Copy the key over to the master
+ ssh-copy-id -f -i /root/.ssh/so.key socore@$MSRV
+
+}
+
+create_sensor_bond() {
+ echo "Setting up sensor bond" >> $SETUPLOG 2>&1
+
+ # Set the MTU
+ if [[ $NSMSETUP != 'ADVANCED' ]]; then
+ MTU=1500
+ fi
+
+ # Create the bond interface
+ nmcli con add ifname bond0 con-name "bond0" type bond mode 0 -- \
+ ipv4.method disabled \
+ ipv6.method link-local \
+ ethernet.mtu $MTU \
+ connection.autoconnect "yes" >> $SETUPLOG 2>&1
+
+ for BNIC in ${BNICS[@]}; do
+ # Strip the quotes from the NIC names
+ BONDNIC="$(echo -e "${BNIC}" | tr -d '"')"
+ # Turn off various offloading settings for the interface
+ for i in rx tx sg tso ufo gso gro lro; do
+ ethtool -K $BONDNIC $i off >> $SETUPLOG 2>&1
+ done
+ # Create the slave interface and assign it to the bond
+ nmcli con add type ethernet ifname $BONDNIC con-name "bond0-slave-$BONDNIC" master bond0 -- \
+ ethernet.mtu $MTU \
+ connection.autoconnect "yes" >> $SETUPLOG 2>&1
+ # Bring the slave interface up
+ nmcli con up bond0-slave-$BONDNIC >> $SETUPLOG 2>&1
+ done
+}
+
+detect_os() {
+
+ # Detect Base OS
+ echo "Detecting Base OS" >> $SETUPLOG 2>&1
+ if [ -f /etc/redhat-release ]; then
+ OS=centos
+ if grep -q "CentOS Linux release 7" /etc/redhat-release; then
+ OSVER=7
+ elif grep -q "CentOS Linux release 8" /etc/redhat-release; then
+ OSVER=8
+ echo "We currently do not support CentOS $OSVER but we are working on it!"
+ exit
+ else
+ echo "We do not support the version of CentOS you are trying to use"
+ exit
+ fi
+
+ # Install bind-utils so the host command exists
+ yum -y install bind-utils
+
+
+ elif [ -f /etc/os-release ]; then
+ OS=ubuntu
+ if grep -q "UBUNTU_CODENAME=bionic" /etc/os-release; then
+ OSVER=bionic
+ echo "We currently don't support Ubuntu $OSVER but we are working on it!"
+ exit
+ elif grep -q "UBUNTU_CODENAME=xenial" /etc/os-release; then
+ OSVER=xenial
+ else
+ echo "We do not support your current version of Ubuntu"
+ exit
+ fi
+ # Install network manager so we can do interface stuff
+ apt install -y network-manager
+ /bin/systemctl enable network-manager
+ /bin/systemctl start network-manager
+ else
+ echo "We were unable to determine if you are using a supported OS." >> $SETUPLOG 2>&1
+ exit
+ fi
+
+ echo "Found OS: $OS $OSVER" >> $SETUPLOG 2>&1
+
+}
+
+#disable_dnsmasq() {
+
+# if [ -f /etc/NetworkManager/NetworkManager.conf ]; then
+# echo "Disabling dnsmasq in /etc/NetworkManager/NetworkManager.conf"
+# sed -e 's/^dns=dnsmasq/#dns=dnsmasq/g' -i /etc/NetworkManager/NetworkManager.conf
+# fi
+
+#}
+
+disable_onion_user() {
+
+ # Disable the default account cause security.
+ usermod -L onion
+
+}
+
+disable_unused_nics() {
+ for UNUSED_NIC in ${FNICS[@]}; do
+ # Disable DHCPv4/v6 and autoconnect
+ nmcli con mod $UNUSED_NIC \
+ ipv4.method disabled \
+ ipv6.method link-local \
+ connection.autoconnect "no" >> $SETUPLOG 2>&1
+
+ # Flush any existing IPs
+ ip addr flush $UNUSED_NIC >> $SETUPLOG 2>&1
+ done
+}
+
+docker_install() {
+
+ if [ $OS == 'centos' ]; then
+ yum clean expire-cache
+ yum -y install yum-utils device-mapper-persistent-data lvm2 openssl
+ yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
+ yum -y update
+ yum -y install docker-ce python36-docker
+ if [ $INSTALLTYPE != 'EVALMODE' ]; then
+ docker_registry
+ fi
+ echo "Restarting Docker" >> $SETUPLOG 2>&1
+ systemctl restart docker
+ systemctl enable docker
+
+ else
+ if [ $INSTALLTYPE == 'MASTERONLY' ] || [ $INSTALLTYPE == 'EVALMODE' ]; then
+ apt-get update >> $SETUPLOG 2>&1
+ apt-get -y install docker-ce python3-docker >> $SETUPLOG 2>&1
+ if [ $INSTALLTYPE != 'EVALMODE' ]; then
+ docker_registry >> $SETUPLOG 2>&1
+ fi
+ echo "Restarting Docker" >> $SETUPLOG 2>&1
+ systemctl restart docker >> $SETUPLOG 2>&1
+ else
+ apt-key add $TMP/gpg/docker.pub >> $SETUPLOG 2>&1
+ add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" >> $SETUPLOG 2>&1
+ apt-get update >> $SETUPLOG 2>&1
+ apt-get -y install docker-ce python3-docker >> $SETUPLOG 2>&1
+ docker_registry >> $SETUPLOG 2>&1
+ echo "Restarting Docker" >> $SETUPLOG 2>&1
+ systemctl restart docker >> $SETUPLOG 2>&1
+ fi
+ fi
+
+}
+
+docker_registry() {
+
+ echo "Setting up Docker Registry" >> $SETUPLOG 2>&1
+ mkdir -p /etc/docker >> $SETUPLOG 2>&1
+ # Make the host use the master docker registry
+ echo "{" > /etc/docker/daemon.json
+ echo " \"registry-mirrors\": [\"https://$MSRV:5000\"]" >> /etc/docker/daemon.json
+ echo "}" >> /etc/docker/daemon.json
+ echo "Docker Registry Setup - Complete" >> $SETUPLOG 2>&1
+
+}
+
+es_heapsize() {
+
+ # Determine ES Heap Size
+ if [ $TOTAL_MEM -lt 8000 ] ; then
+ ES_HEAP_SIZE="600m"
+ elif [ $TOTAL_MEM -ge 100000 ]; then
+ # Set a max of 25GB for heap size
+ # https://www.elastic.co/guide/en/elasticsearch/guide/current/heap-sizing.html
+ ES_HEAP_SIZE="25000m"
+ else
+ # Set heap size to 25% of available memory
+ ES_HEAP_SIZE=$(($TOTAL_MEM / 4))"m"
+ fi
+
+}
+
+filter_unused_nics() {
+ # Set the main NIC as the default grep search string
+ grep_string=$MNIC
+
+ # If we call this function and NICs have already been assigned to the bond interface then add them to the grep search string
+ if [[ $BNICS ]]; then
+ for BONDNIC in ${BNICS[@]}; do
+ grep_string="$grep_string\|$BONDNIC"
+ done
+ fi
+
+ # Finally, set FNICS to any NICs we aren't using (and ignore interfaces that aren't of use)
+ FNICS=$(ip link | grep -vwe $grep_string | awk -F: '$0 !~ "lo|vir|veth|br|docker|wl|^[^0-9]"{print $2}')
+}
+
+fireeye_pillar() {
+
+ FIREEYEPILLARPATH=/opt/so/saltstack/pillar/fireeye
+ mkdir -p $FIREEYEPILLARPATH
+
+ echo "" >> $FIREEYEPILLARPATH/init.sls
+ echo "fireeye:" >> $FIREEYEPILLARPATH/init.sls
+ echo " helix:" >> $FIREEYEPILLARPATH/init.sls
+ echo " api_key: $HELIXAPIKEY" >> $FIREEYEPILLARPATH/init.sls
+
+}
+
+generate_passwords(){
+ # Generate Random Passwords for Things
+ MYSQLPASS=$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 20 | head -n 1)
+ FLEETPASS=$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 20 | head -n 1)
+ HIVEKEY=$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 20 | head -n 1)
+ CORTEXKEY=$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 20 | head -n 1)
+ CORTEXORGUSERKEY=$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 20 | head -n 1)
+ SENSORONIKEY=$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 20 | head -n 1)
+}
+
+get_filesystem_nsm(){
+ FSNSM=$(df /nsm | awk '$3 ~ /[0-9]+/ { print $2 * 1000 }')
+}
+
+get_log_size_limit() {
+
+ DISK_DIR="/"
+ if [ -d /nsm ]; then
+ DISK_DIR="/nsm"
+ fi
+ DISK_SIZE_K=`df $DISK_DIR |grep -v "^Filesystem" | awk '{print $2}'`
+ PERCENTAGE=85
+ DISK_SIZE=DISK_SIZE_K*1000
+ PERCENTAGE_DISK_SPACE=`echo $(($DISK_SIZE*$PERCENTAGE/100))`
+ LOG_SIZE_LIMIT=$(($PERCENTAGE_DISK_SPACE/1000000000))
+
+}
+
+get_filesystem_root(){
+ FSROOT=$(df / | awk '$3 ~ /[0-9]+/ { print $2 * 1000 }')
+}
+
+get_main_ip() {
+
+ # Get the main IP address the box is using
+ MAINIP=$(ip route get 1 | awk '{print $NF;exit}')
+ MAININT=$(ip route get 1 | awk '{print $5;exit}')
+
+}
+
+got_root() {
+
+ # Make sure you are root
+ if [ "$(id -u)" -ne 0 ]; then
+ echo "This script must be run using sudo!"
+ exit 1
+ fi
+
+}
+
+install_cleanup() {
+
+ echo "install_cleanup called" >> $SETUPLOG 2>&1
+
+ # Clean up after ourselves
+ rm -rf /root/installtmp
+
+}
+
+install_prep() {
+
+ # Create a tmp space that isn't in /tmp
+ mkdir /root/installtmp
+ TMP=/root/installtmp
+
+}
+
+install_master() {
+
+ # Install the salt master package
+ if [ $OS == 'centos' ]; then
+ #yum -y install wget salt-common salt-master python36-mysql python36-dateutil python36-m2crypto >> $SETUPLOG 2>&1
+ echo ""
+ # Create a place for the keys for Ubuntu minions
+ #mkdir -p /opt/so/gpg
+ #wget --inet4-only -O /opt/so/gpg/SALTSTACK-GPG-KEY.pub https://repo.saltstack.com/apt/ubuntu/16.04/amd64/latest/SALTSTACK-GPG-KEY.pub
+ #wget --inet4-only -O /opt/so/gpg/docker.pub https://download.docker.com/linux/ubuntu/gpg
+ #wget --inet4-only -O /opt/so/gpg/GPG-KEY-WAZUH https://packages.wazuh.com/key/GPG-KEY-WAZUH
+
+ else
+ apt-get install -y salt-common=2019.2.2+ds-1 salt-master=2019.2.2+ds-1 salt-minion=2019.2.2+ds-1 libssl-dev python-m2crypto
+ apt-mark hold salt-common salt-master salt-minion
+ fi
+
+ copy_master_config
+
+}
+
+ls_heapsize() {
+
+ # Determine LS Heap Size
+ if [ $TOTAL_MEM -ge 32000 ] ; then
+ LS_HEAP_SIZE="1000m"
+ else
+ # If minimal RAM, then set minimal heap
+ LS_HEAP_SIZE="500m"
+ fi
+
+}
+
+master_pillar() {
+
+ # Create the master pillar
+ touch /opt/so/saltstack/pillar/masters/$MINION_ID.sls
+ echo "master:" > /opt/so/saltstack/pillar/masters/$MINION_ID.sls
+ echo " mainip: $MAINIP" >> /opt/so/saltstack/pillar/masters/$MINION_ID.sls
+ echo " mainint: $MAININT" >> /opt/so/saltstack/pillar/masters/$MINION_ID.sls
+ echo " esheap: $ES_HEAP_SIZE" >> /opt/so/saltstack/pillar/masters/$MINION_ID.sls
+ echo " esclustername: {{ grains.host }}" >> /opt/so/saltstack/pillar/masters/$MINION_ID.sls
+ if [ $INSTALLTYPE == 'EVALMODE' ] || [ $INSTALLTYPE == 'HELIXSENSOR' ]; then
+ echo " freq: 0" >> /opt/so/saltstack/pillar/masters/$MINION_ID.sls
+ echo " domainstats: 0" >> /opt/so/saltstack/pillar/masters/$MINION_ID.sls
+ echo " ls_pipeline_batch_size: 125" >> /opt/so/saltstack/pillar/masters/$MINION_ID.sls
+ echo " ls_input_threads: 1" >> /opt/so/saltstack/pillar/masters/$MINION_ID.sls
+ echo " ls_batch_count: 125" >> /opt/so/saltstack/pillar/masters/$MINION_ID.sls
+ echo " mtu: 1500" >> /opt/so/saltstack/pillar/masters/$MINION_ID.sls
+
+ else
+ echo " freq: 0" >> /opt/so/saltstack/pillar/masters/$MINION_ID.sls
+ echo " domainstats: 0" >> /opt/so/saltstack/pillar/masters/$MINION_ID.sls
+ fi
+ if [ $INSTALLTYPE == 'HELIXSENSOR' ]; then
+ echo " lsheap: 1000m" >> /opt/so/saltstack/pillar/masters/$MINION_ID.sls
+ else
+ echo " lsheap: $LS_HEAP_SIZE" >> /opt/so/saltstack/pillar/masters/$MINION_ID.sls
+ fi
+ echo " lsaccessip: 127.0.0.1" >> /opt/so/saltstack/pillar/masters/$MINION_ID.sls
+ echo " elastalert: 1" >> /opt/so/saltstack/pillar/masters/$MINION_ID.sls
+ echo " ls_pipeline_workers: $CPUCORES" >> /opt/so/saltstack/pillar/masters/$MINION_ID.sls
+ echo " nids_rules: $RULESETUP" >> /opt/so/saltstack/pillar/masters/$MINION_ID.sls
+ echo " oinkcode: $OINKCODE" >> /opt/so/saltstack/pillar/masters/$MINION_ID.sls
+ #echo " access_key: $ACCESS_KEY" >> /opt/so/saltstack/pillar/masters/$MINION_ID.sls
+ #echo " access_secret: $ACCESS_SECRET" >> /opt/so/saltstack/pillar/masters/$MINION_ID.sls
+ echo " es_port: $NODE_ES_PORT" >> /opt/so/saltstack/pillar/masters/$MINION_ID.sls
+ echo " log_size_limit: $LOG_SIZE_LIMIT" >> /opt/so/saltstack/pillar/masters/$MINION_ID.sls
+ echo " cur_close_days: $CURCLOSEDAYS" >> /opt/so/saltstack/pillar/masters/$MINION_ID.sls
+ #echo " mysqlpass: $MYSQLPASS" >> /opt/so/saltstack/pillar/masters/$MINION_ID.sls
+ #echo " fleetpass: $FLEETPASS" >> /opt/so/saltstack/pillar/masters/$MINION_ID.sls
+ echo " grafana: $GRAFANA" >> /opt/so/saltstack/pillar/masters/$MINION_ID.sls
+ echo " osquery: $OSQUERY" >> /opt/so/saltstack/pillar/masters/$MINION_ID.sls
+ echo " wazuh: $WAZUH" >> /opt/so/saltstack/pillar/masters/$MINION_ID.sls
+ echo " thehive: $THEHIVE" >> /opt/so/saltstack/pillar/masters/$MINION_ID.sls
+ echo " playbook: $PLAYBOOK" >> /opt/so/saltstack/pillar/masters/$MINION_ID.sls
+ }
+
+master_static() {
+
+ # Create a static file for global values
+ touch /opt/so/saltstack/pillar/static.sls
+
+ echo "static:" > /opt/so/saltstack/pillar/static.sls
+ echo " hnmaster: $HNMASTER" >> /opt/so/saltstack/pillar/static.sls
+ echo " ntpserver: $NTPSERVER" >> /opt/so/saltstack/pillar/static.sls
+ echo " proxy: $PROXY" >> /opt/so/saltstack/pillar/static.sls
+ echo " broversion: $BROVERSION" >> /opt/so/saltstack/pillar/static.sls
+ echo " ids: $NIDS" >> /opt/so/saltstack/pillar/static.sls
+ echo " masterip: $MAINIP" >> /opt/so/saltstack/pillar/static.sls
+ echo " hiveuser: hiveadmin" >> /opt/so/saltstack/pillar/static.sls
+ echo " hivepassword: hivechangeme" >> /opt/so/saltstack/pillar/static.sls
+ echo " hivekey: $HIVEKEY" >> /opt/so/saltstack/pillar/static.sls
+ echo " cortexuser: cortexadmin" >> /opt/so/saltstack/pillar/static.sls
+ echo " cortexpassword: cortexchangeme" >> /opt/so/saltstack/pillar/static.sls
+ echo " cortexkey: $CORTEXKEY" >> /opt/so/saltstack/pillar/static.sls
+ echo " cortexorgname: SecurityOnion" >> /opt/so/saltstack/pillar/static.sls
+ echo " cortexorguser: soadmin" >> /opt/so/saltstack/pillar/static.sls
+ echo " cortexorguserkey: $CORTEXORGUSERKEY" >> /opt/so/saltstack/pillar/static.sls
+ echo " fleetsetup: 0" >> /opt/so/saltstack/pillar/static.sls
+ echo " sensoronikey: $SENSORONIKEY" >> /opt/so/saltstack/pillar/static.sls
+ if [[ $MASTERUPDATES == 'MASTER' ]]; then
+ echo " masterupdate: 1" >> /opt/so/saltstack/pillar/static.sls
+ else
+ echo " masterupdate: 0" >> /opt/so/saltstack/pillar/static.sls
+ fi
+}
+
+minio_generate_keys() {
+
+ local charSet="[:graph:]"
+
+ ACCESS_KEY=$(cat /dev/urandom | tr -cd "$charSet" | tr -d \' | tr -d \" | head -c 20)
+ ACCESS_SECRET=$(cat /dev/urandom | tr -cd "$charSet" | tr -d \' | tr -d \" | head -c 40)
+
+}
+
+network_setup() {
+ echo "Finishing up network setup" >> $SETUPLOG 2>&1
+
+ echo "... Disabling unused NICs" >> $SETUPLOG 2>&1
+ disable_unused_nics >> $SETUPLOG 2>&1
+
+ echo "... Setting ONBOOT for management interface" >> $SETUPLOG 2>&1
+ nmcli con mod $MAININT connection.autoconnect "yes" >> $SETUPLOG 2>&1
+
+ echo "... Copying disable-checksum-offload.sh" >> $SETUPLOG 2>&1
+ cp $SCRIPTDIR/install_scripts/disable-checksum-offload.sh /etc/NetworkManager/dispatcher.d/disable-checksum-offload.sh >> $SETUPLOG 2>&1
+
+ echo "... Modifying disable-checksum-offload.sh" >> $SETUPLOG 2>&1
+ sed -i "s/\$MAININT/${MAININT}/g" /etc/NetworkManager/dispatcher.d/disable-checksum-offload.sh >> $SETUPLOG 2>&1
+}
+
+node_pillar() {
+
+ NODEPILLARPATH=$TMP/pillar/nodes
+ if [ ! -d $NODEPILLARPATH ]; then
+ mkdir -p $NODEPILLARPATH
+ fi
+
+ # Create the node pillar
+ touch $NODEPILLARPATH/$MINION_ID.sls
+ echo "node:" > $NODEPILLARPATH/$MINION_ID.sls
+ echo " mainip: $MAINIP" >> $NODEPILLARPATH/$MINION_ID.sls
+ echo " mainint: $MAININT" >> $NODEPILLARPATH/$MINION_ID.sls
+ echo " esheap: $NODE_ES_HEAP_SIZE" >> $NODEPILLARPATH/$MINION_ID.sls
+ echo " esclustername: {{ grains.host }}" >> $NODEPILLARPATH/$MINION_ID.sls
+ echo " lsheap: $NODE_LS_HEAP_SIZE" >> $NODEPILLARPATH/$MINION_ID.sls
+ echo " ls_pipeline_workers: $LSPIPELINEWORKERS" >> $NODEPILLARPATH/$MINION_ID.sls
+ echo " ls_pipeline_batch_size: $LSPIPELINEBATCH" >> $NODEPILLARPATH/$MINION_ID.sls
+ echo " ls_input_threads: $LSINPUTTHREADS" >> $NODEPILLARPATH/$MINION_ID.sls
+ echo " ls_batch_count: $LSINPUTBATCHCOUNT" >> $NODEPILLARPATH/$MINION_ID.sls
+ echo " es_shard_count: $SHARDCOUNT" >> $NODEPILLARPATH/$MINION_ID.sls
+ echo " node_type: $NODETYPE" >> $NODEPILLARPATH/$MINION_ID.sls
+ echo " es_port: $NODE_ES_PORT" >> $NODEPILLARPATH/$MINION_ID.sls
+ echo " log_size_limit: $LOG_SIZE_LIMIT" >> $NODEPILLARPATH/$MINION_ID.sls
+ echo " cur_close_days: $CURCLOSEDAYS" >> $NODEPILLARPATH/$MINION_ID.sls
+
+}
+
+patch_pillar() {
+
+ case $INSTALLTYPE in
+ MASTERONLY | EVALMODE | HELIXSENSOR)
+ PATCHPILLARPATH=/opt/so/saltstack/pillar/masters
+ ;;
+ SENSORONLY)
+ PATCHPILLARPATH=$SENSORPILLARPATH
+ ;;
+ STORAGENODE | PARSINGNODE | HOTNODE | WARMNODE)
+ PATCHPILLARPATH=$NODEPILLARPATH
+ ;;
+ esac
+
+
+ echo "" >> $PATCHPILLARPATH/$MINION_ID.sls
+ echo "patch:" >> $PATCHPILLARPATH/$MINION_ID.sls
+ echo " os:" >> $PATCHPILLARPATH/$MINION_ID.sls
+ echo " schedule_name: $PATCHSCHEDULENAME" >> $PATCHPILLARPATH/$MINION_ID.sls
+ echo " enabled: True" >> $PATCHPILLARPATH/$MINION_ID.sls
+ echo " splay: 300" >> $PATCHPILLARPATH/$MINION_ID.sls
+
+
+}
+
+patch_schedule_os_new() {
+ OSPATCHSCHEDULEDIR="$TMP/salt/patch/os/schedules"
+ OSPATCHSCHEDULE="$OSPATCHSCHEDULEDIR/$PATCHSCHEDULENAME.yml"
+
+ if [ ! -d $OSPATCHSCHEDULEDIR ] ; then
+ mkdir -p $OSPATCHSCHEDULEDIR
+ fi
+
+ echo "patch:" > $OSPATCHSCHEDULE
+ echo " os:" >> $OSPATCHSCHEDULE
+ echo " schedule:" >> $OSPATCHSCHEDULE
+ for psd in "${PATCHSCHEDULEDAYS[@]}"
+ do
+ psd=$(echo $psd | sed 's/"//g')
+ echo " - $psd:" >> $OSPATCHSCHEDULE
+ for psh in "${PATCHSCHEDULEHOURS[@]}"
+ do
+ psh=$(echo $psh | sed 's/"//g')
+ echo " - '$psh'" >> $OSPATCHSCHEDULE
+ done
+ done
+
+}
+
+process_components() {
+ CLEAN=${COMPONENTS//\"}
+ GRAFANA=0
+ OSQUERY=0
+ WAZUH=0
+ THEHIVE=0
+ PLAYBOOK=0
+
+ IFS=$' '
+ for item in $(echo "$CLEAN"); do
+ let $item=1
+ done
+ unset IFS
+}
+
+reserve_group_ids() {
+
+ # This is a hack to fix CentOS from taking group IDs that we need
+ groupadd -g 930 elasticsearch
+ groupadd -g 931 logstash
+ groupadd -g 932 kibana
+ groupadd -g 933 elastalert
+ groupadd -g 934 curator
+ groupadd -g 937 bro
+ groupadd -g 939 socore
+ groupadd -g 940 suricata
+ groupadd -g 941 stenographer
+ groupadd -g 945 ossec
+ groupadd -g 946 cyberchef
+
+}
+
+saltify() {
+
+ # Install updates and Salt
+ if [ $OS == 'centos' ]; then
+ ADDUSER=adduser
+
+ if [ $INSTALLTYPE == 'MASTERONLY' ] || [ $INSTALLTYPE == 'EVALMODE' ] || [ $INSTALLTYPE == 'HELIXSENSOR' ]; then
+ reserve_group_ids
+ yum -y install wget https://repo.saltstack.com/py3/redhat/salt-py3-repo-latest-2.el7.noarch.rpm
+ cp /etc/yum.repos.d/salt-py3-latest.repo /etc/yum.repos.d/salt-py3-2019-2.repo
+ sed -i 's/latest/2019.2/g' /etc/yum.repos.d/salt-py3-2019-2.repo
+ # Download Ubuntu Keys in case master updates = 1
+ mkdir -p /opt/so/gpg
+ wget --inet4-only -O /opt/so/gpg/SALTSTACK-GPG-KEY.pub https://repo.saltstack.com/apt/ubuntu/16.04/amd64/latest/SALTSTACK-GPG-KEY.pub
+ wget --inet4-only -O /opt/so/gpg/docker.pub https://download.docker.com/linux/ubuntu/gpg
+ wget --inet4-only -O /opt/so/gpg/GPG-KEY-WAZUH https://packages.wazuh.com/key/GPG-KEY-WAZUH
+ cat > /etc/yum.repos.d/wazuh.repo <<\EOF
+[wazuh_repo]
+gpgcheck=1
+gpgkey=https://packages.wazuh.com/key/GPG-KEY-WAZUH
+enabled=1
+name=Wazuh repository
+baseurl=https://packages.wazuh.com/3.x/yum/
+protect=1
+EOF
+
+ else
+
+ if [ $MASTERUPDATES == 'MASTER' ]; then
+
+ # Create the GPG Public Key for the Salt Repo
+ echo "-----BEGIN PGP PUBLIC KEY BLOCK-----" > /etc/pki/rpm-gpg/saltstack-signing-key
+ echo "Version: GnuPG v2.0.22 (GNU/Linux)" >> /etc/pki/rpm-gpg/saltstack-signing-key
+ echo "" >> /etc/pki/rpm-gpg/saltstack-signing-key
+ echo "mQENBFOpvpgBCADkP656H41i8fpplEEB8IeLhugyC2rTEwwSclb8tQNYtUiGdna9" >> /etc/pki/rpm-gpg/saltstack-signing-key
+ echo "m38kb0OS2DDrEdtdQb2hWCnswxaAkUunb2qq18vd3dBvlnI+C4/xu5ksZZkRj+fW" >> /etc/pki/rpm-gpg/saltstack-signing-key
+ echo "tArNR18V+2jkwcG26m8AxIrT+m4M6/bgnSfHTBtT5adNfVcTHqiT1JtCbQcXmwVw" >> /etc/pki/rpm-gpg/saltstack-signing-key
+ echo "WbqS6v/LhcsBE//SHne4uBCK/GHxZHhQ5jz5h+3vWeV4gvxS3Xu6v1IlIpLDwUts" >> /etc/pki/rpm-gpg/saltstack-signing-key
+ echo "kT1DumfynYnnZmWTGc6SYyIFXTPJLtnoWDb9OBdWgZxXfHEcBsKGha+bXO+m2tHA" >> /etc/pki/rpm-gpg/saltstack-signing-key
+ echo "gNneN9i5f8oNxo5njrL8jkCckOpNpng18BKXABEBAAG0MlNhbHRTdGFjayBQYWNr" >> /etc/pki/rpm-gpg/saltstack-signing-key
+ echo "YWdpbmcgVGVhbSA8cGFja2FnaW5nQHNhbHRzdGFjay5jb20+iQE4BBMBAgAiBQJT" >> /etc/pki/rpm-gpg/saltstack-signing-key
+ echo "qb6YAhsDBgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgAAKCRAOCKFJ3le/vhkqB/0Q" >> /etc/pki/rpm-gpg/saltstack-signing-key
+ echo "WzELZf4d87WApzolLG+zpsJKtt/ueXL1W1KA7JILhXB1uyvVORt8uA9FjmE083o1" >> /etc/pki/rpm-gpg/saltstack-signing-key
+ echo "yE66wCya7V8hjNn2lkLXboOUd1UTErlRg1GYbIt++VPscTxHxwpjDGxDB1/fiX2o" >> /etc/pki/rpm-gpg/saltstack-signing-key
+ echo "nK5SEpuj4IeIPJVE/uLNAwZyfX8DArLVJ5h8lknwiHlQLGlnOu9ulEAejwAKt9CU" >> /etc/pki/rpm-gpg/saltstack-signing-key
+ echo "4oYTszYM4xrbtjB/fR+mPnYh2fBoQO4d/NQiejIEyd9IEEMd/03AJQBuMux62tjA" >> /etc/pki/rpm-gpg/saltstack-signing-key
+ echo "/NwvQ9eqNgLw9NisFNHRWtP4jhAOsshv1WW+zPzu3ozoO+lLHixUIz7fqRk38q8Q" >> /etc/pki/rpm-gpg/saltstack-signing-key
+ echo "9oNR31KvrkSNrFbA3D89uQENBFOpvpgBCADJ79iH10AfAfpTBEQwa6vzUI3Eltqb" >> /etc/pki/rpm-gpg/saltstack-signing-key
+ echo "9aZ0xbZV8V/8pnuU7rqM7Z+nJgldibFk4gFG2bHCG1C5aEH/FmcOMvTKDhJSFQUx" >> /etc/pki/rpm-gpg/saltstack-signing-key
+ echo "uhgxttMArXm2c22OSy1hpsnVG68G32Nag/QFEJ++3hNnbyGZpHnPiYgej3FrerQJ" >> /etc/pki/rpm-gpg/saltstack-signing-key
+ echo "zv456wIsxRDMvJ1NZQB3twoCqwapC6FJE2hukSdWB5yCYpWlZJXBKzlYz/gwD/Fr" >> /etc/pki/rpm-gpg/saltstack-signing-key
+ echo "GL578WrLhKw3UvnJmlpqQaDKwmV2s7MsoZogC6wkHE92kGPG2GmoRD3ALjmCvN1E" >> /etc/pki/rpm-gpg/saltstack-signing-key
+ echo "PsIsQGnwpcXsRpYVCoW7e2nW4wUf7IkFZ94yOCmUq6WreWI4NggRcFC5ABEBAAGJ" >> /etc/pki/rpm-gpg/saltstack-signing-key
+ echo "AR8EGAECAAkFAlOpvpgCGwwACgkQDgihSd5Xv74/NggA08kEdBkiWWwJZUZEy7cK" >> /etc/pki/rpm-gpg/saltstack-signing-key
+ echo "WWcgjnRuOHd4rPeT+vQbOWGu6x4bxuVf9aTiYkf7ZjVF2lPn97EXOEGFWPZeZbH4" >> /etc/pki/rpm-gpg/saltstack-signing-key
+ echo "vdRFH9jMtP+rrLt6+3c9j0M8SIJYwBL1+CNpEC/BuHj/Ra/cmnG5ZNhYebm76h5f" >> /etc/pki/rpm-gpg/saltstack-signing-key
+ echo "T9iPW9fFww36FzFka4VPlvA4oB7ebBtquFg3sdQNU/MmTVV4jPFWXxh4oRDDR+8N" >> /etc/pki/rpm-gpg/saltstack-signing-key
+ echo "1bcPnbB11b5ary99F/mqr7RgQ+YFF0uKRE3SKa7a+6cIuHEZ7Za+zhPaQlzAOZlx" >> /etc/pki/rpm-gpg/saltstack-signing-key
+ echo "fuBmScum8uQTrEF5+Um5zkwC7EXTdH1co/+/V/fpOtxIg4XO4kcugZefVm5ERfVS" >> /etc/pki/rpm-gpg/saltstack-signing-key
+ echo "MA==" >> /etc/pki/rpm-gpg/saltstack-signing-key
+ echo "=dtMN" >> /etc/pki/rpm-gpg/saltstack-signing-key
+ echo "-----END PGP PUBLIC KEY BLOCK-----" >> /etc/pki/rpm-gpg/saltstack-signing-key
+
+ # Add the Wazuh Key
+ cat > /etc/pki/rpm-gpg/GPG-KEY-WAZUH <<\EOF
+-----BEGIN PGP PUBLIC KEY BLOCK-----
+Version: GnuPG v1
+
+mQINBFeeyYwBEACyf4VwV8c2++J5BmCl6ofLCtSIW3UoVrF4F+P19k/0ngnSfjWb
+8pSWB11HjZ3Mr4YQeiD7yY06UZkrCXk+KXDlUjMK3VOY7oNPkqzNaP6+8bDwj4UA
+hADMkaXBvWooGizhCoBtDb1bSbHKcAnQ3PTdiuaqF5bcyKk8hv939CHulL2xH+BP
+mmTBi+PM83pwvR+VRTOT7QSzf29lW1jD79v4rtXHJs4KCz/amT/nUm/tBpv3q0sT
+9M9rH7MTQPdqvzMl122JcZST75GzFJFl0XdSHd5PAh2mV8qYak5NYNnwA41UQVIa
++xqhSu44liSeZWUfRdhrQ/Nb01KV8lLAs11Sz787xkdF4ad25V/Rtg/s4UXt35K3
+klGOBwDnzPgHK/OK2PescI5Ve1z4x1C2bkGze+gk/3IcfGJwKZDfKzTtqkZ0MgpN
+7RGghjkH4wpFmuswFFZRyV+s7jXYpxAesElDSmPJ0O07O4lQXQMROE+a2OCcm0eF
+3+Cr6qxGtOp1oYMOVH0vOLYTpwOkAM12/qm7/fYuVPBQtVpTojjV5GDl2uGq7p0o
+h9hyWnLeNRbAha0px6rXcF9wLwU5n7mH75mq5clps3sP1q1/VtP/Fr84Lm7OGke4
+9eD+tPNCdRx78RNWzhkdQxHk/b22LCn1v6p1Q0qBco9vw6eawEkz1qwAjQARAQAB
+tDFXYXp1aC5jb20gKFdhenVoIFNpZ25pbmcgS2V5KSA8c3VwcG9ydEB3YXp1aC5j
+b20+iQI9BBMBCAAnBQJXnsmMAhsDBQkFo5qABQsJCAcDBRUKCQgLBRYCAwEAAh4B
+AheAAAoJEJaz7l8pERFFHEsQAIaslejcW2NgjgOZuvn1Bht4JFMbCIPOekg4Z5yF
+binRz0wmA7JNaawDHTBYa6L+A2Xneu/LmuRjFRMesqopUukVeGQgHBXbGMzY46eI
+rqq/xgvgWzHSbWweiOX0nn+exbEAM5IyW+efkWNz0e8xM1LcxdYZxkVOqFqkp3Wv
+J9QUKw6z9ifUOx++G8UO307O3hT2f+x4MUoGZeOF4q1fNy/VyBS2lMg2HF7GWy2y
+kjbSe0p2VOFGEZLuu2f5tpPNth9UJiTliZKmgSk/zbKYmSjiVY2eDqNJ4qjuqes0
+vhpUaBjA+DgkEWUrUVXG5yfQDzTiYIF84LknjSJBYSLZ4ABsMjNO+GApiFPcih+B
+Xc9Kx7E9RNsNTDqvx40y+xmxDOzVIssXeKqwO8r5IdG3K7dkt2Vkc/7oHOpcKwE5
+8uASMPiqqMo+t1RVa6Spckp3Zz8REILbotnnVwDIwo2HmgASirMGUcttEJzubaIa
+Mv43GKs8RUH9s5NenC02lfZG7D8WQCz5ZH7yEWrt5bCaQRNDXjhsYE17SZ/ToHi3
+OpWu050ECWOHdxlXNG3dOWIdFDdBJM7UfUNSSOe2Y5RLsWfwvMFGbfpdlgJcMSDV
+X+ienkrtXhBteTu0dwPu6HZTFOjSftvtAo0VIqGQrKMvKelkkdNGdDFLQw2mUDcw
+EQj6uQINBFeeyYwBEADD1Y3zW5OrnYZ6ghTd5PXDAMB8Z1ienmnb2IUzLM+i0yE2
+TpKSP/XYCTBhFa390rYgFO2lbLDVsiz7Txd94nHrdWXGEQfwrbxsvdlLLWk7iN8l
+Fb4B60OfRi3yoR96a/kIPNa0x26+n79LtDuWZ/DTq5JSHztdd9F1sr3h8i5zYmtv
+luj99ZorpwYejbBVUm0+gP0ioaXM37uO56UFVQk3po9GaS+GtLnlgoE5volgNYyO
+rkeIua4uZVsifREkHCKoLJip6P7S3kTyfrpiSLhouEZ7kV1lbMbFgvHXyjm+/AIx
+HIBy+H+e+HNt5gZzTKUJsuBjx44+4jYsOR67EjOdtPOpgiuJXhedzShEO6rbu/O4
+wM1rX45ZXDYa2FGblHCQ/VaS0ttFtztk91xwlWvjTR8vGvp5tIfCi+1GixPRQpbN
+Y/oq8Kv4A7vB3JlJscJCljvRgaX0gTBzlaF6Gq0FdcWEl5F1zvsWCSc/Fv5WrUPY
+5mG0m69YUTeVO6cZS1aiu9Qh3QAT/7NbUuGXIaAxKnu+kkjLSz+nTTlOyvbG7BVF
+a6sDmv48Wqicebkc/rCtO4g8lO7KoA2xC/K/6PAxDrLkVyw8WPsAendmezNfHU+V
+32pvWoQoQqu8ysoaEYc/j9fN4H3mEBCN3QUJYCugmHP0pu7VtpWwwMUqcGeUVwAR
+AQABiQIlBBgBCAAPBQJXnsmMAhsMBQkFo5qAAAoJEJaz7l8pERFFz8IP/jfBxJSB
+iOw+uML+C4aeYxuHSdxmSsrJclYjkw7Asha/fm4Kkve00YAW8TGxwH2kgS72ooNJ
+1Q7hUxNbVyrJjQDSMkRKwghmrPnUM3UyHmE0dq+G2NhaPdFo8rKifLOPgwaWAfSV
+wgMTK86o0kqRbGpXgVIG5eRwv2FcxM3xGfy7sub07J2VEz7Ba6rYQ3NTbPK42AtV
++wRJDXcgS7y6ios4XQtSbIB5f6GI56zVlwfRd3hovV9ZAIJQ6DKM31wD6Kt/pRun
+DjwMZu0/82JMoqmxX/00sNdDT1S13guCfl1WhBu7y1ja9MUX5OpUzyEKg5sxme+L
+iY2Rhs6CjmbTm8ER4Uj8ydKyVTy8zbumbB6T8IwCAbEMtPxm6pKh/tgLpoJ+Bj0y
+AsGjmhV7R6PKZSDXg7/qQI98iC6DtWc9ibC/QuHLcvm3hz40mBgXAemPJygpxGst
+mVtU7O3oHw9cIUpkbMuVqSxgPFmSSq5vEYkka1CYeg8bOz6aCTuO5J0GDlLrpjtx
+6lyImbZAF/8zKnW19aq5lshT2qJlTQlZRwwDZX5rONhA6T8IEUnUyD4rAIQFwfJ+
+gsXa4ojD/tA9NLdiNeyEcNfyX3FZwXWCtVLXflzdRN293FKamcdnMjVRjkCnp7iu
+7eO7nMgcRoWddeU+2aJFqCoQtKCp/5EKhFey
+=UIVm
+-----END PGP PUBLIC KEY BLOCK-----
+EOF
+
+                        # The proxy doesn't handle this cleanly, so define the salt-latest repo manually
+ echo "[salt-latest]" > /etc/yum.repos.d/salt-latest.repo
+ echo "name=SaltStack Latest Release Channel for RHEL/Centos \$releasever" >> /etc/yum.repos.d/salt-latest.repo
+ echo "baseurl=https://repo.saltstack.com/py3/redhat/7/\$basearch/latest" >> /etc/yum.repos.d/salt-latest.repo
+ echo "failovermethod=priority" >> /etc/yum.repos.d/salt-latest.repo
+ echo "enabled=1" >> /etc/yum.repos.d/salt-latest.repo
+ echo "gpgcheck=1" >> /etc/yum.repos.d/salt-latest.repo
+ echo "gpgkey=file:///etc/pki/rpm-gpg/saltstack-signing-key" >> /etc/yum.repos.d/salt-latest.repo
+
+                        # Same workaround for the pinned 2019.2 repo
+ echo "[salt-2019.2]" > /etc/yum.repos.d/salt-2019-2.repo
+ echo "name=SaltStack Latest Release Channel for RHEL/Centos \$releasever" >> /etc/yum.repos.d/salt-2019-2.repo
+ echo "baseurl=https://repo.saltstack.com/py3/redhat/7/\$basearch/2019.2" >> /etc/yum.repos.d/salt-2019-2.repo
+ echo "failovermethod=priority" >> /etc/yum.repos.d/salt-2019-2.repo
+ echo "enabled=1" >> /etc/yum.repos.d/salt-2019-2.repo
+ echo "gpgcheck=1" >> /etc/yum.repos.d/salt-2019-2.repo
+ echo "gpgkey=file:///etc/pki/rpm-gpg/saltstack-signing-key" >> /etc/yum.repos.d/salt-2019-2.repo
+
+ cat > /etc/yum.repos.d/wazuh.repo <<\EOF
+[wazuh_repo]
+gpgcheck=1
+gpgkey=file:///etc/pki/rpm-gpg/GPG-KEY-WAZUH
+enabled=1
+name=Wazuh repository
+baseurl=https://packages.wazuh.com/3.x/yum/
+protect=1
+EOF
+ else
+ yum -y install https://repo.saltstack.com/py3/redhat/salt-py3-repo-latest-2.el7.noarch.rpm
+ cp /etc/yum.repos.d/salt-py3-latest.repo /etc/yum.repos.d/salt-2019-2.repo
+ sed -i 's/latest/2019.2/g' /etc/yum.repos.d/salt-2019-2.repo
+cat > /etc/yum.repos.d/wazuh.repo <<\EOF
+[wazuh_repo]
+gpgcheck=1
+gpgkey=file:///etc/pki/rpm-gpg/GPG-KEY-WAZUH
+enabled=1
+name=Wazuh repository
+baseurl=https://packages.wazuh.com/3.x/yum/
+protect=1
+EOF
+ fi
+ fi
+
+ yum clean expire-cache
+ yum -y install epel-release salt-minion-2019.2.2 yum-utils device-mapper-persistent-data lvm2 openssl
+    yum -y update --exclude=salt*
+ systemctl enable salt-minion
+
+ if [ $INSTALLTYPE == 'MASTERONLY' ] || [ $INSTALLTYPE == 'EVALMODE' ] || [ $INSTALLTYPE == 'HELIXSENSOR' ]; then
+ yum -y install salt-master-2019.2.2 python3 python36-m2crypto salt-minion-2019.2.2 python36-dateutil python36-mysql python36-docker
+ systemctl enable salt-master
+ else
+ yum -y install salt-minion-2019.2.2 python3 python36-m2crypto python36-dateutil python36-docker
+ fi
+ echo "exclude=salt*" >> /etc/yum.conf
+
+ # Our OS is not CentOS
+ else
+ ADDUSER=useradd
+ DEBIAN_FRONTEND=noninteractive apt-get -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" upgrade
+
+ # Add the pre-requisites for installing docker-ce
+ apt-get -y install ca-certificates curl software-properties-common apt-transport-https openssl >> $SETUPLOG 2>&1
+
+ # Grab the version from the os-release file
+ UVER=$(grep VERSION_ID /etc/os-release | awk -F '[ "]' '{print $2}')
+
+ # Nasty hack but required for now
+ if [ $INSTALLTYPE == 'MASTERONLY' ] || [ $INSTALLTYPE == 'EVALMODE' ]; then
+
+ # Install the repo for salt
+ wget --inet4-only -O - https://repo.saltstack.com/apt/ubuntu/$UVER/amd64/latest/SALTSTACK-GPG-KEY.pub | apt-key add -
+ wget --inet4-only -O - https://repo.saltstack.com/apt/ubuntu/$UVER/amd64/2019.2/SALTSTACK-GPG-KEY.pub | apt-key add -
+ echo "deb http://repo.saltstack.com/apt/ubuntu/$UVER/amd64/latest xenial main" > /etc/apt/sources.list.d/saltstack.list
+ echo "deb http://repo.saltstack.com/apt/ubuntu/$UVER/amd64/2019.2 xenial main" > /etc/apt/sources.list.d/saltstack2019.list
+
+ # Lets get the docker repo added
+ curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
+ add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
+
+ # Create a place for the keys
+ mkdir -p /opt/so/gpg
+ wget --inet4-only -O /opt/so/gpg/SALTSTACK-GPG-KEY.pub https://repo.saltstack.com/apt/ubuntu/$UVER/amd64/latest/SALTSTACK-GPG-KEY.pub
+ wget --inet4-only -O /opt/so/gpg/docker.pub https://download.docker.com/linux/ubuntu/gpg
+ wget --inet4-only -O /opt/so/gpg/GPG-KEY-WAZUH https://packages.wazuh.com/key/GPG-KEY-WAZUH
+
+ # Get key and install wazuh
+ curl -s https://packages.wazuh.com/key/GPG-KEY-WAZUH | apt-key add -
+ # Add repo
+ echo "deb https://packages.wazuh.com/3.x/apt/ stable main" | tee /etc/apt/sources.list.d/wazuh.list
+
+ # Initialize the new repos
+ apt-get update >> $SETUPLOG 2>&1
+ # Need to add python packages here
+ apt-get -y install salt-minion=2019.2.2+ds-1 salt-common=2019.2.2+ds-1 python-dateutil python-m2crypto >> $SETUPLOG 2>&1
+ apt-mark hold salt-minion salt-common
+
+ else
+
+ # Copy down the gpg keys and install them from the master
+ mkdir $TMP/gpg
+ echo "scp the gpg keys and install them from the master"
+ scp -v -i /root/.ssh/so.key socore@$MSRV:/opt/so/gpg/* $TMP/gpg
+ echo "Using apt-key add to add SALTSTACK-GPG-KEY.pub and GPG-KEY-WAZUH"
+ apt-key add $TMP/gpg/SALTSTACK-GPG-KEY.pub
+ apt-key add $TMP/gpg/GPG-KEY-WAZUH
+ echo "deb http://repo.saltstack.com/apt/ubuntu/$UVER/amd64/latest xenial main" > /etc/apt/sources.list.d/saltstack.list
+ echo "deb https://packages.wazuh.com/3.x/apt/ stable main" | tee /etc/apt/sources.list.d/wazuh.list
+ # Initialize the new repos
+ apt-get update >> $SETUPLOG 2>&1
+ apt-get -y install salt-minion=2019.2.2+ds-1 salt-common=2019.2.2+ds-1 python-dateutil python-m2crypto >> $SETUPLOG 2>&1
+ apt-mark hold salt-minion salt-common
+
+ fi
+
+ fi
+
+}
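+
+# Illustrative sanity check after the Salt packages above are installed (run manually,
+# not part of the scripted flow): `salt-minion --version` should report 2019.2.2 and
+# `systemctl is-enabled salt-minion` should print "enabled".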
+
+salt_checkin() {
+ # Master State to Fix Mine Usage
+ if [ $INSTALLTYPE == 'MASTERONLY' ] || [ $INSTALLTYPE == 'EVALMODE' ] || [ $INSTALLTYPE == 'HELIXSENSOR' ]; then
+ echo "Building Certificate Authority"
+ salt-call state.apply ca >> $SETUPLOG 2>&1
+ echo " *** Restarting Salt to fix any SSL errors. ***"
+ service salt-master restart >> $SETUPLOG 2>&1
+ sleep 5
+ service salt-minion restart >> $SETUPLOG 2>&1
+ sleep 15
+    echo " Applying a mine hack "
+ sudo salt '*' mine.send x509.get_pem_entries glob_path=/etc/pki/ca.crt >> $SETUPLOG 2>&1
+ echo " Applying SSL state "
+ salt-call state.apply ssl >> $SETUPLOG 2>&1
+ echo "Still Working... Hang in there"
+ #salt-call state.highstate
+
+ else
+
+ # Run Checkin
+ salt-call state.apply ca >> $SETUPLOG 2>&1
+ salt-call state.apply ssl >> $SETUPLOG 2>&1
+ #salt-call state.highstate >> $SETUPLOG 2>&1
+
+ fi
+
+}
+
+salt_firstcheckin() {
+
+ #First Checkin
+ salt-call state.highstate >> $SETUPLOG 2>&1
+
+}
+
+salt_master_directories() {
+
+  # Create the Salt master directories
+ mkdir -p /opt/so/saltstack/salt
+ mkdir -p /opt/so/saltstack/pillar
+
+ # Copy over the salt code and templates
+ if [ $INSTALLMETHOD == 'iso' ]; then
+ cp -R /home/onion/SecurityOnion/pillar/* /opt/so/saltstack/pillar/
+ cp -R /home/onion/SecurityOnion/salt/* /opt/so/saltstack/salt/
+ else
+ cp -R $SCRIPTDIR/../pillar/* /opt/so/saltstack/pillar/
+ cp -R $SCRIPTDIR/../salt/* /opt/so/saltstack/salt/
+ fi
+
+ chmod +x /opt/so/saltstack/pillar/firewall/addfirewall.sh
+ chmod +x /opt/so/saltstack/pillar/data/addtotab.sh
+
+}
+
+salt_install_mysql_deps() {
+
+ if [ $OS == 'centos' ]; then
+ yum -y install mariadb-devel
+ elif [ $OS == 'ubuntu' ]; then
+ apt-get -y install python-mysqldb
+ fi
+
+}
+
+sensor_pillar() {
+ if [ $INSTALLTYPE == 'HELIXSENSOR' ]; then
+ SENSORPILLARPATH=/opt/so/saltstack/pillar/sensors
+ mkdir -p $TMP
+ mkdir -p $SENSORPILLARPATH
+ else
+ SENSORPILLARPATH=$TMP/pillar/sensors
+ fi
+ if [ ! -d $SENSORPILLARPATH ]; then
+ mkdir -p $SENSORPILLARPATH
+ fi
+
+ # Create the sensor pillar
+ touch $SENSORPILLARPATH/$MINION_ID.sls
+ echo "sensor:" > $SENSORPILLARPATH/$MINION_ID.sls
+ echo " interface: bond0" >> $SENSORPILLARPATH/$MINION_ID.sls
+ echo " mainip: $MAINIP" >> $SENSORPILLARPATH/$MINION_ID.sls
+ echo " mainint: $MAININT" >> $SENSORPILLARPATH/$MINION_ID.sls
+ if [ $NSMSETUP == 'ADVANCED' ]; then
+ echo " bro_pins:" >> $SENSORPILLARPATH/$MINION_ID.sls
+ for PIN in $BROPINS; do
+ PIN=$(echo $PIN | cut -d\" -f2)
+ echo " - $PIN" >> $SENSORPILLARPATH/$MINION_ID.sls
+ done
+ echo " suripins:" >> $SENSORPILLARPATH/$MINION_ID.sls
+ for SPIN in $SURIPINS; do
+ SPIN=$(echo $SPIN | cut -d\" -f2)
+ echo " - $SPIN" >> $SENSORPILLARPATH/$MINION_ID.sls
+ done
+ elif [ $INSTALLTYPE == 'HELIXSENSOR' ]; then
+ echo " bro_lbprocs: $LBPROCS" >> $SENSORPILLARPATH/$MINION_ID.sls
+ echo " suriprocs: $LBPROCS" >> $SENSORPILLARPATH/$MINION_ID.sls
+ else
+ echo " bro_lbprocs: $BASICBRO" >> $SENSORPILLARPATH/$MINION_ID.sls
+ echo " suriprocs: $BASICSURI" >> $SENSORPILLARPATH/$MINION_ID.sls
+ fi
+ echo " brobpf:" >> $SENSORPILLARPATH/$MINION_ID.sls
+ echo " pcapbpf:" >> $SENSORPILLARPATH/$MINION_ID.sls
+ echo " nidsbpf:" >> $SENSORPILLARPATH/$MINION_ID.sls
+ echo " master: $MSRV" >> $SENSORPILLARPATH/$MINION_ID.sls
+ echo " mtu: $MTU" >> $SENSORPILLARPATH/$MINION_ID.sls
+ if [ $HNSENSOR != 'inherit' ]; then
+ echo " hnsensor: $HNSENSOR" >> $SENSORPILLARPATH/$MINION_ID.sls
+ fi
+ echo " access_key: $ACCESS_KEY" >> $SENSORPILLARPATH/$MINION_ID.sls
+ echo " access_secret: $ACCESS_SECRET" >> $SENSORPILLARPATH/$MINION_ID.sls
+
+}
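+
+# For reference, the generated $MINION_ID.sls ends up shaped roughly like the sketch
+# below (values are hypothetical placeholders, not defaults):
+#
+#   sensor:
+#     interface: bond0
+#     mainip: 10.0.0.5
+#     mainint: eth0
+#     bro_lbprocs: 2
+#     suriprocs: 2
+#     brobpf:
+#     pcapbpf:
+#     nidsbpf:
+#     master: master01
+#     mtu: 1500
+#     access_key: <key>
+#     access_secret: <secret>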
+
+set_environment_var() {
+
+ echo "Setting environment variable: $1"
+
+ export "$1"
+ echo "$1" >> /etc/environment
+
+}
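+
+# Illustrative call (hypothetical value): set_environment_var "PATCHSCHEDULENAME=manual"
+# exports the pair into the running shell and persists it in /etc/environment.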
+
+set_hostname() {
+
+ hostnamectl set-hostname --static $HOSTNAME
+ echo "127.0.0.1 $HOSTNAME $HOSTNAME.localdomain localhost localhost.localdomain localhost4 localhost4.localdomain" > /etc/hosts
+ echo "::1 localhost localhost.localdomain localhost6 localhost6.localdomain6" >> /etc/hosts
+ echo $HOSTNAME > /etc/hostname
+ HOSTNAME=$(cat /etc/hostname)
+ MINION_ID=$(echo $HOSTNAME | awk -F. {'print $1'})
+  if [ $INSTALLTYPE != 'MASTERONLY' ] && [ $INSTALLTYPE != 'EVALMODE' ]; then
+ if [[ $TESTHOST = *"not found"* ]] || [[ $TESTHOST = *"connection timed out"* ]]; then
+ if ! grep -q $MSRVIP /etc/hosts; then
+ echo "$MSRVIP $MSRV" >> /etc/hosts
+ fi
+ fi
+ fi
+
+}
+
+set_hostname_iso() {
+
+ hostnamectl set-hostname --static $HOSTNAME
+ echo "127.0.0.1 $HOSTNAME $HOSTNAME.localdomain localhost localhost.localdomain localhost4 localhost4.localdomain" > /etc/hosts
+ echo "::1 localhost localhost.localdomain localhost6 localhost6.localdomain6" >> /etc/hosts
+ echo $HOSTNAME > /etc/hostname
+
+}
+
+set_initial_firewall_policy() {
+
+ get_main_ip
+ if [ $INSTALLTYPE == 'MASTERONLY' ]; then
+ printf " - $MAINIP\n" >> /opt/so/saltstack/pillar/firewall/minions.sls
+ printf " - $MAINIP\n" >> /opt/so/saltstack/pillar/firewall/masterfw.sls
+ /opt/so/saltstack/pillar/data/addtotab.sh mastertab $MINION_ID $MAINIP $CPUCORES $RANDOMUID $MAININT $FSROOT $FSNSM
+ fi
+
+ if [ $INSTALLTYPE == 'EVALMODE' ]; then
+ printf " - $MAINIP\n" >> /opt/so/saltstack/pillar/firewall/minions.sls
+ printf " - $MAINIP\n" >> /opt/so/saltstack/pillar/firewall/masterfw.sls
+ printf " - $MAINIP\n" >> /opt/so/saltstack/pillar/firewall/forward_nodes.sls
+ printf " - $MAINIP\n" >> /opt/so/saltstack/pillar/firewall/storage_nodes.sls
+ /opt/so/saltstack/pillar/data/addtotab.sh evaltab $MINION_ID $MAINIP $CPUCORES $RANDOMUID $MAININT $FSROOT $FSNSM bond0
+ fi
+
+ if [ $INSTALLTYPE == 'HELIXSENSOR' ]; then
+ printf " - $MAINIP\n" >> /opt/so/saltstack/pillar/firewall/minions.sls
+ printf " - $MAINIP\n" >> /opt/so/saltstack/pillar/firewall/masterfw.sls
+ printf " - $MAINIP\n" >> /opt/so/saltstack/pillar/firewall/forward_nodes.sls
+ fi
+
+ if [ $INSTALLTYPE == 'SENSORONLY' ]; then
+ ssh -i /root/.ssh/so.key socore@$MSRV sudo /opt/so/saltstack/pillar/firewall/addfirewall.sh minions $MAINIP
+ ssh -i /root/.ssh/so.key socore@$MSRV sudo /opt/so/saltstack/pillar/firewall/addfirewall.sh forward_nodes $MAINIP
+ ssh -i /root/.ssh/so.key socore@$MSRV sudo /opt/so/saltstack/pillar/data/addtotab.sh sensorstab $MINION_ID $MAINIP $CPUCORES $RANDOMUID $MAININT $FSROOT $FSNSM bond0
+ fi
+
+ if [ $INSTALLTYPE == 'STORAGENODE' ]; then
+ ssh -i /root/.ssh/so.key socore@$MSRV sudo /opt/so/saltstack/pillar/firewall/addfirewall.sh minions $MAINIP
+ ssh -i /root/.ssh/so.key socore@$MSRV sudo /opt/so/saltstack/pillar/firewall/addfirewall.sh storage_nodes $MAINIP
+ ssh -i /root/.ssh/so.key socore@$MSRV sudo /opt/so/saltstack/pillar/data/addtotab.sh nodestab $MINION_ID $MAINIP $CPUCORES $RANDOMUID $MAININT $FSROOT $FSNSM
+ fi
+
+ if [ $INSTALLTYPE == 'PARSINGNODE' ]; then
+ echo "blah"
+ fi
+
+ if [ $INSTALLTYPE == 'HOTNODE' ]; then
+ echo "blah"
+ fi
+
+ if [ $INSTALLTYPE == 'WARMNODE' ]; then
+ echo "blah"
+ fi
+
+}
+
+# Set up the management interface on the ISO
+set_management_interface() {
+
+ if [ $ADDRESSTYPE == 'DHCP' ]; then
+ /usr/bin/nmcli con up $MNIC
+ /usr/bin/nmcli con mod $MNIC connection.autoconnect yes
+ else
+ # Set Static IP
+ /usr/bin/nmcli con mod $MNIC ipv4.addresses $MIP/$MMASK ipv4.gateway $MGATEWAY \
+ ipv4.dns $MDNS ipv4.dns-search $MSEARCH ipv4.method manual
+ /usr/bin/nmcli con up $MNIC
+ /usr/bin/nmcli con mod $MNIC connection.autoconnect yes
+ fi
+
+}
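+
+# With hypothetical answers MNIC=eth0, MIP=192.168.1.50, MMASK=24, MGATEWAY=192.168.1.1,
+# MDNS=8.8.8.8, MSEARCH=example.local, the static branch expands to roughly:
+#   nmcli con mod eth0 ipv4.addresses 192.168.1.50/24 ipv4.gateway 192.168.1.1 \
+#     ipv4.dns 8.8.8.8 ipv4.dns-search example.local ipv4.method manual
+#   nmcli con up eth0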
+
+set_node_type() {
+
+  # Determine the node type based on whiptail choice
+ if [ $INSTALLTYPE == 'STORAGENODE' ] || [ $INSTALLTYPE == 'EVALMODE' ]; then
+ NODETYPE='storage'
+ fi
+ if [ $INSTALLTYPE == 'PARSINGNODE' ]; then
+ NODETYPE='parser'
+ fi
+ if [ $INSTALLTYPE == 'HOTNODE' ]; then
+ NODETYPE='hot'
+ fi
+ if [ $INSTALLTYPE == 'WARMNODE' ]; then
+ NODETYPE='warm'
+ fi
+
+}
+
+set_updates() {
+ echo "MASTERUPDATES is $MASTERUPDATES"
+ if [ $MASTERUPDATES == 'MASTER' ]; then
+ if [ $OS == 'centos' ]; then
+ if ! grep -q $MSRV /etc/yum.conf; then
+ echo "proxy=http://$MSRV:3142" >> /etc/yum.conf
+ fi
+
+ else
+
+ # Set it up so the updates roll through the master
+ echo "Acquire::http::Proxy \"http://$MSRV:3142\";" > /etc/apt/apt.conf.d/00Proxy
+ echo "Acquire::https::Proxy \"http://$MSRV:3142\";" >> /etc/apt/apt.conf.d/00Proxy
+
+ fi
+ fi
+}
+
+update_sudoers() {
+
+ if ! grep -qE '^socore\ ALL=\(ALL\)\ NOPASSWD:(\/usr\/bin\/salt\-key|\/opt\/so\/saltstack)' /etc/sudoers; then
+ # Update Sudoers so that socore can accept keys without a password
+ echo "socore ALL=(ALL) NOPASSWD:/usr/bin/salt-key" | sudo tee -a /etc/sudoers
+ echo "socore ALL=(ALL) NOPASSWD:/opt/so/saltstack/pillar/firewall/addfirewall.sh" | sudo tee -a /etc/sudoers
+ echo "socore ALL=(ALL) NOPASSWD:/opt/so/saltstack/pillar/data/addtotab.sh" | sudo tee -a /etc/sudoers
+ else
+ echo "User socore already granted sudo privileges"
+ fi
+
+}
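+
+# Note: these entries are appended to /etc/sudoers directly; if they are ever edited by
+# hand later, `visudo -c` is a reasonable (optional) way to confirm the file still parses.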
diff --git a/install_scripts/disable-checksum-offload.sh b/setup/install_scripts/disable-checksum-offload.sh
similarity index 98%
rename from install_scripts/disable-checksum-offload.sh
rename to setup/install_scripts/disable-checksum-offload.sh
index 9cc0b5d5b..32b8d46e6 100644
--- a/install_scripts/disable-checksum-offload.sh
+++ b/setup/install_scripts/disable-checksum-offload.sh
@@ -6,4 +6,4 @@ if [ "$NM_DISPATCHER_ACTION" == "pre-up" ]; then
ethtool -K $DEVICE_IFACE $i off;
done
fi
-fi
\ No newline at end of file
+fi
diff --git a/setup/so-setup.sh b/setup/so-setup.sh
new file mode 100644
index 000000000..81cde370b
--- /dev/null
+++ b/setup/so-setup.sh
@@ -0,0 +1,770 @@
+#!/bin/bash
+
+# Copyright 2014,2015,2016,2017,2018,2019 Security Onion Solutions, LLC
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+# Source the other pieces of the setup
+SCRIPTDIR=$(dirname "$0")
+source $SCRIPTDIR/functions.sh
+source $SCRIPTDIR/whiptail.sh
+
+# See if this is an ISO install
+OPTIONS=$1
+
+if [[ $OPTIONS = 'iso' ]]; then
+ INSTALLMETHOD="iso"
+else
+ INSTALLMETHOD="network"
+fi
+
+# Global Variables
+HOSTNAME=$(cat /etc/hostname)
+MINION_ID=$(echo $HOSTNAME | awk -F. {'print $1'})
+TOTAL_MEM=`grep MemTotal /proc/meminfo | awk '{print $2}' | sed -r 's/.{3}$//'`
+NICS=$(ip link | awk -F: '$0 !~ "lo|vir|veth|br|docker|wl|^[^0-9]"{print $2 " \"" "Interface" "\"" " OFF"}')
+CPUCORES=$(cat /proc/cpuinfo | grep processor | wc -l)
+LISTCORES=$(cat /proc/cpuinfo | grep processor | awk '{print $3 " \"" "core" "\""}')
+RANDOMUID=$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 16 | head -n 1)
+NODE_ES_PORT="9200"
+SETUPLOG="/root/sosetup.log"
+# End Global Variables
+
+# Reset the Install Log
+date -u >$SETUPLOG 2>&1
+echo "stty size is: $(stty size)" >> $SETUPLOG 2>&1
+
+# Check for prerequisites
+got_root
+detect_os
+
+if [ $OS == ubuntu ]; then
+  # Override the horrible Ubuntu whiptail color palette
+ update-alternatives --set newt-palette /etc/newt/palette.original
+fi
+
+# Question Time
+echo "Asking user if they are sure they want to proceed" >> $SETUPLOG 2>&1
+if (whiptail_you_sure) ; then
+
+ # Create a temp dir to get started
+ install_prep
+ setterm -blank 0
+
+ if [ $INSTALLMETHOD == network ]; then
+ # Let folks know they need their management interface already set up.
+ whiptail_network_notice
+
+ # Set the hostname to reduce errors
+ whiptail_set_hostname
+
+ # Set management nic
+ whiptail_management_nic
+
+# whiptail_create_socore_user
+# SCMATCH=no
+# while [ $SCMATCH != yes ]; do
+# whiptail_create_socore_user_password1
+# whiptail_create_socore_user_password2
+# check_socore_pass
+# done
+
+ else
+
+ # Set the hostname
+ whiptail_set_hostname
+ whiptail_management_nic
+
+ # Ask if you want dhcp or static
+ whiptail_dhcp_or_static
+
+ # Do this if it static is selected
+ if [ $ADDRESSTYPE != 'DHCP' ]; then
+ whiptail_management_interface_ip
+ whiptail_management_interface_mask
+ whiptail_management_interface_gateway
+ whiptail_management_interface_dns
+ whiptail_management_interface_dns_search
+ fi
+
+ # Go ahead and bring up networking so other parts of the install work
+ set_hostname_iso
+ set_management_interface
+
+ # Add an admin user
+ whiptail_create_admin_user
+
+ # Get a password for the admin user
+ APMATCH=no
+ while [ $APMATCH != yes ]; do
+ whiptail_create_admin_user_password1
+ whiptail_create_admin_user_password2
+ check_admin_pass
+ done
+
+ fi
+
+ # Go ahead and gen the keys so we can use them for any sensor type - Disabled for now
+ #minio_generate_keys
+
+ # What kind of install are we doing?
+ whiptail_install_type
+
+ # How do we want to handle OS patching? manual, auto or scheduled days and hours
+ whiptail_patch_schedule
+ case $PATCHSCHEDULE in
+ 'New Schedule')
+ whiptail_patch_schedule_select_days
+ whiptail_patch_schedule_select_hours
+ whiptail_patch_name_new_schedule
+ patch_schedule_os_new
+ ;;
+ 'Import Schedule')
+ whiptail_patch_schedule_import
+ ;;
+ Automatic)
+ PATCHSCHEDULENAME=auto
+ ;;
+ Manual)
+ PATCHSCHEDULENAME=manual
+ ;;
+ esac
+
+ ####################
+ ## Helix ##
+ ####################
+ if [ $INSTALLTYPE == 'HELIXSENSOR' ]; then
+ MASTERUPDATES=OPEN
+ filter_unused_nics
+ whiptail_bond_nics
+ whiptail_helix_apikey
+ whiptail_homenet_master
+ RULESETUP=ETOPEN
+ NSMSETUP=BASIC
+ HNSENSOR=inherit
+ LS_HEAP_SIZE="1000m"
+ calculate_useable_cores
+ whiptail_make_changes
+ set_hostname
+ clear_master
+ mkdir -p /nsm
+ get_filesystem_root
+ get_filesystem_nsm
+ get_main_ip
+ if [ $INSTALLMETHOD == iso ]; then
+ add_admin_user
+ disable_onion_user
+ fi
+ #add_socore_user_master
+ # Install salt and dependencies
+ {
+ sleep 0.5
+ echo -e "XXX\n0\nCreating Bond Interface... \nXXX"
+ create_sensor_bond >> $SETUPLOG 2>&1
+ echo -e "XXX\n1\nGenerating Sensor Pillar... \nXXX"
+ sensor_pillar >> $SETUPLOG 2>&1
+ echo -e "XXX\n2\nInstalling and configuring Salt... \nXXX"
+ echo " ** Installing Salt and Dependencies **" >> $SETUPLOG
+ saltify >> $SETUPLOG 2>&1
+ echo -e "XXX\n5\nInstalling Docker... \nXXX"
+ docker_install >> $SETUPLOG 2>&1
+ echo -e "XXX\n10\nConfiguring Salt Master... \nXXX"
+ echo " ** Configuring Minion **" >> $SETUPLOG
+ configure_minion helix >> $SETUPLOG 2>&1
+ echo " ** Installing Salt Master **" >> $SETUPLOG
+ install_master >> $SETUPLOG 2>&1
+ salt_master_directories >> $SETUPLOG 2>&1
+ update_sudoers >> $SETUPLOG 2>&1
+ chown_salt_master >> $SETUPLOG 2>&1
+ es_heapsize >> $SETUPLOG 2>&1
+ ls_heapsize >> $SETUPLOG 2>&1
+ echo -e "XXX\n25\nConfiguring Default Pillars... \nXXX"
+ master_static >> $SETUPLOG 2>&1
+ echo "** Generating the master pillar **" >> $SETUPLOG
+ master_pillar >> $SETUPLOG 2>&1
+ echo "** Generating the patch pillar **" >> $SETUPLOG
+ patch_pillar >> $SETUPLOG 2>&1
+ echo "** Generating the FireEye pillar **" >> $SETUPLOG
+ fireeye_pillar >> $SETUPLOG 2>&1
+ sensor_pillar >> $SETUPLOG 2>&1
+ echo -e "XXX\n24\nCopying Minion Pillars to Master... \nXXX"
+ copy_minion_tmp_files >> $SETUPLOG 2>&1
+ # Do a checkin to push the key up
+ echo "** Pushing the key up to Master **" >> $SETUPLOG
+ salt_firstcheckin >> $SETUPLOG 2>&1
+ # Accept the Master Key
+ echo "** Accepting the key on the master **" >> $SETUPLOG
+ accept_salt_key_local >> $SETUPLOG 2>&1
+ echo -e "XXX\n35\nConfiguring Firewall... \nXXX"
+ # Open the firewall
+ echo "** Setting the initial firewall policy **" >> $SETUPLOG
+ set_initial_firewall_policy >> $SETUPLOG 2>&1
+ echo -e "XXX\n40\nGenerating CA... \nXXX"
+ salt_checkin >> $SETUPLOG 2>&1
+ salt-call state.apply ca >> $SETUPLOG 2>&1
+ salt-call state.apply ssl >> $SETUPLOG 2>&1
+ echo -e "XXX\n43\nInstalling Common Components... \nXXX"
+ salt-call state.apply common >> $SETUPLOG 2>&1
+ echo -e "XXX\n45\nApplying firewall rules... \nXXX"
+ salt-call state.apply firewall >> $SETUPLOG 2>&1
+ salt-call state.apply master >> $SETUPLOG 2>&1
+ salt-call state.apply idstools >> $SETUPLOG 2>&1
+ echo -e "XXX\n40\nInstalling Redis... \nXXX"
+ salt-call state.apply redis >> $SETUPLOG 2>&1
+    echo -e "XXX\n60\nInstalling Logstash... \nXXX"
+ salt-call state.apply logstash >> $SETUPLOG 2>&1
+ echo -e "XXX\n75\nInstalling Filebeat... \nXXX"
+ salt-call state.apply filebeat >> $SETUPLOG 2>&1
+ salt-call state.apply utility >> $SETUPLOG 2>&1
+ salt-call state.apply schedule >> $SETUPLOG 2>&1
+    echo -e "XXX\n85\nEnabling Check-in at Boot... \nXXX"
+ checkin_at_boot >> $SETUPLOG 2>&1
+    echo -e "XXX\n97\nFinishing touches... \nXXX"
+ filter_unused_nics >> $SETUPLOG 2>&1
+ network_setup >> $SETUPLOG 2>&1
+ echo -e "XXX\n98\nVerifying Setup... \nXXX"
+ salt-call state.highstate >> $SETUPLOG 2>&1
+ } |whiptail --title "Hybrid Hunter Install" --gauge "Please wait while installing" 6 60 0
+ GOODSETUP=$(tail -10 $SETUPLOG | grep Failed | awk '{ print $2}')
+ if [[ $GOODSETUP == '0' ]]; then
+ whiptail_setup_complete
+ shutdown -r now
+ else
+ whiptail_setup_failed
+ shutdown -r now
+ fi
+
+ fi
+
+ ####################
+ ## Master ##
+ ####################
+ if [ $INSTALLTYPE == 'MASTERONLY' ]; then
+
+ # Would you like to do an advanced install?
+ whiptail_master_adv
+
+ # Choose Zeek or Community NSM
+ whiptail_bro_version
+
+ # Select Snort or Suricata
+ whiptail_nids
+
+ # Snag the HOME_NET
+ whiptail_homenet_master
+
+ # Pick your Ruleset
+ whiptail_rule_setup
+
+ # Get the code if it isn't ET Open
+ if [ $RULESETUP != 'ETOPEN' ]; then
+ # Get the code
+ whiptail_oinkcode
+ fi
+
+ # Find out how to handle updates
+ whiptail_master_updates
+ whiptail_enable_components
+ process_components
+
+    # Do Advanced Setup if they chose it
+ if [ $MASTERADV == 'ADVANCED' ]; then
+ # Ask which bro logs to enable - Need to add Suricata check
+ if [ $BROVERSION != 'SURICATA' ]; then
+ whiptail_master_adv_service_brologs
+ fi
+ fi
+
+ # Get a password for the socore user
+ whiptail_create_socore_user
+ SCMATCH=no
+ while [ $SCMATCH != yes ]; do
+ whiptail_create_socore_user_password1
+ whiptail_create_socore_user_password2
+ check_socore_pass
+ done
+
+ # Last Chance to back out
+ whiptail_make_changes
+ set_hostname
+ generate_passwords
+ auth_pillar
+ clear_master
+ mkdir -p /nsm
+ get_filesystem_root
+ get_filesystem_nsm
+ # Enable Bro Logs
+ # comment this out since we already copy this file to the destination that this function writes to
+ #bro_logs_enabled
+
+ # Figure out the main IP address
+ get_main_ip
+ if [ $INSTALLMETHOD == iso ]; then
+ add_admin_user
+ disable_onion_user
+ fi
+
+ # Add the user so we can sit back and relax
+ #echo ""
+ #echo "**** Please set a password for socore. You will use this password when setting up other Nodes/Sensors"
+ #echo ""
+ add_socore_user_master
+
+ # Install salt and dependencies
+ {
+ sleep 0.5
+ echo -e "XXX\n1\nInstalling and configuring Salt... \nXXX"
+ echo " ** Installing Salt and Dependencies **" >> $SETUPLOG
+ saltify >> $SETUPLOG 2>&1
+ echo -e "XXX\n5\nInstalling Docker... \nXXX"
+ docker_install >> $SETUPLOG 2>&1
+ echo -e "XXX\n10\nConfiguring Salt Master... \nXXX"
+ echo " ** Configuring Minion **" >> $SETUPLOG
+ configure_minion master >> $SETUPLOG 2>&1
+ echo " ** Installing Salt Master **" >> $SETUPLOG
+ install_master >> $SETUPLOG 2>&1
+ salt_install_mysql_deps >> $SETUPLOG 2>&1
+ salt_master_directories >> $SETUPLOG 2>&1
+ update_sudoers >> $SETUPLOG 2>&1
+ chown_salt_master >> $SETUPLOG 2>&1
+ es_heapsize >> $SETUPLOG 2>&1
+ ls_heapsize >> $SETUPLOG 2>&1
+ echo -e "XXX\n25\nConfiguring Default Pillars... \nXXX"
+ master_static >> $SETUPLOG 2>&1
+ echo "** Generating the master pillar **" >> $SETUPLOG
+ master_pillar >> $SETUPLOG 2>&1
+ echo "** Generating the patch pillar **" >> $SETUPLOG
+ patch_pillar >> $SETUPLOG 2>&1
+ echo -e "XXX\n30\nAccepting Salt Keys... \nXXX"
+ echo -e "XXX\n24\nCopying Minion Pillars to Master... \nXXX"
+ copy_minion_tmp_files >> $SETUPLOG 2>&1
+ # Do a checkin to push the key up
+ echo "** Pushing the key up to Master **" >> $SETUPLOG
+ salt_firstcheckin >> $SETUPLOG 2>&1
+ # Accept the Master Key
+ echo "** Accepting the key on the master **" >> $SETUPLOG
+ accept_salt_key_local >> $SETUPLOG 2>&1
+ echo -e "XXX\n35\nConfiguring Firewall... \nXXX"
+ # Open the firewall
+ echo "** Setting the initial firewall policy **" >> $SETUPLOG
+ set_initial_firewall_policy >> $SETUPLOG 2>&1
+ # Do the big checkin but first let them know it will take a bit.
+ echo -e "XXX\n40\nGenerating CA... \nXXX"
+ salt_checkin >> $SETUPLOG 2>&1
+ salt-call state.apply ca >> $SETUPLOG 2>&1
+ salt-call state.apply ssl >> $SETUPLOG 2>&1
+ echo -e "XXX\n43\nInstalling Common Components... \nXXX"
+ salt-call state.apply common >> $SETUPLOG 2>&1
+ echo -e "XXX\n45\nApplying firewall rules... \nXXX"
+ salt-call state.apply firewall >> $SETUPLOG 2>&1
+ salt-call state.apply master >> $SETUPLOG 2>&1
+ salt-call state.apply idstools >> $SETUPLOG 2>&1
+ echo -e "XXX\n40\nInstalling Redis... \nXXX"
+ salt-call state.apply redis >> $SETUPLOG 2>&1
+ if [[ $OSQUERY == '1' ]]; then
+ echo -e "XXX\n41\nInstalling MySQL... \nXXX"
+ salt-call state.apply mysql >> $SETUPLOG 2>&1
+ fi
+ echo -e "XXX\n45\nInstalling Elastic Components... \nXXX"
+ salt-call state.apply elasticsearch >> $SETUPLOG 2>&1
+ salt-call state.apply logstash >> $SETUPLOG 2>&1
+ salt-call state.apply kibana >> $SETUPLOG 2>&1
+ salt-call state.apply elastalert >> $SETUPLOG 2>&1
+ if [[ $WAZUH == '1' ]]; then
+ echo -e "XXX\n68\nInstalling Wazuh... \nXXX"
+ salt-call state.apply wazuh >> $SETUPLOG 2>&1
+ fi
+ echo -e "XXX\n75\nInstalling Filebeat... \nXXX"
+ salt-call state.apply filebeat >> $SETUPLOG 2>&1
+ salt-call state.apply utility >> $SETUPLOG 2>&1
+ salt-call state.apply schedule >> $SETUPLOG 2>&1
+ if [[ $OSQUERY == '1' ]]; then
+ echo -e "XXX\n79\nInstalling Fleet... \nXXX"
+ salt-call state.apply fleet >> $SETUPLOG 2>&1
+ salt-call state.apply launcher >> $SETUPLOG 2>&1
+ fi
+ echo -e "XXX\n85\nConfiguring SOctopus... \nXXX"
+ salt-call state.apply soctopus >> $SETUPLOG 2>&1
+ if [[ $THEHIVE == '1' ]]; then
+ echo -e "XXX\n87\nInstalling TheHive... \nXXX"
+ salt-call state.apply hive >> $SETUPLOG 2>&1
+ fi
+ if [[ $PLAYBOOK == '1' ]]; then
+ echo -e "XXX\n89\nInstalling Playbook... \nXXX"
+ salt-call state.apply playbook >> $SETUPLOG 2>&1
+ fi
+    echo -e "XXX\n75\nEnabling Check-in at Boot... \nXXX"
+ checkin_at_boot >> $SETUPLOG 2>&1
+    echo -e "XXX\n97\nFinishing touches... \nXXX"
+ filter_unused_nics >> $SETUPLOG 2>&1
+ network_setup >> $SETUPLOG 2>&1
+ echo -e "XXX\n98\nVerifying Setup... \nXXX"
+ salt-call state.highstate >> $SETUPLOG 2>&1
+ } |whiptail --title "Hybrid Hunter Install" --gauge "Please wait while installing" 6 60 0
+ GOODSETUP=$(tail -10 $SETUPLOG | grep Failed | awk '{ print $2}')
+ if [[ $GOODSETUP == '0' ]]; then
+ whiptail_setup_complete
+ if [[ $THEHIVE == '1' ]]; then
+ check_hive_init_then_reboot
+ else
+ shutdown -r now
+ fi
+ else
+ whiptail_setup_failed
+ shutdown -r now
+ fi
+
+ fi
+
+ ####################
+ ## Sensor ##
+ ####################
+
+ if [ $INSTALLTYPE == 'SENSORONLY' ]; then
+ filter_unused_nics
+ whiptail_bond_nics
+ whiptail_management_server
+ whiptail_master_updates
+ set_updates
+ whiptail_homenet_sensor
+ whiptail_sensor_config
+ # Calculate lbprocs so we can call it in the prompts
+ calculate_useable_cores
+ if [ $NSMSETUP == 'ADVANCED' ]; then
+ whiptail_bro_pins
+ whiptail_suricata_pins
+ whiptail_bond_nics_mtu
+ else
+ whiptail_basic_bro
+ whiptail_basic_suri
+ fi
+ whiptail_make_changes
+ set_hostname
+ clear_master
+ mkdir -p /nsm
+ get_filesystem_root
+ get_filesystem_nsm
+ if [ $INSTALLMETHOD == iso ]; then
+ add_admin_user
+ disable_onion_user
+ fi
+ copy_ssh_key >> $SETUPLOG 2>&1
+ {
+ sleep 0.5
+ echo -e "XXX\n0\nSetting Initial Firewall Policy... \nXXX"
+ set_initial_firewall_policy >> $SETUPLOG 2>&1
+ echo -e "XXX\n1\nInstalling pip3... \nXXX"
+ echo -e "XXX\n3\nCreating Bond Interface... \nXXX"
+ create_sensor_bond >> $SETUPLOG 2>&1
+ echo -e "XXX\n4\nGenerating Sensor Pillar... \nXXX"
+ sensor_pillar >> $SETUPLOG 2>&1
+ echo "** Generating the patch pillar **" >> $SETUPLOG
+ patch_pillar >> $SETUPLOG 2>&1
+ echo -e "XXX\n5\nInstalling Salt Components... \nXXX"
+ saltify >> $SETUPLOG 2>&1
+ echo -e "XXX\n20\nInstalling Docker... \nXXX"
+ docker_install >> $SETUPLOG 2>&1
+ echo -e "XXX\n22\nConfiguring Salt Minion... \nXXX"
+ configure_minion sensor >> $SETUPLOG 2>&1
+ echo -e "XXX\n24\nCopying Minion Pillars to Master... \nXXX"
+ copy_minion_tmp_files >> $SETUPLOG 2>&1
+ echo -e "XXX\n25\nSending Salt Key to Master... \nXXX"
+ salt_firstcheckin >> $SETUPLOG 2>&1
+ echo -e "XXX\n26\nTelling the Master to Accept Key... \nXXX"
+ # Accept the Salt Key
+ accept_salt_key_remote >> $SETUPLOG 2>&1
+ echo -e "XXX\n27\nApplying SSL Certificates... \nXXX"
+ salt-call state.apply ca >> $SETUPLOG 2>&1
+ salt-call state.apply ssl >> $SETUPLOG 2>&1
+ echo -e "XXX\n35\nInstalling Core Components... \nXXX"
+ salt-call state.apply common >> $SETUPLOG 2>&1
+ salt-call state.apply firewall >> $SETUPLOG 2>&1
+ echo -e "XXX\n50\nInstalling PCAP... \nXXX"
+ salt-call state.apply pcap >> $SETUPLOG 2>&1
+ echo -e "XXX\n60\nInstalling IDS components... \nXXX"
+ salt-call state.apply suricata >> $SETUPLOG 2>&1
+ checkin_at_boot >> $SETUPLOG 2>&1
+    echo -e "XXX\n97\nFinishing touches... \nXXX"
+ filter_unused_nics >> $SETUPLOG 2>&1
+ network_setup >> $SETUPLOG 2>&1
+ echo -e "XXX\n98\nVerifying Setup... \nXXX"
+ salt-call state.highstate >> $SETUPLOG 2>&1
+ } |whiptail --title "Hybrid Hunter Install" --gauge "Please wait while installing" 6 60 0
+ GOODSETUP=$(tail -10 $SETUPLOG | grep Failed | awk '{ print $2}')
+ if [[ $GOODSETUP == '0' ]]; then
+ whiptail_setup_complete
+ shutdown -r now
+ else
+ whiptail_setup_failed
+ shutdown -r now
+ fi
+ fi
+
+ #######################
+ ## Eval Mode ##
+ #######################
+
+ if [ $INSTALLTYPE == 'EVALMODE' ]; then
+
+ # Filter out the management NIC
+ filter_unused_nics
+
+ # Select which NICs are in the bond
+ whiptail_bond_nics
+
+ # Snag the HOME_NET
+ whiptail_homenet_master
+ whiptail_eval_adv_warning
+ whiptail_enable_components
+
+ # Set a bunch of stuff since this is eval
+ es_heapsize
+ ls_heapsize
+ NODE_ES_HEAP_SIZE="600m"
+ NODE_LS_HEAP_SIZE="500m"
+ LSPIPELINEWORKERS=1
+ LSPIPELINEBATCH=125
+ LSINPUTTHREADS=1
+ LSINPUTBATCHCOUNT=125
+ RULESETUP=ETOPEN
+ NSMSETUP=BASIC
+ NIDS=Suricata
+ BROVERSION=ZEEK
+ CURCLOSEDAYS=30
+ process_components
+ whiptail_make_changes
+ set_hostname
+ generate_passwords
+ auth_pillar
+ clear_master
+ mkdir -p /nsm
+ get_filesystem_root
+ get_filesystem_nsm
+ get_log_size_limit
+ get_main_ip
+ if [ $INSTALLMETHOD == iso ]; then
+ add_admin_user
+ disable_onion_user
+ fi
+ # Add the user so we can sit back and relax
+ add_socore_user_master
+ {
+ sleep 0.5
+ echo -e "XXX\n0\nCreating Bond Interface... \nXXX"
+ create_sensor_bond >> $SETUPLOG 2>&1
+ echo -e "XXX\n1\nInstalling Python 3... \nXXX"
+ echo -e "XXX\n2\nInstalling saltstack... \nXXX"
+ saltify >> $SETUPLOG 2>&1
+ echo -e "XXX\n3\nInstalling docker... \nXXX"
+ docker_install >> $SETUPLOG 2>&1
+ echo -e "XXX\n5\nInstalling master code... \nXXX"
+ install_master >> $SETUPLOG 2>&1
+ echo -e "XXX\n5\nInstalling mysql dependencies for saltstack... \nXXX"
+ salt_install_mysql_deps >> $SETUPLOG 2>&1
+ echo -e "XXX\n6\nCopying salt code... \nXXX"
+ salt_master_directories >> $SETUPLOG 2>&1
+    echo -e "XXX\n6\nUpdating sudoers... \nXXX"
+ update_sudoers >> $SETUPLOG 2>&1
+ echo -e "XXX\n7\nFixing some permissions... \nXXX"
+ chown_salt_master >> $SETUPLOG 2>&1
+ echo -e "XXX\n7\nCreating the static pillar... \nXXX"
+ # Set the static values
+ master_static >> $SETUPLOG 2>&1
+ echo -e "XXX\n7\nCreating the master pillar... \nXXX"
+ master_pillar >> $SETUPLOG 2>&1
+ echo "** Generating the patch pillar **" >> $SETUPLOG
+ patch_pillar >> $SETUPLOG 2>&1
+ echo -e "XXX\n7\nConfiguring minion... \nXXX"
+ configure_minion eval >> $SETUPLOG 2>&1
+ echo -e "XXX\n7\nSetting the node type to eval... \nXXX"
+ set_node_type >> $SETUPLOG 2>&1
+ echo -e "XXX\n7\nStorage node pillar... \nXXX"
+ node_pillar >> $SETUPLOG 2>&1
+ echo -e "XXX\n8\nCreating firewall policies... \nXXX"
+ set_initial_firewall_policy >> $SETUPLOG 2>&1
+ echo -e "XXX\n24\nCopying Minion Pillars to Master... \nXXX"
+ copy_minion_tmp_files >> $SETUPLOG 2>&1
+ echo -e "XXX\n10\nRegistering agent... \nXXX"
+ salt_firstcheckin >> $SETUPLOG 2>&1
+ echo -e "XXX\n11\nAccepting Agent... \nXXX"
+ accept_salt_key_local >> $SETUPLOG 2>&1
+ echo -e "XXX\n12\nRunning the SSL states... \nXXX"
+ salt_checkin >> $SETUPLOG 2>&1
+ salt-call state.apply ca >> $SETUPLOG 2>&1
+ salt-call state.apply ssl >> $SETUPLOG 2>&1
+ echo -e "XXX\n15\nInstalling core components... \nXXX"
+ salt-call state.apply common >> $SETUPLOG 2>&1
+ echo -e "XXX\n18\nInitializing firewall rules... \nXXX"
+ salt-call state.apply firewall >> $SETUPLOG 2>&1
+ echo -e "XXX\n25\nInstalling master components... \nXXX"
+ salt-call state.apply master >> $SETUPLOG 2>&1
+ salt-call state.apply idstools >> $SETUPLOG 2>&1
+ if [[ $OSQUERY == '1' ]]; then
+ salt-call state.apply mysql >> $SETUPLOG 2>&1
+ fi
+ echo -e "XXX\n35\nInstalling ElasticSearch... \nXXX"
+ salt-call state.apply elasticsearch >> $SETUPLOG 2>&1
+ echo -e "XXX\n40\nInstalling Logstash... \nXXX"
+ salt-call state.apply logstash >> $SETUPLOG 2>&1
+ echo -e "XXX\n45\nInstalling Kibana... \nXXX"
+ salt-call state.apply kibana >> $SETUPLOG 2>&1
+ echo -e "XXX\n50\nInstalling pcap... \nXXX"
+ salt-call state.apply pcap >> $SETUPLOG 2>&1
+ echo -e "XXX\n52\nInstalling Suricata... \nXXX"
+ salt-call state.apply suricata >> $SETUPLOG 2>&1
+ echo -e "XXX\n54\nInstalling Zeek... \nXXX"
+ salt-call state.apply bro >> $SETUPLOG 2>&1
+ echo -e "XXX\n56\nInstalling curator... \nXXX"
+ salt-call state.apply curator >> $SETUPLOG 2>&1
+ echo -e "XXX\n58\nInstalling elastalert... \nXXX"
+ salt-call state.apply elastalert >> $SETUPLOG 2>&1
+ if [[ $OSQUERY == '1' ]]; then
+ echo -e "XXX\n60\nInstalling fleet... \nXXX"
+ salt-call state.apply fleet >> $SETUPLOG 2>&1
+ salt-call state.apply redis >> $SETUPLOG 2>&1
+ fi
+ if [[ $WAZUH == '1' ]]; then
+ echo -e "XXX\n65\nInstalling Wazuh components... \nXXX"
+ salt-call state.apply wazuh >> $SETUPLOG 2>&1
+ fi
+ echo -e "XXX\n85\nInstalling filebeat... \nXXX"
+ salt-call state.apply filebeat >> $SETUPLOG 2>&1
+ salt-call state.apply utility >> $SETUPLOG 2>&1
+ echo -e "XXX\n90\nInstalling misc components... \nXXX"
+ salt-call state.apply schedule >> $SETUPLOG 2>&1
+ salt-call state.apply soctopus >> $SETUPLOG 2>&1
+ if [[ $THEHIVE == '1' ]]; then
+    echo -e "XXX\n91\nInstalling TheHive... \nXXX"
+ salt-call state.apply hive >> $SETUPLOG 2>&1
+ fi
+ if [[ $PLAYBOOK == '1' ]]; then
+ echo -e "XXX\n93\nInstalling Playbook... \nXXX"
+ salt-call state.apply playbook >> $SETUPLOG 2>&1
+ fi
+ echo -e "XXX\n95\nSetting checkin to run on boot... \nXXX"
+ checkin_at_boot >> $SETUPLOG 2>&1
+    echo -e "XXX\n97\nFinishing touches... \nXXX"
+ filter_unused_nics >> $SETUPLOG 2>&1
+ network_setup >> $SETUPLOG 2>&1
+ echo -e "XXX\n98\nVerifying Setup... \nXXX"
+ salt-call state.highstate >> $SETUPLOG 2>&1
+ } |whiptail --title "Hybrid Hunter Install" --gauge "Please wait while installing" 6 60 0
+ GOODSETUP=$(tail -10 $SETUPLOG | grep Failed | awk '{ print $2}')
+ if [[ $GOODSETUP == '0' ]]; then
+ whiptail_setup_complete
+ if [[ $THEHIVE == '1' ]]; then
+ check_hive_init_then_reboot
+ else
+ shutdown -r now
+ fi
+ else
+ whiptail_setup_failed
+ shutdown -r now
+ fi
+ fi
+
+ ###################
+ ## Nodes ##
+ ###################
+
+ if [ $INSTALLTYPE == 'STORAGENODE' ] || [ $INSTALLTYPE == 'PARSINGNODE' ] || [ $INSTALLTYPE == 'HOTNODE' ] || [ $INSTALLTYPE == 'WARMNODE' ]; then
+ whiptail_management_server
+ whiptail_master_updates
+ set_updates
+ get_log_size_limit
+ CURCLOSEDAYS=30
+ es_heapsize
+ ls_heapsize
+ whiptail_node_advanced
+ if [ $NODESETUP == 'NODEADVANCED' ]; then
+ whiptail_node_es_heap
+ whiptail_node_ls_heap
+ whiptail_node_ls_pipeline_worker
+ whiptail_node_ls_pipline_batchsize
+ whiptail_node_ls_input_threads
+ whiptail_node_ls_input_batch_count
+ whiptail_cur_close_days
+ whiptail_log_size_limit
+ else
+ NODE_ES_HEAP_SIZE=$ES_HEAP_SIZE
+ NODE_LS_HEAP_SIZE=$LS_HEAP_SIZE
+ LSPIPELINEWORKERS=$CPUCORES
+ LSPIPELINEBATCH=125
+ LSINPUTTHREADS=1
+ LSINPUTBATCHCOUNT=125
+ fi
+ whiptail_make_changes
+ set_hostname
+ clear_master
+ mkdir -p /nsm
+ get_filesystem_root
+ get_filesystem_nsm
+ if [ $INSTALLMETHOD == iso ]; then
+ add_admin_user
+ disable_onion_user
+ fi
+ copy_ssh_key >> $SETUPLOG 2>&1
+ {
+ sleep 0.5
+ echo -e "XXX\n0\nSetting Initial Firewall Policy... \nXXX"
+ set_initial_firewall_policy >> $SETUPLOG 2>&1
+ echo -e "XXX\n1\nInstalling pip3... \nXXX"
+ echo -e "XXX\n5\nInstalling Salt Packages... \nXXX"
+ saltify >> $SETUPLOG 2>&1
+ echo -e "XXX\n20\nInstalling Docker... \nXXX"
+ docker_install >> $SETUPLOG 2>&1
+ echo -e "XXX\n30\nInitializing Minion... \nXXX"
+ configure_minion node >> $SETUPLOG 2>&1
+ set_node_type >> $SETUPLOG 2>&1
+ node_pillar >> $SETUPLOG 2>&1
+ echo "** Generating the patch pillar **" >> $SETUPLOG
+ patch_pillar >> $SETUPLOG 2>&1
+ echo -e "XXX\n24\nCopying Minion Pillars to Master... \nXXX"
+ copy_minion_tmp_files >> $SETUPLOG 2>&1
+ echo -e "XXX\n35\nSending and Accepting Salt Key... \nXXX"
+ salt_firstcheckin >> $SETUPLOG 2>&1
+ # Accept the Salt Key
+ accept_salt_key_remote >> $SETUPLOG 2>&1
+ echo -e "XXX\n40\nApplying SSL Certificates... \nXXX"
+ salt-call state.apply ca >> $SETUPLOG 2>&1
+ salt-call state.apply ssl >> $SETUPLOG 2>&1
+ echo -e "XXX\n50\nConfiguring Firewall... \nXXX"
+ salt-call state.apply common >> $SETUPLOG 2>&1
+ salt-call state.apply firewall >> $SETUPLOG 2>&1
+ echo -e "XXX\n70\nInstalling Elastic Components... \nXXX"
+ salt-call state.apply logstash >> $SETUPLOG 2>&1
+ salt-call state.apply elasticsearch >> $SETUPLOG 2>&1
+ salt-call state.apply curator >> $SETUPLOG 2>&1
+ salt-call state.apply filebeat >> $SETUPLOG 2>&1
+ checkin_at_boot >> $SETUPLOG 2>&1
+    echo -e "XXX\n97\nFinishing touches... \nXXX"
+ filter_unused_nics >> $SETUPLOG 2>&1
+ network_setup >> $SETUPLOG 2>&1
+ echo -e "XXX\n98\nVerifying Setup... \nXXX"
+ } |whiptail --title "Hybrid Hunter Install" --gauge "Please wait while installing" 6 60 0
+ GOODSETUP=$(tail -10 $SETUPLOG | grep Failed | awk '{ print $2}')
+ if [[ $GOODSETUP == '0' ]]; then
+ whiptail_setup_complete
+ shutdown -r now
+ else
+ whiptail_setup_failed
+ shutdown -r now
+ fi
+
+ fi
+
+else
+ echo "User not sure. Cancelling setup.">> $SETUPLOG 2>&1
+ whiptail_cancel
+fi
diff --git a/setup/whiptail.sh b/setup/whiptail.sh
new file mode 100644
index 000000000..8497635c5
--- /dev/null
+++ b/setup/whiptail.sh
@@ -0,0 +1,751 @@
+#!/bin/bash
+
+# Copyright 2014,2015,2016,2017,2018,2019 Security Onion Solutions, LLC
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+whiptail_basic_bro() {
+
+ BASICBRO=$(whiptail --title "Security Onion Setup" --inputbox \
+ "Enter the number of bro processes:" 10 75 $LBPROCS 3>&1 1>&2 2>&3)
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+}
+
+whiptail_basic_suri() {
+
+ BASICSURI=$(whiptail --title "Security Onion Setup" --inputbox \
+ "Enter the number of Suricata Processes:" 10 75 $LBPROCS 3>&1 1>&2 2>&3)
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+}
+
+whiptail_bro_pins() {
+
+ BROPINS=$(whiptail --noitem --title "Pin Bro CPUS" --checklist "Please Select $LBPROCS cores to pin Bro to:" 20 75 12 ${LISTCORES[@]} 3>&1 1>&2 2>&3 )
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+}
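+
+# Note: whiptail --checklist returns the selections as a space-separated list of quoted
+# items (e.g. "0" "2" "4"), which is why sensor_pillar() strips the quotes with
+# cut -d\" -f2 before writing the pins into the pillar.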
+
+whiptail_bro_version() {
+
+  BROVERSION=$(whiptail --title "Security Onion Setup" --radiolist "What tool would you like to use to generate metadata?" 20 75 4 "ZEEK" "Install Zeek (aka Bro)" ON \
+ "SURICATA" "SUPER EXPERIMENTAL" OFF 3>&1 1>&2 2>&3)
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+}
+
+whiptail_bond_nics() {
+
+ local nic_list=()
+ for FNIC in ${FNICS[@]}; do
+ nic_list+=($FNIC "Interface" "OFF")
+ done
+
+ BNICS=$(whiptail --title "NIC Setup" --checklist "Please add NICs to the Monitor Interface" 20 75 12 ${nic_list[@]} 3>&1 1>&2 2>&3 )
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+ while [ -z "$BNICS" ]
+ do
+ BNICS=$(whiptail --title "NIC Setup" --checklist "Please add NICs to the Monitor Interface" 20 75 12 ${nic_list[@]} 3>&1 1>&2 2>&3 )
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+ done
+
+}
+
+whiptail_bond_nics_mtu() {
+
+ # Set the MTU on the monitor interface
+ MTU=$(whiptail --title "Security Onion Setup" --inputbox \
+ "Enter the MTU for the monitor NICs" 10 75 1500 3>&1 1>&2 2>&3)
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+}
+
+whiptail_cancel() {
+
+ whiptail --title "Security Onion Setup" --msgbox "Cancelling Setup. No changes have been made." 8 75
+ if [ -d "/root/installtmp" ]; then
+ echo "/root/installtmp exists" >> $SETUPLOG 2>&1
+ install_cleanup
+ echo "/root/installtmp removed" >> $SETUPLOG 2>&1
+ fi
+ exit
+
+}
+
+whiptail_check_exitstatus() {
+
+  if [ "$1" == "1" ]; then
+ echo "They hit cancel"
+ whiptail_cancel
+ fi
+
+}
+
+whiptail_create_admin_user() {
+
+ ADMINUSER=$(whiptail --title "Security Onion Install" --inputbox \
+ "Please enter a username for your new admin user" 10 60 3>&1 1>&2 2>&3)
+
+}
+
+whiptail_create_admin_user_password1() {
+
+ ADMINPASS1=$(whiptail --title "Security Onion Install" --passwordbox \
+ "Enter a password for $ADMINUSER" 10 60 3>&1 1>&2 2>&3)
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+}
+
+whiptail_create_admin_user_password2() {
+
+ ADMINPASS2=$(whiptail --title "Security Onion Install" --passwordbox \
+ "Re-enter a password for $ADMINUSER" 10 60 3>&1 1>&2 2>&3)
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+}
+
+whiptail_create_socore_user() {
+
+ whiptail --title "Security Onion Setup" --msgbox "Set a password for the socore user. This account is used for adding sensors remotely." 8 75
+
+}
+
+whiptail_create_socore_user_password1() {
+
+ COREPASS1=$(whiptail --title "Security Onion Install" --passwordbox \
+ "Enter a password for user socore" 10 75 3>&1 1>&2 2>&3)
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+}
+
+whiptail_create_socore_user_password2() {
+
+ COREPASS2=$(whiptail --title "Security Onion Install" --passwordbox \
+ "Re-enter a password for user socore" 10 75 3>&1 1>&2 2>&3)
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+}
+
+whiptail_cur_close_days() {
+
+ CURCLOSEDAYS=$(whiptail --title "Security Onion Setup" --inputbox \
+ "Please specify the threshold (in days) at which Elasticsearch indices will be closed" 10 75 $CURCLOSEDAYS 3>&1 1>&2 2>&3)
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+}
+
+whiptail_dhcp_or_static() {
+
+ ADDRESSTYPE=$(whiptail --title "Security Onion Setup" --radiolist \
+ "Choose how to set up your management interface:" 20 78 4 \
+ "STATIC" "Set a static IPv4 address" ON \
+ "DHCP" "Use DHCP to configure the Management Interface" OFF 3>&1 1>&2 2>&3 )
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+}
+
+whiptail_enable_components() {
+ COMPONENTS=$(whiptail --title "Security Onion Setup" --checklist \
+ "Select Components to install" 20 75 8 \
+ "GRAFANA" "Enable Grafana for system monitoring" ON \
+ "OSQUERY" "Enable Fleet with osquery" ON \
+ "WAZUH" "Enable Wazuh" ON \
+ "THEHIVE" "Enable TheHive" ON \
+ "PLAYBOOK" "Enable Playbook" ON 3>&1 1>&2 2>&3 )
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+}
+
+whiptail_eval_adv() {
+ EVALADVANCED=$(whiptail --title "Security Onion Setup" --radiolist \
+ "Choose your eval install:" 20 75 4 \
+ "BASIC" "Install basic components for evaluation" ON \
+ "ADVANCED" "Choose additional components to be installed" OFF 3>&1 1>&2 2>&3 )
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+}
+
+whiptail_eval_adv_warning() {
+  whiptail --title "Security Onion Setup" --msgbox "Please keep in mind that the more services you enable, the more RAM is required." 8 75
+}
+
+whiptail_helix_apikey() {
+ HELIXAPIKEY=$(whiptail --title "Security Onion Setup" --inputbox \
+ "Enter your Helix API Key: \n \nThis can be set later using so-helix-apikey" 10 75 3>&1 1>&2 2>&3)
+
+ local exitstatus=$?
+  whiptail_check_exitstatus $exitstatus
+
+}
+
+whiptail_homenet_master() {
+
+ # Ask for the HOME_NET on the master
+ HNMASTER=$(whiptail --title "Security Onion Setup" --inputbox \
+  "Enter your HOME_NET, separated by commas" 10 75 10.0.0.0/8,192.168.0.0/16,172.16.0.0/12 3>&1 1>&2 2>&3)
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+}
+
+whiptail_homenet_sensor() {
+
+ # Ask to inherit from master
+ whiptail --title "Security Onion Setup" --yesno "Do you want to inherit the HOME_NET from the Master?" 8 75
+
+ local exitstatus=$?
+ if [ $exitstatus == 0 ]; then
+ HNSENSOR=inherit
+ else
+ HNSENSOR=$(whiptail --title "Security Onion Setup" --inputbox \
+    "Enter your HOME_NET, separated by commas" 10 75 10.0.0.0/8,192.168.0.0/16,172.16.0.0/12 3>&1 1>&2 2>&3)
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+ fi
+
+}
+
+whiptail_install_type() {
+
+ # What kind of install are we doing?
+ INSTALLTYPE=$(whiptail --title "Security Onion Setup" --radiolist \
+ "Choose Install Type:" 20 75 13 \
+ "SENSORONLY" "Create a forward only sensor" ON \
+ "STORAGENODE" "Add a Storage Hot Node with parsing" OFF \
+ "MASTERONLY" "Start a new grid" OFF \
+ "EVALMODE" "Evaluate all the things" OFF \
+ "HELIXSENSOR" "Connect this sensor to FireEye Helix" OFF \
+ "PARSINGNODE" "TODO Add a dedicated Parsing Node" OFF \
+ "HOTNODE" "TODO Add Hot Node (Storage Node without Parsing)" OFF \
+ "WARMNODE" "TODO Add Warm Node to existing Hot or Storage node" OFF \
+ "WAZUH" "TODO Stand Alone Wazuh Node" OFF \
+ "STRELKA" "TODO Stand Alone Strelka Node" OFF \
+ "FLEET" "TODO Stand Alone Fleet OSQuery Node" OFF 3>&1 1>&2 2>&3 )
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+}
+
+whiptail_log_size_limit() {
+
+ LOG_SIZE_LIMIT=$(whiptail --title "Security Onion Setup" --inputbox \
+ "Please specify the amount of disk space (in GB) you would like to allocate for Elasticsearch data storage. \
+ By default, this is set to 85% of the disk space allotted for /nsm." 10 75 $LOG_SIZE_LIMIT 3>&1 1>&2 2>&3)
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+}
+
+whiptail_management_interface_dns() {
+
+ MDNS=$(whiptail --title "Security Onion Setup" --inputbox \
+  "Enter your DNS server(s), separated by spaces" 10 60 "8.8.8.8 8.8.4.4" 3>&1 1>&2 2>&3)
+
+}
+
+whiptail_management_interface_dns_search() {
+
+ MSEARCH=$(whiptail --title "Security Onion Setup" --inputbox \
+ "Enter your DNS search domain" 10 60 searchdomain.local 3>&1 1>&2 2>&3)
+
+}
+
+whiptail_management_interface_gateway() {
+
+ MGATEWAY=$(whiptail --title "Security Onion Setup" --inputbox \
+ "Enter your gateway" 10 60 X.X.X.X 3>&1 1>&2 2>&3)
+
+}
+
+whiptail_management_interface_ip() {
+
+ MIP=$(whiptail --title "Security Onion Setup" --inputbox \
+ "Enter your IP address" 10 60 X.X.X.X 3>&1 1>&2 2>&3)
+
+}
+
+whiptail_management_interface_mask() {
+
+ MMASK=$(whiptail --title "Security Onion Setup" --inputbox \
+ "Enter the bit mask for your subnet" 10 60 24 3>&1 1>&2 2>&3)
+
+}
+
+whiptail_management_nic() {
+
+ MNIC=$(whiptail --title "NIC Setup" --radiolist "Please select your management NIC" 20 75 12 ${NICS[@]} 3>&1 1>&2 2>&3 )
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+ while [ -z "$MNIC" ]
+ do
+ MNIC=$(whiptail --title "NIC Setup" --radiolist "Please select your management NIC" 20 75 12 ${NICS[@]} 3>&1 1>&2 2>&3 )
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+ done
+
+}
+
+whiptail_nids() {
+
+ NIDS=$(whiptail --title "Security Onion Setup" --radiolist \
+ "Choose which IDS to run:" 20 75 4 \
+ "Suricata" "Suricata 4.X" ON \
+ "Snort" "Snort 3.0 Beta" OFF 3>&1 1>&2 2>&3 )
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+}
+
+whiptail_oinkcode() {
+
+ OINKCODE=$(whiptail --title "Security Onion Setup" --inputbox \
+ "Enter your oinkcode" 10 75 XXXXXXX 3>&1 1>&2 2>&3)
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+}
+
+whiptail_make_changes() {
+
+ whiptail --title "Security Onion Setup" --yesno "We are going to set this machine up as a $INSTALLTYPE. Please hit YES to make changes or NO to cancel." 8 75
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+}
+
+whiptail_management_server() {
+
+ MSRV=$(whiptail --title "Security Onion Setup" --inputbox \
+ "Enter your Master Server HOSTNAME. It is CASE SENSITIVE!" 10 75 XXXX 3>&1 1>&2 2>&3)
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+ # See if it resolves. Otherwise prompt to add to host file
+ TESTHOST=$(host $MSRV)
+
+ if [[ $TESTHOST = *"not found"* ]] || [[ $TESTHOST = *"connection timed out"* ]]; then
+ add_master_hostfile
+ fi
+
+}
+
+# Ask if you want to do advanced setup of the Master
+whiptail_master_adv() {
+
+ MASTERADV=$(whiptail --title "Security Onion Setup" --radiolist \
+ "Choose what type of master install:" 20 75 4 \
+ "BASIC" "Install master with recommended settings" ON \
+ "ADVANCED" "Do additional configuration to the master" OFF 3>&1 1>&2 2>&3 )
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+}
+
+# Ask which additional components to install
+whiptail_master_adv_service_brologs() {
+
+ BLOGS=$(whiptail --title "Security Onion Setup" --checklist "Please Select Logs to Send:" 24 75 12 \
+ "conn" "Connection Logging" ON \
+ "dce_rpc" "RPC Logs" ON \
+ "dhcp" "DHCP Logs" ON \
+ "dhcpv6" "DHCP IPv6 Logs" ON \
+ "dnp3" "DNP3 Logs" ON \
+ "dns" "DNS Logs" ON \
+ "dpd" "DPD Logs" ON \
+ "files" "Files Logs" ON \
+ "ftp" "FTP Logs" ON \
+ "http" "HTTP Logs" ON \
+ "intel" "Intel Hits Logs" ON \
+ "irc" "IRC Chat Logs" ON \
+ "kerberos" "Kerberos Logs" ON \
+ "modbus" "MODBUS Logs" ON \
+ "mqtt" "MQTT Logs" ON \
+ "notice" "Zeek Notice Logs" ON \
+ "ntlm" "NTLM Logs" ON \
+ "openvpn" "OPENVPN Logs" ON \
+ "pe" "PE Logs" ON \
+ "radius" "Radius Logs" ON \
+ "rfb" "RFB Logs" ON \
+ "rdp" "RDP Logs" ON \
+ "signatures" "Signatures Logs" ON \
+ "sip" "SIP Logs" ON \
+ "smb_files" "SMB Files Logs" ON \
+ "smb_mapping" "SMB Mapping Logs" ON \
+ "smtp" "SMTP Logs" ON \
+ "snmp" "SNMP Logs" ON \
+ "software" "Software Logs" ON \
+ "ssh" "SSH Logs" ON \
+ "ssl" "SSL Logs" ON \
+ "syslog" "Syslog Logs" ON \
+ "telnet" "Telnet Logs" ON \
+ "tunnel" "Tunnel Logs" ON \
+ "weird" "Zeek Weird Logs" ON \
+ "mysql" "MySQL Logs" ON \
+ "socks" "SOCKS Logs" ON \
+ "x509" "x.509 Logs" ON 3>&1 1>&2 2>&3 )
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+}
+
+whiptail_network_notice() {
+
+ whiptail --title "Security Onion Setup" --yesno "Since this is a network install we assume the management interface, DNS, Hostname, etc are already set up. Hit YES to continue." 8 75
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+}
+
+whiptail_node_advanced() {
+
+ NODESETUP=$(whiptail --title "Security Onion Setup" --radiolist \
+ "What type of config would you like to use?:" 20 75 4 \
+ "NODEBASIC" "Install Storage Node with recommended settings" ON \
+ "NODEADVANCED" "Advanced Node Setup" OFF 3>&1 1>&2 2>&3 )
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+}
+
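+# Ask for the Elasticsearch heap size, pre-populated with the recommended value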
+whiptail_node_es_heap() {
+
+ es_heapsize
+ NODE_ES_HEAP_SIZE=$(whiptail --title "Security Onion Setup" --inputbox \
+ "\nEnter ES Heap Size: \n \n(Recommended value is pre-populated)" 10 75 $ES_HEAP_SIZE 3>&1 1>&2 2>&3)
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+}
+
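+# Ask for the Logstash heap size, pre-populated with the recommended value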
+whiptail_node_ls_heap() {
+
+ ls_heapsize
+ NODE_LS_HEAP_SIZE=$(whiptail --title "Security Onion Setup" --inputbox \
+ "\nEnter LogStash Heap Size: \n \n(Recommended value is pre-populated)" 10 75 $LS_HEAP_SIZE 3>&1 1>&2 2>&3)
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+}
+
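+# Ask for the number of Logstash pipeline workers (defaults to the detected CPU core count)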
+whiptail_node_ls_pipeline_worker() {
+
+ LSPIPELINEWORKERS=$(whiptail --title "Security Onion Setup" --inputbox \
+ "\nEnter LogStash Pipeline Workers: \n \n(Recommended value is pre-populated)" 10 75 $CPUCORES 3>&1 1>&2 2>&3)
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+}
+
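+# Ask for the Logstash pipeline batch size (sets LSPIPELINEBATCH)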
+whiptail_node_ls_pipline_batchsize() {
+
+ LSPIPELINEBATCH=$(whiptail --title "Security Onion Setup" --inputbox \
+ "\nEnter LogStash Pipeline Batch Size: \n \n(Default value is pre-populated)" 10 75 125 3>&1 1>&2 2>&3)
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+}
+
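+# Ask for the number of Logstash input threads (sets LSINPUTTHREADS)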
+whiptail_node_ls_input_threads() {
+
+ LSINPUTTHREADS=$(whiptail --title "Security Onion Setup" --inputbox \
+ "\nEnter LogStash Input Threads: \n \n(Default value is pre-populated)" 10 75 1 3>&1 1>&2 2>&3)
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+}
+
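+# Ask for the Logstash input batch count (sets LSINPUTBATCHCOUNT)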
+whiptail_node_ls_input_batch_count() {
+
+ LSINPUTBATCHCOUNT=$(whiptail --title "Security Onion Setup" --inputbox \
+ "\nEnter LogStash Input Batch Count: \n \n(Default value is pre-populated)" 10 75 125 3>&1 1>&2 2>&3)
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+}
+
+whiptail_passwords_dont_match() {
+
+ whiptail --title "Security Onion Setup" --msgbox "Passwords don't match. Please re-enter." 8 75
+
+}
+
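+# Ask for a unique name for a new OS patch schedule, re-prompting until a name is entered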
+whiptail_patch_name_new_schedule() {
+
+ PATCHSCHEDULENAME=$(whiptail --title "Security Onion Setup" --inputbox \
+ "What name do you want to give this OS patch schedule? This schedule needs to be named uniquely. Available schedules can be found on the master under /opt/so/salt/patch/os/schedules/.yml" 10 75 3>&1 1>&2 2>&3)
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+ while [[ -z "$PATCHSCHEDULENAME" ]]; do
+ whiptail --title "Security Onion Setup" --msgbox "Please enter a name for this OS patch schedule." 8 75
+ PATCHSCHEDULENAME=$(whiptail --title "Security Onion Setup" --inputbox \
+ "What name do you want to give this OS patch schedule? This schedule needs to be named uniquely. Available schedules can be found on the master under /opt/so/salt/patch/os/schedules/.yml" 10 75 3>&1 1>&2 2>&3)
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+ done
+
+
+}
+
+whiptail_patch_schedule() {
+
+ # What kind of patch schedule are we doing?
+ PATCHSCHEDULE=$(whiptail --title "Security Onion Setup" --radiolist \
+ "Choose OS patch schedule. This will NOT update Security Onion related tools such as Zeek, Elasticsearch, Kibana, SaltStack, etc." 15 75 5 \
+ "Automatic" "Updates installed every 8 hours if available" ON \
+ "Manual" "Updates will be installed manually" OFF \
+ "Import Schedule" "Import named schedule on following screen" OFF \
+ "New Schedule" "Configure and name new schedule on next screen" OFF 3>&1 1>&2 2>&3 )
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+}
+
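+# Ask for the name of an existing OS patch schedule to inherit, re-prompting until a name is entered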
+whiptail_patch_schedule_import() {
+
+ unset PATCHSCHEDULENAME
+ PATCHSCHEDULENAME=$(whiptail --title "Security Onion Setup" --inputbox \
+ "Enter the name of the OS patch schedule you want to inherit. Available schedules can be found on the master under /opt/so/salt/patch/os/schedules/.yml" 10 75 3>&1 1>&2 2>&3)
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+ while [[ -z "$PATCHSCHEDULENAME" ]]; do
+ whiptail --title "Security Onion Setup" --msgbox "Please enter a name for the OS patch schedule you want to inherit." 8 75
+ PATCHSCHEDULENAME=$(whiptail --title "Security Onion Setup" --inputbox \
+ "Enter the name of the OS patch schedule you want to inherit. Available schedules can be found on the master under /opt/so/salt/patch/os/schedules/.yml" 10 75 3>&1 1>&2 2>&3)
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+ done
+
+}
+
+whiptail_patch_schedule_select_days() {
+ # Select the days to patch
+ PATCHSCHEDULEDAYS=($(whiptail --title "Security Onion Setup" --checklist \
+ "Which days do you want to apply OS patches?" 15 75 8 \
+ "Monday" "" OFF \
+ "Tuesday" "" ON \
+ "Wednesday" "" OFF \
+ "Thursday" "" OFF \
+ "Friday" "" OFF \
+ "Saturday" "" OFF \
+ "Sunday" "" OFF 3>&1 1>&2 2>&3 ))
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+}
+
+whiptail_patch_schedule_select_hours() {
+ # Select the hours to patch
+ PATCHSCHEDULEHOURS=($(whiptail --title "Security Onion Setup" --checklist \
+ "At which time, UTC, do you want to apply OS patches on the selected days? Hours 12 through 23 can be selected on the next screen." 22 75 13 \
+ "00:00" "" OFF \
+ "01:00" "" OFF \
+ "02:00" "" OFF \
+ "03:00" "" OFF \
+ "04:00" "" OFF \
+ "05:00" "" OFF \
+ "06:00" "" OFF \
+ "07:00" "" OFF \
+ "08:00" "" OFF \
+ "09:00" "" OFF \
+ "10:00" "" OFF \
+ "11:00" "" OFF 3>&1 1>&2 2>&3 ))
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+ # Select the hours to patch
+ PATCHSCHEDULEHOURS+=($(whiptail --title "Security Onion Setup" --checklist \
+ "At which time, UTC, do you want to apply OS patches on the selected days?" 22 75 13 \
+ "12:00" "" OFF \
+ "13:00" "" OFF \
+ "14:00" "" OFF \
+ "15:00" "" ON \
+ "16:00" "" OFF \
+ "17:00" "" OFF \
+ "18:00" "" OFF \
+ "19:00" "" OFF \
+ "20:00" "" OFF \
+ "21:00" "" OFF \
+ "22:00" "" OFF \
+ "23:00" "" OFF 3>&1 1>&2 2>&3 ))
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+}
+
+whiptail_rule_setup() {
+
+ # Get pulled pork info
+ RULESETUP=$(whiptail --title "Security Onion Setup" --radiolist \
+ "Which IDS ruleset would you like to use?\n\nThis master server is responsible for downloading the IDS ruleset from the Internet.\n\nSensors then pull a copy of this ruleset from the master server.\n\nIf you select a commercial ruleset, it is your responsibility to purchase enough licenses for all of your sensors in compliance with your vendor's policies." 20 75 4 \
+ "ETOPEN" "Emerging Threats Open" ON \
+ "ETPRO" "Emerging Threats PRO" OFF \
+ "TALOSET" "Snort Subscriber (Talos) and ET NoGPL rulesets" OFF \
+ "TALOS" "Snort Subscriber (Talos) ruleset and set a policy" OFF \
+ 3>&1 1>&2 2>&3)
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+}
+
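+# Choose a basic or advanced sensor configuration (sets NSMSETUP)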
+whiptail_sensor_config() {
+
+ NSMSETUP=$(whiptail --title "Security Onion Setup" --radiolist \
+ "What type of configuration would you like to use?:" 20 75 4 \
+ "BASIC" "Install NSM components with recommended settings" ON \
+ "ADVANCED" "Configure each component individually" OFF 3>&1 1>&2 2>&3 )
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+}
+
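+# Ask for the hostname to set, rejecting "localhost"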
+whiptail_set_hostname() {
+
+ HOSTNAME=$(whiptail --title "Security Onion Setup" --inputbox \
+ "Enter the Hostname you would like to set." 10 75 $HOSTNAME 3>&1 1>&2 2>&3)
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+ while [[ "$HOSTNAME" == 'localhost' ]] ; do
+ whiptail --title "Security Onion Setup" --msgbox "Please choose a hostname that isn't localhost." 8 75
+ HOSTNAME=$(whiptail --title "Security Onion Setup" --inputbox \
+ "Enter the Hostname you would like to set." 10 75 $HOSTNAME 3>&1 1>&2 2>&3)
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+ done
+
+}
+
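+# Report a successful install, clean up, and prompt for a reboot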
+whiptail_setup_complete() {
+
+ whiptail --title "Security Onion Setup" --msgbox "Finished installing this as an $INSTALLTYPE. Press Enter to reboot." 8 75
+ install_cleanup
+
+}
+
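+# Report a failed install, point the user at the setup log, and clean up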
+whiptail_setup_failed() {
+
+ whiptail --title "Security Onion Setup" --msgbox "Install had a problem. Please see $SETUPLOG for details. Press Enter to reboot." 8 75
+ install_cleanup
+
+}
+
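+# Ask for the Elasticsearch shard count (sets SHARDCOUNT)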
+whiptail_shard_count() {
+
+ SHARDCOUNT=$(whiptail --title "Security Onion Setup" --inputbox \
+ "\nEnter ES Shard Count: \n \n(Default value is pre-populated)" 10 75 125 3>&1 1>&2 2>&3)
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+}
+
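+# Pin Suricata to CPU cores that are not already pinned to Zeek/Bro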
+whiptail_suricata_pins() {
+
+ FILTEREDCORES=$(echo ${LISTCORES[@]} ${BROPINS[@]} | tr -d '"' | tr ' ' '\n' | sort | uniq -u | awk '{print $1 " \"" "core" "\""}')
+ SURIPINS=$(whiptail --noitem --title "Pin Suricata CPUS" --checklist "Please Select $LBPROCS cores to pin Suricata to:" 20 75 12 ${FILTEREDCORES[@]} 3>&1 1>&2 2>&3 )
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+}
+
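+# Choose whether grid nodes get OS/Docker updates through the master or directly from the Internet (sets MASTERUPDATES)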
+whiptail_master_updates() {
+
+ MASTERUPDATES=$(whiptail --title "Security Onion Setup" --radiolist \
+ "How would you like to download updates for your grid?:" 20 75 4 \
+ "MASTER" "Master node is proxy for OS/Docker updates." ON \
+ "OPEN" "Each node connect to the Internet for updates" OFF 3>&1 1>&2 2>&3 )
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+}
+
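+# Choose whether this node gets OS/Docker updates from the master or directly from the Internet (sets NODEUPDATES)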
+whiptail_node_updates() {
+
+ NODEUPDATES=$(whiptail --title "Security Onion Setup" --radiolist \
+ "How would you like to download updates for this node?:" 20 75 4 \
+ "MASTER" "Download OS/Docker updates from the Master." ON \
+ "OPEN" "Download updates directly from the Internet" OFF 3>&1 1>&2 2>&3 )
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+}
+
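+# Final confirmation before performing an internet-based (network) install; returns the user's choice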
+whiptail_you_sure() {
+
+ echo "whiptail_you_sure called" >> $SETUPLOG 2>&1
+ whiptail --title "Security Onion Setup" --yesno "Are you sure you want to install Security Onion over the internet?" 8 75
+
+ local exitstatus=$?
+ echo "whiptail_you_sure returning $exitstatus" >> $SETUPLOG 2>&1
+ return $exitstatus
+
+}
diff --git a/so-setup-network.sh b/so-setup-network.sh
index aea465f0d..d12ad6181 100644
--- a/so-setup-network.sh
+++ b/so-setup-network.sh
@@ -1,6 +1,6 @@
#!/bin/bash
-# Copyright 2014,2015,2016,2017,2018, 2019 Security Onion Solutions, LLC
+# Copyright 2014,2015,2016,2017,2018,2019 Security Onion Solutions, LLC
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -15,2105 +15,4 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-# Global Variable Section
-HOSTNAME=$(cat /etc/hostname)
-TOTAL_MEM=`grep MemTotal /proc/meminfo | awk '{print $2}' | sed -r 's/.{3}$//'`
-NICS=$(ip link | awk -F: '$0 !~ "lo|vir|veth|br|docker|wl|^[^0-9]"{print $2 " \"" "Interface" "\"" " OFF"}')
-CPUCORES=$(cat /proc/cpuinfo | grep processor | wc -l)
-LISTCORES=$(cat /proc/cpuinfo | grep processor | awk '{print $3 " \"" "core" "\""}')
-RANDOMUID=$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 16 | head -n 1)
-NODE_ES_PORT="9200"
-SETUPLOG="/root/sosetup.log"
-
-# Reset the Install Log
-date -u >$SETUPLOG 2>&1
-
-# End Global Variable Section
-
-# Functions
-
-accept_salt_key_local() {
- echo "Accept the key locally on the master" >> $SETUPLOG 2>&1
- # Accept the key locally on the master
- salt-key -ya $HOSTNAME
-
-}
-
-accept_salt_key_remote() {
- echo "Accept the key remotely on the master" >> $SETUPLOG 2>&1
- # Delete the key just in case.
- ssh -i /root/.ssh/so.key socore@$MSRV sudo salt-key -d $HOSTNAME -y
- salt-call state.apply ca
- ssh -i /root/.ssh/so.key socore@$MSRV sudo salt-key -a $HOSTNAME -y
-
-}
-
-add_master_hostfile() {
- echo "Checking if I can resolve master. If not add to hosts file" >> $SETUPLOG 2>&1
- # Pop up an input to get the IP address
- local MSRVIP=$(whiptail --title "Security Onion Setup" --inputbox \
- "Enter your Master Server IP Address" 10 60 X.X.X.X 3>&1 1>&2 2>&3)
-
- # Add the master to the host file if it doesn't resolve
- if ! grep -q $MSRVIP /etc/hosts; then
- echo "$MSRVIP $MSRV" >> /etc/hosts
- fi
-}
-
-add_socore_user_master() {
-
- echo "Add socore on the master" >>~/sosetup.log 2>&1
- # Add user "socore" to the master. This will be for things like accepting keys.
- if [ $OS == 'centos' ]; then
- local ADDUSER=adduser
- else
- local ADDUSER=useradd
- fi
- groupadd --gid 939 socore
- $ADDUSER --uid 939 --gid 939 --home-dir /opt/so socore
- # Set the password for socore that we got during setup
- echo socore:$COREPASS1 | chpasswd --crypt-method=SHA512
-
-}
-
-#add_socore_user_master() {
-# echo "Add socore on the master" >> $SETUPLOG 2>&1
-# if [ $OS == 'centos' ]; then
-# local ADDUSER=adduser
-# else
-# local ADDUSER=useradd
-# fi
-# # Add user "socore" to the master. This will be for things like accepting keys.
-# groupadd --gid 939 socore
-# $ADDUSER --uid 939 --gid 939 --home-dir /opt/so socore
-# # Prompt the user to set a password for the user
-# passwd socore
-
-#}
-
-add_socore_user_notmaster() {
- echo "Add socore user on non master" >> $SETUPLOG 2>&1
- # Add socore user to the non master system. Probably not a bad idea to make system user
- groupadd --gid 939 socore
- $ADDUSER --uid 939 --gid 939 --home-dir /opt/so --no-create-home socore
-
-}
-
-# Create an auth pillar so that passwords survive re-install
-auth_pillar(){
-
- if [ ! -f /opt/so/saltstack/pillar/auth.sls ]; then
- echo "Creating Auth Pillar" >> $SETUPLOG 2>&1
- mkdir -p /opt/so/saltstack/pillar
- echo "auth:" >> /opt/so/saltstack/pillar/auth.sls
- echo " mysql: $MYSQLPASS" >> /opt/so/saltstack/pillar/auth.sls
- echo " fleet: $FLEETPASS" >> /opt/so/saltstack/pillar/auth.sls
- fi
-
-}
-
-# Enable Bro Logs
-bro_logs_enabled() {
- echo "Enabling Bro Logs" >> $SETUPLOG 2>&1
-
- echo "brologs:" > pillar/brologs.sls
- echo " enabled:" >> pillar/brologs.sls
-
- if [ $MASTERADV == 'ADVANCED' ]; then
- for BLOG in ${BLOGS[@]}; do
- echo " - $BLOG" | tr -d '"' >> pillar/brologs.sls
- done
- else
- echo " - conn" >> pillar/brologs.sls
- echo " - dce_rpc" >> pillar/brologs.sls
- echo " - dhcp" >> pillar/brologs.sls
- echo " - dhcpv6" >> pillar/brologs.sls
- echo " - dnp3" >> pillar/brologs.sls
- echo " - dns" >> pillar/brologs.sls
- echo " - dpd" >> pillar/brologs.sls
- echo " - files" >> pillar/brologs.sls
- echo " - ftp" >> pillar/brologs.sls
- echo " - http" >> pillar/brologs.sls
- echo " - intel" >> pillar/brologs.sls
- echo " - irc" >> pillar/brologs.sls
- echo " - kerberos" >> pillar/brologs.sls
- echo " - modbus" >> pillar/brologs.sls
- echo " - mqtt" >> pillar/brologs.sls
- echo " - notice" >> pillar/brologs.sls
- echo " - ntlm" >> pillar/brologs.sls
- echo " - openvpn" >> pillar/brologs.sls
- echo " - pe" >> pillar/brologs.sls
- echo " - radius" >> pillar/brologs.sls
- echo " - rfb" >> pillar/brologs.sls
- echo " - rdp" >> pillar/brologs.sls
- echo " - signatures" >> pillar/brologs.sls
- echo " - sip" >> pillar/brologs.sls
- echo " - smb_files" >> pillar/brologs.sls
- echo " - smb_mapping" >> pillar/brologs.sls
- echo " - smtp" >> pillar/brologs.sls
- echo " - snmp" >> pillar/brologs.sls
- echo " - software" >> pillar/brologs.sls
- echo " - ssh" >> pillar/brologs.sls
- echo " - ssl" >> pillar/brologs.sls
- echo " - syslog" >> pillar/brologs.sls
- echo " - telnet" >> pillar/brologs.sls
- echo " - tunnel" >> pillar/brologs.sls
- echo " - weird" >> pillar/brologs.sls
- echo " - mysql" >> pillar/brologs.sls
- echo " - socks" >> pillar/brologs.sls
- echo " - x509" >> pillar/brologs.sls
- fi
-}
-
-calculate_useable_cores() {
-
- # Calculate reasonable core usage
- local CORES4BRO=$(( $CPUCORES/2 - 1 ))
- LBPROCSROUND=$(printf "%.0f\n" $CORES4BRO)
- # We don't want it to be 0
- if [ "$LBPROCSROUND" -lt 1 ]; then
- LBPROCS=1
- else
- LBPROCS=$LBPROCSROUND
- fi
-
-}
-
-checkin_at_boot() {
- echo "Enabling checkin at boot" >> $SETUPLOG 2>&1
- echo "startup_states: highstate" >> /etc/salt/minion
-}
-
-check_hive_init_then_reboot() {
- WAIT_STEP=0
- MAX_WAIT=100
- until [ -f /opt/so/state/thehive.txt ] ; do
- WAIT_STEP=$(( ${WAIT_STEP} + 1 ))
- echo "Waiting on the_hive to init...Attempt #$WAIT_STEP"
- if [ ${WAIT_STEP} -gt ${MAX_WAIT} ]; then
- echo "ERROR: We waited ${MAX_WAIT} seconds but the_hive is not working."
- exit 5
- fi
- sleep 1s;
- done
- docker stop so-thehive
- docker rm so-thehive
- shutdown -r now
-}
-
-check_socore_pass() {
-
- if [ $COREPASS1 == $COREPASS2 ]; then
- SCMATCH=yes
- else
- whiptail_passwords_dont_match
- fi
-
-}
-
-chown_salt_master() {
-
- echo "Chown the salt dirs on the master for socore" >> $SETUPLOG 2>&1
- chown -R socore:socore /opt/so
-
-}
-
-clear_master() {
- # Clear out the old master public key in case this is a re-install.
- # This only happens if you re-install the master.
- if [ -f /etc/salt/pki/minion/minion_master.pub ]; then
- echo "Clearing old master key" >> $SETUPLOG 2>&1
- rm /etc/salt/pki/minion/minion_master.pub
- service salt-minion restart
- fi
-
-}
-
-configure_minion() {
-
- # You have to pass the TYPE to this function so it knows if its a master or not
- local TYPE=$1
- echo "Configuring minion type as $TYPE" >> $SETUPLOG 2>&1
- touch /etc/salt/grains
- echo "role: so-$TYPE" > /etc/salt/grains
- if [ $TYPE == 'master' ] || [ $TYPE == 'eval' ]; then
- echo "master: $HOSTNAME" > /etc/salt/minion
- echo "id: $HOSTNAME" >> /etc/salt/minion
- echo "mysql.host: '$MAINIP'" >> /etc/salt/minion
- echo "mysql.port: 3306" >> /etc/salt/minion
- echo "mysql.user: 'root'" >> /etc/salt/minion
- if [ ! -f /opt/so/saltstack/pillar/auth.sls ]; then
- echo "mysql.pass: '$MYSQLPASS'" >> /etc/salt/minion
- else
- OLDPASS=$(cat /opt/so/saltstack/pillar/auth.sls | grep mysql | awk {'print $2'})
- echo "mysql.pass: '$OLDPASS'" >> /etc/salt/minion
- fi
- else
- echo "master: $MSRV" > /etc/salt/minion
- echo "id: $HOSTNAME" >> /etc/salt/minion
-
- fi
-
- service salt-minion restart
-
-}
-
-copy_master_config() {
-
- # Copy the master config template to the proper directory
- cp files/master /etc/salt/master
- # Restart the service so it picks up the changes -TODO Enable service on CentOS
- service salt-master restart
-
-}
-
-copy_minion_pillar() {
-
- # Pass the type so it knows where to copy the pillar
- local TYPE=$1
-
- # Copy over the pillar
- echo "Copying the pillar over" >> $SETUPLOG 2>&1
- scp -v -i /root/.ssh/so.key $TMP/$HOSTNAME.sls socore@$MSRV:/opt/so/saltstack/pillar/$TYPE/$HOSTNAME.sls
-
- }
-
-copy_ssh_key() {
-
- # Generate SSH key
- mkdir -p /root/.ssh
- cat /dev/zero | ssh-keygen -f /root/.ssh/so.key -t rsa -q -N ""
- chown -R $SUDO_USER:$SUDO_USER /root/.ssh
- #Copy the key over to the master
- ssh-copy-id -f -i /root/.ssh/so.key socore@$MSRV
-
-}
-
-network_setup() {
- echo "Setting up Bond" >> $SETUPLOG 2>&1
-
- # Set the MTU
- if [ "$NSMSETUP" != 'ADVANCED' ]; then
- MTU=1500
- fi
-
- # Create the bond interface
- nmcli con add ifname bond0 con-name "bond0" type bond mode 0 -- \
- ipv4.method disabled \
- ipv6.method link-local \
- ethernet.mtu $MTU \
- connection.autoconnect "yes" >> $SETUPLOG 2>&1
-
- for BNIC in ${BNICS[@]}; do
- # Strip the quotes from the NIC names
- BONDNIC="$(echo -e "${BNIC}" | tr -d '"')"
- # Turn off various offloading settings for the interface
- for i in rx tx sg tso ufo gso gro lro; do
- ethtool -K $BONDNIC $i off >> $SETUPLOG 2>&1
- done
- # Create the slave interface and assign it to the bond
- nmcli con add type ethernet ifname $BONDNIC con-name "bond0-slave-$BONDNIC" master bond0 -- \
- ethernet.mtu $MTU \
- connection.autoconnect "yes" >> $SETUPLOG 2>&1
- # Bring the slave interface up
- nmcli con up bond0-slave-$BONDNIC >> $SETUPLOG 2>&1
- done
- # Replace the variable string in the network script
- sed -i "s/\$MAININT/${MAININT}/g" ./install_scripts/disable-checksum-offload.sh >> $SETUPLOG 2>&1
- # Copy the checksum offload script to prevent issues with packet capture
- cp ./install_scripts/disable-checksum-offload.sh /etc/NetworkManager/dispatcher.d/disable-checksum-offload.sh >> $SETUPLOG 2>&1
-}
-
-detect_os() {
-
- # Detect Base OS
- echo "Detecting Base OS" >> $SETUPLOG 2>&1
- if [ -f /etc/redhat-release ]; then
- OS=centos
- yum -y install bind-utils
- elif [ -f /etc/os-release ]; then
- OS=ubuntu
- apt install -y network-manager
- /bin/systemctl enable network-manager
- /bin/systemctl start network-manager
- else
- echo "We were unable to determine if you are using a supported OS." >> $SETUPLOG 2>&1
- exit
- fi
-
-}
-
-docker_install() {
-
- if [ $OS == 'centos' ]; then
- yum clean expire-cache
- yum -y install yum-utils device-mapper-persistent-data lvm2 openssl
- yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
- yum -y update
- yum -y install docker-ce docker-python python-docker
- if [ $INSTALLTYPE != 'EVALMODE' ]; then
- docker_registry
- fi
- echo "Restarting Docker" >> $SETUPLOG 2>&1
- systemctl restart docker
- systemctl enable docker
-
- else
- if [ $INSTALLTYPE == 'MASTERONLY' ] || [ $INSTALLTYPE == 'EVALMODE' ]; then
- apt-get update >> $SETUPLOG 2>&1
- apt-get -y install docker-ce >> $SETUPLOG 2>&1
- if [ $INSTALLTYPE != 'EVALMODE' ]; then
- docker_registry >> $SETUPLOG 2>&1
- fi
- echo "Restarting Docker" >> $SETUPLOG 2>&1
- systemctl restart docker >> $SETUPLOG 2>&1
- else
- apt-key add $TMP/gpg/docker.pub >> $SETUPLOG 2>&1
- add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" >> $SETUPLOG 2>&1
- apt-get update >> $SETUPLOG 2>&1
- apt-get -y install docker-ce >> $SETUPLOG 2>&1
- docker_registry >> $SETUPLOG 2>&1
- echo "Restarting Docker" >> $SETUPLOG 2>&1
- systemctl restart docker >> $SETUPLOG 2>&1
- fi
- fi
-
-}
-
-docker_registry() {
-
- echo "Setting up Docker Registry" >> $SETUPLOG 2>&1
- mkdir -p /etc/docker >> $SETUPLOG 2>&1
- # Make the host use the master docker registry
- echo "{" > /etc/docker/daemon.json
- echo " \"registry-mirrors\": [\"https://$MSRV:5000\"]" >> /etc/docker/daemon.json
- echo "}" >> /etc/docker/daemon.json
- echo "Docker Registry Setup - Complete" >> $SETUPLOG 2>&1
-
-}
-
-es_heapsize() {
-
- # Determine ES Heap Size
- if [ $TOTAL_MEM -lt 8000 ] ; then
- ES_HEAP_SIZE="600m"
- elif [ $TOTAL_MEM -ge 100000 ]; then
- # Set a max of 25GB for heap size
- # https://www.elastic.co/guide/en/elasticsearch/guide/current/heap-sizing.html
- ES_HEAP_SIZE="25000m"
- else
- # Set heap size to 25% of available memory
- ES_HEAP_SIZE=$(($TOTAL_MEM / 4))"m"
- fi
-
-}
-
-eval_mode_hostsfile() {
-
- echo "127.0.0.1 $HOSTNAME" >> /etc/hosts
-
-}
-
-filter_nics() {
-
- # Filter the NICs that we don't want to see in setup
- FNICS=$(ip link | grep -vw $MNIC | awk -F: '$0 !~ "lo|vir|veth|br|docker|wl|^[^0-9]"{print $2 " \"" "Interface" "\"" " OFF"}')
-
-}
-
-generate_passwords(){
- # Generate Random Passwords for Things
- MYSQLPASS=$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 20 | head -n 1)
- FLEETPASS=$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 20 | head -n 1)
- HIVEKEY=$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 20 | head -n 1)
- SENSORONIKEY=$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 20 | head -n 1)
-}
-
-get_filesystem_nsm(){
- FSNSM=$(df /nsm | awk '$3 ~ /[0-9]+/ { print $2 * 1000 }')
-}
-
-get_log_size_limit() {
-
- DISK_DIR="/"
- if [ -d /nsm ]; then
- DISK_DIR="/nsm"
- fi
- DISK_SIZE_K=`df $DISK_DIR |grep -v "^Filesystem" | awk '{print $2}'`
- PERCENTAGE=85
- DISK_SIZE=DISK_SIZE_K*1000
- PERCENTAGE_DISK_SPACE=`echo $(($DISK_SIZE*$PERCENTAGE/100))`
- LOG_SIZE_LIMIT=$(($PERCENTAGE_DISK_SPACE/1000000000))
-
-}
-
-get_filesystem_root(){
- FSROOT=$(df / | awk '$3 ~ /[0-9]+/ { print $2 * 1000 }')
-}
-
-get_main_ip() {
-
- # Get the main IP address the box is using
- MAINIP=$(ip route get 1 | awk '{print $NF;exit}')
- MAININT=$(ip route get 1 | awk '{print $5;exit}')
-
-}
-
-got_root() {
-
- # Make sure you are root
- if [ "$(id -u)" -ne 0 ]; then
- echo "This script must be run using sudo!"
- exit 1
- fi
-
-}
-
-install_cleanup() {
-
- # Clean up after ourselves
- rm -rf /root/installtmp
-
-}
-
-install_prep() {
-
- # Create a tmp space that isn't in /tmp
- mkdir /root/installtmp
- TMP=/root/installtmp
-
-}
-
-install_master() {
-
- # Install the salt master package
- if [ $OS == 'centos' ]; then
- yum -y install wget salt-common salt-master >> $SETUPLOG 2>&1
-
- # Create a place for the keys for Ubuntu minions
- mkdir -p /opt/so/gpg
- wget --inet4-only -O /opt/so/gpg/SALTSTACK-GPG-KEY.pub https://repo.saltstack.com/apt/ubuntu/16.04/amd64/latest/SALTSTACK-GPG-KEY.pub
- wget --inet4-only -O /opt/so/gpg/docker.pub https://download.docker.com/linux/ubuntu/gpg
- wget --inet4-only -O /opt/so/gpg/GPG-KEY-WAZUH https://packages.wazuh.com/key/GPG-KEY-WAZUH
-
- else
- apt-get install -y salt-common=2018.3.4+ds-1 salt-master=2018.3.4+ds-1 salt-minion=2018.3.4+ds-1 python-m2crypto
- apt-mark hold salt-common salt-master salt-minion
- apt-get install -y python-m2crypto
- fi
-
- copy_master_config
-
-}
-
-ls_heapsize() {
-
- # Determine LS Heap Size
- if [ $TOTAL_MEM -ge 32000 ] ; then
- LS_HEAP_SIZE="1000m"
- else
- # If minimal RAM, then set minimal heap
- LS_HEAP_SIZE="500m"
- fi
-
-}
-
-master_pillar() {
-
- # Create the master pillar
- touch /opt/so/saltstack/pillar/masters/$HOSTNAME.sls
- echo "master:" > /opt/so/saltstack/pillar/masters/$HOSTNAME.sls
- echo " mainip: $MAINIP" >> /opt/so/saltstack/pillar/masters/$HOSTNAME.sls
- echo " mainint: $MAININT" >> /opt/so/saltstack/pillar/masters/$HOSTNAME.sls
- echo " esheap: $ES_HEAP_SIZE" >> /opt/so/saltstack/pillar/masters/$HOSTNAME.sls
- echo " esclustername: {{ grains.host }}" >> /opt/so/saltstack/pillar/masters/$HOSTNAME.sls
- if [ $INSTALLTYPE == 'EVALMODE' ]; then
- echo " freq: 0" >> /opt/so/saltstack/pillar/masters/$HOSTNAME.sls
- echo " domainstats: 0" >> /opt/so/saltstack/pillar/masters/$HOSTNAME.sls
- echo " ls_pipeline_batch_size: 125" >> /opt/so/saltstack/pillar/masters/$HOSTNAME.sls
- echo " ls_input_threads: 1" >> /opt/so/saltstack/pillar/masters/$HOSTNAME.sls
- echo " ls_batch_count: 125" >> /opt/so/saltstack/pillar/masters/$HOSTNAME.sls
- echo " mtu: 1500" >> /opt/so/saltstack/pillar/masters/$HOSTNAME.sls
-
- else
- echo " freq: 0" >> /opt/so/saltstack/pillar/masters/$HOSTNAME.sls
- echo " domainstats: 0" >> /opt/so/saltstack/pillar/masters/$HOSTNAME.sls
- fi
- echo " lsheap: $LS_HEAP_SIZE" >> /opt/so/saltstack/pillar/masters/$HOSTNAME.sls
- echo " lsaccessip: 127.0.0.1" >> /opt/so/saltstack/pillar/masters/$HOSTNAME.sls
- echo " elastalert: 1" >> /opt/so/saltstack/pillar/masters/$HOSTNAME.sls
- echo " ls_pipeline_workers: $CPUCORES" >> /opt/so/saltstack/pillar/masters/$HOSTNAME.sls
- echo " nids_rules: $RULESETUP" >> /opt/so/saltstack/pillar/masters/$HOSTNAME.sls
- echo " oinkcode: $OINKCODE" >> /opt/so/saltstack/pillar/masters/$HOSTNAME.sls
- #echo " access_key: $ACCESS_KEY" >> /opt/so/saltstack/pillar/masters/$HOSTNAME.sls
- #echo " access_secret: $ACCESS_SECRET" >> /opt/so/saltstack/pillar/masters/$HOSTNAME.sls
- echo " es_port: $NODE_ES_PORT" >> /opt/so/saltstack/pillar/masters/$HOSTNAME.sls
- echo " log_size_limit: $LOG_SIZE_LIMIT" >> /opt/so/saltstack/pillar/masters/$HOSTNAME.sls
- echo " cur_close_days: $CURCLOSEDAYS" >> /opt/so/saltstack/pillar/masters/$HOSTNAME.sls
- #echo " mysqlpass: $MYSQLPASS" >> /opt/so/saltstack/pillar/masters/$HOSTNAME.sls
- #echo " fleetpass: $FLEETPASS" >> /opt/so/saltstack/pillar/masters/$HOSTNAME.sls
- echo " grafana: $GRAFANA" >> /opt/so/saltstack/pillar/masters/$HOSTNAME.sls
- echo " osquery: $OSQUERY" >> /opt/so/saltstack/pillar/masters/$HOSTNAME.sls
- echo " wazuh: $WAZUH" >> /opt/so/saltstack/pillar/masters/$HOSTNAME.sls
- echo " thehive: $THEHIVE" >> /opt/so/saltstack/pillar/masters/$HOSTNAME.sls
- echo " playbook: $PLAYBOOK" >> /opt/so/saltstack/pillar/masters/$HOSTNAME.sls
- }
-
-master_static() {
-
- # Create a static file for global values
- touch /opt/so/saltstack/pillar/static.sls
-
- echo "static:" > /opt/so/saltstack/pillar/static.sls
- echo " hnmaster: $HNMASTER" >> /opt/so/saltstack/pillar/static.sls
- echo " ntpserver: $NTPSERVER" >> /opt/so/saltstack/pillar/static.sls
- echo " proxy: $PROXY" >> /opt/so/saltstack/pillar/static.sls
- echo " broversion: $BROVERSION" >> /opt/so/saltstack/pillar/static.sls
- echo " ids: $NIDS" >> /opt/so/saltstack/pillar/static.sls
- echo " masterip: $MAINIP" >> /opt/so/saltstack/pillar/static.sls
- echo " hiveuser: hiveadmin" >> /opt/so/saltstack/pillar/static.sls
- echo " hivepassword: hivechangeme" >> /opt/so/saltstack/pillar/static.sls
- echo " hivekey: $HIVEKEY" >> /opt/so/saltstack/pillar/static.sls
- echo " fleetsetup: 0" >> /opt/so/saltstack/pillar/static.sls
- echo " sensoronikey: $SENSORONIKEY" >> /opt/so/saltstack/pillar/static.sls
- if [[ $MASTERUPDATES == 'MASTER' ]]; then
- echo " masterupdate: 1" >> /opt/so/saltstack/pillar/static.sls
- else
- echo " masterupdate: 0" >> /opt/so/saltstack/pillar/static.sls
- fi
-}
-
-minio_generate_keys() {
-
- local charSet="[:graph:]"
-
- ACCESS_KEY=$(cat /dev/urandom | tr -cd "$charSet" | tr -d \' | tr -d \" | head -c 20)
- ACCESS_SECRET=$(cat /dev/urandom | tr -cd "$charSet" | tr -d \' | tr -d \" | head -c 40)
-
-}
-
-node_pillar() {
-
- # Create the node pillar
- touch $TMP/$HOSTNAME.sls
- echo "node:" > $TMP/$HOSTNAME.sls
- echo " mainip: $MAINIP" >> $TMP/$HOSTNAME.sls
- echo " mainint: $MAININT" >> $TMP/$HOSTNAME.sls
- echo " esheap: $NODE_ES_HEAP_SIZE" >> $TMP/$HOSTNAME.sls
- echo " esclustername: {{ grains.host }}" >> $TMP/$HOSTNAME.sls
- echo " lsheap: $NODE_LS_HEAP_SIZE" >> $TMP/$HOSTNAME.sls
- echo " ls_pipeline_workers: $LSPIPELINEWORKERS" >> $TMP/$HOSTNAME.sls
- echo " ls_pipeline_batch_size: $LSPIPELINEBATCH" >> $TMP/$HOSTNAME.sls
- echo " ls_input_threads: $LSINPUTTHREADS" >> $TMP/$HOSTNAME.sls
- echo " ls_batch_count: $LSINPUTBATCHCOUNT" >> $TMP/$HOSTNAME.sls
- echo " es_shard_count: $SHARDCOUNT" >> $TMP/$HOSTNAME.sls
- echo " node_type: $NODETYPE" >> $TMP/$HOSTNAME.sls
- echo " es_port: $NODE_ES_PORT" >> $TMP/$HOSTNAME.sls
- echo " log_size_limit: $LOG_SIZE_LIMIT" >> $TMP/$HOSTNAME.sls
- echo " cur_close_days: $CURCLOSEDAYS" >> $TMP/$HOSTNAME.sls
-
-}
-
-process_components() {
- CLEAN=${COMPONENTS//\"}
- GRAFANA=0
- OSQUERY=0
- WAZUH=0
- THEHIVE=0
- PLAYBOOK=0
-
- IFS=$' '
- for item in $(echo "$CLEAN"); do
- let $item=1
- done
- unset IFS
-}
-
-saltify() {
-
- # Install updates and Salt
- if [ $OS == 'centos' ]; then
- ADDUSER=adduser
-
- if [ $INSTALLTYPE == 'MASTERONLY' ] || [ $INSTALLTYPE == 'EVALMODE' ]; then
- yum -y install https://repo.saltstack.com/yum/redhat/salt-repo-latest-2.el7.noarch.rpm
- cp /etc/yum.repos.d/salt-latest.repo /etc/yum.repos.d/salt-2018-3.repo
- sed -i 's/latest/2018.3/g' /etc/yum.repos.d/salt-2018-3.repo
- cat > /etc/yum.repos.d/wazuh.repo <<\EOF
-[wazuh_repo]
-gpgcheck=1
-gpgkey=https://packages.wazuh.com/key/GPG-KEY-WAZUH
-enabled=1
-name=Wazuh repository
-baseurl=https://packages.wazuh.com/3.x/yum/
-protect=1
-EOF
-
- else
-
- if [ $MASTERUPDATES == 'MASTER' ]; then
-
- # Create the GPG Public Key for the Salt Repo
- echo "-----BEGIN PGP PUBLIC KEY BLOCK-----" > /etc/pki/rpm-gpg/saltstack-signing-key
- echo "Version: GnuPG v2.0.22 (GNU/Linux)" >> /etc/pki/rpm-gpg/saltstack-signing-key
- echo "" >> /etc/pki/rpm-gpg/saltstack-signing-key
- echo "mQENBFOpvpgBCADkP656H41i8fpplEEB8IeLhugyC2rTEwwSclb8tQNYtUiGdna9" >> /etc/pki/rpm-gpg/saltstack-signing-key
- echo "m38kb0OS2DDrEdtdQb2hWCnswxaAkUunb2qq18vd3dBvlnI+C4/xu5ksZZkRj+fW" >> /etc/pki/rpm-gpg/saltstack-signing-key
- echo "tArNR18V+2jkwcG26m8AxIrT+m4M6/bgnSfHTBtT5adNfVcTHqiT1JtCbQcXmwVw" >> /etc/pki/rpm-gpg/saltstack-signing-key
- echo "WbqS6v/LhcsBE//SHne4uBCK/GHxZHhQ5jz5h+3vWeV4gvxS3Xu6v1IlIpLDwUts" >> /etc/pki/rpm-gpg/saltstack-signing-key
- echo "kT1DumfynYnnZmWTGc6SYyIFXTPJLtnoWDb9OBdWgZxXfHEcBsKGha+bXO+m2tHA" >> /etc/pki/rpm-gpg/saltstack-signing-key
- echo "gNneN9i5f8oNxo5njrL8jkCckOpNpng18BKXABEBAAG0MlNhbHRTdGFjayBQYWNr" >> /etc/pki/rpm-gpg/saltstack-signing-key
- echo "YWdpbmcgVGVhbSA8cGFja2FnaW5nQHNhbHRzdGFjay5jb20+iQE4BBMBAgAiBQJT" >> /etc/pki/rpm-gpg/saltstack-signing-key
- echo "qb6YAhsDBgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgAAKCRAOCKFJ3le/vhkqB/0Q" >> /etc/pki/rpm-gpg/saltstack-signing-key
- echo "WzELZf4d87WApzolLG+zpsJKtt/ueXL1W1KA7JILhXB1uyvVORt8uA9FjmE083o1" >> /etc/pki/rpm-gpg/saltstack-signing-key
- echo "yE66wCya7V8hjNn2lkLXboOUd1UTErlRg1GYbIt++VPscTxHxwpjDGxDB1/fiX2o" >> /etc/pki/rpm-gpg/saltstack-signing-key
- echo "nK5SEpuj4IeIPJVE/uLNAwZyfX8DArLVJ5h8lknwiHlQLGlnOu9ulEAejwAKt9CU" >> /etc/pki/rpm-gpg/saltstack-signing-key
- echo "4oYTszYM4xrbtjB/fR+mPnYh2fBoQO4d/NQiejIEyd9IEEMd/03AJQBuMux62tjA" >> /etc/pki/rpm-gpg/saltstack-signing-key
- echo "/NwvQ9eqNgLw9NisFNHRWtP4jhAOsshv1WW+zPzu3ozoO+lLHixUIz7fqRk38q8Q" >> /etc/pki/rpm-gpg/saltstack-signing-key
- echo "9oNR31KvrkSNrFbA3D89uQENBFOpvpgBCADJ79iH10AfAfpTBEQwa6vzUI3Eltqb" >> /etc/pki/rpm-gpg/saltstack-signing-key
- echo "9aZ0xbZV8V/8pnuU7rqM7Z+nJgldibFk4gFG2bHCG1C5aEH/FmcOMvTKDhJSFQUx" >> /etc/pki/rpm-gpg/saltstack-signing-key
- echo "uhgxttMArXm2c22OSy1hpsnVG68G32Nag/QFEJ++3hNnbyGZpHnPiYgej3FrerQJ" >> /etc/pki/rpm-gpg/saltstack-signing-key
- echo "zv456wIsxRDMvJ1NZQB3twoCqwapC6FJE2hukSdWB5yCYpWlZJXBKzlYz/gwD/Fr" >> /etc/pki/rpm-gpg/saltstack-signing-key
- echo "GL578WrLhKw3UvnJmlpqQaDKwmV2s7MsoZogC6wkHE92kGPG2GmoRD3ALjmCvN1E" >> /etc/pki/rpm-gpg/saltstack-signing-key
- echo "PsIsQGnwpcXsRpYVCoW7e2nW4wUf7IkFZ94yOCmUq6WreWI4NggRcFC5ABEBAAGJ" >> /etc/pki/rpm-gpg/saltstack-signing-key
- echo "AR8EGAECAAkFAlOpvpgCGwwACgkQDgihSd5Xv74/NggA08kEdBkiWWwJZUZEy7cK" >> /etc/pki/rpm-gpg/saltstack-signing-key
- echo "WWcgjnRuOHd4rPeT+vQbOWGu6x4bxuVf9aTiYkf7ZjVF2lPn97EXOEGFWPZeZbH4" >> /etc/pki/rpm-gpg/saltstack-signing-key
- echo "vdRFH9jMtP+rrLt6+3c9j0M8SIJYwBL1+CNpEC/BuHj/Ra/cmnG5ZNhYebm76h5f" >> /etc/pki/rpm-gpg/saltstack-signing-key
- echo "T9iPW9fFww36FzFka4VPlvA4oB7ebBtquFg3sdQNU/MmTVV4jPFWXxh4oRDDR+8N" >> /etc/pki/rpm-gpg/saltstack-signing-key
- echo "1bcPnbB11b5ary99F/mqr7RgQ+YFF0uKRE3SKa7a+6cIuHEZ7Za+zhPaQlzAOZlx" >> /etc/pki/rpm-gpg/saltstack-signing-key
- echo "fuBmScum8uQTrEF5+Um5zkwC7EXTdH1co/+/V/fpOtxIg4XO4kcugZefVm5ERfVS" >> /etc/pki/rpm-gpg/saltstack-signing-key
- echo "MA==" >> /etc/pki/rpm-gpg/saltstack-signing-key
- echo "=dtMN" >> /etc/pki/rpm-gpg/saltstack-signing-key
- echo "-----END PGP PUBLIC KEY BLOCK-----" >> /etc/pki/rpm-gpg/saltstack-signing-key
-
- # Add the Wazuh Key
- cat > /etc/pki/rpm-gpg/GPG-KEY-WAZUH <<\EOF
------BEGIN PGP PUBLIC KEY BLOCK-----
-Version: GnuPG v1
-
-mQINBFeeyYwBEACyf4VwV8c2++J5BmCl6ofLCtSIW3UoVrF4F+P19k/0ngnSfjWb
-8pSWB11HjZ3Mr4YQeiD7yY06UZkrCXk+KXDlUjMK3VOY7oNPkqzNaP6+8bDwj4UA
-hADMkaXBvWooGizhCoBtDb1bSbHKcAnQ3PTdiuaqF5bcyKk8hv939CHulL2xH+BP
-mmTBi+PM83pwvR+VRTOT7QSzf29lW1jD79v4rtXHJs4KCz/amT/nUm/tBpv3q0sT
-9M9rH7MTQPdqvzMl122JcZST75GzFJFl0XdSHd5PAh2mV8qYak5NYNnwA41UQVIa
-+xqhSu44liSeZWUfRdhrQ/Nb01KV8lLAs11Sz787xkdF4ad25V/Rtg/s4UXt35K3
-klGOBwDnzPgHK/OK2PescI5Ve1z4x1C2bkGze+gk/3IcfGJwKZDfKzTtqkZ0MgpN
-7RGghjkH4wpFmuswFFZRyV+s7jXYpxAesElDSmPJ0O07O4lQXQMROE+a2OCcm0eF
-3+Cr6qxGtOp1oYMOVH0vOLYTpwOkAM12/qm7/fYuVPBQtVpTojjV5GDl2uGq7p0o
-h9hyWnLeNRbAha0px6rXcF9wLwU5n7mH75mq5clps3sP1q1/VtP/Fr84Lm7OGke4
-9eD+tPNCdRx78RNWzhkdQxHk/b22LCn1v6p1Q0qBco9vw6eawEkz1qwAjQARAQAB
-tDFXYXp1aC5jb20gKFdhenVoIFNpZ25pbmcgS2V5KSA8c3VwcG9ydEB3YXp1aC5j
-b20+iQI9BBMBCAAnBQJXnsmMAhsDBQkFo5qABQsJCAcDBRUKCQgLBRYCAwEAAh4B
-AheAAAoJEJaz7l8pERFFHEsQAIaslejcW2NgjgOZuvn1Bht4JFMbCIPOekg4Z5yF
-binRz0wmA7JNaawDHTBYa6L+A2Xneu/LmuRjFRMesqopUukVeGQgHBXbGMzY46eI
-rqq/xgvgWzHSbWweiOX0nn+exbEAM5IyW+efkWNz0e8xM1LcxdYZxkVOqFqkp3Wv
-J9QUKw6z9ifUOx++G8UO307O3hT2f+x4MUoGZeOF4q1fNy/VyBS2lMg2HF7GWy2y
-kjbSe0p2VOFGEZLuu2f5tpPNth9UJiTliZKmgSk/zbKYmSjiVY2eDqNJ4qjuqes0
-vhpUaBjA+DgkEWUrUVXG5yfQDzTiYIF84LknjSJBYSLZ4ABsMjNO+GApiFPcih+B
-Xc9Kx7E9RNsNTDqvx40y+xmxDOzVIssXeKqwO8r5IdG3K7dkt2Vkc/7oHOpcKwE5
-8uASMPiqqMo+t1RVa6Spckp3Zz8REILbotnnVwDIwo2HmgASirMGUcttEJzubaIa
-Mv43GKs8RUH9s5NenC02lfZG7D8WQCz5ZH7yEWrt5bCaQRNDXjhsYE17SZ/ToHi3
-OpWu050ECWOHdxlXNG3dOWIdFDdBJM7UfUNSSOe2Y5RLsWfwvMFGbfpdlgJcMSDV
-X+ienkrtXhBteTu0dwPu6HZTFOjSftvtAo0VIqGQrKMvKelkkdNGdDFLQw2mUDcw
-EQj6uQINBFeeyYwBEADD1Y3zW5OrnYZ6ghTd5PXDAMB8Z1ienmnb2IUzLM+i0yE2
-TpKSP/XYCTBhFa390rYgFO2lbLDVsiz7Txd94nHrdWXGEQfwrbxsvdlLLWk7iN8l
-Fb4B60OfRi3yoR96a/kIPNa0x26+n79LtDuWZ/DTq5JSHztdd9F1sr3h8i5zYmtv
-luj99ZorpwYejbBVUm0+gP0ioaXM37uO56UFVQk3po9GaS+GtLnlgoE5volgNYyO
-rkeIua4uZVsifREkHCKoLJip6P7S3kTyfrpiSLhouEZ7kV1lbMbFgvHXyjm+/AIx
-HIBy+H+e+HNt5gZzTKUJsuBjx44+4jYsOR67EjOdtPOpgiuJXhedzShEO6rbu/O4
-wM1rX45ZXDYa2FGblHCQ/VaS0ttFtztk91xwlWvjTR8vGvp5tIfCi+1GixPRQpbN
-Y/oq8Kv4A7vB3JlJscJCljvRgaX0gTBzlaF6Gq0FdcWEl5F1zvsWCSc/Fv5WrUPY
-5mG0m69YUTeVO6cZS1aiu9Qh3QAT/7NbUuGXIaAxKnu+kkjLSz+nTTlOyvbG7BVF
-a6sDmv48Wqicebkc/rCtO4g8lO7KoA2xC/K/6PAxDrLkVyw8WPsAendmezNfHU+V
-32pvWoQoQqu8ysoaEYc/j9fN4H3mEBCN3QUJYCugmHP0pu7VtpWwwMUqcGeUVwAR
-AQABiQIlBBgBCAAPBQJXnsmMAhsMBQkFo5qAAAoJEJaz7l8pERFFz8IP/jfBxJSB
-iOw+uML+C4aeYxuHSdxmSsrJclYjkw7Asha/fm4Kkve00YAW8TGxwH2kgS72ooNJ
-1Q7hUxNbVyrJjQDSMkRKwghmrPnUM3UyHmE0dq+G2NhaPdFo8rKifLOPgwaWAfSV
-wgMTK86o0kqRbGpXgVIG5eRwv2FcxM3xGfy7sub07J2VEz7Ba6rYQ3NTbPK42AtV
-+wRJDXcgS7y6ios4XQtSbIB5f6GI56zVlwfRd3hovV9ZAIJQ6DKM31wD6Kt/pRun
-DjwMZu0/82JMoqmxX/00sNdDT1S13guCfl1WhBu7y1ja9MUX5OpUzyEKg5sxme+L
-iY2Rhs6CjmbTm8ER4Uj8ydKyVTy8zbumbB6T8IwCAbEMtPxm6pKh/tgLpoJ+Bj0y
-AsGjmhV7R6PKZSDXg7/qQI98iC6DtWc9ibC/QuHLcvm3hz40mBgXAemPJygpxGst
-mVtU7O3oHw9cIUpkbMuVqSxgPFmSSq5vEYkka1CYeg8bOz6aCTuO5J0GDlLrpjtx
-6lyImbZAF/8zKnW19aq5lshT2qJlTQlZRwwDZX5rONhA6T8IEUnUyD4rAIQFwfJ+
-gsXa4ojD/tA9NLdiNeyEcNfyX3FZwXWCtVLXflzdRN293FKamcdnMjVRjkCnp7iu
-7eO7nMgcRoWddeU+2aJFqCoQtKCp/5EKhFey
-=UIVm
------END PGP PUBLIC KEY BLOCK-----
-EOF
-
- # Proxy is hating on me.. Lets just set it manually
- echo "[salt-latest]" > /etc/yum.repos.d/salt-latest.repo
- echo "name=SaltStack Latest Release Channel for RHEL/Centos \$releasever" >> /etc/yum.repos.d/salt-latest.repo
- echo "baseurl=https://repo.saltstack.com/yum/redhat/7/\$basearch/latest" >> /etc/yum.repos.d/salt-latest.repo
- echo "failovermethod=priority" >> /etc/yum.repos.d/salt-latest.repo
- echo "enabled=1" >> /etc/yum.repos.d/salt-latest.repo
- echo "gpgcheck=1" >> /etc/yum.repos.d/salt-latest.repo
- echo "gpgkey=file:///etc/pki/rpm-gpg/saltstack-signing-key" >> /etc/yum.repos.d/salt-latest.repo
-
- # Proxy is hating on me.. Lets just set it manually
- echo "[salt-2018.3]" > /etc/yum.repos.d/salt-2018-3.repo
- echo "name=SaltStack Latest Release Channel for RHEL/Centos \$releasever" >> /etc/yum.repos.d/salt-2018-3.repo
- echo "baseurl=https://repo.saltstack.com/yum/redhat/7/\$basearch/2018.3" >> /etc/yum.repos.d/salt-2018-3.repo
- echo "failovermethod=priority" >> /etc/yum.repos.d/salt-2018-3.repo
- echo "enabled=1" >> /etc/yum.repos.d/salt-2018-3.repo
- echo "gpgcheck=1" >> /etc/yum.repos.d/salt-2018-3.repo
- echo "gpgkey=file:///etc/pki/rpm-gpg/saltstack-signing-key" >> /etc/yum.repos.d/salt-2018-3.repo
-
- cat > /etc/yum.repos.d/wazuh.repo <<\EOF
-[wazuh_repo]
-gpgcheck=1
-gpgkey=file:///etc/pki/rpm-gpg/GPG-KEY-WAZUH
-enabled=1
-name=Wazuh repository
-baseurl=https://packages.wazuh.com/3.x/yum/
-protect=1
-EOF
- else
- yum -y install https://repo.saltstack.com/yum/redhat/salt-repo-latest-2.el7.noarch.rpm
- cp /etc/yum.repos.d/salt-latest.repo /etc/yum.repos.d/salt-2018-3.repo
- sed -i 's/latest/2018.3/g' /etc/yum.repos.d/salt-2018-3.repo
-cat > /etc/yum.repos.d/wazuh.repo <<\EOF
-[wazuh_repo]
-gpgcheck=1
-gpgkey=file:///etc/pki/rpm-gpg/GPG-KEY-WAZUH
-enabled=1
-name=Wazuh repository
-baseurl=https://packages.wazuh.com/3.x/yum/
-protect=1
-EOF
- fi
- fi
-
- yum clean expire-cache
- yum -y install salt-minion-2018.3.4 yum-utils device-mapper-persistent-data lvm2 openssl
- yum -y update exclude=salt*
- systemctl enable salt-minion
-
- # Nasty hack but required for now
- if [ $INSTALLTYPE == 'MASTERONLY' ] || [ $INSTALLTYPE == 'EVALMODE' ]; then
- yum -y install salt-master-2018.3.4 python-m2crypto salt-minion-2018.3.4 m2crypto
- systemctl enable salt-master
- else
- yum -y install salt-minion-2018.3.4 python-m2m2crypto m2crypto
- fi
- echo "exclude=salt*" >> /etc/yum.conf
-
- else
- ADDUSER=useradd
- DEBIAN_FRONTEND=noninteractive apt-get -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" upgrade
-
- # Add the pre-requisites for installing docker-ce
- apt-get -y install ca-certificates curl software-properties-common apt-transport-https openssl >> $SETUPLOG 2>&1
-
- # Grab the version from the os-release file
- UVER=$(grep VERSION_ID /etc/os-release | awk -F '[ "]' '{print $2}')
-
- # Nasty hack but required for now
- if [ $INSTALLTYPE == 'MASTERONLY' ] || [ $INSTALLTYPE == 'EVALMODE' ]; then
-
- # Install the repo for salt
- wget --inet4-only -O - https://repo.saltstack.com/apt/ubuntu/$UVER/amd64/latest/SALTSTACK-GPG-KEY.pub | apt-key add -
- wget --inet4-only -O - https://repo.saltstack.com/apt/ubuntu/$UVER/amd64/2018.3/SALTSTACK-GPG-KEY.pub | apt-key add -
- echo "deb http://repo.saltstack.com/apt/ubuntu/$UVER/amd64/latest xenial main" > /etc/apt/sources.list.d/saltstack.list
- echo "deb http://repo.saltstack.com/apt/ubuntu/$UVER/amd64/2018.3 xenial main" > /etc/apt/sources.list.d/saltstack2018.list
-
- # Lets get the docker repo added
- curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
- add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
-
- # Create a place for the keys
- mkdir -p /opt/so/gpg
- wget --inet4-only -O /opt/so/gpg/SALTSTACK-GPG-KEY.pub https://repo.saltstack.com/apt/ubuntu/$UVER/amd64/latest/SALTSTACK-GPG-KEY.pub
- wget --inet4-only -O /opt/so/gpg/docker.pub https://download.docker.com/linux/ubuntu/gpg
- wget --inet4-only -O /opt/so/gpg/GPG-KEY-WAZUH https://packages.wazuh.com/key/GPG-KEY-WAZUH
-
- # Get key and install wazuh
- curl -s https://packages.wazuh.com/key/GPG-KEY-WAZUH | apt-key add -
- # Add repo
- echo "deb https://packages.wazuh.com/3.x/apt/ stable main" | tee /etc/apt/sources.list.d/wazuh.list
-
- # Initialize the new repos
- apt-get update >> $SETUPLOG 2>&1
- apt-get -y install salt-minion=2018.3.4+ds-1 salt-common=2018.3.4+ds-1 python-m2crypto >> $SETUPLOG 2>&1
- apt-mark hold salt-minion salt-common
-
- else
-
- # Copy down the gpg keys and install them from the master
- mkdir $TMP/gpg
- scp socore@$MSRV:/opt/so/gpg/* $TMP/gpg
- apt-key add $TMP/gpg/SALTSTACK-GPG-KEY.pub
- apt-key add $TMP/gpg/GPG-KEY-WAZUH
- echo "deb http://repo.saltstack.com/apt/ubuntu/$UVER/amd64/latest xenial main" > /etc/apt/sources.list.d/saltstack.list
- echo "deb https://packages.wazuh.com/3.x/apt/ stable main" | tee /etc/apt/sources.list.d/wazuh.list
- # Initialize the new repos
- apt-get update >> $SETUPLOG 2>&1
- apt-get -y install salt-minion=2018.3.4+ds-1 salt-common=2018.3.4+ds-1 python-m2crypto >> $SETUPLOG 2>&1
- apt-mark hold salt-minion salt-common
-
- fi
-
- fi
-
-}
-
-salt_checkin() {
- # Master State to Fix Mine Usage
- if [ $INSTALLTYPE == 'MASTERONLY' ] || [ $INSTALLTYPE == 'EVALMODE' ]; then
- echo "Building Certificate Authority"
- salt-call state.apply ca >> $SETUPLOG 2>&1
- echo " *** Restarting Salt to fix any SSL errors. ***"
- service salt-master restart >> $SETUPLOG 2>&1
- sleep 5
- service salt-minion restart >> $SETUPLOG 2>&1
- sleep 15
- echo " Applyng a mine hack "
- sudo salt '*' mine.send x509.get_pem_entries glob_path=/etc/pki/ca.crt >> $SETUPLOG 2>&1
- echo " Applying SSL state "
- salt-call state.apply ssl >> $SETUPLOG 2>&1
- echo "Still Working... Hang in there"
- #salt-call state.highstate
-
- else
-
- # Run Checkin
- salt-call state.apply ca >> $SETUPLOG 2>&1
- salt-call state.apply ssl >> $SETUPLOG 2>&1
- #salt-call state.highstate >> $SETUPLOG 2>&1
-
- fi
-
-}
-
-salt_checkin_message() {
-
- # Warn the user that this might take a while
- echo "####################################################"
- echo "## ##"
- echo "## Applying and Installing everything ##"
- echo "## (This will take a while) ##"
- echo "## ##"
- echo "####################################################"
-
-}
-
-salt_firstcheckin() {
-
- #First Checkin
- salt-call state.highstate >> $SETUPLOG 2>&1
-
-}
-
-salt_master_directories() {
-
- # Create salt paster directories
- mkdir -p /opt/so/saltstack/salt
- mkdir -p /opt/so/saltstack/pillar
-
- # Copy over the salt code and templates
- cp -R pillar/* /opt/so/saltstack/pillar/
- chmod +x /opt/so/saltstack/pillar/firewall/addfirewall.sh
- chmod +x /opt/so/saltstack/pillar/data/addtotab.sh
- cp -R salt/* /opt/so/saltstack/salt/
-
-}
-
-sensor_pillar() {
-
- # Create the sensor pillar
- touch $TMP/$HOSTNAME.sls
- echo "sensor:" > $TMP/$HOSTNAME.sls
- echo " interface: bond0" >> $TMP/$HOSTNAME.sls
- echo " mainip: $MAINIP" >> $TMP/$HOSTNAME.sls
- echo " mainint: $MAININT" >> $TMP/$HOSTNAME.sls
- if [ $NSMSETUP == 'ADVANCED' ]; then
- echo " bro_pins:" >> $TMP/$HOSTNAME.sls
- for PIN in $BROPINS; do
- PIN=$(echo $PIN | cut -d\" -f2)
- echo " - $PIN" >> $TMP/$HOSTNAME.sls
- done
- echo " suripins:" >> $TMP/$HOSTNAME.sls
- for SPIN in $SURIPINS; do
- SPIN=$(echo $SPIN | cut -d\" -f2)
- echo " - $SPIN" >> $TMP/$HOSTNAME.sls
- done
- else
- echo " bro_lbprocs: $BASICBRO" >> $TMP/$HOSTNAME.sls
- echo " suriprocs: $BASICSURI" >> $TMP/$HOSTNAME.sls
- fi
- echo " brobpf:" >> $TMP/$HOSTNAME.sls
- echo " pcapbpf:" >> $TMP/$HOSTNAME.sls
- echo " nidsbpf:" >> $TMP/$HOSTNAME.sls
- echo " master: $MSRV" >> $TMP/$HOSTNAME.sls
- echo " mtu: $MTU" >> $TMP/$HOSTNAME.sls
- if [ $HNSENSOR != 'inherit' ]; then
- echo " hnsensor: $HNSENSOR" >> $TMP/$HOSTNAME.sls
- fi
- echo " access_key: $ACCESS_KEY" >> $TMP/$HOSTNAME.sls
- echo " access_secret: $ACCESS_SECRET" >> $TMP/$HOSTNAME.sls
-
-}
-
-set_hostname() {
-
- hostnamectl set-hostname --static $HOSTNAME
- echo "127.0.0.1 $HOSTNAME $HOSTNAME.localdomain localhost localhost.localdomain localhost4 localhost4.localdomain" > /etc/hosts
- echo "::1 localhost localhost.localdomain localhost6 localhost6.localdomain6" >> /etc/hosts
- echo $HOSTNAME > /etc/hostname
-
-}
-
-set_initial_firewall_policy() {
-
- get_main_ip
- if [ $INSTALLTYPE == 'MASTERONLY' ]; then
- printf " - $MAINIP\n" >> /opt/so/saltstack/pillar/firewall/minions.sls
- printf " - $MAINIP\n" >> /opt/so/saltstack/pillar/firewall/masterfw.sls
- /opt/so/saltstack/pillar/data/addtotab.sh mastertab $HOSTNAME $MAINIP $CPUCORES $RANDOMUID $MAININT $FSROOT $FSNSM
- fi
-
- if [ $INSTALLTYPE == 'EVALMODE' ]; then
- printf " - $MAINIP\n" >> /opt/so/saltstack/pillar/firewall/minions.sls
- printf " - $MAINIP\n" >> /opt/so/saltstack/pillar/firewall/masterfw.sls
- printf " - $MAINIP\n" >> /opt/so/saltstack/pillar/firewall/forward_nodes.sls
- printf " - $MAINIP\n" >> /opt/so/saltstack/pillar/firewall/storage_nodes.sls
- /opt/so/saltstack/pillar/data/addtotab.sh evaltab $HOSTNAME $MAINIP $CPUCORES $RANDOMUID $MAININT $FSROOT $FSNSM bond0
- fi
-
- if [ $INSTALLTYPE == 'SENSORONLY' ]; then
- ssh -i /root/.ssh/so.key socore@$MSRV sudo /opt/so/saltstack/pillar/firewall/addfirewall.sh minions $MAINIP
- ssh -i /root/.ssh/so.key socore@$MSRV sudo /opt/so/saltstack/pillar/firewall/addfirewall.sh forward_nodes $MAINIP
- ssh -i /root/.ssh/so.key socore@$MSRV sudo /opt/so/saltstack/pillar/data/addtotab.sh sensorstab $HOSTNAME $MAINIP $CPUCORES $RANDOMUID $MAININT $FSROOT $FSNSM bond0
- fi
-
- if [ $INSTALLTYPE == 'STORAGENODE' ]; then
- ssh -i /root/.ssh/so.key socore@$MSRV sudo /opt/so/saltstack/pillar/firewall/addfirewall.sh minions $MAINIP
- ssh -i /root/.ssh/so.key socore@$MSRV sudo /opt/so/saltstack/pillar/firewall/addfirewall.sh storage_nodes $MAINIP
- ssh -i /root/.ssh/so.key socore@$MSRV sudo /opt/so/saltstack/pillar/data/addtotab.sh nodestab $HOSTNAME $MAINIP $CPUCORES $RANDOMUID $MAININT $FSROOT $FSNSM
- fi
-
- if [ $INSTALLTYPE == 'PARSINGNODE' ]; then
- echo "blah"
- fi
-
- if [ $INSTALLTYPE == 'HOTNODE' ]; then
- echo "blah"
- fi
-
- if [ $INSTALLTYPE == 'WARMNODE' ]; then
- echo "blah"
- fi
-
-}
-
-set_node_type() {
-
- # Determine the node type based on whiplash choice
- if [ $INSTALLTYPE == 'STORAGENODE' ] || [ $INSTALLTYPE == 'EVALMODE' ]; then
- NODETYPE='storage'
- fi
- if [ $INSTALLTYPE == 'PARSINGNODE' ]; then
- NODETYPE='parser'
- fi
- if [ $INSTALLTYPE == 'HOTNODE' ]; then
- NODETYPE='hot'
- fi
- if [ $INSTALLTYPE == 'WARMNODE' ]; then
- NODETYPE='warm'
- fi
-
-}
-
-set_updates() {
- echo "MASTERUPDATES is $MASTERUPDATES"
- if [ $MASTERUPDATES == 'MASTER' ]; then
- if [ $OS == 'centos' ]; then
- if ! grep -q $MSRV /etc/yum.conf; then
- echo "proxy=http://$MSRV:3142" >> /etc/yum.conf
- fi
-
- else
-
- # Set it up so the updates roll through the master
- echo "Acquire::http::Proxy \"http://$MSRV:3142\";" > /etc/apt/apt.conf.d/00Proxy
- echo "Acquire::https::Proxy \"http://$MSRV:3142\";" >> /etc/apt/apt.conf.d/00Proxy
-
- fi
- fi
-}
-
-update_sudoers() {
-
- if ! grep -qE '^socore\ ALL=\(ALL\)\ NOPASSWD:(\/usr\/bin\/salt\-key|\/opt\/so\/saltstack)' /etc/sudoers; then
- # Update Sudoers so that socore can accept keys without a password
- echo "socore ALL=(ALL) NOPASSWD:/usr/bin/salt-key" | sudo tee -a /etc/sudoers
- echo "socore ALL=(ALL) NOPASSWD:/opt/so/saltstack/pillar/firewall/addfirewall.sh" | sudo tee -a /etc/sudoers
- echo "socore ALL=(ALL) NOPASSWD:/opt/so/saltstack/pillar/data/addtotab.sh" | sudo tee -a /etc/sudoers
- else
- echo "User socore already granted sudo privileges"
- fi
-
-}
-
-###########################################
-## ##
-## Whiptail Menu Section ##
-## ##
-###########################################
-
-whiptail_basic_bro() {
-
- BASICBRO=$(whiptail --title "Security Onion Setup" --inputbox \
- "Enter the number of bro processes:" 10 60 $LBPROCS 3>&1 1>&2 2>&3)
-
- local exitstatus=$?
- whiptail_check_exitstatus $exitstatus
-
-}
-
-whiptail_basic_suri() {
-
- BASICSURI=$(whiptail --title "Security Onion Setup" --inputbox \
- "Enter the number of Suricata Processes:" 10 60 $LBPROCS 3>&1 1>&2 2>&3)
-
- local exitstatus=$?
- whiptail_check_exitstatus $exitstatus
-
-}
-
-whiptail_bro_pins() {
-
- BROPINS=$(whiptail --noitem --title "Pin Bro CPUS" --checklist "Please Select $LBPROCS cores to pin Bro to:" 20 78 12 ${LISTCORES[@]} 3>&1 1>&2 2>&3 )
-
- local exitstatus=$?
- whiptail_check_exitstatus $exitstatus
-
-
-}
-
-whiptail_bro_version() {
-
- BROVERSION=$(whiptail --title "Security Onion Setup" --radiolist "What tool would you like to use to generate meta data?" 20 78 4 "ZEEK" "Install Zeek (aka Bro)" ON \
- "COMMUNITY" "Install Community NSM" OFF "SURICATA" "SUPER EXPERIMENTAL" OFF 3>&1 1>&2 2>&3)
-
- local exitstatus=$?
- whiptail_check_exitstatus $exitstatus
-
-}
-
-whiptail_bond_nics() {
-
- BNICS=$(whiptail --title "NIC Setup" --checklist "Please add NICs to the Monitor Interface" 20 78 12 ${FNICS[@]} 3>&1 1>&2 2>&3 )
-
- while [ -z "$BNICS" ]
- do
- BNICS=$(whiptail --title "NIC Setup" --checklist "Please add NICs to the Monitor Interface" 20 78 12 ${FNICS[@]} 3>&1 1>&2 2>&3 )
- done
-
- local exitstatus=$?
- whiptail_check_exitstatus $exitstatus
-
-}
-
-whiptail_bond_nics_mtu() {
-
- # Set the MTU on the monitor interface
- MTU=$(whiptail --title "Security Onion Setup" --inputbox \
- "Enter the MTU for the monitor NICs" 10 60 1500 3>&1 1>&2 2>&3)
-
- local exitstatus=$?
- whiptail_check_exitstatus $exitstatus
-
-}
-
-whiptail_cancel() {
-
- whiptail --title "Security Onion Setup" --msgbox "Cancelling Setup. No changes have been made." 8 78
- install_cleanup
- exit
-
-}
-
-whiptail_check_exitstatus() {
-
- if [ $1 == '1' ]; then
- echo "They hit cancel"
- whiptail_cancel
- fi
-
-}
-
-whiptail_create_socore_user() {
-
- whiptail --title "Security Onion Setup" --msgbox "Set a password for the socore user. This account is used for adding sensors remotely." 8 78
-
-}
-
-whiptail_create_socore_user_password1() {
-
- COREPASS1=$(whiptail --title "Security Onion Install" --passwordbox \
- "Enter a password for user socore" 10 60 3>&1 1>&2 2>&3)
-
-}
-
-whiptail_create_socore_user_password2() {
-
- COREPASS2=$(whiptail --title "Security Onion Install" --passwordbox \
- "Re-enter a password for user socore" 10 60 3>&1 1>&2 2>&3)
-
-}
-
-whiptail_cur_close_days() {
-
- CURCLOSEDAYS=$(whiptail --title "Security Onion Setup" --inputbox \
- "Please specify the threshold (in days) at which Elasticsearch indices will be closed" 10 60 $CURCLOSEDAYS 3>&1 1>&2 2>&3)
-
- local exitstatus=$?
- whiptail_check_exitstatus $exitstatus
-
-}
-whiptail_enable_components() {
- COMPONENTS=$(whiptail --title "Security Onion Setup" --checklist \
- "Select Components to install" 20 78 8 \
- "GRAFANA" "Enable Grafana for system monitoring" ON \
- "OSQUERY" "Enable Fleet with osquery" ON \
- "WAZUH" "Enable Wazuh" ON \
- "THEHIVE" "Enable TheHive" ON \
- "PLAYBOOK" "Enable Playbook" ON 3>&1 1>&2 2>&3 )
-}
-
-whiptail_eval_adv() {
- EVALADVANCED=$(whiptail --title "Security Onion Setup" --radiolist \
- "Choose your eval install:" 20 78 4 \
- "BASIC" "Install basic components for evaluation" ON \
- "ADVANCED" "Choose additional components to be installed" OFF 3>&1 1>&2 2>&3 )
-}
-
-whiptail_eval_adv_warning() {
- whiptail --title "Security Onion Setup" --msgbox "Please keep in mind the more services that you enable the more RAM that is required." 8 78
-}
-
-whiptail_homenet_master() {
-
- # Ask for the HOME_NET on the master
- HNMASTER=$(whiptail --title "Security Onion Setup" --inputbox \
- "Enter your HOME_NET separated by ," 10 60 10.0.0.0/8,192.168.0.0/16,172.16.0.0/12 3>&1 1>&2 2>&3)
-
- local exitstatus=$?
- whiptail_check_exitstatus $exitstatus
-
-}
-
-whiptail_homenet_sensor() {
-
- # Ask to inherit from master
- whiptail --title "Security Onion Setup" --yesno "Do you want to inherit the HOME_NET from the Master?" 8 78
-
- local exitstatus=$?
- if [ $exitstatus == 0 ]; then
- HNSENSOR=inherit
- else
- HNSENSOR=$(whiptail --title "Security Onion Setup" --inputbox \
- "Enter your HOME_NET separated by ," 10 60 10.0.0.0/8,192.168.0.0/16,172.16.0.0/12 3>&1 1>&2 2>&3)
- fi
-
-}
-
-whiptail_install_type() {
-
- # What kind of install are we doing?
- INSTALLTYPE=$(whiptail --title "Security Onion Setup" --radiolist \
- "Choose Install Type:" 20 78 14 \
- "SENSORONLY" "Create a forward only sensor" ON \
- "STORAGENODE" "Add a Storage Hot Node with parsing" OFF \
- "MASTERONLY" "Start a new grid" OFF \
- "PARSINGNODE" "TODO Add a dedicated Parsing Node" OFF \
- "HOTNODE" "TODO Add a Hot Node (Storage Node without Parsing)" OFF \
- "WARMNODE" "TODO Add a Warm Node to an existing Hot or Storage node" OFF \
- "EVALMODE" "Evaluate all the things" OFF \
- "WAZUH" "TODO Stand Alone Wazuh Node" OFF \
- "STRELKA" "TODO Stand Alone Strelka Node" OFF \
- "FLEET" "TODO Stand Alone Fleet OSQuery Node" OFF 3>&1 1>&2 2>&3 )
-
- local exitstatus=$?
- whiptail_check_exitstatus $exitstatus
-
-}
-
-whiptail_log_size_limit() {
-
- LOG_SIZE_LIMIT=$(whiptail --title "Security Onion Setup" --inputbox \
- "Please specify the amount of disk space (in GB) you would like to allocate for Elasticsearch data storage. \
- By default, this is set to 85% of the disk space allotted for /nsm." 10 60 $LOG_SIZE_LIMIT 3>&1 1>&2 2>&3)
-
- local exitstatus=$?
- whiptail_check_exitstatus $exitstatus
-
-}
-
-
-whiptail_management_nic() {
-
- MNIC=$(whiptail --title "NIC Setup" --radiolist "Please select your management NIC" 20 78 12 ${NICS[@]} 3>&1 1>&2 2>&3 )
-
- while [ -z "$MNIC" ]
- do
- MNIC=$(whiptail --title "NIC Setup" --radiolist "Please select your management NIC" 20 78 12 ${NICS[@]} 3>&1 1>&2 2>&3 )
- done
-
- local exitstatus=$?
- whiptail_check_exitstatus $exitstatus
-
-}
-
-whiptail_nids() {
-
- NIDS=$(whiptail --title "Security Onion Setup" --radiolist \
- "Choose which IDS to run:" 20 78 4 \
- "Suricata" "Suricata 4.X" ON \
- "Snort" "Snort 3.0 Beta" OFF 3>&1 1>&2 2>&3 )
-
- local exitstatus=$?
- whiptail_check_exitstatus $exitstatus
-
-}
-
-whiptail_oinkcode() {
-
- OINKCODE=$(whiptail --title "Security Onion Setup" --inputbox \
- "Enter your oinkcode" 10 60 XXXXXXX 3>&1 1>&2 2>&3)
-
- local exitstatus=$?
- whiptail_check_exitstatus $exitstatus
-
-}
-
-whiptail_make_changes() {
-
- whiptail --title "Security Onion Setup" --yesno "We are going to set this machine up as a $INSTALLTYPE. Please hit YES to make changes or NO to cancel." 8 78
-
- local exitstatus=$?
- whiptail_check_exitstatus $exitstatus
-
-}
-
-whiptail_management_server() {
-
- MSRV=$(whiptail --title "Security Onion Setup" --inputbox \
- "Enter your Master Server HOSTNAME. It is CASE SENSITIVE!" 10 60 XXXX 3>&1 1>&2 2>&3)
-
- # See if it resolves. Otherwise prompt to add to host file
- TESTHOST=$(host $MSRV)
-
- if [[ $TESTHOST = *"not found"* ]]; then
- add_master_hostfile
- fi
-
-
- local exitstatus=$?
- whiptail_check_exitstatus $exitstatus
-
-}
-
-# Ask if you want to do advanced setup of the Master
-whiptail_master_adv() {
- MASTERADV=$(whiptail --title "Security Onion Setup" --radiolist \
- "Choose what type of master install:" 20 78 4 \
- "BASIC" "Install master with recommended settings" ON \
- "ADVANCED" "Do additional configuration to the master" OFF 3>&1 1>&2 2>&3 )
-}
-
-# Ask which additional components to install
-whiptail_master_adv_service_brologs() {
-
- BLOGS=$(whiptail --title "Security Onion Setup" --checklist "Please Select Logs to Send:" 24 78 12 \
- "conn" "Connection Logging" ON \
- "dce_rpc" "RPC Logs" ON \
- "dhcp" "DHCP Logs" ON \
- "dhcpv6" "DHCP IPv6 Logs" ON \
- "dnp3" "DNP3 Logs" ON \
- "dns" "DNS Logs" ON \
- "dpd" "DPD Logs" ON \
- "files" "Files Logs" ON \
- "ftp" "FTP Logs" ON \
- "http" "HTTP Logs" ON \
- "intel" "Intel Hits Logs" ON \
- "irc" "IRC Chat Logs" ON \
- "kerberos" "Kerberos Logs" ON \
- "modbus" "MODBUS Logs" ON \
- "mqtt" "MQTT Logs" ON \
- "notice" "Zeek Notice Logs" ON \
- "ntlm" "NTLM Logs" ON \
- "openvpn" "OPENVPN Logs" ON \
- "pe" "PE Logs" ON \
- "radius" "Radius Logs" ON \
- "rfb" "RFB Logs" ON \
- "rdp" "RDP Logs" ON \
- "signatures" "Signatures Logs" ON \
- "sip" "SIP Logs" ON \
- "smb_files" "SMB Files Logs" ON \
- "smb_mapping" "SMB Mapping Logs" ON \
- "smtp" "SMTP Logs" ON \
- "snmp" "SNMP Logs" ON \
- "software" "Software Logs" ON \
- "ssh" "SSH Logs" ON \
- "ssl" "SSL Logs" ON \
- "syslog" "Syslog Logs" ON \
- "telnet" "Telnet Logs" ON \
- "tunnel" "Tunnel Logs" ON \
- "weird" "Zeek Weird Logs" ON \
- "mysql" "MySQL Logs" ON \
- "socks" "SOCKS Logs" ON \
- "x509" "x.509 Logs" ON 3>&1 1>&2 2>&3 )
-}
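# --checklist prints the selected tags as one space-separated string of
# double-quoted entries (e.g. "conn" "dns" "http"), so whatever consumes $BLOGS
# (presumably bro_logs_enabled later in the setup) has to strip the quotes
# first. A small standalone illustration of that post-processing:
SELECTED='"conn" "dns" "http"'          # example of what a checklist run returns
for zeek_log in $(echo "$SELECTED" | tr -d '"'); do
    echo "would enable Zeek log: $zeek_log"
done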
-
-whiptail_network_notice() {
-
-  whiptail --title "Security Onion Setup" --yesno "Since this is a network install, we assume the management interface, DNS, hostname, etc. are already set up. Hit YES to continue." 8 78
-
- local exitstatus=$?
- whiptail_check_exitstatus $exitstatus
-
-}
-
-whiptail_node_advanced() {
-
- NODESETUP=$(whiptail --title "Security Onion Setup" --radiolist \
- "What type of config would you like to use?:" 20 78 4 \
- "NODEBASIC" "Install Storage Node with recommended settings" ON \
- "NODEADVANCED" "Advanced Node Setup" OFF 3>&1 1>&2 2>&3 )
-
- local exitstatus=$?
- whiptail_check_exitstatus $exitstatus
-
-}
-
-whiptail_node_es_heap() {
-
- es_heapsize
- NODE_ES_HEAP_SIZE=$(whiptail --title "Security Onion Setup" --inputbox \
- "\nEnter ES Heap Size: \n \n(Recommended value is pre-populated)" 10 60 $ES_HEAP_SIZE 3>&1 1>&2 2>&3)
-
- local exitstatus=$?
- whiptail_check_exitstatus $exitstatus
-
-}
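# es_heapsize (defined earlier in the script and not shown here) pre-computes
# the value offered above. The usual Elasticsearch guidance is to derive the
# JVM heap from installed RAM, roughly half of memory but capped near 31g so
# compressed object pointers stay enabled. An illustrative calculation along
# those lines (not the script's actual code):
TOTAL_MB=$(free -m | awk '/^Mem:/ {print $2}')
HEAP_MB=$(( TOTAL_MB / 2 ))
if [ "$HEAP_MB" -gt 31744 ]; then HEAP_MB=31744; fi   # cap at ~31 GB
ES_HEAP_SIZE="${HEAP_MB}m"
echo "Suggested ES heap: $ES_HEAP_SIZE"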
-
-whiptail_node_ls_heap() {
-
- ls_heapsize
- NODE_LS_HEAP_SIZE=$(whiptail --title "Security Onion Setup" --inputbox \
- "\nEnter LogStash Heap Size: \n \n(Recommended value is pre-populated)" 10 60 $LS_HEAP_SIZE 3>&1 1>&2 2>&3)
-
- local exitstatus=$?
- whiptail_check_exitstatus $exitstatus
-
-}
-
-whiptail_node_ls_pipeline_worker() {
-
- LSPIPELINEWORKERS=$(whiptail --title "Security Onion Setup" --inputbox \
- "\nEnter LogStash Pipeline Workers: \n \n(Recommended value is pre-populated)" 10 60 $CPUCORES 3>&1 1>&2 2>&3)
-
- local exitstatus=$?
- whiptail_check_exitstatus $exitstatus
-
-}
-
-whiptail_node_ls_pipline_batchsize() {
-
- LSPIPELINEBATCH=$(whiptail --title "Security Onion Setup" --inputbox \
- "\nEnter LogStash Pipeline Batch Size: \n \n(Default value is pre-populated)" 10 60 125 3>&1 1>&2 2>&3)
-
- local exitstatus=$?
- whiptail_check_exitstatus $exitstatus
-
-}
-
-whiptail_node_ls_input_threads() {
-
- LSINPUTTHREADS=$(whiptail --title "Security Onion Setup" --inputbox \
- "\nEnter LogStash Input Threads: \n \n(Default value is pre-populated)" 10 60 1 3>&1 1>&2 2>&3)
-
- local exitstatus=$?
- whiptail_check_exitstatus $exitstatus
-
-}
-
-whiptail_node_ls_input_batch_count() {
-
- LSINPUTBATCHCOUNT=$(whiptail --title "Security Onion Setup" --inputbox \
- "\nEnter LogStash Input Batch Count: \n \n(Default value is pre-populated)" 10 60 125 3>&1 1>&2 2>&3)
-
- local exitstatus=$?
- whiptail_check_exitstatus $exitstatus
-
-}
-
-whiptail_passwords_dont_match() {
-
- whiptail --title "Security Onion Setup" --msgbox "Passwords don't match. Please re-enter." 8 78
-
-}
-
-whiptail_rule_setup() {
-
- # Get pulled pork info
- RULESETUP=$(whiptail --title "Security Onion Setup" --radiolist \
- "What IDS rules to use?:" 20 140 4 \
- "ETOPEN" "Emerging Threats Open - no oinkcode required" ON \
- "ETPRO" "Emerging Threats PRO - requires ETPRO oinkcode" OFF \
- "TALOSET" "Snort Subscriber (Talos) ruleset and Emerging Threats NoGPL ruleset - requires Snort Subscriber oinkcode" OFF \
- "TALOS" "Snort Subscriber (Talos) ruleset only and set a Snort Subscriber policy - requires Snort Subscriber oinkcode" OFF 3>&1 1>&2 2>&3 )
-
- local exitstatus=$?
- whiptail_check_exitstatus $exitstatus
-
-}
-
-whiptail_sensor_config() {
-
- NSMSETUP=$(whiptail --title "Security Onion Setup" --radiolist \
- "What type of configuration would you like to use?:" 20 78 4 \
- "BASIC" "Install NSM components with recommended settings" ON \
- "ADVANCED" "Configure each component individually" OFF 3>&1 1>&2 2>&3 )
-
- local exitstatus=$?
- whiptail_check_exitstatus $exitstatus
-
-}
-
-whiptail_set_hostname() {
-
- HOSTNAME=$(whiptail --title "Security Onion Setup" --inputbox \
- "Enter the Hostname you would like to set." 10 60 localhost 3>&1 1>&2 2>&3)
-
- local exitstatus=$?
- whiptail_check_exitstatus $exitstatus
-
-}
-
-whiptail_setup_complete() {
-
- whiptail --title "Security Onion Setup" --msgbox "Finished installing this as an $INSTALLTYPE. Press Enter to reboot." 8 78
- install_cleanup
-
-}
-
-whiptail_setup_failed() {
-
- whiptail --title "Security Onion Setup" --msgbox "Install had a problem. Please see $SETUPLOG for details. Press Enter to reboot." 8 78
- install_cleanup
-
-}
-
-whiptail_shard_count() {
-
- SHARDCOUNT=$(whiptail --title "Security Onion Setup" --inputbox \
- "\nEnter ES Shard Count: \n \n(Default value is pre-populated)" 10 60 125 3>&1 1>&2 2>&3)
-
- local exitstatus=$?
- whiptail_check_exitstatus $exitstatus
-
-}
-
-whiptail_suricata_pins() {
-
- FILTEREDCORES=$(echo ${LISTCORES[@]} ${BROPINS[@]} | tr -d '"' | tr ' ' '\n' | sort | uniq -u | awk '{print $1 " \"" "core" "\""}')
- SURIPINS=$(whiptail --noitem --title "Pin Suricata CPUS" --checklist "Please Select $LBPROCS cores to pin Suricata to:" 20 78 12 ${FILTEREDCORES[@]} 3>&1 1>&2 2>&3 )
-
- local exitstatus=$?
- whiptail_check_exitstatus $exitstatus
-
-}
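# The FILTEREDCORES pipeline above is a set difference: concatenating the full
# core list with the cores already pinned to Zeek and keeping only the entries
# that appear exactly once (sort | uniq -u) leaves the cores still free for
# Suricata. That holds as long as each list is duplicate-free and BROPINS is a
# subset of LISTCORES. A tiny standalone illustration:
ALL_CORES=(0 1 2 3 4 5 6 7)
PINNED=(0 1)                            # cores already assigned elsewhere
AVAILABLE=$(echo "${ALL_CORES[@]}" "${PINNED[@]}" | tr ' ' '\n' | sort -n | uniq -u)
echo "Cores free for pinning:" $AVAILABLE   # -> 2 3 4 5 6 7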
-
-whiptail_master_updates() {
-
- MASTERUPDATES=$(whiptail --title "Security Onion Setup" --radiolist \
- "How would you like to download updates for your grid?:" 20 78 4 \
- "MASTER" "Have the master node act as a proxy for OS/Docker updates." ON \
- "OPEN" "Have each node connect to the Internet for updates" OFF 3>&1 1>&2 2>&3 )
-
- local exitstatus=$?
- whiptail_check_exitstatus $exitstatus
-
-}
-
-whiptail_node_updates() {
-
- NODEUPDATES=$(whiptail --title "Security Onion Setup" --radiolist \
- "How would you like to download updates for this node?:" 20 78 4 \
- "MASTER" "Download OS/Docker updates from the Master." ON \
- "OPEN" "Download updates directly from the Internet" OFF 3>&1 1>&2 2>&3 )
-
- local exitstatus=$?
- whiptail_check_exitstatus $exitstatus
-
-}
-
-whiptail_you_sure() {
-
- whiptail --title "Security Onion Setup" --yesno "Are you sure you want to install Security Onion over the internet?" 8 78
-
-}
-
-########################
-## ##
-## End Functions ##
-## ##
-########################
-
-#####################
-## ##
-## Let's Go! ##
-## ##
-#####################
-
-# Check for prerequisites
-got_root
-detect_os
-
-if [ $OS == ubuntu ]; then
-  # Override the horrible Ubuntu whiptail color palette
- update-alternatives --set newt-palette /etc/newt/palette.original
-fi
-
-# Question Time
-if (whiptail_you_sure); then
-
- # Create a temp dir to get started
- install_prep
-
- # Let folks know they need their management interface already set up.
- whiptail_network_notice
-
- # Set the hostname to reduce errors
- whiptail_set_hostname
-
- # Go ahead and gen the keys so we can use them for any sensor type - Disabled for now
- #minio_generate_keys
-
- # What kind of install are we doing?
- whiptail_install_type
-
- ####################
- ## Master ##
- ####################
-
- if [ $INSTALLTYPE == 'MASTERONLY' ]; then
-
- # Would you like to do an advanced install?
- whiptail_master_adv
-
- # Pick the Management NIC
- whiptail_management_nic
-
- # Choose Zeek or Community NSM
- whiptail_bro_version
-
- # Select Snort or Suricata
- whiptail_nids
-
- # Snag the HOME_NET
- whiptail_homenet_master
-
- # Pick your Ruleset
- whiptail_rule_setup
-
- # Get the code if it isn't ET Open
- if [ $RULESETUP != 'ETOPEN' ]; then
- # Get the code
- whiptail_oinkcode
- fi
-
- # Find out how to handle updates
- whiptail_master_updates
- whiptail_enable_components
- process_components
-
-    # Do Advanced Setup if they chose it
- if [ $MASTERADV == 'ADVANCED' ]; then
- # Ask which bro logs to enable - Need to add Suricata check
- if [ $BROVERSION != 'SURICATA' ]; then
- whiptail_master_adv_service_brologs
- fi
- fi
-
- whiptail_create_socore_user
- SCMATCH=no
- while [ $SCMATCH != yes ]; do
- whiptail_create_socore_user_password1
- whiptail_create_socore_user_password2
- check_socore_pass
- done
-
- # Last Chance to back out
- whiptail_make_changes
- set_hostname
- generate_passwords
- auth_pillar
- clear_master
- mkdir -p /nsm
- get_filesystem_root
- get_filesystem_nsm
- # Enable Bro Logs
- bro_logs_enabled
-
- # Figure out the main IP address
- get_main_ip
-
- # Add the user so we can sit back and relax
- #echo ""
- #echo "**** Please set a password for socore. You will use this password when setting up other Nodes/Sensors"
- #echo ""
- add_socore_user_master
-
- # Install salt and dependencies
- {
- sleep 0.5
- echo -e "XXX\n0\nInstalling and configuring Salt... \nXXX"
- echo " ** Installing Salt and Dependencies **" >> $SETUPLOG
- saltify >> $SETUPLOG 2>&1
- echo -e "XXX\n5\nInstalling Docker... \nXXX"
- docker_install >> $SETUPLOG 2>&1
- echo -e "XXX\n10\nConfiguring Salt Master... \nXXX"
- echo " ** Configuring Minion **" >> $SETUPLOG
- configure_minion master >> $SETUPLOG 2>&1
- echo " ** Installing Salt Master **" >> $SETUPLOG
- install_master >> $SETUPLOG 2>&1
- salt_master_directories >> $SETUPLOG 2>&1
- update_sudoers >> $SETUPLOG 2>&1
- chown_salt_master >> $SETUPLOG 2>&1
- es_heapsize >> $SETUPLOG 2>&1
- ls_heapsize >> $SETUPLOG 2>&1
- echo -e "XXX\n25\nConfiguring Default Pillars... \nXXX"
- master_static >> $SETUPLOG 2>&1
- echo "** Generating the master pillar **" >> $SETUPLOG
- master_pillar >> $SETUPLOG 2>&1
- echo -e "XXX\n30\nAccepting Salt Keys... \nXXX"
- # Do a checkin to push the key up
- echo "** Pushing the key up to Master **" >> $SETUPLOG
- salt_firstcheckin >> $SETUPLOG 2>&1
- # Accept the Master Key
- echo "** Accepting the key on the master **" >> $SETUPLOG
- accept_salt_key_local >> $SETUPLOG 2>&1
- echo -e "XXX\n35\nConfiguring Firewall... \nXXX"
- # Open the firewall
- echo "** Setting the initial firewall policy **" >> $SETUPLOG
- set_initial_firewall_policy >> $SETUPLOG 2>&1
- # Do the big checkin but first let them know it will take a bit.
- echo -e "XXX\n40\nGenerating CA... \nXXX"
- salt_checkin >> $SETUPLOG 2>&1
- salt-call state.apply ca >> $SETUPLOG 2>&1
- salt-call state.apply ssl >> $SETUPLOG 2>&1
- echo -e "XXX\n43\nInstalling Common Components... \nXXX"
- salt-call state.apply common >> $SETUPLOG 2>&1
- echo -e "XXX\n45\nApplying firewall rules... \nXXX"
- salt-call state.apply firewall >> $SETUPLOG 2>&1
- salt-call state.apply master >> $SETUPLOG 2>&1
- salt-call state.apply idstools >> $SETUPLOG 2>&1
- echo -e "XXX\n40\nInstalling Redis... \nXXX"
- salt-call state.apply redis >> $SETUPLOG 2>&1
- if [[ $OSQUERY == '1' ]]; then
- echo -e "XXX\n41\nInstalling MySQL... \nXXX"
- salt-call state.apply mysql >> $SETUPLOG 2>&1
- fi
- echo -e "XXX\n45\nInstalling Elastic Components... \nXXX"
- salt-call state.apply elasticsearch >> $SETUPLOG 2>&1
- salt-call state.apply logstash >> $SETUPLOG 2>&1
- salt-call state.apply kibana >> $SETUPLOG 2>&1
- salt-call state.apply elastalert >> $SETUPLOG 2>&1
- if [[ $WAZUH == '1' ]]; then
- echo -e "XXX\n68\nInstalling Wazuh... \nXXX"
- salt-call state.apply wazuh >> $SETUPLOG 2>&1
- fi
- echo -e "XXX\n75\nInstalling Filebeat... \nXXX"
- salt-call state.apply filebeat >> $SETUPLOG 2>&1
- salt-call state.apply utility >> $SETUPLOG 2>&1
- salt-call state.apply schedule >> $SETUPLOG 2>&1
- if [[ $OSQUERY == '1' ]]; then
- echo -e "XXX\n79\nInstalling Fleet... \nXXX"
- salt-call state.apply fleet >> $SETUPLOG 2>&1
- salt-call state.apply launcher >> $SETUPLOG 2>&1
- fi
- echo -e "XXX\n85\nConfiguring SOctopus... \nXXX"
- salt-call state.apply soctopus >> $SETUPLOG 2>&1
- if [[ $THEHIVE == '1' ]]; then
- echo -e "XXX\n87\nInstalling TheHive... \nXXX"
- salt-call state.apply hive >> $SETUPLOG 2>&1
- fi
- if [[ $PLAYBOOK == '1' ]]; then
- echo -e "XXX\n89\nInstalling Playbook... \nXXX"
- salt-call state.apply playbook >> $SETUPLOG 2>&1
- fi
-      echo -e "XXX\n75\nEnabling Check-in at Boot... \nXXX"
- checkin_at_boot >> $SETUPLOG 2>&1
- echo -e "XXX\n95\nVerifying Install... \nXXX"
- salt-call state.highstate >> $SETUPLOG 2>&1
-
- } |whiptail --title "Hybrid Hunter Install" --gauge "Please wait while installing" 6 60 0
- GOODSETUP=$(tail -10 $SETUPLOG | grep Failed | awk '{ print $2}')
- if [[ $GOODSETUP == '0' ]]; then
- whiptail_setup_complete
- if [[ $THEHIVE == '1' ]]; then
- check_hive_init_then_reboot
- else
- shutdown -r now
- fi
- else
- whiptail_setup_failed
- shutdown -r now
- fi
-
- fi
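# Every install branch drives its progress bar the same way the master branch
# above just did: a command group is piped into `whiptail --gauge`, which reads
# either a bare number (new percentage) or an XXX / <percent> / <message> / XXX
# block to update the percentage and the displayed text together. A minimal
# demonstration of that protocol:
{
    for pct in 10 40 70 100; do
        echo -e "XXX\n${pct}\nWorking... ${pct}%\nXXX"
        sleep 1
    done
} | whiptail --title "Gauge demo" --gauge "Starting..." 6 60 0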
-
- ####################
- ## Sensor ##
- ####################
-
- if [ $INSTALLTYPE == 'SENSORONLY' ]; then
- whiptail_management_nic
- filter_nics
- whiptail_bond_nics
- whiptail_management_server
- whiptail_master_updates
- set_updates
- whiptail_homenet_sensor
- whiptail_sensor_config
- # Calculate lbprocs so we can call it in the prompts
- calculate_useable_cores
- if [ $NSMSETUP == 'ADVANCED' ]; then
- whiptail_bro_pins
- whiptail_suricata_pins
- whiptail_bond_nics_mtu
- else
- whiptail_basic_bro
- whiptail_basic_suri
- fi
- whiptail_make_changes
- set_hostname
- clear_master
- mkdir -p /nsm
- get_filesystem_root
- get_filesystem_nsm
- copy_ssh_key
- {
- sleep 0.5
- echo -e "XXX\n0\nSetting Initial Firewall Policy... \nXXX"
- set_initial_firewall_policy >> $SETUPLOG 2>&1
- echo -e "XXX\n3\nCreating Bond Interface... \nXXX"
- network_setup >> $SETUPLOG 2>&1
- echo -e "XXX\n4\nGenerating Sensor Pillar... \nXXX"
- sensor_pillar >> $SETUPLOG 2>&1
- echo -e "XXX\n5\nInstalling Salt Components... \nXXX"
- saltify >> $SETUPLOG 2>&1
- echo -e "XXX\n20\nInstalling Docker... \nXXX"
- docker_install >> $SETUPLOG 2>&1
- echo -e "XXX\n22\nConfiguring Salt Minion... \nXXX"
- configure_minion sensor >> $SETUPLOG 2>&1
- echo -e "XXX\n24\nCopying Sensor Pillar to Master... \nXXX"
- copy_minion_pillar sensors >> $SETUPLOG 2>&1
- echo -e "XXX\n25\nSending Salt Key to Master... \nXXX"
- salt_firstcheckin >> $SETUPLOG 2>&1
- echo -e "XXX\n26\nTelling the Master to Accept Key... \nXXX"
- # Accept the Salt Key
- accept_salt_key_remote >> $SETUPLOG 2>&1
- echo -e "XXX\n27\nApplying SSL Certificates... \nXXX"
- salt-call state.apply ca >> $SETUPLOG 2>&1
- salt-call state.apply ssl >> $SETUPLOG 2>&1
- echo -e "XXX\n35\nInstalling Core Components... \nXXX"
- salt-call state.apply common >> $SETUPLOG 2>&1
- salt-call state.apply firewall >> $SETUPLOG 2>&1
- echo -e "XXX\n50\nInstalling PCAP... \nXXX"
- salt-call state.apply pcap >> $SETUPLOG 2>&1
- echo -e "XXX\n60\nInstalling IDS components... \nXXX"
- salt-call state.apply suricata >> $SETUPLOG 2>&1
- echo -e "XXX\n80\nVerifying Install... \nXXX"
- salt-call state.highstate >> $SETUPLOG 2>&1
- checkin_at_boot >> $SETUPLOG 2>&1
- } |whiptail --title "Hybrid Hunter Install" --gauge "Please wait while installing" 6 60 0
- GOODSETUP=$(tail -10 $SETUPLOG | grep Failed | awk '{ print $2}')
- if [[ $GOODSETUP == '0' ]]; then
- whiptail_setup_complete
- shutdown -r now
- else
- whiptail_setup_failed
- shutdown -r now
- fi
- fi
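# The GOODSETUP test used after each branch relies on the summary block that
# `salt-call state.highstate` prints at the end of its output, which normally
# includes a line of the form "Failed:    <count>"; the last few lines of
# $SETUPLOG are scraped for that count. Roughly:
FAILED_STATES=$(tail -10 "$SETUPLOG" | grep Failed | awk '{print $2}')
if [ "$FAILED_STATES" = "0" ]; then
    echo "highstate reported no failed states"
else
    echo "one or more states failed; review $SETUPLOG"
fi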
-
- #######################
- ## Eval Mode ##
- #######################
-
- if [ $INSTALLTYPE == 'EVALMODE' ]; then
- # Select the management NIC
- whiptail_management_nic
-
- # Filter out the management NIC
- filter_nics
-
- # Select which NICs are in the bond
- whiptail_bond_nics
-
- # Snag the HOME_NET
- whiptail_homenet_master
- whiptail_eval_adv_warning
- whiptail_enable_components
-
- # Set a bunch of stuff since this is eval
- es_heapsize
- ls_heapsize
- NODE_ES_HEAP_SIZE="600m"
- NODE_LS_HEAP_SIZE="500m"
- LSPIPELINEWORKERS=1
- LSPIPELINEBATCH=125
- LSINPUTTHREADS=1
- LSINPUTBATCHCOUNT=125
- RULESETUP=ETOPEN
- NSMSETUP=BASIC
- NIDS=Suricata
- BROVERSION=ZEEK
- CURCLOSEDAYS=30
- process_components
- whiptail_create_socore_user
- SCMATCH=no
- while [ $SCMATCH != yes ]; do
- whiptail_create_socore_user_password1
- whiptail_create_socore_user_password2
- check_socore_pass
- done
- whiptail_make_changes
- set_hostname
- generate_passwords
- auth_pillar
- clear_master
- mkdir -p /nsm
- get_filesystem_root
- get_filesystem_nsm
- get_log_size_limit
- get_main_ip
- # Add the user so we can sit back and relax
- add_socore_user_master
- {
- sleep 0.5
- echo -e "XXX\n0\nCreating Bond Interface... \nXXX"
- network_setup >> $SETUPLOG 2>&1
- echo -e "XXX\n1\nInstalling saltstack... \nXXX"
- saltify >> $SETUPLOG 2>&1
- echo -e "XXX\n3\nInstalling docker... \nXXX"
- docker_install >> $SETUPLOG 2>&1
- echo -e "XXX\n5\nInstalling master code... \nXXX"
- install_master >> $SETUPLOG 2>&1
- echo -e "XXX\n6\nCopying salt code... \nXXX"
- salt_master_directories >> $SETUPLOG 2>&1
-      echo -e "XXX\n6\nUpdating sudoers... \nXXX"
- update_sudoers >> $SETUPLOG 2>&1
- echo -e "XXX\n7\nFixing some permissions... \nXXX"
- chown_salt_master >> $SETUPLOG 2>&1
- echo -e "XXX\n7\nCreating the static pillar... \nXXX"
- # Set the static values
- master_static >> $SETUPLOG 2>&1
- echo -e "XXX\n7\nCreating the master pillar... \nXXX"
- master_pillar >> $SETUPLOG 2>&1
- echo -e "XXX\n7\nConfiguring minion... \nXXX"
- configure_minion eval >> $SETUPLOG 2>&1
- echo -e "XXX\n7\nSetting the node type to eval... \nXXX"
- set_node_type >> $SETUPLOG 2>&1
-      echo -e "XXX\n7\nCreating the node pillar... \nXXX"
- node_pillar >> $SETUPLOG 2>&1
- echo -e "XXX\n8\nCreating firewall policies... \nXXX"
- set_initial_firewall_policy >> $SETUPLOG 2>&1
- echo -e "XXX\n10\nRegistering agent... \nXXX"
- salt_firstcheckin >> $SETUPLOG 2>&1
- echo -e "XXX\n11\nAccepting Agent... \nXXX"
- accept_salt_key_local >> $SETUPLOG 2>&1
- echo -e "XXX\n12\nRunning the SSL states... \nXXX"
- salt_checkin >> $SETUPLOG 2>&1
- salt-call state.apply ca >> $SETUPLOG 2>&1
- salt-call state.apply ssl >> $SETUPLOG 2>&1
- echo -e "XXX\n15\nInstalling core components... \nXXX"
- salt-call state.apply common >> $SETUPLOG 2>&1
- echo -e "XXX\n18\nInitializing firewall rules... \nXXX"
- salt-call state.apply firewall >> $SETUPLOG 2>&1
- echo -e "XXX\n25\nInstalling master components... \nXXX"
- salt-call state.apply master >> $SETUPLOG 2>&1
- salt-call state.apply idstools >> $SETUPLOG 2>&1
- if [[ $OSQUERY == '1' ]]; then
- salt-call state.apply mysql >> $SETUPLOG 2>&1
- fi
- echo -e "XXX\n35\nInstalling ElasticSearch... \nXXX"
- salt-call state.apply elasticsearch >> $SETUPLOG 2>&1
- echo -e "XXX\n40\nInstalling Logstash... \nXXX"
- salt-call state.apply logstash >> $SETUPLOG 2>&1
- echo -e "XXX\n45\nInstalling Kibana... \nXXX"
- salt-call state.apply kibana >> $SETUPLOG 2>&1
- echo -e "XXX\n50\nInstalling pcap... \nXXX"
- salt-call state.apply pcap >> $SETUPLOG 2>&1
- echo -e "XXX\n52\nInstalling Suricata... \nXXX"
- salt-call state.apply suricata >> $SETUPLOG 2>&1
- echo -e "XXX\n54\nInstalling Zeek... \nXXX"
- salt-call state.apply bro >> $SETUPLOG 2>&1
- echo -e "XXX\n56\nInstalling curator... \nXXX"
- salt-call state.apply curator >> $SETUPLOG 2>&1
- echo -e "XXX\n58\nInstalling elastalert... \nXXX"
- salt-call state.apply elastalert >> $SETUPLOG 2>&1
- if [[ $OSQUERY == '1' ]]; then
- echo -e "XXX\n60\nInstalling fleet... \nXXX"
- salt-call state.apply fleet >> $SETUPLOG 2>&1
- salt-call state.apply redis >> $SETUPLOG 2>&1
- fi
- if [[ $WAZUH == '1' ]]; then
- echo -e "XXX\n65\nInstalling Wazuh components... \nXXX"
- salt-call state.apply wazuh >> $SETUPLOG 2>&1
- fi
- echo -e "XXX\n85\nInstalling filebeat... \nXXX"
- salt-call state.apply filebeat >> $SETUPLOG 2>&1
- salt-call state.apply utility >> $SETUPLOG 2>&1
- echo -e "XXX\n95\nInstalling misc components... \nXXX"
- salt-call state.apply schedule >> $SETUPLOG 2>&1
- salt-call state.apply soctopus >> $SETUPLOG 2>&1
- if [[ $THEHIVE == '1' ]]; then
- echo -e "XXX\n96\nInstalling The Hive... \nXXX"
- salt-call state.apply hive >> $SETUPLOG 2>&1
- fi
- if [[ $PLAYBOOK == '1' ]]; then
- echo -e "XXX\n97\nInstalling Playbook... \nXXX"
- salt-call state.apply playbook >> $SETUPLOG 2>&1
- fi
- echo -e "XXX\n98\nSetting checkin to run on boot... \nXXX"
- checkin_at_boot >> $SETUPLOG 2>&1
- echo -e "XXX\n99\nVerifying Setup... \nXXX"
- salt-call state.highstate >> $SETUPLOG 2>&1
-
- } |whiptail --title "Hybrid Hunter Install" --gauge "Please wait while installing" 6 60 0
- GOODSETUP=$(tail -10 $SETUPLOG | grep Failed | awk '{ print $2}')
- if [ $OS == 'centos' ]; then
- if [[ $GOODSETUP == '1' ]]; then
- whiptail_setup_complete
- if [[ $THEHIVE == '1' ]]; then
- check_hive_init_then_reboot
- else
- shutdown -r now
- fi
- else
- whiptail_setup_failed
- shutdown -r now
- fi
- else
- if [[ $GOODSETUP == '0' ]]; then
- whiptail_setup_complete
- if [[ $THEHIVE == '1' ]]; then
- check_hive_init_then_reboot
- else
- shutdown -r now
- fi
- else
- whiptail_setup_failed
- shutdown -r now
- fi
- fi
- fi
-
- ###################
- ## Nodes ##
- ###################
-
- if [ $INSTALLTYPE == 'STORAGENODE' ] || [ $INSTALLTYPE == 'PARSINGNODE' ] || [ $INSTALLTYPE == 'HOTNODE' ] || [ $INSTALLTYPE == 'WARMNODE' ]; then
- whiptail_management_nic
- whiptail_management_server
- whiptail_master_updates
- set_updates
- get_log_size_limit
- CURCLOSEDAYS=30
- es_heapsize
- ls_heapsize
- whiptail_node_advanced
- if [ $NODESETUP == 'NODEADVANCED' ]; then
- whiptail_node_es_heap
- whiptail_node_ls_heap
- whiptail_node_ls_pipeline_worker
- whiptail_node_ls_pipline_batchsize
- whiptail_node_ls_input_threads
- whiptail_node_ls_input_batch_count
- whiptail_cur_close_days
- whiptail_log_size_limit
- else
- NODE_ES_HEAP_SIZE=$ES_HEAP_SIZE
- NODE_LS_HEAP_SIZE=$LS_HEAP_SIZE
- LSPIPELINEWORKERS=$CPUCORES
- LSPIPELINEBATCH=125
- LSINPUTTHREADS=1
- LSINPUTBATCHCOUNT=125
- fi
- whiptail_make_changes
- set_hostname
- clear_master
- mkdir -p /nsm
- get_filesystem_root
- get_filesystem_nsm
- copy_ssh_key
- {
- sleep 0.5
- echo -e "XXX\n0\nSetting Initial Firewall Policy... \nXXX"
- set_initial_firewall_policy >> $SETUPLOG 2>&1
- echo -e "XXX\n5\nInstalling Salt Packages... \nXXX"
- saltify >> $SETUPLOG 2>&1
- echo -e "XXX\n20\nInstalling Docker... \nXXX"
- docker_install >> $SETUPLOG 2>&1
- echo -e "XXX\n30\nInitializing Minion... \nXXX"
- configure_minion node >> $SETUPLOG 2>&1
- set_node_type >> $SETUPLOG 2>&1
- node_pillar >> $SETUPLOG 2>&1
- copy_minion_pillar nodes >> $SETUPLOG 2>&1
- echo -e "XXX\n35\nSending and Accepting Salt Key... \nXXX"
- salt_firstcheckin >> $SETUPLOG 2>&1
- # Accept the Salt Key
- accept_salt_key_remote >> $SETUPLOG 2>&1
- echo -e "XXX\n40\nApplying SSL Certificates... \nXXX"
- salt-call state.apply ca >> $SETUPLOG 2>&1
- salt-call state.apply ssl >> $SETUPLOG 2>&1
- echo -e "XXX\n50\nConfiguring Firewall... \nXXX"
- salt-call state.apply common >> $SETUPLOG 2>&1
- salt-call state.apply firewall >> $SETUPLOG 2>&1
- echo -e "XXX\n70\nInstalling Elastic Components... \nXXX"
- salt-call state.apply logstash >> $SETUPLOG 2>&1
- salt-call state.apply elasticsearch >> $SETUPLOG 2>&1
- salt-call state.apply curator >> $SETUPLOG 2>&1
- salt-call state.apply filebeat >> $SETUPLOG 2>&1
- echo -e "XXX\n90\nVerifying Install... \nXXX"
- salt-call state.highstate >> $SETUPLOG 2>&1
- checkin_at_boot >> $SETUPLOG 2>&1
-
- } |whiptail --title "Hybrid Hunter Install" --gauge "Please wait while installing" 6 60 0
- GOODSETUP=$(tail -10 $SETUPLOG | grep Failed | awk '{ print $2}')
- if [[ $GOODSETUP == '0' ]]; then
- whiptail_setup_complete
- shutdown -r now
- else
- whiptail_setup_failed
- shutdown -r now
- fi
-
- #set_initial_firewall_policy
- #saltify
- #docker_install
- #configure_minion node
- #set_node_type
- #node_pillar
- #copy_minion_pillar nodes
- #salt_checkin
- # Accept the Salt Key
- #accept_salt_key_remote
- # Do the big checkin but first let them know it will take a bit.
- #salt_checkin_message
- #salt_checkin
- #checkin_at_boot
-
- #whiptail_setup_complete
- fi
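# All of the branches install components by applying Salt states one at a time
# with salt-call. If the verification step reports failures, the same states
# can be re-applied individually and the run re-verified, for example:
salt-call state.apply filebeat >> "$SETUPLOG" 2>&1   # re-run a single state
salt-call state.highstate >> "$SETUPLOG" 2>&1        # then re-check the whole node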
-
-else
- exit
-fi
+cd setup && bash so-setup.sh network
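# The monolithic installer above is replaced by this hand-off to the reworked
# setup script under setup/. The "network" argument selects a network-based
# install; per the release notes the same script also backs the ISO install
# path, which presumably passes a different mode argument from the ISO boot
# environment. As with the old script, it expects to run as root, e.g. from a
# checkout of this repository:
#
#   sudo su -
#   cd /path/to/this/repo
#   cd setup && bash so-setup.sh network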