Mirror of https://github.com/Security-Onion-Solutions/securityonion.git (synced 2025-12-07 17:52:46 +01:00)

README.md (49 lines changed)
@@ -1,32 +1,35 @@

## Hybrid Hunter Alpha 1.1.2

## Hybrid Hunter Alpha 1.1.3

- Quick firewall fix to address the latest Docker version.
- Added the option to install Playbook during the initial install.
- Fixed an issue with multiple monitor interfaces not working properly.

### ISO Download:

ISO Download: [HH 1.1.2-2](https://github.com/Security-Onion-Solutions/securityonion-hh-iso/releases/download/HH1.1.2/HH-1.1.2-2.iso)

MD5 (HH-1.1.2-2.iso) = abbbae7b40a50623546ed3d7f8cda0ec

[HH1.1.3-20.iso](https://github.com/Security-Onion-Solutions/securityonion-hh-iso/releases/download/HH1.1.3/HH-1.1.3-20.iso)

MD5: 5A97980365A2A63EBFABB8C1DEB32BB6

SHA1: 2A780B41903D907CED91D944569FD24FC131281F

SHA256: 56FA65EB5957903B967C16E792B17386848101CD058E0289878373110446C4B2

## Hybrid Hunter Alpha 1.1.1

```
Default Username: onion
Default Password: V@daL1aZ
```

### Changes:

- Alpha 2 is here!
- Suricata 4.1.5.
- Bro/Zeek 2.6.4.
- TheHive 3.4.0 (includes ES 6.8.3 for TheHive only).
- Fixed Bro/Zeek packet loss calculation for Grafana.
- Updated to the latest Sensoroni, which includes websockets support for job status updates without having to refresh the page.
- NIDS and HIDS dashboard updates.
- Playbook and ATT&CK Navigator features are now included.
- Filebeat now logs to a file instead of stdout.
- Elastalert has been updated to use Python 3 and to allow the use of custom alerters.
- Moved Bro/Zeek log parsing from Logstash to Elasticsearch Ingest for higher performance and lower memory usage (a quick way to verify the pipelines is sketched after this list).
- Several changes to the setup script have been made to improve stability of the setup process:
  - Setup now modifies your hosts file so that the install works better in environments without DNS.
  - You are now prompted to set a password for the socore user.
  - The install now forces a reboot at the end of the install. This fixes an issue with some of the Docker containers being in the wrong state after a manual reboot. Manual reboots are fine after the initial reboot.
- Overhaul of the setup script to support both ISO and network based setups.
- ISO will now boot properly from a USB stick.
- Python 3 is now the default.
- Fixed Filebeat restarting at every check-in due to an x509 refresh issue.
- Cortex installed and integrated with TheHive.
- Switched to using vanilla Kolide Fleet and upgraded to the latest version (2.4).
- Playbook changes:
  - Now preloaded with Plays generated from Sysmon Sigma signatures in the [Sigma community repo](https://github.com/Neo23x0/sigma/tree/master/rules/windows/sysmon).
  - New update script that pulls in new Sigma signatures from the community repo.
  - Bulk enable / disable Plays from the web UI.
  - Updated sigmac mapping template & configuration (backend is now `elastalert`).
  - Updated TheHive alerts formatting.
- OS patch scheduling:
  - During setup, choose between an auto, manual, or scheduled OS patch interval.
  - For scheduled, create a new or import an existing named schedule.
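One of the larger changes above is that Bro/Zeek log parsing now happens in Elasticsearch Ingest pipelines rather than Logstash. As a rough way to confirm the pipelines loaded after setup (this is only a sketch: it assumes you run it on the master, that Elasticsearch answers on its default port 9200, and it uses the jq package that setup installs):

```
# List the names of the ingest pipelines Elasticsearch has loaded;
# the Bro/Zeek pipelines should show up here once setup has finished.
curl -s http://localhost:9200/_ingest/pipeline | jq 'keys'
```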

### Warnings and Disclaimers
pillar/patch/needs_restarting.sls (new file, 2 lines)
@@ -0,0 +1,2 @@
mine_functions:
  needs_restarting.check: []
@@ -1,19 +1,22 @@
base:
  '*':
    - patch.needs_restarting

  'G@role:so-sensor':
    - sensors.{{ grains.host }}
    - sensors.{{ grains.id }}
    - static
    - firewall.*
    - brologs

  'G@role:so-master':
    - masters.{{ grains.host }}
    - masters.{{ grains.id }}
    - static
    - firewall.*
    - data.*
    - auth

  'G@role:so-eval':
    - masters.{{ grains.host }}
    - masters.{{ grains.id }}
    - static
    - firewall.*
    - data.*
@@ -21,6 +24,15 @@ base:
    - auth

  'G@role:so-node':
    - nodes.{{ grains.host }}
    - nodes.{{ grains.id }}
    - static
    - firewall.*

  'G@role:so-helix':
    - masters.{{ grains.id }}
    - sensors.{{ grains.id }}
    - static
    - firewall.*
    - fireeye
    - static
    - brologs
salt/_modules/needs_restarting.py (new file, 24 lines)
@@ -0,0 +1,24 @@
from os import path
import subprocess


def check():
    # Report whether this minion needs a reboot, based on its OS family.
    os = __grains__['os']
    retval = 'False'

    if os == 'Ubuntu':
        # Ubuntu drops this flag file when a reboot is required.
        if path.exists('/var/run/reboot-required'):
            retval = 'True'

    elif os == 'CentOS':
        # needs-restarting -r exits non-zero when a reboot is required.
        cmd = 'needs-restarting -r > /dev/null 2>&1'

        try:
            needs_restarting = subprocess.check_call(cmd, shell=True)
        except subprocess.CalledProcessError:
            retval = 'True'

    else:
        retval = 'Unsupported OS: %s' % os

    return retval
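The pillar entry earlier in this change registers needs_restarting.check as a Salt mine function, so each minion periodically publishes the result of this module to the master. A minimal sketch of exercising it by hand from the master (assuming the standard salt and salt-run CLIs are available there):

```
# Sync the custom module out to the minions, then call it directly.
sudo salt '*' saltutil.sync_modules
sudo salt '*' needs_restarting.check

# Or read the values the minions have already pushed to the mine.
sudo salt-run mine.get '*' needs_restarting.check
```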
@@ -92,13 +92,13 @@ localbrosync:
|
||||
|
||||
so-communitybroimage:
|
||||
cmd.run:
|
||||
- name: docker pull --disable-content-trust=false soshybridhunter/so-communitybro:HH1.0.3
|
||||
- name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-communitybro:HH1.0.3
|
||||
|
||||
so-bro:
|
||||
docker_container.running:
|
||||
- require:
|
||||
- so-communitybroimage
|
||||
- image: soshybridhunter/so-communitybro:HH1.0.3
|
||||
- image: docker.io/soshybridhunter/so-communitybro:HH1.0.3
|
||||
- privileged: True
|
||||
- binds:
|
||||
- /nsm/bro/logs:/nsm/bro/logs:rw
|
||||
@@ -125,13 +125,13 @@ localbrosync:
|
||||
|
||||
so-broimage:
|
||||
cmd.run:
|
||||
- name: docker pull --disable-content-trust=false soshybridhunter/so-bro:HH1.1.1
|
||||
- name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-bro:HH1.1.1
|
||||
|
||||
so-bro:
|
||||
docker_container.running:
|
||||
- require:
|
||||
- so-broimage
|
||||
- image: soshybridhunter/so-bro:HH1.1.1
|
||||
- image: docker.io/soshybridhunter/so-bro:HH1.1.1
|
||||
- privileged: True
|
||||
- binds:
|
||||
- /nsm/bro/logs:/nsm/bro/logs:rw
|
||||
|
||||
@@ -10,7 +10,7 @@ x509_signing_policies:
|
||||
- keyUsage: "digitalSignature, nonRepudiation"
|
||||
- subjectKeyIdentifier: hash
|
||||
- authorityKeyIdentifier: keyid,issuer:always
|
||||
- days_valid: 3000
|
||||
- days_valid: 820
|
||||
- copypath: /etc/pki/issued_certs/
|
||||
registry:
|
||||
- minions: '*'
|
||||
@@ -23,7 +23,8 @@ x509_signing_policies:
|
||||
- keyUsage: "critical keyEncipherment"
|
||||
- subjectKeyIdentifier: hash
|
||||
- authorityKeyIdentifier: keyid,issuer:always
|
||||
- days_valid: 3000
|
||||
- extendedKeyUsage: serverAuth
|
||||
- days_valid: 820
|
||||
- copypath: /etc/pki/issued_certs/
|
||||
masterssl:
|
||||
- minions: '*'
|
||||
@@ -36,7 +37,8 @@ x509_signing_policies:
|
||||
- keyUsage: "critical keyEncipherment"
|
||||
- subjectKeyIdentifier: hash
|
||||
- authorityKeyIdentifier: keyid,issuer:always
|
||||
- days_valid: 3000
|
||||
- extendedKeyUsage: serverAuth
|
||||
- days_valid: 820
|
||||
- copypath: /etc/pki/issued_certs/
|
||||
influxdb:
|
||||
- minions: '*'
|
||||
@@ -49,7 +51,8 @@ x509_signing_policies:
|
||||
- keyUsage: "critical keyEncipherment"
|
||||
- subjectKeyIdentifier: hash
|
||||
- authorityKeyIdentifier: keyid,issuer:always
|
||||
- days_valid: 3000
|
||||
- extendedKeyUsage: serverAuth
|
||||
- days_valid: 820
|
||||
- copypath: /etc/pki/issued_certs/
|
||||
fleet:
|
||||
- minions: '*'
|
||||
@@ -62,5 +65,6 @@ x509_signing_policies:
|
||||
- keyUsage: "critical keyEncipherment"
|
||||
- subjectKeyIdentifier: hash
|
||||
- authorityKeyIdentifier: keyid,issuer:always
|
||||
- days_valid: 3000
|
||||
- extendedKeyUsage: serverAuth
|
||||
- days_valid: 820
|
||||
- copypath: /etc/pki/issued_certs/
|
||||
|
||||
@@ -39,10 +39,10 @@ pki_private_key:
|
||||
- require:
|
||||
- file: /etc/pki
|
||||
|
||||
mine.send:
|
||||
send_x509_pem_entries_to_mine:
|
||||
module.run:
|
||||
- func: x509.get_pem_entries
|
||||
- kwargs:
|
||||
glob_path: /etc/pki/ca.crt
|
||||
- mine.send:
|
||||
- func: x509.get_pem_entries
|
||||
- glob_path: /etc/pki/ca.crt
|
||||
- onchanges:
|
||||
- x509: /etc/pki/ca.crt
|
||||
|
||||
@@ -38,6 +38,7 @@ sensorpkgs:
|
||||
- pkgs:
|
||||
- docker-ce
|
||||
- wget
|
||||
- jq
|
||||
{% if grains['os'] != 'CentOS' %}
|
||||
- python-docker
|
||||
- python-m2crypto
|
||||
@@ -116,13 +117,13 @@ nginxtmp:
|
||||
# Start the core docker
|
||||
so-coreimage:
|
||||
cmd.run:
|
||||
- name: docker pull --disable-content-trust=false soshybridhunter/so-core:HH1.1.2
|
||||
- name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-core:HH1.1.3
|
||||
|
||||
so-core:
|
||||
docker_container.running:
|
||||
- require:
|
||||
- so-coreimage
|
||||
- image: soshybridhunter/so-core:HH1.1.2
|
||||
- image: docker.io/soshybridhunter/so-core:HH1.1.3
|
||||
- hostname: so-core
|
||||
- user: socore
|
||||
- binds:
|
||||
@@ -176,13 +177,13 @@ tgrafconf:
|
||||
|
||||
so-telegrafimage:
|
||||
cmd.run:
|
||||
- name: docker pull --disable-content-trust=false soshybridhunter/so-telegraf:HH1.1.0
|
||||
- name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-telegraf:HH1.1.0
|
||||
|
||||
so-telegraf:
|
||||
docker_container.running:
|
||||
- require:
|
||||
- so-telegrafimage
|
||||
- image: soshybridhunter/so-telegraf:HH1.1.0
|
||||
- image: docker.io/soshybridhunter/so-telegraf:HH1.1.0
|
||||
- environment:
|
||||
- HOST_PROC=/host/proc
|
||||
- HOST_ETC=/host/etc
|
||||
@@ -213,7 +214,7 @@ so-telegraf:
|
||||
- /opt/so/conf/telegraf/etc/telegraf.conf
|
||||
- /opt/so/conf/telegraf/scripts
|
||||
|
||||
# If its a master or eval lets install the back end for now
|
||||
# If its a master or eval lets install the back end for now
|
||||
{% if grains['role'] == 'so-master' or grains['role'] == 'so-eval' and GRAFANA == 1 %}
|
||||
|
||||
# Influx DB
|
||||
@@ -237,13 +238,13 @@ influxdbconf:
|
||||
|
||||
so-influximage:
|
||||
cmd.run:
|
||||
- name: docker pull --disable-content-trust=false soshybridhunter/so-influxdb:HH1.1.0
|
||||
- name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-influxdb:HH1.1.0
|
||||
|
||||
so-influxdb:
|
||||
docker_container.running:
|
||||
- require:
|
||||
- so-influximage
|
||||
- image: soshybridhunter/so-influxdb:HH1.1.0
|
||||
- image: docker.io/soshybridhunter/so-influxdb:HH1.1.0
|
||||
- hostname: influxdb
|
||||
- environment:
|
||||
- INFLUXDB_HTTP_LOG_ENABLED=false
|
||||
@@ -316,7 +317,7 @@ grafanaconf:
|
||||
- source: salt://common/grafana/etc
|
||||
|
||||
{% if salt['pillar.get']('mastertab', False) %}
|
||||
{%- for SN, SNDATA in salt['pillar.get']('mastertab', {}).iteritems() %}
|
||||
{%- for SN, SNDATA in salt['pillar.get']('mastertab', {}).items() %}
|
||||
dashboard-master:
|
||||
file.managed:
|
||||
- name: /opt/so/conf/grafana/grafana_dashboards/master/{{ SN }}-Master.json
|
||||
@@ -337,7 +338,7 @@ dashboard-master:
|
||||
{% endif %}
|
||||
|
||||
{% if salt['pillar.get']('sensorstab', False) %}
|
||||
{%- for SN, SNDATA in salt['pillar.get']('sensorstab', {}).iteritems() %}
|
||||
{%- for SN, SNDATA in salt['pillar.get']('sensorstab', {}).items() %}
|
||||
dashboard-{{ SN }}:
|
||||
file.managed:
|
||||
- name: /opt/so/conf/grafana/grafana_dashboards/forward_nodes/{{ SN }}-Sensor.json
|
||||
@@ -358,7 +359,7 @@ dashboard-{{ SN }}:
|
||||
{% endif %}
|
||||
|
||||
{% if salt['pillar.get']('nodestab', False) %}
|
||||
{%- for SN, SNDATA in salt['pillar.get']('nodestab', {}).iteritems() %}
|
||||
{%- for SN, SNDATA in salt['pillar.get']('nodestab', {}).items() %}
|
||||
dashboard-{{ SN }}:
|
||||
file.managed:
|
||||
- name: /opt/so/conf/grafana/grafana_dashboards/storage_nodes/{{ SN }}-Node.json
|
||||
@@ -379,7 +380,7 @@ dashboard-{{ SN }}:
|
||||
{% endif %}
|
||||
|
||||
{% if salt['pillar.get']('evaltab', False) %}
|
||||
{%- for SN, SNDATA in salt['pillar.get']('evaltab', {}).iteritems() %}
|
||||
{%- for SN, SNDATA in salt['pillar.get']('evaltab', {}).items() %}
|
||||
dashboard-{{ SN }}:
|
||||
file.managed:
|
||||
- name: /opt/so/conf/grafana/grafana_dashboards/eval/{{ SN }}-Node.json
|
||||
@@ -402,11 +403,11 @@ dashboard-{{ SN }}:
|
||||
# Install the docker. This needs to be behind nginx at some point
|
||||
so-grafanaimage:
|
||||
cmd.run:
|
||||
- name: docker pull --disable-content-trust=false soshybridhunter/so-grafana:HH1.1.0
|
||||
- name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-grafana:HH1.1.0
|
||||
|
||||
so-grafana:
|
||||
docker_container.running:
|
||||
- image: soshybridhunter/so-grafana:HH1.1.0
|
||||
- image: docker.io/soshybridhunter/so-grafana:HH1.1.0
|
||||
- hostname: grafana
|
||||
- user: socore
|
||||
- binds:
|
||||
|
||||
@@ -152,10 +152,7 @@ http {
|
||||
}
|
||||
|
||||
location /fleet/ {
|
||||
auth_basic "Security Onion";
|
||||
auth_basic_user_file /opt/so/conf/nginx/.htpasswd;
|
||||
rewrite /fleet/(.*) /$1 break;
|
||||
proxy_pass https://{{ masterip }}:8080/;
|
||||
proxy_pass https://{{ masterip }}:8080/fleet/;
|
||||
proxy_read_timeout 90;
|
||||
proxy_connect_timeout 90;
|
||||
proxy_set_header Host $host;
|
||||
@@ -177,6 +174,30 @@ http {
|
||||
|
||||
}
|
||||
|
||||
location /cortex/ {
|
||||
proxy_pass http://{{ masterip }}:9001/cortex/;
|
||||
proxy_read_timeout 90;
|
||||
proxy_connect_timeout 90;
|
||||
proxy_http_version 1.1; # this is essential for chunked responses to work
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header Proxy "";
|
||||
|
||||
}
|
||||
|
||||
location /cyberchef/ {
|
||||
proxy_pass http://{{ masterip }}:9080/;
|
||||
proxy_read_timeout 90;
|
||||
proxy_connect_timeout 90;
|
||||
proxy_http_version 1.1; # this is essential for chunked responses to work
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header Proxy "";
|
||||
|
||||
}
|
||||
|
||||
location /soctopus/ {
|
||||
proxy_pass http://{{ masterip }}:7000/;
|
||||
proxy_read_timeout 90;
|
||||
|
||||
salt/common/nginx/nginx.conf.so-helix (new file, 89 lines)
@@ -0,0 +1,89 @@
|
||||
# For more information on configuration, see:
|
||||
# * Official English Documentation: http://nginx.org/en/docs/
|
||||
# * Official Russian Documentation: http://nginx.org/ru/docs/
|
||||
|
||||
user nginx;
|
||||
worker_processes auto;
|
||||
error_log /var/log/nginx/error.log;
|
||||
pid /run/nginx.pid;
|
||||
|
||||
# Load dynamic modules. See /usr/share/nginx/README.dynamic.
|
||||
include /usr/share/nginx/modules/*.conf;
|
||||
|
||||
events {
|
||||
worker_connections 1024;
|
||||
}
|
||||
|
||||
http {
|
||||
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
|
||||
'$status $body_bytes_sent "$http_referer" '
|
||||
'"$http_user_agent" "$http_x_forwarded_for"';
|
||||
|
||||
access_log /var/log/nginx/access.log main;
|
||||
|
||||
sendfile on;
|
||||
tcp_nopush on;
|
||||
tcp_nodelay on;
|
||||
keepalive_timeout 65;
|
||||
types_hash_max_size 2048;
|
||||
|
||||
include /etc/nginx/mime.types;
|
||||
default_type application/octet-stream;
|
||||
|
||||
# Load modular configuration files from the /etc/nginx/conf.d directory.
|
||||
# See http://nginx.org/en/docs/ngx_core_module.html#include
|
||||
# for more information.
|
||||
include /etc/nginx/conf.d/*.conf;
|
||||
|
||||
server {
|
||||
listen 80 default_server;
|
||||
listen [::]:80 default_server;
|
||||
server_name _;
|
||||
root /usr/share/nginx/html;
|
||||
|
||||
# Load configuration files for the default server block.
|
||||
include /etc/nginx/default.d/*.conf;
|
||||
|
||||
location / {
|
||||
}
|
||||
|
||||
error_page 404 /404.html;
|
||||
location = /40x.html {
|
||||
}
|
||||
|
||||
error_page 500 502 503 504 /50x.html;
|
||||
location = /50x.html {
|
||||
}
|
||||
}
|
||||
|
||||
# Settings for a TLS enabled server.
|
||||
#
|
||||
# server {
|
||||
# listen 443 ssl http2 default_server;
|
||||
# listen [::]:443 ssl http2 default_server;
|
||||
# server_name _;
|
||||
# root /usr/share/nginx/html;
|
||||
#
|
||||
# ssl_certificate "/etc/pki/nginx/server.crt";
|
||||
# ssl_certificate_key "/etc/pki/nginx/private/server.key";
|
||||
# ssl_session_cache shared:SSL:1m;
|
||||
# ssl_session_timeout 10m;
|
||||
# ssl_ciphers HIGH:!aNULL:!MD5;
|
||||
# ssl_prefer_server_ciphers on;
|
||||
#
|
||||
# # Load configuration files for the default server block.
|
||||
# include /etc/nginx/default.d/*.conf;
|
||||
#
|
||||
# location / {
|
||||
# }
|
||||
#
|
||||
# error_page 404 /404.html;
|
||||
# location = /40x.html {
|
||||
# }
|
||||
#
|
||||
# error_page 500 502 503 504 /50x.html;
|
||||
# location = /50x.html {
|
||||
# }
|
||||
# }
|
||||
|
||||
}
|
||||
@@ -176,6 +176,30 @@ http {
|
||||
|
||||
}
|
||||
|
||||
location /cortex/ {
|
||||
proxy_pass http://{{ masterip }}:9001/cortex/;
|
||||
proxy_read_timeout 90;
|
||||
proxy_connect_timeout 90;
|
||||
proxy_http_version 1.1; # this is essential for chunked responses to work
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header Proxy "";
|
||||
|
||||
}
|
||||
|
||||
location /cyberchef/ {
|
||||
proxy_pass http://{{ masterip }}:9080/;
|
||||
proxy_read_timeout 90;
|
||||
proxy_connect_timeout 90;
|
||||
proxy_http_version 1.1; # this is essential for chunked responses to work
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header Proxy "";
|
||||
|
||||
}
|
||||
|
||||
location /soctopus/ {
|
||||
proxy_pass http://{{ masterip }}:7000/;
|
||||
proxy_read_timeout 90;
|
||||
|
||||
@@ -76,7 +76,7 @@
  logfile = "/var/log/telegraf/telegraf.log"

  ## Override default hostname, if empty use os.Hostname()
  hostname = "{{ grains.host }}"
  hostname = "{{ grains.id }}"
  ## If set to true, do not set the "host" tag in the telegraf agent.
  omit_hostname = false

salt/common/tools/sbin/so-helix-apikey (new file, 24 lines)
@@ -0,0 +1,24 @@
#!/bin/bash

got_root() {
  # Make sure you are root
  if [ "$(id -u)" -ne 0 ]; then
    echo "This script must be run using sudo!"
    exit 1
  fi
}

got_root

if [ ! -f /opt/so/saltstack/pillar/fireeye/init.sls ]; then
  echo "This is not configured for Helix Mode. Please re-install."
  exit
else
  echo "Enter your Helix API Key: "
  read APIKEY
  sed -i "s/^ api_key.*/ api_key: $APIKEY/g" /opt/so/saltstack/pillar/fireeye/init.sls
  docker stop so-logstash
  docker rm so-logstash
  echo "Restarting Logstash for updated key"
  salt-call state.apply logstash queue=True
fi

salt/common/tools/sbin/so-playbook-ruleupdate (new file, 1 line)
@@ -0,0 +1 @@
sudo docker exec so-soctopus python3 playbook_bulk-update.py

salt/common/tools/sbin/so-playbook-sync (new file, 1 line)
@@ -0,0 +1 @@
sudo docker exec so-soctopus python3 playbook_play-sync.py

salt/common/tools/sbin/so-redis-count (new file, 1 line)
@@ -0,0 +1 @@
sudo docker exec -it so-redis redis-cli llen logstash:unparsed
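These three helpers are intended to be run on the master; so-playbook-ruleupdate is presumably the Sigma rule updater mentioned in the changelog above. A usage sketch, assuming the scripts end up on the master's PATH (their exact install location is not shown in this diff):

```
# Pull updated community Sigma signatures into Playbook (could be run from cron).
sudo so-playbook-ruleupdate

# Re-sync Plays after an update.
sudo so-playbook-sync

# See how many events are still queued in Redis waiting to be parsed.
sudo so-redis-count
```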
@@ -114,13 +114,13 @@ curdel:
|
||||
|
||||
so-curatorimage:
|
||||
cmd.run:
|
||||
- name: docker pull --disable-content-trust=false soshybridhunter/so-curator:HH1.1.0
|
||||
- name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-curator:HH1.1.0
|
||||
|
||||
so-curator:
|
||||
docker_container.running:
|
||||
- require:
|
||||
- so-curatorimage
|
||||
- image: soshybridhunter/so-curator:HH1.1.0
|
||||
- image: docker.io/soshybridhunter/so-curator:HH1.1.0
|
||||
- hostname: curator
|
||||
- name: so-curator
|
||||
- user: curator
|
||||
|
||||
salt/cyberchef/init.sls (new file, 53 lines)
@@ -0,0 +1,53 @@
|
||||
# Copyright 2014,2015,2016,2017,2018 Security Onion Solutions, LLC
|
||||
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# Create the cyberchef group
|
||||
cyberchefgroup:
|
||||
group.present:
|
||||
- name: cyberchef
|
||||
- gid: 946
|
||||
|
||||
# Add the cyberchef user
|
||||
cyberchef:
|
||||
user.present:
|
||||
- uid: 946
|
||||
- gid: 946
|
||||
- home: /opt/so/conf/cyberchef
|
||||
|
||||
cyberchefconfdir:
|
||||
file.directory:
|
||||
- name: /opt/so/conf/cyberchef
|
||||
- user: 946
|
||||
- group: 939
|
||||
- makedirs: True
|
||||
|
||||
cybercheflog:
|
||||
file.directory:
|
||||
- name: /opt/so/log/cyberchef
|
||||
- user: 946
|
||||
- group: 946
|
||||
- makedirs: True
|
||||
|
||||
so-cyberchefimage:
|
||||
cmd.run:
|
||||
- name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-cyberchef:HH1.1.3
|
||||
|
||||
so-cyberchef:
|
||||
docker_container.running:
|
||||
- require:
|
||||
- so-cyberchefimage
|
||||
- image: docker.io/soshybridhunter/so-cyberchef:HH1.1.3
|
||||
- port_bindings:
|
||||
- 0.0.0.0:9080:8080
|
||||
@@ -8,6 +8,11 @@ rules_folder: /etc/elastalert/rules/
|
||||
# the rules directory - true or false
|
||||
scan_subdirectories: true
|
||||
|
||||
# Do not disable a rule when an uncaught exception is thrown -
|
||||
# This setting should be tweaked once the following issue has been fixed
|
||||
# https://github.com/Security-Onion-Solutions/securityonion-saltstack/issues/98
|
||||
disable_rules_on_error: false
|
||||
|
||||
# How often ElastAlert will query Elasticsearch
|
||||
# The unit can be anything from weeks to seconds
|
||||
run_every:
|
||||
|
||||
@@ -15,7 +15,7 @@ timeframe:
|
||||
buffer_time:
|
||||
minutes: 10
|
||||
allow_buffer_time_overlap: true
|
||||
query_key: alert
|
||||
query_key: ["alert", "ips"]
|
||||
realert:
|
||||
days: 1
|
||||
|
||||
@@ -36,11 +36,11 @@ hive_proxies:
|
||||
|
||||
hive_alert_config:
|
||||
title: '{match[alert]}'
|
||||
type: 'external'
|
||||
type: 'NIDS'
|
||||
source: 'SecurityOnion'
|
||||
description: "`NIDS Dashboard:` \n\n <https://{{es}}/kibana/app/kibana#/dashboard/ed6f7e20-e060-11e9-8f0c-2ddbf5ed9290?_g=(refreshInterval:(display:Off,pause:!f,value:0),time:(from:now-24h,mode:quick,to:now))&_a=(columns:!(_source),index:'*:logstash-*',interval:auto,query:(query_string:(analyze_wildcard:!t,query:'sid:{match[sid]}')),sort:!('@timestamp',desc))> \n\n `IPs: `{match[source_ip]}:{match[source_port]} --> {match[destination_ip]}:{match[destination_port]} \n\n `Signature:` {match[rule_signature]}"
|
||||
severity: 2
|
||||
tags: ['elastalert', 'SecurityOnion', 'NIDS']
|
||||
tags: ['{match[sid]}','{match[source_ip]}','{match[destination_ip]}']
|
||||
tlp: 3
|
||||
status: 'New'
|
||||
follow: True
|
||||
|
||||
@@ -111,13 +111,13 @@ elastaconf:
|
||||
|
||||
so-elastalertimage:
|
||||
cmd.run:
|
||||
- name: docker pull --disable-content-trust=false soshybridhunter/so-elastalert:HH1.1.1
|
||||
- name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-elastalert:HH1.1.1
|
||||
|
||||
so-elastalert:
|
||||
docker_container.running:
|
||||
- require:
|
||||
- so-elastalertimage
|
||||
- image: soshybridhunter/so-elastalert:HH1.1.1
|
||||
- image: docker.io/soshybridhunter/so-elastalert:HH1.1.1
|
||||
- hostname: elastalert
|
||||
- name: so-elastalert
|
||||
- user: elastalert
|
||||
|
||||
@@ -19,7 +19,7 @@ ELASTICSEARCH_HOST=$1
|
||||
ELASTICSEARCH_PORT=9200
|
||||
|
||||
# Define a default directory to load pipelines from
|
||||
ELASTICSEARCH_INGEST_PIPELINES="/opt/so/saltstack/salt/elasticsearch/files/ingest/"
|
||||
ELASTICSEARCH_INGEST_PIPELINES="/opt/so/conf/elasticsearch/ingest/"
|
||||
|
||||
# Wait for ElasticSearch to initialize
|
||||
echo -n "Waiting for ElasticSearch..."
|
||||
@@ -39,7 +39,7 @@ while [[ "$COUNT" -le 240 ]]; do
|
||||
done
|
||||
if [ "$ELASTICSEARCH_CONNECTED" == "no" ]; then
|
||||
echo
|
||||
echo -e "Connection attempt timed out. Unable to connect to ElasticSearch. \nPlease try: \n -checking log(s) in /var/log/elasticsearch/\n -running 'sudo docker ps' \n -running 'sudo so-elastic-restart'"
|
||||
echo -e "Connection attempt timed out. Unable to connect to ElasticSearch. \nPlease try: \n -checking log(s) in /var/log/elasticsearch/\n -running 'sudo docker ps' \n -running 'sudo so-elastic-restart'"
|
||||
echo
|
||||
fi
|
||||
|
||||
|
||||
@@ -60,6 +60,20 @@ esconfdir:
|
||||
- group: 939
|
||||
- makedirs: True
|
||||
|
||||
esingestdir:
|
||||
file.directory:
|
||||
- name: /opt/so/conf/elasticsearch/ingest
|
||||
- user: 930
|
||||
- group: 939
|
||||
- makedirs: True
|
||||
|
||||
esingestconf:
|
||||
file.recurse:
|
||||
- name: /opt/so/conf/elasticsearch/ingest
|
||||
- source: salt://elasticsearch/files/ingest
|
||||
- user: 930
|
||||
- group: 939
|
||||
|
||||
eslog4jfile:
|
||||
file.managed:
|
||||
- name: /opt/so/conf/elasticsearch/log4j2.properties
|
||||
@@ -92,13 +106,13 @@ eslogdir:
|
||||
|
||||
so-elasticsearchimage:
|
||||
cmd.run:
|
||||
- name: docker pull --disable-content-trust=false soshybridhunter/so-elasticsearch:HH1.1.0
|
||||
- name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-elasticsearch:HH1.1.0
|
||||
|
||||
so-elasticsearch:
|
||||
docker_container.running:
|
||||
- require:
|
||||
- so-elasticsearchimage
|
||||
- image: soshybridhunter/so-elasticsearch:HH1.1.0
|
||||
- image: docker.io/soshybridhunter/so-elasticsearch:HH1.1.0
|
||||
- hostname: elasticsearch
|
||||
- name: so-elasticsearch
|
||||
- user: elasticsearch
|
||||
@@ -121,9 +135,17 @@ so-elasticsearch:
|
||||
- /nsm/elasticsearch:/usr/share/elasticsearch/data:rw
|
||||
- /opt/so/log/elasticsearch:/var/log/elasticsearch:rw
|
||||
|
||||
so-elasticsearch-pipelines-file:
|
||||
file.managed:
|
||||
- name: /opt/so/conf/elasticsearch/so-elasticsearch-pipelines
|
||||
- source: salt://elasticsearch/files/so-elasticsearch-pipelines
|
||||
- user: 930
|
||||
- group: 939
|
||||
- mode: 754
|
||||
|
||||
so-elasticsearch-pipelines:
|
||||
cmd.run:
|
||||
- name: /opt/so/saltstack/salt/elasticsearch/files/so-elasticsearch-pipelines {{ esclustername }}
|
||||
- name: /opt/so/conf/elasticsearch/so-elasticsearch-pipelines {{ esclustername }}
|
||||
|
||||
# Tell the main cluster I am here
|
||||
#curl -XPUT http://\$ELASTICSEARCH_HOST:\$ELASTICSEARCH_PORT/_cluster/settings -H'Content-Type: application/json' -d '{"persistent": {"search": {"remote": {"$HOSTNAME": {"skip_unavailable": "true", "seeds": ["$DOCKER_INTERFACE:$REVERSE_PORT"]}}}}}'
|
||||
@@ -155,13 +177,13 @@ freqlogdir:
|
||||
|
||||
so-freqimage:
|
||||
cmd.run:
|
||||
- name: docker pull --disable-content-trust=false soshybridhunter/so-freqserver:HH1.0.3
|
||||
- name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-freqserver:HH1.0.3
|
||||
|
||||
so-freq:
|
||||
docker_container.running:
|
||||
- require:
|
||||
- so-freqimage
|
||||
- image: soshybridhunter/so-freqserver:HH1.0.3
|
||||
- image: docker.io/soshybridhunter/so-freqserver:HH1.0.3
|
||||
- hostname: freqserver
|
||||
- name: so-freqserver
|
||||
- user: freqserver
|
||||
@@ -197,13 +219,13 @@ dstatslogdir:
|
||||
|
||||
so-domainstatsimage:
|
||||
cmd.run:
|
||||
- name: docker pull --disable-content-trust=false soshybridhunter/so-domainstats:HH1.0.3
|
||||
- name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-domainstats:HH1.0.3
|
||||
|
||||
so-domainstats:
|
||||
docker_container.running:
|
||||
- require:
|
||||
- so-domainstatsimage
|
||||
- image: soshybridhunter/so-domainstats:HH1.0.3
|
||||
- image: docker.io/soshybridhunter/so-domainstats:HH1.0.3
|
||||
- hostname: domainstats
|
||||
- name: so-domainstats
|
||||
- user: domainstats
|
||||
|
||||
@@ -12,7 +12,7 @@ name: {{ HOSTNAME }}
|
||||
|
||||
# Sets log level. The default log level is info.
|
||||
# Available log levels are: error, warning, info, debug
|
||||
logging.level: debug
|
||||
logging.level: error
|
||||
|
||||
# Enable debug output for selected components. To enable all selectors use ["*"]
|
||||
# Other available selectors are "beat", "publish", "service"
|
||||
@@ -66,7 +66,7 @@ filebeat.modules:
|
||||
# List of prospectors to fetch data.
|
||||
filebeat.prospectors:
|
||||
#------------------------------ Log prospector --------------------------------
|
||||
{%- if grains['role'] == 'so-sensor' or grains['role'] == "so-eval" %}
|
||||
{%- if grains['role'] == 'so-sensor' or grains['role'] == "so-eval" or grains['role'] == "so-helix" %}
|
||||
{%- if BROVER != 'SURICATA' %}
|
||||
{%- for LOGNAME in salt['pillar.get']('brologs:enabled', '') %}
|
||||
- type: log
|
||||
|
||||
@@ -39,9 +39,9 @@ filebeatpkidir:
|
||||
|
||||
# This needs to be owned by root
|
||||
filebeatconfsync:
|
||||
file.recurse:
|
||||
- name: /opt/so/conf/filebeat/etc
|
||||
- source: salt://filebeat/etc
|
||||
file.managed:
|
||||
- name: /opt/so/conf/filebeat/etc/filebeat.yml
|
||||
- source: salt://filebeat/etc/filebeat.yml
|
||||
- user: 0
|
||||
- group: 0
|
||||
- template: jinja
|
||||
@@ -58,13 +58,13 @@ filebeatconfsync:
|
||||
|
||||
so-filebeatimage:
|
||||
cmd.run:
|
||||
- name: docker pull --disable-content-trust=false soshybridhunter/so-filebeat:HH1.1.1
|
||||
- name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-filebeat:HH1.1.1
|
||||
|
||||
so-filebeat:
|
||||
docker_container.running:
|
||||
- require:
|
||||
- so-filebeatimage
|
||||
- image: soshybridhunter/so-filebeat:HH1.1.1
|
||||
- image: docker.io/soshybridhunter/so-filebeat:HH1.1.1
|
||||
- hostname: so-filebeat
|
||||
- user: root
|
||||
- extra_hosts: {{ MASTER }}:{{ MASTERIP }}
|
||||
@@ -85,4 +85,4 @@ so-filebeat:
|
||||
{%- endif %}
|
||||
- /etc/ssl/certs/intca.crt:/usr/share/filebeat/intraca.crt:ro
|
||||
- watch:
|
||||
- file: /opt/so/conf/filebeat/etc
|
||||
- file: /opt/so/conf/filebeat/etc/filebeat.yml
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Firewall Magic for the grid
|
||||
{%- if grains['role'] == 'so-master' or grains['role'] == 'so-eval' %}
|
||||
{%- if grains['role'] in ['so-eval','so-master','so-helix'] %}
|
||||
{%- set ip = salt['pillar.get']('static:masterip', '') %}
|
||||
{%- elif grains['role'] == 'so-node' %}
|
||||
{%- set ip = salt['pillar.get']('node:mainip', '') %}
|
||||
@@ -20,7 +20,7 @@ iptables_fix_fwd:
|
||||
- jump: ACCEPT
|
||||
- position: 1
|
||||
- target: DOCKER-USER
|
||||
|
||||
|
||||
# Keep localhost in the game
|
||||
iptables_allow_localhost:
|
||||
iptables.append:
|
||||
@@ -131,7 +131,7 @@ enable_wazuh_manager_1514_udp_{{ip}}:
|
||||
- save: True
|
||||
|
||||
# Rules if you are a Master
|
||||
{% if grains['role'] == 'so-master' or grains['role'] == 'so-eval' %}
|
||||
{% if grains['role'] == 'so-master' or grains['role'] == 'so-eval' or grains['role'] == 'so-helix'%}
|
||||
#This should be more granular
|
||||
iptables_allow_master_docker:
|
||||
iptables.insert:
|
||||
@@ -265,6 +265,29 @@ enable_master_navigator_4200_{{ip}}:
|
||||
- position: 1
|
||||
- save: True
|
||||
|
||||
enable_master_cortex_9001_{{ip}}:
|
||||
iptables.insert:
|
||||
- table: filter
|
||||
- chain: DOCKER-USER
|
||||
- jump: ACCEPT
|
||||
- proto: tcp
|
||||
- source: {{ ip }}
|
||||
- dport: 9001
|
||||
- position: 1
|
||||
- save: True
|
||||
|
||||
enable_master_cyberchef_9080_{{ip}}:
|
||||
iptables.insert:
|
||||
- table: filter
|
||||
- chain: DOCKER-USER
|
||||
- jump: ACCEPT
|
||||
- proto: tcp
|
||||
- source: {{ ip }}
|
||||
- dport: 9080
|
||||
- position: 1
|
||||
- save: True
|
||||
|
||||
|
||||
{% endfor %}
|
||||
|
||||
# Make it so all the minions can talk to salt and update etc.
|
||||
|
||||
@@ -61,13 +61,13 @@ fleetdbpriv:
|
||||
|
||||
so-fleetimage:
|
||||
cmd.run:
|
||||
- name: docker pull --disable-content-trust=false soshybridhunter/so-fleet:HH1.1.0
|
||||
- name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-fleet:HH1.1.3
|
||||
|
||||
so-fleet:
|
||||
docker_container.running:
|
||||
- require:
|
||||
- so-fleetimage
|
||||
- image: soshybridhunter/so-fleet:HH1.1.0
|
||||
- image: docker.io/soshybridhunter/so-fleet:HH1.1.3
|
||||
- hostname: so-fleet
|
||||
- port_bindings:
|
||||
- 0.0.0.0:8080:8080
|
||||
@@ -83,6 +83,7 @@ so-fleet:
|
||||
- KOLIDE_AUTH_JWT_KEY=thisisatest
|
||||
- KOLIDE_OSQUERY_STATUS_LOG_FILE=/var/log/osquery/status.log
|
||||
- KOLIDE_OSQUERY_RESULT_LOG_FILE=/var/log/osquery/result.log
|
||||
- KOLIDE_SERVER_URL_PREFIX=/fleet
|
||||
- binds:
|
||||
- /etc/pki/fleet.key:/ssl/server.key:ro
|
||||
- /etc/pki/fleet.crt:/ssl/server.cert:ro
|
||||
|
||||
@@ -7,7 +7,7 @@ fi
|
||||
|
||||
initpw=$(date +%s | sha256sum | base64 | head -c 16 ; echo)
|
||||
|
||||
docker exec so-fleet fleetctl config set --address https://$1:443 --tls-skip-verify
|
||||
docker exec so-fleet fleetctl config set --address https://$1:443 --tls-skip-verify --url-prefix /fleet
|
||||
docker exec so-fleet fleetctl setup --email $2 --password $initpw
|
||||
|
||||
docker exec so-fleet fleetctl apply -f /packs/palantir/Fleet/Endpoints/options.yaml
|
||||
@@ -29,7 +29,7 @@ docker run \
|
||||
--rm \
|
||||
--mount type=bind,source=/opt/so/conf/fleet/packages,target=/output \
|
||||
--mount type=bind,source=/etc/pki/launcher.crt,target=/var/launcher/launcher.crt \
|
||||
soshybridhunter/so-fleet-launcher:HH1.1.0 "$esecret" "$1":8080
|
||||
docker.io/soshybridhunter/so-fleet-launcher:HH1.1.0 "$esecret" "$1":8080
|
||||
|
||||
cp /opt/so/conf/fleet/packages/launcher.* /opt/so/saltstack/salt/launcher/packages/
|
||||
#Update timestamp on packages webpage
|
||||
|
||||
@@ -21,6 +21,28 @@ hiveconf:
|
||||
- group: 939
|
||||
- template: jinja
|
||||
|
||||
cortexconfdir:
|
||||
file.directory:
|
||||
- name: /opt/so/conf/cortex
|
||||
- makedirs: True
|
||||
- user: 939
|
||||
- group: 939
|
||||
|
||||
cortexlogdir:
|
||||
file.directory:
|
||||
- name: /opt/so/log/cortex
|
||||
- makedirs: True
|
||||
- user: 939
|
||||
- group: 939
|
||||
|
||||
cortexconf:
|
||||
file.recurse:
|
||||
- name: /opt/so/conf/cortex
|
||||
- source: salt://hive/thehive/etc
|
||||
- user: 939
|
||||
- group: 939
|
||||
- template: jinja
|
||||
|
||||
# Install Elasticsearch
|
||||
|
||||
# Made directory for ES data to live in
|
||||
@@ -33,13 +55,13 @@ hiveesdata:
|
||||
|
||||
so-thehive-esimage:
|
||||
cmd.run:
|
||||
- name: docker pull --disable-content-trust=false soshybridhunter/so-thehive-es:HH1.1.1
|
||||
- name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-thehive-es:HH1.1.1
|
||||
|
||||
so-thehive-es:
|
||||
docker_container.running:
|
||||
- require:
|
||||
- so-thehive-esimage
|
||||
- image: soshybridhunter/so-thehive-es:HH1.1.1
|
||||
- image: docker.io/soshybridhunter/so-thehive-es:HH1.1.1
|
||||
- hostname: so-thehive-es
|
||||
- name: so-thehive-es
|
||||
- user: 939
|
||||
@@ -66,27 +88,38 @@ so-thehive-es:
|
||||
|
||||
# Install Cortex
|
||||
|
||||
#so-corteximage:
|
||||
# cmd.run:
|
||||
# - name: docker pull --disable-content-trust=false soshybridhunter/so-cortex:HH1.0.3
|
||||
so-corteximage:
|
||||
cmd.run:
|
||||
- name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-thehive-cortex:HH1.1.3
|
||||
|
||||
#so-cortex:
|
||||
# docker_container.running:
|
||||
# - image: thehiveproject/cortex:latest
|
||||
# - hostname: so-cortex
|
||||
# - name: so-cortex
|
||||
# - port_bindings:
|
||||
# - 0.0.0.0:9001:9001
|
||||
so-cortex:
|
||||
docker_container.running:
|
||||
- require:
|
||||
- so-corteximage
|
||||
- image: docker.io/soshybridhunter/so-thehive-cortex:HH1.1.3
|
||||
- hostname: so-cortex
|
||||
- name: so-cortex
|
||||
- user: 939
|
||||
- binds:
|
||||
- /opt/so/conf/hive/etc/cortex-application.conf:/opt/cortex/conf/application.conf:ro
|
||||
- port_bindings:
|
||||
- 0.0.0.0:9001:9001
|
||||
|
||||
cortexscript:
|
||||
cmd.script:
|
||||
- source: salt://hive/thehive/scripts/cortex_init.sh
|
||||
- cwd: /opt/so
|
||||
- template: jinja
|
||||
|
||||
so-thehiveimage:
|
||||
cmd.run:
|
||||
- name: docker pull --disable-content-trust=false soshybridhunter/so-thehive:HH1.1.1
|
||||
- name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-thehive:HH1.1.1
|
||||
|
||||
so-thehive:
|
||||
docker_container.running:
|
||||
- require:
|
||||
- so-thehiveimage
|
||||
- image: soshybridhunter/so-thehive:HH1.1.1
|
||||
- image: docker.io/soshybridhunter/so-thehive:HH1.1.1
|
||||
- environment:
|
||||
- ELASTICSEARCH_HOST={{ MASTERIP }}
|
||||
- hostname: so-thehive
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
{%- set MASTERIP = salt['pillar.get']('static:masterip', '') %}
|
||||
{%- set CORTEXKEY = salt['pillar.get']('static:cortexorguserkey', '') %}
|
||||
|
||||
# Secret Key
|
||||
# The secret key is used to secure cryptographic functions.
|
||||
@@ -130,15 +131,15 @@ play.http.parser.maxDiskBuffer = 1G
|
||||
#
|
||||
# In order to use Cortex, first you need to enable the Cortex module by uncommenting the next line
|
||||
|
||||
#play.modules.enabled += connectors.cortex.CortexConnector
|
||||
play.modules.enabled += connectors.cortex.CortexConnector
|
||||
|
||||
cortex {
|
||||
#"CORTEX-SERVER-ID" {
|
||||
# url = ""
|
||||
# key = ""
|
||||
"CORTEX-SERVER-ID" {
|
||||
url = "http://{{ MASTERIP }}:9001/cortex/"
|
||||
key = "{{ CORTEXKEY }}"
|
||||
# # HTTP client configuration (SSL and proxy)
|
||||
# ws {}
|
||||
#}
|
||||
}
|
||||
}
|
||||
|
||||
# MISP
|
||||
@@ -207,3 +208,8 @@ misp {
|
||||
# purpose = ImportAndExport
|
||||
#} ## <-- Uncomment to complete the configuration
|
||||
}
|
||||
webhooks {
|
||||
SOCtopusWebHook {
|
||||
url = "http://{{ MASTERIP }}:7000/enrich"
|
||||
}
|
||||
}
|
||||
|
||||
salt/hive/thehive/etc/cortex-application.conf (new file, 130 lines)
@@ -0,0 +1,130 @@
|
||||
{%- set MASTERIP = salt['pillar.get']('static:masterip', '') %}
|
||||
|
||||
# Secret Key
|
||||
# The secret key is used to secure cryptographic functions.
|
||||
# WARNING: If you deploy your application on several servers, make sure to use the same key.
|
||||
play.http.secret.key="letsdewdis"
|
||||
play.http.context=/cortex/
|
||||
search.uri = "http://{{ MASTERIP }}:9400"
|
||||
|
||||
# Elasticsearch
|
||||
search {
|
||||
# Name of the index
|
||||
index = cortex
|
||||
# Name of the Elasticsearch cluster
|
||||
cluster = hive
|
||||
# Address of the Elasticsearch instance
|
||||
host = ["{{ MASTERIP }}:9500"]
|
||||
# Scroll keepalive
|
||||
keepalive = 1m
|
||||
# Size of the page for scroll
|
||||
pagesize = 50
|
||||
# Number of shards
|
||||
nbshards = 5
|
||||
# Number of replicas
|
||||
nbreplicas = 1
|
||||
# Arbitrary settings
|
||||
settings {
|
||||
# Maximum number of nested fields
|
||||
mapping.nested_fields.limit = 100
|
||||
}
|
||||
|
||||
## Authentication configuration
|
||||
#search.username = ""
|
||||
#search.password = ""
|
||||
|
||||
## SSL configuration
|
||||
#search.keyStore {
|
||||
# path = "/path/to/keystore"
|
||||
# type = "JKS" # or PKCS12
|
||||
# password = "keystore-password"
|
||||
#}
|
||||
#search.trustStore {
|
||||
# path = "/path/to/trustStore"
|
||||
# type = "JKS" # or PKCS12
|
||||
# password = "trustStore-password"
|
||||
#}
|
||||
}
|
||||
|
||||
## Cache
|
||||
#
|
||||
# If an analyzer is executed against the same observable, the previous report can be returned without re-executing the
|
||||
# analyzer. The cache is used only if the second job occurs within cache.job (the default is 10 minutes).
|
||||
cache.job = 10 minutes
|
||||
|
||||
## Authentication
|
||||
auth {
|
||||
# "provider" parameter contains the authentication provider(s). It can be multi-valued, which is useful
|
||||
# for migration.
|
||||
# The available auth types are:
|
||||
# - services.LocalAuthSrv : passwords are stored in the user entity within ElasticSearch). No
|
||||
# configuration is required.
|
||||
# - ad : use ActiveDirectory to authenticate users. The associated configuration shall be done in
|
||||
# the "ad" section below.
|
||||
# - ldap : use LDAP to authenticate users. The associated configuration shall be done in the
|
||||
# "ldap" section below.
|
||||
provider = [local]
|
||||
|
||||
ad {
|
||||
# The Windows domain name in DNS format. This parameter is required if you do not use
|
||||
# 'serverNames' below.
|
||||
#domainFQDN = "mydomain.local"
|
||||
|
||||
# Optionally you can specify the host names of the domain controllers instead of using 'domainFQDN
|
||||
# above. If this parameter is not set, TheHive uses 'domainFQDN'.
|
||||
#serverNames = [ad1.mydomain.local, ad2.mydomain.local]
|
||||
|
||||
# The Windows domain name using short format. This parameter is required.
|
||||
#domainName = "MYDOMAIN"
|
||||
|
||||
# If 'true', use SSL to connect to the domain controller.
|
||||
#useSSL = true
|
||||
}
|
||||
|
||||
ldap {
|
||||
# The LDAP server name or address. The port can be specified using the 'host:port'
|
||||
# syntax. This parameter is required if you don't use 'serverNames' below.
|
||||
#serverName = "ldap.mydomain.local:389"
|
||||
|
||||
# If you have multiple LDAP servers, use the multi-valued setting 'serverNames' instead.
|
||||
#serverNames = [ldap1.mydomain.local, ldap2.mydomain.local]
|
||||
|
||||
# Account to use to bind to the LDAP server. This parameter is required.
|
||||
#bindDN = "cn=thehive,ou=services,dc=mydomain,dc=local"
|
||||
|
||||
# Password of the binding account. This parameter is required.
|
||||
#bindPW = "***secret*password***"
|
||||
|
||||
# Base DN to search users. This parameter is required.
|
||||
#baseDN = "ou=users,dc=mydomain,dc=local"
|
||||
|
||||
# Filter to search user in the directory server. Please note that {0} is replaced
|
||||
# by the actual user name. This parameter is required.
|
||||
#filter = "(cn={0})"
|
||||
|
||||
# If 'true', use SSL to connect to the LDAP directory server.
|
||||
#useSSL = true
|
||||
}
|
||||
}
|
||||
|
||||
## ANALYZERS
|
||||
#
|
||||
analyzer {
|
||||
# Absolute path where you have pulled the Cortex-Analyzers repository.
|
||||
path = ["/Cortex-Analyzers/analyzers"]
|
||||
|
||||
# Sane defaults. Do not change unless you know what you are doing.
|
||||
fork-join-executor {
|
||||
|
||||
# Min number of threads available for analysis.
|
||||
parallelism-min = 2
|
||||
|
||||
# Parallelism (threads) ... ceil(available processors * factor).
|
||||
parallelism-factor = 2.0
|
||||
|
||||
# Max number of threads available for analysis.
|
||||
parallelism-max = 4
|
||||
}
|
||||
}
|
||||
|
||||
# It's the end my friend. Happy hunting!
|
||||
salt/hive/thehive/scripts/cortex_init.sh (new file, 64 lines)
@@ -0,0 +1,64 @@
|
||||
#!/bin/bash
|
||||
{%- set MASTERIP = salt['pillar.get']('static:masterip', '') %}
|
||||
{%- set CORTEXUSER = salt['pillar.get']('static:cortexuser', '') %}
|
||||
{%- set CORTEXPASSWORD = salt['pillar.get']('static:cortexpassword', '') %}
|
||||
{%- set CORTEXKEY = salt['pillar.get']('static:cortexkey', '') %}
|
||||
{%- set CORTEXORGNAME = salt['pillar.get']('static:cortexorgname', '') %}
|
||||
{%- set CORTEXORGUSER = salt['pillar.get']('static:cortexorguser', '') %}
|
||||
{%- set CORTEXORGUSERKEY = salt['pillar.get']('static:cortexorguserkey', '') %}
|
||||
|
||||
cortex_init(){
|
||||
sleep 60
|
||||
CORTEX_IP="{{MASTERIP}}"
|
||||
CORTEX_USER="{{CORTEXUSER}}"
|
||||
CORTEX_PASSWORD="{{CORTEXPASSWORD}}"
|
||||
CORTEX_KEY="{{CORTEXKEY}}"
|
||||
CORTEX_ORG_NAME="{{CORTEXORGNAME}}"
|
||||
CORTEX_ORG_DESC="{{CORTEXORGNAME}} organization created by Security Onion setup"
|
||||
CORTEX_ORG_USER="{{CORTEXORGUSER}}"
|
||||
CORTEX_ORG_USER_KEY="{{CORTEXORGUSERKEY}}"
|
||||
SOCTOPUS_CONFIG="/opt/so/saltstack/salt/soctopus/files/SOCtopus.conf"
|
||||
|
||||
|
||||
# Migrate DB
|
||||
curl -v -k -XPOST "https://$CORTEX_IP:/cortex/api/maintenance/migrate"
|
||||
|
||||
# Create initial Cortex superadmin
|
||||
curl -v -k "https://$CORTEX_IP/cortex/api/user" -H "Content-Type: application/json" -d "{\"login\" : \"$CORTEX_USER\",\"name\" : \"$CORTEX_USER\",\"roles\" : [\"superadmin\"],\"preferences\" : \"{}\",\"password\" : \"$CORTEX_PASSWORD\", \"key\": \"$CORTEX_KEY\"}"
|
||||
|
||||
# Create user-supplied org
|
||||
curl -k -XPOST -H "Authorization: Bearer $CORTEX_KEY" -H "Content-Type: application/json" "https://$CORTEX_IP/cortex/api/organization" -d "{ \"name\": \"$CORTEX_ORG_NAME\",\"description\": \"$CORTEX_ORG_DESC\",\"status\": \"Active\"}"
|
||||
|
||||
# Create user-supplied org user
|
||||
curl -k -XPOST -H "Authorization: Bearer $CORTEX_KEY" -H "Content-Type: application/json" "https://$CORTEX_IP/cortex/api/user" -d "{\"name\": \"$CORTEX_ORG_USER\",\"roles\": [\"read\",\"analyze\",\"orgadmin\"],\"organization\": \"$CORTEX_ORG_NAME\",\"login\": \"$CORTEX_ORG_USER\",\"key\": \"$CORTEX_ORG_USER_KEY\" }"
|
||||
|
||||
# Enable URLScan.io Analyzer
|
||||
curl -v -k -XPOST -H "Authorization: Bearer $CORTEX_ORG_USER_KEY" -H "Content-Type: application/json" "https://$CORTEX_IP/cortex/api/organization/analyzer/Urlscan_io_Search_0_1_0" -d '{"name":"Urlscan_io_Search_0_1_0","configuration":{"auto_extract_artifacts":false,"check_tlp":true,"max_tlp":2}}'
|
||||
|
||||
# Enable Cert PassiveDNS Analyzer
|
||||
curl -v -k -XPOST -H "Authorization: Bearer $CORTEX_ORG_USER_KEY" -H "Content-Type: application/json" "https://$CORTEX_IP/cortex/api/organization/analyzer/CERTatPassiveDNS_2_0" -d '{"name":"CERTatPassiveDNS_2_0","configuration":{"auto_extract_artifacts":false,"check_tlp":true,"max_tlp":2, "limit": 100}}'
|
||||
|
||||
# Revoke $CORTEX_USER key
|
||||
curl -k -XDELETE -H "Authorization: Bearer $CORTEX_KEY" "https://$CORTEX_IP/api/user/$CORTEX_USER/key"
|
||||
|
||||
# Update SOCtopus config with apikey value
|
||||
#sed -i "s/cortex_key = .*/cortex_key = $CORTEX_KEY/" $SOCTOPUS_CONFIG
|
||||
|
||||
touch /opt/so/state/cortex.txt
|
||||
|
||||
}
|
||||
|
||||
if [ -f /opt/so/state/cortex.txt ]; then
|
||||
exit 0
|
||||
else
|
||||
rm -f garbage_file
|
||||
while ! wget -O garbage_file {{MASTERIP}}:9500 2>/dev/null
|
||||
do
|
||||
echo "Waiting for Elasticsearch..."
|
||||
rm -f garbage_file
|
||||
sleep 1
|
||||
done
|
||||
rm -f garbage_file
|
||||
sleep 5
|
||||
cortex_init
|
||||
fi
|
||||
@@ -19,7 +19,13 @@ hive_init(){
|
||||
|
||||
# Create initial TheHive user
|
||||
curl -v -k "https://$HIVE_IP/thehive/api/user" -H "Content-Type: application/json" -d "{\"login\" : \"$HIVE_USER\",\"name\" : \"$HIVE_USER\",\"roles\" : [\"read\",\"alert\",\"write\",\"admin\"],\"preferences\" : \"{}\",\"password\" : \"$HIVE_PASSWORD\", \"key\": \"$HIVE_KEY\"}"
|
||||
|
||||
# Pre-load custom fields
|
||||
#
|
||||
# reputation
|
||||
curl -v -k "https://$HIVE_IP/thehive/api/list/custom_fields" -H "Authorization: Bearer $HIVE_KEY" -H "Content-Type: application/json" -d "{\"value\":{\"name\": \"reputation\", \"reference\": \"reputation\", \"description\": \"This field provides an overall reputation status for an address/domain.\", \"type\": \"string\", \"options\": []}}"
|
||||
|
||||
|
||||
# Update SOCtopus config with apikey value
|
||||
#sed -i "s/hive_key = .*/hive_key = $HIVE_KEY/" $SOCTOPUS_CONFIG
|
||||
|
||||
|
||||
@@ -63,13 +63,13 @@ ruleslink:
|
||||
|
||||
so-idstoolsimage:
|
||||
cmd.run:
|
||||
- name: docker pull --disable-content-trust=false soshybridhunter/so-idstools:HH1.1.0
|
||||
- name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-idstools:HH1.1.0
|
||||
|
||||
so-idstools:
|
||||
docker_container.running:
|
||||
- require:
|
||||
- so-idstoolsimage
|
||||
- image: soshybridhunter/so-idstools:HH1.1.0
|
||||
- image: docker.io/soshybridhunter/so-idstools:HH1.1.0
|
||||
- hostname: so-idstools
|
||||
- user: socore
|
||||
- binds:
|
||||
|
||||
@@ -56,14 +56,14 @@ synckibanacustom:
|
||||
|
||||
so-kibanaimage:
|
||||
cmd.run:
|
||||
- name: docker pull --disable-content-trust=false soshybridhunter/so-kibana:HH1.1.1
|
||||
- name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-kibana:HH1.1.1
|
||||
|
||||
# Start the kibana docker
|
||||
so-kibana:
|
||||
docker_container.running:
|
||||
- require:
|
||||
- so-kibanaimage
|
||||
- image: soshybridhunter/so-kibana:HH1.1.1
|
||||
- image: docker.io/soshybridhunter/so-kibana:HH1.1.1
|
||||
- hostname: kibana
|
||||
- user: kibana
|
||||
- environment:
|
||||
|
||||
salt/logstash/conf/conf.enabled.txt.so-helix (new file, 47 lines)
@@ -0,0 +1,47 @@
|
||||
# This is where you can specify which LogStash configs get loaded.
|
||||
#
|
||||
# The custom folder on the master gets automatically synced to each logstash
|
||||
# node.
|
||||
#
|
||||
# To enable a custom configuration see the following example and uncomment:
|
||||
# /usr/share/logstash/pipeline.custom/1234_input_custom.conf
|
||||
##
|
||||
# All of the defaults are loaded.
|
||||
/usr/share/logstash/pipeline.dynamic/0010_input_hhbeats.conf
|
||||
/usr/share/logstash/pipeline.so/1033_preprocess_snort.conf
|
||||
/usr/share/logstash/pipeline.so/1100_preprocess_bro_conn.conf
|
||||
/usr/share/logstash/pipeline.so/1101_preprocess_bro_dhcp.conf
|
||||
/usr/share/logstash/pipeline.so/1102_preprocess_bro_dns.conf
|
||||
/usr/share/logstash/pipeline.so/1103_preprocess_bro_dpd.conf
|
||||
/usr/share/logstash/pipeline.so/1104_preprocess_bro_files.conf
|
||||
/usr/share/logstash/pipeline.so/1105_preprocess_bro_ftp.conf
|
||||
/usr/share/logstash/pipeline.so/1106_preprocess_bro_http.conf
|
||||
/usr/share/logstash/pipeline.so/1107_preprocess_bro_irc.conf
|
||||
/usr/share/logstash/pipeline.so/1108_preprocess_bro_kerberos.conf
|
||||
/usr/share/logstash/pipeline.so/1109_preprocess_bro_notice.conf
|
||||
/usr/share/logstash/pipeline.so/1110_preprocess_bro_rdp.conf
|
||||
/usr/share/logstash/pipeline.so/1111_preprocess_bro_signatures.conf
|
||||
/usr/share/logstash/pipeline.so/1112_preprocess_bro_smtp.conf
|
||||
/usr/share/logstash/pipeline.so/1113_preprocess_bro_snmp.conf
|
||||
/usr/share/logstash/pipeline.so/1114_preprocess_bro_software.conf
|
||||
/usr/share/logstash/pipeline.so/1115_preprocess_bro_ssh.conf
|
||||
/usr/share/logstash/pipeline.so/1116_preprocess_bro_ssl.conf
|
||||
/usr/share/logstash/pipeline.so/1117_preprocess_bro_syslog.conf
|
||||
/usr/share/logstash/pipeline.so/1118_preprocess_bro_tunnel.conf
|
||||
/usr/share/logstash/pipeline.so/1119_preprocess_bro_weird.conf
|
||||
/usr/share/logstash/pipeline.so/1121_preprocess_bro_mysql.conf
|
||||
/usr/share/logstash/pipeline.so/1122_preprocess_bro_socks.conf
|
||||
/usr/share/logstash/pipeline.so/1123_preprocess_bro_x509.conf
|
||||
/usr/share/logstash/pipeline.so/1124_preprocess_bro_intel.conf
|
||||
/usr/share/logstash/pipeline.so/1125_preprocess_bro_modbus.conf
|
||||
/usr/share/logstash/pipeline.so/1126_preprocess_bro_sip.conf
|
||||
/usr/share/logstash/pipeline.so/1127_preprocess_bro_radius.conf
|
||||
/usr/share/logstash/pipeline.so/1128_preprocess_bro_pe.conf
|
||||
/usr/share/logstash/pipeline.so/1129_preprocess_bro_rfb.conf
|
||||
/usr/share/logstash/pipeline.so/1130_preprocess_bro_dnp3.conf
|
||||
/usr/share/logstash/pipeline.so/1131_preprocess_bro_smb_files.conf
|
||||
/usr/share/logstash/pipeline.so/1132_preprocess_bro_smb_mapping.conf
|
||||
/usr/share/logstash/pipeline.so/1133_preprocess_bro_ntlm.conf
|
||||
/usr/share/logstash/pipeline.so/1134_preprocess_bro_dce_rpc.conf
|
||||
/usr/share/logstash/pipeline.so/8001_postprocess_common_ip_augmentation.conf
|
||||
/usr/share/logstash/pipeline.dynamic/9997_output_helix.conf
|
||||
@@ -1,7 +1,7 @@
|
||||
input {
|
||||
beats {
|
||||
port => "5044"
|
||||
ssl => true
|
||||
ssl => false
|
||||
ssl_certificate_authorities => ["/usr/share/filebeat/ca.crt"]
|
||||
ssl_certificate => "/usr/share/logstash/filebeat.crt"
|
||||
ssl_key => "/usr/share/logstash/filebeat.key"
|
||||
@@ -9,23 +9,6 @@ input {
|
||||
}
|
||||
}
|
||||
filter {
|
||||
if [type] == "ids" or [type] =~ "bro" {
|
||||
mutate {
|
||||
rename => { "host" => "beat_host" }
|
||||
remove_tag => ["beat"]
|
||||
add_field => { "sensor_name" => "%{[beat][name]}" }
|
||||
add_field => { "syslog-host_from" => "%{[beat][name]}" }
|
||||
remove_field => [ "beat", "prospector", "input", "offset" ]
|
||||
}
|
||||
}
|
||||
if [type] =~ "ossec" {
|
||||
mutate {
|
||||
rename => { "host" => "beat_host" }
|
||||
remove_tag => ["beat"]
|
||||
add_field => { "syslog-host_from" => "%{[beat][name]}" }
|
||||
remove_field => [ "beat", "prospector", "input", "offset" ]
|
||||
}
|
||||
}
|
||||
if [type] == "osquery" {
|
||||
mutate {
|
||||
rename => { "host" => "beat_host" }
|
||||
|
||||
salt/logstash/files/dynamic/9997_output_helix.conf (new file, 142 lines)
@@ -0,0 +1,142 @@
|
||||
{% set HELIX_API_KEY = salt['pillar.get']('fireeye:helix:api_key', '') %}
|
||||
|
||||
filter {
|
||||
if [type] =~ /^bro_conn|bro_dns|bro_http|bro_files|bro_ssl|bro_dhcp|bro_x509$/ {
|
||||
grok {
|
||||
match => [
|
||||
"source_ip", "^%{IPV4:srcipv4}$",
|
||||
"source_ip", "(?<srcipv6>^([0-9A-Fa-f]{0,4}:){2,7}([0-9A-Fa-f]{1,4}$|((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\.|$)){4})$)"
|
||||
]
|
||||
}
|
||||
grok {
|
||||
match => [
|
||||
"destination_ip", "(?<dstipv6>^([0-9A-Fa-f]{0,4}:){2,7}([0-9A-Fa-f]{1,4}$|((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\.|$)){4})$)",
|
||||
"destination_ip", "^%{IPV4:dstipv4}$"
|
||||
]
|
||||
}
|
||||
|
||||
geoip {
|
||||
source => "[source_ip]"
|
||||
target => "source_geo"
|
||||
}
|
||||
geoip {
|
||||
source => "[destination_ip]"
|
||||
target => "destination_geo"
|
||||
}
|
||||
mutate {
|
||||
#rename => { "%{[source_geo][country_code]}" => "srccountrycode" }
|
||||
#rename => { "%{[destination_geo][country_code]}" => "dstcountrycode" }
|
||||
rename => { "[beat_host][name]" => "sensor" }
|
||||
copy => { "sensor" => "rawmsghostname" }
|
||||
rename => { "message" => "rawmsg" }
|
||||
#rename => { "event_type" => "program" }
|
||||
copy => { "type" => "class" }
|
||||
copy => { "class" => "program"}
|
||||
rename => { "source_port" => "srcport" }
|
||||
rename => { "destination_port" => "dstport" }
|
||||
remove_field => ["source_ip", "destination_ip"]
|
||||
remove_field => ["sensorname", "sensor_name", "service", "source", "tags", "syslog-host"]
|
||||
remove_field => ["sensor_name", "source_ips", "ips", "destination_ips", "syslog-priority", "syslog-file_name", "syslog-facility"]
|
||||
}
|
||||
if "bro_conn" in [class] {
|
||||
mutate {
|
||||
#add_field => { "metaclass" => "connection" }
|
||||
rename => { "original_bytes" => "sentbytes" }
|
||||
rename => { "respond_bytes" => "rcvdbytes" }
|
||||
rename => { "connection_state" => "connstate" }
|
||||
rename => { "uid" => "connectionid" }
|
||||
rename => { "respond_packets" => "rcvdpackets" }
|
||||
rename => { "original_packets" => "sentpackets" }
|
||||
rename => { "respond_ip_bytes" => "rcvdipbytes" }
|
||||
rename => { "original_ip_bytes" => "sentipbytes" }
|
||||
rename => { "local_respond" => "local_resp" }
|
||||
rename => { "local_orig" => "localorig" }
|
||||
rename => { "missed_bytes" => "missingbytes" }
|
||||
}
|
||||
}
|
||||
if "bro_dns" in [class] {
|
||||
mutate{
|
||||
#add_field = { "metaclass" => "dns"}
|
||||
rename => { "answers" => "answer" }
|
||||
rename => { "query" => "domain" }
|
||||
rename => { "query_class" => "queryclass" }
|
||||
rename => { "query_class_name" => "queryclassname" }
|
||||
rename => { "query_type" => "querytype" }
|
||||
rename => { "query_type_name" => "querytypename" }
|
||||
rename => { "ra" => "recursionavailable" }
|
||||
rename => { "rd" => "recursiondesired" }
|
||||
}
|
||||
}
|
||||
if "bro_dhcp" in [class] {
|
||||
mutate{
|
||||
#add_field = { "metaclass" => "dhcp"}
|
||||
rename => { "message_types" => "direction" }
|
||||
rename => { "lease_time" => "duration" }
|
||||
}
|
||||
}
|
||||
if "bro_files" in [class] {
|
||||
mutate{
|
||||
#add_field = { "metaclass" => "dns"}
|
||||
rename => { "missing_bytes" => "missingbytes" }
|
||||
rename => { "fuid" => "fileid" }
|
||||
rename => { "uid" => "connectionid" }
|
||||
}
|
||||
}
|
||||
if "bro_http" in [class] {
|
||||
mutate{
|
||||
#add_field = { "metaclass" => "dns"}
|
||||
rename => { "virtual_host" => "hostname" }
|
||||
rename => { "status_code" => "statuscode" }
|
||||
rename => { "status_message" => "statusmsg" }
|
||||
rename => { "resp_mime_types" => "rcvdmimetype" }
|
||||
rename => { "resp_fuids" => "rcvdfileid" }
|
||||
rename => { "response_body_len" => "rcvdbodybytes" }
|
||||
rename => { "request_body_len" => "sentbodybytes" }
|
||||
rename => { "uid" => "connectionid" }
|
||||
rename => { "ts"=> "eventtime" }
|
||||
rename => { "@timestamp"=> "eventtime" }
|
||||
}
|
||||
}
|
||||
if "bro_ssl" in [class] {
|
||||
mutate{
|
||||
#add_field = { "metaclass" => "dns"}
|
||||
rename => { "status_code" => "statuscode" }
|
||||
rename => { "status_message" => "statusmsg" }
|
||||
rename => { "resp_mime_types" => "rcvdmimetype" }
|
||||
rename => { "resp_fuids" => "rcvdfileid" }
|
||||
rename => { "response_body_len" => "rcvdbodybytes" }
|
||||
rename => { "request_body_len" => "sentbodybytes" }
|
||||
}
|
||||
}
|
||||
if "bro_weird" in [class] {
|
||||
mutate{
|
||||
#add_field = { "metaclass" => "dns"}
|
||||
rename => { "name" => "eventname" }
|
||||
}
|
||||
}
|
||||
if "bro_x509" in [class] {
|
||||
mutate{
|
||||
#add_field = { "metaclass" => "dns"}
|
||||
rename => { "certificate_common_name" => "certname" }
|
||||
rename => { "certificate_subject" => "certsubject" }
|
||||
rename => { "issuer_common_name" => "issuer" }
|
||||
rename => { "certificate_issuer" => "issuersubject" }
|
||||
rename => { "certificate_not_valid_before" => "issuetime" }
|
||||
rename => { "certificate_key_type" => "cert_type" }
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
output {
|
||||
if [type] =~ /^bro_conn|bro_dns|bro_http|bro_files|bro_ssl|bro_dhcp|bro_x509$/ {
|
||||
http {
|
||||
url => "https://helix-integrations.cloud.aws.apps.fireeye.com/api/upload"
|
||||
http_method => post
|
||||
http_compression => true
|
||||
socket_timeout => 60
|
||||
headers => ["Authorization","{{ HELIX_API_KEY }}"]
|
||||
format => json_batch
|
||||
}
|
||||
}
|
||||
}
|
||||
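The http output above batches events as JSON (`format => json_batch`) and posts them to the Helix upload API with the pillar-supplied API key in the Authorization header. A rough hand-rolled equivalent of a single upload can be useful for confirming the key and connectivity before Logstash is in the loop; the event fields below are purely illustrative:

```
# Hypothetical connectivity check against the Helix upload endpoint used above.
# HELIX_API_KEY should hold the value from the fireeye:helix:api_key pillar.
HELIX_API_KEY="REPLACE_ME"
curl -s -X POST "https://helix-integrations.cloud.aws.apps.fireeye.com/api/upload" \
  -H "Authorization: ${HELIX_API_KEY}" \
  -H "Content-Type: application/json" \
  -d '[{"class":"bro_conn","sensor":"example-sensor","srcipv4":"10.0.0.1","dstipv4":"10.0.0.2"}]'
```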
@@ -30,6 +30,13 @@
|
||||
{% set dstats = salt['pillar.get']('master:domainstats', '0') %}
|
||||
{% set nodetype = salt['grains.get']('role', '') %}
|
||||
|
||||
{% elif grains['role'] == 'so-helix' %}
|
||||
|
||||
{% set lsheap = salt['pillar.get']('master:lsheap', '') %}
|
||||
{% set freq = salt['pillar.get']('master:freq', '0') %}
|
||||
{% set dstats = salt['pillar.get']('master:domainstats', '0') %}
|
||||
{% set nodetype = salt['grains.get']('role', '') %}
|
||||
|
||||
{% elif grains['role'] == 'so-eval' %}
|
||||
|
||||
{% set lsheap = salt['pillar.get']('master:lsheap', '') %}
|
||||
@@ -148,13 +155,13 @@ lslogdir:
|
||||
# Add the container
|
||||
so-logstashimage:
|
||||
cmd.run:
|
||||
- name: docker pull --disable-content-trust=false soshybridhunter/so-logstash:HH1.1.1
|
||||
- name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-logstash:HH1.1.1
|
||||
|
||||
so-logstash:
|
||||
docker_container.running:
|
||||
- require:
|
||||
- so-logstashimage
|
||||
- image: soshybridhunter/so-logstash:HH1.1.1
|
||||
- image: docker.io/soshybridhunter/so-logstash:HH1.1.1
|
||||
- hostname: so-logstash
|
||||
- name: so-logstash
|
||||
- user: logstash
|
||||
|
||||
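These image pins pass `--disable-content-trust=false`, which leaves Docker Content Trust enabled so signed images are verified at pull time. If you want to test signature verification outside of Salt, an equivalent manual pull would look roughly like this (image tag taken from the state above):

```
# Pull with Docker Content Trust enforced; fails if the tag is not signed.
DOCKER_CONTENT_TRUST=1 docker pull docker.io/soshybridhunter/so-logstash:HH1.1.1
```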
48 salt/master/files/registry/scripts/so-docker-download.sh Normal file
@@ -0,0 +1,48 @@
#!/bin/bash

MASTER={{ MASTER }}
VERSION="HH1.1.3"
TRUSTED_CONTAINERS=( \
  "so-core:$VERSION" \
  "so-cyberchef:$VERSION" \
  "so-acng:$VERSION" \
  "so-sensoroni:$VERSION" \
  "so-fleet:$VERSION" \
  "so-soctopus:$VERSION" \
  "so-steno:$VERSION" \
  "so-playbook:$VERSION" \
  "so-thehive-cortex:$VERSION" \
  "so-thehive:$VERSION" \
  "so-thehive-es:$VERSION" \
  "so-wazuh:$VERSION" \
  "so-kibana:$VERSION" \
  "so-auth-ui:$VERSION" \
  "so-auth-api:$VERSION" \
  "so-elastalert:$VERSION" \
  "so-navigator:$VERSION" \
  "so-filebeat:$VERSION" \
  "so-suricata:$VERSION" \
  "so-logstash:$VERSION" \
  "so-bro:$VERSION" \
  "so-idstools:$VERSION" \
  "so-fleet-launcher:$VERSION" \
  "so-freqserver:$VERSION" \
  "so-influxdb:$VERSION" \
  "so-grafana:$VERSION" \
  "so-telegraf:$VERSION" \
  "so-redis:$VERSION" \
  "so-mysql:$VERSION" \
  "so-curtor:$VERSION" \
  "so-elasticsearch:$VERSION" \
  "so-domainstats:$VERSION" \
  "so-tcpreplay:$VERSION" \
)

for i in "${TRUSTED_CONTAINERS[@]}"
do
  # Pull down the trusted docker image
  docker pull --disable-content-trust=false docker.io/soshybridhunter/$i
  # Tag it with the new registry destination
  docker tag soshybridhunter/$i $MASTER:5000/soshybridhunter/$i
  docker push $MASTER:5000/soshybridhunter/$i
done
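Assuming the master's registry is listening on port 5000 as the script above implies, the mirror can be sanity-checked after it runs using the standard Docker Registry v2 API (the endpoints come from the registry API, not from this repo; the master address and TLS assumption are placeholders):

```
# List repositories that made it into the local registry (TLS assumed on :5000).
MASTER="so-master.example.com"   # placeholder
curl -sk "https://$MASTER:5000/v2/_catalog"
# Show the tags pushed for a single image
curl -sk "https://$MASTER:5000/v2/soshybridhunter/so-logstash/tags/list"
```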
@@ -17,6 +17,15 @@
|
||||
|
||||
{% if masterproxy == 1 %}
|
||||
|
||||
socore_own_saltstack:
|
||||
file.directory:
|
||||
- name: /opt/so/saltstack
|
||||
- user: socore
|
||||
- group: socore
|
||||
- recurse:
|
||||
- user
|
||||
- group
|
||||
|
||||
# Create the directories for apt-cacher-ng
|
||||
aptcacherconfdir:
|
||||
file.directory:
|
||||
@@ -48,14 +57,14 @@ acngcopyconf:
|
||||
|
||||
so-acngimage:
|
||||
cmd.run:
|
||||
- name: docker pull --disable-content-trust=false soshybridhunter/so-acng:HH1.1.0
|
||||
- name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-acng:HH1.1.0
|
||||
|
||||
# Install the apt-cacher-ng container
|
||||
so-aptcacherng:
|
||||
docker_container.running:
|
||||
- require:
|
||||
- so-acngimage
|
||||
- image: soshybridhunter/so-acng:HH1.1.0
|
||||
- image: docker.io/soshybridhunter/so-acng:HH1.1.0
|
||||
- hostname: so-acng
|
||||
- port_bindings:
|
||||
- 0.0.0.0:3142:3142
|
||||
|
||||
26 salt/motd/files/package_update_reboot_required.jinja Normal file
@@ -0,0 +1,26 @@
{% set needs_restarting_check = salt['mine.get']('*', 'needs_restarting.check', tgt_type='glob') -%}

{%- if needs_restarting_check %}
{%- set minions_need_restarted = [] %}

{%- for minion, need_restarted in needs_restarting_check | dictsort() %}
{%- if need_restarted == 'True' %}
{% do minions_need_restarted.append(minion) %}
{%- endif %}
{%- endfor -%}

{%- if minions_need_restarted | length > 0 %}
****************************************************************************************************
* The following nodes in your Security Onion grid may need to be restarted due to package updates. *
* If the node has already been patched, restarted and been up for less than 15 minutes, then it    *
* may not have updated its restart_needed status yet. This will cause it to be listed below, even  *
* if it has already been restarted. This feature will be improved in the future.                   *
****************************************************************************************************

{% for minion in minions_need_restarted -%}
{{ minion }}
{% endfor -%}

{%- endif -%}

{%- endif -%}
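The template reads each minion's `needs_restarting.check` result from the Salt mine. If the MOTD ever looks stale, the same mine data can be queried directly from the master; a quick check, assuming the mine has been populated by the needs_restarting state later in this diff:

```
# Dump the mine data the MOTD template renders (run on the master).
salt '*' mine.get '*' needs_restarting.check
```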
5 salt/motd/init.sls Normal file
@@ -0,0 +1,5 @@
package_update_reboot_required_motd:
  file.managed:
    - name: /etc/motd
    - source: salt://motd/files/package_update_reboot_required.jinja
    - template: jinja
@@ -50,13 +50,13 @@ mysqldatadir:
|
||||
|
||||
so-mysqlimage:
|
||||
cmd.run:
|
||||
- name: docker pull --disable-content-trust=false soshybridhunter/so-mysql:HH1.1.0
|
||||
- name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-mysql:HH1.1.0
|
||||
|
||||
so-mysql:
|
||||
docker_container.running:
|
||||
- require:
|
||||
- so-mysqlimage
|
||||
- image: soshybridhunter/so-mysql:HH1.1.0
|
||||
- image: docker.io/soshybridhunter/so-mysql:HH1.1.0
|
||||
- hostname: so-mysql
|
||||
- user: socore
|
||||
- port_bindings:
|
||||
|
||||
5 salt/patch/needs_restarting.sls Normal file
@@ -0,0 +1,5 @@
needs_restarting:
  module.run:
    - mine.send:
      - func: needs_restarting.check
    - order: last
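This state pushes the result of `needs_restarting.check` into the mine at the end of each run. On a CentOS minion the underlying check can also be run by hand with the `needs-restarting` utility from yum-utils (installed by the `yum.packages` state elsewhere in this commit):

```
# Exit status 1 means a reboot is needed after the latest package updates.
needs-restarting -r
echo "exit code: $?"
```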
10 salt/patch/os/init.sls Normal file
@@ -0,0 +1,10 @@
include:
{% if grains.os == "CentOS" %}
  - yum.packages
{% endif %}
  - patch.needs_restarting

patch_os:
  pkg.uptodate:
    - name: patch_os
    - refresh: True
76 salt/patch/os/schedule.sls Normal file
@@ -0,0 +1,76 @@
{% if salt['pillar.get']('patch:os:schedule_name') %}
{% set patch_os_pillar = salt['pillar.get']('patch:os') %}
{% set schedule_name = patch_os_pillar.schedule_name %}
{% set splay = patch_os_pillar.get('splay', 300) %}

{% if schedule_name != 'manual' and schedule_name != 'auto' %}
{% import_yaml "patch/os/schedules/"~schedule_name~".yml" as os_schedule %}

{% if patch_os_pillar.enabled %}

patch_os_schedule:
  schedule.present:
    - function: state.sls
    - job_args:
      - patch.os
    - when:
{% for days in os_schedule.patch.os.schedule %}
{% for day, times in days.items() %}
{% for time in times %}
      - {{day}} {{time}}
{% endfor %}
{% endfor %}
{% endfor %}
    - splay: {{splay}}
    - return_job: True

{% else %}

disable_patch_os_schedule:
  schedule.disabled:
    - name: patch_os_schedule

{% endif %}


{% elif schedule_name == 'auto' %}

{% if patch_os_pillar.enabled %}

patch_os_schedule:
  schedule.present:
    - function: state.sls
    - job_args:
      - patch.os
    - hours: 8
    - splay: {{splay}}
    - return_job: True

{% else %}

disable_patch_os_schedule:
  schedule.disabled:
    - name: patch_os_schedule

{% endif %}

{% elif schedule_name == 'manual' %}

remove_patch_os_schedule:
  schedule.absent:
    - name: patch_os_schedule

{% endif %}

{% else %}

no_patch_os_schedule_name_set:
  test.fail_without_changes:
    - name: "Set a pillar value for patch:os:schedule_name in this minion's .sls file. If an OS patch schedule is not listed as enabled in show_schedule output below, then OS patches will need to be applied manually until this is corrected."

show_patch_os_schedule:
  module.run:
    - schedule.is_enabled:
      - name: patch_os_schedule

{% endif %}
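Once a named schedule has been rendered, the resulting Salt schedule entry can be re-applied or inspected from the minion with the stock schedule module, for example:

```
# Re-render and apply the patch schedule for this minion
salt-call state.apply patch.os.schedule
# List the schedule entries Salt now knows about (look for patch_os_schedule)
salt-call schedule.list
```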
10 salt/patch/os/schedules/example_schedule.yml Normal file
@@ -0,0 +1,10 @@
patch:
  os:
    schedule:
      - Tuesday:
        - '15:00'
      - Thursday:
        - '03:00'
      - Saturday:
        - '01:00'
        - '15:00'
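For a minion to pick up a schedule like this, its pillar needs a matching `patch:os` block. A minimal sketch follows; the key names come from schedule.sls above, while the pillar file path and values are assumptions for illustration only:

```
# Hypothetical: append a patch:os block to a minion's pillar file (path assumed).
PILLAR_FILE="/opt/so/saltstack/pillar/minions/example-minion.sls"
cat <<'EOF' >> "$PILLAR_FILE"
patch:
  os:
    enabled: True
    schedule_name: example_schedule
    splay: 300
EOF
```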
@@ -96,13 +96,13 @@ stenolog:
|
||||
|
||||
so-stenoimage:
|
||||
cmd.run:
|
||||
- name: docker pull --disable-content-trust=false soshybridhunter/so-steno:HH1.1.1
|
||||
- name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-steno:HH1.1.3
|
||||
|
||||
so-steno:
|
||||
docker_container.running:
|
||||
- require:
|
||||
- so-stenoimage
|
||||
- image: soshybridhunter/so-steno:HH1.1.1
|
||||
- image: docker.io/soshybridhunter/so-steno:HH1.1.3
|
||||
- network_mode: host
|
||||
- privileged: True
|
||||
- port_bindings:
|
||||
|
||||
Binary file not shown.
@@ -11,9 +11,9 @@ playbookdb:
|
||||
|
||||
playbookwebhook:
|
||||
module.run:
|
||||
- name: sqlite3.modify
|
||||
- db: /opt/so/conf/playbook/redmine.db
|
||||
- sql: "update webhooks set url = 'http://{{MASTERIP}}:7000/playbook/webhook' where project_id = 1"
|
||||
- sqlite3.modify:
|
||||
- db: /opt/so/conf/playbook/redmine.db
|
||||
- sql: "update webhooks set url = 'http://{{MASTERIP}}:7000/playbook/webhook' where project_id = 1"
|
||||
|
||||
navigatorconfig:
|
||||
file.managed:
|
||||
@@ -26,13 +26,13 @@ navigatorconfig:
|
||||
|
||||
so-playbookimage:
|
||||
cmd.run:
|
||||
- name: docker pull --disable-content-trust=false soshybridhunter/so-playbook:HH1.1.1
|
||||
- name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-playbook:HH1.1.3
|
||||
|
||||
so-playbook:
|
||||
docker_container.running:
|
||||
- require:
|
||||
- so-playbookimage
|
||||
- image: soshybridhunter/so-playbook:HH1.1.1
|
||||
- image: docker.io/soshybridhunter/so-playbook:HH1.1.3
|
||||
- hostname: playbook
|
||||
- name: so-playbook
|
||||
- binds:
|
||||
@@ -42,13 +42,13 @@ so-playbook:
|
||||
|
||||
so-navigatorimage:
|
||||
cmd.run:
|
||||
- name: docker pull --disable-content-trust=false soshybridhunter/so-navigator:HH1.1.1
|
||||
- name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-navigator:HH1.1.1
|
||||
|
||||
so-navigator:
|
||||
docker_container.running:
|
||||
- require:
|
||||
- so-navigatorimage
|
||||
- image: soshybridhunter/so-navigator:HH1.1.1
|
||||
- image: docker.io/soshybridhunter/so-navigator:HH1.1.1
|
||||
- hostname: navigator
|
||||
- name: so-navigator
|
||||
- binds:
|
||||
@@ -56,3 +56,9 @@ so-navigator:
|
||||
- /opt/so/conf/playbook/nav_layer_playbook.json:/nav-app/src/assets/playbook.json:ro
|
||||
- port_bindings:
|
||||
- 0.0.0.0:4200:4200
|
||||
|
||||
/usr/sbin/so-playbook-sync:
|
||||
cron.present:
|
||||
- identifier: so-playbook-sync
|
||||
- user: root
|
||||
- minute: '*/5'
|
||||
|
||||
@@ -46,13 +46,13 @@ redisconfsync:
|
||||
|
||||
so-redisimage:
|
||||
cmd.run:
|
||||
- name: docker pull --disable-content-trust=false soshybridhunter/so-redis:HH1.1.0
|
||||
- name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-redis:HH1.1.0
|
||||
|
||||
so-redis:
|
||||
docker_container.running:
|
||||
- require:
|
||||
- so-redisimage
|
||||
- image: soshybridhunter/so-redis:HH1.1.0
|
||||
- image: docker.io/soshybridhunter/so-redis:HH1.1.0
|
||||
- hostname: so-redis
|
||||
- user: socore
|
||||
- port_bindings:
|
||||
|
||||
@@ -29,19 +29,19 @@ sensoronisync:
|
||||
|
||||
so-sensoroniimage:
|
||||
cmd.run:
|
||||
- name: docker pull --disable-content-trust=false soshybridhunter/so-sensoroni:HH1.1.1
|
||||
- name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-sensoroni:HH1.1.3
|
||||
|
||||
so-sensoroni:
|
||||
docker_container.running:
|
||||
- require:
|
||||
- so-sensoroniimage
|
||||
- image: soshybridhunter/so-sensoroni:HH1.1.1
|
||||
- image: docker.io/soshybridhunter/so-sensoroni:HH1.1.3
|
||||
- hostname: sensoroni
|
||||
- name: so-sensoroni
|
||||
- binds:
|
||||
- /nsm/sensoroni/jobs:/opt/sensoroni/jobs:rw
|
||||
- /opt/so/conf/sensoroni/sensoroni.json:/opt/sensoroni/sensoroni.json:ro
|
||||
- /opt/so/log/sensoroni/:/opt/sensoroni/log/:rw
|
||||
- /opt/so/log/sensoroni/:/opt/sensoroni/logs/:rw
|
||||
- port_bindings:
|
||||
- 0.0.0.0:9822:9822
|
||||
- watch:
|
||||
|
||||
@@ -1,9 +1,16 @@
|
||||
{%- set ip = salt['pillar.get']('static:masterip', '') %}
|
||||
{%- set HIVEKEY = salt['pillar.get']('static:hivekey', '') %}
|
||||
{%- set CORTEXKEY = salt['pillar.get']('static:cortexorguserkey', '') %}
|
||||
|
||||
[es]
|
||||
es_url = http://{{ip}}:9200
|
||||
|
||||
[cortex]
|
||||
auto_analyze_alerts = no
|
||||
cortex_url = https://{{ip}}/cortex/
|
||||
cortex_key = {{ CORTEXKEY }}
|
||||
supported_analyzers = Urlscan_io_Search,CERTatPassiveDNS
|
||||
|
||||
[fir]
|
||||
fir_url = YOURFIRURL
|
||||
fir_token = YOURFIRTOKEN
|
||||
@@ -50,4 +57,4 @@ playbook_url = http://{{ip}}:3200/playbook
|
||||
playbook_key = a4a34538782804adfcb8dfae96262514ad70c37c
|
||||
|
||||
[log]
|
||||
logfile = /tmp/soctopus.log
|
||||
logfile = /var/log/SOCtopus/soctopus.log
|
||||
|
||||
@@ -1,23 +1,6 @@
|
||||
{% set es = salt['pillar.get']('static:masterip', '') %}
|
||||
{% set hivehost = salt['pillar.get']('static:masterip', '') %}
|
||||
{% set hivekey = salt['pillar.get']('static:hivekey', '') %}
|
||||
es_host: {{es}}
|
||||
es_port: 9200
|
||||
name: Alert-Name
|
||||
type: frequency
|
||||
index: "*:logstash-*"
|
||||
num_events: 1
|
||||
timeframe:
|
||||
minutes: 10
|
||||
buffer_time:
|
||||
minutes: 10
|
||||
allow_buffer_time_overlap: true
|
||||
|
||||
filter:
|
||||
- query:
|
||||
query_string:
|
||||
query: 'select from test'
|
||||
|
||||
alert: modules.so.thehive.TheHiveAlerter
|
||||
|
||||
hive_connection:
|
||||
@@ -29,12 +12,12 @@ hive_proxies:
|
||||
https: ''
|
||||
|
||||
hive_alert_config:
|
||||
title: '{rule[name]}'
|
||||
type: 'external'
|
||||
title: '{rule[name]} - '
|
||||
type: 'playbook'
|
||||
source: 'SecurityOnion'
|
||||
description: '`Data:` {match[message]}'
|
||||
description: "`Play:` https://{{es}}/playbook/issues/6000 \n\n `View Event:` <https://{{es}}/kibana/app/kibana#/discover?_g=()&_a=(columns:!(_source),interval:auto,query:(language:lucene,query:'_id:{match[_id]}'),sort:!('@timestamp',desc))> \n\n `Raw Data:` {match[message]}"
|
||||
severity: 2
|
||||
tags: ['elastalert', 'SecurityOnion']
|
||||
tags: ['playbook']
|
||||
tlp: 3
|
||||
status: 'New'
|
||||
follow: True
|
||||
|
||||
@@ -1,23 +1,6 @@
|
||||
{% set es = salt['pillar.get']('static:masterip', '') %}
|
||||
{% set hivehost = salt['pillar.get']('static:masterip', '') %}
|
||||
{% set hivekey = salt['pillar.get']('static:hivekey', '') %}
|
||||
es_host: {{es}}
|
||||
es_port: 9200
|
||||
name: Alert-Name
|
||||
type: frequency
|
||||
index: "*:logstash-*"
|
||||
num_events: 1
|
||||
timeframe:
|
||||
minutes: 10
|
||||
buffer_time:
|
||||
minutes: 10
|
||||
allow_buffer_time_overlap: true
|
||||
|
||||
filter:
|
||||
- query:
|
||||
query_string:
|
||||
query: 'select from test'
|
||||
|
||||
alert: modules.so.thehive.TheHiveAlerter
|
||||
|
||||
hive_connection:
|
||||
@@ -28,20 +11,22 @@ hive_proxies:
|
||||
http: ''
|
||||
https: ''
|
||||
|
||||
hive_alert_config:
|
||||
title: '{rule[name]} -- {match[osquery][hostname]} -- {match[osquery][name]}'
|
||||
type: 'external'
|
||||
source: 'SecurityOnion'
|
||||
description: '`Hostname:` __{match[osquery][hostname]}__ `Live Query:`__[Pivot Link](https://{{es}}/fleet/queries/new?host_uuids={match[osquery][LiveQuery]})__ `Pack:` __{match[osquery][name]}__ `Data:` {match[osquery][columns]}'
|
||||
severity: 2
|
||||
tags: ['elastalert', 'SecurityOnion']
|
||||
tlp: 3
|
||||
status: 'New'
|
||||
follow: True
|
||||
caseTemplate: '5000'
|
||||
|
||||
hive_observable_data_mapping:
|
||||
- ip: '{match[osquery][EndpointIP1]}'
|
||||
- ip: '{match[osquery][EndpointIP2]}'
|
||||
- other: '{match[osquery][hostIdentifier]}'
|
||||
- other: '{match[osquery][hostname]}'
|
||||
|
||||
hive_alert_config:
|
||||
title: '{rule[name]} -- {match[osquery][hostname]} -- {match[osquery][name]}'
|
||||
type: 'osquery'
|
||||
source: 'SecurityOnion'
|
||||
description: "`Play:` https://{{es}}/playbook/issues/6000 \n\n `View Event:` <https://{{es}}/kibana/app/kibana#/discover?_g=()&_a=(columns:!(_source),interval:auto,query:(language:lucene,query:'_id:{match[_id]}'),sort:!('@timestamp',desc))> \n\n `Hostname:` __{match[osquery][hostname]}__ `Live Query:`__[Pivot Link](https://{{es}}/fleet/queries/new?host_uuids={match[osquery][LiveQuery]})__ `Pack:` __{match[osquery][name]}__ `Data:` {match[osquery][columns]}"
|
||||
severity: 2
|
||||
tags: ['playbook','osquery']
|
||||
tlp: 3
|
||||
status: 'New'
|
||||
follow: True
|
||||
caseTemplate: '5000'
|
||||
|
||||
|
||||
|
||||
@@ -13,6 +13,12 @@ soctopussync:
|
||||
- group: 939
|
||||
- template: jinja
|
||||
|
||||
soctopuslogdir:
|
||||
file.directory:
|
||||
- name: /opt/so/log/soctopus
|
||||
- user: 939
|
||||
- group: 939
|
||||
|
||||
playbookrulesdir:
|
||||
file.directory:
|
||||
- name: /opt/so/rules/elastalert/playbook
|
||||
@@ -40,17 +46,18 @@ navigatordefaultlayer:
|
||||
|
||||
so-soctopusimage:
|
||||
cmd.run:
|
||||
- name: docker pull --disable-content-trust=false soshybridhunter/so-soctopus:HH1.1.1
|
||||
- name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-soctopus:HH1.1.3
|
||||
|
||||
so-soctopus:
|
||||
docker_container.running:
|
||||
- require:
|
||||
- so-soctopusimage
|
||||
- image: soshybridhunter/so-soctopus:HH1.1.1
|
||||
- image: docker.io/soshybridhunter/so-soctopus:HH1.1.3
|
||||
- hostname: soctopus
|
||||
- name: so-soctopus
|
||||
- binds:
|
||||
- /opt/so/conf/soctopus/SOCtopus.conf:/SOCtopus/SOCtopus.conf:ro
|
||||
- /opt/so/log/soctopus/:/var/log/SOCtopus/:rw
|
||||
- /opt/so/rules/elastalert/playbook:/etc/playbook-rules:rw
|
||||
- /opt/so/conf/playbook/nav_layer_playbook.json:/etc/playbook/nav_layer_playbook.json:rw
|
||||
- port_bindings:
|
||||
|
||||
@@ -1,15 +1,24 @@
|
||||
{% set master = salt['grains.get']('master') %}
|
||||
{% set master_minion_id = master.split(".")[0] %}
|
||||
{%- set masterip = salt['pillar.get']('static:masterip', '') -%}
|
||||
|
||||
{% if grains['role'] == 'so-master' or grains['role'] == 'so-eval' %}
|
||||
{% set trusttheca_text = salt['mine.get'](grains.id, 'x509.get_pem_entries')[grains.id]['/etc/pki/ca.crt']|replace('\n', '') %}
|
||||
{% set ca_server = grains.id %}
|
||||
{% else %}
|
||||
{% set trusttheca_text = salt['mine.get'](master_minion_id, 'x509.get_pem_entries')[master_minion_id]['/etc/pki/ca.crt']|replace('\n', '') %}
|
||||
{% set ca_server = master_minion_id %}
|
||||
{% endif %}
|
||||
|
||||
# Trust the CA
|
||||
|
||||
trusttheca:
|
||||
x509.pem_managed:
|
||||
- name: /etc/ssl/certs/intca.crt
|
||||
- text: {{ salt['mine.get'](master, 'x509.get_pem_entries')[master]['/etc/pki/ca.crt']|replace('\n', '') }}
|
||||
- text: {{ trusttheca_text }}
|
||||
|
||||
# Install packages needed for the sensor
|
||||
{% if grains['os'] != 'CentOS' %}
|
||||
# Install packages needed for the sensor
|
||||
m2cryptopkgs:
|
||||
pkg.installed:
|
||||
- skip_suggestions: False
|
||||
@@ -20,29 +29,29 @@ m2cryptopkgs:
|
||||
# Create a cert for the talking to influxdb
|
||||
/etc/pki/influxdb.crt:
|
||||
x509.certificate_managed:
|
||||
- ca_server: {{ master }}
|
||||
- ca_server: {{ ca_server }}
|
||||
- signing_policy: influxdb
|
||||
- public_key: /etc/pki/influxdb.key
|
||||
- CN: {{ master }}
|
||||
- days_remaining: 0
|
||||
- days_valid: 3650
|
||||
- days_valid: 820
|
||||
- backup: True
|
||||
- managed_private_key:
|
||||
name: /etc/pki/influxdb.key
|
||||
bits: 4096
|
||||
backup: True
|
||||
|
||||
{% if grains['role'] == 'so-master' or grains['role'] == 'so-eval' %}
|
||||
{% if grains['role'] == 'so-master' or grains['role'] == 'so-eval' or grains['role'] == 'so-helix' %}
|
||||
|
||||
# Request a cert and drop it where it needs to go to be distributed
|
||||
/etc/pki/filebeat.crt:
|
||||
x509.certificate_managed:
|
||||
- ca_server: {{ master }}
|
||||
- ca_server: {{ ca_server }}
|
||||
- signing_policy: filebeat
|
||||
- public_key: /etc/pki/filebeat.key
|
||||
- CN: {{ master }}
|
||||
- days_remaining: 0
|
||||
- days_valid: 3650
|
||||
- days_valid: 820
|
||||
- backup: True
|
||||
- managed_private_key:
|
||||
name: /etc/pki/filebeat.key
|
||||
@@ -70,12 +79,12 @@ fbcrtlink:
|
||||
# Create a cert for the docker registry
|
||||
/etc/pki/registry.crt:
|
||||
x509.certificate_managed:
|
||||
- ca_server: {{ master }}
|
||||
- ca_server: {{ ca_server }}
|
||||
- signing_policy: registry
|
||||
- public_key: /etc/pki/registry.key
|
||||
- CN: {{ master }}
|
||||
- days_remaining: 0
|
||||
- days_valid: 3650
|
||||
- days_valid: 820
|
||||
- backup: True
|
||||
- managed_private_key:
|
||||
name: /etc/pki/registry.key
|
||||
@@ -85,12 +94,12 @@ fbcrtlink:
|
||||
# Create a cert for the reverse proxy
|
||||
/etc/pki/masterssl.crt:
|
||||
x509.certificate_managed:
|
||||
- ca_server: {{ master }}
|
||||
- ca_server: {{ ca_server }}
|
||||
- signing_policy: masterssl
|
||||
- public_key: /etc/pki/masterssl.key
|
||||
- CN: {{ master }}
|
||||
- days_remaining: 0
|
||||
- days_valid: 3650
|
||||
- days_valid: 820
|
||||
- backup: True
|
||||
- managed_private_key:
|
||||
name: /etc/pki/masterssl.key
|
||||
@@ -103,7 +112,7 @@ fbcrtlink:
|
||||
- CN: {{ master }}
|
||||
- bits: 4096
|
||||
- days_remaining: 0
|
||||
- days_valid: 3650
|
||||
- days_valid: 820
|
||||
- backup: True
|
||||
|
||||
/etc/pki/fleet.crt:
|
||||
@@ -112,7 +121,7 @@ fbcrtlink:
|
||||
- CN: {{ master }}
|
||||
- subjectAltName: DNS:{{ master }},IP:{{ masterip }}
|
||||
- days_remaining: 0
|
||||
- days_valid: 3650
|
||||
- days_valid: 820
|
||||
- backup: True
|
||||
- managed_private_key:
|
||||
name: /etc/pki/fleet.key
|
||||
@@ -120,7 +129,7 @@ fbcrtlink:
|
||||
backup: True
|
||||
|
||||
{% endif %}
|
||||
{% if grains['role'] == 'so-sensor' or grains['role'] == 'so-node' or grains['role'] == 'so-eval' %}
|
||||
{% if grains['role'] == 'so-sensor' or grains['role'] == 'so-node' or grains['role'] == 'so-eval' or grains['role'] == 'so-helix' %}
|
||||
|
||||
fbcertdir:
|
||||
file.directory:
|
||||
@@ -130,12 +139,12 @@ fbcertdir:
|
||||
# Request a cert and drop it where it needs to go to be distributed
|
||||
/opt/so/conf/filebeat/etc/pki/filebeat.crt:
|
||||
x509.certificate_managed:
|
||||
- ca_server: {{ master }}
|
||||
- ca_server: {{ ca_server }}
|
||||
- signing_policy: filebeat
|
||||
- public_key: /opt/so/conf/filebeat/etc/pki/filebeat.key
|
||||
- CN: {{ master }}
|
||||
- days_remaining: 0
|
||||
- days_valid: 3650
|
||||
- days_valid: 820
|
||||
- backup: True
|
||||
- managed_private_key:
|
||||
name: /opt/so/conf/filebeat/etc/pki/filebeat.key
|
||||
|
||||
@@ -3,6 +3,8 @@
|
||||
{%- set interface = salt['pillar.get']('sensor:interface', 'bond0') %}
|
||||
{%- if grains['role'] == 'so-eval' %}
|
||||
{%- set MTU = 1500 %}
|
||||
{%- elif grains['role'] == 'so-helix' %}
|
||||
{%- set MTU = 9000 %}
|
||||
{%- else %}
|
||||
{%- set MTU = salt['pillar.get']('sensor:mtu', '1500') %}
|
||||
{%- endif %}
|
||||
|
||||
@@ -72,13 +72,13 @@ suriconfigsync:
|
||||
|
||||
so-suricataimage:
|
||||
cmd.run:
|
||||
- name: docker pull --disable-content-trust=false soshybridhunter/so-suricata:HH1.1.1
|
||||
- name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-suricata:HH1.1.1
|
||||
|
||||
so-suricata:
|
||||
docker_container.running:
|
||||
- require:
|
||||
- so-suricataimage
|
||||
- image: soshybridhunter/so-suricata:HH1.1.1
|
||||
- image: docker.io/soshybridhunter/so-suricata:HH1.1.1
|
||||
- privileged: True
|
||||
- environment:
|
||||
- INTERFACE={{ interface }}
|
||||
|
||||
18 salt/tcpreplay/init.sls Normal file
@@ -0,0 +1,18 @@
{% if grains['role'] == 'so-sensor' or grains['role'] == 'so-eval' %}

so-tcpreplayimage:
  cmd.run:
    - name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-tcpreplay:HH1.1.4

so-tcpreplay:
  docker_container.running:
    - require:
      - so-tcpreplay
    - network_mode: "host"
    - image: docker.io/soshybridhunter/so-tcpreplay:HH1.1.4
    - name: so-tcpreplay
    - user: root
    - interactive: True
    - tty: True

{% endif %}
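With the so-tcpreplay container running on a sensor or eval node, a capture could be replayed into the monitored interface roughly as follows; the container name and image come from the state above, while the pcap path, interface, and rate are placeholders:

```
# Replay a sample pcap at ~10 Mbps into the sniffing interface (paths are placeholders).
docker exec -it so-tcpreplay tcpreplay --intf1=bond0 --mbps=10 /tmp/sample.pcap
```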
22 salt/top.sls
@@ -5,6 +5,24 @@
{%- set THEHIVE = salt['pillar.get']('master:thehive', '0') -%}
{%- set PLAYBOOK = salt['pillar.get']('master:playbook', '0') -%}
base:
  '*':
    - patch.os.schedule
    - motd

  'G@role:so-helix':
    - ca
    - ssl
    - common
    - firewall
    - idstools
    - pcap
    - suricata
    - bro
    - redis
    - logstash
    - filebeat
    - schedule

  'G@role:so-sensor':
    - ca
    - ssl
@@ -40,6 +58,7 @@ base:
    - suricata
    - bro
    - curator
    - cyberchef
    - elastalert
{%- if OSQUERY != 0 %}
    - fleet
@@ -66,6 +85,7 @@ base:
    - ca
    - ssl
    - common
    - cyberchef
    - sensoroni
    - firewall
    - master
@@ -95,7 +115,7 @@ base:
{%- if PLAYBOOK != 0 %}
    - playbook
{%- endif %}


# Storage node logic
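The new `G@role:so-helix` entry above relies on Salt's compound/grain matching, so a Helix sensor can be exercised directly from the master once its key is accepted, for example:

```
# Confirm the helix minion responds, then run its full state set.
salt -C 'G@role:so-helix' test.ping
salt -C 'G@role:so-helix' state.highstate
```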
@@ -31,6 +31,6 @@ echo "Applying cross cluster search config..."

# Add all the storage nodes to cross cluster searching.

{%- for SN, SNDATA in salt['pillar.get']('nodestab', {}).iteritems() %}
{%- for SN, SNDATA in salt['pillar.get']('nodestab', {}).items() %}
curl -XPUT http://{{ ES }}:9200/_cluster/settings -H'Content-Type: application/json' -d '{"persistent": {"search": {"remote": {"{{ SN }}": {"skip_unavailable": "true", "seeds": ["{{ SNDATA.ip }}:9300"]}}}}}'
{%- endfor %}

@@ -1,5 +1,4 @@
{%- set ES = salt['pillar.get']('master:mainip', '') -%}
{%- set MASTER = grains['master'] %}
# Wait for ElasticSearch to come up, so that we can query for version information
echo -n "Waiting for ElasticSearch..."
COUNT=0
@@ -27,4 +26,4 @@ fi
echo "Applying cross cluster search config..."
curl -s -XPUT http://{{ ES }}:9200/_cluster/settings \
  -H 'Content-Type: application/json' \
  -d "{\"persistent\": {\"search\": {\"remote\": {\"{{ MASTER }}\": {\"seeds\": [\"127.0.0.1:9300\"]}}}}}"
  -d "{\"persistent\": {\"search\": {\"remote\": {\"{{ grains.id }}\": {\"seeds\": [\"127.0.0.1:9300\"]}}}}}"
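After these settings are PUT, the registered remote clusters can be verified straight from Elasticsearch on the master (standard Elasticsearch APIs, not specific to this repo):

```
# Show configured remote clusters and whether they are currently connected.
curl -s "http://localhost:9200/_remote/info?pretty"
# Or dump the persistent cluster settings that were just applied.
curl -s "http://localhost:9200/_cluster/settings?pretty"
```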
@@ -5,7 +5,7 @@ ossecgroup:
|
||||
group.present:
|
||||
- name: ossec
|
||||
- gid: 945
|
||||
|
||||
|
||||
# Add ossecm user
|
||||
ossecm:
|
||||
user.present:
|
||||
@@ -41,7 +41,7 @@ wazuhpkgs:
|
||||
pkg.installed:
|
||||
- skip_suggestions: False
|
||||
- pkgs:
|
||||
- wazuh-agent
|
||||
- wazuh-agent: 3.10.2-1
|
||||
|
||||
# Add Wazuh agent conf
|
||||
wazuhagentconf:
|
||||
@@ -64,13 +64,13 @@ wazuhagentregister:
|
||||
|
||||
so-wazuhimage:
|
||||
cmd.run:
|
||||
- name: docker pull --disable-content-trust=false soshybridhunter/so-wazuh:HH1.1.0
|
||||
- name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-wazuh:HH1.1.3
|
||||
|
||||
so-wazuh:
|
||||
docker_container.running:
|
||||
- require:
|
||||
- so-wazuhimage
|
||||
- image: soshybridhunter/so-wazuh:HH1.1.0
|
||||
- image: docker.io/soshybridhunter/so-wazuh:HH1.1.3
|
||||
- hostname: {{HOSTNAME}}-wazuh-manager
|
||||
- name: so-wazuh
|
||||
- detach: True
|
||||
|
||||
3 salt/yum/packages.sls Normal file
@@ -0,0 +1,3 @@
install_yum_utils:
  pkg.installed:
    - name: yum-utils
1304 setup/functions.sh Normal file
File diff suppressed because it is too large
@@ -6,4 +6,4 @@ if [ "$NM_DISPATCHER_ACTION" == "pre-up" ]; then
|
||||
ethtool -K $DEVICE_IFACE $i off;
|
||||
done
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
770 setup/so-setup.sh Normal file
@@ -0,0 +1,770 @@
#!/bin/bash
|
||||
|
||||
# Copyright 2014,2015,2016,2017,2018,2019 Security Onion Solutions, LLC
|
||||
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# Source the other pieces of the setup
|
||||
SCRIPTDIR=$(dirname "$0")
|
||||
source $SCRIPTDIR/functions.sh
|
||||
source $SCRIPTDIR/whiptail.sh
|
||||
|
||||
# See if this is an ISO install
|
||||
OPTIONS=$1
|
||||
|
||||
if [[ $OPTIONS = 'iso' ]]; then
|
||||
INSTALLMETHOD="iso"
|
||||
else
|
||||
INSTALLMETHOD="network"
|
||||
fi
|
||||
|
||||
# Global Variables
|
||||
HOSTNAME=$(cat /etc/hostname)
|
||||
MINION_ID=$(echo $HOSTNAME | awk -F. {'print $1'})
|
||||
TOTAL_MEM=`grep MemTotal /proc/meminfo | awk '{print $2}' | sed -r 's/.{3}$//'`
|
||||
NICS=$(ip link | awk -F: '$0 !~ "lo|vir|veth|br|docker|wl|^[^0-9]"{print $2 " \"" "Interface" "\"" " OFF"}')
|
||||
CPUCORES=$(cat /proc/cpuinfo | grep processor | wc -l)
|
||||
LISTCORES=$(cat /proc/cpuinfo | grep processor | awk '{print $3 " \"" "core" "\""}')
|
||||
RANDOMUID=$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 16 | head -n 1)
|
||||
NODE_ES_PORT="9200"
|
||||
SETUPLOG="/root/sosetup.log"
|
||||
# End Global Variables
|
||||
|
||||
# Reset the Install Log
|
||||
date -u >$SETUPLOG 2>&1
|
||||
echo "stty size is: $(stty size)" >> $SETUPLOG 2>&1
|
||||
|
||||
# Check for prerequisites
|
||||
got_root
|
||||
detect_os
|
||||
|
||||
if [ $OS == ubuntu ]; then
|
||||
# Override the horrible Ubuntu whiptail color palette
|
||||
update-alternatives --set newt-palette /etc/newt/palette.original
|
||||
fi
|
||||
|
||||
# Question Time
|
||||
echo "Asking user if they are sure they want to proceed" >> $SETUPLOG 2>&1
|
||||
if (whiptail_you_sure) ; then
|
||||
|
||||
# Create a temp dir to get started
|
||||
install_prep
|
||||
setterm -blank 0
|
||||
|
||||
if [ $INSTALLMETHOD == network ]; then
|
||||
# Let folks know they need their management interface already set up.
|
||||
whiptail_network_notice
|
||||
|
||||
# Set the hostname to reduce errors
|
||||
whiptail_set_hostname
|
||||
|
||||
# Set management nic
|
||||
whiptail_management_nic
|
||||
|
||||
# whiptail_create_socore_user
|
||||
# SCMATCH=no
|
||||
# while [ $SCMATCH != yes ]; do
|
||||
# whiptail_create_socore_user_password1
|
||||
# whiptail_create_socore_user_password2
|
||||
# check_socore_pass
|
||||
# done
|
||||
|
||||
else
|
||||
|
||||
# Set the hostname
|
||||
whiptail_set_hostname
|
||||
whiptail_management_nic
|
||||
|
||||
# Ask if you want dhcp or static
|
||||
whiptail_dhcp_or_static
|
||||
|
||||
# Do this if it static is selected
|
||||
if [ $ADDRESSTYPE != 'DHCP' ]; then
|
||||
whiptail_management_interface_ip
|
||||
whiptail_management_interface_mask
|
||||
whiptail_management_interface_gateway
|
||||
whiptail_management_interface_dns
|
||||
whiptail_management_interface_dns_search
|
||||
fi
|
||||
|
||||
# Go ahead and bring up networking so other parts of the install work
|
||||
set_hostname_iso
|
||||
set_management_interface
|
||||
|
||||
# Add an admin user
|
||||
whiptail_create_admin_user
|
||||
|
||||
# Get a password for the admin user
|
||||
APMATCH=no
|
||||
while [ $APMATCH != yes ]; do
|
||||
whiptail_create_admin_user_password1
|
||||
whiptail_create_admin_user_password2
|
||||
check_admin_pass
|
||||
done
|
||||
|
||||
fi
|
||||
|
||||
# Go ahead and gen the keys so we can use them for any sensor type - Disabled for now
|
||||
#minio_generate_keys
|
||||
|
||||
# What kind of install are we doing?
|
||||
whiptail_install_type
|
||||
|
||||
# How do we want to handle OS patching? manual, auto or scheduled days and hours
|
||||
whiptail_patch_schedule
|
||||
case $PATCHSCHEDULE in
|
||||
'New Schedule')
|
||||
whiptail_patch_schedule_select_days
|
||||
whiptail_patch_schedule_select_hours
|
||||
whiptail_patch_name_new_schedule
|
||||
patch_schedule_os_new
|
||||
;;
|
||||
'Import Schedule')
|
||||
whiptail_patch_schedule_import
|
||||
;;
|
||||
Automatic)
|
||||
PATCHSCHEDULENAME=auto
|
||||
;;
|
||||
Manual)
|
||||
PATCHSCHEDULENAME=manual
|
||||
;;
|
||||
esac
|
||||
|
||||
####################
|
||||
## Helix ##
|
||||
####################
|
||||
if [ $INSTALLTYPE == 'HELIXSENSOR' ]; then
|
||||
MASTERUPDATES=OPEN
|
||||
filter_unused_nics
|
||||
whiptail_bond_nics
|
||||
whiptail_helix_apikey
|
||||
whiptail_homenet_master
|
||||
RULESETUP=ETOPEN
|
||||
NSMSETUP=BASIC
|
||||
HNSENSOR=inherit
|
||||
LS_HEAP_SIZE="1000m"
|
||||
calculate_useable_cores
|
||||
whiptail_make_changes
|
||||
set_hostname
|
||||
clear_master
|
||||
mkdir -p /nsm
|
||||
get_filesystem_root
|
||||
get_filesystem_nsm
|
||||
get_main_ip
|
||||
if [ $INSTALLMETHOD == iso ]; then
|
||||
add_admin_user
|
||||
disable_onion_user
|
||||
fi
|
||||
#add_socore_user_master
|
||||
# Install salt and dependencies
|
||||
{
|
||||
sleep 0.5
|
||||
echo -e "XXX\n0\nCreating Bond Interface... \nXXX"
|
||||
create_sensor_bond >> $SETUPLOG 2>&1
|
||||
echo -e "XXX\n1\nGenerating Sensor Pillar... \nXXX"
|
||||
sensor_pillar >> $SETUPLOG 2>&1
|
||||
echo -e "XXX\n2\nInstalling and configuring Salt... \nXXX"
|
||||
echo " ** Installing Salt and Dependencies **" >> $SETUPLOG
|
||||
saltify >> $SETUPLOG 2>&1
|
||||
echo -e "XXX\n5\nInstalling Docker... \nXXX"
|
||||
docker_install >> $SETUPLOG 2>&1
|
||||
echo -e "XXX\n10\nConfiguring Salt Master... \nXXX"
|
||||
echo " ** Configuring Minion **" >> $SETUPLOG
|
||||
configure_minion helix >> $SETUPLOG 2>&1
|
||||
echo " ** Installing Salt Master **" >> $SETUPLOG
|
||||
install_master >> $SETUPLOG 2>&1
|
||||
salt_master_directories >> $SETUPLOG 2>&1
|
||||
update_sudoers >> $SETUPLOG 2>&1
|
||||
chown_salt_master >> $SETUPLOG 2>&1
|
||||
es_heapsize >> $SETUPLOG 2>&1
|
||||
ls_heapsize >> $SETUPLOG 2>&1
|
||||
echo -e "XXX\n25\nConfiguring Default Pillars... \nXXX"
|
||||
master_static >> $SETUPLOG 2>&1
|
||||
echo "** Generating the master pillar **" >> $SETUPLOG
|
||||
master_pillar >> $SETUPLOG 2>&1
|
||||
echo "** Generating the patch pillar **" >> $SETUPLOG
|
||||
patch_pillar >> $SETUPLOG 2>&1
|
||||
echo "** Generating the FireEye pillar **" >> $SETUPLOG
|
||||
fireeye_pillar >> $SETUPLOG 2>&1
|
||||
sensor_pillar >> $SETUPLOG 2>&1
|
||||
echo -e "XXX\n24\nCopying Minion Pillars to Master... \nXXX"
|
||||
copy_minion_tmp_files >> $SETUPLOG 2>&1
|
||||
# Do a checkin to push the key up
|
||||
echo "** Pushing the key up to Master **" >> $SETUPLOG
|
||||
salt_firstcheckin >> $SETUPLOG 2>&1
|
||||
# Accept the Master Key
|
||||
echo "** Accepting the key on the master **" >> $SETUPLOG
|
||||
accept_salt_key_local >> $SETUPLOG 2>&1
|
||||
echo -e "XXX\n35\nConfiguring Firewall... \nXXX"
|
||||
# Open the firewall
|
||||
echo "** Setting the initial firewall policy **" >> $SETUPLOG
|
||||
set_initial_firewall_policy >> $SETUPLOG 2>&1
|
||||
echo -e "XXX\n40\nGenerating CA... \nXXX"
|
||||
salt_checkin >> $SETUPLOG 2>&1
|
||||
salt-call state.apply ca >> $SETUPLOG 2>&1
|
||||
salt-call state.apply ssl >> $SETUPLOG 2>&1
|
||||
echo -e "XXX\n43\nInstalling Common Components... \nXXX"
|
||||
salt-call state.apply common >> $SETUPLOG 2>&1
|
||||
echo -e "XXX\n45\nApplying firewall rules... \nXXX"
|
||||
salt-call state.apply firewall >> $SETUPLOG 2>&1
|
||||
salt-call state.apply master >> $SETUPLOG 2>&1
|
||||
salt-call state.apply idstools >> $SETUPLOG 2>&1
|
||||
echo -e "XXX\n40\nInstalling Redis... \nXXX"
|
||||
salt-call state.apply redis >> $SETUPLOG 2>&1
|
||||
echo -e "XXX\n60\nInstalling Redis... \nXXX"
|
||||
salt-call state.apply logstash >> $SETUPLOG 2>&1
|
||||
echo -e "XXX\n75\nInstalling Filebeat... \nXXX"
|
||||
salt-call state.apply filebeat >> $SETUPLOG 2>&1
|
||||
salt-call state.apply utility >> $SETUPLOG 2>&1
|
||||
salt-call state.apply schedule >> $SETUPLOG 2>&1
|
||||
echo -e "XXX\n85\nEnabling Checking at Boot... \nXXX"
|
||||
checkin_at_boot >> $SETUPLOG 2>&1
|
||||
echo -e "XX\n97\nFinishing touches... \nXXX"
|
||||
filter_unused_nics >> $SETUPLOG 2>&1
|
||||
network_setup >> $SETUPLOG 2>&1
|
||||
echo -e "XXX\n98\nVerifying Setup... \nXXX"
|
||||
salt-call state.highstate >> $SETUPLOG 2>&1
|
||||
} |whiptail --title "Hybrid Hunter Install" --gauge "Please wait while installing" 6 60 0
|
||||
GOODSETUP=$(tail -10 $SETUPLOG | grep Failed | awk '{ print $2}')
|
||||
if [[ $GOODSETUP == '0' ]]; then
|
||||
whiptail_setup_complete
|
||||
shutdown -r now
|
||||
else
|
||||
whiptail_setup_failed
|
||||
shutdown -r now
|
||||
fi
|
||||
|
||||
fi
|
||||
|
||||
####################
|
||||
## Master ##
|
||||
####################
|
||||
if [ $INSTALLTYPE == 'MASTERONLY' ]; then
|
||||
|
||||
# Would you like to do an advanced install?
|
||||
whiptail_master_adv
|
||||
|
||||
# Choose Zeek or Community NSM
|
||||
whiptail_bro_version
|
||||
|
||||
# Select Snort or Suricata
|
||||
whiptail_nids
|
||||
|
||||
# Snag the HOME_NET
|
||||
whiptail_homenet_master
|
||||
|
||||
# Pick your Ruleset
|
||||
whiptail_rule_setup
|
||||
|
||||
# Get the code if it isn't ET Open
|
||||
if [ $RULESETUP != 'ETOPEN' ]; then
|
||||
# Get the code
|
||||
whiptail_oinkcode
|
||||
fi
|
||||
|
||||
# Find out how to handle updates
|
||||
whiptail_master_updates
|
||||
whiptail_enable_components
|
||||
process_components
|
||||
|
||||
# Do Advanced Setup if they chose it
|
||||
if [ $MASTERADV == 'ADVANCED' ]; then
|
||||
# Ask which bro logs to enable - Need to add Suricata check
|
||||
if [ $BROVERSION != 'SURICATA' ]; then
|
||||
whiptail_master_adv_service_brologs
|
||||
fi
|
||||
fi
|
||||
|
||||
# Get a password for the socore user
|
||||
whiptail_create_socore_user
|
||||
SCMATCH=no
|
||||
while [ $SCMATCH != yes ]; do
|
||||
whiptail_create_socore_user_password1
|
||||
whiptail_create_socore_user_password2
|
||||
check_socore_pass
|
||||
done
|
||||
|
||||
# Last Chance to back out
|
||||
whiptail_make_changes
|
||||
set_hostname
|
||||
generate_passwords
|
||||
auth_pillar
|
||||
clear_master
|
||||
mkdir -p /nsm
|
||||
get_filesystem_root
|
||||
get_filesystem_nsm
|
||||
# Enable Bro Logs
|
||||
# comment this out since we already copy this file to the destination that this function writes to
|
||||
#bro_logs_enabled
|
||||
|
||||
# Figure out the main IP address
|
||||
get_main_ip
|
||||
if [ $INSTALLMETHOD == iso ]; then
|
||||
add_admin_user
|
||||
disable_onion_user
|
||||
fi
|
||||
|
||||
# Add the user so we can sit back and relax
|
||||
#echo ""
|
||||
#echo "**** Please set a password for socore. You will use this password when setting up other Nodes/Sensors"
|
||||
#echo ""
|
||||
add_socore_user_master
|
||||
|
||||
# Install salt and dependencies
|
||||
{
|
||||
sleep 0.5
|
||||
echo -e "XXX\n1\nInstalling and configuring Salt... \nXXX"
|
||||
echo " ** Installing Salt and Dependencies **" >> $SETUPLOG
|
||||
saltify >> $SETUPLOG 2>&1
|
||||
echo -e "XXX\n5\nInstalling Docker... \nXXX"
|
||||
docker_install >> $SETUPLOG 2>&1
|
||||
echo -e "XXX\n10\nConfiguring Salt Master... \nXXX"
|
||||
echo " ** Configuring Minion **" >> $SETUPLOG
|
||||
configure_minion master >> $SETUPLOG 2>&1
|
||||
echo " ** Installing Salt Master **" >> $SETUPLOG
|
||||
install_master >> $SETUPLOG 2>&1
|
||||
salt_install_mysql_deps >> $SETUPLOG 2>&1
|
||||
salt_master_directories >> $SETUPLOG 2>&1
|
||||
update_sudoers >> $SETUPLOG 2>&1
|
||||
chown_salt_master >> $SETUPLOG 2>&1
|
||||
es_heapsize >> $SETUPLOG 2>&1
|
||||
ls_heapsize >> $SETUPLOG 2>&1
|
||||
echo -e "XXX\n25\nConfiguring Default Pillars... \nXXX"
|
||||
master_static >> $SETUPLOG 2>&1
|
||||
echo "** Generating the master pillar **" >> $SETUPLOG
|
||||
master_pillar >> $SETUPLOG 2>&1
|
||||
echo "** Generating the patch pillar **" >> $SETUPLOG
|
||||
patch_pillar >> $SETUPLOG 2>&1
|
||||
echo -e "XXX\n30\nAccepting Salt Keys... \nXXX"
|
||||
echo -e "XXX\n24\nCopying Minion Pillars to Master... \nXXX"
|
||||
copy_minion_tmp_files >> $SETUPLOG 2>&1
|
||||
# Do a checkin to push the key up
|
||||
echo "** Pushing the key up to Master **" >> $SETUPLOG
|
||||
salt_firstcheckin >> $SETUPLOG 2>&1
|
||||
# Accept the Master Key
|
||||
echo "** Accepting the key on the master **" >> $SETUPLOG
|
||||
accept_salt_key_local >> $SETUPLOG 2>&1
|
||||
echo -e "XXX\n35\nConfiguring Firewall... \nXXX"
|
||||
# Open the firewall
|
||||
echo "** Setting the initial firewall policy **" >> $SETUPLOG
|
||||
set_initial_firewall_policy >> $SETUPLOG 2>&1
|
||||
# Do the big checkin but first let them know it will take a bit.
|
||||
echo -e "XXX\n40\nGenerating CA... \nXXX"
|
||||
salt_checkin >> $SETUPLOG 2>&1
|
||||
salt-call state.apply ca >> $SETUPLOG 2>&1
|
||||
salt-call state.apply ssl >> $SETUPLOG 2>&1
|
||||
echo -e "XXX\n43\nInstalling Common Components... \nXXX"
|
||||
salt-call state.apply common >> $SETUPLOG 2>&1
|
||||
echo -e "XXX\n45\nApplying firewall rules... \nXXX"
|
||||
salt-call state.apply firewall >> $SETUPLOG 2>&1
|
||||
salt-call state.apply master >> $SETUPLOG 2>&1
|
||||
salt-call state.apply idstools >> $SETUPLOG 2>&1
|
||||
echo -e "XXX\n40\nInstalling Redis... \nXXX"
|
||||
salt-call state.apply redis >> $SETUPLOG 2>&1
|
||||
if [[ $OSQUERY == '1' ]]; then
|
||||
echo -e "XXX\n41\nInstalling MySQL... \nXXX"
|
||||
salt-call state.apply mysql >> $SETUPLOG 2>&1
|
||||
fi
|
||||
echo -e "XXX\n45\nInstalling Elastic Components... \nXXX"
|
||||
salt-call state.apply elasticsearch >> $SETUPLOG 2>&1
|
||||
salt-call state.apply logstash >> $SETUPLOG 2>&1
|
||||
salt-call state.apply kibana >> $SETUPLOG 2>&1
|
||||
salt-call state.apply elastalert >> $SETUPLOG 2>&1
|
||||
if [[ $WAZUH == '1' ]]; then
|
||||
echo -e "XXX\n68\nInstalling Wazuh... \nXXX"
|
||||
salt-call state.apply wazuh >> $SETUPLOG 2>&1
|
||||
fi
|
||||
echo -e "XXX\n75\nInstalling Filebeat... \nXXX"
|
||||
salt-call state.apply filebeat >> $SETUPLOG 2>&1
|
||||
salt-call state.apply utility >> $SETUPLOG 2>&1
|
||||
salt-call state.apply schedule >> $SETUPLOG 2>&1
|
||||
if [[ $OSQUERY == '1' ]]; then
|
||||
echo -e "XXX\n79\nInstalling Fleet... \nXXX"
|
||||
salt-call state.apply fleet >> $SETUPLOG 2>&1
|
||||
salt-call state.apply launcher >> $SETUPLOG 2>&1
|
||||
fi
|
||||
echo -e "XXX\n85\nConfiguring SOctopus... \nXXX"
|
||||
salt-call state.apply soctopus >> $SETUPLOG 2>&1
|
||||
if [[ $THEHIVE == '1' ]]; then
|
||||
echo -e "XXX\n87\nInstalling TheHive... \nXXX"
|
||||
salt-call state.apply hive >> $SETUPLOG 2>&1
|
||||
fi
|
||||
if [[ $PLAYBOOK == '1' ]]; then
|
||||
echo -e "XXX\n89\nInstalling Playbook... \nXXX"
|
||||
salt-call state.apply playbook >> $SETUPLOG 2>&1
|
||||
fi
|
||||
echo -e "XXX\n75\nEnabling Checking at Boot... \nXXX"
|
||||
checkin_at_boot >> $SETUPLOG 2>&1
|
||||
echo -e "XX\n97\nFinishing touches... \nXXX"
|
||||
filter_unused_nics >> $SETUPLOG 2>&1
|
||||
network_setup >> $SETUPLOG 2>&1
|
||||
echo -e "XXX\n98\nVerifying Setup... \nXXX"
|
||||
salt-call state.highstate >> $SETUPLOG 2>&1
|
||||
} |whiptail --title "Hybrid Hunter Install" --gauge "Please wait while installing" 6 60 0
|
||||
GOODSETUP=$(tail -10 $SETUPLOG | grep Failed | awk '{ print $2}')
|
||||
if [[ $GOODSETUP == '0' ]]; then
|
||||
whiptail_setup_complete
|
||||
if [[ $THEHIVE == '1' ]]; then
|
||||
check_hive_init_then_reboot
|
||||
else
|
||||
shutdown -r now
|
||||
fi
|
||||
else
|
||||
whiptail_setup_failed
|
||||
shutdown -r now
|
||||
fi
|
||||
|
||||
fi
|
||||
|
||||
####################
|
||||
## Sensor ##
|
||||
####################
|
||||
|
||||
if [ $INSTALLTYPE == 'SENSORONLY' ]; then
|
||||
filter_unused_nics
|
||||
whiptail_bond_nics
|
||||
whiptail_management_server
|
||||
whiptail_master_updates
|
||||
set_updates
|
||||
whiptail_homenet_sensor
|
||||
whiptail_sensor_config
|
||||
# Calculate lbprocs so we can call it in the prompts
|
||||
calculate_useable_cores
|
||||
if [ $NSMSETUP == 'ADVANCED' ]; then
|
||||
whiptail_bro_pins
|
||||
whiptail_suricata_pins
|
||||
whiptail_bond_nics_mtu
|
||||
else
|
||||
whiptail_basic_bro
|
||||
whiptail_basic_suri
|
||||
fi
|
||||
whiptail_make_changes
|
||||
set_hostname
|
||||
clear_master
|
||||
mkdir -p /nsm
|
||||
get_filesystem_root
|
||||
get_filesystem_nsm
|
||||
if [ $INSTALLMETHOD == iso ]; then
|
||||
add_admin_user
|
||||
disable_onion_user
|
||||
fi
|
||||
copy_ssh_key >> $SETUPLOG 2>&1
|
||||
{
|
||||
sleep 0.5
|
||||
echo -e "XXX\n0\nSetting Initial Firewall Policy... \nXXX"
|
||||
set_initial_firewall_policy >> $SETUPLOG 2>&1
|
||||
echo -e "XXX\n1\nInstalling pip3... \nXXX"
|
||||
echo -e "XXX\n3\nCreating Bond Interface... \nXXX"
|
||||
create_sensor_bond >> $SETUPLOG 2>&1
|
||||
echo -e "XXX\n4\nGenerating Sensor Pillar... \nXXX"
|
||||
sensor_pillar >> $SETUPLOG 2>&1
|
||||
echo "** Generating the patch pillar **" >> $SETUPLOG
|
||||
patch_pillar >> $SETUPLOG 2>&1
|
||||
echo -e "XXX\n5\nInstalling Salt Components... \nXXX"
|
||||
saltify >> $SETUPLOG 2>&1
|
||||
echo -e "XXX\n20\nInstalling Docker... \nXXX"
|
||||
docker_install >> $SETUPLOG 2>&1
|
||||
echo -e "XXX\n22\nConfiguring Salt Minion... \nXXX"
|
||||
configure_minion sensor >> $SETUPLOG 2>&1
|
||||
echo -e "XXX\n24\nCopying Minion Pillars to Master... \nXXX"
|
||||
copy_minion_tmp_files >> $SETUPLOG 2>&1
|
||||
echo -e "XXX\n25\nSending Salt Key to Master... \nXXX"
|
||||
salt_firstcheckin >> $SETUPLOG 2>&1
|
||||
echo -e "XXX\n26\nTelling the Master to Accept Key... \nXXX"
|
||||
# Accept the Salt Key
|
||||
accept_salt_key_remote >> $SETUPLOG 2>&1
|
||||
echo -e "XXX\n27\nApplying SSL Certificates... \nXXX"
|
||||
salt-call state.apply ca >> $SETUPLOG 2>&1
|
||||
salt-call state.apply ssl >> $SETUPLOG 2>&1
|
||||
echo -e "XXX\n35\nInstalling Core Components... \nXXX"
|
||||
salt-call state.apply common >> $SETUPLOG 2>&1
|
||||
salt-call state.apply firewall >> $SETUPLOG 2>&1
|
||||
echo -e "XXX\n50\nInstalling PCAP... \nXXX"
|
||||
salt-call state.apply pcap >> $SETUPLOG 2>&1
|
||||
echo -e "XXX\n60\nInstalling IDS components... \nXXX"
|
||||
salt-call state.apply suricata >> $SETUPLOG 2>&1
|
||||
checkin_at_boot >> $SETUPLOG 2>&1
|
||||
echo -e "XX\n97\nFinishing touches... \nXXX"
|
||||
filter_unused_nics >> $SETUPLOG 2>&1
|
||||
network_setup >> $SETUPLOG 2>&1
|
||||
echo -e "XXX\n98\nVerifying Setup... \nXXX"
|
||||
salt-call state.highstate >> $SETUPLOG 2>&1
|
||||
} |whiptail --title "Hybrid Hunter Install" --gauge "Please wait while installing" 6 60 0
|
||||
GOODSETUP=$(tail -10 $SETUPLOG | grep Failed | awk '{ print $2}')
|
||||
if [[ $GOODSETUP == '0' ]]; then
|
||||
whiptail_setup_complete
|
||||
shutdown -r now
|
||||
else
|
||||
whiptail_setup_failed
|
||||
shutdown -r now
|
||||
fi
|
||||
fi
|
||||
|
||||
#######################
|
||||
## Eval Mode ##
|
||||
#######################
|
||||
|
||||
if [ $INSTALLTYPE == 'EVALMODE' ]; then
|
||||
|
||||
# Filter out the management NIC
|
||||
filter_unused_nics
|
||||
|
||||
# Select which NICs are in the bond
|
||||
whiptail_bond_nics
|
||||
|
||||
# Snag the HOME_NET
|
||||
whiptail_homenet_master
|
||||
whiptail_eval_adv_warning
|
||||
whiptail_enable_components
|
||||
|
||||
# Set a bunch of stuff since this is eval
|
||||
es_heapsize
|
||||
ls_heapsize
|
||||
NODE_ES_HEAP_SIZE="600m"
|
||||
NODE_LS_HEAP_SIZE="500m"
|
||||
LSPIPELINEWORKERS=1
|
||||
LSPIPELINEBATCH=125
|
||||
LSINPUTTHREADS=1
|
||||
LSINPUTBATCHCOUNT=125
|
||||
RULESETUP=ETOPEN
|
||||
NSMSETUP=BASIC
|
||||
NIDS=Suricata
|
||||
BROVERSION=ZEEK
|
||||
CURCLOSEDAYS=30
|
||||
process_components
|
||||
whiptail_make_changes
|
||||
set_hostname
|
||||
generate_passwords
|
||||
auth_pillar
|
||||
clear_master
|
||||
mkdir -p /nsm
|
||||
get_filesystem_root
|
||||
get_filesystem_nsm
|
||||
get_log_size_limit
|
||||
get_main_ip
|
||||
if [ $INSTALLMETHOD == iso ]; then
|
||||
add_admin_user
|
||||
disable_onion_user
|
||||
fi
|
||||
# Add the user so we can sit back and relax
|
||||
add_socore_user_master
|
||||
{
|
||||
sleep 0.5
|
||||
echo -e "XXX\n0\nCreating Bond Interface... \nXXX"
|
||||
create_sensor_bond >> $SETUPLOG 2>&1
|
||||
echo -e "XXX\n1\nInstalling Python 3... \nXXX"
|
||||
echo -e "XXX\n2\nInstalling saltstack... \nXXX"
|
||||
saltify >> $SETUPLOG 2>&1
|
||||
echo -e "XXX\n3\nInstalling docker... \nXXX"
|
||||
docker_install >> $SETUPLOG 2>&1
|
||||
echo -e "XXX\n5\nInstalling master code... \nXXX"
|
||||
install_master >> $SETUPLOG 2>&1
|
||||
echo -e "XXX\n5\nInstalling mysql dependencies for saltstack... \nXXX"
|
||||
salt_install_mysql_deps >> $SETUPLOG 2>&1
|
||||
echo -e "XXX\n6\nCopying salt code... \nXXX"
|
||||
salt_master_directories >> $SETUPLOG 2>&1
|
||||
echo -e "XXX\n6\nupdating suduers... \nXXX"
|
||||
update_sudoers >> $SETUPLOG 2>&1
|
||||
echo -e "XXX\n7\nFixing some permissions... \nXXX"
|
||||
chown_salt_master >> $SETUPLOG 2>&1
|
||||
echo -e "XXX\n7\nCreating the static pillar... \nXXX"
|
||||
# Set the static values
|
||||
master_static >> $SETUPLOG 2>&1
|
||||
echo -e "XXX\n7\nCreating the master pillar... \nXXX"
|
||||
master_pillar >> $SETUPLOG 2>&1
|
||||
echo "** Generating the patch pillar **" >> $SETUPLOG
|
||||
patch_pillar >> $SETUPLOG 2>&1
|
||||
echo -e "XXX\n7\nConfiguring minion... \nXXX"
|
||||
configure_minion eval >> $SETUPLOG 2>&1
|
||||
echo -e "XXX\n7\nSetting the node type to eval... \nXXX"
|
||||
set_node_type >> $SETUPLOG 2>&1
|
||||
echo -e "XXX\n7\nStorage node pillar... \nXXX"
|
||||
node_pillar >> $SETUPLOG 2>&1
|
||||
echo -e "XXX\n8\nCreating firewall policies... \nXXX"
|
||||
set_initial_firewall_policy >> $SETUPLOG 2>&1
|
||||
echo -e "XXX\n24\nCopying Minion Pillars to Master... \nXXX"
|
||||
copy_minion_tmp_files >> $SETUPLOG 2>&1
|
||||
echo -e "XXX\n10\nRegistering agent... \nXXX"
|
||||
salt_firstcheckin >> $SETUPLOG 2>&1
|
||||
echo -e "XXX\n11\nAccepting Agent... \nXXX"
|
||||
accept_salt_key_local >> $SETUPLOG 2>&1
|
||||
echo -e "XXX\n12\nRunning the SSL states... \nXXX"
|
||||
salt_checkin >> $SETUPLOG 2>&1
|
||||
salt-call state.apply ca >> $SETUPLOG 2>&1
|
||||
salt-call state.apply ssl >> $SETUPLOG 2>&1
|
||||
echo -e "XXX\n15\nInstalling core components... \nXXX"
|
||||
salt-call state.apply common >> $SETUPLOG 2>&1
|
||||
echo -e "XXX\n18\nInitializing firewall rules... \nXXX"
|
||||
salt-call state.apply firewall >> $SETUPLOG 2>&1
|
||||
echo -e "XXX\n25\nInstalling master components... \nXXX"
|
||||
salt-call state.apply master >> $SETUPLOG 2>&1
|
||||
salt-call state.apply idstools >> $SETUPLOG 2>&1
|
||||
if [[ $OSQUERY == '1' ]]; then
|
||||
salt-call state.apply mysql >> $SETUPLOG 2>&1
|
||||
fi
|
||||
echo -e "XXX\n35\nInstalling ElasticSearch... \nXXX"
|
||||
salt-call state.apply elasticsearch >> $SETUPLOG 2>&1
|
||||
echo -e "XXX\n40\nInstalling Logstash... \nXXX"
|
||||
salt-call state.apply logstash >> $SETUPLOG 2>&1
|
||||
echo -e "XXX\n45\nInstalling Kibana... \nXXX"
|
||||
salt-call state.apply kibana >> $SETUPLOG 2>&1
|
||||
echo -e "XXX\n50\nInstalling pcap... \nXXX"
|
||||
salt-call state.apply pcap >> $SETUPLOG 2>&1
|
||||
echo -e "XXX\n52\nInstalling Suricata... \nXXX"
|
||||
salt-call state.apply suricata >> $SETUPLOG 2>&1
|
||||
echo -e "XXX\n54\nInstalling Zeek... \nXXX"
|
||||
salt-call state.apply bro >> $SETUPLOG 2>&1
|
||||
echo -e "XXX\n56\nInstalling curator... \nXXX"
|
||||
salt-call state.apply curator >> $SETUPLOG 2>&1
|
||||
echo -e "XXX\n58\nInstalling elastalert... \nXXX"
|
||||
salt-call state.apply elastalert >> $SETUPLOG 2>&1
|
||||
if [[ $OSQUERY == '1' ]]; then
|
||||
echo -e "XXX\n60\nInstalling fleet... \nXXX"
|
||||
salt-call state.apply fleet >> $SETUPLOG 2>&1
|
||||
salt-call state.apply redis >> $SETUPLOG 2>&1
|
||||
fi
|
||||
if [[ $WAZUH == '1' ]]; then
|
||||
echo -e "XXX\n65\nInstalling Wazuh components... \nXXX"
|
||||
salt-call state.apply wazuh >> $SETUPLOG 2>&1
|
||||
fi
|
||||
echo -e "XXX\n85\nInstalling filebeat... \nXXX"
|
||||
salt-call state.apply filebeat >> $SETUPLOG 2>&1
|
||||
salt-call state.apply utility >> $SETUPLOG 2>&1
|
||||
echo -e "XXX\n90\nInstalling misc components... \nXXX"
|
||||
salt-call state.apply schedule >> $SETUPLOG 2>&1
|
||||
salt-call state.apply soctopus >> $SETUPLOG 2>&1
|
||||
if [[ $THEHIVE == '1' ]]; then
|
||||
echo -e "XXX\n91\nInstalling The Hive... \nXXX"
|
||||
salt-call state.apply hive >> $SETUPLOG 2>&1
|
||||
fi
|
||||
if [[ $PLAYBOOK == '1' ]]; then
|
||||
echo -e "XXX\n93\nInstalling Playbook... \nXXX"
|
||||
salt-call state.apply playbook >> $SETUPLOG 2>&1
|
||||
fi
|
||||
echo -e "XXX\n95\nSetting checkin to run on boot... \nXXX"
|
||||
checkin_at_boot >> $SETUPLOG 2>&1
|
||||
echo -e "XX\n97\nFinishing touches... \nXXX"
|
||||
filter_unused_nics >> $SETUPLOG 2>&1
|
||||
network_setup >> $SETUPLOG 2>&1
|
||||
echo -e "XXX\n98\nVerifying Setup... \nXXX"
|
||||
salt-call state.highstate >> $SETUPLOG 2>&1
|
||||
} |whiptail --title "Hybrid Hunter Install" --gauge "Please wait while installing" 6 60 0
|
||||
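# A minimal sanity check, assuming the Salt highstate summary line
# ("Failed:    <n>") lands in the last 10 lines of $SETUPLOG: a count of 0
# is treated as a successful install.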
GOODSETUP=$(tail -10 $SETUPLOG | grep Failed | awk '{ print $2}')
if [[ $GOODSETUP == '0' ]]; then
whiptail_setup_complete
if [[ $THEHIVE == '1' ]]; then
check_hive_init_then_reboot
else
shutdown -r now
fi
else
whiptail_setup_failed
shutdown -r now
fi
fi

###################
##     Nodes     ##
###################

if [ $INSTALLTYPE == 'STORAGENODE' ] || [ $INSTALLTYPE == 'PARSINGNODE' ] || [ $INSTALLTYPE == 'HOTNODE' ] || [ $INSTALLTYPE == 'WARMNODE' ]; then
whiptail_management_server
whiptail_master_updates
set_updates
get_log_size_limit
CURCLOSEDAYS=30
es_heapsize
ls_heapsize
whiptail_node_advanced
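# NODEADVANCED walks through the individual heap/pipeline tuning prompts below;
# otherwise the recommended defaults computed above (es_heapsize, ls_heapsize)
# and the stock Logstash settings are used as-is.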
if [ $NODESETUP == 'NODEADVANCED' ]; then
whiptail_node_es_heap
whiptail_node_ls_heap
whiptail_node_ls_pipeline_worker
whiptail_node_ls_pipline_batchsize
whiptail_node_ls_input_threads
whiptail_node_ls_input_batch_count
whiptail_cur_close_days
whiptail_log_size_limit
else
NODE_ES_HEAP_SIZE=$ES_HEAP_SIZE
NODE_LS_HEAP_SIZE=$LS_HEAP_SIZE
LSPIPELINEWORKERS=$CPUCORES
LSPIPELINEBATCH=125
LSINPUTTHREADS=1
LSINPUTBATCHCOUNT=125
fi
whiptail_make_changes
set_hostname
clear_master
mkdir -p /nsm
get_filesystem_root
get_filesystem_nsm
if [ $INSTALLMETHOD == iso ]; then
add_admin_user
disable_onion_user
fi
copy_ssh_key >> $SETUPLOG 2>&1
{
sleep 0.5
echo -e "XXX\n0\nSetting Initial Firewall Policy... \nXXX"
set_initial_firewall_policy >> $SETUPLOG 2>&1
echo -e "XXX\n1\nInstalling pip3... \nXXX"
echo -e "XXX\n5\nInstalling Salt Packages... \nXXX"
saltify >> $SETUPLOG 2>&1
echo -e "XXX\n20\nInstalling Docker... \nXXX"
docker_install >> $SETUPLOG 2>&1
echo -e "XXX\n30\nInitializing Minion... \nXXX"
configure_minion node >> $SETUPLOG 2>&1
set_node_type >> $SETUPLOG 2>&1
node_pillar >> $SETUPLOG 2>&1
echo "** Generating the patch pillar **" >> $SETUPLOG
patch_pillar >> $SETUPLOG 2>&1
echo -e "XXX\n32\nCopying Minion Pillars to Master... \nXXX"
copy_minion_tmp_files >> $SETUPLOG 2>&1
echo -e "XXX\n35\nSending and Accepting Salt Key... \nXXX"
salt_firstcheckin >> $SETUPLOG 2>&1
# Accept the Salt Key
accept_salt_key_remote >> $SETUPLOG 2>&1
echo -e "XXX\n40\nApplying SSL Certificates... \nXXX"
salt-call state.apply ca >> $SETUPLOG 2>&1
salt-call state.apply ssl >> $SETUPLOG 2>&1
echo -e "XXX\n50\nConfiguring Firewall... \nXXX"
salt-call state.apply common >> $SETUPLOG 2>&1
salt-call state.apply firewall >> $SETUPLOG 2>&1
echo -e "XXX\n70\nInstalling Elastic Components... \nXXX"
salt-call state.apply logstash >> $SETUPLOG 2>&1
salt-call state.apply elasticsearch >> $SETUPLOG 2>&1
salt-call state.apply curator >> $SETUPLOG 2>&1
salt-call state.apply filebeat >> $SETUPLOG 2>&1
checkin_at_boot >> $SETUPLOG 2>&1
echo -e "XXX\n97\nFinishing touches... \nXXX"
filter_unused_nics >> $SETUPLOG 2>&1
network_setup >> $SETUPLOG 2>&1
echo -e "XXX\n98\nVerifying Setup... \nXXX"
} |whiptail --title "Hybrid Hunter Install" --gauge "Please wait while installing" 6 60 0
GOODSETUP=$(tail -10 $SETUPLOG | grep Failed | awk '{ print $2}')
if [[ $GOODSETUP == '0' ]]; then
whiptail_setup_complete
shutdown -r now
else
whiptail_setup_failed
shutdown -r now
fi

fi

else
echo "User not sure. Cancelling setup." >> $SETUPLOG 2>&1
whiptail_cancel
fi
751
setup/whiptail.sh
Normal file
@@ -0,0 +1,751 @@
#!/bin/bash

# Copyright 2014,2015,2016,2017,2018,2019 Security Onion Solutions, LLC

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

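# Note on the "3>&1 1>&2 2>&3" idiom used throughout this file: whiptail draws
# its dialog on stdout and writes the user's answer to stderr, so inside $(...)
# the file descriptors are swapped to capture the answer while the dialog still
# reaches the terminal. A minimal standalone example:
#   CHOICE=$(whiptail --inputbox "Value?" 10 40 "default" 3>&1 1>&2 2>&3)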
whiptail_basic_bro() {

BASICBRO=$(whiptail --title "Security Onion Setup" --inputbox \
"Enter the number of bro processes:" 10 75 $LBPROCS 3>&1 1>&2 2>&3)

local exitstatus=$?
whiptail_check_exitstatus $exitstatus

}

whiptail_basic_suri() {

BASICSURI=$(whiptail --title "Security Onion Setup" --inputbox \
"Enter the number of Suricata Processes:" 10 75 $LBPROCS 3>&1 1>&2 2>&3)

local exitstatus=$?
whiptail_check_exitstatus $exitstatus

}

whiptail_bro_pins() {

BROPINS=$(whiptail --noitem --title "Pin Bro CPUS" --checklist "Please Select $LBPROCS cores to pin Bro to:" 20 75 12 ${LISTCORES[@]} 3>&1 1>&2 2>&3 )

local exitstatus=$?
whiptail_check_exitstatus $exitstatus

}

whiptail_bro_version() {

BROVERSION=$(whiptail --title "Security Onion Setup" --radiolist "What tool would you like to use to generate metadata?" 20 75 4 "ZEEK" "Install Zeek (aka Bro)" ON \
"SURICATA" "SUPER EXPERIMENTAL" OFF 3>&1 1>&2 2>&3)

local exitstatus=$?
whiptail_check_exitstatus $exitstatus

}

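# The next two dialogs build the monitor bond. FNICS is assumed to hold the
# NICs left over after the management NIC was chosen, and the loop re-prompts
# until at least one interface is ticked so the bond is never empty.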
whiptail_bond_nics() {

local nic_list=()
for FNIC in ${FNICS[@]}; do
nic_list+=($FNIC "Interface" "OFF")
done

BNICS=$(whiptail --title "NIC Setup" --checklist "Please add NICs to the Monitor Interface" 20 75 12 ${nic_list[@]} 3>&1 1>&2 2>&3 )
local exitstatus=$?
whiptail_check_exitstatus $exitstatus

while [ -z "$BNICS" ]
do
BNICS=$(whiptail --title "NIC Setup" --checklist "Please add NICs to the Monitor Interface" 20 75 12 ${nic_list[@]} 3>&1 1>&2 2>&3 )
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
done

}

whiptail_bond_nics_mtu() {

# Set the MTU on the monitor interface
MTU=$(whiptail --title "Security Onion Setup" --inputbox \
"Enter the MTU for the monitor NICs" 10 75 1500 3>&1 1>&2 2>&3)

local exitstatus=$?
whiptail_check_exitstatus $exitstatus

}

whiptail_cancel() {

whiptail --title "Security Onion Setup" --msgbox "Cancelling Setup. No changes have been made." 8 75
if [ -d "/root/installtmp" ]; then
echo "/root/installtmp exists" >> $SETUPLOG 2>&1
install_cleanup
echo "/root/installtmp removed" >> $SETUPLOG 2>&1
fi
exit

}

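# Whiptail exits 0 for OK/Yes, 1 for Cancel/No, and typically 255 when ESC is
# pressed; only an explicit Cancel (1) aborts the setup here.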
whiptail_check_exitstatus() {

if [ $1 == '1' ]; then
echo "User cancelled setup." >> $SETUPLOG 2>&1
whiptail_cancel
fi

}

whiptail_create_admin_user() {

ADMINUSER=$(whiptail --title "Security Onion Install" --inputbox \
"Please enter a username for your new admin user" 10 60 3>&1 1>&2 2>&3)

}

whiptail_create_admin_user_password1() {

ADMINPASS1=$(whiptail --title "Security Onion Install" --passwordbox \
"Enter a password for $ADMINUSER" 10 60 3>&1 1>&2 2>&3)

local exitstatus=$?
whiptail_check_exitstatus $exitstatus
}

whiptail_create_admin_user_password2() {

ADMINPASS2=$(whiptail --title "Security Onion Install" --passwordbox \
"Re-enter a password for $ADMINUSER" 10 60 3>&1 1>&2 2>&3)

local exitstatus=$?
whiptail_check_exitstatus $exitstatus

}

whiptail_create_socore_user() {

whiptail --title "Security Onion Setup" --msgbox "Set a password for the socore user. This account is used for adding sensors remotely." 8 75

}

whiptail_create_socore_user_password1() {

COREPASS1=$(whiptail --title "Security Onion Install" --passwordbox \
"Enter a password for user socore" 10 75 3>&1 1>&2 2>&3)

local exitstatus=$?
whiptail_check_exitstatus $exitstatus

}

whiptail_create_socore_user_password2() {

COREPASS2=$(whiptail --title "Security Onion Install" --passwordbox \
"Re-enter a password for user socore" 10 75 3>&1 1>&2 2>&3)

local exitstatus=$?
whiptail_check_exitstatus $exitstatus

}

whiptail_cur_close_days() {

CURCLOSEDAYS=$(whiptail --title "Security Onion Setup" --inputbox \
"Please specify the threshold (in days) at which Elasticsearch indices will be closed" 10 75 $CURCLOSEDAYS 3>&1 1>&2 2>&3)

local exitstatus=$?
whiptail_check_exitstatus $exitstatus

}

whiptail_dhcp_or_static() {

ADDRESSTYPE=$(whiptail --title "Security Onion Setup" --radiolist \
"Choose how to set up your management interface:" 20 78 4 \
"STATIC" "Set a static IPv4 address" ON \
"DHCP" "Use DHCP to configure the Management Interface" OFF 3>&1 1>&2 2>&3 )

local exitstatus=$?
whiptail_check_exitstatus $exitstatus
}

whiptail_enable_components() {
COMPONENTS=$(whiptail --title "Security Onion Setup" --checklist \
"Select Components to install" 20 75 8 \
"GRAFANA" "Enable Grafana for system monitoring" ON \
"OSQUERY" "Enable Fleet with osquery" ON \
"WAZUH" "Enable Wazuh" ON \
"THEHIVE" "Enable TheHive" ON \
"PLAYBOOK" "Enable Playbook" ON 3>&1 1>&2 2>&3 )

local exitstatus=$?
whiptail_check_exitstatus $exitstatus

}

whiptail_eval_adv() {
EVALADVANCED=$(whiptail --title "Security Onion Setup" --radiolist \
"Choose your eval install:" 20 75 4 \
"BASIC" "Install basic components for evaluation" ON \
"ADVANCED" "Choose additional components to be installed" OFF 3>&1 1>&2 2>&3 )

local exitstatus=$?
whiptail_check_exitstatus $exitstatus
}

whiptail_eval_adv_warning() {
whiptail --title "Security Onion Setup" --msgbox "Please keep in mind that the more services you enable, the more RAM is required." 8 75
}

whiptail_helix_apikey() {
HELIXAPIKEY=$(whiptail --title "Security Onion Setup" --inputbox \
"Enter your Helix API Key: \n \nThis can be set later using so-helix-apikey" 10 75 3>&1 1>&2 2>&3)

local exitstatus=$?
whiptail_check_exitstatus $exitstatus

}

whiptail_homenet_master() {

# Ask for the HOME_NET on the master
HNMASTER=$(whiptail --title "Security Onion Setup" --inputbox \
"Enter your HOME_NET separated by ," 10 75 10.0.0.0/8,192.168.0.0/16,172.16.0.0/12 3>&1 1>&2 2>&3)

local exitstatus=$?
whiptail_check_exitstatus $exitstatus

}

whiptail_homenet_sensor() {

# Ask to inherit from master
whiptail --title "Security Onion Setup" --yesno "Do you want to inherit the HOME_NET from the Master?" 8 75

local exitstatus=$?
if [ $exitstatus == 0 ]; then
HNSENSOR=inherit
else
HNSENSOR=$(whiptail --title "Security Onion Setup" --inputbox \
"Enter your HOME_NET separated by ," 10 75 10.0.0.0/8,192.168.0.0/16,172.16.0.0/12 3>&1 1>&2 2>&3)
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
fi

}

whiptail_install_type() {

# What kind of install are we doing?
INSTALLTYPE=$(whiptail --title "Security Onion Setup" --radiolist \
"Choose Install Type:" 20 75 13 \
"SENSORONLY" "Create a forward only sensor" ON \
"STORAGENODE" "Add a Storage Hot Node with parsing" OFF \
"MASTERONLY" "Start a new grid" OFF \
"EVALMODE" "Evaluate all the things" OFF \
"HELIXSENSOR" "Connect this sensor to FireEye Helix" OFF \
"PARSINGNODE" "TODO Add a dedicated Parsing Node" OFF \
"HOTNODE" "TODO Add Hot Node (Storage Node without Parsing)" OFF \
"WARMNODE" "TODO Add Warm Node to existing Hot or Storage node" OFF \
"WAZUH" "TODO Stand Alone Wazuh Node" OFF \
"STRELKA" "TODO Stand Alone Strelka Node" OFF \
"FLEET" "TODO Stand Alone Fleet OSQuery Node" OFF 3>&1 1>&2 2>&3 )

local exitstatus=$?
whiptail_check_exitstatus $exitstatus

}

whiptail_log_size_limit() {

LOG_SIZE_LIMIT=$(whiptail --title "Security Onion Setup" --inputbox \
"Please specify the amount of disk space (in GB) you would like to allocate for Elasticsearch data storage. \
By default, this is set to 85% of the disk space allotted for /nsm." 10 75 $LOG_SIZE_LIMIT 3>&1 1>&2 2>&3)

local exitstatus=$?
whiptail_check_exitstatus $exitstatus

}

whiptail_management_interface_dns() {

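# The default nameservers below are passed as one quoted init string so that
# whiptail treats them as a single pre-populated value rather than extra
# positional arguments.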
MDNS=$(whiptail --title "Security Onion Setup" --inputbox \
"Enter your DNS server(s), separated by spaces" 10 60 "8.8.8.8 8.8.4.4" 3>&1 1>&2 2>&3)

}

whiptail_management_interface_dns_search() {

MSEARCH=$(whiptail --title "Security Onion Setup" --inputbox \
"Enter your DNS search domain" 10 60 searchdomain.local 3>&1 1>&2 2>&3)

}

whiptail_management_interface_gateway() {

MGATEWAY=$(whiptail --title "Security Onion Setup" --inputbox \
"Enter your gateway" 10 60 X.X.X.X 3>&1 1>&2 2>&3)

}

whiptail_management_interface_ip() {

MIP=$(whiptail --title "Security Onion Setup" --inputbox \
"Enter your IP address" 10 60 X.X.X.X 3>&1 1>&2 2>&3)

}

whiptail_management_interface_mask() {

MMASK=$(whiptail --title "Security Onion Setup" --inputbox \
"Enter the bit mask for your subnet" 10 60 24 3>&1 1>&2 2>&3)

}

whiptail_management_nic() {

MNIC=$(whiptail --title "NIC Setup" --radiolist "Please select your management NIC" 20 75 12 ${NICS[@]} 3>&1 1>&2 2>&3 )
local exitstatus=$?
whiptail_check_exitstatus $exitstatus

while [ -z "$MNIC" ]
do
MNIC=$(whiptail --title "NIC Setup" --radiolist "Please select your management NIC" 20 75 12 ${NICS[@]} 3>&1 1>&2 2>&3 )
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
done

}

whiptail_nids() {

NIDS=$(whiptail --title "Security Onion Setup" --radiolist \
"Choose which IDS to run:" 20 75 4 \
"Suricata" "Suricata 4.X" ON \
"Snort" "Snort 3.0 Beta" OFF 3>&1 1>&2 2>&3 )

local exitstatus=$?
whiptail_check_exitstatus $exitstatus

}

whiptail_oinkcode() {

OINKCODE=$(whiptail --title "Security Onion Setup" --inputbox \
"Enter your oinkcode" 10 75 XXXXXXX 3>&1 1>&2 2>&3)

local exitstatus=$?
whiptail_check_exitstatus $exitstatus

}

whiptail_make_changes() {

whiptail --title "Security Onion Setup" --yesno "We are going to set this machine up as $INSTALLTYPE. Please hit YES to make changes or NO to cancel." 8 75

local exitstatus=$?
whiptail_check_exitstatus $exitstatus

}

whiptail_management_server() {

MSRV=$(whiptail --title "Security Onion Setup" --inputbox \
"Enter your Master Server HOSTNAME. It is CASE SENSITIVE!" 10 75 XXXX 3>&1 1>&2 2>&3)

local exitstatus=$?
whiptail_check_exitstatus $exitstatus

# See if it resolves. Otherwise prompt to add to host file
TESTHOST=$(host $MSRV)

if [[ $TESTHOST = *"not found"* ]] || [[ $TESTHOST = *"connection timed out"* ]]; then
add_master_hostfile
fi

}

# Ask if you want to do advanced setup of the Master
whiptail_master_adv() {

MASTERADV=$(whiptail --title "Security Onion Setup" --radiolist \
"Choose what type of master install:" 20 75 4 \
"BASIC" "Install master with recommended settings" ON \
"ADVANCED" "Perform additional configuration on the master" OFF 3>&1 1>&2 2>&3 )

local exitstatus=$?
whiptail_check_exitstatus $exitstatus

}

# Ask which additional components to install
whiptail_master_adv_service_brologs() {

BLOGS=$(whiptail --title "Security Onion Setup" --checklist "Please Select Logs to Send:" 24 75 12 \
"conn" "Connection Logging" ON \
"dce_rpc" "RPC Logs" ON \
"dhcp" "DHCP Logs" ON \
"dhcpv6" "DHCP IPv6 Logs" ON \
"dnp3" "DNP3 Logs" ON \
"dns" "DNS Logs" ON \
"dpd" "DPD Logs" ON \
"files" "Files Logs" ON \
"ftp" "FTP Logs" ON \
"http" "HTTP Logs" ON \
"intel" "Intel Hits Logs" ON \
"irc" "IRC Chat Logs" ON \
"kerberos" "Kerberos Logs" ON \
"modbus" "MODBUS Logs" ON \
"mqtt" "MQTT Logs" ON \
"notice" "Zeek Notice Logs" ON \
"ntlm" "NTLM Logs" ON \
"openvpn" "OPENVPN Logs" ON \
"pe" "PE Logs" ON \
"radius" "Radius Logs" ON \
"rfb" "RFB Logs" ON \
"rdp" "RDP Logs" ON \
"signatures" "Signatures Logs" ON \
"sip" "SIP Logs" ON \
"smb_files" "SMB Files Logs" ON \
"smb_mapping" "SMB Mapping Logs" ON \
"smtp" "SMTP Logs" ON \
"snmp" "SNMP Logs" ON \
"software" "Software Logs" ON \
"ssh" "SSH Logs" ON \
"ssl" "SSL Logs" ON \
"syslog" "Syslog Logs" ON \
"telnet" "Telnet Logs" ON \
"tunnel" "Tunnel Logs" ON \
"weird" "Zeek Weird Logs" ON \
"mysql" "MySQL Logs" ON \
"socks" "SOCKS Logs" ON \
"x509" "x.509 Logs" ON 3>&1 1>&2 2>&3 )

local exitstatus=$?
whiptail_check_exitstatus $exitstatus

}

whiptail_network_notice() {

whiptail --title "Security Onion Setup" --yesno "Since this is a network install, we assume the management interface, DNS, hostname, etc. are already set up. Hit YES to continue." 8 75

local exitstatus=$?
whiptail_check_exitstatus $exitstatus

}

whiptail_node_advanced() {

NODESETUP=$(whiptail --title "Security Onion Setup" --radiolist \
"What type of configuration would you like to use?" 20 75 4 \
"NODEBASIC" "Install Storage Node with recommended settings" ON \
"NODEADVANCED" "Advanced Node Setup" OFF 3>&1 1>&2 2>&3 )

local exitstatus=$?
whiptail_check_exitstatus $exitstatus

}

whiptail_node_es_heap() {

es_heapsize
NODE_ES_HEAP_SIZE=$(whiptail --title "Security Onion Setup" --inputbox \
"\nEnter ES Heap Size: \n \n(Recommended value is pre-populated)" 10 75 $ES_HEAP_SIZE 3>&1 1>&2 2>&3)

local exitstatus=$?
whiptail_check_exitstatus $exitstatus

}

whiptail_node_ls_heap() {

ls_heapsize
NODE_LS_HEAP_SIZE=$(whiptail --title "Security Onion Setup" --inputbox \
"\nEnter LogStash Heap Size: \n \n(Recommended value is pre-populated)" 10 75 $LS_HEAP_SIZE 3>&1 1>&2 2>&3)

local exitstatus=$?
whiptail_check_exitstatus $exitstatus

}

whiptail_node_ls_pipeline_worker() {

LSPIPELINEWORKERS=$(whiptail --title "Security Onion Setup" --inputbox \
"\nEnter LogStash Pipeline Workers: \n \n(Recommended value is pre-populated)" 10 75 $CPUCORES 3>&1 1>&2 2>&3)

local exitstatus=$?
whiptail_check_exitstatus $exitstatus

}

whiptail_node_ls_pipline_batchsize() {

LSPIPELINEBATCH=$(whiptail --title "Security Onion Setup" --inputbox \
"\nEnter LogStash Pipeline Batch Size: \n \n(Default value is pre-populated)" 10 75 125 3>&1 1>&2 2>&3)

local exitstatus=$?
whiptail_check_exitstatus $exitstatus

}

whiptail_node_ls_input_threads() {

LSINPUTTHREADS=$(whiptail --title "Security Onion Setup" --inputbox \
"\nEnter LogStash Input Threads: \n \n(Default value is pre-populated)" 10 75 1 3>&1 1>&2 2>&3)

local exitstatus=$?
whiptail_check_exitstatus $exitstatus

}

whiptail_node_ls_input_batch_count() {

LSINPUTBATCHCOUNT=$(whiptail --title "Security Onion Setup" --inputbox \
"\nEnter LogStash Input Batch Count: \n \n(Default value is pre-populated)" 10 75 125 3>&1 1>&2 2>&3)

local exitstatus=$?
whiptail_check_exitstatus $exitstatus

}

whiptail_passwords_dont_match() {

whiptail --title "Security Onion Setup" --msgbox "Passwords don't match. Please re-enter." 8 75

}

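# OS patch schedules: the name captured below appears to be used to name the
# schedule file referenced in the prompts
# (/opt/so/salt/patch/os/schedules/<schedulename>.yml on the master); days and
# hours are collected by the functions that follow.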
whiptail_patch_name_new_schedule() {

PATCHSCHEDULENAME=$(whiptail --title "Security Onion Setup" --inputbox \
"What name do you want to give this OS patch schedule? The schedule name must be unique. Available schedules can be found on the master under /opt/so/salt/patch/os/schedules/<schedulename>.yml" 10 75 3>&1 1>&2 2>&3)

local exitstatus=$?
whiptail_check_exitstatus $exitstatus

while [[ -z "$PATCHSCHEDULENAME" ]]; do
whiptail --title "Security Onion Setup" --msgbox "Please enter a name for this OS patch schedule." 8 75
PATCHSCHEDULENAME=$(whiptail --title "Security Onion Setup" --inputbox \
"What name do you want to give this OS patch schedule? The schedule name must be unique. Available schedules can be found on the master under /opt/so/salt/patch/os/schedules/<schedulename>.yml" 10 75 3>&1 1>&2 2>&3)
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
done

}

whiptail_patch_schedule() {

# What kind of patch schedule are we doing?
PATCHSCHEDULE=$(whiptail --title "Security Onion Setup" --radiolist \
"Choose an OS patch schedule. This will NOT update Security Onion-related tools such as Zeek, Elasticsearch, Kibana, SaltStack, etc." 15 75 5 \
"Automatic" "Updates installed every 8 hours if available" ON \
"Manual" "Updates will be installed manually" OFF \
"Import Schedule" "Import named schedule on following screen" OFF \
"New Schedule" "Configure and name new schedule on next screen" OFF 3>&1 1>&2 2>&3 )

local exitstatus=$?
whiptail_check_exitstatus $exitstatus

}

whiptail_patch_schedule_import() {

unset PATCHSCHEDULENAME
PATCHSCHEDULENAME=$(whiptail --title "Security Onion Setup" --inputbox \
"Enter the name of the OS patch schedule you want to inherit. Available schedules can be found on the master under /opt/so/salt/patch/os/schedules/<schedulename>.yml" 10 75 3>&1 1>&2 2>&3)

local exitstatus=$?
whiptail_check_exitstatus $exitstatus

while [[ -z "$PATCHSCHEDULENAME" ]]; do
whiptail --title "Security Onion Setup" --msgbox "Please enter a name for the OS patch schedule you want to inherit." 8 75
PATCHSCHEDULENAME=$(whiptail --title "Security Onion Setup" --inputbox \
"Enter the name of the OS patch schedule you want to inherit. Available schedules can be found on the master under /opt/so/salt/patch/os/schedules/<schedulename>.yml" 10 75 3>&1 1>&2 2>&3)

local exitstatus=$?
whiptail_check_exitstatus $exitstatus
done

}

whiptail_patch_schedule_select_days() {
# Select the days to patch
PATCHSCHEDULEDAYS=($(whiptail --title "Security Onion Setup" --checklist \
"Which days do you want to apply OS patches?" 15 75 8 \
"Monday" "" OFF \
"Tuesday" "" ON \
"Wednesday" "" OFF \
"Thursday" "" OFF \
"Friday" "" OFF \
"Saturday" "" OFF \
"Sunday" "" OFF 3>&1 1>&2 2>&3 ))

local exitstatus=$?
whiptail_check_exitstatus $exitstatus
}

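# Hours are gathered in two passes (00:00-11:00 on the first screen,
# 12:00-23:00 on the second) and appended to the PATCHSCHEDULEHOURS array.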
whiptail_patch_schedule_select_hours() {
# Select the hours to patch
PATCHSCHEDULEHOURS=($(whiptail --title "Security Onion Setup" --checklist \
"At which time, UTC, do you want to apply OS patches on the selected days? Hours 12 through 23 can be selected on the next screen." 22 75 13 \
"00:00" "" OFF \
"01:00" "" OFF \
"02:00" "" OFF \
"03:00" "" OFF \
"04:00" "" OFF \
"05:00" "" OFF \
"06:00" "" OFF \
"07:00" "" OFF \
"08:00" "" OFF \
"09:00" "" OFF \
"10:00" "" OFF \
"11:00" "" OFF 3>&1 1>&2 2>&3 ))

local exitstatus=$?
whiptail_check_exitstatus $exitstatus

# Select the hours to patch
PATCHSCHEDULEHOURS+=($(whiptail --title "Security Onion Setup" --checklist \
"At which time, UTC, do you want to apply OS patches on the selected days?" 22 75 13 \
"12:00" "" OFF \
"13:00" "" OFF \
"14:00" "" OFF \
"15:00" "" ON \
"16:00" "" OFF \
"17:00" "" OFF \
"18:00" "" OFF \
"19:00" "" OFF \
"20:00" "" OFF \
"21:00" "" OFF \
"22:00" "" OFF \
"23:00" "" OFF 3>&1 1>&2 2>&3 ))

local exitstatus=$?
whiptail_check_exitstatus $exitstatus
}

whiptail_rule_setup() {

# Get pulled pork info
RULESETUP=$(whiptail --title "Security Onion Setup" --radiolist \
"Which IDS ruleset would you like to use?\n\nThis master server is responsible for downloading the IDS ruleset from the Internet.\n\nSensors then pull a copy of this ruleset from the master server.\n\nIf you select a commercial ruleset, it is your responsibility to purchase enough licenses for all of your sensors in compliance with your vendor's policies." 20 75 4 \
"ETOPEN" "Emerging Threats Open" ON \
"ETPRO" "Emerging Threats PRO" OFF \
"TALOSET" "Snort Subscriber (Talos) and ET NoGPL rulesets" OFF \
"TALOS" "Snort Subscriber (Talos) ruleset and set a policy" OFF \
3>&1 1>&2 2>&3)

local exitstatus=$?
whiptail_check_exitstatus $exitstatus

}

whiptail_sensor_config() {

NSMSETUP=$(whiptail --title "Security Onion Setup" --radiolist \
"What type of configuration would you like to use?" 20 75 4 \
"BASIC" "Install NSM components with recommended settings" ON \
"ADVANCED" "Configure each component individually" OFF 3>&1 1>&2 2>&3 )

local exitstatus=$?
whiptail_check_exitstatus $exitstatus

}

whiptail_set_hostname() {

HOSTNAME=$(whiptail --title "Security Onion Setup" --inputbox \
"Enter the Hostname you would like to set." 10 75 $HOSTNAME 3>&1 1>&2 2>&3)

local exitstatus=$?
whiptail_check_exitstatus $exitstatus

while [[ "$HOSTNAME" == 'localhost' ]] ; do
whiptail --title "Security Onion Setup" --msgbox "Please choose a hostname that isn't localhost." 8 75
HOSTNAME=$(whiptail --title "Security Onion Setup" --inputbox \
"Enter the Hostname you would like to set." 10 75 $HOSTNAME 3>&1 1>&2 2>&3)
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
done

}

whiptail_setup_complete() {

whiptail --title "Security Onion Setup" --msgbox "Finished installing this node as $INSTALLTYPE. Press Enter to reboot." 8 75
install_cleanup

}

whiptail_setup_failed() {

whiptail --title "Security Onion Setup" --msgbox "The install encountered a problem. Please see $SETUPLOG for details. Press Enter to reboot." 8 75
install_cleanup

}

whiptail_shard_count() {

SHARDCOUNT=$(whiptail --title "Security Onion Setup" --inputbox \
"\nEnter ES Shard Count: \n \n(Default value is pre-populated)" 10 75 125 3>&1 1>&2 2>&3)

local exitstatus=$?
whiptail_check_exitstatus $exitstatus

}

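# Suricata pinning: the pipeline below drops any cores already selected for
# Zeek/Bro (entries appearing in both lists are filtered out by "uniq -u") and
# reformats what is left as tag/description pairs for the checklist.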
whiptail_suricata_pins() {

FILTEREDCORES=$(echo ${LISTCORES[@]} ${BROPINS[@]} | tr -d '"' | tr ' ' '\n' | sort | uniq -u | awk '{print $1 " \"" "core" "\""}')
SURIPINS=$(whiptail --noitem --title "Pin Suricata CPUS" --checklist "Please Select $LBPROCS cores to pin Suricata to:" 20 75 12 ${FILTEREDCORES[@]} 3>&1 1>&2 2>&3 )

local exitstatus=$?
whiptail_check_exitstatus $exitstatus

}

whiptail_master_updates() {

MASTERUPDATES=$(whiptail --title "Security Onion Setup" --radiolist \
"How would you like to download updates for your grid?" 20 75 4 \
"MASTER" "Master node is the proxy for OS/Docker updates." ON \
"OPEN" "Each node connects to the Internet for updates" OFF 3>&1 1>&2 2>&3 )

local exitstatus=$?
whiptail_check_exitstatus $exitstatus

}

whiptail_node_updates() {

NODEUPDATES=$(whiptail --title "Security Onion Setup" --radiolist \
"How would you like to download updates for this node?" 20 75 4 \
"MASTER" "Download OS/Docker updates from the Master." ON \
"OPEN" "Download updates directly from the Internet" OFF 3>&1 1>&2 2>&3 )

local exitstatus=$?
whiptail_check_exitstatus $exitstatus

}

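# Unlike the other prompts, this one returns the yes/no exit status to the
# caller instead of aborting on Cancel, presumably so so-setup-network.sh can
# decide whether to continue or call whiptail_cancel itself.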
whiptail_you_sure() {

echo "whiptail_you_sure called" >> $SETUPLOG 2>&1
whiptail --title "Security Onion Setup" --yesno "Are you sure you want to install Security Onion over the Internet?" 8 75

local exitstatus=$?
echo "whiptail_you_sure returning $exitstatus" >> $SETUPLOG 2>&1
return $exitstatus

}
2105
so-setup-network.sh
File diff suppressed because it is too large