Merge pull request #1168 from Security-Onion-Solutions/issue/1049

Issue/1049
This commit is contained in:
Josh Patterson
2020-08-13 10:12:47 -04:00
committed by GitHub
15 changed files with 614 additions and 97 deletions

View File

@@ -13,6 +13,7 @@ role:
fleet:
heavynode:
helixsensor:
import:
manager:
managersearch:
standalone:

View File

@@ -2,7 +2,7 @@ base:
'*':
- patch.needs_restarting
'*_eval or *_helix or *_heavynode or *_sensor or *_standalone':
'*_eval or *_helix or *_heavynode or *_sensor or *_standalone or *_import':
- match: compound
- zeek
@@ -80,3 +80,10 @@ base:
- logstash.search
- elasticsearch.search
- minions.{{ grains.id }}
'*_import':
- zeeklogs
- secrets
- elasticsearch.eval
- global
- minions.{{ grains.id }}

View File

@@ -0,0 +1,12 @@
{# Docker containers that make up an import node. Other states consume this
   map to start, stop, and monitor the containers for this role. #}
{% set docker = {
'containers': [
'so-filebeat',
'so-nginx',
'so-soc',
'so-kratos',
'so-elasticsearch',
'so-kibana',
'so-suricata',
'so-zeek'
]
} %}

View File

@@ -27,7 +27,7 @@ exec 3>&1 1>${SOUP_LOG} 2>&1
manager_check() {
# Check to see if this is a manager
MANAGERCHECK=$(cat /etc/salt/grains | grep role | awk '{print $2}')
if [[ "$MANAGERCHECK" =~ ^('so-eval'|'so-manager'|'so-standalone'|'so-managersearch')$ ]]; then
if [[ "$MANAGERCHECK" =~ ^('so-eval'|'so-manager'|'so-standalone'|'so-managersearch'|'so-import')$ ]]; then
echo "This is a manager. We can proceed."
else
echo "Please run soup on the manager. The manager controls all updates."
@@ -93,7 +93,21 @@ pillar_changes() {
update_dockers() {
# List all the containers
if [ $MANAGERCHECK != 'so-helix' ]; then
if [ $MANAGERCHECK == 'so-import' ]; then
    # Import nodes run a reduced container set (no grafana/thehive/etc.).
    # NOTE: removed a duplicate "so-suricata" entry that was listed twice.
    TRUSTED_CONTAINERS=( \
        "so-idstools" \
        "so-nginx" \
        "so-filebeat" \
        "so-suricata" \
        "so-soc" \
        "so-elasticsearch" \
        "so-kibana" \
        "so-kratos" \
        "so-registry" \
        "so-pcaptools" \
        "so-zeek" )
elif [ $MANAGERCHECK != 'so-helix' ]; then
TRUSTED_CONTAINERS=( \
"so-acng" \
"so-thehive-cortex" \

View File

@@ -25,7 +25,7 @@
{% set FEATUREZ = '' %}
{% endif %}
{% if grains['role'] in ['so-eval','so-managersearch', 'so-manager', 'so-standalone'] %}
{% if grains['role'] in ['so-eval','so-managersearch', 'so-manager', 'so-standalone', 'so-import'] %}
{% set esclustername = salt['pillar.get']('manager:esclustername', '') %}
{% set esheap = salt['pillar.get']('manager:esheap', '') %}
{% set ismanager = True %}

View File

@@ -74,7 +74,7 @@ filebeat.modules:
# List of prospectors to fetch data.
filebeat.inputs:
#------------------------------ Log prospector --------------------------------
{%- if grains['role'] == 'so-sensor' or grains['role'] == "so-eval" or grains['role'] == "so-helix" or grains['role'] == "so-heavynode" or grains['role'] == "so-standalone" %}
{%- if grains['role'] in ['so-sensor', "so-eval", "so-helix", "so-heavynode", "so-standalone", "so-import"] %}
- type: udp
enabled: true
host: "0.0.0.0:514"
@@ -253,7 +253,7 @@ output.{{ type }}:
{%- endfor %}
{%- else %}
#----------------------------- Elasticsearch/Logstash output ---------------------------------
{%- if grains['role'] == "so-eval" %}
{%- if grains['role'] in ["so-eval", "so-import"] %}
output.elasticsearch:
enabled: true
hosts: ["{{ MANAGER }}:9200"]

View File

@@ -488,3 +488,54 @@ role:
localhost:
portgroups:
- {{ portgroups.all }}
import:
chain:
DOCKER-USER:
hostgroups:
manager:
portgroups:
- {{ portgroups.kibana }}
- {{ portgroups.redis }}
- {{ portgroups.influxdb }}
- {{ portgroups.elasticsearch_rest }}
- {{ portgroups.elasticsearch_node }}
minion:
portgroups:
- {{ portgroups.docker_registry }}
sensor:
portgroups:
- {{ portgroups.beats_5044 }}
- {{ portgroups.beats_5644 }}
search_node:
portgroups:
- {{ portgroups.redis }}
- {{ portgroups.elasticsearch_node }}
self:
portgroups:
- {{ portgroups.syslog}}
beats_endpoint:
portgroups:
- {{ portgroups.beats_5044 }}
beats_endpoint_ssl:
portgroups:
- {{ portgroups.beats_5644 }}
elasticsearch_rest:
portgroups:
- {{ portgroups.elasticsearch_rest }}
analyst:
portgroups:
- {{ portgroups.nginx }}
INPUT:
hostgroups:
anywhere:
portgroups:
- {{ portgroups.ssh }}
dockernet:
portgroups:
- {{ portgroups.all }}
localhost:
portgroups:
- {{ portgroups.all }}
minion:
portgroups:
- {{ portgroups.salt_manager }}

6
salt/import/bond.sls Normal file
View File

@@ -0,0 +1,6 @@
# Manage the bond0 interface used by import nodes for reading imported traffic.
configure_bond0:
network.managed:
- name: bond0
- type: bond
# bonding mode '1' = active-backup
- mode: '1'
- enabled: True

View File

@@ -0,0 +1,326 @@
{%- set managerip = salt['pillar.get']('manager:mainip', '') %}
{%- set FLEET_MANAGER = salt['pillar.get']('global:fleet_manager') %}
{%- set FLEET_NODE = salt['pillar.get']('global:fleet_node') %}
{%- set FLEET_IP = salt['pillar.get']('global:fleet_ip', None) %}
# For more information on configuration, see:
# * Official English Documentation: http://nginx.org/en/docs/
# * Official Russian Documentation: http://nginx.org/ru/docs/
worker_processes auto;
error_log /var/log/nginx/error.log;
pid /run/nginx.pid;
# Load dynamic modules. See /usr/share/nginx/README.dynamic.
include /usr/share/nginx/modules/*.conf;
events {
worker_connections 1024;
}
http {
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
client_max_body_size 1024M;
include /etc/nginx/mime.types;
default_type application/octet-stream;
# Load modular configuration files from the /etc/nginx/conf.d directory.
# See http://nginx.org/en/docs/ngx_core_module.html#include
# for more information.
include /etc/nginx/conf.d/*.conf;
#server {
# listen 80 default_server;
# listen [::]:80 default_server;
# server_name _;
# root /opt/socore/html;
# index index.html;
# Load configuration files for the default server block.
#include /etc/nginx/default.d/*.conf;
# location / {
# }
# error_page 404 /404.html;
# location = /40x.html {
# }
# error_page 500 502 503 504 /50x.html;
# location = /50x.html {
# }
#}
server {
listen 80 default_server;
server_name _;
return 301 https://$host$request_uri;
}
{% if FLEET_MANAGER %}
server {
listen 8090 ssl http2 default_server;
server_name _;
root /opt/socore/html;
index blank.html;
ssl_certificate "/etc/pki/nginx/server.crt";
ssl_certificate_key "/etc/pki/nginx/server.key";
ssl_session_cache shared:SSL:1m;
ssl_session_timeout 10m;
ssl_ciphers HIGH:!aNULL:!MD5;
ssl_prefer_server_ciphers on;
location ~ ^/kolide.agent.Api/(RequestEnrollment|RequestConfig|RequestQueries|PublishLogs|PublishResults|CheckHealth)$ {
grpc_pass grpcs://{{ managerip }}:8080;
grpc_set_header Host $host;
grpc_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_buffering off;
}
}
{% endif %}
# Settings for a TLS enabled server.
server {
listen 443 ssl http2 default_server;
#listen [::]:443 ssl http2 default_server;
server_name _;
root /opt/socore/html;
index index.html;
ssl_certificate "/etc/pki/nginx/server.crt";
ssl_certificate_key "/etc/pki/nginx/server.key";
ssl_session_cache shared:SSL:1m;
ssl_session_timeout 10m;
ssl_ciphers HIGH:!aNULL:!MD5;
ssl_prefer_server_ciphers on;
# Load configuration files for the default server block.
#include /etc/nginx/default.d/*.conf;
location ~* (^/login/|^/js/.*|^/css/.*|^/images/.*) {
proxy_pass http://{{ managerip }}:9822;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "Upgrade";
proxy_set_header X-Forwarded-Proto $scheme;
}
location / {
auth_request /auth/sessions/whoami;
proxy_pass http://{{ managerip }}:9822/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "Upgrade";
proxy_set_header X-Forwarded-Proto $scheme;
}
location ~ ^/auth/.*?(whoami|login|logout|settings) {
rewrite /auth/(.*) /$1 break;
proxy_pass http://{{ managerip }}:4433;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
location /cyberchef/ {
auth_request /auth/sessions/whoami;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
location /navigator/ {
auth_request /auth/sessions/whoami;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
location /packages/ {
try_files $uri =206;
auth_request /auth/sessions/whoami;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
location /grafana/ {
auth_request /auth/sessions/whoami;
rewrite /grafana/(.*) /$1 break;
proxy_pass http://{{ managerip }}:3000/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
location /kibana/ {
auth_request /auth/sessions/whoami;
rewrite /kibana/(.*) /$1 break;
proxy_pass http://{{ managerip }}:5601/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
location /nodered/ {
proxy_pass http://{{ managerip }}:1880/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "Upgrade";
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
location /playbook/ {
proxy_pass http://{{ managerip }}:3200/playbook/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
{%- if FLEET_NODE %}
location /fleet/ {
return 301 https://{{ FLEET_IP }}/fleet;
}
{%- else %}
location /fleet/ {
proxy_pass https://{{ managerip }}:8080;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
{%- endif %}
location /thehive/ {
proxy_pass http://{{ managerip }}:9000/thehive/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_http_version 1.1; # this is essential for chunked responses to work
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
location /cortex/ {
proxy_pass http://{{ managerip }}:9001/cortex/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_http_version 1.1; # this is essential for chunked responses to work
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
location /soctopus/ {
proxy_pass http://{{ managerip }}:7000/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
location /kibana/app/soc/ {
rewrite ^/kibana/app/soc/(.*) /soc/$1 permanent;
}
location /kibana/app/fleet/ {
rewrite ^/kibana/app/fleet/(.*) /fleet/$1 permanent;
}
location /kibana/app/soctopus/ {
rewrite ^/kibana/app/soctopus/(.*) /soctopus/$1 permanent;
}
location /sensoroniagents/ {
proxy_pass http://{{ managerip }}:9822/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
error_page 401 = @error401;
location @error401 {
add_header Set-Cookie "AUTH_REDIRECT=$request_uri;Path=/;Max-Age=14400";
return 302 /auth/self-service/browser/flows/login;
}
#error_page 404 /404.html;
# location = /usr/share/nginx/html/40x.html {
#}
error_page 500 502 503 504 /50x.html;
location = /usr/share/nginx/html/50x.html {
}
}
}

View File

@@ -7,7 +7,7 @@
{% set MAINIP = salt['grains.get']('ip_interfaces').get(MAININT)[0] %}
{% set CUSTOM_FLEET_HOSTNAME = salt['pillar.get']('global:fleet_custom_hostname', None) %}
{% if grains.id.split('_')|last in ['manager', 'eval', 'standalone'] %}
{% if grains.id.split('_')|last in ['manager', 'eval', 'standalone', 'import'] %}
{% set trusttheca_text = salt['mine.get'](grains.id, 'x509.get_pem_entries')[grains.id]['/etc/pki/ca.crt']|replace('\n', '') %}
{% set ca_server = grains.id %}
{% else %}
@@ -72,7 +72,7 @@ influxkeyperms:
- mode: 640
- group: 939
{% if grains['role'] in ['so-manager', 'so-eval', 'so-helix', 'so-managersearch', 'so-standalone'] %}
{% if grains['role'] in ['so-manager', 'so-eval', 'so-helix', 'so-managersearch', 'so-standalone', 'so-import'] %}
/etc/pki/filebeat.key:
x509.private_key_managed:
@@ -361,7 +361,7 @@ fleetkeyperms:
- group: 939
{% endif %}
{% if grains['role'] in ['so-sensor', 'so-manager', 'so-node', 'so-eval', 'so-helix', 'so-managersearch', 'so-heavynode', 'so-fleet', 'so-standalone'] %}
{% if grains['role'] in ['so-sensor', 'so-manager', 'so-node', 'so-eval', 'so-helix', 'so-managersearch', 'so-heavynode', 'so-fleet', 'so-standalone', 'so-import'] %}
fbcertdir:
file.directory:

View File

@@ -386,3 +386,23 @@ base:
- fleet
- fleet.install_package
- filebeat
'*_import and G@saltversion:{{saltversion}}':
- match: compound
- ca
- ssl
- registry
- manager
- common
- nginx
- soc
- firewall
- idstools
- suricata.manager
- elasticsearch
- kibana
- filebeat
- utility
- suricata
- zeek
- schedule

View File

@@ -10,7 +10,7 @@ crossclusterson:
- template: jinja
{% endif %}
{% if grains['role'] == 'so-eval' %}
{% if grains['role'] in ['so-eval', 'so-import'] %}
fixsearch:
cmd.script:
- shell: /bin/bash

View File

@@ -142,65 +142,7 @@ secrets_pillar(){
fi
}
# Enable Bro Logs
zeek_logs_enabled() {
echo "Enabling Bro Logs" >> "$setup_log" 2>&1
local zeeklogs_pillar=./pillar/zeeklogs.sls
printf '%s\n'\
"zeeklogs:"\
" enabled:" > "$zeeklogs_pillar"
if [ "$MANAGERADV" = 'ADVANCED' ]; then
for BLOG in "${BLOGS[@]}"; do
echo " - $BLOG" | tr -d '"' >> "$zeeklogs_pillar"
done
else
printf '%s\n'\
" - conn"\
" - dce_rpc"\
" - dhcp"\
" - dhcpv6"\
" - dnp3"\
" - dns"\
" - dpd"\
" - files"\
" - ftp"\
" - http"\
" - intel"\
" - irc"\
" - kerberos"\
" - modbus"\
" - mqtt"\
" - notice"\
" - ntlm"\
" - openvpn"\
" - pe"\
" - radius"\
" - rfb"\
" - rdp"\
" - signatures"\
" - sip"\
" - smb_files"\
" - smb_mapping"\
" - smtp"\
" - snmp"\
" - software"\
" - ssh"\
" - ssl"\
" - syslog"\
" - telnet"\
" - tunnel"\
" - weird"\
" - mysql"\
" - socks"\
" - x509" >> "$zeeklogs_pillar"
fi
printf '%s\n' '----' >> "$setup_log" 2>&1
cat "$zeeklogs_pillar" >> "$setup_log" 2>&1
}
check_admin_pass() {
check_pass_match "$ADMINPASS1" "$ADMINPASS2" "APMATCH"
@@ -360,7 +302,7 @@ configure_minion() {
'helix')
echo "master: $HOSTNAME" >> "$minion_config"
;;
'manager' | 'eval' | 'managersearch' | 'standalone')
'manager' | 'eval' | 'managersearch' | 'standalone' | 'import')
printf '%s\n'\
"master: $HOSTNAME"\
"mysql.host: '$MAINIP'"\
@@ -416,11 +358,19 @@ check_requirements() {
req_cores=4
if [[ "$node_type" == 'sensor' ]]; then req_nics=2; else req_nics=1; fi
if [[ "$node_type" == 'fleet' ]]; then req_mem=4; fi
elif [[ "$standalone_or_dist" == 'import' ]]; then
req_mem=4
req_cores=2
req_nics=1
fi
if [[ $setup_type == 'network' ]]; then
if [[ $setup_type == 'network' ]] ; then
if [[ -n $nsm_mount ]]; then
req_storage=100
if [[ "$standalone_or_dist" == 'import' ]]; then
req_storage=50
else
req_storage=100
fi
if (( $(echo "$free_space_root < $req_storage" | bc -l) )); then
whiptail_storage_requirements "/" "${free_space_root} GB" "${req_storage} GB"
fi
@@ -428,7 +378,11 @@ check_requirements() {
whiptail_storage_requirements "/nsm" "${free_space_nsm} GB" "${req_storage} GB"
fi
else
req_storage=200
if [[ "$standalone_or_dist" == 'import' ]]; then
req_storage=50
else
req_storage=200
fi
if (( $(echo "$free_space_root < $req_storage" | bc -l) )); then
whiptail_storage_requirements "/" "${free_space_root} GB" "${req_storage} GB"
fi
@@ -463,7 +417,7 @@ copy_salt_master_config() {
copy_minion_tmp_files() {
case "$install_type" in
'MANAGER' | 'EVAL' | 'HELIXSENSOR' | 'MANAGERSEARCH' | 'STANDALONE')
'MANAGER' | 'EVAL' | 'HELIXSENSOR' | 'MANAGERSEARCH' | 'STANDALONE' | 'IMPORT')
echo "Copying pillar and salt files in $temp_install_dir to $local_salt_dir"
cp -Rv "$temp_install_dir"/pillar/ $local_salt_dir/ >> "$setup_log" 2>&1
if [ -d "$temp_install_dir"/salt ] ; then
@@ -720,7 +674,7 @@ docker_install() {
else
case "$install_type" in
'MANAGER' | 'EVAL')
'MANAGER' | 'EVAL' | 'STANDALONE' | 'MANAGERSEARCH' | 'IMPORT')
apt-get update >> "$setup_log" 2>&1
;;
*)
@@ -764,6 +718,22 @@ docker_seed_registry() {
local VERSION="$SOVERSION"
if ! [ -f /nsm/docker-registry/docker/registry.tar ]; then
if [ "$install_type" == 'IMPORT' ]; then
    # Import installs seed the registry with a reduced container set.
    # NOTE: removed a duplicate "so-suricata:$VERSION" entry that was listed twice.
    local TRUSTED_CONTAINERS=(\
        "so-idstools:$VERSION" \
        "so-nginx:$VERSION" \
        "so-filebeat:$VERSION" \
        "so-suricata:$VERSION" \
        "so-soc:$VERSION" \
        "so-elasticsearch:$VERSION" \
        "so-kibana:$VERSION" \
        "so-kratos:$VERSION" \
        "so-registry:$VERSION" \
        "so-pcaptools:$VERSION" \
        "so-zeek:$VERSION"
    )
else
local TRUSTED_CONTAINERS=(\
"so-nginx:$VERSION" \
"so-filebeat:$VERSION" \
@@ -775,7 +745,8 @@ docker_seed_registry() {
"so-telegraf:$VERSION" \
"so-zeek:$VERSION"
)
if [ "$install_type" != 'HELIXSENSOR' ]; then
fi
if [ "$install_type" != 'HELIXSENSOR' ] && [ "$install_type" != 'IMPORT' ]; then
TRUSTED_CONTAINERS=("${TRUSTED_CONTAINERS[@]}" \
"so-acng:$VERSION" \
"so-thehive-cortex:$VERSION" \
@@ -903,7 +874,7 @@ got_root() {
get_minion_type() {
local minion_type
case "$install_type" in
'EVAL' | 'MANAGERSEARCH' | 'MANAGER' | 'SENSOR' | 'HEAVYNODE' | 'FLEET' | 'STANDALONE')
'EVAL' | 'MANAGERSEARCH' | 'MANAGER' | 'SENSOR' | 'HEAVYNODE' | 'FLEET' | 'STANDALONE' | 'IMPORT')
minion_type=$(echo "$install_type" | tr '[:upper:]' '[:lower:]')
;;
'HELIXSENSOR')
@@ -1275,7 +1246,7 @@ saltify() {
set_progress_str 6 'Installing various dependencies'
yum -y install wget nmap-ncat >> "$setup_log" 2>&1
case "$install_type" in
'MANAGER' | 'EVAL' | 'MANAGERSEARCH' | 'FLEET' | 'HELIXSENSOR' | 'STANDALONE')
'MANAGER' | 'EVAL' | 'MANAGERSEARCH' | 'FLEET' | 'HELIXSENSOR' | 'STANDALONE'| 'IMPORT')
reserve_group_ids >> "$setup_log" 2>&1
yum -y install epel-release >> "$setup_log" 2>&1
yum -y install sqlite argon2 curl mariadb-devel >> "$setup_log" 2>&1
@@ -1346,7 +1317,7 @@ saltify() {
'FLEET')
if [ "$OSVER" != 'xenial' ]; then apt-get -y install python3-mysqldb >> "$setup_log" 2>&1; else apt-get -y install python-mysqldb >> "$setup_log" 2>&1; fi
;;
'MANAGER' | 'EVAL' | 'MANAGERSEARCH' | 'STANDALONE') # TODO: should this also be HELIXSENSOR?
'MANAGER' | 'EVAL' | 'MANAGERSEARCH' | 'STANDALONE' | 'IMPORT') # TODO: should this also be HELIXSENSOR?
# Add saltstack repo(s)
wget -q --inet4-only -O - https://repo.saltstack.com"$py_ver_url_path"/ubuntu/"$ubuntu_version"/amd64/archive/3001.1/SALTSTACK-GPG-KEY.pub | apt-key add - >> "$setup_log" 2>&1
@@ -1403,7 +1374,7 @@ saltify() {
salt_checkin() {
case "$install_type" in
'MANAGER' | 'EVAL' | 'HELIXSENSOR' | 'MANAGERSEARCH' | 'STANDALONE') # Fix Mine usage
'MANAGER' | 'EVAL' | 'HELIXSENSOR' | 'MANAGERSEARCH' | 'STANDALONE' | 'IMPORT') # Fix Mine usage
{
echo "Building Certificate Authority";
salt-call state.apply ca;
@@ -1567,7 +1538,7 @@ set_hostname() {
set_hostname_iso
if [[ ! $install_type =~ ^(MANAGER|EVAL|HELIXSENSOR|MANAGERSEARCH|STANDALONE)$ ]]; then
if [[ ! $install_type =~ ^(MANAGER|EVAL|HELIXSENSOR|MANAGERSEARCH|STANDALONE|IMPORT)$ ]]; then
if ! getent hosts "$MSRV"; then
echo "$MSRVIP $MSRV" >> /etc/hosts
fi
@@ -1599,7 +1570,7 @@ set_initial_firewall_policy() {
$default_salt_dir/salt/common/tools/sbin/so-firewall --apply includehost minion "$MAINIP"
$default_salt_dir/pillar/data/addtotab.sh managertab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm"
;;
'EVAL' | 'MANAGERSEARCH' | 'STANDALONE')
'EVAL' | 'MANAGERSEARCH' | 'STANDALONE' | 'IMPORT')
$default_salt_dir/salt/common/tools/sbin/so-firewall includehost manager "$MAINIP"
$default_salt_dir/salt/common/tools/sbin/so-firewall includehost minion "$MAINIP"
$default_salt_dir/salt/common/tools/sbin/so-firewall includehost sensor "$MAINIP"
@@ -1804,8 +1775,68 @@ es_heapsize() {
fi
export ES_HEAP_SIZE
if [[ "$install_type" =~ ^(EVAL|MANAGERSEARCH|STANDALONE)$ ]]; then
if [[ "$install_type" =~ ^(EVAL|MANAGERSEARCH|STANDALONE|IMPORT)$ ]]; then
NODE_ES_HEAP_SIZE=ES_HEAP_SIZE
export NODE_ES_HEAP_SIZE
fi
}
# Write the zeeklogs pillar listing which Zeek (formerly Bro) log types are
# enabled. ADVANCED installs honor the user-selected $BLOGS array; all other
# installs get the default log set below. Output goes to ./pillar/zeeklogs.sls
# and is echoed into $setup_log for troubleshooting.
zeek_logs_enabled() {
echo "Enabling Bro Logs" >> "$setup_log" 2>&1
local zeeklogs_pillar=./pillar/zeeklogs.sls
# Start the pillar file with the zeeklogs:enabled: header.
printf '%s\n'\
"zeeklogs:"\
" enabled:" > "$zeeklogs_pillar"
if [ "$MANAGERADV" = 'ADVANCED' ]; then
# tr strips any quoting that whiptail left around the selected log names.
for BLOG in "${BLOGS[@]}"; do
echo " - $BLOG" | tr -d '"' >> "$zeeklogs_pillar"
done
else
# Default Zeek log set used for non-advanced installs.
printf '%s\n'\
" - conn"\
" - dce_rpc"\
" - dhcp"\
" - dhcpv6"\
" - dnp3"\
" - dns"\
" - dpd"\
" - files"\
" - ftp"\
" - http"\
" - intel"\
" - irc"\
" - kerberos"\
" - modbus"\
" - mqtt"\
" - notice"\
" - ntlm"\
" - openvpn"\
" - pe"\
" - radius"\
" - rfb"\
" - rdp"\
" - signatures"\
" - sip"\
" - smb_files"\
" - smb_mapping"\
" - smtp"\
" - snmp"\
" - software"\
" - ssh"\
" - ssl"\
" - syslog"\
" - telnet"\
" - tunnel"\
" - weird"\
" - mysql"\
" - socks"\
" - x509" >> "$zeeklogs_pillar"
fi
# Dump the generated pillar into the setup log for later review.
printf '%s\n' '----' >> "$setup_log" 2>&1
cat "$zeeklogs_pillar" >> "$setup_log" 2>&1
}

View File

@@ -178,6 +178,8 @@ elif [ "$install_type" = 'FLEET' ]; then
OSQUERY=1
elif [ "$install_type" = 'HELIXSENSOR' ]; then
is_helix=true
elif [ "$install_type" = 'IMPORT' ]; then
is_import=true
fi
if [[ $is_manager && $is_sensor ]]; then
@@ -186,11 +188,15 @@ elif [[ $is_fleet_standalone ]]; then
check_requirements "dist" "fleet"
elif [[ $is_sensor && ! $is_eval ]]; then
check_requirements "dist" "sensor"
elif [[ $is_distmanager || $is_minion ]]; then
elif [[ $is_distmanager || $is_minion ]] && [[ ! $is_import ]]; then
check_requirements "dist"
elif [[ $is_import ]]; then
check_requirements "import"
fi
whiptail_patch_schedule
if [[ ! $is_import ]]; then
whiptail_patch_schedule
fi
case "$setup_type" in
'iso')
@@ -252,17 +258,44 @@ if [[ $is_node ]]; then
CURCLOSEDAYS=30
fi
# Import installs are non-interactive: preset every prompt-derived value and
# disable optional components that an import node does not run.
# NOTE: removed a duplicate RULESETUP=ETOPEN assignment that appeared twice.
if [[ $is_import ]]; then
    PATCHSCHEDULENAME=auto
    MTU=1500
    RULESETUP=ETOPEN
    NSMSETUP=BASIC
    HNSENSOR=inherit
    MANAGERUPDATES=0
    MANAGERADV=BASIC
    INTERFACE=bond0
    ZEEKVERSION=ZEEK
    NIDS=Suricata
    GRAFANA=0
    OSQUERY=0
    WAZUH=0
    THEHIVE=0
    PLAYBOOK=0
    STRELKA=0
fi
# Start user prompts
if [[ $is_helix || $is_sensor ]]; then
whiptail_sensor_nics
fi
if [[ $is_helix || $is_sensor || $is_import ]]; then
calculate_useable_cores
fi
if [[ $is_helix || $is_manager ]]; then
if [[ $is_helix || $is_manager || $is_import ]]; then
whiptail_homenet_manager
fi
if [[ $is_helix || $is_manager || $is_node ]]; then
if [[ $is_helix || $is_manager || $is_node || $is_import ]]; then
set_base_heapsizes
fi
@@ -287,6 +320,9 @@ if [[ $is_manager ]]; then
if [[ $STRELKA == 1 ]]; then
whiptail_strelka_rules
fi
fi
if [[ $is_manager || $is_import ]]; then
collect_webuser_inputs
get_redirect
fi
@@ -348,7 +384,7 @@ else
FLEETNODEPASSWD1=$WEBPASSWD1
fi
if [[ $is_manager ]]; then whiptail_so_allow; fi
if [[ $is_manager || $is_import ]]; then whiptail_so_allow; fi
whiptail_make_changes
@@ -372,7 +408,7 @@ fi
} >> $setup_log 2>&1
if [[ $is_manager ]]; then
if [[ $is_manager || $is_import ]]; then
{
generate_passwords;
secrets_pillar;
@@ -391,7 +427,7 @@ fi
host_pillar >> $setup_log 2>&1
if [[ $is_minion ]]; then
if [[ $is_minion || $is_import ]]; then
set_updates >> $setup_log 2>&1
copy_ssh_key >> $setup_log 2>&1
fi
@@ -412,6 +448,9 @@ fi
if [[ $is_sensor || $is_helix ]]; then
set_progress_str 3 'Configuring sensor interface'
configure_network_sensor >> $setup_log 2>&1
fi
if [[ $is_sensor || $is_helix || $is_import ]]; then
set_progress_str 4 'Generating sensor pillar'
sensor_pillar >> $setup_log 2>&1
fi
@@ -428,7 +467,7 @@ fi
set_progress_str 9 'Initializing Salt minion'
configure_minion "$minion_type" >> $setup_log 2>&1
if [[ $is_manager || $is_helix ]]; then
if [[ $is_manager || $is_helix || $is_import ]]; then
set_progress_str 10 'Configuring Salt master'
{
create_local_directories;
@@ -473,7 +512,7 @@ fi
accept_salt_key_remote >> $setup_log 2>&1
fi
if [[ $is_manager ]]; then
if [[ $is_manager || $is_import ]]; then
set_progress_str 20 'Accepting Salt key'
salt-key -ya "$MINION_ID" >> $setup_log 2>&1
fi
@@ -486,10 +525,15 @@ fi
salt-call state.apply salt.minion -l info >> $setup_log 2>&1
fi
if [[ $is_import ]]; then
set_progress_str 22 'Configuring bond interface'
salt-call state.apply import.bond -l info >> $setup_log 2>&1
fi
set_progress_str 23 'Generating CA and checking in'
salt_checkin >> $setup_log 2>&1
if [[ $is_manager || $is_helix ]]; then
if [[ $is_manager || $is_helix || $is_import ]]; then
set_progress_str 25 'Configuring firewall'
set_initial_firewall_policy >> $setup_log 2>&1
@@ -527,7 +571,7 @@ fi
set_progress_str 64 "$(print_salt_state_apply 'nginx')"
salt-call state.apply -l info nginx >> $setup_log 2>&1
if [[ $is_manager || $is_node ]]; then
if [[ $is_manager || $is_node || $is_import ]]; then
set_progress_str 64 "$(print_salt_state_apply 'elasticsearch')"
salt-call state.apply -l info elasticsearch >> $setup_log 2>&1
fi
@@ -535,7 +579,9 @@ fi
if [[ $is_sensor ]]; then
set_progress_str 65 "$(print_salt_state_apply 'pcap')"
salt-call state.apply -l info pcap >> $setup_log 2>&1
fi
if [[ $is_sensor || $is_import ]]; then
set_progress_str 66 "$(print_salt_state_apply 'suricata')"
salt-call state.apply -l info suricata >> $setup_log 2>&1
@@ -548,13 +594,15 @@ fi
salt-call state.apply -l info curator >> $setup_log 2>&1
fi
if [[ $is_manager ]]; then
if [[ $is_manager || $is_import ]]; then
set_progress_str 69 "$(print_salt_state_apply 'soc')"
salt-call state.apply -l info soc >> $setup_log 2>&1
set_progress_str 70 "$(print_salt_state_apply 'kibana')"
salt-call state.apply -l info kibana >> $setup_log 2>&1
fi
if [[ $is_manager ]]; then
set_progress_str 71 "$(print_salt_state_apply 'elastalert')"
salt-call state.apply -l info elastalert >> $setup_log 2>&1
@@ -612,7 +660,7 @@ fi
fi
fi
if [[ $is_manager || $is_helix ]]; then
if [[ $is_manager || $is_helix || $is_import ]]; then
set_progress_str 81 "$(print_salt_state_apply 'utility')"
salt-call state.apply -l info utility >> $setup_log 2>&1
fi
@@ -629,7 +677,7 @@ fi
filter_unused_nics >> $setup_log 2>&1
network_setup >> $setup_log 2>&1
if [[ $is_manager ]]; then
if [[ $is_manager || $is_import ]]; then
set_progress_str 87 'Adding user to SOC'
add_web_user >> $setup_log 2>&1
fi

View File

@@ -471,10 +471,11 @@ whiptail_install_type() {
# What kind of install are we doing?
install_type=$(whiptail --title "Security Onion Setup" --radiolist \
"Choose install type:" 10 65 3 \
"Choose install type:" 10 65 4 \
"EVAL" "Evaluation mode (not for production) " ON \
"STANDALONE" "Standalone production install " OFF \
"DISTRIBUTED" "Distributed install submenu " OFF \
"IMPORT" "Standalone to import PCAP or log files " OFF \
3>&1 1>&2 2>&3
)