diff --git a/salt/common/tools/sbin/so-elastic-restart b/salt/common/tools/sbin/so-elastic-restart
new file mode 100755
index 000000000..0e3c5937d
--- /dev/null
+++ b/salt/common/tools/sbin/so-elastic-restart
@@ -0,0 +1,43 @@
+#!/bin/bash
+
+# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+. /usr/sbin/so-common
+
+
+{%- if grains['role'] in ['so-eval','so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-node', 'so-import']%}
+/usr/sbin/so-restart elasticsearch $1
+{%- endif %}
+
+{%- if grains['role'] in ['so-eval', 'so-manager', 'so-managersearch', 'so-standalone', 'so-import']%}
+/usr/sbin/so-restart kibana $1
+{%- endif %}
+
+{%- if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-node']%}
+/usr/sbin/so-restart logstash $1
+{%- endif %}
+
+{%- if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-node', 'so-sensor']%}
+/usr/sbin/so-restart filebeat $1
+{%- endif %}
+
+{%- if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-node']%}
+/usr/sbin/so-restart curator $1
+{%- endif %}
+
+{%- if grains['role'] in ['so-eval','so-manager', 'so-managersearch', 'so-standalone']%}
+/usr/sbin/so-restart elastalert $1
+{%- endif %}
diff --git a/salt/common/tools/sbin/so-elastic-start b/salt/common/tools/sbin/so-elastic-start
new file mode 100755
index 000000000..51657ff54
--- /dev/null
+++ b/salt/common/tools/sbin/so-elastic-start
@@ -0,0 +1,43 @@
+#!/bin/bash
+
+# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+. /usr/sbin/so-common
+
+
+{%- if grains['role'] in ['so-eval','so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-node', 'so-import']%}
+/usr/sbin/so-start elasticsearch $1
+{%- endif %}
+
+{%- if grains['role'] in ['so-eval', 'so-manager', 'so-managersearch', 'so-standalone', 'so-import']%}
+/usr/sbin/so-start kibana $1
+{%- endif %}
+
+{%- if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-node']%}
+/usr/sbin/so-start logstash $1
+{%- endif %}
+
+{%- if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-node', 'so-sensor']%}
+/usr/sbin/so-start filebeat $1
+{%- endif %}
+
+{%- if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-node']%}
+/usr/sbin/so-start curator $1
+{%- endif %}
+
+{%- if grains['role'] in ['so-eval','so-manager', 'so-managersearch', 'so-standalone']%}
+/usr/sbin/so-start elastalert $1
+{%- endif %}
diff --git a/salt/common/tools/sbin/so-elastic-stop b/salt/common/tools/sbin/so-elastic-stop
new file mode 100755
index 000000000..2f6c46082
--- /dev/null
+++ b/salt/common/tools/sbin/so-elastic-stop
@@ -0,0 +1,43 @@
+#!/bin/bash
+
+# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+. /usr/sbin/so-common
+
+
+{%- if grains['role'] in ['so-eval','so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-node', 'so-import']%}
+/usr/sbin/so-stop elasticsearch $1
+{%- endif %}
+
+{%- if grains['role'] in ['so-eval', 'so-manager', 'so-managersearch', 'so-standalone', 'so-import']%}
+/usr/sbin/so-stop kibana $1
+{%- endif %}
+
+{%- if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-node']%}
+/usr/sbin/so-stop logstash $1
+{%- endif %}
+
+{%- if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-node', 'so-sensor']%}
+/usr/sbin/so-stop filebeat $1
+{%- endif %}
+
+{%- if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-node']%}
+/usr/sbin/so-stop curator $1
+{%- endif %}
+
+{%- if grains['role'] in ['so-eval','so-manager', 'so-managersearch', 'so-standalone']%}
+/usr/sbin/so-stop elastalert $1
+{%- endif %}
diff --git a/salt/common/tools/sbin/so-salt-minion-check b/salt/common/tools/sbin/so-salt-minion-check
old mode 100644
new mode 100755
diff --git a/salt/elasticsearch/files/elasticsearch.yml b/salt/elasticsearch/files/elasticsearch.yml
index acad465d1..18d1c9c81 100644
--- a/salt/elasticsearch/files/elasticsearch.yml
+++ b/salt/elasticsearch/files/elasticsearch.yml
@@ -44,3 +44,4 @@ cluster.routing.allocation.disk.watermark.flood_stage: 98%
node.attr.box_type: {{ NODE_ROUTE_TYPE }}
node.name: {{ ESCLUSTERNAME }}
script.max_compilations_rate: 1000/1m
+indices.query.bool.max_clause_count: 1500
diff --git a/salt/sensoroni/files/sensoroni.json b/salt/sensoroni/files/sensoroni.json
index cc5be34ea..55b928ef0 100644
--- a/salt/sensoroni/files/sensoroni.json
+++ b/salt/sensoroni/files/sensoroni.json
@@ -1,12 +1,16 @@
{% set URLBASE = salt['pillar.get']('global:url_base') -%}
+{% set DESCRIPTION = salt['pillar.get']('sensoroni:node_description') -%}
+{% set ADDRESS = salt['pillar.get']('sensoroni:node_address') -%}
{% set SENSORONIKEY = salt['pillar.get']('global:sensoronikey', '') -%}
-{% set CHECKININTERVALMS = salt['pillar.get']('sensoroni:sensor_checkin_interval_ms', 10000) -%}
+{% set CHECKININTERVALMS = salt['pillar.get']('sensoroni:node_checkin_interval_ms', 10000) -%}
{% set STENOENABLED = salt['pillar.get']('steno:enabled', False) -%}
{
"logFilename": "/opt/sensoroni/logs/sensoroni.log",
"logLevel":"info",
"agent": {
"role": "{{ grains.role }}",
+ "description": "{{ DESCRIPTION }}",
+ "address": "{{ ADDRESS }}",
"pollIntervalMs": {{ CHECKININTERVALMS if CHECKININTERVALMS else 10000 }},
"serverUrl": "https://{{ URLBASE }}/sensoroniagents",
"verifyCert": false,
diff --git a/salt/strelka/files/backend/backend.yaml b/salt/strelka/files/backend/backend.yaml
index b71e8ac74..db6ce0560 100644
--- a/salt/strelka/files/backend/backend.yaml
+++ b/salt/strelka/files/backend/backend.yaml
@@ -6,8 +6,8 @@
{%- endif -%}
logging_cfg: '/etc/strelka/logging.yaml'
limits:
- max_files: 5000
- time_to_live: 900
+ max_files: 0
+ time_to_live: 0
max_depth: 15
distribution: 600
scanner: 150
diff --git a/setup/so-functions b/setup/so-functions
index 3cf268869..e8360c671 100755
--- a/setup/so-functions
+++ b/setup/so-functions
@@ -251,19 +251,19 @@ check_pass_match() {
fi
}
+# False if stopped, true if running
check_service_status() {
local service_name=$1
echo "Checking service $service_name status" >> "$setup_log" 2>&1
systemctl status $service_name > /dev/null 2>&1
local status=$?
- #true if there is an issue with the service false if it is running properly
if [ $status -gt 0 ]; then
- echo "$service_name is not running" >> "$setup_log" 2>&1
- echo 1;
+ echo " $service_name is not running" >> "$setup_log" 2>&1
+ return 1;
else
- echo "$service_name is running" >> "$setup_log" 2>&1
- echo 0;
+ echo " $service_name is running" >> "$setup_log" 2>&1
+ return 0;
fi
}
@@ -273,28 +273,27 @@ check_salt_master_status() {
salt-call saltutil.kill_all_jobs > /dev/null 2>&1
salt-call state.show_top > /dev/null 2>&1
local status=$?
- #true if there is an issue talking to salt master
if [ $status -gt 0 ]; then
- echo 1;
+ echo " Could not talk to salt master" >> "$setup_log" 2>&1
+ return 1;
else
- echo "Can talk to salt master" >> "$setup_log" 2>&1
- echo 0;
+ echo " Can talk to salt master" >> "$setup_log" 2>&1
+ return 0;
fi
}
check_salt_minion_status() {
echo "Checking if the salt minion will respond to jobs" >> "$setup_log" 2>&1
- salt "$MINION_ID" test.ping >> "$setup_log" 2>&1
+ salt "$MINION_ID" test.ping > /dev/null 2>&1
local status=$?
- #true if there is an issue getting a job response from the minion
if [ $status -gt 0 ]; then
- echo 1;
+ echo " Minion did not respond" >> "$setup_log" 2>&1
+ return 1;
else
- echo "Received job response from salt minion" >> "$setup_log" 2>&1
- echo 0;
+ echo " Received job response from salt minion" >> "$setup_log" 2>&1
+ return 0;
fi
-
}
check_soremote_pass() {
@@ -767,12 +766,12 @@ detect_os() {
disable_auto_start() {
- if crontab -l -u $INSTALLUSERNAME 2>&1 | grep so-setup > /dev/null 2>&1; then
+ if crontab -l -u $INSTALLUSERNAME 2>&1 | grep -q so-setup; then
# Remove the automated setup script from crontab, if it exists
logCmd "crontab -u $INSTALLUSERNAME -r"
fi
- if grep so-setup /home/$INSTALLUSERNAME/.bash_profile > /dev/null 2>&1; then
+ if grep -q so-setup /home/$INSTALLUSERNAME/.bash_profile; then
# Truncate last line of the bash profile
info "Removing auto-run of setup from bash profile"
sed -i '$ d' /home/$INSTALLUSERNAME/.bash_profile >> "$setup_log" 2>&1
@@ -1104,10 +1103,10 @@ manager_pillar() {
manager_global() {
local global_pillar="$local_salt_dir/pillar/global.sls"
- if [ -z "$SENSOR_CHECKIN_INTERVAL_MS" ]; then
- SENSOR_CHECKIN_INTERVAL_MS=10000
+ if [ -z "$NODE_CHECKIN_INTERVAL_MS" ]; then
+ NODE_CHECKIN_INTERVAL_MS=10000
if [ "$install_type" = 'EVAL' ] || [ "$install_type" = 'STANDALONE' ] || [ "$install_type" = 'IMPORT' ]; then
- SENSOR_CHECKIN_INTERVAL_MS=1000
+ NODE_CHECKIN_INTERVAL_MS=1000
fi
fi
@@ -1167,7 +1166,9 @@ manager_global() {
" imagerepo: '$IMAGEREPO'"\
" pipeline: 'redis'"\
"sensoroni:"\
- " sensor_checkin_interval_ms: $SENSOR_CHECKIN_INTERVAL_MS"\
+ " node_address: '$MAINIP'"\
+ " node_description: '$NODE_DESCRIPTION'"\
+ " node_checkin_interval_ms: $NODE_CHECKIN_INTERVAL_MS"\
"strelka:"\
" enabled: $STRELKA"\
" rules: 1"\
@@ -1382,13 +1383,47 @@ reserve_group_ids() {
reinstall_init() {
info "Putting system in state to run setup again"
+
+ if [[ $install_type =~ ^(MANAGER|EVAL|HELIXSENSOR|MANAGERSEARCH|STANDALONE|FLEET|IMPORT)$ ]]; then
+ local salt_services=( "salt-master" "salt-minion" )
+ else
+ local salt_services=( "salt-minion" )
+ fi
+
+ local service_retry_count=20
{
- # Kill any salt processes
- pkill -9 -ef /usr/bin/salt
+ if command -v salt-call &> /dev/null; then
+ # Disable schedule so highstate doesn't start running during the install
+ salt-call -l info schedule.disable
+
+ # Kill any currently running salt jobs, also to prevent issues with highstate.
+ salt-call -l info saltutil.kill_all_jobs
+ fi
+
+ # Kill any salt processes (safely)
+ for service in "${salt_services[@]}"; do
+ # Stop the service in the background so we can exit after a certain amount of time
+ systemctl stop "$service" &
+ local pid=$!
+
+ local count=0
+ while check_service_status "$service"; do
+ if [[ $count -gt $service_retry_count ]]; then
+ echo "Could not stop $service after 1 minute, exiting setup."
+
+ # Stop the systemctl process trying to kill the service, show user a message, then exit setup
+ kill -9 $pid
+ kill -SIGUSR1 "$(ps --pid $$ -oppid=)"; exit 1
+ fi
+
+ sleep 5
+ ((count++))
+ done
+ done
# Remove all salt configs
- rm -rf /etc/salt/global /etc/salt/minion /etc/salt/master /etc/salt/pki/*
+ rm -rf /etc/salt/grains /etc/salt/minion /etc/salt/pki/*
if command -v docker &> /dev/null; then
# Stop and remove all so-* containers so files can be changed with more safety
@@ -1409,7 +1444,7 @@ reinstall_init() {
# Remove the old launcher package in case the config changes
remove_package launcher-final
- } >> $setup_log 2>&1
+ } >> "$setup_log" 2>&1
}
backup_dir() {
@@ -1606,61 +1641,59 @@ salt_checkin() {
"salt-master" \
"salt-minion"
)
- local LOOP_COUNT=0
+ local count=0
+
for service in "${SALT_SERVICES[@]}"; do
- echo "Stopping service $service" >> "$setup_log" 2>&1
- systemctl stop "$service" >> "$setup_log" 2>&1
- LOOP_COUNT=0
- while ! (( $(check_service_status $service) )); do
- echo "$service still running" >> "$setup_log" 2>&1
- if [ $LOOP_COUNT -gt 60 ]; then
- echo "$service could not be stopped in 60 seconds, exiting" >> "$setup_log" 2>&1
- exit 1
+ {
+ echo "Restarting service $service"
+ systemctl restart "$service" &
+ local pid=$!
+ } >> "$setup_log" 2>&1
+
+ count=0
+ while ! (check_service_status "$service"); do
+ # On final loop, kill the pid trying to restart service and try to manually kill then start it
+ if [ $count -eq 12 ]; then
+ {
+ kill -9 "$pid"
+ systemctl kill "$service"
+ systemctl start "$service" &
+ local pid=$!
+ } >> "$setup_log" 2>&1
fi
- sleep 1;
- ((LOOP_COUNT+=1))
+
+ if [ $count -gt 12 ]; then
+ echo "$service could not be restarted in 120 seconds, exiting" >> "$setup_log" 2>&1
+ kill -9 "$pid"
+ kill -SIGUSR1 "$(ps --pid $$ -oppid=)"; exit 1
+ fi
+ sleep 10;
+ ((count++))
done
done
- sleep 5;
-
- for service in "${SALT_SERVICES[@]}"; do
- echo "Starting service $service" >> "$setup_log" 2>&1
- systemctl start "$service" >> "$setup_log" 2>&1
- LOOP_COUNT=0
- while (( $(check_service_status $service) )); do
- echo "$service still not running" >> "$setup_log" 2>&1
- if [ $LOOP_COUNT -gt 60 ]; then
- echo "$service could not be started in 60 seconds, exiting" >> "$setup_log" 2>&1
- exit 1
- fi
- sleep 1;
- ((LOOP_COUNT+=1))
- done
- done
-
- sleep 5;
-
- LOOP_COUNT=0
- while (( $(check_salt_master_status) )); do
+ count=0
+ while ! (check_salt_master_status); do
echo "salt minion cannot talk to salt master" >> "$setup_log" 2>&1
- if [ $LOOP_COUNT -gt 30 ]; then
+ if [ $count -gt 30 ]; then
echo "salt minion could not talk to salt master after 30 attempts, exiting" >> "$setup_log" 2>&1
- exit 1
+ kill -SIGUSR1 "$(ps --pid $$ -oppid=)"; exit 1
fi
sleep 1;
- ((LOOP_COUNT+=1))
+ ((count++))
done
- LOOP_COUNT=0
- while (( $(check_salt_minion_status) )); do
+ count=0
+ while ! (check_salt_minion_status); do
echo "salt master did not get a job response from salt minion" >> "$setup_log" 2>&1
- if [ $LOOP_COUNT -gt 30 ]; then
+ if [ $count -gt 30 ]; then
echo "salt master did not get a job response from salt minion after 30 attempts, exiting" >> "$setup_log" 2>&1
- exit 1
+ kill -SIGUSR1 "$(ps --pid $$ -oppid=)"; exit 1
fi
+ systemctl kill salt-minion
+ systemctl start salt-minion
sleep 1;
- ((LOOP_COUNT+=1))
+ ((count++))
done
echo " Confirming existence of the CA certificate"
diff --git a/setup/so-setup b/setup/so-setup
index 77c579cfc..8ee236bf1 100755
--- a/setup/so-setup
+++ b/setup/so-setup
@@ -428,72 +428,87 @@ whiptail_make_changes
# From here on changes will be made.
echo "1" > /root/accept_changes
-if [[ $is_reinstall ]]; then
- reinstall_init
-fi
+# Set up handler for setup to exit early (use `kill -SIGUSR1 "$(ps --pid $$ -oppid=)"; exit 1` in child scripts)
+trap 'catch $LINENO' SIGUSR1
+catch() {
+ info "Fatal error occurred at $1 in so-setup, failing setup."
+ whiptail_setup_failed
+ exit
+}
+
+# Init networking so rest of install works
if [[ -n "$TURBO" ]]; then
use_turbo_proxy
fi
if [[ "$setup_type" == 'iso' ]]; then
- # Init networking so rest of install works
- set_hostname
+ set_hostname >> $setup_log 2>&1
set_management_interface
fi
disable_ipv6
-disable_auto_start
if [[ "$setup_type" != 'iso' ]]; then
- set_hostname
+ set_hostname >> $setup_log 2>&1
fi
if [[ $is_minion ]]; then
add_mngr_ip_to_hosts
fi
-{
- mark_version;
- clear_manager;
-} >> $setup_log 2>&1
-
-
-if [[ $is_manager || $is_import ]]; then
- {
- generate_passwords;
- secrets_pillar;
- add_socore_user_manager;
- } >> $setup_log 2>&1
-fi
-
-if [[ $is_manager && ! $is_eval ]]; then
- add_soremote_user_manager >> $setup_log 2>&1
-fi
-
+# This block sets REDIRECTIT which is used by a function outside the below subshell
{
set_main_ip;
set_redirect;
} >> $setup_log 2>&1
-host_pillar >> $setup_log 2>&1
-
-if [[ $is_minion || $is_import ]]; then
- set_updates >> $setup_log 2>&1
- [ "$automated" == no ] && copy_ssh_key >> $setup_log 2>&1
-fi
-
# Begin install
{
# Set initial percentage to 0
export percentage=0
+
+ # Show initial progress message
+ set_progress_str 0 'Running initial configuration steps'
+
set_path
- if [[ $is_manager && $is_airgap ]]; then
- info "Creating airgap repo"
- create_repo >> $setup_log 2>&1
+ if [[ $is_reinstall ]]; then
+ reinstall_init
+ fi
+
+ disable_auto_start
+
+ {
+ mark_version;
+ clear_manager;
+ } >> $setup_log 2>&1
+
+
+ if [[ $is_manager || $is_import ]]; then
+ {
+ generate_passwords;
+ secrets_pillar;
+ add_socore_user_manager;
+ } >> $setup_log 2>&1
+ fi
+
+ if [[ $is_manager && ! $is_eval ]]; then
+ add_soremote_user_manager >> $setup_log 2>&1
+ fi
+
+ host_pillar >> $setup_log 2>&1
+
+ if [[ $is_minion || $is_import ]]; then
+ set_updates >> $setup_log 2>&1
+ [ "$automated" == no ] && copy_ssh_key >> $setup_log 2>&1
+ fi
+
+ if [[ $is_manager && $is_airgap ]]; then
+ info "Creating airgap repo"
+ create_repo >> $setup_log 2>&1
airgap_rules >> $setup_log 2>&1
- fi
+ fi
if [[ $is_minion ]]; then
set_progress_str 1 'Configuring firewall'
@@ -583,7 +598,7 @@ fi
if [[ $is_minion ]]; then
set_progress_str 22 'Checking if the Salt Minion needs to be updated'
- salt-call state.apply salt.minion -l info >> $setup_log 2>&1
+ salt-call state.apply -l info salt.minion >> $setup_log 2>&1
fi
set_progress_str 23 'Generating CA and checking in'