Put functions in correct order
@@ -17,6 +17,7 @@

# README - DO NOT DEFINE GLOBAL VARIABLES IN THIS FILE. Instead use so-variables.

### Begin Logging Section ###
log() {
  msg=$1
  level=${2:-I}
@@ -41,51 +42,7 @@ logCmd() {
  info "Executing command: $cmd"
  $cmd >> "$setup_log" 2>&1
}

filter_unused_nics() {

  if [[ $MNIC ]]; then local grep_string="$MNIC\|bond0"; else local grep_string="bond0"; fi

  # If we call this function and NICs have already been assigned to the bond interface then add them to the grep search string
  if [[ $BNICS ]]; then
    grep_string="$grep_string"
    for BONDNIC in "${BNICS[@]}"; do
      grep_string="$grep_string\|$BONDNIC"
    done
  fi

  # Finally, set filtered_nics to any NICs we aren't using (and ignore interfaces that aren't of use)
  filtered_nics=$(ip link | awk -F: '$0 !~ "lo|vir|veth|br|docker|wl|^[^0-9]"{print $2}' | grep -vwe "$grep_string" | sed 's/ //g')
  readarray -t filtered_nics <<< "$filtered_nics"

  nic_list=()
  for nic in "${filtered_nics[@]}"; do
    case $(cat "/sys/class/net/${nic}/carrier" 2>/dev/null) in
      1)
        nic_list+=("$nic" "Link UP " "OFF")
        ;;
      0)
        nic_list+=("$nic" "Link DOWN " "OFF")
        ;;
      *)
        nic_list+=("$nic" "Link UNKNOWN " "OFF")
        ;;
    esac
  done

  export nic_list
}

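# Illustrative note, not part of this commit: nic_list is built as whiptail
# checklist triplets (tag, description, state), so a caller could feed it to
# a dialog along the lines of:
#   whiptail --title "NIC Setup" --checklist "Select interfaces:" 20 60 10 "${nic_list[@]}"
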
calculate_useable_cores() {

  # Calculate reasonable core usage
  local cores_for_zeek=$(( (num_cpu_cores/2) - 1 ))
  local lb_procs_round
  lb_procs_round=$(printf "%.0f\n" $cores_for_zeek)

  if [ "$lb_procs_round" -lt 1 ]; then lb_procs=1; else lb_procs=$lb_procs_round; fi
  export lb_procs
}
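# Worked example, illustrative only: with num_cpu_cores=16, cores_for_zeek = (16/2) - 1 = 7,
# so lb_procs=7; with num_cpu_cores=2 the result would be 0 and the floor applies, giving lb_procs=1.
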
### End Logging Section ###

airgap_rules() {
  # Copy the rules for suricata if using Airgap
@@ -99,16 +56,6 @@ airgap_rules() {
  cp -Rv /root/SecurityOnion/agrules/strelka /nsm/repo/rules/
}

analyze_system() {
  title "System Characteristics"
  logCmd "uptime"
  logCmd "uname -a"
  logCmd "free -h"
  logCmd "lscpu"
  logCmd "df -h"
  logCmd "ip a"
}

accept_salt_key_remote() {
  systemctl restart salt-minion

@@ -146,24 +93,6 @@ addtotab_generate_templates() {

}

# $5 => (optional) password variable
so_add_user() {
  local username=$1
  local uid=$2
  local gid=$3
  local home_dir=$4
  if [ "$5" ]; then local pass=$5; fi

  echo "Add $username user" >> "$setup_log" 2>&1
  groupadd --gid "$gid" "$username"
  useradd -m --uid "$uid" --gid "$gid" --home-dir "$home_dir" "$username"

  # If a password has been passed in, set the password
  if [ "$pass" ]; then
    echo "$username":"$pass" | chpasswd --crypt-method=SHA512
  fi
}

add_socore_user_manager() {
  so_add_user "socore" "939" "939" "/opt/so" >> "$setup_log" 2>&1
}
@@ -172,29 +101,6 @@ add_soremote_user_manager() {
  so_add_user "soremote" "947" "947" "/home/soremote" "$SOREMOTEPASS1" >> "$setup_log" 2>&1
}

wait_for_file() {
  local filename=$1
  local max_attempts=$2 # this is multiplied by the wait interval, so make sure it isn't too large
  local cur_attempts=0
  local wait_interval=$3
  local total_time=$(( max_attempts * wait_interval ))
  local date
  date=$(date)

  while [[ $cur_attempts -lt $max_attempts ]]; do
    if [ -f "$filename" ]; then
      echo "File $filename found at $date" >> "$setup_log" 2>&1
      return 0
    else
      ((cur_attempts++))
      echo "File $filename does not exist; waiting ${wait_interval}s then checking again ($cur_attempts/$max_attempts)..." >> "$setup_log" 2>&1
      sleep "$wait_interval"
    fi
  done
  echo "Could not find $filename after waiting ${total_time}s" >> "$setup_log" 2>&1
  return 1
}

add_web_user() {
  wait_for_file /opt/so/conf/kratos/db/db.sqlite 30 5
  {
@@ -204,22 +110,25 @@ add_web_user() {
  } >> "/root/so-user-add.log" 2>&1
}

# Create a secrets pillar so that passwords survive re-install
secrets_pillar(){
  if [ ! -f $local_salt_dir/pillar/secrets.sls ]; then
    echo "Creating Secrets Pillar" >> "$setup_log" 2>&1
    mkdir -p $local_salt_dir/pillar
    printf '%s\n'\
      "secrets:"\
      " mysql: $MYSQLPASS"\
      " playbook_db: $PLAYBOOKDBPASS"\
      " playbook_admin: $PLAYBOOKADMINPASS"\
      " playbook_automation: $PLAYBOOKAUTOMATIONPASS"\
      " grafana_admin: $GRAFANAPASS"\
      " fleet: $FLEETPASS"\
      " fleet_jwt: $FLEETJWT"\
      " fleet_enroll-secret: False" > $local_salt_dir/pillar/secrets.sls
  fi
analyze_system() {
  title "System Characteristics"
  logCmd "uptime"
  logCmd "uname -a"
  logCmd "free -h"
  logCmd "lscpu"
  logCmd "df -h"
  logCmd "ip a"
}

calculate_useable_cores() {

  # Calculate reasonable core usage
  local cores_for_zeek=$(( (num_cpu_cores/2) - 1 ))
  local lb_procs_round
  lb_procs_round=$(printf "%.0f\n" $cores_for_zeek)

  if [ "$lb_procs_round" -lt 1 ]; then lb_procs=1; else lb_procs=$lb_procs_round; fi
  export lb_procs
}

check_admin_pass() {
@@ -825,6 +734,18 @@ check_requirements() {
  fi
}

compare_main_nic_ip() {
  if [[ "$MAINIP" != "$MNIC_IP" ]]; then
    read -r -d '' message <<- EOM
The IP being routed by Linux is not the IP address assigned to the management interface ($MNIC).

This is not a supported configuration, please remediate and rerun setup.
EOM
    whiptail --title "Security Onion Setup" --msgbox "$message" 10 75
    kill -SIGINT "$(ps --pid $$ -oppid=)"; exit 1
  fi
}

compare_versions() {
  manager_ver=$($sshcmd -i /root/.ssh/so.key soremote@"$MSRV" cat /etc/soversion)

@@ -1219,6 +1140,95 @@ download_repo_tarball() {
  } >> "$setup_log" 2>&1
}

elasticsearch_pillar() {

  local pillar_file=$temp_install_dir/pillar/minions/$MINION_ID.sls

  # Create the node pillar
  printf '%s\n'\
    "elasticsearch:"\
    " mainip: '$MAINIP'"\
    " mainint: '$MNIC'"\
    " esheap: '$NODE_ES_HEAP_SIZE'" >> "$pillar_file"
  if [ -n "$ESCLUSTERNAME" ]; then
    printf '%s\n'\
      " esclustername: $ESCLUSTERNAME" >> "$pillar_file"
  else
    printf '%s\n'\
      " esclustername: {{ grains.host }}" >> "$pillar_file"
  fi
  printf '%s\n'\
    " node_type: '$NODETYPE'"\
    " es_port: $node_es_port"\
    " log_size_limit: $log_size_limit"\
    " node_route_type: 'hot'"\
    "" >> "$pillar_file"

  printf '%s\n'\
    "logstash_settings:"\
    " ls_pipeline_batch_size: $LSPIPELINEBATCH"\
    " ls_input_threads: $LSINPUTTHREADS"\
    " lsheap: $NODE_LS_HEAP_SIZE"\
    " ls_pipeline_workers: $num_cpu_cores"\
    "" >> "$pillar_file"

}

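# Illustrative only, not part of this commit: with example values such as MAINIP=10.0.0.5,
# MNIC=eth0 and NODE_ES_HEAP_SIZE=4000m, the minion pillar written above would render
# roughly as:
#   elasticsearch:
#    mainip: '10.0.0.5'
#    mainint: 'eth0'
#    esheap: '4000m'
#    esclustername: {{ grains.host }}
#    node_route_type: 'hot'
#   logstash_settings:
#    lsheap: <NODE_LS_HEAP_SIZE>
#    ls_pipeline_workers: <num_cpu_cores>
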
es_heapsize() {

  # Determine ES Heap Size
  if [ "$total_mem" -lt 8000 ] ; then
    ES_HEAP_SIZE="600m"
  elif [ "$total_mem" -ge 100000 ]; then
    # Set a max of 25GB for heap size
    # https://www.elastic.co/guide/en/elasticsearch/guide/current/heap-sizing.html
    ES_HEAP_SIZE="25000m"
  else
    # Set heap size to 25% of available memory
    ES_HEAP_SIZE=$(( total_mem / 4 ))"m"
  fi
  export ES_HEAP_SIZE

  if [[ "$install_type" =~ ^(EVAL|MANAGERSEARCH|STANDALONE|IMPORT)$ ]]; then
    NODE_ES_HEAP_SIZE=$ES_HEAP_SIZE
    export NODE_ES_HEAP_SIZE
  fi
}

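# Worked example, illustrative only: with total_mem=16000 (MB) neither cutoff applies,
# so ES_HEAP_SIZE=$(( 16000 / 4 ))"m" = "4000m"; total_mem below 8000 is pinned to "600m"
# and anything at or above 100000 is capped at "25000m".
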
filter_unused_nics() {

  if [[ $MNIC ]]; then local grep_string="$MNIC\|bond0"; else local grep_string="bond0"; fi

  # If we call this function and NICs have already been assigned to the bond interface then add them to the grep search string
  if [[ $BNICS ]]; then
    grep_string="$grep_string"
    for BONDNIC in "${BNICS[@]}"; do
      grep_string="$grep_string\|$BONDNIC"
    done
  fi

  # Finally, set filtered_nics to any NICs we aren't using (and ignore interfaces that aren't of use)
  filtered_nics=$(ip link | awk -F: '$0 !~ "lo|vir|veth|br|docker|wl|^[^0-9]"{print $2}' | grep -vwe "$grep_string" | sed 's/ //g')
  readarray -t filtered_nics <<< "$filtered_nics"

  nic_list=()
  for nic in "${filtered_nics[@]}"; do
    case $(cat "/sys/class/net/${nic}/carrier" 2>/dev/null) in
      1)
        nic_list+=("$nic" "Link UP " "OFF")
        ;;
      0)
        nic_list+=("$nic" "Link DOWN " "OFF")
        ;;
      *)
        nic_list+=("$nic" "Link UNKNOWN " "OFF")
        ;;
    esac
  done

  export nic_list
}

fireeye_pillar() {

  local fireeye_pillar_path=$local_salt_dir/pillar/fireeye
@@ -1369,6 +1379,33 @@ import_registry_docker() {
  fi
}

# Set Logstash heap size based on total memory
ls_heapsize() {

  if [ "$total_mem" -ge 32000 ]; then
    LS_HEAP_SIZE='1000m'
    return
  fi

  case "$install_type" in
    'MANAGERSEARCH' | 'HEAVYNODE' | 'HELIXSENSOR' | 'STANDALONE')
      LS_HEAP_SIZE='1000m'
      ;;
    'EVAL')
      LS_HEAP_SIZE='700m'
      ;;
    *)
      LS_HEAP_SIZE='500m'
      ;;
  esac
  export LS_HEAP_SIZE

  if [[ "$install_type" =~ ^(EVAL|MANAGERSEARCH|STANDALONE)$ ]]; then
    NODE_LS_HEAP_SIZE=$LS_HEAP_SIZE
    export NODE_LS_HEAP_SIZE
  fi
}

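# Illustrative summary, not part of this commit: hosts with at least 32000 MB of RAM get a
# 1000m Logstash heap regardless of install type; below that, MANAGERSEARCH/HEAVYNODE/
# HELIXSENSOR/STANDALONE get 1000m, EVAL gets 700m, and any other install type falls back to 500m.
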
manager_pillar() {

  local pillar_file=$temp_install_dir/pillar/minions/$MINION_ID.sls
@@ -1608,6 +1645,11 @@ manager_global() {
  printf '%s\n' '----' >> "$setup_log" 2>&1
}

mark_version() {
  # Drop a file with the current version
  echo "$SOVERSION" > /etc/soversion
}

minio_generate_keys() {

  local charSet="[:graph:]"
@@ -1669,40 +1711,6 @@ network_setup() {
  } >> "$setup_log" 2>&1
}

elasticsearch_pillar() {

  local pillar_file=$temp_install_dir/pillar/minions/$MINION_ID.sls

  # Create the node pillar
  printf '%s\n'\
    "elasticsearch:"\
    " mainip: '$MAINIP'"\
    " mainint: '$MNIC'"\
    " esheap: '$NODE_ES_HEAP_SIZE'" >> "$pillar_file"
  if [ -n "$ESCLUSTERNAME" ]; then
    printf '%s\n'\
      " esclustername: $ESCLUSTERNAME" >> "$pillar_file"
  else
    printf '%s\n'\
      " esclustername: {{ grains.host }}" >> "$pillar_file"
  fi
  printf '%s\n'\
    " node_type: '$NODETYPE'"\
    " es_port: $node_es_port"\
    " log_size_limit: $log_size_limit"\
    " node_route_type: 'hot'"\
    "" >> "$pillar_file"

  printf '%s\n'\
    "logstash_settings:"\
    " ls_pipeline_batch_size: $LSPIPELINEBATCH"\
    " ls_input_threads: $LSINPUTTHREADS"\
    " lsheap: $NODE_LS_HEAP_SIZE"\
    " ls_pipeline_workers: $num_cpu_cores"\
    "" >> "$pillar_file"

}

parse_install_username() {
  # parse out the install username so things copy correctly
  INSTALLUSERNAME=${SUDO_USER:-${USER}}
@@ -2140,6 +2148,24 @@ salt_firstcheckin() {
  salt-call state.show_top >> /dev/null 2>&1 # send output to /dev/null because we don't actually care about the output
}

# Create a secrets pillar so that passwords survive re-install
secrets_pillar(){
  if [ ! -f $local_salt_dir/pillar/secrets.sls ]; then
    echo "Creating Secrets Pillar" >> "$setup_log" 2>&1
    mkdir -p $local_salt_dir/pillar
    printf '%s\n'\
      "secrets:"\
      " mysql: $MYSQLPASS"\
      " playbook_db: $PLAYBOOKDBPASS"\
      " playbook_admin: $PLAYBOOKADMINPASS"\
      " playbook_automation: $PLAYBOOKAUTOMATIONPASS"\
      " grafana_admin: $GRAFANAPASS"\
      " fleet: $FLEETPASS"\
      " fleet_jwt: $FLEETJWT"\
      " fleet_enroll-secret: False" > $local_salt_dir/pillar/secrets.sls
  fi
}

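# Illustrative only, not part of this commit: the resulting $local_salt_dir/pillar/secrets.sls
# is a small YAML pillar along the lines of:
#   secrets:
#    mysql: <generated password>
#    playbook_db: <generated password>
#    grafana_admin: <generated password>
#    fleet_enroll-secret: False
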
set_base_heapsizes() {
  es_heapsize
  ls_heapsize
@@ -2155,18 +2181,6 @@ set_main_ip() {
  MNIC_IP=$(ip a s "$MNIC" | grep -oE 'inet [0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}' | cut -d' ' -f2)
}

compare_main_nic_ip() {
  if [[ "$MAINIP" != "$MNIC_IP" ]]; then
    read -r -d '' message <<- EOM
The IP being routed by Linux is not the IP address assigned to the management interface ($MNIC).

This is not a supported configuration, please remediate and rerun setup.
EOM
    whiptail --title "Security Onion Setup" --msgbox "$message" 10 75
    kill -SIGINT "$(ps --pid $$ -oppid=)"; exit 1
  fi
}

# Add /usr/sbin to everyone's path
set_path() {
  echo "complete -cf sudo" > /etc/profile.d/securityonion.sh
@@ -2440,6 +2454,24 @@ set_updates() {
  fi
}

# $5 => (optional) password variable
so_add_user() {
  local username=$1
  local uid=$2
  local gid=$3
  local home_dir=$4
  if [ "$5" ]; then local pass=$5; fi

  echo "Add $username user" >> "$setup_log" 2>&1
  groupadd --gid "$gid" "$username"
  useradd -m --uid "$uid" --gid "$gid" --home-dir "$home_dir" "$username"

  # If a password has been passed in, set the password
  if [ "$pass" ]; then
    echo "$username":"$pass" | chpasswd --crypt-method=SHA512
  fi
}

steno_pillar() {

  local pillar_file=$temp_install_dir/pillar/minions/$MINION_ID.sls
@@ -2451,11 +2483,6 @@ steno_pillar() {

}

mark_version() {
  # Drop a file with the current version
  echo "$SOVERSION" > /etc/soversion
}

update_sudoers_for_testing() {
  if [ -n "$TESTING" ]; then
    info "Ensuring $INSTALLUSERNAME has password-less sudo access for automated testing purposes."
@@ -2507,56 +2534,29 @@ use_turbo_proxy() {
  fi
}

# Set Logstash heap size based on total memory
ls_heapsize() {
wait_for_file() {
  local filename=$1
  local max_attempts=$2 # this is multiplied by the wait interval, so make sure it isn't too large
  local cur_attempts=0
  local wait_interval=$3
  local total_time=$(( max_attempts * wait_interval ))
  local date
  date=$(date)

  if [ "$total_mem" -ge 32000 ]; then
    LS_HEAP_SIZE='1000m'
    return
  fi

  case "$install_type" in
    'MANAGERSEARCH' | 'HEAVYNODE' | 'HELIXSENSOR' | 'STANDALONE')
      LS_HEAP_SIZE='1000m'
      ;;
    'EVAL')
      LS_HEAP_SIZE='700m'
      ;;
    *)
      LS_HEAP_SIZE='500m'
      ;;
  esac
  export LS_HEAP_SIZE

  if [[ "$install_type" =~ ^(EVAL|MANAGERSEARCH|STANDALONE)$ ]]; then
    NODE_LS_HEAP_SIZE=$LS_HEAP_SIZE
    export NODE_LS_HEAP_SIZE
  fi
  while [[ $cur_attempts -lt $max_attempts ]]; do
    if [ -f "$filename" ]; then
      echo "File $filename found at $date" >> "$setup_log" 2>&1
      return 0
    else
      ((cur_attempts++))
      echo "File $filename does not exist; waiting ${wait_interval}s then checking again ($cur_attempts/$max_attempts)..." >> "$setup_log" 2>&1
      sleep "$wait_interval"
    fi
  done
  echo "Could not find $filename after waiting ${total_time}s" >> "$setup_log" 2>&1
  return 1
}


es_heapsize() {

  # Determine ES Heap Size
  if [ "$total_mem" -lt 8000 ] ; then
    ES_HEAP_SIZE="600m"
  elif [ "$total_mem" -ge 100000 ]; then
    # Set a max of 25GB for heap size
    # https://www.elastic.co/guide/en/elasticsearch/guide/current/heap-sizing.html
    ES_HEAP_SIZE="25000m"
  else
    # Set heap size to 25% of available memory
    ES_HEAP_SIZE=$(( total_mem / 4 ))"m"
  fi
  export ES_HEAP_SIZE

  if [[ "$install_type" =~ ^(EVAL|MANAGERSEARCH|STANDALONE|IMPORT)$ ]]; then
    NODE_ES_HEAP_SIZE=$ES_HEAP_SIZE
    export NODE_ES_HEAP_SIZE
  fi
}


whiptail_prog_new_message() {
  local message=$1
  set_progress_str "$percentage" "$message"
