Mirror of https://github.com/Security-Onion-Solutions/securityonion.git (synced 2025-12-13 12:42:56 +01:00)

merge with dev and fix merge conflict in so-functions https://github.com/Security-Onion-Solutions/securityonion/issues/3264

@@ -35,6 +35,7 @@ ADMINPASS2=onionuser
HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
HNSENSOR=inherit
HOSTNAME=distributed-search
INTERWEBS=AIRGAP
install_type=SEARCHNODE
# LSINPUTBATCHCOUNT=
# LSINPUTTHREADS=

@@ -35,6 +35,7 @@ ZEEKVERSION=ZEEK
HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
HNSENSOR=inherit
HOSTNAME=distributed-sensor
INTERWEBS=AIRGAP
install_type=SENSOR
# LSINPUTBATCHCOUNT=
# LSINPUTTHREADS=

@@ -41,7 +41,7 @@ install_type=EVAL
# LSPIPELINEBATCH=
# LSPIPELINEWORKERS=
MANAGERADV=BASIC
MANAGERUPDATES=1
MANAGERUPDATES=0
# MDNS=
# MGATEWAY=
# MIP=

@@ -42,7 +42,7 @@ INTERWEBS=AIRGAP
# LSPIPELINEBATCH=
# LSPIPELINEWORKERS=
MANAGERADV=BASIC
MANAGERUPDATES=1
MANAGERUPDATES=0
# MDNS=
# MGATEWAY=
# MIP=

@@ -41,7 +41,7 @@ install_type=IMPORT
# LSPIPELINEBATCH=
# LSPIPELINEWORKERS=
MANAGERADV=BASIC
MANAGERUPDATES=1
MANAGERUPDATES=0
# MDNS=
# MGATEWAY=
# MIP=

@@ -41,7 +41,7 @@ install_type=IMPORT
# LSPIPELINEBATCH=
# LSPIPELINEWORKERS=
MANAGERADV=BASIC
MANAGERUPDATES=1
MANAGERUPDATES=0
# MDNS=
# MGATEWAY=
# MIP=

@@ -41,7 +41,7 @@ install_type=IMPORT
# LSPIPELINEBATCH=
# LSPIPELINEWORKERS=
MANAGERADV=BASIC
MANAGERUPDATES=1
MANAGERUPDATES=0
# MDNS=
# MGATEWAY=
# MIP=

@@ -44,6 +44,21 @@ logCmd() {
}
### End Logging Section ###

airgap_repo() {
# Remove all the repo files
rm -rf /etc/yum.repos.d/*
echo "[airgap_repo]" > /etc/yum.repos.d/airgap_repo.repo
if $is_manager; then
echo "baseurl=https://$HOSTNAME/repo" >> /etc/yum.repos.d/airgap_repo.repo
else
echo "baseurl=https://$MSRV/repo" >> /etc/yum.repos.d/airgap_repo.repo
fi
echo "gpgcheck=1" >> /etc/yum.repos.d/airgap_repo.repo
echo "sslverify=0" >> /etc/yum.repos.d/airgap_repo.repo
echo "name=Airgap Repo" >> /etc/yum.repos.d/airgap_repo.repo
echo "enabled=1" >> /etc/yum.repos.d/airgap_repo.repo
}

airgap_rules() {
# Copy the rules for suricata if using Airgap
mkdir -p /nsm/repo/rules

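For reference, a quick way to sanity-check what airgap_repo() leaves behind. The expected output below is a sketch derived from the echo calls above; it assumes the node is a manager with the illustrative hostname "manager".

# Illustrative check of the repo file written by airgap_repo()
cat /etc/yum.repos.d/airgap_repo.repo
# [airgap_repo]
# baseurl=https://manager/repo
# gpgcheck=1
# sslverify=0
# name=Airgap Repo
# enabled=1
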
@@ -147,6 +162,25 @@ check_hive_init() {
docker rm so-thehive
}

check_manager_state() {
echo "Checking state of manager services. This may take a moment..."
retry 2 15 "__check_so_status" >> $setup_log 2>&1 && retry 2 15 "__check_salt_master" >> $setup_log 2>&1 && return 0 || return 1
}

__check_so_status() {
local so_status_output
so_status_output=$($sshcmd -i /root/.ssh/so.key soremote@"$MSRV" cat /opt/so/log/sostatus/status.log)
[[ -z $so_status_output ]] && so_status_output=1
return $so_status_output
}

__check_salt_master() {
local salt_master_status
salt_master_status=$($sshcmd -i /root/.ssh/so.key soremote@"$MSRV" systemctl is-active --quiet salt-master)
[[ -z $salt_master_status ]] && salt_master_status=1
return $salt_master_status
}

check_network_manager_conf() {
local gmdconf="/usr/lib/NetworkManager/conf.d/10-globally-managed-devices.conf"
local nmconf="/etc/NetworkManager/NetworkManager.conf"

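check_manager_state() relies on the repo's retry helper and on the shell convention that a zero return code means healthy. The real helper is defined elsewhere in so-functions and may differ; a minimal stand-in matching how it is invoked above (retry <attempts> <delay> "<command>") would look roughly like this:

# Hypothetical stand-in for the retry helper used above: run "$3" up to $1
# times, sleeping $2 seconds between failed attempts, return 0 on first success.
retry() {
  local attempts=$1 delay=$2 cmd=$3 i
  for ((i = 1; i <= attempts; i++)); do
    eval "$cmd" && return 0
    (( i < attempts )) && sleep "$delay"
  done
  return 1
}
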
@@ -159,11 +193,6 @@ check_network_manager_conf() {
systemctl restart NetworkManager
} >> "$setup_log" 2>&1
fi

#if test -f "$nmconf"; then
# sed -i 's/managed=false/managed=true/g' "$nmconf" >> "$setup_log" 2>&1
# systemctl restart NetworkManager >> "$setup_log" 2>&1
# fi

if [[ ! -d "$preupdir" ]]; then
mkdir "$preupdir" >> "$setup_log" 2>&1

@@ -400,7 +429,7 @@ collect_hostname() {

if [[ $HOSTNAME == 'securityonion' ]]; then # Will only check HOSTNAME=securityonion once
if ! (whiptail_avoid_default_hostname); then
whiptail_set_hostname
whiptail_set_hostname "$HOSTNAME"
fi
fi

@@ -486,6 +515,22 @@ collect_node_ls_pipeline_worker_count() {
done
}

collect_ntp_servers() {
if whiptail_ntp_ask; then
[[ $is_airgap ]] && ntp_string=""
whiptail_ntp_servers "$ntp_string"

while ! valid_ntp_list "$ntp_string"; do
whiptail_invalid_input
whiptail_ntp_servers "$ntp_string"
done

IFS="," read -r -a ntp_servers <<< "$ntp_string" # Split string on commas into array
else
ntp_servers=()
fi
}

collect_oinkcode() {
whiptail_oinkcode

@@ -537,7 +582,7 @@ collect_patch_schedule_name_import() {

collect_proxy() {
[[ -n $TESTING ]] && return
collect_proxy_details
collect_proxy_details || return
while ! proxy_validate; do
if whiptail_invalid_proxy; then
collect_proxy_details no_ask

@@ -581,7 +626,9 @@ collect_proxy_details() {
else
so_proxy="$proxy_addr"
fi
export proxy
export so_proxy
else
return 1
fi
}

@@ -702,6 +749,42 @@ configure_minion() {
} >> "$setup_log" 2>&1
}

configure_ntp() {
local chrony_conf=/etc/chrony.conf

# Install chrony if it isn't already installed
if ! command -v chronyc &> /dev/null; then
yum -y install chrony
fi

[[ -f $chrony_conf ]] && mv $chrony_conf "$chrony_conf.bak"

printf '%s\n' "# NTP server list" > $chrony_conf

# Build list of servers
for addr in "${ntp_servers[@]}"; do
echo "server $addr iburst" >> $chrony_conf
done

printf '\n%s\n' "# Config options" >> $chrony_conf

printf '%s\n' \
'driftfile /var/lib/chrony/drift' \
'makestep 1.0 3' \
'rtcsync' \
'logdir /var/log/chrony' >> $chrony_conf

systemctl enable chronyd
systemctl restart chronyd

# Tell the chrony daemon to sync time & update the system time
# Since these commands only make a call to chronyd, wait after each command to make sure the changes are made
printf "Syncing chrony time to server: "
chronyc -a 'burst 4/4' && sleep 30
printf "Forcing chrony to update the time: "
chronyc -a makestep && sleep 30
}

checkin_at_boot() {
local minion_config=/etc/salt/minion

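With the default ntp_string from so-variables (0.pool.ntp.org,1.pool.ntp.org), the /etc/chrony.conf generated by configure_ntp() would look roughly as follows; the output is a sketch, not captured from a real run.

cat /etc/chrony.conf
# # NTP server list
# server 0.pool.ntp.org iburst
# server 1.pool.ntp.org iburst
#
# # Config options
# driftfile /var/lib/chrony/drift
# makestep 1.0 3
# rtcsync
# logdir /var/log/chrony
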
@@ -950,7 +1033,7 @@ create_repo() {

detect_cloud() {
echo "Testing if setup is running on a cloud instance..." | tee -a "$setup_log"
if ( curl --fail -s -m 5 http://169.254.169.254/latest/meta-data/instance-id > /dev/null ) || ( dmidecode -s bios-vendor | grep -q Google > /dev/null); then export is_cloud="true"; fi
if ( curl --fail -s -m 5 http://169.254.169.254/latest/meta-data/instance-id > /dev/null ) || ( dmidecode -s bios-vendor | grep -q Google > /dev/null) || [ -f /var/log/waagent.log ]; then export is_cloud="true"; fi
}

detect_os() {

@@ -1054,40 +1137,11 @@ disable_ipv6() {
} >> /etc/sysctl.conf
}

#disable_misc_network_features() {
# filter_unused_nics
# if [ ${#filtered_nics[@]} -ne 0 ]; then
# for unused_nic in "${filtered_nics[@]}"; do
# if [ -n "$unused_nic" ]; then
# echo "Disabling unused NIC: $unused_nic" >> "$setup_log" 2>&1
#
# # Disable DHCPv4/v6 and autoconnect
# nmcli con mod "$unused_nic" \
# ipv4.method disabled \
# ipv6.method ignore \
# connection.autoconnect "no" >> "$setup_log" 2>&1
#
# # Flush any existing IPs
# ip addr flush "$unused_nic" >> "$setup_log" 2>&1
# fi
# done
# fi
# # Disable IPv6
# {
# echo "net.ipv6.conf.all.disable_ipv6 = 1"
# echo "net.ipv6.conf.default.disable_ipv6 = 1"
# echo "net.ipv6.conf.lo.disable_ipv6 = 1"
# } >> /etc/sysctl.conf
#}

docker_install() {

if [ $OS = 'centos' ]; then
{
yum clean expire-cache;
if [[ ! $is_airgap ]]; then
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo;
fi
if [[ ! $is_iso ]]; then
yum -y install docker-ce-20.10.5-3.el7 containerd.io-1.4.4-3.1.el7;
fi

@@ -1436,8 +1490,6 @@ install_cleanup() {
info "Removing so-setup permission entry from sudoers file"
sed -i '/so-setup/d' /etc/sudoers
fi

so-ssh-harden -q
}

import_registry_docker() {

@@ -1539,8 +1591,7 @@ manager_pillar() {
printf '%s\n'\
" kratoskey: '$KRATOSKEY'"\
"" >> "$pillar_file"

}
}

manager_global() {
local global_pillar="$local_salt_dir/pillar/global.sls"

@@ -1564,7 +1615,6 @@ manager_global() {
"global:"\
" soversion: '$SOVERSION'"\
" hnmanager: '$HNMANAGER'"\
" ntpserver: '$NTPSERVER'"\
" dockernet: '$DOCKERNET'"\
" mdengine: '$ZEEKVERSION'"\
" ids: '$NIDS'"\

@@ -1718,7 +1768,6 @@ manager_global() {
" bip: '$DOCKERBIP'"\
"redis_settings:"\
" redis_maxmemory: 812" >> "$global_pillar"


printf '%s\n' '----' >> "$setup_log" 2>&1
}

@@ -1781,6 +1830,19 @@ network_setup() {
} >> "$setup_log" 2>&1
}

ntp_pillar() {
local pillar_file="$temp_install_dir"/pillar/minions/"$MINION_ID".sls

if [[ ${#ntp_servers[@]} -gt 0 ]]; then
printf '%s\n'\
"ntp:"\
" servers:" >> "$pillar_file"
for addr in "${ntp_servers[@]}"; do
printf '%s\n' " - '$addr'" >> "$pillar_file"
done
fi
}

parse_install_username() {
# parse out the install username so things copy correctly
INSTALLUSERNAME=${SUDO_USER:-${USER}}

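Using the same two example servers, ntp_pillar() appends a block like the following to the minion pillar file; the minion ID in the path and the indentation are illustrative.

cat "$temp_install_dir"/pillar/minions/"$MINION_ID".sls
# ...existing pillar content...
# ntp:
#   servers:
#     - '0.pool.ntp.org'
#     - '1.pool.ntp.org'
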
@@ -1829,12 +1891,13 @@ print_salt_state_apply() {
}

proxy_validate() {
echo "Testing proxy..."
local test_url="https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/master/KEYS"
proxy_test_err=$(curl -sS "$test_url" --proxy "$so_proxy" 2>&1)
proxy_test_err=$(curl -sS "$test_url" --proxy "$so_proxy" --connect-timeout 5 2>&1) # set short connection timeout so user doesn't sit waiting for proxy test to timeout
local ret=$?

if [[ $ret != 0 ]]; then
error "Could not reach $test_url using proxy $so_proxy"
error "Could not reach $test_url using proxy provided"
error "Received error: $proxy_test_err"
if [[ -n $TESTING ]]; then
error "Exiting setup"

@@ -1988,11 +2051,6 @@ saltify() {

# Install updates and Salt
if [ $OS = 'centos' ]; then
set_progress_str 5 'Installing Salt repo'
{
sudo rpm --import https://repo.saltstack.com/py3/redhat/7/x86_64/archive/3002.5/SALTSTACK-GPG-KEY.pub;
cp ./yum_repos/saltstack.repo /etc/yum.repos.d/saltstack.repo;
} >> "$setup_log" 2>&1
set_progress_str 6 'Installing various dependencies'
if [[ ! $is_iso ]]; then
logCmd "yum -y install wget nmap-ncat"

@@ -2001,49 +2059,31 @@ saltify() {
'MANAGER' | 'EVAL' | 'MANAGERSEARCH' | 'FLEET' | 'HELIXSENSOR' | 'STANDALONE'| 'IMPORT')
reserve_group_ids >> "$setup_log" 2>&1
if [[ ! $is_iso ]]; then
logCmd "yum -y install epel-release"
logCmd "yum -y install sqlite argon2 curl mariadb-devel python3-pip"
retry 50 10 "pip3 install --user influxdb" >> "$setup_log" 2>&1 || exit 1
logCmd "yum -y install sqlite argon2 curl mariadb-devel"
fi
# Download Ubuntu Keys in case manager updates = 1
mkdir -p /opt/so/gpg >> "$setup_log" 2>&1
if [[ ! $is_airgap ]]; then
logCmd "wget -q --inet4-only -O /opt/so/gpg/SALTSTACK-GPG-KEY.pub https://repo.saltstack.com/py3/ubuntu/18.04/amd64/archive/3002.5/SALTSTACK-GPG-KEY.pub"
logCmd "wget -q --inet4-only -O /opt/so/gpg/SALTSTACK-GPG-KEY.pub https://repo.saltstack.com/py3/ubuntu/18.04/amd64/archive/3003/SALTSTACK-GPG-KEY.pub"
logCmd "wget -q --inet4-only -O /opt/so/gpg/docker.pub https://download.docker.com/linux/ubuntu/gpg"
logCmd "wget -q --inet4-only -O /opt/so/gpg/GPG-KEY-WAZUH https://packages.wazuh.com/key/GPG-KEY-WAZUH"
logCmd "cp ./yum_repos/wazuh.repo /etc/yum.repos.d/wazuh.repo"
fi
set_progress_str 7 'Installing salt-master'
if [[ ! $is_iso ]]; then
logCmd "yum -y install salt-master-3002.5"
logCmd "yum -y install salt-master-3003"
fi
systemctl enable salt-master >> "$setup_log" 2>&1
;;
*)
if [ "$MANAGERUPDATES" = '1' ]; then
{
if [[ ! $is_airgap ]]; then
# Create the GPG Public Key for the Salt Repo
cp ./public_keys/salt.pem /etc/pki/rpm-gpg/saltstack-signing-key;

# Copy repo files over
cp ./yum_repos/saltstack.repo /etc/yum.repos.d/saltstack.repo;
else
info "This is airgap"
fi
} >> "$setup_log" 2>&1
fi
;;
esac
if [[ ! $is_airgap ]]; then
cp ./yum_repos/wazuh.repo /etc/yum.repos.d/wazuh.repo >> "$setup_log" 2>&1
yum clean expire-cache >> "$setup_log" 2>&1
fi
set_progress_str 8 'Installing salt-minion & python modules'
{
if [[ ! $is_iso ]]; then
yum -y install epel-release
yum -y install salt-minion-3002.5\
yum -y install salt-minion-3003\
python3\
python36-docker\
python36-dateutil\

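Since this hunk bumps the pinned Salt packages from 3002.5 to 3003 on CentOS, a quick post-install check of the installed versions might look like the following; the expected output is illustrative and the exact package release suffix may differ.

rpm -q salt-master salt-minion
# salt-master-3003-1.el7.noarch   (expected major version; release suffix may differ)
# salt-minion-3003-1.el7.noarch
salt-call --local test.version
# local:
#     3003
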
@@ -2095,8 +2135,8 @@ saltify() {
'MANAGER' | 'EVAL' | 'MANAGERSEARCH' | 'STANDALONE' | 'IMPORT' | 'HELIXSENSOR')

# Add saltstack repo(s)
wget -q --inet4-only -O - https://repo.saltstack.com"$py_ver_url_path"/ubuntu/"$ubuntu_version"/amd64/archive/3002.5/SALTSTACK-GPG-KEY.pub | apt-key add - >> "$setup_log" 2>&1
echo "deb http://repo.saltstack.com$py_ver_url_path/ubuntu/$ubuntu_version/amd64/archive/3002.5 $OSVER main" > /etc/apt/sources.list.d/saltstack.list 2>> "$setup_log"
wget -q --inet4-only -O - https://repo.saltstack.com"$py_ver_url_path"/ubuntu/"$ubuntu_version"/amd64/archive/3003/SALTSTACK-GPG-KEY.pub | apt-key add - >> "$setup_log" 2>&1
echo "deb http://repo.saltstack.com$py_ver_url_path/ubuntu/$ubuntu_version/amd64/archive/3003 $OSVER main" > /etc/apt/sources.list.d/saltstack.list 2>> "$setup_log"

# Add Docker repo
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - >> "$setup_log" 2>&1

@@ -2104,7 +2144,7 @@ saltify() {

# Get gpg keys
mkdir -p /opt/so/gpg >> "$setup_log" 2>&1
wget -q --inet4-only -O /opt/so/gpg/SALTSTACK-GPG-KEY.pub https://repo.saltstack.com$py_ver_url_path/ubuntu/"$ubuntu_version"/amd64/archive/3002.5/SALTSTACK-GPG-KEY.pub >> "$setup_log" 2>&1
wget -q --inet4-only -O /opt/so/gpg/SALTSTACK-GPG-KEY.pub https://repo.saltstack.com$py_ver_url_path/ubuntu/"$ubuntu_version"/amd64/archive/3003/SALTSTACK-GPG-KEY.pub >> "$setup_log" 2>&1
wget -q --inet4-only -O /opt/so/gpg/docker.pub https://download.docker.com/linux/ubuntu/gpg >> "$setup_log" 2>&1
wget -q --inet4-only -O /opt/so/gpg/GPG-KEY-WAZUH https://packages.wazuh.com/key/GPG-KEY-WAZUH >> "$setup_log" 2>&1

@@ -2117,7 +2157,7 @@ saltify() {
set_progress_str 6 'Installing various dependencies'
retry 50 10 "apt-get -y install sqlite3 argon2 libssl-dev" >> "$setup_log" 2>&1 || exit 1
set_progress_str 7 'Installing salt-master'
retry 50 10 "apt-get -y install salt-master=3002.5+ds-1" >> "$setup_log" 2>&1 || exit 1
retry 50 10 "apt-get -y install salt-master=3003+ds-1" >> "$setup_log" 2>&1 || exit 1
retry 50 10 "apt-mark hold salt-master" >> "$setup_log" 2>&1 || exit 1
;;
*)

@@ -2128,14 +2168,14 @@ saltify() {
echo "Using apt-key add to add SALTSTACK-GPG-KEY.pub and GPG-KEY-WAZUH" >> "$setup_log" 2>&1
apt-key add "$temp_install_dir"/gpg/SALTSTACK-GPG-KEY.pub >> "$setup_log" 2>&1
apt-key add "$temp_install_dir"/gpg/GPG-KEY-WAZUH >> "$setup_log" 2>&1
echo "deb http://repo.saltstack.com$py_ver_url_path/ubuntu/$ubuntu_version/amd64/archive/3002.5/ $OSVER main" > /etc/apt/sources.list.d/saltstack.list 2>> "$setup_log"
echo "deb http://repo.saltstack.com$py_ver_url_path/ubuntu/$ubuntu_version/amd64/archive/3003/ $OSVER main" > /etc/apt/sources.list.d/saltstack.list 2>> "$setup_log"
echo "deb https://packages.wazuh.com/3.x/apt/ stable main" > /etc/apt/sources.list.d/wazuh.list 2>> "$setup_log"
;;
esac

retry 50 10 "apt-get update" >> "$setup_log" 2>&1 || exit 1
set_progress_str 8 'Installing salt-minion & python modules'
retry 50 10 "apt-get -y install salt-minion=3002.5+ds-1 salt-common=3002.5+ds-1" >> "$setup_log" 2>&1 || exit 1
retry 50 10 "apt-get -y install salt-minion=3003+ds-1 salt-common=3003+ds-1" >> "$setup_log" 2>&1 || exit 1
retry 50 10 "apt-mark hold salt-minion salt-common" >> "$setup_log" 2>&1 || exit 1
if [[ $OSVER != 'xenial' ]]; then
retry 50 10 "apt-get -y install python3-pip python3-dateutil python3-m2crypto python3-mysqldb python3-packaging python3-influxdb" >> "$setup_log" 2>&1 || exit 1

@@ -2265,6 +2305,30 @@ secrets_pillar(){
fi
}

securityonion_repo() {
# Remove all the current repos
if [[ "$OS" == "centos" ]]; then
if [[ "$INTERWEBS" == "AIRGAP" ]]; then
echo "This is airgap I don't need to add this repo"
else
mkdir -p /root/oldrepos
mv -v /etc/yum.repos.d/* /root/oldrepos/
ls -la /etc/yum.repos.d/
rm -rf /etc/yum.repos.d
yum clean all
yum repolist all
mkdir -p /etc/yum.repos.d
if [[ ! $is_manager && "$MANAGERUPDATES" == "1" ]]; then
cp -f ../salt/repo/client/files/centos/securityonioncache.repo /etc/yum.repos.d/
else
cp -f ../salt/repo/client/files/centos/securityonion.repo /etc/yum.repos.d/
fi
fi
else
echo "This is Ubuntu"
fi
}

set_base_heapsizes() {
es_heapsize
ls_heapsize

@@ -2672,7 +2736,8 @@ update_sudoers() {

update_packages() {
if [ "$OS" = 'centos' ]; then
yum -y update >> "$setup_log"
yum repolist >> /dev/null
yum -y update --exclude=salt* >> "$setup_log"
else
retry 50 10 "apt-get -y update" >> "$setup_log" 2>&1 || exit 1
retry 50 10 "apt-get -y upgrade" >> "$setup_log" 2>&1 || exit 1

@@ -46,8 +46,8 @@ check_new_repos() {
if [[ $OS == 'centos' ]]; then
local repo_arr=(
"https://download.docker.com/linux/centos/docker-ce.repo"
"https://repo.saltstack.com/py3/redhat/7/x86_64/archive/3002.5/SALTSTACK-GPG-KEY.pub"
"https://repo.saltstack.com/py3/ubuntu/18.04/amd64/archive/3002.5/SALTSTACK-GPG-KEY.pub"
"https://repo.saltstack.com/py3/redhat/7/x86_64/archive/3003/SALTSTACK-GPG-KEY.pub"
"https://repo.saltstack.com/py3/ubuntu/18.04/amd64/archive/3003/SALTSTACK-GPG-KEY.pub"
"https://download.docker.com/linux/ubuntu/gpg"
"https://packages.wazuh.com/key/GPG-KEY-WAZUH"
"https://packages.wazuh.com/3.x/yum/"

@@ -59,7 +59,7 @@ check_new_repos() {
local repo_arr=(
"https://download.docker.com/linux/ubuntu/gpg"
"https://download.docker.com/linux/ubuntu"
"https://repo.saltstack.com$py_ver_url_path/ubuntu/$ubuntu_version/amd64/archive/3002.5/SALTSTACK-GPG-KEY.pub"
"https://repo.saltstack.com$py_ver_url_path/ubuntu/$ubuntu_version/amd64/archive/3003/SALTSTACK-GPG-KEY.pub"
"https://packages.wazuh.com/key/GPG-KEY-WAZUH"
"https://packages.wazuh.com"
)

@@ -47,6 +47,7 @@ source ./so-variables
# Parse command line arguments
setup_type=$1
automation=$2
WHATWOULDYOUSAYYAHDOHERE=setup

while [[ $# -gt 0 ]]; do
arg="$1"

@@ -167,10 +168,8 @@ set_ssh_cmds $automated
local_sbin="$(pwd)/../salt/common/tools/sbin"
export PATH=$PATH:$local_sbin

set_network_dev_status_list
set_palette >> $setup_log 2>&1


# Kernel messages can overwrite whiptail screen #812
# https://github.com/Security-Onion-Solutions/securityonion/issues/812
dmesg -D

@@ -265,7 +264,7 @@ elif [ "$install_type" = 'ANALYST' ]; then
fi

# Check if this is an airgap install
if [[ ( $is_manager || $is_import ) && $is_iso ]]; then
if [[ $is_iso || $is_minion ]]; then
whiptail_airgap
if [[ "$INTERWEBS" == 'AIRGAP' ]]; then
is_airgap=true

@@ -291,13 +290,22 @@ if ! [[ -f $install_opt_file ]]; then

[[ -f $net_init_file ]] && whiptail_net_reinit && reinit_networking=true

if [[ $reinit_networking ]] || ! [[ -f $net_init_file ]]; then
if [[ $reinit_networking ]] || ! [[ -f $net_init_file ]]; then
collect_hostname
fi

whiptail_node_description

if [[ $reinit_networking ]] || ! [[ -f $net_init_file ]]; then
network_init_whiptail
else
source "$net_init_file"
fi

if [[ $is_minion ]] || [[ $reinit_networking ]] || [[ $is_iso ]] && ! [[ -f $net_init_file ]]; then
whiptail_management_interface_setup
fi

if [[ $reinit_networking ]] || ! [[ -f $net_init_file ]]; then
network_init
fi

@@ -315,10 +323,6 @@ if ! [[ -f $install_opt_file ]]; then
[[ -n "$so_proxy" ]] && set_proxy >> $setup_log 2>&1
fi

if [[ $is_minion ]] || [[ $reinit_networking ]] || [[ $is_iso ]] && ! [[ -f $net_init_file ]]; then
whiptail_management_interface_setup
fi

if [[ $is_minion ]]; then
add_mngr_ip_to_hosts
fi

@@ -334,7 +338,8 @@ if ! [[ -f $install_opt_file ]]; then
"MNIC=$MNIC" \
"HOSTNAME=$HOSTNAME" \
"MSRV=$MSRV" \
"MSRVIP=$MSRVIP" > "$install_opt_file"
"MSRVIP=$MSRVIP" \
"NODE_DESCRIPTION=\"$NODE_DESCRIPTION\"" > "$install_opt_file"
[[ -n $so_proxy ]] && echo "so_proxy=$so_proxy" >> "$install_opt_file"
download_repo_tarball
exec bash /root/manager_setup/securityonion/setup/so-setup "${original_args[@]}"

@@ -433,6 +438,7 @@ if [[ $is_helix ]]; then
fi

if [[ $is_helix || $is_sensor ]]; then
set_network_dev_status_list
whiptail_sensor_nics
fi

@@ -534,6 +540,8 @@ if [[ $is_sensor && ! $is_eval ]]; then
fi
fi

[[ $is_iso ]] && collect_ntp_servers

if [[ $is_node && ! $is_eval ]]; then
whiptail_node_advanced
if [ "$NODESETUP" == 'NODEADVANCED' ]; then

@@ -550,7 +558,6 @@ if [[ $is_node && ! $is_eval ]]; then
LSPIPELINEWORKERS=$num_cpu_cores
LSPIPELINEBATCH=125
LSINPUTTHREADS=1
LSPIPELINEBATCH=125
fi
fi

@@ -564,14 +571,19 @@ fi

if [[ $is_manager || $is_import ]]; then collect_so_allow; fi

whiptail_make_changes
# This block sets REDIRECTIT which is used by a function outside the below subshell
set_redirect >> $setup_log 2>&1

if [[ $is_minion ]] && ! check_manager_state; then
echo "Manager was not in a good state" >> "$setup_log" 2>&1
whiptail_manager_error
fi

whiptail_end_settings

# From here on changes will be made.
echo "1" > /root/accept_changes

# This block sets REDIRECTIT which is used by a function outside the below subshell
set_redirect >> $setup_log 2>&1


# Begin install
{

@@ -581,6 +593,8 @@ set_redirect >> $setup_log 2>&1
# Show initial progress message
set_progress_str 0 'Running initial configuration steps'

[[ ${#ntp_servers[@]} -gt 0 ]] && configure_ntp >> $setup_log 2>&1

reserve_ports

set_path

@@ -613,6 +627,8 @@ set_redirect >> $setup_log 2>&1
fi

host_pillar >> $setup_log 2>&1
ntp_pillar >> $setup_log 2>&1


if [[ $is_minion || $is_import ]]; then
set_updates >> $setup_log 2>&1

@@ -630,7 +646,14 @@ set_redirect >> $setup_log 2>&1
fi

set_progress_str 2 'Updating packages'
update_packages >> $setup_log 2>&1
# Import the gpg keys
gpg_rpm_import >> $setup_log 2>&1
if [[ ! $is_airgap ]]; then
securityonion_repo >> $setup_log 2>&1
update_packages >> $setup_log 2>&1
else
airgap_repo >> $setup_log 2>&1
fi

if [[ $is_sensor || $is_helix || $is_import ]]; then
set_progress_str 3 'Generating sensor pillar'

@@ -888,6 +911,7 @@ set_redirect >> $setup_log 2>&1
set_progress_str 85 'Applying finishing touches'
filter_unused_nics >> $setup_log 2>&1
network_setup >> $setup_log 2>&1
so-ssh-harden >> $setup_log 2>&1

if [[ $is_manager || $is_import ]]; then
set_progress_str 87 'Adding user to SOC'

@@ -942,6 +966,7 @@ else
} | whiptail_gauge_post_setup "Running post-installation steps..."

whiptail_setup_complete
[[ $setup_type != 'iso' ]] && whitpail_ssh_warning
echo "Post-installation steps have completed." >> $setup_log 2>&1
fi


@@ -72,3 +72,6 @@ export install_opt_file

net_init_file=/root/net_init
export net_init_file

ntp_string="0.pool.ntp.org,1.pool.ntp.org"
export ntp_string

@@ -19,13 +19,18 @@ whiptail_airgap() {

[ -n "$TESTING" ] && return

INTERWEBS=$(whiptail --title "Security Onion Setup" --radiolist \
"Choose your install conditions:" 20 75 4 \
"STANDARD" "This manager has internet accesss" ON \
"AIRGAP" "This manager does not have internet access" OFF 3>&1 1>&2 2>&3 )
local node_str='node'
[[ $is_manager || $is_import ]] && node_str='manager'

INTERWEBS=$(whiptail --title "Security Onion Setup" --menu \
"How should this $node_str be installed?" 10 60 2 \
"Standard " "This $node_str has internet accesss" \
"Airgap " "This $node_str does not have internet access" 3>&1 1>&2 2>&3 )

local exitstatus=$?
whiptail_check_exitstatus $exitstatus

INTERWEBS=$(echo "${INTERWEBS^^}" | tr -d ' ')
}

whiptail_avoid_default_hostname() {

@@ -79,7 +84,7 @@ whiptail_bond_nics_mtu() {

whiptail_cancel() {

whiptail --title "Security Onion Setup" --msgbox "Cancelling Setup. No changes have been made." 8 75
whiptail --title "Security Onion Setup" --msgbox "Cancelling Setup." 8 75
if [ -d "/root/installtmp" ]; then
{
echo "/root/installtmp exists";

@@ -88,7 +93,7 @@ whiptail_cancel() {
} >> $setup_log 2>&1
fi

title "User cancelled setup, no changes made."
title "User cancelled setup."

exit
}

@@ -391,6 +396,7 @@ whiptail_dockernet_net() {
whiptail_check_exitstatus $exitstatus

}

whiptail_enable_components() {

[ -n "$TESTING" ] && return

@@ -423,6 +429,211 @@ whiptail_enable_components() {
done
}

whiptail_end_settings() {
[ -n "$TESTING" ] && return

# BASIC INFO (NETWORK, HOSTNAME, DESCRIPTION, ETC)

read -r -d '' end_msg <<- EOM
Node Type: $install_type
Hostname: $HOSTNAME
EOM

[[ -n $NODE_DESCRIPTION ]] && __append_end_msg "Description: $NODE_DESCRIPTION"

[[ $is_airgap ]] && __append_end_msg "Airgap: True"

if [[ $is_minion ]]; then
__append_end_msg "Manager Hostname: $MSRV"
__append_end_msg "Manager IP: $MSRVIP"
fi


[[ $is_iso ]] && __append_end_msg "Network: $address_type"

__append_end_msg "Management NIC: $MNIC"
__append_end_msg "Management IP: $MAINIP"

if [[ $address_type == 'STATIC' ]]; then
__append_end_msg "Gateway: $MGATEWAY"
__append_end_msg "DNS: $MDNS"
__append_end_msg "DNS Domain: $MSEARCH"
fi

if [[ -n $so_proxy ]]; then
__append_end_msg "Proxy:"
__append_end_msg " Server URL: $proxy_addr"
[[ -n $proxy_user ]] && __append_end_msg " User: $proxy_user"
else
__append_end_msg "Proxy: N/A"
fi

if [[ $is_sensor ]]; then
__append_end_msg "Bond NIC(s):"
for nic in "${BNICS[@]}"; do
__append_end_msg " - $nic"
done
[[ -n $MTU ]] && __append_end_msg "MTU: $MTU"
fi

local homenet_arr
if [[ -n $HNMANAGER ]]; then
__append_end_msg "Home Network(s):"
IFS="," read -r -a homenet_arr <<< "$HNMANAGER"
for net in "${homenet_arr[@]}"; do
__append_end_msg " - $net"
done
elif [[ -n $HNSENSOR ]]; then
__append_end_msg "Home Network(s):"
IFS="," read -r -a homenet_arr <<< "$HNSENSOR"
for net in "${homenet_arr[@]}"; do
__append_end_msg " - $net"
done
fi

[[ -n $REDIRECTIT ]] && __append_end_msg "Access URL: https://${REDIRECTIT}"

[[ -n $ALLOW_CIDR ]] && __append_end_msg "Allowed IP or Subnet: $ALLOW_CIDR"

[[ -n $WEBUSER ]] && __append_end_msg "Web User: $WEBUSER"

[[ -n $FLEETNODEUSER ]] && __append_end_msg "Fleet User: $FLEETNODEUSER"

if [[ $is_manager ]]; then
__append_end_msg "Enabled Optional Components:"
for component in "${COMPONENTS[@]}"; do
__append_end_msg " - $component"
done
fi

# METADATA / IDS

if [[ -n $ZEEKVERSION ]]; then
local md_tool_string=${ZEEKVERSION,;}
md_tool_string=${md_tool_string^}

__append_end_msg "Metadata Tool: $md_tool_string"
fi

[[ -n $RULESETUP ]] && __append_end_msg "IDS Ruleset: $RULESETUP"
[[ -n $OINKCODE ]] && __append_end_msg "Oinkcode: $OINKCODE"

# PATCH SCHEDULE

if [[ -n $PATCHSCHEDULENAME ]]; then
__append_end_msg "Patch Schedule:"
if [[ $PATCHSCHEDULENAME == 'auto'|| $PATCHSCHEDULENAME == 'manual' ]]; then
__append_end_msg " Type: $PATCHSCHEDULENAME"
else
__append_end_msg " Name: $PATCHSCHEDULENAME"
fi
if [[ ${#PATCHSCHEDULEDAYS[@]} -gt 0 ]]; then
__append_end_msg " Day(s):"
for day in "${PATCHSCHEDULEDAYS[@]}"; do
__append_end_msg " - $day"
done
fi
if [[ ${#PATCHSCHEDULEHOURS[@]} -gt 0 ]]; then
__append_end_msg " Hours(s):"
for hour in "${PATCHSCHEDULEHOURS[@]}"; do
__append_end_msg " - $hour"
done
fi
fi

# MISC

[[ $is_helix ]] && __append_end_msg "Helix API key: $HELIXAPIKEY"
[[ -n $DOCKERNET ]] && __append_end_msg "Docker network: $DOCKERNET"
if [[ -n $MANAGERUPDATES ]]; then
__append_end_msg "OS Package Updates: Manager"
else
__append_end_msg "OS Package Updates: Open"
fi
if [[ ${#ntp_servers[@]} -gt 0 ]]; then
__append_end_msg "NTP Servers:"
for server in "${ntp_servers[@]}"; do
__append_end_msg " - $server"
done
fi

if [[ $NSMSETUP != 'ADVANCED' ]]; then
[[ -n $BASICZEEK ]] && __append_end_msg "Zeek Processes: $BASICZEEK"
[[ -n $BASICSURI ]] && __append_end_msg "Suricata Processes: $BASICSURI"
fi

# ADVANCED OR REGULAR

if [[ $NODESETUP == 'NODEADVANCED' ]]; then
__append_end_msg "Advanced Node Settings:"
__append_end_msg " Elasticsearch Heap Size: $NODE_ES_HEAP_SIZE"
__append_end_msg " Logstash Heap Size: $NODE_LS_HEAP_SIZE"
__append_end_msg " Logstash Worker Count: $LSPIPELINEWORKERS"
__append_end_msg " Logstash Batch Size: $LSPIPELINEBATCH"
__append_end_msg " Logstash Input Threads: $LSINPUTTHREADS"
__append_end_msg " Curator Day Cutoff: $CURCLOSEDAYS days"
__append_end_msg " Elasticsearch Storage Space: ${log_size_limit}GB"
else
__append_end_msg "Elasticsearch Heap Size: $NODE_ES_HEAP_SIZE"
__append_end_msg "Logstash Heap Size: $NODE_LS_HEAP_SIZE"
__append_end_msg "Logstash Worker Count: $LSPIPELINEWORKERS"
__append_end_msg "Logstash Batch Size: $LSPIPELINEBATCH"
__append_end_msg "Logstash Input Threads: $LSINPUTTHREADS"
__append_end_msg "Curator Close After: $CURCLOSEDAYS days"
__append_end_msg "Elasticsearch Storage Space: ${log_size_limit}GB"
fi


# ADVANCED
if [[ $MANAGERADV == 'ADVANCED' ]]; then
__append_end_msg "Advanced Manager Settings:"
[[ -n $ESCLUSTERNAME ]] && __append_end_msg " ES Cluster Name: $ESCLUSTERNAME"
if [[ ${#BLOGS[@]} -gt 0 ]]; then
__append_end_msg " Zeek Logs Enabled:"
for log in "${BLOGS[@]}"; do
__append_end_msg " - $log"
done
fi
fi

if [[ $NSMSETUP == 'ADVANCED' ]]; then
__append_end_msg "Advanced NSM Settings:"
if [[ ${#ZEEKPINS[@]} -gt 0 ]]; then
local zeek_pin_str
for core in "${ZEEKPINS[@]}"; do
zeek_pin_str="${zeek_pin_str}${core},"
done
zeek_pin_str=${zeek_pin_str%,}
__append_end_msg " Zeek Pinned Cores: ${zeek_pin_str}"
fi
if [[ ${#SURIPINS[@]} -gt 0 ]]; then
local suri_pin_str
for core in "${SURIPINS[@]}"; do
suri_pin_str="${suri_pin_str}${core},"
done
suri_pin_str=${suri_pin_str%,}
__append_end_msg " Suricata Pinned Cores: ${suri_pin_str}"
fi
fi

whiptail --title "The following options have been set, would you like to proceed?" --yesno "$end_msg" 24 75 --scrolltext

local exitstatus=$?
whiptail_check_exitstatus $exitstatus

echo "$end_msg" > /root/install_summary
printf '%s\n' 'Install summary:' "$end_msg" >> "$setup_log"
}

__append_end_msg() {
local newline=$1

read -r -d '' end_msg <<- EOM
$end_msg
$newline
EOM
}

whiptail_eval_adv() {

[ -n "$TESTING" ] && return

@@ -934,6 +1145,22 @@ whiptail_manager_adv_service_zeeklogs() {

}

whiptail_manager_error() {

[ -n "$TESTING" ] && return

local msg
read -r -d '' msg <<- EOM
Setup could not determine if the manager $MSRV is in a good state.

Continuing without verifying all services on the manager are running may result in a failure.

Would you like to continue anyway?
EOM

whiptail --title "Security Onion Setup" --yesno "$msg" 13 75 || whiptail_check_exitstatus 1
}

whiptail_manager_updates() {

[ -n "$TESTING" ] && return

@@ -1044,6 +1271,16 @@ whiptail_node_advanced() {

}

whiptail_node_description() {
[ -n "$TESTING" ] && return

NODE_DESCRIPTION=$(whiptail --title "Security Onion Setup" \
--inputbox "Enter a short description for the node or press ENTER to leave blank:" 10 75 3>&1 1>&2 2>&3)

local exitstatus=$?
whiptail_check_exitstatus $exitstatus
}

whiptail_node_es_heap() {

[ -n "$TESTING" ] && return

@@ -1105,6 +1342,22 @@ whiptail_node_ls_pipeline_worker() {

}

whiptail_ntp_ask() {
[ -n "$TESTING" ] && return

whiptail --title "Security Onion Setup" --yesno "Would you like to configure ntp servers?" 7 44
}

whiptail_ntp_servers() {
[ -n "$TESTING" ] && return

ntp_string=$(whiptail --title "Security Onion Setup" \
--inputbox "Input the NTP server(s) you would like to use, separated by commas:" 8 75 "$1" 3>&1 1>&2 2>&3)

local exitstatus=$?
whiptail_check_exitstatus $exitstatus
}

whiptail_oinkcode() {

[ -n "$TESTING" ] && return

@@ -1271,11 +1524,7 @@ whiptail_proxy_auth_pass() {

[ -n "$TESTING" ] && return

if [[ $arg != 'confirm' ]]; then
proxy_pass=$(whiptail --title "Security Onion Setup" --passwordbox "Please input the proxy password:" 8 60 3>&1 1>&2 2>&3)
else
proxy_pass_confirm=$(whiptail --title "Security Onion Setup" --passwordbox "Please confirm the proxy password:" 8 60 3>&1 1>&2 2>&3)
fi
proxy_pass=$(whiptail --title "Security Onion Setup" --passwordbox "Please input the proxy password:" 8 60 3>&1 1>&2 2>&3)

local exitstatus=$?
whiptail_check_exitstatus $exitstatus

@@ -1469,6 +1718,22 @@ whiptail_so_allow() {
whiptail_check_exitstatus $exitstatus
}

whitpail_ssh_warning() {
[ -n "$TESTING" ] && return

local msg

read -r -d '' msg <<- EOM
NOTE: You will recceive a warning upon SSH reconnect that the host key has changed.

This is expected due to hardening of the OpenSSH server config.

The host key algorithm will now be ED25519, follow the instructions given by your SSH client to remove the old key fingerprint then retry the connection.
EOM

whiptail --msgbox "$msg" 14 75
}

whiptail_storage_requirements() {
local mount=$1
local current_val=$2

@@ -1,6 +0,0 @@
[saltstack]
name=SaltStack repo for RHEL/CentOS $releasever PY3
baseurl=https://repo.saltstack.com/py3/redhat/7/x86_64/archive/3002.5/
enabled=1
gpgcheck=1
gpgkey=https://repo.saltstack.com/py3/redhat/7/x86_64/archive/3002.5/SALTSTACK-GPG-KEY.pub

@@ -1,7 +0,0 @@
[wazuh_repo]
gpgcheck=1
gpgkey=https://packages.wazuh.com/key/GPG-KEY-WAZUH
enabled=1
name=Wazuh repository
baseurl=https://packages.wazuh.com/3.x/yum/
protect=1