Merge pull request #7667 from Security-Onion-Solutions/analystsetup

Analyst Setup
Mike Reeves
2022-04-04 16:09:13 -04:00
committed by GitHub
5 changed files with 330 additions and 197 deletions

View File

@@ -2,4 +2,4 @@
# When updating the salt version, also update the version in securityonion-builds/images/iso-task/Dockerfile and saltify function in so-functions
salt:
  master:
-    version: 3004
+    version: 3004.1

View File

@@ -2,6 +2,6 @@
# When updating the salt version, also update the version in securityonion-builds/images/iso-task/Dockerfile and saltify function in so-functions
salt:
  minion:
-    version: 3004
+    version: 3004.1
    check_threshold: 3600 # in seconds, threshold used for so-salt-minion-check. any value less than 600 seconds may cause a lot of salt-minion restarts since the job to touch the file occurs every 5-8 minutes by default
    service_start_delay: 30 # in seconds.

View File

@@ -49,14 +49,14 @@ airgap_repo() {
  rm -rf /etc/yum.repos.d/*
  echo "[airgap_repo]" > /etc/yum.repos.d/airgap_repo.repo
  if $is_manager; then
    echo "baseurl=https://$HOSTNAME/repo" >> /etc/yum.repos.d/airgap_repo.repo
  else
    echo "baseurl=https://$MSRV/repo" >> /etc/yum.repos.d/airgap_repo.repo
  fi
  echo "gpgcheck=1" >> /etc/yum.repos.d/airgap_repo.repo
  echo "sslverify=0" >> /etc/yum.repos.d/airgap_repo.repo
  echo "name=Airgap Repo" >> /etc/yum.repos.d/airgap_repo.repo
  echo "enabled=1" >> /etc/yum.repos.d/airgap_repo.repo
}
airgap_rules() {
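For reference, on a node other than the manager the function above leaves /etc/yum.repos.d/airgap_repo.repo looking roughly like this (the manager address shown is illustrative):

[airgap_repo]
baseurl=https://192.0.2.10/repo
gpgcheck=1
sslverify=0
name=Airgap Repo
enabled=1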
@@ -138,6 +138,21 @@ analyze_system() {
  logCmd "ip a"
}
+analyst_workstation_pillar() {
+  local pillar_file=$temp_install_dir/pillar/minions/$MINION_ID.sls
+  # Create the analyst workstation pillar
+  printf '%s\n'\
+    "host:"\
+    "  mainint: '$MNIC'"\
+    "workstation:"\
+    "  gui:"\
+    "    enabled: true" >> "$pillar_file"\
+    "sensoroni:"\
+    "  node_description: '${NODE_DESCRIPTION//\'/''}'" > $pillar_file
+}
calculate_useable_cores() {
  # Calculate reasonable core usage
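For reference, the workstation pillar this new function writes is a short YAML file; with an illustrative management NIC of eth0 and a node description of 'SOC analyst VM' it would come out roughly as:

host:
  mainint: 'eth0'
workstation:
  gui:
    enabled: true
sensoroni:
  node_description: 'SOC analyst VM'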
@@ -766,6 +781,9 @@ collect_zeek() {
configure_minion() {
  local minion_type=$1
+  if [[ $is_analyst ]]; then
+    minion_type=workstation
+  fi
  echo "Configuring minion type as $minion_type" >> "$setup_log" 2>&1
  echo "role: so-$minion_type" > /etc/salt/grains
@@ -774,6 +792,9 @@ configure_minion() {
echo "id: '$MINION_ID'" > "$minion_config" echo "id: '$MINION_ID'" > "$minion_config"
case "$minion_type" in case "$minion_type" in
'workstation')
echo "master: '$MSRV'" >> "$minion_config"
;;
'helix') 'helix')
cp -f ../salt/ca/files/signing_policies.conf /etc/salt/minion.d/signing_policies.conf cp -f ../salt/ca/files/signing_policies.conf /etc/salt/minion.d/signing_policies.conf
echo "master: '$HOSTNAME'" >> "$minion_config" echo "master: '$HOSTNAME'" >> "$minion_config"
@@ -1108,6 +1129,7 @@ detect_os() {
echo "Detecting Base OS" >> "$log" 2>&1 echo "Detecting Base OS" >> "$log" 2>&1
if [ -f /etc/redhat-release ]; then if [ -f /etc/redhat-release ]; then
OS=centos OS=centos
is_centos=true
if grep -q "CentOS Linux release 7" /etc/redhat-release; then if grep -q "CentOS Linux release 7" /etc/redhat-release; then
OSVER=7 OSVER=7
elif grep -q "CentOS Linux release 8" /etc/redhat-release; then elif grep -q "CentOS Linux release 8" /etc/redhat-release; then
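The new is_centos flag is what the rest of this PR tests with [[ $is_centos ]] in place of string comparisons against $OS; in bash an unset or empty variable makes that test false and any non-empty value makes it true. A minimal standalone sketch of the idiom (not repo code):

is_centos=                                   # empty: the test below is false
[ -f /etc/redhat-release ] && is_centos=true # any non-empty value: the test is true
if [[ $is_centos ]]; then
  echo "CentOS-family host detected"
else
  echo "assuming a Debian/Ubuntu host"
fi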
@@ -1207,7 +1229,7 @@ disable_ipv6() {
docker_install() {
-  if [ $OS = 'centos' ]; then
+  if [[ $is_centos ]]; then
    logCmd "yum clean expire-cache"
    if [[ ! $is_iso ]]; then
      logCmd "yum -y install docker-ce-20.10.5-3.el7 docker-ce-cli-20.10.5-3.el7 docker-ce-rootless-extras-20.10.5-3.el7 containerd.io-1.4.4-3.1.el7"
@@ -1229,15 +1251,15 @@ docker_install() {
      ;;
    esac
    if [ $OSVER == "bionic" ]; then
      service docker stop
      apt -y purge docker-ce docker-ce-cli docker-ce-rootless-extras
      retry 50 10 "apt-get -y install --allow-downgrades docker-ce=5:20.10.5~3-0~ubuntu-bionic docker-ce-cli=5:20.10.5~3-0~ubuntu-bionic docker-ce-rootless-extras=5:20.10.5~3-0~ubuntu-bionic python3-docker" >> "$setup_log" 2>&1 || exit 1
      apt-mark hold docker-ce docker-ce-cli docker-ce-rootless-extras
    elif [ $OSVER == "focal" ]; then
      service docker stop
      apt -y purge docker-ce docker-ce-cli docker-ce-rootless-extras
      retry 50 10 "apt-get -y install --allow-downgrades docker-ce=5:20.10.8~3-0~ubuntu-focal docker-ce-cli=5:20.10.8~3-0~ubuntu-focal docker-ce-rootless-extras=5:20.10.8~3-0~ubuntu-focal python3-docker" >> "$setup_log" 2>&1 || exit 1
      apt-mark hold docker-ce docker-ce-cli docker-ce-rootless-extras
    fi
  fi
  docker_registry
@@ -1368,9 +1390,9 @@ es_heapsize() {
  # Set heap size to 33% of available memory
  ES_HEAP_SIZE=$(( total_mem / 3 ))
  if [ "$ES_HEAP_SIZE" -ge 25001 ] ; then
    ES_HEAP_SIZE="25000m"
  else
    ES_HEAP_SIZE=$ES_HEAP_SIZE"m"
  fi
  fi
  export ES_HEAP_SIZE
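As a worked example (host sizes hypothetical, and assuming total_mem is in megabytes as the 25000m cap suggests): with total_mem of 12000 the arithmetic gives 12000 / 3 = 4000, which is below the 25001 cutoff, so ES_HEAP_SIZE is exported as "4000m"; on a 96000 MB host the division yields 32000, so the value is clamped to "25000m".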
@@ -1728,7 +1750,7 @@ manager_global() {
" managerip: '$MAINIP'" > "$global_pillar" " managerip: '$MAINIP'" > "$global_pillar"
if [[ $HIGHLANDER == 'True' ]]; then if [[ $HIGHLANDER == 'True' ]]; then
printf '%s\n'\ printf '%s\n'\
" highlander: True"\ >> "$global_pillar" " highlander: True"\ >> "$global_pillar"
fi fi
if [[ $is_airgap ]]; then if [[ $is_airgap ]]; then
@@ -1758,14 +1780,15 @@ manager_global() {
" enabled: $STRELKA"\ " enabled: $STRELKA"\
" rules: 1" >> "$global_pillar" " rules: 1" >> "$global_pillar"
if [[ $is_airgap ]]; then if [[ $is_airgap ]]; then
printf '%s\n'\ printf '%s\n'\
" repos:"\ " repos:"\
" - 'https://$HOSTNAME/repo/rules/strelka'" >> "$global_pillar" " - 'https://$HOSTNAME/repo/rules/strelka'" >> "$global_pillar"
else else
printf '%s\n'\ printf '%s\n'\
" repos:"\ " repos:"\
" - 'https://github.com/Neo23x0/signature-base'" >> "$global_pillar" " - 'https://github.com/Neo23x0/signature-base'" >> "$global_pillar"
fi fi
printf '%s\n'\ printf '%s\n'\
"curator:"\ "curator:"\
" hot_warm: False"\ " hot_warm: False"\
@@ -1793,101 +1816,101 @@ manager_global() {
" cluster_routing_allocation_disk_watermark_flood_stage: '98%'"\ " cluster_routing_allocation_disk_watermark_flood_stage: '98%'"\
" index_settings:"\ " index_settings:"\
" so-beats:"\ " so-beats:"\
" index_template:"\ " index_template:"\
" template:"\ " template:"\
" settings:"\ " settings:"\
" index:"\ " index:"\
" number_of_shards: 1"\ " number_of_shards: 1"\
" warm: 7"\ " warm: 7"\
" close: 30"\ " close: 30"\
" delete: 365"\ " delete: 365"\
" so-endgame:"\ " so-endgame:"\
" index_template:"\ " index_template:"\
" template:"\ " template:"\
" settings:"\ " settings:"\
" index:"\ " index:"\
" number_of_shards: 1"\ " number_of_shards: 1"\
" warm: 7"\ " warm: 7"\
" close: 30"\ " close: 30"\
" delete: 365"\ " delete: 365"\
" so-firewall:"\ " so-firewall:"\
" index_template:"\ " index_template:"\
" template:"\ " template:"\
" settings:"\ " settings:"\
" index:"\ " index:"\
" number_of_shards: 1"\ " number_of_shards: 1"\
" warm: 7"\ " warm: 7"\
" close: 30"\ " close: 30"\
" delete: 365"\ " delete: 365"\
" so-flow:"\ " so-flow:"\
" index_template:"\ " index_template:"\
" template:"\ " template:"\
" settings:"\ " settings:"\
" index:"\ " index:"\
" number_of_shards: 1"\ " number_of_shards: 1"\
" warm: 7"\ " warm: 7"\
" close: 30"\ " close: 30"\
" delete: 365"\ " delete: 365"\
" so-ids:"\ " so-ids:"\
" index_template:"\ " index_template:"\
" template:"\ " template:"\
" settings:"\ " settings:"\
" index:"\ " index:"\
" number_of_shards: 1"\ " number_of_shards: 1"\
" warm: 7"\ " warm: 7"\
" close: 30"\ " close: 30"\
" delete: 365"\ " delete: 365"\
" so-import:"\ " so-import:"\
" index_template:"\ " index_template:"\
" template:"\ " template:"\
" settings:"\ " settings:"\
" index:"\ " index:"\
" number_of_shards: 1"\ " number_of_shards: 1"\
" warm: 7"\ " warm: 7"\
" close: 73000"\ " close: 73000"\
" delete: 73001"\ " delete: 73001"\
" so-osquery:"\ " so-osquery:"\
" index_template:"\ " index_template:"\
" template:"\ " template:"\
" settings:"\ " settings:"\
" index:"\ " index:"\
" number_of_shards: 1"\ " number_of_shards: 1"\
" warm: 7"\ " warm: 7"\
" close: 30"\ " close: 30"\
" delete: 365"\ " delete: 365"\
" so-ossec:"\ " so-ossec:"\
" index_template:"\ " index_template:"\
" template:"\ " template:"\
" settings:"\ " settings:"\
" index:"\ " index:"\
" number_of_shards: 1"\ " number_of_shards: 1"\
" warm: 7"\ " warm: 7"\
" close: 30"\ " close: 30"\
" delete: 365"\ " delete: 365"\
" so-strelka:"\ " so-strelka:"\
" index_template:"\ " index_template:"\
" template:"\ " template:"\
" settings:"\ " settings:"\
" index:"\ " index:"\
" number_of_shards: 1"\ " number_of_shards: 1"\
" warm: 7"\ " warm: 7"\
" close: 30"\ " close: 30"\
" delete: 365"\ " delete: 365"\
" so-syslog:"\ " so-syslog:"\
" index_template:"\ " index_template:"\
" template:"\ " template:"\
" settings:"\ " settings:"\
" index:"\ " index:"\
" number_of_shards: 1"\ " number_of_shards: 1"\
" warm: 7"\ " warm: 7"\
" close: 30"\ " close: 30"\
" delete: 365"\ " delete: 365"\
" so-zeek:"\ " so-zeek:"\
" index_template:"\ " index_template:"\
" template:"\ " template:"\
" settings:"\ " settings:"\
" index:"\ " index:"\
" number_of_shards: 1"\ " number_of_shards: 1"\
" warm: 7"\ " warm: 7"\
" close: 45"\ " close: 45"\
" delete: 365"\ " delete: 365"\
@@ -2178,7 +2201,7 @@ reset_proxy() {
  [[ -f /etc/gitconfig ]] && rm -f /etc/gitconfig
-  if [[ $OS == 'centos' ]]; then
+  if [[ $is_centos ]]; then
    sed -i "/proxy=/d" /etc/yum.conf
  else
    [[ -f /etc/apt/apt.conf.d/00-proxy.conf ]] && rm -f /etc/apt/apt.conf.d/00-proxy.conf
@@ -2206,7 +2229,7 @@ backup_dir() {
remove_package() {
  local package_name=$1
-  if [ $OS = 'centos' ]; then
+  if [[ $is_centos ]]; then
    if rpm -qa | grep -q "$package_name"; then
      logCmd "yum remove -y $package_name"
    fi
@@ -2227,39 +2250,42 @@ remove_package() {
saltify() {
  # Install updates and Salt
-  if [ $OS = 'centos' ]; then
+  if [[ $is_centos ]]; then
    set_progress_str 6 'Installing various dependencies'
-    if [[ ! $is_iso ]]; then
+    if [[ ! ( $is_iso || $is_analyst_iso ) ]]; then
      logCmd "yum -y install wget nmap-ncat"
    fi
-    case "$install_type" in
-      'MANAGER' | 'EVAL' | 'MANAGERSEARCH' | 'FLEET' | 'HELIXSENSOR' | 'STANDALONE'| 'IMPORT')
-        reserve_group_ids
-        if [[ ! $is_iso ]]; then
-          logCmd "yum -y install sqlite curl mariadb-devel"
-        fi
-        # Download Ubuntu Keys in case manager updates = 1
-        logCmd "mkdir -vp /opt/so/gpg"
-        if [[ ! $is_airgap ]]; then
-          logCmd "wget -q --inet4-only -O /opt/so/gpg/SALTSTACK-GPG-KEY.pub https://repo.saltstack.com/py3/ubuntu/18.04/amd64/archive/3004/SALTSTACK-GPG-KEY.pub"
-          logCmd "wget -q --inet4-only -O /opt/so/gpg/docker.pub https://download.docker.com/linux/ubuntu/gpg"
-          logCmd "wget -q --inet4-only -O /opt/so/gpg/GPG-KEY-WAZUH https://packages.wazuh.com/key/GPG-KEY-WAZUH"
-        fi
-        set_progress_str 7 'Installing salt-master'
-        if [[ ! $is_iso ]]; then
-          logCmd "yum -y install salt-master-3004"
-        fi
-        logCmd "systemctl enable salt-master"
-        ;;
-      *)
-        ;;
-    esac
+    if [[ ! $is_analyst ]]; then
+      case "$install_type" in
+        'MANAGER' | 'EVAL' | 'MANAGERSEARCH' | 'FLEET' | 'HELIXSENSOR' | 'STANDALONE'| 'IMPORT')
+          reserve_group_ids
+          if [[ ! $is_iso ]]; then
+            logCmd "yum -y install sqlite curl mariadb-devel"
+          fi
+          # Download Ubuntu Keys in case manager updates = 1
+          logCmd "mkdir -vp /opt/so/gpg"
+          if [[ ! $is_airgap ]]; then
+            logCmd "wget -q --inet4-only -O /opt/so/gpg/SALTSTACK-GPG-KEY.pub https://repo.saltstack.com/py3/ubuntu/18.04/amd64/archive/3004.1/SALTSTACK-GPG-KEY.pub"
+            logCmd "wget -q --inet4-only -O /opt/so/gpg/docker.pub https://download.docker.com/linux/ubuntu/gpg"
+            logCmd "wget -q --inet4-only -O /opt/so/gpg/GPG-KEY-WAZUH https://packages.wazuh.com/key/GPG-KEY-WAZUH"
+          fi
+          set_progress_str 7 'Installing salt-master'
+          if [[ ! $is_iso ]]; then
+            logCmd "yum -y install salt-master-3004.1"
+          fi
+          logCmd "systemctl enable salt-master"
+          ;;
+        *)
+          ;;
+      esac
+    fi
    if [[ ! $is_airgap ]]; then
      logCmd "yum clean expire-cache"
    fi
    set_progress_str 8 'Installing salt-minion & python modules'
-    if [[ ! $is_iso ]]; then
-      logCmd "yum -y install salt-minion-3004 httpd-tools python3 python36-docker python36-dateutil python36-m2crypto python36-mysql python36-packaging python36-lxml yum-utils device-mapper-persistent-data lvm2 openssl jq"
+    if [[ ! ( $is_iso || $is_analyst_iso ) ]]; then
+      logCmd "yum -y install salt-minion-3004.1 httpd-tools python3 python36-docker python36-dateutil python36-m2crypto python36-mysql python36-packaging python36-lxml yum-utils device-mapper-persistent-data lvm2 openssl jq"
      logCmd "yum -y update --exclude=salt*"
    fi
    logCmd "systemctl enable salt-minion"
@@ -2298,8 +2324,8 @@ saltify() {
    'MANAGER' | 'EVAL' | 'MANAGERSEARCH' | 'STANDALONE' | 'IMPORT' | 'HELIXSENSOR')
      # Add saltstack repo(s)
-      wget -q --inet4-only -O - https://repo.saltstack.com/py3/ubuntu/"$ubuntu_version"/amd64/archive/3004/SALTSTACK-GPG-KEY.pub | apt-key add - >> "$setup_log" 2>&1
-      echo "deb http://repo.saltstack.com/py3/ubuntu/$ubuntu_version/amd64/archive/3004 $OSVER main" > /etc/apt/sources.list.d/saltstack.list 2>> "$setup_log"
+      wget -q --inet4-only -O - https://repo.saltstack.com/py3/ubuntu/"$ubuntu_version"/amd64/archive/3004.1/SALTSTACK-GPG-KEY.pub | apt-key add - >> "$setup_log" 2>&1
+      echo "deb http://repo.saltstack.com/py3/ubuntu/$ubuntu_version/amd64/archive/3004.1 $OSVER main" > /etc/apt/sources.list.d/saltstack.list 2>> "$setup_log"
      # Add Docker repo
      curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - >> "$setup_log" 2>&1
@@ -2307,7 +2333,7 @@ saltify() {
      # Get gpg keys
      mkdir -p /opt/so/gpg >> "$setup_log" 2>&1
-      wget -q --inet4-only -O /opt/so/gpg/SALTSTACK-GPG-KEY.pub https://repo.saltstack.com/py3/ubuntu/"$ubuntu_version"/amd64/archive/3004/SALTSTACK-GPG-KEY.pub >> "$setup_log" 2>&1
+      wget -q --inet4-only -O /opt/so/gpg/SALTSTACK-GPG-KEY.pub https://repo.saltstack.com/py3/ubuntu/"$ubuntu_version"/amd64/archive/3004.1/SALTSTACK-GPG-KEY.pub >> "$setup_log" 2>&1
      wget -q --inet4-only -O /opt/so/gpg/docker.pub https://download.docker.com/linux/ubuntu/gpg >> "$setup_log" 2>&1
      wget -q --inet4-only -O /opt/so/gpg/GPG-KEY-WAZUH https://packages.wazuh.com/key/GPG-KEY-WAZUH >> "$setup_log" 2>&1
@@ -2320,7 +2346,7 @@ saltify() {
      set_progress_str 6 'Installing various dependencies'
      retry 50 10 "apt-get -y install sqlite3 libssl-dev" >> "$setup_log" 2>&1 || exit 1
      set_progress_str 7 'Installing salt-master'
-      retry 50 10 "apt-get -y install salt-master=3004+ds-1" >> "$setup_log" 2>&1 || exit 1
+      retry 50 10 "apt-get -y install salt-master=3004.1+ds-1" >> "$setup_log" 2>&1 || exit 1
      retry 50 10 "apt-mark hold salt-master" >> "$setup_log" 2>&1 || exit 1
      ;;
    *)
@@ -2331,14 +2357,14 @@ saltify() {
echo "Using apt-key add to add SALTSTACK-GPG-KEY.pub and GPG-KEY-WAZUH" >> "$setup_log" 2>&1 echo "Using apt-key add to add SALTSTACK-GPG-KEY.pub and GPG-KEY-WAZUH" >> "$setup_log" 2>&1
apt-key add "$temp_install_dir"/gpg/SALTSTACK-GPG-KEY.pub >> "$setup_log" 2>&1 apt-key add "$temp_install_dir"/gpg/SALTSTACK-GPG-KEY.pub >> "$setup_log" 2>&1
apt-key add "$temp_install_dir"/gpg/GPG-KEY-WAZUH >> "$setup_log" 2>&1 apt-key add "$temp_install_dir"/gpg/GPG-KEY-WAZUH >> "$setup_log" 2>&1
echo "deb http://repo.saltstack.com/py3/ubuntu/$ubuntu_version/amd64/archive/3004/ $OSVER main" > /etc/apt/sources.list.d/saltstack.list 2>> "$setup_log" echo "deb http://repo.saltstack.com/py3/ubuntu/$ubuntu_version/amd64/archive/3004.1/ $OSVER main" > /etc/apt/sources.list.d/saltstack.list 2>> "$setup_log"
echo "deb https://packages.wazuh.com/3.x/apt/ stable main" > /etc/apt/sources.list.d/wazuh.list 2>> "$setup_log" echo "deb https://packages.wazuh.com/3.x/apt/ stable main" > /etc/apt/sources.list.d/wazuh.list 2>> "$setup_log"
;; ;;
esac esac
retry 50 10 "apt-get update" "" "Err:" >> "$setup_log" 2>&1 || exit 1 retry 50 10 "apt-get update" "" "Err:" >> "$setup_log" 2>&1 || exit 1
set_progress_str 8 'Installing salt-minion & python modules' set_progress_str 8 'Installing salt-minion & python modules'
retry 50 10 "apt-get -y install salt-minion=3004+ds-1 salt-common=3004+ds-1" >> "$setup_log" 2>&1 || exit 1 retry 50 10 "apt-get -y install salt-minion=3004.1+ds-1 salt-common=3004.1+ds-1" >> "$setup_log" 2>&1 || exit 1
retry 50 10 "apt-mark hold salt-minion salt-common" >> "$setup_log" 2>&1 || exit 1 retry 50 10 "apt-mark hold salt-minion salt-common" >> "$setup_log" 2>&1 || exit 1
retry 50 10 "apt-get -y install python3-pip python3-dateutil python3-m2crypto python3-mysqldb python3-packaging python3-influxdb python3-lxml" >> "$setup_log" 2>&1 || exit 1 retry 50 10 "apt-get -y install python3-pip python3-dateutil python3-m2crypto python3-mysqldb python3-packaging python3-influxdb python3-lxml" >> "$setup_log" 2>&1 || exit 1
fi fi
@@ -2371,7 +2397,7 @@ secrets_pillar(){
securityonion_repo() {
  # Remove all the current repos
-  if [[ "$OS" == "centos" ]]; then
+  if [[ $is_centos ]]; then
    if [[ "$INTERWEBS" == "AIRGAP" ]]; then
      echo "This is airgap I don't need to add this repo"
    else
@@ -2462,7 +2488,7 @@ set_proxy() {
"}" > /root/.docker/config.json "}" > /root/.docker/config.json
# Set proxy for package manager # Set proxy for package manager
if [ "$OS" = 'centos' ]; then if [[ $is_centos ]]; then
echo "proxy=$so_proxy" >> /etc/yum.conf echo "proxy=$so_proxy" >> /etc/yum.conf
else else
# Set it up so the updates roll through the manager # Set it up so the updates roll through the manager
@@ -2633,8 +2659,8 @@ set_initial_firewall_policy() {
    'EVAL' | 'MANAGERSEARCH' | 'STANDALONE' | 'IMPORT')
      $default_salt_dir/salt/common/tools/sbin/so-firewall includehost manager "$MAINIP"
      $default_salt_dir/salt/common/tools/sbin/so-firewall includehost minion "$MAINIP"
      $default_salt_dir/salt/common/tools/sbin/so-firewall includehost sensor "$MAINIP"
      $default_salt_dir/salt/common/tools/sbin/so-firewall --apply includehost search_node "$MAINIP"
      case "$install_type" in
        'EVAL')
          $default_salt_dir/pillar/data/addtotab.sh evaltab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm" "$INTERFACE" True
@@ -2650,7 +2676,7 @@ set_initial_firewall_policy() {
    'HELIXSENSOR')
      $default_salt_dir/salt/common/tools/sbin/so-firewall includehost manager "$MAINIP"
      $default_salt_dir/salt/common/tools/sbin/so-firewall includehost minion "$MAINIP"
      $default_salt_dir/salt/common/tools/sbin/so-firewall --apply includehost sensor "$MAINIP"
      ;;
    'SENSOR' | 'SEARCHNODE' | 'HEAVYNODE' | 'FLEET' | 'IDH' | 'RECEIVER')
      $sshcmd -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/salt/common/tools/sbin/so-firewall includehost minion "$MAINIP"
@@ -2690,6 +2716,13 @@ set_initial_firewall_policy() {
      # TODO: implement
      ;;
  esac
+  # Add some firewall rules for analyst workstations that get added to the grid
+  if [[ $is_analyst ]]; then
+    $sshcmd -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/salt/common/tools/sbin/so-firewall includehost minion "$MAINIP"
+    $sshcmd -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/salt/common/tools/sbin/so-firewall --apply includehost analyst "$MAINIP"
+  fi
}
# Set up the management interface on the ISO
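Concretely, for a workstation with an illustrative address of 192.0.2.50, the two added commands amount to running the following on the manager via the soremote SSH key, with --apply on the second call presumably pushing both host additions at once:

sudo $default_salt_dir/salt/common/tools/sbin/so-firewall includehost minion 192.0.2.50
sudo $default_salt_dir/salt/common/tools/sbin/so-firewall --apply includehost analyst 192.0.2.50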
@@ -2741,7 +2774,7 @@ set_redirect() {
set_updates() {
  if [ "$MANAGERUPDATES" = '1' ]; then
-    if [ "$OS" = 'centos' ]; then
+    if [[ $is_centos ]]; then
      if [[ ! $is_airgap ]] && ! ( grep -q "$MSRV" /etc/yum.conf); then
        if grep -q "proxy=" /etc/yum.conf; then
          sed -i "s/proxy=.*/proxy=http:\/\/$MSRV:3142/" /etc/yum.conf
@@ -2808,9 +2841,9 @@ update_sudoers() {
}
update_packages() {
-  if [ "$OS" = 'centos' ]; then
+  if [[ $is_centos ]]; then
    logCmd "yum repolist"
    logCmd "yum -y update --exclude=salt*,wazuh*,docker*,containerd*"
  else
    retry 50 10 "apt-get -y update" >> "$setup_log" 2>&1 || exit 1
    retry 50 10 "apt-get -y upgrade" >> "$setup_log" 2>&1 || exit 1
@@ -2870,7 +2903,7 @@ write_out_idh_services() {
" services:" >> "$pillar_file" " services:" >> "$pillar_file"
for service in ${idh_services[@]}; do for service in ${idh_services[@]}; do
echo " - $service" | tr '[:upper:]' '[:lower:]' >> "$pillar_file" echo " - $service" | tr '[:upper:]' '[:lower:]' >> "$pillar_file"
done done
} }
# Enable Zeek Logs # Enable Zeek Logs

View File

@@ -71,9 +71,17 @@ while [[ $# -gt 0 ]]; do
done
detect_os
+is_analyst=
+if [ "$setup_type" = 'analyst' ]; then
+  is_analyst=true
+  # Check to see if this is an ISO
+  if [ -d /root/SecurityOnion ]; then
+    is_analyst_iso=true
+  fi
+fi
if [[ "$setup_type" == 'iso' ]]; then
-  if [[ "$OS" == 'centos' ]]; then
+  if [[ $is_centos ]]; then
    is_iso=true
  else
    echo "Only use 'so-setup iso' for an ISO install on CentOS. Please run 'so-setup network' instead."
@@ -81,6 +89,31 @@ if [[ "$setup_type" == 'iso' ]]; then
  fi
fi
+# Check to see if this is an analyst install. If it is let's run things differently
+if [[ $is_analyst ]]; then
+  # Make sure it's CentOS
+  if [[ ! $is_centos ]]; then
+    echo "Analyst Workstation is only supported on CentOS 7"
+    exit 1
+  fi
+  if ! whiptail_analyst_install; then
+    # Lets make this a standalone
+    echo "Enabling graphical interface and setting it to load at boot"
+    systemctl set-default graphical.target
+    startx
+    exit 0
+  fi
+  # If you got this far then you want to join the grid
+  is_minion=true
+fi
if ! [ -f $install_opt_file ] && [ -d /root/manager_setup/securityonion ] && [[ $(pwd) != /root/manager_setup/securityonion/setup ]]; then
  exec bash /root/manager_setup/securityonion/setup/so-setup "${original_args[@]}"
fi
@@ -112,7 +145,6 @@ catch() {
  whiptail_setup_failed
  exit 1
}
automated=no
progress() {
  local msg=${1:-'Please wait while installing...'}
@@ -156,11 +188,11 @@ if [[ -f automation/$automation && $(basename $automation) == $automation ]]; th
fi
case "$setup_type" in
-  iso | network) # Accepted values
+  iso | network | analyst) # Accepted values
    echo "Beginning Security Onion $setup_type install" >> $setup_log 2>&1
    ;;
  *)
-    echo "Invalid install type, must be 'iso' or 'network'" | tee -a $setup_log
+    echo "Invalid install type, must be 'iso', 'network' or 'analyst'." | tee -a $setup_log
    exit 1
    ;;
esac
@@ -202,6 +234,37 @@ if ! [[ -f $install_opt_file ]]; then
echo "User cancelled setup." | tee -a "$setup_log" echo "User cancelled setup." | tee -a "$setup_log"
whiptail_cancel whiptail_cancel
fi fi
if [[ $is_analyst ]]; then
collect_hostname
if [[ $is_analyst_iso ]]; then
# Prompt Network Setup
whiptail_management_nic
whiptail_dhcp_or_static
if [ "$address_type" != 'DHCP' ]; then
collect_int_ip_mask
collect_gateway
collect_dns
collect_dns_domain
fi
fi
if [[ ! $is_analyst_iso ]]; then
# This should be a network install
whiptail_network_notice
whiptail_dhcp_warn
whiptail_management_nic
fi
whiptail_network_init_notice
network_init
printf '%s\n' \
"MNIC=$MNIC" \
"HOSTNAME=$HOSTNAME" > "$net_init_file"
set_main_ip >> $setup_log 2>&1
compare_main_nic_ip
fi
if [[ $setup_type == 'iso' ]] && [ "$automated" == no ]; then if [[ $setup_type == 'iso' ]] && [ "$automated" == no ]; then
whiptail_first_menu_iso whiptail_first_menu_iso
if [[ $option == "CONFIGURENETWORK" ]]; then if [[ $option == "CONFIGURENETWORK" ]]; then
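The $net_init_file written in the analyst branch above is just a two-line key=value snippet that later steps can source; with illustrative values it would contain:

MNIC=eth0
HOSTNAME=analyst01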
@@ -219,7 +282,9 @@ if ! [[ -f $install_opt_file ]]; then
      true
    fi
  fi
-  whiptail_install_type
+  if [[ ! $is_analyst ]]; then
+    whiptail_install_type
+  fi
else
  source $install_opt_file
fi
@@ -269,7 +334,7 @@ elif [ "$install_type" = 'RECEIVER' ]; then
  is_receiver=true
elif [ "$install_type" = 'ANALYST' ]; then
  cd .. || exit 255
-  exec bash so-analyst-install
+  exec bash so-setup analyst
fi
if [[ $is_manager || $is_import ]]; then
@@ -289,7 +354,7 @@ if ! [[ -f $install_opt_file ]]; then
    check_requirements "dist" "idh"
  elif [[ $is_sensor && ! $is_eval ]]; then
    check_requirements "dist" "sensor"
-  elif [[ $is_distmanager || $is_minion ]] && [[ ! $is_import ]]; then
+  elif [[ $is_distmanager || $is_minion ]] && [[ ! ( $is_import || $is_analyst ) ]]; then
    check_requirements "dist"
  elif [[ $is_import ]]; then
    check_requirements "import"
@@ -320,9 +385,6 @@ if ! [[ -f $install_opt_file ]]; then
  if [[ $is_minion ]]; then
    collect_mngr_hostname
    add_mngr_ip_to_hosts
-  fi
-  if [[ $is_minion ]]; then
    whiptail_ssh_key_copy_notice
    copy_ssh_key >> $setup_log 2>&1
  fi
@@ -333,7 +395,7 @@ if ! [[ -f $install_opt_file ]]; then
if [[ "$INTERWEBS" == 'AIRGAP' ]]; then if [[ "$INTERWEBS" == 'AIRGAP' ]]; then
is_airgap=true is_airgap=true
fi fi
elif [[ $is_minion && $is_iso ]]; then elif [[ $is_minion && ( $is_iso || $is_analyst ) ]]; then
$sshcmd -i /root/.ssh/so.key soremote@"$MSRV" [[ -f /etc/yum.repos.d/airgap_repo.repo ]] >> $setup_log 2>&1 $sshcmd -i /root/.ssh/so.key soremote@"$MSRV" [[ -f /etc/yum.repos.d/airgap_repo.repo ]] >> $setup_log 2>&1
airgap_check=$? airgap_check=$?
[[ $airgap_check == 0 ]] && is_airgap=true >> $setup_log 2>&1 [[ $airgap_check == 0 ]] && is_airgap=true >> $setup_log 2>&1
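The airgap probe above relies on ssh exiting with the remote command's status; a minimal standalone sketch of the same pattern (key path and manager name are illustrative):

ssh -i /root/.ssh/so.key soremote@manager '[[ -f /etc/yum.repos.d/airgap_repo.repo ]]'
if [ $? -eq 0 ]; then
  # The repo file exists on the manager, so treat this grid as airgapped
  echo "manager appears to be an airgap install"
fi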
@@ -399,7 +461,12 @@ detect_cloud
short_name=$(echo "$HOSTNAME" | awk -F. '{print $1}')
-MINION_ID=$(echo "${short_name}_${install_type}" | tr '[:upper:]' '[:lower:]')
+if [[ $is_analyst ]]; then
+  MINION_ID=$(echo "${short_name}_workstation" | tr '[:upper:]' '[:lower:]')
+fi
+if [[ ! $is_analyst ]]; then
+  MINION_ID=$(echo "${short_name}_${install_type}" | tr '[:upper:]' '[:lower:]')
+fi
export MINION_ID
echo "MINION_ID = $MINION_ID" >> $setup_log 2>&1
@@ -562,7 +629,7 @@ if [[ $is_sensor && ! $is_eval ]]; then
  fi
fi
-[[ $is_iso ]] && collect_ntp_servers
+[[ ( $is_iso || $is_analyst ) ]] && collect_ntp_servers
if [[ ($is_node || $is_receiver) && ! $is_eval ]]; then
  whiptail_node_advanced
@@ -620,7 +687,9 @@ echo "1" > /root/accept_changes
[[ ${#ntp_servers[@]} -gt 0 ]] && configure_ntp >> $setup_log 2>&1
-reserve_ports
+if [[ ! $is_analyst ]]; then
+  reserve_ports
+fi
set_path
@@ -650,8 +719,12 @@ echo "1" > /root/accept_changes
if [[ $is_manager && ! $is_eval ]]; then
  add_soremote_user_manager >> $setup_log 2>&1
fi
+if [[ ! $is_analyst ]]; then
  host_pillar >> $setup_log 2>&1
+fi
+if [[ $is_analyst ]]; then
+  analyst_workstation_pillar
+fi
ntp_pillar >> $setup_log 2>&1
@@ -674,12 +747,12 @@ echo "1" > /root/accept_changes
# Import the gpg keys
gpg_rpm_import >> $setup_log 2>&1
info "Disabling fastestmirror"
-[[ $OS == 'centos' ]] && disable_fastestmirror
+[[ $is_centos ]] && disable_fastestmirror
if [[ ! $is_airgap ]]; then
  securityonion_repo >> $setup_log 2>&1
  update_packages >> $setup_log 2>&1
else
  airgap_repo >> $setup_log 2>&1
fi
if [[ $is_sensor || $is_helix || $is_import ]]; then
@@ -699,15 +772,20 @@ echo "1" > /root/accept_changes
set_progress_str 5 'Installing Salt and dependencies'
saltify 2>> $setup_log
-set_progress_str 6 'Installing Docker and dependencies'
-docker_install >> $setup_log 2>&1
+if [[ ! $is_analyst ]]; then
+  set_progress_str 6 'Installing Docker and dependencies'
+  docker_install >> $setup_log 2>&1
+fi
set_progress_str 7 'Generating patch pillar'
patch_pillar >> $setup_log 2>&1
set_progress_str 8 'Initializing Salt minion'
configure_minion "$minion_type" >> $setup_log 2>&1
-check_sos_appliance >> $setup_log 2>&1
+if [[ ! $is_analyst ]]; then
+  check_sos_appliance >> $setup_log 2>&1
+fi
update_sudoers_for_testing >> $setup_log 2>&1
@@ -786,8 +864,10 @@ echo "1" > /root/accept_changes
  generate_ca >> $setup_log 2>&1
fi
-set_progress_str 24 'Generating SSL'
-generate_ssl >> $setup_log 2>&1
+if [[ ! $is_analyst ]]; then
+  set_progress_str 24 'Generating SSL'
+  generate_ssl >> $setup_log 2>&1
+fi
if [[ $is_manager || $is_helix || $is_import ]]; then
  set_progress_str 25 'Configuring firewall'
@@ -814,18 +894,22 @@ echo "1" > /root/accept_changes
echo "Finished so-elastic-auth..." >> $setup_log 2>&1 echo "Finished so-elastic-auth..." >> $setup_log 2>&1
fi fi
set_progress_str 61 "$(print_salt_state_apply 'firewall')" if [[ ! $is_analyst ]]; then
salt-call state.apply -l info firewall >> $setup_log 2>&1 set_progress_str 61 "$(print_salt_state_apply 'firewall')"
salt-call state.apply -l info firewall >> $setup_log 2>&1
fi
if [ $OS = 'centos' ]; then if [[ $is_centos ]]; then
set_progress_str 61 'Installing Yum utilities' set_progress_str 61 'Installing Yum utilities'
salt-call state.apply -l info yum.packages >> $setup_log 2>&1 salt-call state.apply -l info yum.packages >> $setup_log 2>&1
fi fi
set_progress_str 62 "$(print_salt_state_apply 'common')" if [[ ! $is_analyst ]]; then
salt-call state.apply -l info common >> $setup_log 2>&1 set_progress_str 62 "$(print_salt_state_apply 'common')"
salt-call state.apply -l info common >> $setup_log 2>&1
fi
if [[ ! $is_helix && ! $is_receiver && ! $is_idh ]]; then if [[ ! $is_helix && ! $is_receiver && ! $is_idh && ! $is_analyst ]]; then
set_progress_str 62 "$(print_salt_state_apply 'nginx')" set_progress_str 62 "$(print_salt_state_apply 'nginx')"
salt-call state.apply -l info nginx >> $setup_log 2>&1 salt-call state.apply -l info nginx >> $setup_log 2>&1
fi fi
@@ -968,10 +1052,12 @@ echo "1" > /root/accept_changes
  salt-call state.apply -l info filebeat >> $setup_log 2>&1
fi
-set_progress_str 85 'Applying finishing touches'
-filter_unused_nics >> $setup_log 2>&1
-network_setup >> $setup_log 2>&1
-so-ssh-harden >> $setup_log 2>&1
+if [[ ! $is_analyst ]]; then
+  set_progress_str 85 'Applying finishing touches'
+  filter_unused_nics >> $setup_log 2>&1
+  network_setup >> $setup_log 2>&1
+  so-ssh-harden >> $setup_log 2>&1
+fi
if [[ $is_manager || $is_import ]]; then
  set_progress_str 87 'Adding user to SOC'
@@ -1025,9 +1111,9 @@ else
fi
if [[ -n $ENDGAMEHOST ]]; then
  set_progress_str 99 'Configuring firewall for Endgame SMP'
  so-firewall --apply includehost endgame $ENDGAMEHOST >> $setup_log 2>&1
fi
} | whiptail_gauge_post_setup "Running post-installation steps..."

View File

@@ -33,6 +33,23 @@ whiptail_airgap() {
  INTERWEBS=$(echo "${INTERWEBS^^}" | tr -d ' ')
}
+whiptail_analyst_install() {
+  [ -n "$TESTING" ] && return
+  read -r -d '' message <<- EOM
+Welcome to the Security Onion Analyst Workstation install!
+Would you like to join this workstation to an existing grid?
+NOTE: Selecting "no" will enable X Windows and set it to load at boot.
+EOM
+  whiptail --title "$whiptail_title" \
+    --yesno "$message" 11 75 --defaultno
+}
whiptail_avoid_default_hostname() {
  [ -n "$TESTING" ] && return
@@ -434,9 +451,9 @@ whiptail_end_settings() {
if [[ $is_idh ]]; then
  __append_end_msg "IDH Services Enabled:"
  for service in ${idh_services[@]}; do
    __append_end_msg "- $service"
  done
fi
@@ -1545,40 +1562,37 @@ whiptail_patch_schedule_select_hours() {
[ -n "$TESTING" ] && return [ -n "$TESTING" ] && return
# Select the hours to patch # Select the hours to patch
PATCHSCHEDULEHOURS=$(whiptail --title "$whiptail_title" --checklist \ PATCHSCHEDULEHOURS=$(whiptail --title "$whiptail_title" --checklist \
"At which time, UTC, do you want to apply OS patches on the selected days?" 22 75 13 \ "At which time, UTC, do you want to apply OS patches on the selected days?" 22 75 13 \
00:00 "" OFF \ 00:00 "" OFF \
01:00 "" OFF \ 01:00 "" OFF \
02:00 "" ON \ 02:00 "" ON \
03:00 "" OFF \ 03:00 "" OFF \
04:00 "" OFF \ 04:00 "" OFF \
05:00 "" OFF \ 05:00 "" OFF \
06:00 "" OFF \ 06:00 "" OFF \
07:00 "" OFF \ 07:00 "" OFF \
08:00 "" OFF \ 08:00 "" OFF \
09:00 "" OFF \ 09:00 "" OFF \
10:00 "" OFF \ 10:00 "" OFF \
11:00 "" OFF \ 11:00 "" OFF \
12:00 "" OFF \ 12:00 "" OFF \
13:00 "" OFF \ 13:00 "" OFF \
14:00 "" OFF \ 14:00 "" OFF \
15:00 "" OFF \ 15:00 "" OFF \
16:00 "" OFF \ 16:00 "" OFF \
17:00 "" OFF \ 17:00 "" OFF \
18:00 "" OFF \ 18:00 "" OFF \
19:00 "" OFF \ 19:00 "" OFF \
20:00 "" OFF \ 20:00 "" OFF \
21:00 "" OFF \ 21:00 "" OFF \
22:00 "" OFF \ 22:00 "" OFF \
23:00 "" OFF 3>&1 1>&2 2>&3) 23:00 "" OFF 3>&1 1>&2 2>&3)
local exitstatus=$?
local exitstatus=$? whiptail_check_exitstatus $exitstatus
whiptail_check_exitstatus $exitstatus PATCHSCHEDULEHOURS=$(echo "$PATCHSCHEDULEHOURS" | tr -d '"')
IFS=' ' read -ra PATCHSCHEDULEHOURS <<< "$PATCHSCHEDULEHOURS"
PATCHSCHEDULEHOURS=$(echo "$PATCHSCHEDULEHOURS" | tr -d '"')
IFS=' ' read -ra PATCHSCHEDULEHOURS <<< "$PATCHSCHEDULEHOURS"
} }
@@ -1923,10 +1937,10 @@ whiptail_suricata_pins() {
  done
  if [[ $is_node && $is_sensor && ! $is_eval ]]; then
    local PROCS=$(expr $lb_procs / 2)
    if [ "$PROCS" -lt 1 ]; then PROCS=1; else PROCS=$PROCS; fi
  else
    local PROCS=$lb_procs
  fi
  SURIPINS=$(whiptail --noitem --title "$whiptail_title" --checklist "Please select $PROCS cores to pin Suricata to:" 20 75 12 "${filtered_core_str[@]}" 3>&1 1>&2 2>&3 )
@@ -2006,10 +2020,10 @@ whiptail_zeek_pins() {
  done
  if [[ $is_smooshed ]]; then
    local PROCS=$(expr $lb_procs / 2)
    if [ "$PROCS" -lt 1 ]; then PROCS=1; else PROCS=$PROCS; fi
  else
    local PROCS=$lb_procs
  fi
  ZEEKPINS=$(whiptail --noitem --title "$whiptail_title" --checklist "Please select $PROCS cores to pin Zeek to:" 20 75 12 "${cpu_core_list_whiptail[@]}" 3>&1 1>&2 2>&3 )