Mirror of https://github.com/Security-Onion-Solutions/securityonion.git (synced 2025-12-06 17:22:49 +01:00)

Commit: Merge branch 'feature/setup' into foxtrot
@@ -340,6 +340,26 @@ valid_int() {
# {% raw %}

+valid_proxy() {
+  local proxy=$1
+  local url_prefixes=( 'http://' 'https://' )
+
+  local has_prefix=false
+  for prefix in "${url_prefixes[@]}"; do
+    echo "$proxy" | grep -q "$prefix" && has_prefix=true && proxy=${proxy#"$prefix"} && break
+  done
+
+  local url_arr
+  mapfile -t url_arr <<< "$(echo "$proxy" | tr ":" "\n")"
+
+  local valid_url=true
+  if ! valid_ip4 "${url_arr[0]}" && ! valid_fqdn "${url_arr[0]}"; then
+    valid_url=false
+  fi
+
+  [[ $has_prefix == true ]] && [[ $valid_url == true ]] && return 0 || return 1
+}
+
valid_string() {
  local str=$1
  local min_length=${2:-1}
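For reference, a minimal standalone restatement of the check valid_proxy performs: the address must carry an http:// or https:// prefix and the host portion must look like an IPv4 address or FQDN. The sample addresses and the inline regexes below are illustrative stand-ins for the script's valid_ip4/valid_fqdn helpers, not part of the commit.

#!/bin/bash
# Standalone sketch of the valid_proxy logic above: require an http:// or
# https:// prefix, then check that the host part looks like an IPv4 or FQDN.
# The regexes stand in for the script's valid_ip4/valid_fqdn helpers.
check_proxy() {
  local proxy=$1 has_prefix=false
  for prefix in 'http://' 'https://'; do
    [[ $proxy == "$prefix"* ]] && has_prefix=true && proxy=${proxy#"$prefix"} && break
  done
  local host=${proxy%%:*}   # keep only what precedes the first ":" (the host)
  [[ $has_prefix == true ]] &&
    { [[ $host =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}$ ]] || [[ $host =~ ^[A-Za-z0-9._-]+$ ]]; }
}

for candidate in 'http://proxy.example.com:3128' 'https://10.0.0.5:8080' 'proxy.example.com:3128'; do
  check_proxy "$candidate" && echo "valid:   $candidate" || echo "invalid: $candidate"
done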
@@ -304,7 +304,6 @@ rc1_to_rc2() {
  done </tmp/nodes.txt

  INSTALLEDVERSION=rc.2
-
}

rc2_to_rc3() {
@@ -385,7 +384,6 @@ up_2.3.0_to_2.3.20(){
  fi

  INSTALLEDVERSION=2.3.20
-
}

up_2.3.2X_to_2.3.30() {
@@ -395,11 +393,11 @@ up_2.3.2X_to_2.3.30() {
    sed -i -r "s/ (\{\{.*}})$/ '\1'/g" "$pillar"
  done

  # Change the IMAGEREPO
  sed -i "/ imagerepo: 'securityonion'/c\ imagerepo: 'security-onion-solutions'" /opt/so/saltstack/local/pillar/global.sls
  sed -i "/ imagerepo: securityonion/c\ imagerepo: 'security-onion-solutions'" /opt/so/saltstack/local/pillar/global.sls

  # Strelka rule repo pillar addition
  if [ $is_airgap -eq 0 ]; then
    # Add manager as default Strelka YARA rule repo
    sed -i "/^strelka:/a \\ repos: \n - https://$HOSTNAME/repo/rules/strelka" /opt/so/saltstack/local/pillar/global.sls;
@@ -558,7 +556,8 @@ while getopts ":b" opt; do
      exit 1
    fi
    ;;
-  \? ) echo "Usage: cmd [-b]"
+  \? )
+    echo "Usage: cmd [-b]"
    ;;
  esac
done
@@ -4,12 +4,11 @@
{%- if grains['role'] in ['so-node', 'so-heavynode'] %}
{%- set ELASTICSEARCH_HOST = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- set ELASTICSEARCH_PORT = salt['pillar.get']('elasticsearch:es_port', '') -%}
-{%- set LOG_SIZE_LIMIT = salt['pillar.get']('elasticsearch:log_size_limit', '') -%}
{%- elif grains['role'] in ['so-eval', 'so-managersearch', 'so-standalone'] %}
{%- set ELASTICSEARCH_HOST = salt['pillar.get']('manager:mainip', '') -%}
{%- set ELASTICSEARCH_PORT = salt['pillar.get']('manager:es_port', '') -%}
-{%- set LOG_SIZE_LIMIT = salt['pillar.get']('manager:log_size_limit', '') -%}
{%- endif -%}
+{%- set LOG_SIZE_LIMIT = salt['pillar.get']('elasticsearch:log_size_limit', '') -%}

# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
#
@@ -19,6 +19,7 @@
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{% set MANAGER = salt['grains.get']('master') %}
{% set ENGINE = salt['pillar.get']('global:mdengine', '') %}
+{% set proxy = salt['pillar.get']('manager:proxy') %}
# IDSTools Setup
idstoolsdir:
  file.directory:
@@ -71,6 +72,12 @@ so-idstools:
    - image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-idstools:{{ VERSION }}
    - hostname: so-idstools
    - user: socore
+    {% if proxy is not none %}
+    - environment:
+      - http_proxy={{ proxy }}
+      - https_proxy={{ proxy }}
+      - no_proxy={{ salt['pillar.get']('manager:no_proxy') }}
+    {% endif %}
    - binds:
      - /opt/so/conf/idstools/etc:/opt/so/idstools/etc:ro
      - /opt/so/rules/nids:/opt/so/rules/nids:rw
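Once manager:proxy is set in the pillar and this state is applied, a quick way to confirm the rendered container actually received the variables is to inspect its environment. A sketch only; it assumes the so-idstools container is running on the manager.

#!/bin/bash
# Sketch: confirm the proxy environment variables made it into the running
# so-idstools container after 'manager:proxy' was set in the pillar.
if docker ps --format '{{.Names}}' | grep -qx 'so-idstools'; then
  docker exec so-idstools env | grep -iE '^(http_proxy|https_proxy|no_proxy)=' \
    || echo "so-idstools is running without proxy variables"
else
  echo "so-idstools container is not running"
fi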
@@ -14,4 +14,6 @@ clean_requirements_on_remove=1

{% if (grains['role'] not in ['so-eval','so-managersearch', 'so-manager', 'so-standalone']) and salt['pillar.get']('global:managerupdate', '0') %}
proxy=http://{{ salt['pillar.get']('yum:config:proxy', salt['config.get']('master')) }}:3142
+{% elif salt['pillar.get']('manager:proxy') is not None %}
+proxy={{salt['pillar.get']('manager:proxy')}}
{% endif %}
setup/automation/standalone-iso-proxy (new file, 78 lines)
@@ -0,0 +1,78 @@
+#!/bin/bash
+
+# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+TESTING=true
+
+address_type=DHCP
+ADMINUSER=onionuser
+ADMINPASS1=onionuser
+ADMINPASS2=onionuser
+ALLOW_CIDR=0.0.0.0/0
+ALLOW_ROLE=a
+BASICZEEK=2
+BASICSURI=2
+# BLOGS=
+BNICS=eth1
+ZEEKVERSION=ZEEK
+# CURCLOSEDAYS=
+# EVALADVANCED=BASIC
+GRAFANA=1
+# HELIXAPIKEY=
+HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
+HNSENSOR=inherit
+HOSTNAME=standalone
+install_type=STANDALONE
+# LSINPUTBATCHCOUNT=
+# LSINPUTTHREADS=
+# LSPIPELINEBATCH=
+# LSPIPELINEWORKERS=
+MANAGERADV=BASIC
+MANAGERUPDATES=1
+# MDNS=
+# MGATEWAY=
+# MIP=
+# MMASK=
+MNIC=eth0
+# MSEARCH=
+# MSRV=
+# MTU=
+NIDS=Suricata
+# NODE_ES_HEAP_SIZE=
+# NODE_LS_HEAP_SIZE=
+NODESETUP=NODEBASIC
+NSMSETUP=BASIC
+NODEUPDATES=MANAGER
+# OINKCODE=
+OSQUERY=1
+# PATCHSCHEDULEDAYS=
+# PATCHSCHEDULEHOURS=
+PATCHSCHEDULENAME=auto
+PLAYBOOK=1
+proxy_addr=http://10.66.166.30:3128
+# REDIRECTHOST=
+REDIRECTINFO=IP
+RULESETUP=ETOPEN
+# SHARDCOUNT=
+# SKIP_REBOOT=
+SOREMOTEPASS1=onionuser
+SOREMOTEPASS2=onionuser
+STRELKA=1
+THEHIVE=1
+WAZUH=1
+WEBUSER=onionuser@somewhere.invalid
+WEBPASSWD1=0n10nus3r
+WEBPASSWD2=0n10nus3r

setup/files/.curlrc (new empty file)
setup/files/.wgetrc (new empty file)
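Because the automation file is just a series of shell variable assignments, it can be sanity-checked by sourcing it and echoing the values setup will consume. A sketch only; it assumes the file is read from the setup/ directory.

#!/bin/bash
# Sketch: sanity-check an unattended-install answers file by sourcing it and
# printing the proxy-related values before handing it to setup.
answers=automation/standalone-iso-proxy   # path relative to setup/
# shellcheck disable=SC1090
source "$answers"

echo "install type : $install_type"
echo "hostname     : $HOSTNAME"
echo "proxy_addr   : ${proxy_addr:-<none>}"
echo "mgmt nic     : $MNIC (address: $address_type)"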
@@ -1,2 +0,0 @@
-[Service]
-ExecStart=/usr/bin/dockerd /usr/bin/dockerd -H fd:// --registry-mirror "$proxy_addr"
@@ -535,6 +535,55 @@ collect_patch_schedule_name_import() {
  done
}

+collect_proxy() {
+  collect_proxy_details
+  while ! proxy_validate; do
+    if whiptail_invalid_proxy; then
+      collect_proxy_details no_ask
+    else
+      so_proxy=""
+      break
+    fi
+  done
+}
+
+collect_proxy_details() {
+  local ask=${1:-true}
+  local use_proxy
+  if [[ $ask != true ]]; then
+    use_proxy=0
+  else
+    whiptail_proxy_ask
+    use_proxy=$?
+  fi
+
+  if [[ $use_proxy == 0 ]]; then
+    whiptail_proxy_addr "$proxy_addr"
+
+    while ! valid_proxy "$proxy_addr"; do
+      whiptail_invalid_input
+      whiptail_proxy_addr "$proxy_addr"
+    done
+
+    if whiptail_proxy_auth_ask; then
+      whiptail_proxy_auth_user "$proxy_user"
+      whiptail_proxy_auth_pass # Don't pass in existing pass since it's obfuscated
+
+      local url_prefixes=( 'http://' 'https://' )
+      for prefix in "${url_prefixes[@]}"; do
+        if echo "$proxy_addr" | grep "$prefix"; then
+          local proxy=${proxy_addr#"$prefix"}
+          so_proxy="${prefix}${proxy_user}:${proxy_pass}@${proxy}"
+          break
+        fi
+      done
+    else
+      so_proxy="$proxy_addr"
+    fi
+    export proxy
+  fi
+}
+
collect_redirect_host() {
  whiptail_set_redirect_host "$HOSTNAME"

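For reference, a condensed standalone restatement of the URL assembly collect_proxy_details performs when authentication is requested (it uses a [[ ... == prefix* ]] match in place of grep; the credentials and address below are hypothetical).

#!/bin/bash
# Standalone restatement of the credential handling in collect_proxy_details:
# strip the scheme, then rebuild the URL as scheme://user:pass@host:port.
# The values below are hypothetical; setup collects them via whiptail.
proxy_addr='http://proxy.example.com:3128'
proxy_user='alice'
proxy_pass='s3cret'

so_proxy="$proxy_addr"                    # default: no credentials embedded
for prefix in 'http://' 'https://'; do
  if [[ $proxy_addr == "$prefix"* ]]; then
    host_part=${proxy_addr#"$prefix"}     # drop the scheme prefix
    so_proxy="${prefix}${proxy_user}:${proxy_pass}@${host_part}"
    break
  fi
done
echo "$so_proxy"    # http://alice:s3cret@proxy.example.com:3128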
@@ -1432,6 +1481,8 @@ manager_pillar() {
    "manager:"\
    " mainip: '$MAINIP'"\
    " mainint: '$MNIC'"\
+    " proxy: '$so_proxy'"\
+    " no_proxy: '$no_proxy_string'"\
    " esheap: '$ES_HEAP_SIZE'"\
    " esclustername: '{{ grains.host }}'"\
    " freq: 0"\
@@ -1446,7 +1497,6 @@ manager_pillar() {
  printf '%s\n'\
    " elastalert: 1"\
    " es_port: $node_es_port"\
-    " log_size_limit: $log_size_limit"\
    " cur_close_days: $CURCLOSEDAYS"\
    " grafana: $GRAFANA"\
    " osquery: $OSQUERY"\
@@ -1512,7 +1562,6 @@ manager_global() {
    " hnmanager: '$HNMANAGER'"\
    " ntpserver: '$NTPSERVER'"\
    " dockernet: '$DOCKERNET'"\
-    " proxy: '$PROXY'"\
    " mdengine: '$ZEEKVERSION'"\
    " ids: '$NIDS'"\
    " url_base: '$REDIRECTIT'"\
@@ -1690,6 +1739,8 @@ network_init() {
  if [[ "$setup_type" == 'iso' ]]; then
    set_management_interface
  fi
+  set_main_ip >> $setup_log 2>&1
+  compare_main_nic_ip
}

network_init_whiptail() {
@@ -1777,6 +1828,21 @@ print_salt_state_apply() {
  echo "Applying $state Salt state"
}

+proxy_validate() {
+  local test_url="https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/master/KEYS"
+  curl "$test_url" --proxy "$so_proxy" &> /dev/null
+  local ret=$?
+
+  if [[ $ret != 0 ]]; then
+    error "Could not reach $test_url using proxy $so_proxy"
+    if [[ -n $TESTING ]]; then
+      error "Exiting setup"
+      kill -SIGINT "$(ps --pid $$ -oppid=)"; exit 1
+    fi
+  fi
+  return $ret
+}
+
reserve_group_ids() {
  # This is a hack to fix CentOS from taking group IDs that we need
  groupadd -g 928 kratos
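The same reachability test can be run by hand before or after setup. A sketch using the KEYS URL from proxy_validate and a hypothetical proxy address.

#!/bin/bash
# Sketch: verify that a proxy can reach the same test URL proxy_validate uses.
# The proxy address is hypothetical; substitute the one you plan to enter in setup.
test_url="https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/master/KEYS"
so_proxy="http://proxy.example.com:3128"

if curl --silent --show-error --fail --proxy "$so_proxy" -o /dev/null "$test_url"; then
  echo "proxy OK: $so_proxy can reach $test_url"
else
  echo "proxy check failed (curl exit $?); see the error above" >&2
fi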
@@ -2195,7 +2261,67 @@ set_main_ip() {

# Add /usr/sbin to everyone's path
set_path() {
-  echo "complete -cf sudo" > /etc/profile.d/securityonion.sh
+  echo "complete -cf sudo" >> /etc/profile.d/securityonion.sh
+}
+
+set_proxy() {
+
+  # Don't proxy localhost, local ip, and management ip
+  no_proxy_string="localhost, 127.0.0.1, ${MAINIP}, ${HOSTNAME}"
+
+  # Set proxy environment variables used by curl, wget, docker, and others
+  {
+    echo "export use_proxy=on"
+    echo "export http_proxy=\"${so_proxy}\""
+    echo "export https_proxy=\"\$http_proxy\""
+    echo "export ftp_proxy=\"\$http_proxy\""
+    echo "export no_proxy=\"${no_proxy_string}\""
+  } > /etc/profile.d/so-proxy.sh
+
+  source /etc/profile.d/so-proxy.sh
+
+  [[ -d '/etc/systemd/system/docker.service.d' ]] || mkdir -p /etc/systemd/system/docker.service.d
+
+  # Create proxy config for dockerd
+  printf '%s\n'\
+    "[Service]"\
+    "Environment=\"HTTP_PROXY=${so_proxy}\""\
+    "Environment=\"HTTPS_PROXY=${so_proxy}\""\
+    "Environment=\"NO_PROXY=${no_proxy_string}\"" > /etc/systemd/system/docker.service.d/http-proxy.conf
+
+  systemctl daemon-reload
+  command -v docker &> /dev/null && systemctl restart docker
+
+  # Create config.json for docker containers
+  [[ -d /root/.docker ]] || mkdir /root/.docker
+  printf '%s\n'\
+    "{"\
+    " \"proxies\":"\
+    " {"\
+    " \"default\":"\
+    " {"\
+    " \"httpProxy\":\"${so_proxy}\","\
+    " \"httpsProxy\":\"${so_proxy}\","\
+    " \"ftpProxy\":\"${so_proxy}\","\
+    " \"noProxy\":\"${no_proxy_string}\""\
+    " }"\
+    " }"\
+    "}" > /root/.docker/config.json
+
+  # Set proxy for package manager
+  if [ "$OS" = 'centos' ]; then
+    echo "proxy=$so_proxy" >> /etc/yum.conf
+  else
+    # Set it up so the updates roll through the manager
+    printf '%s\n'\
+      "Acquire::http::Proxy \"$so_proxy\";"\
+      "Acquire::https::Proxy \"$so_proxy\";" > /etc/apt/apt.conf.d/00-proxy.conf
+  fi
+
+  # Set global git proxy
+  printf '%s\n'\
+    "[http]"\
+    " proxy = ${so_proxy}" > /etc/gitconfig
}

setup_salt_master_dirs() {
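After set_proxy runs, the proxy is wired into several components at once, so a quick audit of each file it writes helps trace which component missed it. A sketch; the paths are taken from the function above.

#!/bin/bash
# Sketch: audit the files set_proxy writes so a proxy problem can be traced to
# the component (login shells, dockerd, docker client, yum/apt, git) that missed it.
for f in \
  /etc/profile.d/so-proxy.sh \
  /etc/systemd/system/docker.service.d/http-proxy.conf \
  /root/.docker/config.json \
  /etc/gitconfig
do
  echo "== $f =="
  if [ -f "$f" ]; then
    grep -i proxy "$f"
  else
    echo "(missing)"
  fi
done

# dockerd only picks the drop-in up after daemon-reload and a restart:
systemctl show docker --property=Environment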
@@ -198,6 +198,10 @@ if ! [[ -f $install_opt_file ]]; then
    printf '%s\n' \
      "MNIC=$MNIC" \
      "HOSTNAME=$HOSTNAME" > "$net_init_file"
+    if [[ $is_manager ]]; then
+      collect_proxy
+      [[ -n "$so_proxy" ]] && set_proxy >> $setup_log 2>&1
+    fi
    whiptail_net_setup_complete
  else
    whiptail_install_type
@@ -290,6 +294,10 @@ if ! [[ -f $install_opt_file ]]; then
    network_init
  fi

+  if [[ $is_manager ]]; then
+    collect_proxy
+  fi
+
  if [[ -n "$TURBO" ]]; then
    use_turbo_proxy
  fi
@@ -537,10 +545,10 @@ catch() {
  exit
}

-# This block sets REDIRECTIT which is used by a function outside the below subshell
-set_main_ip >> $setup_log 2>&1
-compare_main_nic_ip
+# Set REDIRECTIT variable, which is used by a function outside the below subshell
set_redirect >> $setup_log 2>&1
+[[ -n "$so_proxy" ]] && set_proxy >> $setup_log 2>&1
+

# Begin install
{
@@ -588,6 +588,19 @@ whiptail_invalid_input() { # TODO: This should accept a list of arguments to spe

}

+whiptail_invalid_proxy() {
+  [ -n "$TESTING" ] && return
+
+  local message
+  read -r -d '' message <<- EOM
+    Could not reach test url using proxy ${proxy_addr}.
+
+    Check log (${setup_log}) for details.
+  EOM
+
+  whiptail --title "Security Onion Setup" --yesno "$message" --yes-button "Enter Again" --no-button "Skip" 10 60
+}
+
whiptail_invalid_string() {
  [ -n "$TESTING" ] && return

@@ -632,10 +645,22 @@ whiptail_log_size_limit() {

  [ -n "$TESTING" ] && return

-  log_size_limit=$(whiptail --title "Security Onion Setup" --inputbox \
-    "Please specify the amount of disk space (in GB) you would like to allocate for Elasticsearch data storage: \n\
-    By default, this is set to 80% of the disk space allotted for /nsm." 10 75 "$1" 3>&1 1>&2 2>&3)
+  case $install_type in
+    STANDALONE | EVAL | HEAVYNODE)
+      percentage=50
+      ;;
+    *)
+      percentage=80
+      ;;
+  esac
+
+  read -r -d '' message <<- EOM
+    Please specify the amount of disk space (in GB) you would like to allocate for Elasticsearch data storage.
+
+    By default, this is set to ${percentage}% of the disk space allotted for /nsm.
+  EOM
+
+  log_size_limit=$(whiptail --title "Security Onion Setup" --inputbox "$message" 11 75 "$1" 3>&1 1>&2 2>&3)

  local exitstatus=$?
  whiptail_check_exitstatus $exitstatus
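The case statement above only selects which percentage the prompt text mentions; per that text, the suggested default passed into the dialog is derived from the space allotted to /nsm. A rough sketch of that percentage arithmetic, assuming /nsm is a mounted filesystem and GNU df is available.

#!/bin/bash
# Sketch: derive a log_size_limit suggestion (in GB) as a percentage of the
# filesystem backing /nsm, mirroring the 50%/80% split in the prompt text.
install_type=${1:-STANDALONE}

case $install_type in
  STANDALONE | EVAL | HEAVYNODE) percentage=50 ;;
  *)                             percentage=80 ;;
esac

# GNU df: size of the filesystem holding /nsm, in whole gigabytes.
nsm_size_gb=$(df -BG --output=size /nsm 2>/dev/null | tail -n1 | tr -dc '0-9')
if [ -n "$nsm_size_gb" ]; then
  echo "suggested log_size_limit: $(( nsm_size_gb * percentage / 100 )) GB (${percentage}% of ${nsm_size_gb} GB)"
else
  echo "/nsm is not mounted here, so no suggestion" >&2
fi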
@@ -1204,6 +1229,45 @@ whiptail_patch_schedule_select_hours() {

}

+whiptail_proxy_ask() {
+  [ -n "$TESTING" ] && return
+
+  whiptail --title "Security Onion Setup" --yesno "Do you want to set a proxy server for this installation?" 7 60
+}
+
+whiptail_proxy_addr() {
+  [ -n "$TESTING" ] && return
+  local message
+  read -r -d '' message <<- EOM
+    Please input the proxy server you wish to use, including the URL prefix (ex: https://your.proxy.com:1234).
+
+    If your proxy requires a username and password do not include them in your input. Setup will ask for those values next.
+  EOM
+
+  proxy_addr=$(whiptail --title "Security Onion Setup" --inputbox "$message" 13 60 "$1" 3>&1 1>&2 2>&3)
+
+  local exitstatus=$?
+  whiptail_check_exitstatus $exitstatus
+}
+
+whiptail_proxy_auth_ask() {
+  [ -n "$TESTING" ] && return
+
+  whiptail --title "Security Onion Setup" --yesno "Does your proxy require authentication?" 7 60
+}
+
+whiptail_proxy_auth_user() {
+  [ -n "$TESTING" ] && return
+
+  proxy_user=$(whiptail --title "Security Onion Setup" --inputbox "Please input the proxy user:" 8 60 "$1" 3>&1 1>&2 2>&3)
+}
+
+whiptail_proxy_auth_pass() {
+  [ -n "$TESTING" ] && return
+
+  proxy_pass=$(whiptail --title "Security Onion Setup" --passwordbox "Please input the proxy password:" 8 60 3>&1 1>&2 2>&3)
+}
+
whiptail_requirements_error() {

  local requirement_needed=$1
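All of these dialogs lean on the same 3>&1 1>&2 2>&3 idiom: whiptail prints the typed answer to stderr, so the descriptors are swapped to let command substitution capture it while the dialog itself still reaches the terminal. A minimal sketch of the pattern, not part of the commit.

#!/bin/bash
# Sketch of the fd-swap idiom used by the dialogs above. whiptail draws its UI
# on the terminal and writes the typed answer to stderr, so 3>&1 1>&2 2>&3
# routes the answer into $(...) while the dialog stays visible.
if whiptail --title "Demo" --yesno "Configure a proxy?" 7 60; then   # yes/no answers come back as the exit status
  addr=$(whiptail --title "Demo" --inputbox "Proxy URL:" 8 60 "http://proxy.example.com:3128" 3>&1 1>&2 2>&3)
  if [ $? -eq 0 ]; then   # non-zero means the user picked Cancel or pressed Esc
    echo "entered: $addr"
  fi
fi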