Merge branch 'feature/setup' into foxtrot

This commit is contained in:
William Wernert
2021-03-05 12:53:31 -05:00
12 changed files with 428 additions and 127 deletions

View File

@@ -340,6 +340,26 @@ valid_int() {
# {% raw %}
# Validate a proxy URL of the form http(s)://host[:port].
# Returns 0 only when the value starts with a known URL prefix AND the host
# part passes valid_ip4 or valid_fqdn; returns 1 otherwise.
valid_proxy() {
local proxy=$1
local url_prefixes=( 'http://' 'https://' )
local has_prefix=false
local prefix
for prefix in "${url_prefixes[@]}"; do
# Anchor the match at the start of the string; the original
# 'echo | grep -q' accepted a prefix appearing anywhere in the value.
if [[ $proxy == "$prefix"* ]]; then
has_prefix=true
proxy=${proxy#"$prefix"}
break
fi
done
# Split host[:port] on ':'; element 0 is the host to validate.
local url_arr
IFS=':' read -r -a url_arr <<< "$proxy"
local valid_url=true
if ! valid_ip4 "${url_arr[0]}" && ! valid_fqdn "${url_arr[0]}"; then
valid_url=false
fi
[[ $has_prefix == true && $valid_url == true ]]
}
valid_string() {
local str=$1
local min_length=${2:-1}

View File

@@ -105,12 +105,12 @@ check_airgap() {
# See if this is an airgap install
AIRGAP=$(cat /opt/so/saltstack/local/pillar/global.sls | grep airgap: | awk '{print $2}')
if [[ "$AIRGAP" == "True" ]]; then
is_airgap=0
UPDATE_DIR=/tmp/soagupdate/SecurityOnion
AGDOCKER=/tmp/soagupdate/docker
AGREPO=/tmp/soagupdate/Packages
is_airgap=0
UPDATE_DIR=/tmp/soagupdate/SecurityOnion
AGDOCKER=/tmp/soagupdate/docker
AGREPO=/tmp/soagupdate/Packages
else
is_airgap=1
is_airgap=1
fi
}
@@ -256,14 +256,14 @@ playbook() {
}
pillar_changes() {
# This function is to add any new pillar items if needed.
# NOTE(review): the merge rendering duplicated every statement in this body,
# which would run each migration twice; deduplicated here. Migration helpers
# advance INSTALLEDVERSION (e.g. rc1_to_rc2 sets it to rc.2), so the checks
# chain within a single run.
echo "Checking to see if pillar changes are needed."
[[ "$INSTALLEDVERSION" =~ rc.1 ]] && rc1_to_rc2
[[ "$INSTALLEDVERSION" =~ rc.2 ]] && rc2_to_rc3
[[ "$INSTALLEDVERSION" =~ rc.3 ]] && rc3_to_2.3.0
[[ "$INSTALLEDVERSION" == 2.3.0 || "$INSTALLEDVERSION" == 2.3.1 || "$INSTALLEDVERSION" == 2.3.2 || "$INSTALLEDVERSION" == 2.3.10 ]] && up_2.3.0_to_2.3.20
[[ "$INSTALLEDVERSION" == 2.3.20 || "$INSTALLEDVERSION" == 2.3.21 ]] && up_2.3.2X_to_2.3.30
}
rc1_to_rc2() {
@@ -296,15 +296,14 @@ rc1_to_rc2() {
done </tmp/nodes.txt
# Add the nodes back using hostname
while read p; do
local NAME=$(echo $p | awk '{print $1}')
local EHOSTNAME=$(echo $p | awk -F"_" '{print $1}')
local IP=$(echo $p | awk '{print $2}')
echo "Adding the new cross cluster config for $NAME"
curl -XPUT http://localhost:9200/_cluster/settings -H'Content-Type: application/json' -d '{"persistent": {"search": {"remote": {"'$NAME'": {"skip_unavailable": "true", "seeds": ["'$EHOSTNAME':9300"]}}}}}'
local NAME=$(echo $p | awk '{print $1}')
local EHOSTNAME=$(echo $p | awk -F"_" '{print $1}')
local IP=$(echo $p | awk '{print $2}')
echo "Adding the new cross cluster config for $NAME"
curl -XPUT http://localhost:9200/_cluster/settings -H'Content-Type: application/json' -d '{"persistent": {"search": {"remote": {"'$NAME'": {"skip_unavailable": "true", "seeds": ["'$EHOSTNAME':9300"]}}}}}'
done </tmp/nodes.txt
INSTALLEDVERSION=rc.2
}
rc2_to_rc3() {
@@ -334,10 +333,10 @@ rc3_to_2.3.0() {
fi
{
echo "redis_settings:"
echo " redis_maxmemory: 827"
echo "playbook:"
echo " api_key: de6639318502476f2fa5aa06f43f51fb389a3d7f"
echo "redis_settings:"
echo " redis_maxmemory: 827"
echo "playbook:"
echo " api_key: de6639318502476f2fa5aa06f43f51fb389a3d7f"
} >> /opt/so/saltstack/local/pillar/global.sls
sed -i 's/playbook:/playbook_db:/' /opt/so/saltstack/local/pillar/secrets.sls
@@ -385,7 +384,6 @@ up_2.3.0_to_2.3.20(){
fi
INSTALLEDVERSION=2.3.20
}
up_2.3.2X_to_2.3.30() {
@@ -395,11 +393,11 @@ up_2.3.2X_to_2.3.30() {
sed -i -r "s/ (\{\{.*}})$/ '\1'/g" "$pillar"
done
# Change the IMAGEREPO
# Change the IMAGEREPO
sed -i "/ imagerepo: 'securityonion'/c\ imagerepo: 'security-onion-solutions'" /opt/so/saltstack/local/pillar/global.sls
sed -i "/ imagerepo: securityonion/c\ imagerepo: 'security-onion-solutions'" /opt/so/saltstack/local/pillar/global.sls
# Strelka rule repo pillar addition
# Strelka rule repo pillar addition
if [ $is_airgap -eq 0 ]; then
# Add manager as default Strelka YARA rule repo
sed -i "/^strelka:/a \\ repos: \n - https://$HOSTNAME/repo/rules/strelka" /opt/so/saltstack/local/pillar/global.sls;
@@ -414,10 +412,10 @@ space_check() {
# Check to see if there is enough space
CURRENTSPACE=$(df -BG / | grep -v Avail | awk '{print $4}' | sed 's/.$//')
if [ "$CURRENTSPACE" -lt "10" ]; then
echo "You are low on disk space. Upgrade will try and clean up space.";
clean_dockers
echo "You are low on disk space. Upgrade will try and clean up space.";
clean_dockers
else
echo "Plenty of space for upgrading"
echo "Plenty of space for upgrading"
fi
}
@@ -427,16 +425,16 @@ thehive_maint() {
COUNT=0
THEHIVE_CONNECTED="no"
while [[ "$COUNT" -le 240 ]]; do
curl --output /dev/null --silent --head --fail -k "https://localhost/thehive/api/alert"
if [ $? -eq 0 ]; then
THEHIVE_CONNECTED="yes"
echo "connected!"
break
else
((COUNT+=1))
sleep 1
echo -n "."
fi
curl --output /dev/null --silent --head --fail -k "https://localhost/thehive/api/alert"
if [ $? -eq 0 ]; then
THEHIVE_CONNECTED="yes"
echo "connected!"
break
else
((COUNT+=1))
sleep 1
echo -n "."
fi
done
if [ "$THEHIVE_CONNECTED" == "yes" ]; then
echo "Migrating thehive databases if needed."
@@ -471,80 +469,80 @@ update_version() {
}
upgrade_check() {
# Let's make sure we actually need to update.
# Exits the whole script (status 0) when the shipped VERSION file matches
# the installed version. (Duplicated residue lines removed; path quoted.)
NEWVERSION=$(cat "$UPDATE_DIR/VERSION")
if [ "$INSTALLEDVERSION" == "$NEWVERSION" ]; then
echo "You are already running the latest version of Security Onion."
exit 0
fi
}
upgrade_check_salt() {
# Read the Salt version shipped with the update and flag an upgrade
# (UPGRADESALT=1) when it differs from the installed version.
# (Duplicated residue lines removed; awk quoting normalized; path quoted.)
NEWSALTVERSION=$(grep version: "$UPDATE_DIR/salt/salt/master.defaults.yaml" | awk '{print $2}')
if [ "$INSTALLEDSALTVERSION" == "$NEWSALTVERSION" ]; then
echo "You are already running the correct version of Salt for Security Onion."
else
UPGRADESALT=1
fi
}
upgrade_salt() {
# Upgrade the Salt packages to $NEWSALTVERSION via the bundled bootstrap
# script, unlocking and re-locking the package version pins around it.
# NOTE(review): the merge rendering duplicated this entire body, which would
# have performed the whole upgrade twice; deduplicated here.
SALTUPGRADED=True
echo "Performing upgrade of Salt from $INSTALLEDSALTVERSION to $NEWSALTVERSION."
echo ""
# If CentOS
if [ "$OS" == "centos" ]; then
echo "Removing yum versionlock for Salt."
echo ""
yum versionlock delete "salt-*"
echo "Updating Salt packages and restarting services."
echo ""
# Airgap installs (is_airgap == 0) pass -r to bootstrap from local repos.
if [ $is_airgap -eq 0 ]; then
sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -r -F -M -x python3 stable "$NEWSALTVERSION"
else
sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -F -M -x python3 stable "$NEWSALTVERSION"
fi
echo "Applying yum versionlock for Salt."
echo ""
yum versionlock add "salt-*"
# Else do Ubuntu things
elif [ "$OS" == "ubuntu" ]; then
echo "Removing apt hold for Salt."
echo ""
apt-mark unhold "salt-common"
apt-mark unhold "salt-master"
apt-mark unhold "salt-minion"
echo "Updating Salt packages and restarting services."
echo ""
sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -F -M -x python3 stable "$NEWSALTVERSION"
echo "Applying apt hold for Salt."
echo ""
apt-mark hold "salt-common"
apt-mark hold "salt-master"
apt-mark hold "salt-minion"
fi
}
verify_latest_update_script() {
# Check to see if the update scripts match. If not run the new one.
# Compares md5sums of soup/so-common/so-image-common against the copies in
# $UPDATE_DIR; on mismatch installs the new copies, re-applies the common
# state, and exits so the user re-runs the updated soup.
# (Duplicated residue lines from the merge rendering removed.)
CURRENTSOUP=$(md5sum /opt/so/saltstack/default/salt/common/tools/sbin/soup | awk '{print $1}')
GITSOUP=$(md5sum $UPDATE_DIR/salt/common/tools/sbin/soup | awk '{print $1}')
CURRENTCMN=$(md5sum /opt/so/saltstack/default/salt/common/tools/sbin/so-common | awk '{print $1}')
GITCMN=$(md5sum $UPDATE_DIR/salt/common/tools/sbin/so-common | awk '{print $1}')
CURRENTIMGCMN=$(md5sum /opt/so/saltstack/default/salt/common/tools/sbin/so-image-common | awk '{print $1}')
GITIMGCMN=$(md5sum $UPDATE_DIR/salt/common/tools/sbin/so-image-common | awk '{print $1}')
if [[ "$CURRENTSOUP" == "$GITSOUP" && "$CURRENTCMN" == "$GITCMN" && "$CURRENTIMGCMN" == "$GITIMGCMN" ]]; then
echo "This version of the soup script is up to date. Proceeding."
else
echo "You are not running the latest soup version. Updating soup and its components. Might take multiple runs to complete"
cp $UPDATE_DIR/salt/common/tools/sbin/soup $DEFAULT_SALT_DIR/salt/common/tools/sbin/
cp $UPDATE_DIR/salt/common/tools/sbin/so-common $DEFAULT_SALT_DIR/salt/common/tools/sbin/
cp $UPDATE_DIR/salt/common/tools/sbin/so-image-common $DEFAULT_SALT_DIR/salt/common/tools/sbin/
salt-call state.apply common queue=True
echo ""
echo "soup has been updated. Please run soup again."
exit 0
fi
}
main () {
@@ -557,9 +555,10 @@ while getopts ":b" opt; do
echo "Batch size must be a number greater than 0."
exit 1
fi
;;
\? ) echo "Usage: cmd [-b]"
;;
;;
\? )
echo "Usage: cmd [-b]"
;;
esac
done

View File

@@ -4,12 +4,11 @@
{%- if grains['role'] in ['so-node', 'so-heavynode'] %}
{%- set ELASTICSEARCH_HOST = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- set ELASTICSEARCH_PORT = salt['pillar.get']('elasticsearch:es_port', '') -%}
{%- set LOG_SIZE_LIMIT = salt['pillar.get']('elasticsearch:log_size_limit', '') -%}
{%- elif grains['role'] in ['so-eval', 'so-managersearch', 'so-standalone'] %}
{%- set ELASTICSEARCH_HOST = salt['pillar.get']('manager:mainip', '') -%}
{%- set ELASTICSEARCH_PORT = salt['pillar.get']('manager:es_port', '') -%}
{%- set LOG_SIZE_LIMIT = salt['pillar.get']('manager:log_size_limit', '') -%}
{%- endif -%}
{%- set LOG_SIZE_LIMIT = salt['pillar.get']('elasticsearch:log_size_limit', '') -%}
# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
#

View File

@@ -19,6 +19,7 @@
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{% set MANAGER = salt['grains.get']('master') %}
{% set ENGINE = salt['pillar.get']('global:mdengine', '') %}
{% set proxy = salt['pillar.get']('manager:proxy') %}
# IDSTools Setup
idstoolsdir:
file.directory:
@@ -71,6 +72,12 @@ so-idstools:
- image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-idstools:{{ VERSION }}
- hostname: so-idstools
- user: socore
{% if proxy is not none %}
- environment:
- http_proxy={{ proxy }}
- https_proxy={{ proxy }}
- no_proxy={{ salt['pillar.get']('manager:no_proxy') }}
{% endif %}
- binds:
- /opt/so/conf/idstools/etc:/opt/so/idstools/etc:ro
- /opt/so/rules/nids:/opt/so/rules/nids:rw

View File

@@ -14,4 +14,6 @@ clean_requirements_on_remove=1
{% if (grains['role'] not in ['so-eval','so-managersearch', 'so-manager', 'so-standalone']) and salt['pillar.get']('global:managerupdate', '0') %}
proxy=http://{{ salt['pillar.get']('yum:config:proxy', salt['config.get']('master')) }}:3142
{% elif salt['pillar.get']('manager:proxy') is not None %}
proxy={{salt['pillar.get']('manager:proxy')}}
{% endif %}

View File

@@ -0,0 +1,78 @@
#!/bin/bash
# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Unattended-install answer file: pre-seeds the setup variables so the
# installer can run non-interactively (TESTING=true makes the whiptail_*
# prompt functions return early). Commented-out entries are optional
# answers deliberately left unset.
# NOTE(review): presumably sourced by so-setup in automated test runs —
# confirm; credentials below are obviously throwaway test values.
TESTING=true
address_type=DHCP
ADMINUSER=onionuser
ADMINPASS1=onionuser
ADMINPASS2=onionuser
ALLOW_CIDR=0.0.0.0/0
ALLOW_ROLE=a
BASICZEEK=2
BASICSURI=2
# BLOGS=
BNICS=eth1
ZEEKVERSION=ZEEK
# CURCLOSEDAYS=
# EVALADVANCED=BASIC
GRAFANA=1
# HELIXAPIKEY=
HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
HNSENSOR=inherit
HOSTNAME=standalone
install_type=STANDALONE
# LSINPUTBATCHCOUNT=
# LSINPUTTHREADS=
# LSPIPELINEBATCH=
# LSPIPELINEWORKERS=
MANAGERADV=BASIC
MANAGERUPDATES=1
# MDNS=
# MGATEWAY=
# MIP=
# MMASK=
MNIC=eth0
# MSEARCH=
# MSRV=
# MTU=
NIDS=Suricata
# NODE_ES_HEAP_SIZE=
# NODE_LS_HEAP_SIZE=
NODESETUP=NODEBASIC
NSMSETUP=BASIC
NODEUPDATES=MANAGER
# OINKCODE=
OSQUERY=1
# PATCHSCHEDULEDAYS=
# PATCHSCHEDULEHOURS=
PATCHSCHEDULENAME=auto
PLAYBOOK=1
# Proxy exercised by collect_proxy/proxy_validate during automated setup.
proxy_addr=http://10.66.166.30:3128
# REDIRECTHOST=
REDIRECTINFO=IP
RULESETUP=ETOPEN
# SHARDCOUNT=
# SKIP_REBOOT=
SOREMOTEPASS1=onionuser
SOREMOTEPASS2=onionuser
STRELKA=1
THEHIVE=1
WAZUH=1
WEBUSER=onionuser@somewhere.invalid
WEBPASSWD1=0n10nus3r
WEBPASSWD2=0n10nus3r

0
setup/files/.curlrc Normal file
View File

0
setup/files/.wgetrc Normal file
View File

View File

@@ -1,2 +0,0 @@
[Service]
ExecStart=/usr/bin/dockerd -H fd:// --registry-mirror "$proxy_addr"

View File

@@ -535,6 +535,55 @@ collect_patch_schedule_name_import() {
done
}
# Collect proxy configuration from the user, re-prompting while validation
# fails; clears so_proxy entirely if the user chooses to skip the proxy.
collect_proxy() {
collect_proxy_details
until proxy_validate; do
if ! whiptail_invalid_proxy; then
so_proxy=""
break
fi
collect_proxy_details no_ask
done
}
# Interactively gather proxy settings and compose so_proxy.
#
# Globals read:  proxy_addr, proxy_user, proxy_pass (set by whiptail_* prompts)
# Globals set:   so_proxy - final proxy URL; when proxy auth is used the
#                credentials are embedded as prefix://user:pass@host:port
# Arguments:     $1 - anything other than 'true' (e.g. 'no_ask') skips the
#                "do you want a proxy?" question and goes straight to the
#                address prompt (used on re-entry after validation failure)
collect_proxy_details() {
local ask=${1:-true}
local use_proxy
if [[ $ask != true ]]; then
# Re-entry path: assume the earlier "yes, use a proxy" answer still holds.
use_proxy=0
else
whiptail_proxy_ask
use_proxy=$?
fi
if [[ $use_proxy == 0 ]]; then
whiptail_proxy_addr "$proxy_addr"
# Keep prompting until the address passes valid_proxy (prefix + host checks).
while ! valid_proxy "$proxy_addr"; do
whiptail_invalid_input
whiptail_proxy_addr "$proxy_addr"
done
if whiptail_proxy_auth_ask; then
whiptail_proxy_auth_user "$proxy_user"
whiptail_proxy_auth_pass # Don't pass in existing pass since it's obfuscated
# Rebuild the URL with credentials inserted after the prefix.
local url_prefixes=( 'http://' 'https://' )
for prefix in "${url_prefixes[@]}"; do
if echo "$proxy_addr" | grep "$prefix"; then
local proxy=${proxy_addr#"$prefix"}
so_proxy="${prefix}${proxy_user}:${proxy_pass}@${proxy}"
break
fi
done
else
so_proxy="$proxy_addr"
fi
# NOTE(review): this exports the prefix-stripped 'proxy' value, not
# 'so_proxy'; looks like it may have meant 'export so_proxy' — confirm.
export proxy
fi
}
collect_redirect_host() {
whiptail_set_redirect_host "$HOSTNAME"
@@ -744,10 +793,10 @@ compare_main_nic_ip() {
if ! [[ $MNIC =~ ^(tun|wg|vpn).*$ ]]; then
if [[ "$MAINIP" != "$MNIC_IP" ]]; then
read -r -d '' message <<- EOM
The IP being routed by Linux is not the IP address assigned to the management interface ($MNIC).
The IP being routed by Linux is not the IP address assigned to the management interface ($MNIC).
This is not a supported configuration, please remediate and rerun setup.
EOM
EOM
whiptail --title "Security Onion Setup" --msgbox "$message" 10 75
kill -SIGINT "$(ps --pid $$ -oppid=)"; exit 1
fi
@@ -1432,6 +1481,8 @@ manager_pillar() {
"manager:"\
" mainip: '$MAINIP'"\
" mainint: '$MNIC'"\
" proxy: '$so_proxy'"\
" no_proxy: '$no_proxy_string'"\
" esheap: '$ES_HEAP_SIZE'"\
" esclustername: '{{ grains.host }}'"\
" freq: 0"\
@@ -1446,7 +1497,6 @@ manager_pillar() {
printf '%s\n'\
" elastalert: 1"\
" es_port: $node_es_port"\
" log_size_limit: $log_size_limit"\
" cur_close_days: $CURCLOSEDAYS"\
" grafana: $GRAFANA"\
" osquery: $OSQUERY"\
@@ -1512,7 +1562,6 @@ manager_global() {
" hnmanager: '$HNMANAGER'"\
" ntpserver: '$NTPSERVER'"\
" dockernet: '$DOCKERNET'"\
" proxy: '$PROXY'"\
" mdengine: '$ZEEKVERSION'"\
" ids: '$NIDS'"\
" url_base: '$REDIRECTIT'"\
@@ -1690,6 +1739,8 @@ network_init() {
if [[ "$setup_type" == 'iso' ]]; then
set_management_interface
fi
set_main_ip >> $setup_log 2>&1
compare_main_nic_ip
}
network_init_whiptail() {
@@ -1777,6 +1828,21 @@ print_salt_state_apply() {
echo "Applying $state Salt state"
}
# Verify the configured proxy ($so_proxy) can actually reach a known-good
# URL. Returns curl's exit status; under TESTING a failure aborts setup.
proxy_validate() {
local test_url="https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/master/KEYS"
local ret=0
curl "$test_url" --proxy "$so_proxy" &> /dev/null || ret=$?
if [[ $ret -ne 0 ]]; then
error "Could not reach $test_url using proxy $so_proxy"
if [[ -n $TESTING ]]; then
error "Exiting setup"
kill -SIGINT "$(ps --pid $$ -oppid=)"; exit 1
fi
fi
return $ret
}
reserve_group_ids() {
# This is a hack to fix CentOS from taking group IDs that we need
groupadd -g 928 kratos
@@ -2195,7 +2261,67 @@ set_main_ip() {
# Add /usr/sbin to everyone's path
set_path() {
# NOTE(review): the merge rendering showed this identical line twice, once
# with '>' and once with '>>', which would write the completion rule twice.
# Keeping the single append so any earlier content written to this profile
# script (e.g. a PATH line) is preserved — confirm against upstream.
echo "complete -cf sudo" >> /etc/profile.d/securityonion.sh
}
# Configure system-wide proxy settings from $so_proxy: shell profile env
# vars, dockerd and docker-client config, yum/apt, and global git. Also
# builds no_proxy_string, which is written into the manager pillar later.
set_proxy() {
# Don't proxy localhost, local ip, and management ip
no_proxy_string="localhost, 127.0.0.1, ${MAINIP}, ${HOSTNAME}"
# Set proxy environment variables used by curl, wget, docker, and others
{
echo "export use_proxy=on"
echo "export http_proxy=\"${so_proxy}\""
echo "export https_proxy=\"\$http_proxy\""
echo "export ftp_proxy=\"\$http_proxy\""
echo "export no_proxy=\"${no_proxy_string}\""
} > /etc/profile.d/so-proxy.sh
# Load the vars into the current shell so the rest of setup uses the proxy.
source /etc/profile.d/so-proxy.sh
[[ -d '/etc/systemd/system/docker.service.d' ]] || mkdir -p /etc/systemd/system/docker.service.d
# Create proxy config for dockerd
printf '%s\n'\
"[Service]"\
"Environment=\"HTTP_PROXY=${so_proxy}\""\
"Environment=\"HTTPS_PROXY=${so_proxy}\""\
"Environment=\"NO_PROXY=${no_proxy_string}\"" > /etc/systemd/system/docker.service.d/http-proxy.conf
systemctl daemon-reload
# Only restart docker if it is already installed at this point of setup.
command -v docker &> /dev/null && systemctl restart docker
# Create config.json for docker containers
[[ -d /root/.docker ]] || mkdir /root/.docker
printf '%s\n'\
"{"\
" \"proxies\":"\
" {"\
" \"default\":"\
" {"\
" \"httpProxy\":\"${so_proxy}\","\
" \"httpsProxy\":\"${so_proxy}\","\
" \"ftpProxy\":\"${so_proxy}\","\
" \"noProxy\":\"${no_proxy_string}\""\
" }"\
" }"\
"}" > /root/.docker/config.json
# Set proxy for package manager
if [ "$OS" = 'centos' ]; then
echo "proxy=$so_proxy" >> /etc/yum.conf
else
# Set it up so the updates roll through the manager
printf '%s\n'\
"Acquire::http::Proxy \"$so_proxy\";"\
"Acquire::https::Proxy \"$so_proxy\";" > /etc/apt/apt.conf.d/00-proxy.conf
fi
# Set global git proxy
printf '%s\n'\
"[http]"\
" proxy = ${so_proxy}" > /etc/gitconfig
}
setup_salt_master_dirs() {

View File

@@ -198,6 +198,10 @@ if ! [[ -f $install_opt_file ]]; then
printf '%s\n' \
"MNIC=$MNIC" \
"HOSTNAME=$HOSTNAME" > "$net_init_file"
if [[ $is_manager ]]; then
collect_proxy
[[ -n "$so_proxy" ]] && set_proxy >> $setup_log 2>&1
fi
whiptail_net_setup_complete
else
whiptail_install_type
@@ -290,6 +294,10 @@ if ! [[ -f $install_opt_file ]]; then
network_init
fi
if [[ $is_manager ]]; then
collect_proxy
fi
if [[ -n "$TURBO" ]]; then
use_turbo_proxy
fi
@@ -336,14 +344,14 @@ minion_type=$(get_minion_type)
set_default_log_size >> $setup_log 2>&1
if [[ $is_helix ]]; then
RULESETUP=${RULESETUP:-ETOPEN}
RULESETUP=${RULESETUP:-ETOPEN}
NSMSETUP=${NSMSETUP:-BASIC}
HNSENSOR=${HNSENSOR:-inherit}
MANAGERUPDATES=${MANAGERUPDATES:-0}
fi
if [[ $is_helix || ( $is_manager && $is_node ) ]]; then
RULESETUP=${RULESETUP:-ETOPEN}
RULESETUP=${RULESETUP:-ETOPEN}
NSMSETUP=${NSMSETUP:-BASIC}
fi
@@ -363,7 +371,7 @@ fi
if [[ $is_import ]]; then
PATCHSCHEDULENAME=${PATCHSCHEDULENAME:-auto}
MTU=${MTU:-1500}
RULESETUP=${RULESETUP:-ETOPEN}
RULESETUP=${RULESETUP:-ETOPEN}
NSMSETUP=${NSMSETUP:-BASIC}
HNSENSOR=${HNSENSOR:-inherit}
MANAGERUPDATES=${MANAGERUPDATES:-0}
@@ -537,10 +545,10 @@ catch() {
exit
}
# This block sets REDIRECTIT which is used by a function outside the below subshell
set_main_ip >> $setup_log 2>&1
compare_main_nic_ip
# Set REDIRECTIT variable, which is used by a function outside the below subshell
set_redirect >> $setup_log 2>&1
[[ -n "$so_proxy" ]] && set_proxy >> $setup_log 2>&1
# Begin install
{

View File

@@ -588,8 +588,21 @@ whiptail_invalid_input() { # TODO: This should accept a list of arguments to spe
}
# After a failed proxy validation, ask whether to re-enter the proxy
# address ("Enter Again" -> 0) or skip proxy configuration ("Skip" -> 1).
whiptail_invalid_proxy() {
if [[ -n "$TESTING" ]]; then
return
fi
local message
read -r -d '' message <<- EOM
Could not reach test url using proxy ${proxy_addr}.
Check log (${setup_log}) for details.
EOM
whiptail --title "Security Onion Setup" --yesno "$message" --yes-button "Enter Again" --no-button "Skip" 10 60
}
whiptail_invalid_string() {
[ -n "$TESTING" ] && return
[ -n "$TESTING" ] && return
whiptail --title "Security Onion Setup" --msgbox "Invalid input, please try again.\n\nThe $1 cannot contain spaces." 9 45
@@ -632,10 +645,22 @@ whiptail_log_size_limit() {
[ -n "$TESTING" ] && return
case $install_type in
STANDALONE | EVAL | HEAVYNODE)
percentage=50
;;
*)
percentage=80
;;
esac
log_size_limit=$(whiptail --title "Security Onion Setup" --inputbox \
"Please specify the amount of disk space (in GB) you would like to allocate for Elasticsearch data storage: \n\
By default, this is set to 80% of the disk space allotted for /nsm." 10 75 "$1" 3>&1 1>&2 2>&3)
read -r -d '' message <<- EOM
Please specify the amount of disk space (in GB) you would like to allocate for Elasticsearch data storage.
By default, this is set to ${percentage}% of the disk space allotted for /nsm.
EOM
log_size_limit=$(whiptail --title "Security Onion Setup" --inputbox "$message" 11 75 "$1" 3>&1 1>&2 2>&3)
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
@@ -1204,6 +1229,45 @@ whiptail_patch_schedule_select_hours() {
}
# Ask whether a proxy should be configured; yes -> 0, no -> non-zero.
whiptail_proxy_ask() {
if [[ -n "$TESTING" ]]; then
return
fi
whiptail --title "Security Onion Setup" --yesno "Do you want to set a proxy server for this installation?" 7 60
}
# Prompt for the proxy URL (pre-filled with $1) and store it in proxy_addr.
whiptail_proxy_addr() {
if [[ -n "$TESTING" ]]; then
return
fi
local message
read -r -d '' message <<- EOM
Please input the proxy server you wish to use, including the URL prefix (ex: https://your.proxy.com:1234).
If your proxy requires a username and password do not include them in your input. Setup will ask for those values next.
EOM
proxy_addr=$(whiptail --title "Security Onion Setup" --inputbox "$message" 13 60 "$1" 3>&1 1>&2 2>&3)
local status=$?
whiptail_check_exitstatus $status
}
# Ask whether the proxy requires authentication; yes -> 0, no -> non-zero.
whiptail_proxy_auth_ask() {
if [[ -n "$TESTING" ]]; then
return
fi
whiptail --title "Security Onion Setup" --yesno "Does your proxy require authentication?" 7 60
}
# Prompt for the proxy username (pre-filled with $1); stored in proxy_user.
whiptail_proxy_auth_user() {
if [[ -n "$TESTING" ]]; then
return
fi
proxy_user=$(whiptail --title "Security Onion Setup" --inputbox "Please input the proxy user:" 8 60 "$1" 3>&1 1>&2 2>&3)
}
# Prompt for the proxy password via a password box; stored in proxy_pass.
# Deliberately takes no default — the existing value is obfuscated.
whiptail_proxy_auth_pass() {
if [[ -n "$TESTING" ]]; then
return
fi
proxy_pass=$(whiptail --title "Security Onion Setup" --passwordbox "Please input the proxy password:" 8 60 3>&1 1>&2 2>&3)
}
whiptail_requirements_error() {
local requirement_needed=$1