Merge pull request #3503 from Security-Onion-Solutions/foxtrot

Foxtrot
Authored by William Wernert, committed via GitHub, 2021-03-17 12:07:40 -04:00
20 changed files with 607 additions and 259 deletions

View File

@@ -399,6 +399,26 @@ valid_int() {
# {% raw %}
valid_proxy() {
  local proxy=$1
  local url_prefixes=( 'http://' 'https://' )
  local has_prefix=false
  for prefix in "${url_prefixes[@]}"; do
    echo "$proxy" | grep -q "$prefix" && has_prefix=true && proxy=${proxy#"$prefix"} && break
  done
  local url_arr
  mapfile -t url_arr <<< "$(echo "$proxy" | tr ":" "\n")"
  local valid_url=true
  if ! valid_ip4 "${url_arr[0]}" && ! valid_fqdn "${url_arr[0]}"; then
    valid_url=false
  fi
  [[ $has_prefix == true ]] && [[ $valid_url == true ]] && return 0 || return 1
}
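For orientation, a few hand-run checks of how this validator behaves (a minimal sketch; it assumes the surrounding helpers such as valid_ip4 and valid_fqdn are already sourced, and the proxy URLs are made-up examples):
# Hypothetical spot checks for valid_proxy (example values only)
valid_proxy "http://proxy.example.com:3128" && echo "ok"      # scheme + FQDN + port -> valid
valid_proxy "https://10.66.166.30:3128" && echo "ok"          # scheme + IPv4 + port -> valid
valid_proxy "proxy.example.com:3128" || echo "rejected"       # missing http:// or https:// prefix -> invalid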
valid_string() {
  local str=$1
  local min_length=${2:-1}

View File

@@ -30,7 +30,7 @@ fi
USER=$1
CORTEX_KEY=$(lookup_pillar cortexkey)
CORTEX_KEY=$(lookup_pillar cortexorguserkey)
CORTEX_API_URL="$(lookup_pillar url_base)/cortex/api"
CORTEX_ORG_NAME=$(lookup_pillar cortexorgname)
CORTEX_USER=$USER

View File

@@ -30,7 +30,7 @@ fi
USER=$1
CORTEX_KEY=$(lookup_pillar cortexkey)
CORTEX_KEY=$(lookup_pillar cortexorguserkey)
CORTEX_API_URL="$(lookup_pillar url_base)/cortex/api"
CORTEX_USER=$USER

View File

@@ -0,0 +1,85 @@
#!/usr/bin/env python3
# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys, argparse, re, docker
from packaging.version import Version, InvalidVersion
from itertools import groupby, chain

def get_image_name(string) -> str:
    return ':'.join(string.split(':')[:-1])

def get_so_image_basename(string) -> str:
    return get_image_name(string).split('/so-')[-1]

def get_image_version(string) -> str:
    ver = string.split(':')[-1]
    if ver == 'latest':
        # Version doesn't like "latest", so use a high semver
        return '999999.9.9'
    else:
        try:
            Version(ver)
        except InvalidVersion:
            # Strip the last substring following a hyphen for automated branches
            ver = '-'.join(ver.split('-')[:-1])
        return ver

def main(quiet):
    client = docker.from_env()
    image_list = client.images.list(filters={ 'dangling': False })
    # Map list of image objects to flattened list of tags (format: "name:version")
    tag_list = list(chain.from_iterable(list(map(lambda x: x.attrs.get('RepoTags'), image_list))))
    # Filter to only SO images (base name begins with "so-")
    tag_list = list(filter(lambda x: re.match(r'^.*\/so-[^\/]*$', get_image_name(x)), tag_list))
    # Group tags into lists by base name (sort by same projection first)
    tag_list.sort(key=lambda x: get_so_image_basename(x))
    grouped_tag_lists = [ list(it) for _, it in groupby(tag_list, lambda x: get_so_image_basename(x)) ]
    no_prunable = True
    for t_list in grouped_tag_lists:
        try:
            # Keep the 2 most current images
            t_list.sort(key=lambda x: Version(get_image_version(x)), reverse=True)
            if len(t_list) <= 2:
                continue
            else:
                no_prunable = False
                for tag in t_list[2:]:
                    if not quiet: print(f'Removing image {tag}')
                    client.images.remove(tag)
        except InvalidVersion as e:
            print(f'so-{get_so_image_basename(t_list[0])}: {e.args[0]}', file=sys.stderr)
            exit(1)
    if no_prunable and not quiet:
        print('No Security Onion images to prune')

if __name__ == "__main__":
    main_parser = argparse.ArgumentParser(add_help=False)
    main_parser.add_argument('-q', '--quiet', action='store_const', const=True, required=False)
    args = main_parser.parse_args(sys.argv[1:])
    main(args.quiet)
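If this is the script that the prune_images state later in this diff invokes as so-docker-prune (an assumption on my part), its behavior on a host would look roughly like this; the tag names and versions below are illustrative:
# Hypothetical run: suppose three local tags exist for one image
#   manager:5000/security-onion-solutions/so-nginx:2.3.30
#   manager:5000/security-onion-solutions/so-nginx:2.3.21
#   manager:5000/security-onion-solutions/so-nginx:2.3.20
# The two highest versions are kept; everything older is removed.
so-docker-prune
# Removing image manager:5000/security-onion-solutions/so-nginx:2.3.20
so-docker-prune -q   # same pruning, but without the per-image output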

salt/common/tools/sbin/so-monitor-add Normal file → Executable file
View File

salt/common/tools/sbin/so-playbook-sigma-refresh Normal file → Executable file
View File

salt/common/tools/sbin/so-raid-status Normal file → Executable file
View File

salt/common/tools/sbin/so-rule Normal file → Executable file
View File

salt/common/tools/sbin/so-suricata-testrule Normal file → Executable file
View File

View File

@@ -290,15 +290,14 @@ rc1_to_rc2() {
done </tmp/nodes.txt
# Add the nodes back using hostname
while read p; do
local NAME=$(echo $p | awk '{print $1}')
local EHOSTNAME=$(echo $p | awk -F"_" '{print $1}')
local IP=$(echo $p | awk '{print $2}')
echo "Adding the new cross cluster config for $NAME"
curl -XPUT http://localhost:9200/_cluster/settings -H'Content-Type: application/json' -d '{"persistent": {"search": {"remote": {"'$NAME'": {"skip_unavailable": "true", "seeds": ["'$EHOSTNAME':9300"]}}}}}'
done </tmp/nodes.txt
INSTALLEDVERSION=rc.2
}
rc2_to_rc3() {
@@ -328,10 +327,10 @@ rc3_to_2.3.0() {
fi
{
echo "redis_settings:"
echo " redis_maxmemory: 827"
echo "playbook:"
echo " api_key: de6639318502476f2fa5aa06f43f51fb389a3d7f"
echo "redis_settings:"
echo " redis_maxmemory: 827"
echo "playbook:"
echo " api_key: de6639318502476f2fa5aa06f43f51fb389a3d7f"
} >> /opt/so/saltstack/local/pillar/global.sls
sed -i 's/playbook:/playbook_db:/' /opt/so/saltstack/local/pillar/secrets.sls
@@ -379,7 +378,6 @@ up_2.3.0_to_2.3.20(){
fi
INSTALLEDVERSION=2.3.20
}
up_2.3.2X_to_2.3.30() {
@@ -389,11 +387,11 @@ up_2.3.2X_to_2.3.30() {
sed -i -r "s/ (\{\{.*}})$/ '\1'/g" "$pillar"
done
# Change the IMAGEREPO
sed -i "/ imagerepo: 'securityonion'/c\ imagerepo: 'security-onion-solutions'" /opt/so/saltstack/local/pillar/global.sls
sed -i "/ imagerepo: securityonion/c\ imagerepo: 'security-onion-solutions'" /opt/so/saltstack/local/pillar/global.sls
# Strelka rule repo pillar addition
if [ $is_airgap -eq 0 ]; then
# Add manager as default Strelka YARA rule repo
sed -i "/^strelka:/a \\ repos: \n - https://$HOSTNAME/repo/rules/strelka" /opt/so/saltstack/local/pillar/global.sls;
@@ -431,16 +429,16 @@ thehive_maint() {
COUNT=0
THEHIVE_CONNECTED="no"
while [[ "$COUNT" -le 240 ]]; do
curl --output /dev/null --silent --head --fail -k "https://localhost/thehive/api/alert"
if [ $? -eq 0 ]; then
THEHIVE_CONNECTED="yes"
echo "connected!"
break
else
((COUNT+=1))
sleep 1
echo -n "."
fi
done
if [ "$THEHIVE_CONNECTED" == "yes" ]; then
echo "Migrating thehive databases if needed."
@@ -475,80 +473,80 @@ update_version() {
}
upgrade_check() {
# Let's make sure we actually need to update.
NEWVERSION=$(cat $UPDATE_DIR/VERSION)
if [ "$INSTALLEDVERSION" == "$NEWVERSION" ]; then
echo "You are already running the latest version of Security Onion."
exit 0
fi
}
upgrade_check_salt() {
NEWSALTVERSION=$(grep version: $UPDATE_DIR/salt/salt/master.defaults.yaml | awk {'print $2'})
if [ "$INSTALLEDSALTVERSION" == "$NEWSALTVERSION" ]; then
echo "You are already running the correct version of Salt for Security Onion."
else
UPGRADESALT=1
fi
}
upgrade_salt() {
SALTUPGRADED=True
echo "Performing upgrade of Salt from $INSTALLEDSALTVERSION to $NEWSALTVERSION."
echo ""
# If CentOS
if [ "$OS" == "centos" ]; then
echo "Removing yum versionlock for Salt."
echo ""
yum versionlock delete "salt-*"
echo "Updating Salt packages and restarting services."
echo ""
if [ $is_airgap -eq 0 ]; then
sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -r -F -M -x python3 stable "$NEWSALTVERSION"
else
sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -F -M -x python3 stable "$NEWSALTVERSION"
fi
echo "Applying yum versionlock for Salt."
echo ""
yum versionlock add "salt-*"
# Else do Ubuntu things
elif [ "$OS" == "ubuntu" ]; then
echo "Removing apt hold for Salt."
echo ""
apt-mark unhold "salt-common"
apt-mark unhold "salt-master"
apt-mark unhold "salt-minion"
echo "Updating Salt packages and restarting services."
echo ""
sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -F -M -x python3 stable "$NEWSALTVERSION"
echo "Applying apt hold for Salt."
echo ""
apt-mark hold "salt-common"
apt-mark hold "salt-master"
apt-mark hold "salt-minion"
fi
}
verify_latest_update_script() {
# Check to see if the update scripts match. If not run the new one.
CURRENTSOUP=$(md5sum /opt/so/saltstack/default/salt/common/tools/sbin/soup | awk '{print $1}')
GITSOUP=$(md5sum $UPDATE_DIR/salt/common/tools/sbin/soup | awk '{print $1}')
CURRENTCMN=$(md5sum /opt/so/saltstack/default/salt/common/tools/sbin/so-common | awk '{print $1}')
GITCMN=$(md5sum $UPDATE_DIR/salt/common/tools/sbin/so-common | awk '{print $1}')
CURRENTIMGCMN=$(md5sum /opt/so/saltstack/default/salt/common/tools/sbin/so-image-common | awk '{print $1}')
GITIMGCMN=$(md5sum $UPDATE_DIR/salt/common/tools/sbin/so-image-common | awk '{print $1}')
if [[ "$CURRENTSOUP" == "$GITSOUP" && "$CURRENTCMN" == "$GITCMN" && "$CURRENTIMGCMN" == "$GITIMGCMN" ]]; then
echo "This version of the soup script is up to date. Proceeding."
else
echo "You are not running the latest soup version. Updating soup and its components. Might take multiple runs to complete"
cp $UPDATE_DIR/salt/common/tools/sbin/soup $DEFAULT_SALT_DIR/salt/common/tools/sbin/
cp $UPDATE_DIR/salt/common/tools/sbin/so-common $DEFAULT_SALT_DIR/salt/common/tools/sbin/
cp $UPDATE_DIR/salt/common/tools/sbin/so-image-common $DEFAULT_SALT_DIR/salt/common/tools/sbin/
salt-call state.apply common queue=True
echo ""
echo "soup has been updated. Please run soup again."
exit 0
fi
if [[ "$CURRENTSOUP" == "$GITSOUP" && "$CURRENTCMN" == "$GITCMN" && "$CURRENTIMGCMN" == "$GITIMGCMN" ]]; then
echo "This version of the soup script is up to date. Proceeding."
else
echo "You are not running the latest soup version. Updating soup and its components. Might take multiple runs to complete"
cp $UPDATE_DIR/salt/common/tools/sbin/soup $DEFAULT_SALT_DIR/salt/common/tools/sbin/
cp $UPDATE_DIR/salt/common/tools/sbin/so-common $DEFAULT_SALT_DIR/salt/common/tools/sbin/
cp $UPDATE_DIR/salt/common/tools/sbin/so-image-common $DEFAULT_SALT_DIR/salt/common/tools/sbin/
salt-call state.apply common queue=True
echo ""
echo "soup has been updated. Please run soup again."
exit 0
fi
}
main () {
@@ -562,9 +560,10 @@ while getopts ":b" opt; do
echo "Batch size must be a number greater than 0."
exit 1
fi
;;
\? )
echo "Usage: cmd [-b]"
;;
esac
done

View File

@@ -4,12 +4,11 @@
{%- if grains['role'] in ['so-node', 'so-heavynode'] %}
{%- set ELASTICSEARCH_HOST = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- set ELASTICSEARCH_PORT = salt['pillar.get']('elasticsearch:es_port', '') -%}
{%- set LOG_SIZE_LIMIT = salt['pillar.get']('elasticsearch:log_size_limit', '') -%}
{%- elif grains['role'] in ['so-eval', 'so-managersearch', 'so-standalone'] %}
{%- set ELASTICSEARCH_HOST = salt['pillar.get']('manager:mainip', '') -%}
{%- set ELASTICSEARCH_PORT = salt['pillar.get']('manager:es_port', '') -%}
{%- set LOG_SIZE_LIMIT = salt['pillar.get']('manager:log_size_limit', '') -%}
{%- endif -%}
{%- set LOG_SIZE_LIMIT = salt['pillar.get']('elasticsearch:log_size_limit', '') -%}
# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
#

View File

@@ -1,86 +1,9 @@
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %}
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{% set MANAGER = salt['grains.get']('master') %}
{% set OLDVERSIONS = ['2.0.0-rc.1','2.0.1-rc.1','2.0.2-rc.1','2.0.3-rc.1','2.1.0-rc.2','2.2.0-rc.3','2.3.0','2.3.1','2.3.2','2.3.10','2.3.20']%}
{% for VERSION in OLDVERSIONS %}
remove_images_{{ VERSION }}:
docker_image.absent:
- force: True
- images:
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-acng:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-thehive-cortex:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-curator:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-domainstats:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-elastalert:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-elasticsearch:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-filebeat:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-fleet:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-fleet-launcher:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-freqserver:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-grafana:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-idstools:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-influxdb:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-kibana:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-kratos:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-logstash:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-minio:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-mysql:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-nginx:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-pcaptools:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-playbook:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-redis:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-soc:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-soctopus:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-steno:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-strelka-frontend:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-strelka-manager:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-strelka-backend:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-strelka-filestream:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-suricata:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-telegraf:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-thehive:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-thehive-es:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-wazuh:{{ VERSION }}'
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-zeek:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-acng:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-thehive-cortex:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-curator:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-domainstats:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-elastalert:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-elasticsearch:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-filebeat:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-fleet:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-fleet-launcher:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-freqserver:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-grafana:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-idstools:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-influxdb:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-kibana:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-kratos:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-logstash:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-minio:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-mysql:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-nginx:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-pcaptools:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-playbook:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-redis:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-soc:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-soctopus:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-steno:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-strelka-frontend:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-strelka-manager:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-strelka-backend:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-strelka-filestream:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-suricata:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-telegraf:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-thehive:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-thehive-es:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-wazuh:{{ VERSION }}'
- '{{ MANAGER }}:5000/securityonion/so-zeek:{{ VERSION }}'
{% endfor %}
prune_images:
cmd.run:
- name: so-docker-prune
{% else %}

View File

@@ -45,6 +45,12 @@ so-idstools:
- image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-idstools:{{ VERSION }}
- hostname: so-idstools
- user: socore
{% if proxy %}
- environment:
- http_proxy={{ proxy }}
- https_proxy={{ proxy }}
- no_proxy={{ salt['pillar.get']('manager:no_proxy') }}
{% endif %}
- binds:
- /opt/so/conf/idstools/etc:/opt/so/idstools/etc:ro
- /opt/so/rules/nids:/opt/so/rules/nids:rw
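When manager:proxy is set in the pillar, the rendered state passes the proxy straight into the container environment; a quick, hypothetical way to confirm from the manager (container name assumed to be so-idstools):
# Hypothetical verification that the proxy variables reached the container
docker exec so-idstools env | grep -i _proxy
# http_proxy=http://proxy.example.com:3128
# https_proxy=http://proxy.example.com:3128
# no_proxy=localhost, 127.0.0.1, ...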

File diff suppressed because one or more lines are too long

View File

@@ -1,3 +1,4 @@
{% set proxy = salt['pillar.get']('manager:proxy') -%}
[main]
cachedir=/var/cache/yum/$basearch/$releasever
keepcache=0
@@ -11,7 +12,8 @@ installonly_limit={{ salt['pillar.get']('yum:config:installonly_limit', 2) }}
bugtracker_url=http://bugs.centos.org/set_project.php?project_id=23&ref=http://bugs.centos.org/bug_report_page.php?category=yum
distroverpkg=centos-release
clean_requirements_on_remove=1
{% if (grains['role'] not in ['so-eval','so-managersearch', 'so-manager', 'so-standalone']) and salt['pillar.get']('global:managerupdate', '0') -%}
proxy=http://{{ salt['pillar.get']('yum:config:proxy', salt['config.get']('master')) }}:3142
{% elif proxy -%}
proxy={{ proxy }}
{% endif %}
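Assuming this template renders /etc/yum.conf, the resulting proxy line depends on which branch of the conditional wins; illustrative outputs under made-up pillar values:
# Non-manager node with global:managerupdate enabled -> yum routed through the manager's cache
grep '^proxy=' /etc/yum.conf
# proxy=http://manager:3142
# Any node where manager:proxy is set in the pillar -> use the configured proxy directly
grep '^proxy=' /etc/yum.conf
# proxy=http://proxy.example.com:3128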

View File

@@ -0,0 +1,78 @@
#!/bin/bash
# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
TESTING=true
# address_type=DHCP
ADMINUSER=onionuser
ADMINPASS1=onionuser
ADMINPASS2=onionuser
ALLOW_CIDR=0.0.0.0/0
ALLOW_ROLE=a
BASICZEEK=2
BASICSURI=2
# BLOGS=
BNICS=eth1
ZEEKVERSION=ZEEK
# CURCLOSEDAYS=
# EVALADVANCED=BASIC
GRAFANA=1
# HELIXAPIKEY=
HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
HNSENSOR=inherit
HOSTNAME=standalone
install_type=STANDALONE
# LSINPUTBATCHCOUNT=
# LSINPUTTHREADS=
# LSPIPELINEBATCH=
# LSPIPELINEWORKERS=
MANAGERADV=BASIC
MANAGERUPDATES=1
# MDNS=
# MGATEWAY=
# MIP=
# MMASK=
MNIC=eth0
# MSEARCH=
# MSRV=
# MTU=
NIDS=Suricata
# NODE_ES_HEAP_SIZE=
# NODE_LS_HEAP_SIZE=
NODESETUP=NODEBASIC
NSMSETUP=BASIC
NODEUPDATES=MANAGER
# OINKCODE=
OSQUERY=1
# PATCHSCHEDULEDAYS=
# PATCHSCHEDULEHOURS=
PATCHSCHEDULENAME=auto
PLAYBOOK=1
so_proxy=http://onionuser:0n10nus3r@10.66.166.30:3128
# REDIRECTHOST=
REDIRECTINFO=IP
RULESETUP=ETOPEN
# SHARDCOUNT=
# SKIP_REBOOT=
SOREMOTEPASS1=onionuser
SOREMOTEPASS2=onionuser
STRELKA=1
THEHIVE=1
WAZUH=1
WEBUSER=onionuser@somewhere.invalid
WEBPASSWD1=0n10nus3r
WEBPASSWD2=0n10nus3r

View File

@@ -1,2 +0,0 @@
[Service]
ExecStart=/usr/bin/dockerd /usr/bin/dockerd -H fd:// --registry-mirror "$proxy_addr"

View File

@@ -535,6 +535,56 @@ collect_patch_schedule_name_import() {
done
}
collect_proxy() {
  [[ -n $TESTING ]] && return
  collect_proxy_details
  while ! proxy_validate; do
    if whiptail_invalid_proxy; then
      collect_proxy_details no_ask
    else
      so_proxy=""
      break
    fi
  done
}
collect_proxy_details() {
  local ask=${1:-true}
  local use_proxy
  if [[ $ask != true ]]; then
    use_proxy=0
  else
    whiptail_proxy_ask
    use_proxy=$?
  fi
  if [[ $use_proxy == 0 ]]; then
    whiptail_proxy_addr "$proxy_addr"
    while ! valid_proxy "$proxy_addr"; do
      whiptail_invalid_input
      whiptail_proxy_addr "$proxy_addr"
    done
    if whiptail_proxy_auth_ask; then
      whiptail_proxy_auth_user "$proxy_user"
      whiptail_proxy_auth_pass "$proxy_pass"
      local url_prefixes=( 'http://' 'https://' )
      for prefix in "${url_prefixes[@]}"; do
        if echo "$proxy_addr" | grep -q "$prefix"; then
          local proxy=${proxy_addr#"$prefix"}
          so_proxy="${prefix}${proxy_user}:${proxy_pass}@${proxy}"
          break
        fi
      done
    else
      so_proxy="$proxy_addr"
    fi
    export proxy
  fi
}
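The loop above rebuilds the proxy URL with the credentials wedged between the scheme and the host; a minimal sketch of the resulting value using made-up inputs:
# Hypothetical inputs collected by the whiptail prompts
proxy_addr='http://proxy.example.com:3128'
proxy_user='onionuser'
proxy_pass='0n10nus3r'
# After the prefix loop runs, so_proxy becomes:
#   http://onionuser:0n10nus3r@proxy.example.com:3128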
collect_redirect_host() {
whiptail_set_redirect_host "$HOSTNAME"
@@ -691,10 +741,10 @@ check_requirements() {
else
req_storage=100
fi
if (( $(echo "$free_space_root < $req_storage" | bc -l) )); then
if [[ $free_space_root -lt $req_storage ]]; then
whiptail_storage_requirements "/" "${free_space_root} GB" "${req_storage} GB"
fi
if (( $(echo "$free_space_nsm < $req_storage" | bc -l) )); then
if [[ $free_space_nsm -lt $req_storage ]]; then
whiptail_storage_requirements "/nsm" "${free_space_nsm} GB" "${req_storage} GB"
fi
else
@@ -703,7 +753,7 @@ check_requirements() {
else
req_storage=200
fi
if (( $(echo "$free_space_root < $req_storage" | bc -l) )); then
if [[ $free_space_root -lt $req_storage ]]; then
whiptail_storage_requirements "/" "${free_space_root} GB" "${req_storage} GB"
fi
fi
@@ -743,12 +793,14 @@ check_sos_appliance() {
compare_main_nic_ip() {
if ! [[ $MNIC =~ ^(tun|wg|vpn).*$ ]]; then
if [[ "$MAINIP" != "$MNIC_IP" ]]; then
error "[ERROR] Main gateway ($MAINIP) does not match ip address of managament NIC ($MNIC_IP)."
read -r -d '' message <<- EOM
The IP being routed by Linux is not the IP address assigned to the management interface ($MNIC).
This is not a supported configuration, please remediate and rerun setup.
EOM
[[ -n $TESTING ]] || whiptail --title "Security Onion Setup" --msgbox "$message" 10 75
kill -SIGINT "$(ps --pid $$ -oppid=)"; exit 1
fi
else
@@ -939,36 +991,26 @@ detect_os() {
}
installer_progress_loop() {
  local i=0
  while true; do
    [[ $i -lt 98 ]] && ((i++))
    set_progress_str "$i" 'Checking that all required packages are installed and enabled...' nolog
    [[ $i -gt 0 ]] && sleep 5s
  done
}
installer_prereq_packages() {
if [ "$OS" == centos ]; then
# Print message to stdout so the user knows setup is doing something
echo "Installing required packages to run installer..."
# Install bind-utils so the host command exists
if [[ ! $is_iso ]]; then
if ! command -v host > /dev/null 2>&1; then
yum -y install bind-utils >> "$setup_log" 2>&1
fi
if ! command -v nmcli > /dev/null 2>&1; then
{
yum -y install NetworkManager;
systemctl enable NetworkManager;
systemctl start NetworkManager;
} >> "$setup_log" 2<&1
fi
if ! command -v bc > /dev/null 2>&1; then
yum -y install bc >> "$setup_log" 2>&1
fi
if ! yum versionlock > /dev/null 2>&1; then
yum -y install yum-plugin-versionlock >> "$setup_log" 2>&1
fi
else
logCmd "systemctl enable NetworkManager"
logCmd "systemctl start NetworkManager"
fi
fi
logCmd "systemctl enable NetworkManager"
logCmd "systemctl start NetworkManager"
elif [ "$OS" == ubuntu ]; then
# Print message to stdout so the user knows setup is doing something
echo "Installing required packages to run installer..."
retry 50 10 "apt-get update" >> "$setup_log" 2>&1 || exit 1
# Install network manager so we can do interface stuff
if ! command -v nmcli > /dev/null 2>&1; then
@@ -978,7 +1020,7 @@ installer_prereq_packages() {
systemctl start NetworkManager
} >> "$setup_log" 2<&1
fi
retry 50 10 "apt-get -y install bc curl" >> "$setup_log" 2>&1 || exit 1
retry 50 10 "apt-get -y install curl" >> "$setup_log" 2>&1 || exit 1
fi
}
@@ -1002,11 +1044,11 @@ disable_ipv6() {
sysctl -w net.ipv6.conf.all.disable_ipv6=1
sysctl -w net.ipv6.conf.default.disable_ipv6=1
} >> "$setup_log" 2>&1
{
echo "net.ipv6.conf.all.disable_ipv6 = 1"
echo "net.ipv6.conf.default.disable_ipv6 = 1"
echo "net.ipv6.conf.lo.disable_ipv6 = 1"
} >> /etc/sysctl.conf
}
#disable_misc_network_features() {
@@ -1390,6 +1432,8 @@ install_cleanup() {
info "Removing so-setup permission entry from sudoers file"
sed -i '/so-setup/d' /etc/sudoers
fi
so-ssh-harden -q
}
import_registry_docker() {
@@ -1437,6 +1481,8 @@ manager_pillar() {
"manager:"\
" mainip: '$MAINIP'"\
" mainint: '$MNIC'"\
" proxy: '$so_proxy'"\
" no_proxy: '$no_proxy_string'"\
" esheap: '$ES_HEAP_SIZE'"\
" esclustername: '{{ grains.host }}'"\
" freq: 0"\
@@ -1451,7 +1497,6 @@ manager_pillar() {
printf '%s\n'\
" elastalert: 1"\
" es_port: $node_es_port"\
" log_size_limit: $log_size_limit"\
" cur_close_days: $CURCLOSEDAYS"\
" grafana: $GRAFANA"\
" osquery: $OSQUERY"\
@@ -1517,7 +1562,6 @@ manager_global() {
" hnmanager: '$HNMANAGER'"\
" ntpserver: '$NTPSERVER'"\
" dockernet: '$DOCKERNET'"\
" proxy: '$PROXY'"\
" mdengine: '$ZEEKVERSION'"\
" ids: '$NIDS'"\
" url_base: '$REDIRECTIT'"\
@@ -1700,7 +1744,6 @@ network_init() {
network_init_whiptail() {
case "$setup_type" in
'iso')
collect_hostname
whiptail_management_nic
whiptail_dhcp_or_static
@@ -1714,7 +1757,6 @@ network_init_whiptail() {
'network')
whiptail_network_notice
whiptail_dhcp_warn
collect_hostname
whiptail_management_nic
;;
esac
@@ -1782,6 +1824,22 @@ print_salt_state_apply() {
echo "Applying $state Salt state"
}
proxy_validate() {
  local test_url="https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/master/KEYS"
  proxy_test_err=$(curl -sS "$test_url" --proxy "$so_proxy" 2>&1)
  local ret=$?
  if [[ $ret != 0 ]]; then
    error "Could not reach $test_url using proxy $so_proxy"
    error "Received error: $proxy_test_err"
    if [[ -n $TESTING ]]; then
      error "Exiting setup"
      kill -SIGINT "$(ps --pid $$ -oppid=)"; exit 1
    fi
  fi
  return $ret
}
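The check reduces to a single proxied curl against the project's KEYS file; roughly the same thing can be run by hand (the proxy value below is illustrative):
# Hand-run equivalent of proxy_validate (example proxy only)
curl -sS "https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/master/KEYS" \
  --proxy "http://onionuser:0n10nus3r@10.66.166.30:3128" > /dev/null && echo "proxy OK" || echo "proxy failed"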
reserve_group_ids() {
# This is a hack to fix CentOS from taking group IDs that we need
groupadd -g 928 kratos
@@ -1875,6 +1933,24 @@ reinstall_init() {
} >> "$setup_log" 2>&1
}
reset_proxy() {
  [[ -f /etc/profile.d/so-proxy.sh ]] && rm -f /etc/profile.d/so-proxy.sh
  [[ -f /etc/systemd/system/docker.service.d/http-proxy.conf ]] && rm -f /etc/systemd/system/docker.service.d/http-proxy.conf
  systemctl daemon-reload
  command -v docker &> /dev/null && systemctl restart docker
  [[ -f /root/.docker/config.json ]] && rm -f /root/.docker/config.json
  [[ -f /etc/gitconfig ]] && rm -f /etc/gitconfig
  if [[ $OS == 'centos' ]]; then
    sed -i "/proxy=/d" /etc/yum.conf
  else
    [[ -f /etc/apt/apt.conf.d/00-proxy.conf ]] && rm -f /etc/apt/apt.conf.d/00-proxy.conf
  fi
}
backup_dir() {
dir=$1
backup_suffix=$2
@@ -1968,6 +2044,7 @@ saltify() {
python36-dateutil\
python36-m2crypto\
python36-mysql\
python36-packaging\
yum-utils\
device-mapper-persistent-data\
lvm2\
@@ -2056,9 +2133,9 @@ saltify() {
retry 50 10 "apt-get -y install salt-minion=3002.5+ds-1 salt-common=3002.5+ds-1" >> "$setup_log" 2>&1 || exit 1
retry 50 10 "apt-mark hold salt-minion salt-common" >> "$setup_log" 2>&1 || exit 1
if [[ $OSVER != 'xenial' ]]; then
retry 50 10 "apt-get -y install python3-pip python3-dateutil python3-m2crypto python3-mysqldb" >> "$setup_log" 2>&1 || exit 1
retry 50 10 "apt-get -y install python3-pip python3-dateutil python3-m2crypto python3-mysqldb python3-packaging" >> "$setup_log" 2>&1 || exit 1
else
retry 50 10 "apt-get -y install python-pip python-dateutil python-m2crypto python-mysqldb" >> "$setup_log" 2>&1 || exit 1
retry 50 10 "apt-get -y install python-pip python-dateutil python-m2crypto python-mysqldb python-packaging" >> "$setup_log" 2>&1 || exit 1
fi
fi
}
@@ -2200,7 +2277,70 @@ set_main_ip() {
# Add /usr/sbin to everyone's path
set_path() {
echo "complete -cf sudo" > /etc/profile.d/securityonion.sh
echo "complete -cf sudo" >> /etc/profile.d/securityonion.sh
}
set_proxy() {
  # Don't proxy localhost, local ip, and management ip
  no_proxy_string="localhost, 127.0.0.1, ${MAINIP}, ${HOSTNAME}"
  if [[ -n $MSRV ]] && [[ -n $MSRVIP ]];then
    no_proxy_string="${no_proxy_string}, ${MSRVIP}, ${MSRV}"
  fi
  # Set proxy environment variables used by curl, wget, docker, and others
  {
    echo "export use_proxy=on"
    echo "export http_proxy=\"${so_proxy}\""
    echo "export https_proxy=\"\$http_proxy\""
    echo "export ftp_proxy=\"\$http_proxy\""
    echo "export no_proxy=\"${no_proxy_string}\""
  } > /etc/profile.d/so-proxy.sh
  source /etc/profile.d/so-proxy.sh
  [[ -d '/etc/systemd/system/docker.service.d' ]] || mkdir -p /etc/systemd/system/docker.service.d
  # Create proxy config for dockerd
  printf '%s\n'\
    "[Service]"\
    "Environment=\"HTTP_PROXY=${so_proxy}\""\
    "Environment=\"HTTPS_PROXY=${so_proxy}\""\
    "Environment=\"NO_PROXY=${no_proxy_string}\"" > /etc/systemd/system/docker.service.d/http-proxy.conf
  systemctl daemon-reload
  command -v docker &> /dev/null && systemctl restart docker
  # Create config.json for docker containers
  [[ -d /root/.docker ]] || mkdir /root/.docker
  printf '%s\n'\
    "{"\
    " \"proxies\":"\
    " {"\
    " \"default\":"\
    " {"\
    " \"httpProxy\":\"${so_proxy}\","\
    " \"httpsProxy\":\"${so_proxy}\","\
    " \"ftpProxy\":\"${so_proxy}\","\
    " \"noProxy\":\"${no_proxy_string}\""\
    " }"\
    " }"\
    "}" > /root/.docker/config.json
  # Set proxy for package manager
  if [ "$OS" = 'centos' ]; then
    echo "proxy=$so_proxy" >> /etc/yum.conf
  else
    # Set it up so the updates roll through the manager
    printf '%s\n'\
      "Acquire::http::Proxy \"$so_proxy\";"\
      "Acquire::https::Proxy \"$so_proxy\";" > /etc/apt/apt.conf.d/00-proxy.conf
  fi
  # Set global git proxy
  printf '%s\n'\
    "[http]"\
    " proxy = ${so_proxy}" > /etc/gitconfig
}
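After set_proxy runs with a proxy of http://proxy.example.com:3128 (an illustrative value), these are the places it should have touched; a hedged checklist for verifying the result:
# Hypothetical spot checks after set_proxy completes
cat /etc/profile.d/so-proxy.sh                              # export http_proxy/https_proxy/ftp_proxy/no_proxy
cat /etc/systemd/system/docker.service.d/http-proxy.conf    # [Service] Environment="HTTP_PROXY=..." for dockerd
cat /root/.docker/config.json                               # default proxies for containers started by docker
grep '^proxy=' /etc/yum.conf                                # CentOS only
cat /etc/apt/apt.conf.d/00-proxy.conf                       # Ubuntu only: Acquire::http(s)::Proxy
git config --system --get http.proxy                        # written to /etc/gitconfig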
setup_salt_master_dirs() {
@@ -2231,6 +2371,7 @@ set_progress_str() {
local percentage_input=$1
progress_bar_text=$2
export progress_bar_text
local nolog=$3
if (( "$percentage_input" >= "$percentage" )); then
percentage="$percentage_input"
@@ -2240,12 +2381,14 @@ set_progress_str() {
echo -e "$percentage_str"
info "Progressing ($percentage%): $progress_bar_text"
if [[ -z $nolog ]]; then
info "Progressing ($percentage%): $progress_bar_text"
printf '%s\n' \
'----'\
"$percentage% - ${progress_bar_text^^}"\
"----" >> "$setup_log" 2>&1
# printf '%s\n' \
# '----'\
# "$percentage% - ${progress_bar_text^^}"\
# "----" >> "$setup_log" 2>&1
fi
}
set_ssh_cmds() {

View File

@@ -27,6 +27,8 @@ original_args=("$@")
cd "$(dirname "$0")" || exit 255
echo "Getting started..."
# Source the generic function libraries that are also used by the product after
# setup. These functions are intended to be reusable outside of the setup process.
source ../salt/common/tools/sbin/so-common
@@ -93,12 +95,23 @@ if ! [ -f $install_opt_file ]; then
analyze_system
fi
# Set up handler for setup to exit early (use `kill -SIGUSR1 "$setup_proc"; exit 1` in child scripts)
trap 'catch $LINENO' SIGUSR1
setup_proc="$$"
catch() {
  info "Fatal error occurred at $1 in so-setup, failing setup."
  grep --color=never "ERROR" "$setup_log" > "$error_log"
  whiptail_setup_failed
  exit 1
}
automated=no
function progress() {
local title='Security Onion Install'
progress() {
local title='Security Onion Setup'
local msg=${1:-'Please wait while installing...'}
if [ $automated == no ]; then
whiptail --title "$title" --gauge 'Please wait while installing...' 6 60 0 # append to text
whiptail --title "$title" --gauge "$msg" 6 70 0 # append to text
else
cat >> $setup_log 2>&1
fi
@@ -154,7 +167,6 @@ set_ssh_cmds $automated
local_sbin="$(pwd)/../salt/common/tools/sbin"
export PATH=$PATH:$local_sbin
installer_prereq_packages && detect_cloud
set_network_dev_status_list
set_palette >> $setup_log 2>&1
@@ -190,19 +202,24 @@ if ! [[ -f $install_opt_file ]]; then
if [[ $setup_type == 'iso' ]] && [ "$automated" == no ]; then
whiptail_first_menu_iso
if [[ $option == "CONFIGURENETWORK" ]]; then
collect_hostname
network_init_whiptail
whiptail_management_interface_setup
network_init
printf '%s\n' \
"MNIC=$MNIC" \
"HOSTNAME=$HOSTNAME" > "$net_init_file"
set_main_ip >> $setup_log 2>&1
compare_main_nic_ip
reset_proxy >> $setup_log 2>&1
collect_proxy
[[ -n "$so_proxy" ]] && set_proxy >> $setup_log 2>&1
whiptail_net_setup_complete
else
whiptail_install_type
true
fi
else
whiptail_install_type
fi
whiptail_install_type
else
source $install_opt_file
fi
@@ -275,25 +292,32 @@ if ! [[ -f $install_opt_file ]]; then
[[ -f $net_init_file ]] && whiptail_net_reinit && reinit_networking=true
if [[ $reinit_networking ]] || ! [[ -f $net_init_file ]]; then
collect_hostname
network_init_whiptail
else
source "$net_init_file"
fi
if [[ $is_minion ]]; then
collect_mngr_hostname
fi
if [[ $is_minion ]] || [[ $reinit_networking ]] || [[ $is_iso ]] && ! [[ -f $net_init_file ]]; then
whiptail_management_interface_setup
fi
if [[ $reinit_networking ]] || ! [[ -f $net_init_file ]]; then
network_init
fi
if [[ -n "$TURBO" ]]; then
use_turbo_proxy
set_main_ip >> $setup_log 2>&1
compare_main_nic_ip
if [[ $is_minion ]]; then
collect_mngr_hostname
fi
reset_proxy >> $setup_log 2>&1
if [[ -z $is_airgap ]]; then
collect_proxy
[[ -n "$so_proxy" ]] && set_proxy >> $setup_log 2>&1
fi
if [[ $is_minion ]] || [[ $reinit_networking ]] || [[ $is_iso ]] && ! [[ -f $net_init_file ]]; then
whiptail_management_interface_setup
fi
if [[ $is_minion ]]; then
@@ -312,6 +336,7 @@ if ! [[ -f $install_opt_file ]]; then
"HOSTNAME=$HOSTNAME" \
"MSRV=$MSRV" \
"MSRVIP=$MSRVIP" > "$install_opt_file"
[[ -n $so_proxy ]] && echo "so_proxy=$so_proxy" >> "$install_opt_file"
download_repo_tarball
exec bash /root/manager_setup/securityonion/setup/so-setup "${original_args[@]}"
fi
@@ -325,6 +350,22 @@ else
rm -rf $install_opt_file >> "$setup_log" 2>&1
fi
percentage=0
{
  installer_progress_loop & # Run progress bar to 98 in ~8 minutes while waiting for package installs
  progress_bg_proc=$!
  installer_prereq_packages
  install_success=$?
  kill -9 "$progress_bg_proc"
  wait "$progress_bg_proc" &> /dev/null # Kill just sends signal, redirect output of wait to catch stdout
  if [[ $install_success -gt 0 ]]; then
    echo "Could not install packages required for setup, exiting now." >> "$setup_log" 2>&1
    kill -SIGUSR1 "$setup_proc"; exit 1
  fi
  set_progress_str 99 "Detecting whether setup is running in the cloud." nolog
  detect_cloud
} | progress '...'
short_name=$(echo "$HOSTNAME" | awk -F. '{print $1}')
MINION_ID=$(echo "${short_name}_${install_type}" | tr '[:upper:]' '[:lower:]')
@@ -338,14 +379,14 @@ minion_type=$(get_minion_type)
set_default_log_size >> $setup_log 2>&1
if [[ $is_helix ]]; then
RULESETUP=${RULESETUP:-ETOPEN}
NSMSETUP=${NSMSETUP:-BASIC}
HNSENSOR=${HNSENSOR:-inherit}
MANAGERUPDATES=${MANAGERUPDATES:-0}
fi
if [[ $is_helix || ( $is_manager && $is_node ) ]]; then
RULESETUP=${RULESETUP:-ETOPEN}
NSMSETUP=${NSMSETUP:-BASIC}
fi
@@ -365,7 +406,7 @@ fi
if [[ $is_import ]]; then
PATCHSCHEDULENAME=${PATCHSCHEDULENAME:-auto}
MTU=${MTU:-1500}
RULESETUP=${RULESETUP:-ETOPEN}
NSMSETUP=${NSMSETUP:-BASIC}
HNSENSOR=${HNSENSOR:-inherit}
MANAGERUPDATES=${MANAGERUPDATES:-0}
@@ -529,21 +570,10 @@ whiptail_make_changes
# From here on changes will be made.
echo "1" > /root/accept_changes
# Set up handler for setup to exit early (use `kill -SIGUSR1 "$(ps --pid $$ -oppid=)"; exit 1` in child scripts)
trap 'catch $LINENO' SIGUSR1
catch() {
info "Fatal error occurred at $1 in so-setup, failing setup."
grep --color=never "ERROR" "$setup_log" > "$error_log"
whiptail_setup_failed
exit
}
# This block sets REDIRECTIT which is used by a function outside the below subshell
set_main_ip >> $setup_log 2>&1
compare_main_nic_ip
set_redirect >> $setup_log 2>&1
# Begin install
{
# Set initial percentage to 0

View File

@@ -588,8 +588,21 @@ whiptail_invalid_input() { # TODO: This should accept a list of arguments to spe
}
whiptail_invalid_proxy() {
[ -n "$TESTING" ] && return
local message
read -r -d '' message <<- EOM
Could not reach test url using proxy ${proxy_addr}.
Error was: ${proxy_test_err}
EOM
whiptail --title "Security Onion Setup" --yesno "$message" --yes-button "Enter Again" --no-button "Skip" 11 60
}
whiptail_invalid_string() {
[ -n "$TESTING" ] && return
[ -n "$TESTING" ] && return
whiptail --title "Security Onion Setup" --msgbox "Invalid input, please try again.\n\nThe $1 cannot contain spaces." 9 45
@@ -1216,6 +1229,58 @@ whiptail_patch_schedule_select_hours() {
}
whiptail_proxy_ask() {
[ -n "$TESTING" ] && return
whiptail --title "Security Onion Setup" --yesno "Do you want to set a proxy server for this installation?" 7 60
}
whiptail_proxy_addr() {
[ -n "$TESTING" ] && return
local message
read -r -d '' message <<- EOM
Please input the proxy server you wish to use, including the URL prefix (ex: https://your.proxy.com:1234).
If your proxy requires a username and password do not include them in your input. Setup will ask for those values next.
EOM
proxy_addr=$(whiptail --title "Security Onion Setup" --inputbox "$message" 13 60 "$1" 3>&1 1>&2 2>&3)
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
}
whiptail_proxy_auth_ask() {
[ -n "$TESTING" ] && return
whiptail --title "Security Onion Setup" --yesno "Does your proxy require authentication?" 7 60
}
whiptail_proxy_auth_user() {
[ -n "$TESTING" ] && return
proxy_user=$(whiptail --title "Security Onion Setup" --inputbox "Please input the proxy user:" 8 60 "$1" 3>&1 1>&2 2>&3)
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
}
whiptail_proxy_auth_pass() {
local arg=$1
[ -n "$TESTING" ] && return
if [[ $arg != 'confirm' ]]; then
proxy_pass=$(whiptail --title "Security Onion Setup" --passwordbox "Please input the proxy password:" 8 60 3>&1 1>&2 2>&3)
else
proxy_pass_confirm=$(whiptail --title "Security Onion Setup" --passwordbox "Please confirm the proxy password:" 8 60 3>&1 1>&2 2>&3)
fi
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
}
whiptail_requirements_error() {
local requirement_needed=$1