Merge pull request #2979 from Security-Onion-Solutions/foxtrot

Setup fixes/improvements
William Wernert authored on 2021-02-16 17:14:59 -05:00; committed by GitHub
5 changed files with 106 additions and 69 deletions

View File

@@ -34,7 +34,7 @@ GRAFANA=1
# HELIXAPIKEY=
HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
HNSENSOR=inherit
-HOSTNAME=standalone
+HOSTNAME=eval
install_type=EVAL
# LSINPUTBATCHCOUNT=
# LSINPUTTHREADS=

View File

@@ -34,8 +34,8 @@ GRAFANA=1
# HELIXAPIKEY=
HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
HNSENSOR=inherit
-HOSTNAME=standalone
-install_type=STANDALONE
+HOSTNAME=eval
+install_type=EVAL
# LSINPUTBATCHCOUNT=
# LSINPUTTHREADS=
# LSPIPELINEBATCH=

View File

@@ -398,6 +398,13 @@ collect_hostname() {
whiptail_set_hostname "$HOSTNAME"
+if [[ $HOSTNAME == 'securityonion' ]]; then # Will only check HOSTNAME=securityonion once
+  if ! (whiptail_avoid_default_hostname); then
+    whiptail_set_hostname
+  fi
+fi
while ! valid_hostname "$HOSTNAME"; do
whiptail_invalid_hostname
whiptail_set_hostname "$HOSTNAME"
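The loop above relies on a valid_hostname helper that is not part of this diff; a minimal sketch of such a check, assuming roughly RFC 1123 host-label rules (the real helper may differ), might look like:

    # Hypothetical sketch only; not the actual valid_hostname used by setup.
    valid_hostname() {
      local name=$1
      # 1-63 characters, alphanumerics and hyphens, no leading or trailing hyphen
      [[ ${#name} -ge 1 && ${#name} -le 63 ]] || return 1
      [[ $name =~ ^[A-Za-z0-9]([A-Za-z0-9-]*[A-Za-z0-9])?$ ]]
    }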
@@ -963,15 +970,16 @@ installer_prereq_packages() {
elif [ "$OS" == ubuntu ]; then
# Print message to stdout so the user knows setup is doing something
echo "Installing required packages to run installer..."
retry 50 10 "apt-get update" >> "$setup_log" 2>&1 || exit 1
# Install network manager so we can do interface stuff
if ! command -v nmcli > /dev/null 2>&1; then
retry 50 10 "apt-get install -y network-manager" >> "$setup_log" 2>&1 || exit 1
retry 50 10 "apt-get -y install network-manager" >> "$setup_log" 2>&1 || exit 1
{
systemctl enable NetworkManager
systemctl start NetworkManager
} >> "$setup_log" 2>&1
fi
retry 50 10 "apt-get install -y bc curl" >> "$setup_log" 2>&1 || exit 1
retry 50 10 "apt-get -y install bc curl" >> "$setup_log" 2>&1 || exit 1
fi
}
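The retry calls above use a helper defined elsewhere in the installer; its implementation is not shown in this diff, but a sketch consistent with the calling convention retry <attempts> <delay_seconds> "<command>" could be:

    # Hypothetical sketch only; the real retry helper may log or back off differently.
    retry() {
      local attempts=$1 delay=$2 cmd=$3 i
      for ((i = 1; i <= attempts; i++)); do
        eval "$cmd" && return 0   # stop as soon as the command succeeds
        sleep "$delay"
      done
      return 1                    # all attempts failed
    }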
@@ -1487,50 +1495,50 @@ manager_global() {
fi
if [ -z "$DOCKERNET" ]; then
DOCKERNET=172.17.0.0
DOCKERBIP=$(echo $DOCKERNET | awk -F'.' '{print $1,$2,$3,1}' OFS='.')/24
else
DOCKERBIP=$(echo $DOCKERNET | awk -F'.' '{print $1,$2,$3,1}' OFS='.')/24
fi
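For reference, the awk pipeline above derives the Docker bridge IP from the network address by keeping the first three octets, forcing the host octet to 1, and appending a /24 prefix:

    DOCKERNET=172.17.0.0
    DOCKERBIP=$(echo $DOCKERNET | awk -F'.' '{print $1,$2,$3,1}' OFS='.')/24
    echo "$DOCKERBIP"   # prints 172.17.0.1/24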
# Create a global file for global values
printf '%s\n'\
"global:"\
" soversion: '$SOVERSION'"\
" hnmanager: '$HNMANAGER'"\
" ntpserver: '$NTPSERVER'"\
" dockernet: '$DOCKERNET'"\
" proxy: '$PROXY'"\
" mdengine: '$ZEEKVERSION'"\
" ids: '$NIDS'"\
" url_base: '$REDIRECTIT'"\
" managerip: '$MAINIP'" > "$global_pillar"
if [[ $is_airgap ]]; then
printf '%s\n'\
" airgap: True"\ >> "$global_pillar"
else
printf '%s\n'\
" airgap: False"\ >> "$global_pillar"
fi
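These blocks depend on printf reusing its '%s\n' format for every argument, so each quoted string becomes one line of the generated pillar file. A standalone illustration of the pattern (the path and values below are made up for this example):

    # printf repeats the format for each argument: one argument per output line.
    pillar_file=/tmp/example_global.sls   # hypothetical path, for illustration only
    printf '%s\n' \
      "global:" \
      "  soversion: '2.3.0'" \
      "  airgap: False" > "$pillar_file"
    cat "$pillar_file"
    # global:
    #   soversion: '2.3.0'
    #   airgap: False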
# Check if TheHive is enabled. If so, add creds and other details
if [[ "$THEHIVE" == "1" ]]; then
printf '%s\n'\
" hiveuser: '$WEBUSER'"\
" hivepassword: '$WEBPASSWD1'"\
" hivekey: '$HIVEKEY'"\
" hiveplaysecret: '$HIVEPLAYSECRET'"\
" cortexuser: '$WEBUSER'"\
" cortexpassword: '$WEBPASSWD1'"\
" cortexkey: '$CORTEXKEY'"\
" cortexorgname: 'SecurityOnion'"\
" cortexorguser: 'soadmin'"\
" cortexorguserkey: '$CORTEXORGUSERKEY'"\
" cortexplaysecret: '$CORTEXPLAYSECRET'" >> "$global_pillar"
fi
# Continue adding other details
printf '%s\n'\
" fleet_custom_hostname: "\
" fleet_manager: False"\
@@ -1541,7 +1549,7 @@ manager_global() {
" fleet_ip: 'N/A'"\
" sensoronikey: '$SENSORONIKEY'"\
" wazuh: $WAZUH"\
" managerupdate: $MANAGERUPDATES"\
" managerupdate: $MANAGERUPDATES"\
" imagerepo: '$IMAGEREPO'"\
" pipeline: 'redis'"\
"sensoroni:"\
@@ -1557,22 +1565,22 @@ manager_global() {
" features: False"\
"elasticsearch:"\
" replicas: 0" >> "$global_pillar"
if [ -n "$ESCLUSTERNAME" ]; then
printf '%s\n'\
" true_cluster: True"\
" true_cluster_name: '$ESCLUSTERNAME'" >> "$global_pillar"
else
printf '%s\n'\
" true_cluster: False"\
" true_cluster_name: 'so'" >> "$global_pillar"
fi
if [ -n "$ESCLUSTERNAME" ]; then
printf '%s\n'\
" true_cluster: True"\
" true_cluster_name: '$ESCLUSTERNAME'" >> "$global_pillar"
else
printf '%s\n'\
" true_cluster: False"\
" true_cluster_name: 'so'" >> "$global_pillar"
fi
printf '%s\n'\
" discovery_nodes: 1"\
" hot_warm_enabled: False"\
" cluster_routing_allocation_disk.threshold_enabled: true"\
" cluster_routing_allocation_disk_watermark_low: '95%'"\
" cluster_routing_allocation_disk_watermark_high: '98%'"\
" cluster_routing_allocation_disk_watermark_flood_stage: '98%'"\
" cluster_routing_allocation_disk_watermark_low: '95%'"\
" cluster_routing_allocation_disk_watermark_high: '98%'"\
" cluster_routing_allocation_disk_watermark_flood_stage: '98%'"\
" index_settings:"\
" so-beats:"\
" shards: 1"\
@@ -1633,10 +1641,10 @@ manager_global() {
" upload_queue_size: 4"\
" encoding: 'gzip'"\
" interval: 5"\
"backup:"\
" locations:"\
" - /opt/so/saltstack/local"\
"soctopus:"\
"backup:"\
" locations:"\
" - /opt/so/saltstack/local"\
"soctopus:"\
" playbook:"\
" rulesets:"\
" - windows"\
@@ -1790,7 +1798,7 @@ reinstall_init() {
local service_retry_count=20
{
-if command -v salt-call &> /dev/null; then
+if command -v salt-call &> /dev/null && grep -q "master:" /etc/salt/minion 2> /dev/null; then
# Disable schedule so highstate doesn't start running during the install
salt-call -l info schedule.disable
@@ -1950,9 +1958,7 @@ saltify() {
} >> "$setup_log" 2>&1
yum versionlock salt*
else
-if ! (DEBIAN_FRONTEND=noninteractive retry 50 10 "apt-get -y -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" upgrade" >> "$setup_log" 2>&1); then
-  exit 1
-fi
+DEBIAN_FRONTEND=noninteractive retry 50 10 "apt-get -y -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" upgrade" >> "$setup_log" 2>&1 || exit 1
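The rewritten line collapses the if/exit wrapper into the same "|| exit 1" idiom used by the other retry calls in this file; the two forms behave identically, for example:

    # Both guards abort the script when the command fails.
    apt_upgrade() { true; }   # hypothetical stand-in for the real upgrade command

    apt_upgrade || exit 1     # new, one-line form

    if ! apt_upgrade; then    # old form, same behavior
      exit 1
    fi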
if [ $OSVER != "xenial" ]; then
# Switch to Python 3 as default if this is not xenial

View File

@@ -379,6 +379,11 @@ if [[ $is_import ]]; then
PLAYBOOK=0
fi
+if [[ $is_airgap ]]; then
+  PATCHSCHEDULENAME=manual
+  MANAGERUPDATES=0
+fi
# Start user prompts
if [[ $is_helix ]]; then
@@ -393,7 +398,7 @@ if [[ $is_helix || $is_sensor || $is_import ]]; then
calculate_useable_cores
fi
-if [[ ! $is_import ]]; then
+if [[ ! $is_airgap && ! $is_import ]]; then
collect_patch_schedule
fi
@@ -446,7 +451,7 @@ if [[ $is_manager || $is_import ]]; then
get_redirect
fi
-if [[ $is_distmanager || ( $is_sensor || $is_node || $is_fleet_standalone ) && ! $is_eval ]]; then
+if [[ ! $is_airgap && ( $is_distmanager || ( $is_sensor || $is_node || $is_fleet_standalone ) && ! $is_eval ) ]]; then
whiptail_manager_updates
if [[ $setup_type == 'network' && $MANAGERUPDATES == 1 ]]; then
whiptail_manager_updates_warning
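Inside [[ ]], && binds more tightly than ||, so the new condition reads as: not an airgap install, and either a distributed manager, or a sensor/node/fleet node that is not an eval install. A small demonstration of that precedence:

    # Parses as  -n "x" || ( -n "y" && -n "" )  ->  true || (true && false)  ->  true.
    # With equal left-to-right precedence it would be (true || true) && false -> false,
    # and nothing would be printed.
    if [[ -n "x" || -n "y" && -n "" ]]; then
      echo "&& binds tighter than || inside [[ ]]"
    fi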

View File

@@ -28,6 +28,20 @@ whiptail_airgap() {
whiptail_check_exitstatus $exitstatus
}
+whiptail_avoid_default_hostname() {
+  [ -n "$TESTING" ] && return
+  read -r -d '' message <<- EOM
+To prevent hostname conflicts, avoid using the default 'securityonion' hostname in a distributed environment.
+You can choose to use this default hostname anyway, or change it to a new hostname.
+EOM
+  whiptail --title "Security Onion Setup" \
+    --yesno "$message" 11 75 \
+    --yes-button "Use Anyway" --no-button "Change" --defaultno
+}
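whiptail's --yesno returns exit status 0 for the yes button ("Use Anyway" here) and a non-zero status for the no button or Esc, which is why collect_hostname re-prompts when this function fails. A standalone sketch of that convention (dialog text made up for illustration):

    # 0 = yes button, 1 = no button, 255 = Esc.
    if whiptail --title "Security Onion Setup" --yesno "Keep the default hostname?" 8 60 \
        --yes-button "Use Anyway" --no-button "Change"; then
      echo "Use Anyway selected (exit status 0)"
    else
      echo "Change selected, or the dialog was cancelled"
    fi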
whiptail_basic_suri() {
[ -n "$TESTING" ] && return
@@ -937,13 +951,25 @@ whiptail_metadata_tool() {
[ -n "$TESTING" ] && return
+read -r -d '' message <<- EOM
+What tool would you like to use to generate metadata?
+This question is asking specifically about metadata, which would be things like the connection log, DNS log, HTTP log, etc. This does not include NIDS alerts.
+If you choose Zeek for metadata, Suricata will still run to generate NIDS alerts.
+If you choose Suricata for metadata, it will generate NIDS alerts and metadata, and Zeek will not run at all.
+EOM
# Legacy variable naming
-ZEEKVERSION=$(whiptail --title "Security Onion Setup" --radiolist "What tool would you like to use to generate metadata?" 20 75 4 \
-"ZEEK" "Zeek (formerly known as Bro)" ON \
-"SURICATA" "Suricata" OFF 3>&1 1>&2 2>&3)
+ZEEKVERSION=$(whiptail --title "Security Onion Setup" --menu "$message" 20 75 2 \
+"Zeek " "Use Zeek (Bro) for metadata and Suricata for NIDS alerts" \
+"Suricata " "Use Suricata for both metadata and NIDS alerts" 3>&1 1>&2 2>&3)
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
+ZEEKVERSION=$(echo "${ZEEKVERSION^^}" | tr -d ' ')
}
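The final assignment normalizes the menu tag, which carries a trailing space, back to the legacy uppercase value the rest of setup expects, for example:

    ZEEKVERSION="Zeek "                                   # value returned by the menu tag
    ZEEKVERSION=$(echo "${ZEEKVERSION^^}" | tr -d ' ')    # ${VAR^^} uppercases, tr strips the space
    echo "$ZEEKVERSION"                                   # prints ZEEK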
whiptail_nids() {