diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 02ab437fb..aeb33ad8f 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -15,7 +15,7 @@ ### Contributing code -* **All commits must be signed** with a valid key that has been added to your GitHub account. The commits should have all the "**Verified**" tag when viewed on GitHub as shown below: +* **All commits must be signed** with a valid key that has been added to your GitHub account. Each commit should have the "**Verified**" tag when viewed on GitHub as shown below: diff --git a/salt/common/init.sls b/salt/common/init.sls index 05dd7023f..17cea3480 100644 --- a/salt/common/init.sls +++ b/salt/common/init.sls @@ -9,6 +9,11 @@ rmvariablesfile: file.absent: - name: /tmp/variables.txt +dockergroup: + group.present: + - name: docker + - gid: 920 + # Add socore Group socoregroup: group.present: diff --git a/salt/common/tools/sbin/so-import-evtx b/salt/common/tools/sbin/so-import-evtx index 9e640beaa..83815eecd 100755 --- a/salt/common/tools/sbin/so-import-evtx +++ b/salt/common/tools/sbin/so-import-evtx @@ -25,6 +25,7 @@ INDEX_DATE=$(date +'%Y.%m.%d') RUNID=$(cat /dev/urandom | tr -dc 'a-z0-9' | fold -w 8 | head -n 1) +LOG_FILE=/nsm/import/evtx-import.log . 
/usr/sbin/so-common @@ -41,14 +42,17 @@ function evtx2es() { EVTX=$1 HASH=$2 + ES_PW=$(lookup_pillar "auth:users:so_elastic_user:pass" "elasticsearch") + ES_USER=$(lookup_pillar "auth:users:so_elastic_user:user" "elasticsearch") + docker run --rm \ -v "$EVTX:/tmp/$RUNID.evtx" \ --entrypoint evtx2es \ {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-pcaptools:{{ VERSION }} \ --host {{ MANAGERIP }} --scheme https \ --index so-beats-$INDEX_DATE --pipeline import.wel \ - --login {{ES_USER}} --pwd {{ES_PW}} \ - "/tmp/$RUNID.evtx" 1>/dev/null 2>/dev/null + --login $ES_USER --pwd $ES_PW \ + "/tmp/$RUNID.evtx" >> $LOG_FILE 2>&1 docker run --rm \ -v "$EVTX:/tmp/import.evtx" \ diff --git a/salt/elasticsearch/auth.sls b/salt/elasticsearch/auth.sls index 373f2fbed..35d669892 100644 --- a/salt/elasticsearch/auth.sls +++ b/salt/elasticsearch/auth.sls @@ -1,8 +1,8 @@ -{% set so_elastic_user_pass = salt['random.get_str'](20) %} -{% set so_kibana_user_pass = salt['random.get_str'](20) %} -{% set so_logstash_user_pass = salt['random.get_str'](20) %} -{% set so_beats_user_pass = salt['random.get_str'](20) %} -{% set so_monitor_user_pass = salt['random.get_str'](20) %} +{% set so_elastic_user_pass = salt['random.get_str'](72) %} +{% set so_kibana_user_pass = salt['random.get_str'](72) %} +{% set so_logstash_user_pass = salt['random.get_str'](72) %} +{% set so_beats_user_pass = salt['random.get_str'](72) %} +{% set so_monitor_user_pass = salt['random.get_str'](72) %} elastic_auth_pillar: file.managed: diff --git a/salt/elasticsearch/files/ingest/beats.common b/salt/elasticsearch/files/ingest/beats.common index 4e358582e..3cfa33521 100644 --- a/salt/elasticsearch/files/ingest/beats.common +++ b/salt/elasticsearch/files/ingest/beats.common @@ -2,7 +2,7 @@ "description" : "beats.common", "processors" : [ { "pipeline": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational'", "name": "sysmon" } }, - { "pipeline": { "if": "ctx.winlog?.channel != 'Microsoft-Windows-Sysmon/Operational'", 
"name":"win.eventlogs" } }, + { "pipeline": { "if": "ctx.winlog?.channel != 'Microsoft-Windows-Sysmon/Operational' && ctx.containsKey('winlog')", "name":"win.eventlogs" } }, { "pipeline": { "name": "common" } } ] } \ No newline at end of file diff --git a/salt/elasticsearch/files/ingest/ecs b/salt/elasticsearch/files/ingest/ecs new file mode 100644 index 000000000..e52ab6e71 --- /dev/null +++ b/salt/elasticsearch/files/ingest/ecs @@ -0,0 +1,155 @@ +{ + "description" : "ECS Testing Pipeline", + "processors": [ + { + "append": { + "field": "event.category", + "value": [ + "process" + ], + "if": "ctx?.wazuh?.data?.type == 'process'", + "tag": "test", + "ignore_failure": true + } + }, + { + "set": { + "field": "event.type", + "value": [ + "start" + ], + "if": "ctx?.wazuh?.data?.type == 'process'", + "tag": "test", + "ignore_failure": true + } + }, + { + "set": { + "field": "event.type", + "value": "end", + "if": "ctx?.wazuh?.data?.type == 'process_end'", + "tag": "test", + "ignore_failure": true + } + }, + { + "set": { + "field": "user.name", + "copy_from": "process.user", + "ignore_empty_value": true, + "tag": "test", + "ignore_failure": true + } + }, + { + "set": { + "field": "host.os.type", + "copy_from": "wazuh.data.os.sysname", + "ignore_empty_value": true, + "tag": "test", + "ignore_failure": true + } + }, + { + "set": { + "field": "host.os.platform", + "copy_from": "wazuh.data.os.platform", + "ignore_empty_value": true, + "tag": "test", + "ignore_failure": true + } + }, + { + "set": { + "field": "host.os.name", + "copy_from": "wazuh.data.os.name", + "ignore_empty_value": true, + "tag": "test", + "ignore_failure": true + } + }, + { + "set": { + "field": "host.os.version", + "copy_from": "wazuh.data.os.version", + "ignore_empty_value": true, + "tag": "test", + "ignore_failure": true + } + }, + { + "set": { + "field": "signal.rule.name", + "copy_from": "rule.name", + "ignore_empty_value": true, + "tag": "test", + "ignore_failure": true + } + }, + { + "set": { + 
"field": "signal.rule.type", + "copy_from": "rule.category", + "ignore_empty_value": true, + "ignore_failure": true + } + }, + { + "set": { + "field": "signal.rule.threat.tactic.name", + "copy_from": "rule.mitre.tactic", + "ignore_empty_value": true, + "tag": "test", + "ignore_failure": true + } + }, + { + "append": { + "field": "event.category", + "value": [ + "authentication" + ], + "if": "if(ctx?.rule?.groups != null) {\n if(ctx?.rule?.groups?.contains('authentication_success')) {\n return true\n }\n if(ctx?.rule?.groups?.contains('authentication_failed')) {\n return true\n }\n return false\n}", + "ignore_failure": true + } + }, + { + "set": { + "field": "event.outcome", + "value": "success", + "ignore_empty_value": true, + "if": "ctx?.rule?.groups != null && ctx?.rule?.groups.contains('authentication_success')", + "tag": "test", + "ignore_failure": true + } + }, + { + "set": { + "field": "event.outcome", + "value": "failure", + "ignore_empty_value": true, + "if": "ctx?.rule?.groups != null && ctx?.rule?.groups.contains('authentication_failed')", + "tag": "test", + "ignore_failure": true + } + }, + { + "set": { + "field": "url.path", + "ignore_empty_value": true, + "tag": "test", + "ignore_failure": true, + "copy_from": "url.original" + } + }, + { + "set": { + "field": "url.domain", + "ignore_empty_value": true, + "tag": "test", + "ignore_failure": true, + "copy_from": "kibana.log.meta.req.headers.origin" + } + } + ] +} diff --git a/salt/elasticsearch/templates/so/so-common-template.json.jinja b/salt/elasticsearch/templates/so/so-common-template.json.jinja index 4394ebb65..4a41cba8a 100644 --- a/salt/elasticsearch/templates/so/so-common-template.json.jinja +++ b/salt/elasticsearch/templates/so/so-common-template.json.jinja @@ -1,12 +1,14 @@ {%- set INDEX_SORTING = salt['pillar.get']('elasticsearch:index_sorting', True) %} +{%- set REPLICAS = salt['pillar.get']('elasticsearch:replicas', 0) %} +{%- set REFRESH = 
salt['pillar.get']('elasticsearch:index_settings:so-common:refresh', '30s') %} { "index_patterns": ["so-*"], "version":50001, "order":10, "settings":{ - "number_of_replicas":0, + "number_of_replicas":{{ REPLICAS }}, "number_of_shards":1, - "index.refresh_interval":"30s", + "index.refresh_interval":"{{ REFRESH }}", "index.routing.allocation.require.box_type":"hot", "index.mapping.total_fields.limit": "1500", {%- if INDEX_SORTING is sameas true %} diff --git a/salt/manager/files/acng/acng.conf b/salt/manager/files/acng/acng.conf index 993452b57..1b7f05e04 100644 --- a/salt/manager/files/acng/acng.conf +++ b/salt/manager/files/acng/acng.conf @@ -77,7 +77,7 @@ FreshIndexMaxAge: 300 # AllowUserPorts: 80 RedirMax: 6 # VfileUseRangeOps is set for fedora volatile files on mirrors that dont to range -VfileUseRangeOps: 0 +VfileUseRangeOps: -1 # PassThroughPattern: private-ppa\.launchpad\.net:443$ # PassThroughPattern: .* # this would allow CONNECT to everything PassThroughPattern: (repo\.securityonion\.net:443|download\.docker\.com:443|mirrors\.fedoraproject\.org:443|packages\.wazuh\.com:443|repo\.saltstack\.com:443|yum\.dockerproject\.org:443|download\.docker\.com:443|registry\.npmjs\.org:443|registry\.yarnpkg\.com:443)$ # yarn/npm pkg, cant to http :/ diff --git a/salt/salt/minion.sls b/salt/salt/minion.sls index 3db257d1b..04fc1769c 100644 --- a/salt/salt/minion.sls +++ b/salt/salt/minion.sls @@ -87,4 +87,4 @@ salt_minion_service: patch_pkg: pkg.installed: - - name: patch \ No newline at end of file + - name: patch diff --git a/salt/ssl/init.sls b/salt/ssl/init.sls index e18c71fc5..a9aa66703 100644 --- a/salt/ssl/init.sls +++ b/salt/ssl/init.sls @@ -462,7 +462,7 @@ fleet_crt: - name: /etc/pki/fleet.crt - signing_private_key: /etc/pki/fleet.key - CN: {{ manager }} - - subjectAltName: DNS:{{ manager }},IP:{{ managerip }} + - subjectAltName: DNS:{{ manager }},IP:{{ managerip }}{% if CUSTOM_FLEET_HOSTNAME != None %},DNS:{{ CUSTOM_FLEET_HOSTNAME }}{% endif %} - days_remaining: 
0 - days_valid: 820 - backup: True diff --git a/salt/zeek/init.sls b/salt/zeek/init.sls index 2c9fb9846..fa4cf4f0b 100644 --- a/salt/zeek/init.sls +++ b/salt/zeek/init.sls @@ -90,11 +90,14 @@ zeekpolicysync: # Ensure the zeek spool tree (and state.db) ownership is correct zeekspoolownership: file.directory: - - name: /nsm/zeek + - name: /nsm/zeek/spool - user: 937 - - max_depth: 1 - - recurse: - - user +zeekstatedbownership: + file.managed: + - name: /nsm/zeek/spool/state.db + - user: 937 + - replace: False + - create: False # Sync Intel zeekintelloadsync: diff --git a/setup/automation/distributed-airgap-search b/setup/automation/distributed-airgap-search index 1acee9b1a..7a0888fee 100644 --- a/setup/automation/distributed-airgap-search +++ b/setup/automation/distributed-airgap-search @@ -35,7 +35,6 @@ ADMINPASS2=onionuser HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12 HNSENSOR=inherit HOSTNAME=distributed-search -INTERWEBS=AIRGAP install_type=SEARCHNODE # LSINPUTBATCHCOUNT= # LSINPUTTHREADS= diff --git a/setup/automation/distributed-airgap-sensor b/setup/automation/distributed-airgap-sensor index c8186bf8a..91b9c24a9 100644 --- a/setup/automation/distributed-airgap-sensor +++ b/setup/automation/distributed-airgap-sensor @@ -35,7 +35,6 @@ ZEEKVERSION=ZEEK HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12 HNSENSOR=inherit HOSTNAME=distributed-sensor -INTERWEBS=AIRGAP install_type=SENSOR # LSINPUTBATCHCOUNT= # LSINPUTTHREADS= diff --git a/setup/so-functions b/setup/so-functions index 62d458911..a9925c80d 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -475,10 +475,15 @@ collect_mngr_hostname() { whiptail_management_server "$MSRV" done + while [[ $MSRV == "$HOSTNAME" ]]; do + whiptail_invalid_hostname 0 + whiptail_management_server "$MSRV" + done + if ! getent hosts "$MSRV"; then whiptail_manager_ip - while ! valid_ip4 "$MSRVIP"; do + while ! 
valid_ip4 "$MSRVIP" || [[ $MSRVIP == "$MAINIP" || $MSRVIP == "127.0.0.1" ]]; do whiptail_invalid_input whiptail_manager_ip "$MSRVIP" done @@ -846,7 +851,7 @@ check_requirements() { local req_cores local req_storage local nic_list - readarray -t nic_list <<< "$(ip link| awk -F: '$0 !~ "lo|vir|veth|br|docker|wl|^[^0-9]"{print $2}' | grep -vwe "bond0" | sed 's/ //g')" + readarray -t nic_list <<< "$(ip link| awk -F: '$0 !~ "lo|vir|veth|br|docker|wl|^[^0-9]"{print $2}' | grep -vwe "bond0" | sed 's/ //g' | sed -r 's/(.*)(\.[0-9]+)@\1/\1\2/g')" local num_nics=${#nic_list[@]} if [[ "$standalone_or_dist" == 'standalone' ]]; then @@ -1124,9 +1129,10 @@ detect_os() { installer_progress_loop() { local i=0 + local msg="${1:-Performing background actions...}" while true; do [[ $i -lt 98 ]] && ((i++)) - set_progress_str "$i" 'Checking that all required packages are installed and enabled...' nolog + set_progress_str "$i" "$msg" nolog [[ $i -gt 0 ]] && sleep 5s done } @@ -1374,7 +1380,7 @@ filter_unused_nics() { fi # Finally, set filtered_nics to any NICs we aren't using (and ignore interfaces that aren't of use) - filtered_nics=$(ip link | awk -F: '$0 !~ "lo|vir|veth|br|docker|wl|^[^0-9]"{print $2}' | grep -vwe "$grep_string" | sed 's/ //g') + filtered_nics=$(ip link | awk -F: '$0 !~ "lo|vir|veth|br|docker|wl|^[^0-9]"{print $2}' | grep -vwe "$grep_string" | sed 's/ //g' | sed -r 's/(.*)(\.[0-9]+)@\1/\1\2/g') readarray -t filtered_nics <<< "$filtered_nics" nic_list=() diff --git a/setup/so-preflight b/setup/so-preflight old mode 100644 new mode 100755 index 2943191eb..fd6b5f736 --- a/setup/so-preflight +++ b/setup/so-preflight @@ -18,7 +18,13 @@ source ../salt/common/tools/sbin/so-common source ./so-functions -preflight_log='/root/preflight.log' +script_run="$1" + +if [[ $script_run == true ]]; then + preflight_log="${2:-/root/preflight.log}" +else + preflight_log='/root/preflight.log' +fi check_default_repos() { local ret_code=0 @@ -27,7 +33,7 @@ check_default_repos() { if [[ 
$OS == 'centos' ]]; then printf '%s' 'yum update.' | tee -a "$preflight_log" echo "" >> "$preflight_log" - yum -y update >> $preflight_log 2>&1 + yum -y check-update >> $preflight_log 2>&1 ret_code=$? else printf '%s' 'apt update.' | tee -a "$preflight_log" @@ -72,7 +78,8 @@ check_new_repos() { check_misc_urls() { printf ' Checking various other URLs used by setup.' | tee -a "$preflight_log" - local so_version=$(cat ../VERSION) + local so_version + so_version=$(cat ../VERSION) local url_arr=( "https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/master/KEYS" "https://github.com/Neo23x0/signature-base" @@ -112,10 +119,18 @@ __check_url_arr() { } main() { + local intro_str="Beginning pre-flight checks." + local success_str="Pre-flight checks completed successfully!" + local fail_str="Pre-flight checks could not complete." + detect_os "$preflight_log" [[ -f $preflight_log ]] || touch "$preflight_log" - echo "Beginning pre-flight checks." | tee "$preflight_log" + if [[ $script_run == true ]]; then + echo "$intro_str" + else + echo "$intro_str" | tee "$preflight_log" + fi check_default_repos &&\ check_new_repos &&\ check_misc_urls @@ -124,12 +139,23 @@ main() { echo "" if [[ $success == 0 ]]; then - echo -e "Pre-flight checks completed successfully!\n" | tee -a "$preflight_log" + if [[ $script_run == true ]]; then + echo "$success_str" + else + echo "$success_str" | tee -a "$preflight_log" + echo "" + fi else - echo -e "Pre-flight checks could not complete." | tee -a "$preflight_log" - echo -e " Check $preflight_log for details.\n" - exit 1 + if [[ $script_run == true ]]; then + echo "$fail_str" + else + echo "$fail_str" | tee -a "$preflight_log" + echo "Check $preflight_log for details." + echo "" + fi fi + + exit $success } main diff --git a/setup/so-setup b/setup/so-setup index 119a0d2ff..d71511971 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -265,14 +265,6 @@ if [[ $is_manager || $is_import ]]; then fi if ! 
[[ -f $install_opt_file ]]; then - # Check if this is an airgap install - if [[ ( $is_manager || $is_import || $is_minion ) && $is_iso ]]; then - whiptail_airgap - if [[ "$INTERWEBS" == 'AIRGAP' ]]; then - is_airgap=true - fi - fi - if [[ $is_manager && $is_sensor ]]; then check_requirements "standalone" elif [[ $is_fleet_standalone ]]; then @@ -312,17 +304,29 @@ if ! [[ -f $install_opt_file ]]; then add_mngr_ip_to_hosts fi + if [[ $is_minion ]]; then + whiptail_ssh_key_copy_notice + copy_ssh_key >> $setup_log 2>&1 + fi + + # Check if this is an airgap install + if [[ ( $is_manager || $is_import) && $is_iso ]]; then + whiptail_airgap + if [[ "$INTERWEBS" == 'AIRGAP' ]]; then + is_airgap=true + fi + elif [[ $is_minion && $is_iso ]]; then + $sshcmd -i /root/.ssh/so.key soremote@"$MSRV" [[ -f /etc/yum.repos.d/airgap_repo.repo ]] >> $setup_log 2>&1 + airgap_check=$? + [[ $airgap_check -eq 0 ]] && is_airgap=true >> $setup_log 2>&1 + fi + reset_proxy if [[ -z $is_airgap ]]; then collect_net_method [[ -n "$so_proxy" ]] && set_proxy >> $setup_log 2>&1 fi - if [[ $is_minion ]]; then - whiptail_ssh_key_copy_notice - copy_ssh_key >> $setup_log 2>&1 - fi - if [[ $is_minion ]] && ! (compare_versions); then info "Installer version mismatch, downloading correct version from manager" printf '%s\n' \ @@ -337,14 +341,31 @@ if ! [[ -f $install_opt_file ]]; then download_repo_tarball exec bash /root/manager_setup/securityonion/setup/so-setup "${original_args[@]}" fi - else rm -rf $install_opt_file >> "$setup_log" 2>&1 fi +if [[ -z $is_airgap ]]; then + percentage=0 + { + installer_progress_loop 'Running preflight checks...' & + progress_bg_proc=$! + ./so-preflight true "$setup_log" >> $setup_log 2>&1 + preflight_ret=$? + echo "$preflight_ret" > /tmp/preflight_ret + kill -9 "$progress_bg_proc" + wait "$progress_bg_proc" &> /dev/null + } | progress '...' 
+ [[ -f /tmp/preflight_ret ]] && preflight_ret=$(cat /tmp/preflight_ret) + rm /tmp/preflight_ret + if [[ -n $preflight_ret && $preflight_ret -gt 0 ]] && ! ( whiptail_preflight_err ); then + whiptail_cancel + fi +fi + percentage=0 { - installer_progress_loop & # Run progress bar to 98 in ~8 minutes while waiting for package installs + installer_progress_loop 'Checking that all required packages are installed and enabled...' & # Run progress bar to 98 in ~8 minutes while waiting for package installs progress_bg_proc=$! installer_prereq_packages install_success=$? diff --git a/setup/so-whiptail b/setup/so-whiptail index e74529438..13bfa82b4 100755 --- a/setup/so-whiptail +++ b/setup/so-whiptail @@ -83,8 +83,8 @@ whiptail_bond_nics_mtu() { } whiptail_cancel() { - - whiptail --title "$whiptail_title" --msgbox "Cancelling Setup." 8 75 + [ -z "$TESTING" ] && whiptail --title "$whiptail_title" --msgbox "Cancelling Setup." 8 75 + if [ -d "/root/installtmp" ]; then { echo "/root/installtmp exists"; @@ -95,7 +95,7 @@ whiptail_cancel() { title "User cancelled setup." - exit + exit 1 } whiptail_check_exitstatus() { @@ -505,6 +505,8 @@ whiptail_end_settings() { [[ -n $WEBUSER ]] && __append_end_msg "Web User: $WEBUSER" [[ -n $FLEETNODEUSER ]] && __append_end_msg "Fleet User: $FLEETNODEUSER" + + [[ -n $FLEETCUSTOMHOSTNAME ]] && __append_end_msg "Fleet Custom Hostname: $FLEETCUSTOMHOSTNAME" if [[ $is_manager ]]; then __append_end_msg "Enabled Optional Components:" @@ -733,7 +735,7 @@ whiptail_install_type() { # What kind of install are we doing? install_type=$(whiptail --title "$whiptail_title" --radiolist \ - "Choose install type:" 12 65 5 \ + "Choose install type. See https://docs.securityonion.net/architecture for details." 
12 65 5 \ "EVAL" "Evaluation mode (not for production) " ON \ "STANDALONE" "Standalone production install " OFF \ "DISTRIBUTED" "Distributed install submenu " OFF \ @@ -747,6 +749,11 @@ if [[ $install_type == "DISTRIBUTED" ]]; then whiptail_install_type_dist + if [[ $dist_option == "NEWDEPLOYMENT" ]]; then + whiptail_install_type_dist_new + else + whiptail_install_type_dist_existing + fi elif [[ $install_type == "OTHER" ]]; then whiptail_install_type_other fi @@ -757,13 +764,55 @@ whiptail_install_type_dist() { [ -n "$TESTING" ] && return + + dist_option=$(whiptail --title "$whiptail_title" --menu "Do you want to start a new deployment or join this box to \nan existing deployment?" 11 75 2 \ + "New Deployment " "Create a new Security Onion deployment" \ + "Existing Deployment " "Join to an existing Security Onion deployment " \ + 3>&1 1>&2 2>&3 + ) + local exitstatus=$? + whiptail_check_exitstatus $exitstatus + + dist_option=$(echo "${dist_option^^}" | tr -d ' ') +} + +whiptail_install_type_dist_new() { + [ -n "$TESTING" ] && return + + local mngr_msg + read -r -d '' mngr_msg <<- EOM + Choose a distributed manager type to start a new grid. - install_type=$(whiptail --title "$whiptail_title" --radiolist \ - "Choose distributed node type:" 13 60 6 \ - "MANAGER" "Start a new grid " ON \ - "SENSOR" "Create a forward only sensor " OFF \ + See https://docs.securityonion.net/architecture for details. + + Note: MANAGER is the recommended option for most users. MANAGERSEARCH should only be used in very specific situations. + EOM + + install_type=$(whiptail --title "$whiptail_title" --radiolist "$mngr_msg" 15 75 2 \ + "MANAGER" "New grid, requires separate search node(s) " ON \ + "MANAGERSEARCH" "New grid, separate search node(s) are optional " OFF \ + 3>&1 1>&2 2>&3 + ) + + local exitstatus=$? 
+ whiptail_check_exitstatus $exitstatus +} + +whiptail_install_type_dist_existing() { + [ -n "$TESTING" ] && return + + local node_msg + read -r -d '' node_msg <<- EOM + Choose a distributed node type to join to an existing grid. + + See https://docs.securityonion.net/architecture for details. + + Note: Heavy nodes (HEAVYNODE) are NOT recommended for most users. + EOM + + install_type=$(whiptail --title "$whiptail_title" --radiolist "$node_msg" 17 57 4 \ + "SENSOR" "Create a forward only sensor " ON \ "SEARCHNODE" "Add a search node with parsing " OFF \ - "MANAGERSEARCH" "Manager + search node " OFF \ "FLEET" "Dedicated Fleet Osquery Node " OFF \ "HEAVYNODE" "Sensor + Search Node " OFF \ 3>&1 1>&2 2>&3 @@ -775,8 +824,6 @@ whiptail_install_type_dist() { local exitstatus=$? whiptail_check_exitstatus $exitstatus - - export install_type } whiptail_install_type_other() { @@ -810,7 +857,6 @@ whiptail_invalid_input() { # TODO: This should accept a list of arguments to spe [ -n "$TESTING" ] && return whiptail --title "$whiptail_title" --msgbox " Invalid input, please try again." 7 40 - } whiptail_invalid_proxy() { @@ -857,10 +903,21 @@ whiptail_invalid_user_warning() { whiptail_invalid_hostname() { [ -n "$TESTING" ] && return + local is_manager_hostname + is_manager_hostname="$1" + local error_message - error_message=$(echo "Please choose a valid hostname. It cannot be localhost; and must contain only \ - the ASCII letters 'A-Z' and 'a-z' (case-sensitive), the digits '0' through '9', \ - and hyphen ('-')" | tr -d '\t') + read -r -d '' error_message <<- EOM + Please choose a valid hostname. It cannot be localhost. It must contain only the ASCII letters 'A-Z' and 'a-z' (case-sensitive), the digits '0' through '9', and hyphen ('-'). + EOM + + if [[ $is_manager_hostname = 0 ]]; then + local error_message + read -r -d '' error_message <<- EOM + Please enter a valid hostname. The manager hostname cannot be localhost or the chosen hostname for this machine. 
+ EOM + + fi whiptail --title "$whiptail_title" \ --msgbox "$error_message" 10 75 @@ -905,6 +962,7 @@ whiptail_first_menu_iso() { option=$(echo "${option^^}" | tr -d ' ') } + whiptail_make_changes() { [ -n "$TESTING" ] && return @@ -1487,6 +1545,20 @@ whiptail_patch_schedule_select_hours() { } +whiptail_preflight_err() { + [ -n "$TESTING" ] && return 1 + + read -r -d '' message <<- EOM + The so-preflight script failed checking one or more URLs required by setup. Check $setup_log for more details. + + Would you like to exit setup? + EOM + + whiptail --title "$whiptail_title" \ + --yesno "$message" 11 75 \ + --yes-button "Continue" --no-button "Exit" --defaultno +} + whiptail_proxy_ask() { [ -n "$TESTING" ] && return