diff --git a/README.md b/README.md
index e248f3453..fc302d2a8 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,6 @@
-## Security Onion 2.4 Beta 2
+## Security Onion 2.4 Beta 3
 
-Security Onion 2.4 Beta 2 is here!
+Security Onion 2.4 Beta 3 is here!
 
 ## Screenshots
 
diff --git a/salt/elasticfleet/tools/sbin_jinja/so-elastic-agent-gen-installers b/salt/elasticfleet/tools/sbin_jinja/so-elastic-agent-gen-installers
index 2dd92d21b..84a519d37 100755
--- a/salt/elasticfleet/tools/sbin_jinja/so-elastic-agent-gen-installers
+++ b/salt/elasticfleet/tools/sbin_jinja/so-elastic-agent-gen-installers
@@ -10,12 +10,12 @@
 
 . /usr/sbin/so-common
 
-FLEETHOST="https://{{ GLOBALS.manager_ip }}:8220"
+#FLEETHOST="https://{{ GLOBALS.manager_ip }}:8220"
 
 for i in {1..30}
 do
-  ENROLLMENTOKEN=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq .list | jq -r -c '.[] | select(.policy_id | contains("endpoints")) | .api_key')
-  #FLEETHOST=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/fleet_server_hosts' | jq -r '.items[].host_urls[]' | paste -sd ',')
+  ENROLLMENTOKEN=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq .list | jq -r -c '.[] | select(.policy_id | contains("endpoints-initial")) | .api_key')
+  FLEETHOST=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/fleet_server_hosts/grid-default' | jq -r '.item.host_urls[]' | paste -sd ',')
   if [[ $FLEETHOST ]] && [[ $ENROLLMENTOKEN ]]; then break; else sleep 10; fi
 done
 if [[ -z $FLEETHOST ]] || [[ -z $ENROLLMENTOKEN ]]; then printf "\nFleet Host URL or Enrollment Token empty - exiting..." && exit; fi
diff --git a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-setup b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-setup
index 6ad97a223..c81d69282 100755
--- a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-setup
+++ b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-setup
@@ -35,9 +35,16 @@ curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fl
 printf "\n\n"
 {%- endif %}
 
+# Add Manager IP & URL Base to Fleet Host URLs
 printf "\nAdd SO-Manager Fleet URL\n"
+if [ "{{ GLOBALS.manager_ip }}" = "{{ GLOBALS.url_base }}" ]; then
+  JSON_STRING=$( jq -n '{"id":"grid-default","name":"grid-default","is_default":true,"host_urls":["https://{{ GLOBALS.url_base }}:8220"]}')
+else
+  JSON_STRING=$( jq -n '{"id":"grid-default","name":"grid-default","is_default":true,"host_urls":["https://{{ GLOBALS.url_base }}:8220", "https://{{ GLOBALS.manager_ip }}:8220"]}')
+fi
+
 ## This array replaces whatever URLs are currently configured
-curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/settings" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d '{"fleet_server_hosts":["https://{{ GLOBALS.manager_ip }}:8220", "https://{{ GLOBALS.manager }}:8220"]}'
+curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/fleet_server_hosts" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
 printf "\n\n"
 
@@ -74,7 +81,7 @@ curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fl
 ### Finalization ###
 
 # Query for Enrollment Tokens for default policies
-ENDPOINTSENROLLMENTOKEN=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq .list | jq -r -c '.[] | select(.policy_id | contains("endpoints-default")) | .api_key')
+ENDPOINTSENROLLMENTOKEN=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq .list | jq -r -c '.[] | select(.policy_id | contains("endpoints-initial")) | .api_key')
 GRIDNODESENROLLMENTOKEN=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq .list | jq -r -c '.[] | select(.policy_id | contains("so-grid-nodes")) | .api_key')
 
 # Store needed data in minion pillar
diff --git a/salt/global/soc_global.yaml b/salt/global/soc_global.yaml
index 6861affd7..14d637d50 100644
--- a/salt/global/soc_global.yaml
+++ b/salt/global/soc_global.yaml
@@ -6,8 +6,13 @@ global:
   managerip:
     description: The IP address of the grid manager.
     global: True
+    advanced: True
+    regex: ^(([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))?)?$
+    regexFailureMessage: You must enter a valid IP address or CIDR.
   mdengine:
     description: What engine to use for meta data generation. Options are ZEEK and SURICATA.
+    regex: ^(ZEEK|SURICATA)$
+    regexFailureMessage: You must enter either ZEEK or SURICATA.
     global: True
   ids:
     description: Which IDS engine to use. Currently only Suricata is supported.
diff --git a/salt/suricata/defaults.yaml b/salt/suricata/defaults.yaml
index f154b5beb..4651b7268 100644
--- a/salt/suricata/defaults.yaml
+++ b/salt/suricata/defaults.yaml
@@ -2,56 +2,83 @@ suricata:
   enabled: False
   config:
     threading:
-      set-cpu-affinity: 'no'
-      detect-thread-ratio: 1.0
-      cpu-affinity:
-        - management-cpu-set:
-            cpu: []
-        - receive-cpu-set:
-            cpu: []
-        - worker-cpu-set:
-            cpu: []
-            mode: exclusive
-            threads: 1
-            prio:
-              default: high
+      set-cpu-affinity: "no"
+      cpu-affinity:
+        management-cpu-set:
+          cpu:
+            - 1
+        worker-cpu-set:
+          cpu:
+            - 2-3
+          mode: exclusive
+          prio:
+            default: high
     af-packet:
-      interface: bond0
-      cluster-id: 59
-      cluster-type: cluster_flow
-      defrag: true
-      use-mmap: true
-      threads: 1
-      tpacket-v3: true
-      ring-size: 5000
+      interface: bond0
+      cluster-id: 59
+      cluster-type: cluster_flow
+      defrag: "yes"
+      use-mmap: "yes"
+      threads: 1
+      tpacket-v3: "yes"
+      ring-size: 5000
     vars:
       address-groups:
-        HOME_NET: "[192.168.0.0/16,10.0.0.0/8,172.16.0.0/12]"
-        EXTERNAL_NET: "any"
-        HTTP_SERVERS: "$HOME_NET"
-        SMTP_SERVERS: "$HOME_NET"
-        SQL_SERVERS: "$HOME_NET"
-        DNS_SERVERS: "$HOME_NET"
-        TELNET_SERVERS: "$HOME_NET"
-        AIM_SERVERS: "$EXTERNAL_NET"
-        DC_SERVERS: "$HOME_NET"
-        DNP3_SERVER: "$HOME_NET"
-        DNP3_CLIENT: "$HOME_NET"
-        MODBUS_CLIENT: "$HOME_NET"
-        MODBUS_SERVER: "$HOME_NET"
-        ENIP_CLIENT: "$HOME_NET"
-        ENIP_SERVER: "$HOME_NET"
+        HOME_NET:
+          - 192.168.0.0/16
+          - 10.0.0.0/8
+          - 172.16.0.0/12
+        EXTERNAL_NET:
+          - any
+        HTTP_SERVERS:
+          - $HOME_NET
+        SMTP_SERVERS:
+          - $HOME_NET
+        SQL_SERVERS:
+          - $HOME_NET
+        DNS_SERVERS:
+          - $HOME_NET
+        TELNET_SERVERS:
+          - $HOME_NET
+        AIM_SERVERS:
+          - $EXTERNAL_NET
+        DC_SERVERS:
+          - $HOME_NET
+        DNP3_SERVER:
+          - $HOME_NET
+        DNP3_CLIENT:
+          - $HOME_NET
+        MODBUS_CLIENT:
+          - $HOME_NET
+        MODBUS_SERVER:
+          - $HOME_NET
+        ENIP_CLIENT:
+          - $HOME_NET
+        ENIP_SERVER:
+          - $HOME_NET
       port-groups:
-        HTTP_PORTS: "80"
-        SHELLCODE_PORTS: "!80"
-        ORACLE_PORTS: "1521"
-        SSH_PORTS: "22"
-        DNP3_PORTS: "20000"
-        MODBUS_PORTS: "502"
-        FILE_DATA_PORTS: "[$HTTP_PORTS,110,143]"
-        FTP_PORTS: "21"
-        VXLAN_PORTS: "4789"
-        TEREDO_PORTS: "3544"
+        HTTP_PORTS:
+          - 80
+        SHELLCODE_PORTS:
+          - "!80"
+        ORACLE_PORTS:
+          - 1521
+        SSH_PORTS:
+          - 22
+        DNP3_PORTS:
+          - 20000
+        MODBUS_PORTS:
+          - 502
+        FILE_DATA_PORTS:
+          - $HTTP_PORTS
+          - 110
+          - 143
+        FTP_PORTS:
+          - 21
+        VXLAN_PORTS:
+          - 4789
+        TEREDO_PORTS:
+          - 3544
     default-log-dir: /var/log/suricata/
     stats:
       enabled: "yes"
@@ -69,24 +96,24 @@ suricata:
         pcap-file: false
         community-id: true
         community-id-seed: 0
-        xff:
-          enabled: "no"
-          mode: extra-data
-          deployment: reverse
-          header: X-Forwarded-For
         types:
-          - alert:
-              payload: "no"
-              payload-buffer-size: 4kb
-              payload-printable: "yes"
-              packet: "yes"
-              metadata:
-                app-layer: false
-                flow: false
-                rule:
-                  metadata: true
-                  raw: true
-              tagged-packets: "no"
+          alert:
+            payload: "no"
+            payload-buffer-size: 4kb
+            payload-printable: "yes"
+            packet: "yes"
+            metadata:
+              app-layer: false
+              flow: false
+              rule:
+                metadata: true
+                raw: true
+            tagged-packets: "no"
+            xff:
+              enabled: "no"
+              mode: extra-data
+              deployment: reverse
+              header: X-Forwarded-For
     unified2-alert:
       enabled: "no"
     http-log:
diff --git a/salt/suricata/map.jinja b/salt/suricata/map.jinja
index b238405c8..5576117cc 100644
--- a/salt/suricata/map.jinja
+++ b/salt/suricata/map.jinja
@@ -1,4 +1,4 @@
-{% from 'vars/globals.map.jinja' import GLOBALS %}
+{% from 'vars/globals.map.jinja' import GLOBALS %} 
 {% import_yaml 'suricata/defaults.yaml' as SURICATADEFAULTS %}
 {% set SURICATAMERGED = salt['pillar.get']('suricata', SURICATADEFAULTS.suricata, merge=True) %}
 {% import_yaml 'suricata/suricata_mdengine.yaml' as suricata_mdengine %}
@@ -23,6 +23,45 @@
 {% do SURICATAMERGED.config.pop('af-packet') %}
 {% do SURICATAMERGED.config.update({'af-packet': afpacket}) %}
 
+{# eve-log.types is a list but we convert to dict in defaults to work with ui #}
+{# below they are converted back to lists #}
+{% load_yaml as evelogtypes %}
+{% for le, ld in SURICATAMERGED.config.outputs['eve-log'].types.items() %}
+  - {{ le }}: {{ ld }}
+{% endfor %}
+{% endload %}
+{% do SURICATAMERGED.config.outputs['eve-log'].pop('types') %}
+{% do SURICATAMERGED.config.outputs['eve-log'].update({'types': evelogtypes}) %}
+
+{# threading.cpu-affinity is a list but we convert to dict in defaults to work with ui #}
+{# below they are converted back to lists #}
+{% load_yaml as cpuaffinity %}
+{% for le, ld in SURICATAMERGED.config.threading['cpu-affinity'].items() %}
+  - {{ le }}: {{ ld }}
+{% endfor %}
+{% endload %}
+{% do SURICATAMERGED.config.threading.pop('cpu-affinity') %}
+{% do SURICATAMERGED.config.threading.update({'cpu-affinity': cpuaffinity}) %}
+
+{# Find the index of eve-log and file-store in suricata_mdengine.suricata.config.outputs #}
+{# update outputs eve-log.types and filestore with config for Suricata metadata engine #}
+{% if GLOBALS.md_engine == 'SURICATA' %}
+{% for li in suricata_mdengine.suricata.config.outputs %}
+{% if 'eve-log' in li.keys() %}
+{% do surimeta_evelog_index.append(loop.index0) %}
+{% endif %}
+{% if 'file-store' in li.keys() %}
+{% do surimeta_filestore_index.append(loop.index0) %}
+{% endif %}
+{% endfor %}
+{% set surimeta_evelog_index = surimeta_evelog_index[0] %}
+{% set surimeta_filestore_index = surimeta_filestore_index[0] %}
+{% do SURICATAMERGED.config.outputs['eve-log'].types.extend(suricata_mdengine.suricata.config.outputs[surimeta_evelog_index]['eve-log'].types) %}
+{% do SURICATAMERGED.config.outputs['file-store'].update({'enabled':suricata_mdengine.suricata.config.outputs[surimeta_filestore_index]['file-store']['enabled']}) %}
+{% endif %}
+
+{# outputs is a list but we convert to dict in defaults to work with ui #}
+{# below they are converted back to lists #}
 {% load_yaml as outputs %}
 {% for le, ld in SURICATAMERGED.config.outputs.items() %}
   - {{ le }}: {{ ld }}
@@ -31,31 +70,22 @@
 {% do SURICATAMERGED.config.pop('outputs') %}
 {% do SURICATAMERGED.config.update({'outputs': outputs}) %}
 
-{# Find the index of eve-log so it can be updated later #}
-{% for li in SURICATAMERGED.config.outputs %}
-  {% if 'eve-log' in li.keys() %}
-    {% do default_evelog_index.append(loop.index0) %}
-  {% endif %}
-  {% if 'file-store' in li.keys() %}
-    {% do default_filestore_index.append(loop.index0) %}
-  {% endif %}
+{# change address-groups vars from list to comma separated string #}
+{% for k, v in SURICATAMERGED.config.vars['address-groups'].items() %}
+{# if address-group value is a list #}
+{% if v is iterable and (v is not string and v is not mapping and v | length > 1) %}
+{% do SURICATAMERGED.config.vars['address-groups'].update({k: '[' ~ v | join(',') ~ ']'}) %}
+{% else %}
+{% do SURICATAMERGED.config.vars['address-groups'].update({k: v[0]}) %}
+{% endif %}
 {% endfor %}
-{% set default_evelog_index = default_evelog_index[0] %}
-{% set default_filestore_index = default_filestore_index[0] %}
-{# Find the index of eve-log so it can be grabbed later #}
-{% for li in suricata_mdengine.suricata.config.outputs %}
-  {% if 'eve-log' in li.keys() %}
-    {% do surimeta_evelog_index.append(loop.index0) %}
-  {% endif %}
-  {% if 'file-store' in li.keys() %}
-    {% do surimeta_filestore_index.append(loop.index0) %}
-  {% endif %}
+{# change port-groups vars from list to comma separated string #}
+{% for k, v in SURICATAMERGED.config.vars['port-groups'].items() %}
+{# if port-group value is a list #}
+{% if v is iterable and (v is not string and v is not mapping and v | length > 1) %}
+{% do SURICATAMERGED.config.vars['port-groups'].update({k: '[' ~ v | join(',') ~ ']'}) %}
+{% else %}
+{% do SURICATAMERGED.config.vars['port-groups'].update({k: v[0]}) %}
+{% endif %}
 {% endfor %}
-{% set surimeta_evelog_index = surimeta_evelog_index[0] %}
-{% set surimeta_filestore_index = surimeta_filestore_index[0] %}
-
-{% if GLOBALS.md_engine == 'SURICATA' %}
-  {% do SURICATAMERGED.config.outputs[default_evelog_index]['eve-log'].types.extend(suricata_mdengine.suricata.config.outputs[surimeta_evelog_index]['eve-log'].types) %}
-  {% do SURICATAMERGED.config.outputs[default_filestore_index]['file-store'].update({'enabled':suricata_mdengine.suricata.config.outputs[surimeta_filestore_index]['file-store']['enabled']}) %}
-{% endif %}
diff --git a/salt/suricata/soc_suricata.yaml b/salt/suricata/soc_suricata.yaml
index f1971f17f..f13e89618 100644
--- a/salt/suricata/soc_suricata.yaml
+++ b/salt/suricata/soc_suricata.yaml
@@ -12,10 +12,54 @@ suricata:
     title: SIDS
     helpLink: suricata.html
   config:
+    af-packet:
+      interface:
+        description: The network interface that Suricata will monitor.
+        helpLink: suricata.html
+      cluster-id:
+        advanced: True
+      cluster-type:
+        advanced: True
+        regex: ^(cluster_flow|cluster_qm)$
+      defrag:
+        advanced: True
+        regex: ^(yes|no)$
+      use-mmap:
+        advanced: True
+        readonly: True
+      threads:
+        description: The number of worker threads.
+        helpLink: suricata.html
+        forcedType: int
+      tpacket-v3:
+        advanced: True
+        readonly: True
+      ring-size:
+        description: Buffer size for packets per thread.
+        forcedType: int
+        helpLink: suricata.html
+    threading:
+      set-cpu-affinity:
+        description: Bind (yes) or unbind (no) management and worker threads to a core or range of cores.
+        regex: ^(yes|no)$
+        helpLink: suricata.html
+      cpu-affinity:
+        management-cpu-set:
+          cpu:
+            description: Bind management threads to a core or range of cores. This can be a single core, list of cores, or list of range of cores. set-cpu-affinity must be set to 'yes' for this to be used.
+            forcedType: "[]string"
+            helpLink: suricata.html
+        worker-cpu-set:
+          cpu:
+            description: Bind worker threads to a core or range of cores. This can be a single core, list of cores, or list of range of cores. set-cpu-affinity must be set to 'yes' for this to be used.
+            forcedType: "[]string"
+            helpLink: suricata.html
     vars:
       address-groups:
         HOME_NET:
           description: List of hosts or networks.
+          regex: ^(([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))?)?$
+          regexFailureMessage: You must enter a valid IP address or CIDR.
           helpLink: suricata.html
         EXTERNAL_NET:
           description: List of hosts or networks.
@@ -92,19 +136,21 @@ suricata:
           helpLink: suricata.html
     outputs:
       eve-log:
-        xff:
-          enabled:
-            description: Enable X-Forward-For support.
-            helpLink: suricata.html
-          mode:
-            description: Operation mode. This should always be extra-data if you use PCAP.
-            helpLink: suricata.html
-          deployment:
-            description: forward would use the first IP address and reverse would use the last.
-            helpLink: suricata.html
-          header:
-            description: Header name where the actual IP address will be reported.
-            helpLink: suricata.html
+        types:
+          alert:
+            xff:
+              enabled:
+                description: Enable X-Forward-For support.
+                helpLink: suricata.html
+              mode:
+                description: Operation mode. This should always be extra-data if you use PCAP.
+                helpLink: suricata.html
+              deployment:
+                description: forward would use the first IP address and reverse would use the last.
+                helpLink: suricata.html
+              header:
+                description: Header name where the actual IP address will be reported.
+                helpLink: suricata.html
     asn1-max-frames:
       description: Maximum nuber of asn1 frames to decode.
      helpLink: suricata.html
diff --git a/setup/so-functions b/setup/so-functions
index 3e487abfe..37cb20c19 100755
--- a/setup/so-functions
+++ b/setup/so-functions
@@ -28,6 +28,12 @@ title() {
   echo -e "\n-----------------------------\n $1\n-----------------------------\n" >> "$setup_log" 2>&1
 }
 
+fail_setup() {
+  error "Setup encountered an unrecoverable failure, exiting"
+  touch /root/failure
+  exit 1
+}
+
 logCmd() {
   cmd=$1
   info "Executing command: $cmd"
@@ -796,7 +802,7 @@ compare_main_nic_ip() {
 EOM
     [[ -n $TESTING ]] || whiptail --title "$whiptail_title" --msgbox "$message" 11 75
-    kill -SIGINT "$(ps --pid $$ -oppid=)"; exit 1
+    kill -SIGINT "$(ps --pid $$ -oppid=)"; fail_setup
   fi
 else
   # Setup uses MAINIP, but since we ignore the equality condition when using a VPN
@@ -921,9 +927,10 @@ create_repo() {
 
 detect_cloud() {
   info "Testing if setup is running on a cloud instance..."
-  if dmidecode -s bios-version | grep -q amazon || \
-     dmidecode -s bios-vendor | grep -q Amazon || \
-     dmidecode -s bios-vendor | grep -q Google || \
+  if [ -f /etc/SOCLOUD ] || \
+     dmidecode -s bios-version 2>&1 | grep -q amazon || \
+     dmidecode -s bios-vendor 2>&1 | grep -q Amazon || \
+     dmidecode -s bios-vendor 2>&1 | grep -q Google || \
      [ -f /var/log/waagent.log ]; then
 
     info "Detected a cloud installation..."
@@ -943,7 +950,7 @@ detect_os() {
       pkgman="dnf"
     else
       info "We do not support the operating system you are trying to use."
-      exit 1
+      fail_setup
     fi
 
   elif [ -f /etc/os-release ]; then
@@ -953,12 +960,12 @@
       is_ubuntu=true
     else
       info "We do not support your current version of Ubuntu."
-      exit 1
+      fail_setup
     fi
   else
     info "We were unable to determine if you are using a supported OS."
-    exit 1
+    fail_setup
   fi
 
   info "Found OS: $OS $OSVER"
@@ -971,8 +978,20 @@ download_elastic_agent_artifacts() {
     logCmd "tar -xf /nsm/elastic-fleet/artifacts/beats/elastic-agent_SO-$SOVERSION.tar.gz -C /nsm/elastic-fleet/artifacts/beats/elastic-agent/"
   else
     logCmd "mkdir -p /nsm/elastic-fleet/artifacts/beats/elastic-agent/"
-    logCmd "curl --retry 5 --retry-delay 60 https://repo.securityonion.net/file/so-repo/prod/2.4/elasticagent/elastic-agent_SO-$SOVERSION.tar.gz --output /nsm/elastic-fleet/artifacts/elastic-agent_SO-$SOVERSION.tar.gz"
-    logCmd "tar -xf /nsm/elastic-fleet/artifacts/elastic-agent_SO-$SOVERSION.tar.gz -C /nsm/elastic-fleet/artifacts/beats/elastic-agent/"
+    logCmd "curl --retry 5 --retry-delay 60 -L https://repo.securityonion.net/file/so-repo/prod/2.4/elasticagent/elastic-agent_SO-$SOVERSION.tar.gz --output /nsm/elastic-fleet/artifacts/elastic-agent_SO-$SOVERSION.tar.gz"
+    logCmd "curl --retry 5 --retry-delay 60 -L https://repo.securityonion.net/file/so-repo/prod/2.4/elasticagent/elastic-agent_SO-$SOVERSION.md5 --output /nsm/elastic-fleet/artifacts/elastic-agent_SO-$SOVERSION.md5"
+
+    SOURCEHASH=$(md5sum /nsm/elastic-fleet/artifacts/elastic-agent_SO-$SOVERSION.tar.gz | awk '{ print $1 }')
+    HASH=$(cat /nsm/elastic-fleet/artifacts/elastic-agent_SO-$SOVERSION.md5)
+
+    if [[ "$HASH" == "$SOURCEHASH" ]]; then
+      info "Elastic Agent source hash is good."
+    else
+      info "Unable to download the Elastic Agent source files."
+      fail_setup
+    fi
+
+    logCmd "tar -xf /nsm/elastic-fleet/artifacts/elastic-agent_SO-$SOVERSION.tar.gz -C /nsm/elastic-fleet/artifacts/beats/elastic-agent/"
   fi
 }
 
@@ -1000,18 +1019,18 @@ installer_prereq_packages() {
   if [ "$OS" == ubuntu ]; then
     # Print message to stdout so the user knows setup is doing something
     info "Running apt-get update"
-    retry 150 10 "apt-get update" "" "Err:" >> "$setup_log" 2>&1 || exit 1
+    retry 150 10 "apt-get update" "" "Err:" >> "$setup_log" 2>&1 || fail_setup
 
     # Install network manager so we can do interface stuff
     if ! command -v nmcli > /dev/null 2>&1; then
       info "Installing network-manager"
-      retry 150 10 "apt-get -y install network-manager" >> "$setup_log" 2>&1 || exit 1
+      retry 150 10 "apt-get -y install network-manager" >> "$setup_log" 2>&1 || fail_setup
       {
        systemctl enable NetworkManager
        systemctl start NetworkManager
       } >> "$setup_log" 2<&1
     fi
     if ! command -v curl > /dev/null 2>&1; then
-      retry 150 10 "apt-get -y install curl" >> "$setup_log" 2>&1 || exit 1
+      retry 150 10 "apt-get -y install curl" >> "$setup_log" 2>&1 || fail_setup
     fi
   fi
 }
@@ -1728,7 +1747,7 @@ proxy_validate() {
     error "Received error: $proxy_test_err"
     if [[ -n $TESTING ]]; then
       error "Exiting setup"
-      kill -SIGINT "$(ps --pid $$ -oppid=)"; exit 1
+      kill -SIGINT "$(ps --pid $$ -oppid=)"; fail_setup
     fi
   fi
   return $ret
@@ -1799,7 +1818,7 @@ reinstall_init() {
 
       # Stop the systemctl process trying to kill the service, show user a message, then exit setup
       kill -9 $pid
-      exit 1
+      fail_setup
     fi
 
     sleep 5
@@ -2002,7 +2021,7 @@ saltify() {
   SALTVERSION=$(egrep 'version: [0-9]{4}' ../salt/salt/master.defaults.yaml | sed 's/^.*version: //')
 
   if [[ $is_ubuntu ]]; then
-    DEBIAN_FRONTEND=noninteractive retry 150 20 "apt-get -y -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" upgrade" >> "$setup_log" 2>&1 || exit 1
+    DEBIAN_FRONTEND=noninteractive retry 150 20 "apt-get -y -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" upgrade" >> "$setup_log" 2>&1 || fail_setup
     update-alternatives --install /usr/bin/python python /usr/bin/python3.8 10
     local pkg_arr=(
       'apache2-utils'
@@ -2014,7 +2033,7 @@ saltify() {
       'netcat'
       'jq'
     )
-    retry 150 20 "apt-get -y install ${pkg_arr[*]}" || exit 1
+    retry 150 20 "apt-get -y install ${pkg_arr[*]}" || fail_setup
 
     logCmd "mkdir -vp /etc/apt/keyrings"
     #logCmd "wget -q --inet4-only -O /opt/so/gpg/SALTSTACK-GPG-KEY.pub https://repo.securityonion.net/file/securityonion-repo/ubuntu/20.04/amd64/salt/SALTSTACK-GPG-KEY.pub"
@@ -2035,9 +2054,9 @@ saltify() {
 
     # Ain't nothing but a GPG
 
-    retry 150 20 "apt-get update" "" "Err:" || exit 1
-    retry 150 20 "apt-get -y install salt-common-$SALTVERSION salt-minion-$SALTVERSION" || exit 1
-    retry 150 20 "apt-mark hold salt-minion salt-common" || exit 1
+    retry 150 20 "apt-get update" "" "Err:" || fail_setup
+    retry 150 20 "apt-get -y install salt-common-$SALTVERSION salt-minion-$SALTVERSION" || fail_setup
+    retry 150 20 "apt-mark hold salt-minion salt-common" || fail_setup
     #retry 150 20 "apt-get -y install python3-pip python3-dateutil python3-m2crypto python3-mysqldb python3-packaging python3-influxdb python3-lxml" || exit 1
 
   fi
@@ -2104,7 +2123,7 @@ set_main_ip() {
       info "MAINIP=$MAINIP"
       info "MNIC_IP=$MNIC_IP"
       whiptail_error_message "The management IP could not be determined. Please check the log at /root/sosetup.log and verify the network configuration. Select OK to exit."
-      exit 1
+      fail_setup
     fi
     sleep 1
   done
@@ -2296,8 +2315,8 @@ set_initial_firewall_access() {
     so-firewall includehost analyst $ALLOW_CIDR --apply
   fi
   if [[ ! -z "$MINION_CIDR" ]]; then
-    so-firewall includehost sensors $MINION_CIDR
-    so-firewall includehost searchnodes $MINION_CIDR --apply
+    so-firewall includehost sensor $MINION_CIDR
+    so-firewall includehost searchnode $MINION_CIDR --apply
   fi
 }
@@ -2354,13 +2373,13 @@ ubuntu_check() {
   if [[ $OS == "ubuntu" ]]; then
     if [[ $waitforstate ]]; then
       whiptail_ubuntu_notsupported
-      exit 1
+      fail_setup
     else
       if [[ $UBUNTUINSTALL == "needtoupgrade" ]]; then
        whiptail_ubuntu_warning
      else
        whiptail_ubuntu_notsupported
-        exit 1
+        fail_setup
      fi
    fi
  fi
@@ -2379,9 +2398,9 @@ update_packages() {
     logCmd "dnf -y update --allowerasing --exclude=salt*,wazuh*,docker*,containerd*"
   else
     info "Running apt-get update"
-    retry 150 10 "apt-get -y update" "" "Err:" >> "$setup_log" 2>&1 || exit 1
+    retry 150 10 "apt-get -y update" "" "Err:" >> "$setup_log" 2>&1 || fail_setup
     info "Running apt-get upgrade"
-    retry 150 10 "apt-get -y upgrade" >> "$setup_log" 2>&1 || exit 1
+    retry 150 10 "apt-get -y upgrade" >> "$setup_log" 2>&1 || fail_setup
   fi
 }
@@ -2427,7 +2446,7 @@ wait_for_file() {
 }
 
 wait_for_salt_minion() {
-  retry 60 5 "journalctl -u salt-minion.service | grep 'Minion is ready to receive requests'" >> "$setup_log" 2>&1 || exit 1
+  retry 60 5 "journalctl -u salt-minion.service | grep 'Minion is ready to receive requests'" >> "$setup_log" 2>&1 || fail_setup
 }
 
 verify_setup() {
diff --git a/setup/so-setup b/setup/so-setup
index 36487b6bd..c018d63d1 100755
--- a/setup/so-setup
+++ b/setup/so-setup
@@ -10,13 +10,13 @@
 uid="$(id -u)"
 if [ "$uid" -ne 0 ]; then
   echo "This script must be run using sudo!"
-  exit 1
+  fail_setup
 fi
 
 # Save the original argument array since we modify it
 original_args=("$@")
 
-cd "$(dirname "$0")" || exit 255
+cd "$(dirname "$0")" || fail_setup
 
 echo "Getting started..."
@@ -82,7 +82,7 @@ if [[ "$setup_type" == 'iso' ]]; then
     is_iso=true
   else
     echo "Only use 'so-setup iso' for an ISO install on Security Onion ISO images. Please run 'so-setup network' instead."
-    exit 1
+    fail_setup
   fi
 fi
@@ -161,7 +161,7 @@ catch() {
   info "Fatal error occurred at $1 in so-setup, failing setup."
   grep --color=never "ERROR" "$setup_log" > "$error_log"
   whiptail_setup_failed
-  exit 1
+  fail_setup
 }
 
 # Add the progress function for manager node type installs
@@ -236,7 +236,7 @@ case "$setup_type" in
     ;;
   *)
     error "Invalid install type, must be 'iso', 'network' or 'analyst'."
-    exit 1
+    fail_setup
     ;;
 esac
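The Fleet server host lookup that so-elastic-agent-gen-installers now performs against the new grid-default entry can be exercised by hand when troubleshooting. Below is a minimal sketch, assuming Kibana is reachable on localhost:5601 and that /opt/so/conf/elasticsearch/curl.config already holds the grid credentials; the curl/jq pipeline is the same one used in the script above, the surrounding check is illustrative only.

```bash
#!/bin/bash
# Sketch: fetch the grid-default Fleet server host entry created by
# so-elastic-fleet-setup and print its URLs as a comma-separated list.
FLEETHOST=$(curl -K /opt/so/conf/elasticsearch/curl.config \
  'http://localhost:5601/api/fleet/fleet_server_hosts/grid-default' \
  | jq -r '.item.host_urls[]' | paste -sd ',')

if [[ -z $FLEETHOST ]]; then
  echo "No Fleet server host URLs found for grid-default" >&2
  exit 1
fi

echo "Fleet hosts: $FLEETHOST"
```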