diff --git a/pillar/top.sls b/pillar/top.sls index 627fed80b..a795e03c1 100644 --- a/pillar/top.sls +++ b/pillar/top.sls @@ -3,7 +3,7 @@ base: - patch.needs_restarting - logrotate - '*_eval or *_helix or *_heavynode or *_sensor or *_standalone or *_import': + '*_eval or *_helixsensor or *_heavynode or *_sensor or *_standalone or *_import': - match: compound - zeek @@ -62,7 +62,7 @@ base: - global - minions.{{ grains.id }} - '*_helix': + '*_helixsensor': - fireeye - zeeklogs - logstash diff --git a/salt/common/files/daemon.json b/salt/common/files/daemon.json new file mode 100644 index 000000000..bc047bc80 --- /dev/null +++ b/salt/common/files/daemon.json @@ -0,0 +1,12 @@ +{%- set DOCKERRANGE = salt['pillar.get']('docker:range') %} +{%- set DOCKERBIND = salt['pillar.get']('docker:bip') %} +{ + "registry-mirrors": [ "https://:5000" ], + "bip": "{{ DOCKERBIND }}", + "default-address-pools": [ + { + "base" : "{{ DOCKERRANGE }}", + "size" : 24 + } + ] +} \ No newline at end of file diff --git a/salt/common/init.sls b/salt/common/init.sls index 1192923b7..337103fd9 100644 --- a/salt/common/init.sls +++ b/salt/common/init.sls @@ -244,10 +244,19 @@ commonlogrotateconf: - dayweek: '*' {% endif %} +# Manager daemon.json +docker_daemon: + file.managed: + - source: salt://common/files/daemon.json + - name: /etc/docker/daemon.json + - template: jinja + # Make sure Docker is always running docker: service.running: - enable: True + - watch: + - file: docker_daemon {% else %} diff --git a/salt/common/tools/sbin/so-fleet-setup b/salt/common/tools/sbin/so-fleet-setup index 3e9fb1d74..8de83b118 100755 --- a/salt/common/tools/sbin/so-fleet-setup +++ b/salt/common/tools/sbin/so-fleet-setup @@ -15,8 +15,8 @@ if [ ! "$(docker ps -q -f name=so-fleet)" ]; then salt-call state.apply redis queue=True >> /root/fleet-setup.log fi -docker exec so-fleet fleetctl config set --address https://localhost:8080 --tls-skip-verify --url-prefix /fleet -docker exec -it so-fleet bash -c 'while [[ "$(curl -s -o /dev/null --insecure -w ''%{http_code}'' https://localhost:8080/fleet)" != "301" ]]; do sleep 5; done' +docker exec so-fleet fleetctl config set --address https://127.0.0.1:8080 --tls-skip-verify --url-prefix /fleet +docker exec -it so-fleet bash -c 'while [[ "$(curl -s -o /dev/null --insecure -w ''%{http_code}'' https://127.0.0.1:8080/fleet)" != "301" ]]; do sleep 5; done' docker exec so-fleet fleetctl setup --email $1 --password $2 docker exec so-fleet fleetctl apply -f /packs/palantir/Fleet/Endpoints/MacOS/osquery.yaml diff --git a/salt/common/tools/sbin/so-image-common b/salt/common/tools/sbin/so-image-common index 3449158c0..767f9d21c 100755 --- a/salt/common/tools/sbin/so-image-common +++ b/salt/common/tools/sbin/so-image-common @@ -19,76 +19,80 @@ IMAGEREPO=securityonion container_list() { - MANAGERCHECK=$1 - if [ -z "$MANAGERCHECK" ]; then - MANAGERCHECK=so-unknown - if [ -f /etc/salt/grains ]; then - MANAGERCHECK=$(cat /etc/salt/grains | grep role | awk '{print $2}') - fi + MANAGERCHECK=$1 + + if [ -z "$MANAGERCHECK" ]; then + MANAGERCHECK=so-unknown + if [ -f /etc/salt/grains ]; then + MANAGERCHECK=$(cat /etc/salt/grains | grep role | awk '{print $2}') fi + fi - if [ $MANAGERCHECK == 'so-import' ]; then - TRUSTED_CONTAINERS=( \ - "so-elasticsearch" \ - "so-filebeat" \ - "so-idstools" \ - "so-kibana" \ - "so-kratos" \ - "so-nginx" \ - "so-pcaptools" \ - "so-soc" \ - "so-steno" \ - "so-suricata" \ - "so-zeek" ) - elif [ $MANAGERCHECK != 'so-helix' ]; then - TRUSTED_CONTAINERS=( \ - "so-acng" \ - "so-curator" \ - 
"so-domainstats" \ - "so-elastalert" \ - "so-elasticsearch" \ - "so-filebeat" \ - "so-fleet" \ - "so-fleet-launcher" \ - "so-freqserver" \ - "so-grafana" \ - "so-idstools" \ - "so-influxdb" \ - "so-kibana" \ - "so-kratos" \ - "so-logstash" \ - "so-minio" \ - "so-mysql" \ - "so-nginx" \ - "so-pcaptools" \ - "so-playbook" \ - "so-redis" \ - "so-soc" \ - "so-soctopus" \ - "so-steno" \ - "so-strelka-backend" \ - "so-strelka-filestream" \ - "so-strelka-frontend" \ - "so-strelka-manager" \ - "so-suricata" \ - "so-telegraf" \ - "so-thehive" \ - "so-thehive-cortex" \ - "so-thehive-es" \ - "so-wazuh" \ - "so-zeek" ) - else - TRUSTED_CONTAINERS=( \ - "so-filebeat" \ - "so-idstools" \ - "so-logstash" \ - "so-nginx" \ - "so-redis" \ - "so-steno" \ - "so-suricata" \ - "so-telegraf" \ - "so-zeek" ) - fi + if [ $MANAGERCHECK == 'so-import' ]; then + TRUSTED_CONTAINERS=( + "so-elasticsearch" + "so-filebeat" + "so-idstools" + "so-kibana" + "so-kratos" + "so-nginx" + "so-pcaptools" + "so-soc" + "so-steno" + "so-suricata" + "so-zeek" + ) + elif [ $MANAGERCHECK != 'so-helix' ]; then + TRUSTED_CONTAINERS=( + "so-acng" + "so-curator" + "so-domainstats" + "so-elastalert" + "so-elasticsearch" + "so-filebeat" + "so-fleet" + "so-fleet-launcher" + "so-freqserver" + "so-grafana" + "so-idstools" + "so-influxdb" + "so-kibana" + "so-kratos" + "so-logstash" + "so-minio" + "so-mysql" + "so-nginx" + "so-pcaptools" + "so-playbook" + "so-redis" + "so-soc" + "so-soctopus" + "so-steno" + "so-strelka-backend" + "so-strelka-filestream" + "so-strelka-frontend" + "so-strelka-manager" + "so-suricata" + "so-telegraf" + "so-thehive" + "so-thehive-cortex" + "so-thehive-es" + "so-wazuh" + "so-zeek" + ) + else + TRUSTED_CONTAINERS=( + "so-filebeat" + "so-idstools" + "so-logstash" + "so-nginx" + "so-redis" + "so-steno" + "so-suricata" + "so-telegraf" + "so-zeek" + ) + fi } update_docker_containers() { diff --git a/salt/common/tools/sbin/so-yara-update b/salt/common/tools/sbin/so-yara-update index a2a633957..ddddb87eb 100755 --- a/salt/common/tools/sbin/so-yara-update +++ b/salt/common/tools/sbin/so-yara-update @@ -165,6 +165,6 @@ else echo "No connectivity to Github...exiting..." exit 1 fi -{%- endif -%} +{% endif %} echo "Finished rule updates at $(date)..." diff --git a/salt/common/tools/sbin/soup b/salt/common/tools/sbin/soup index ca840de59..4d168c077 100755 --- a/salt/common/tools/sbin/soup +++ b/salt/common/tools/sbin/soup @@ -155,6 +155,13 @@ copy_new_files() { cd /tmp } +generate_and_clean_tarballs() { + local new_version + new_version=$(cat $UPDATE_DIR/VERSION) + tar -cxf "/opt/so/repo/$new_version.tar.gz" "$UPDATE_DIR" + find "/opt/so/repo" -type f -not -name "$new_version.tar.gz" -exec rm -rf {} \; +} + highstate() { # Run a highstate. 
   salt-call state.highstate -l info queue=True
@@ -197,6 +204,7 @@ pillar_changes() {
   [[ "$INSTALLEDVERSION" =~ rc.1 ]] && rc1_to_rc2
   [[ "$INSTALLEDVERSION" =~ rc.2 ]] && rc2_to_rc3
   [[ "$INSTALLEDVERSION" =~ rc.3 ]] && rc3_to_2.3.0
+  [[ "$INSTALLEDVERSION" == 2.3.0 ]] || [[ "$INSTALLEDVERSION" == 2.3.1 ]] || [[ "$INSTALLEDVERSION" == 2.3.2 ]] || [[ "$INSTALLEDVERSION" == 2.3.10 ]] && 2.3.0_to_2.3.20
 }
 
 rc1_to_rc2() {
@@ -278,6 +286,44 @@ rc3_to_2.3.0() {
     echo "playbook_admin: $(get_random_value)"
     echo "playbook_automation: $(get_random_value)"
   } >> /opt/so/saltstack/local/pillar/secrets.sls
+
+  INSTALLEDVERSION=2.3.0
+}
+
+2.3.0_to_2.3.20(){
+  # Remove PCAP from global
+  sed -i '/pcap:/d' /opt/so/saltstack/local/pillar/global.sls
+  sed -i '/sensor_checkin_interval_ms:/d' /opt/so/saltstack/local/pillar/global.sls
+
+  # Add check-in interval to global
+  echo "sensoroni:" >> /opt/so/saltstack/local/pillar/global.sls
+  echo " node_checkin_interval_ms: 10000" >> /opt/so/saltstack/local/pillar/global.sls
+
+  # Update pillar files for new sensoroni functionality
+  for file in /opt/so/saltstack/local/pillar/minions/*; do
+    echo "sensoroni:" >> $file
+    echo " node_description:" >> $file
+    local SOMEADDRESS=$(cat $file | grep mainip | tail -n 1 | awk '{print $2}')
+    echo " node_address: $SOMEADDRESS" >> $file
+  done
+
+  # Remove old firewall config to reduce confusion
+  rm -f /opt/so/saltstack/default/pillar/firewall/ports.sls
+
+  # Fix daemon.json by managing it
+  echo "docker:" >> /opt/so/saltstack/local/pillar/global.sls
+  DOCKERGREP=$(cat /etc/docker/daemon.json | grep base | awk '{print $3}' | cut -f1 -d"," | tr -d '"')
+  if [ -z "$DOCKERGREP" ]; then
+    echo " range: '172.17.0.0/24'" >> /opt/so/saltstack/local/pillar/global.sls
+    echo " bip: '172.17.0.1/24'" >> /opt/so/saltstack/local/pillar/global.sls
+  else
+    DOCKERSTUFF="${DOCKERGREP//\"}"
+    DOCKERSTUFFBIP=$(echo $DOCKERSTUFF | awk -F'.' '{print $1,$2,$3,1}' OFS='.')/24
+    echo " range: '$DOCKERSTUFF/24'" >> /opt/so/saltstack/local/pillar/global.sls
+    echo " bip: '$DOCKERSTUFFBIP'" >> /opt/so/saltstack/local/pillar/global.sls
+
+  fi
+
 }
 
 space_check() {
@@ -371,11 +417,18 @@ verify_latest_update_script() {
   # Check to see if the update scripts match. If not run the new one.
   CURRENTSOUP=$(md5sum /opt/so/saltstack/default/salt/common/tools/sbin/soup | awk '{print $1}')
   GITSOUP=$(md5sum $UPDATE_DIR/salt/common/tools/sbin/soup | awk '{print $1}')
-  if [[ "$CURRENTSOUP" == "$GITSOUP" ]]; then
+  CURRENTCMN=$(md5sum /opt/so/saltstack/default/salt/common/tools/sbin/so-common | awk '{print $1}')
+  GITCMN=$(md5sum $UPDATE_DIR/salt/common/tools/sbin/so-common | awk '{print $1}')
+  CURRENTIMGCMN=$(md5sum /opt/so/saltstack/default/salt/common/tools/sbin/so-image-common | awk '{print $1}')
+  GITIMGCMN=$(md5sum $UPDATE_DIR/salt/common/tools/sbin/so-image-common | awk '{print $1}')
+
+  if [[ "$CURRENTSOUP" == "$GITSOUP" && "$CURRENTCMN" == "$GITCMN" && "$CURRENTIMGCMN" == "$GITIMGCMN" ]]; then
     echo "This version of the soup script is up to date. Proceeding."
   else
-    echo "You are not running the latest soup version. Updating soup."
+    echo "You are not running the latest soup version. Updating soup and its components.
Might take multiple runs to complete" cp $UPDATE_DIR/salt/common/tools/sbin/soup $DEFAULT_SALT_DIR/salt/common/tools/sbin/ + cp $UPDATE_DIR/salt/common/tools/sbin/so-common $DEFAULT_SALT_DIR/salt/common/tools/sbin/ + cp $UPDATE_DIR/salt/common/tools/sbin/so-image-common $DEFAULT_SALT_DIR/salt/common/tools/sbin/ salt-call state.apply common queue=True echo "" echo "soup has been updated. Please run soup again." @@ -415,19 +468,24 @@ if [ $is_airgap -eq 0 ]; then airgap_mounted else echo "Cloning Security Onion github repo into $UPDATE_DIR." + echo "Removing previous upgrade sources." + rm -rf $UPDATE_DIR clone_to_tmp fi -if [ -f /usr/sbin/so-image-common ]; then - . /usr/sbin/so-image-common -else -add_common -fi echo "" echo "Verifying we have the latest soup script." verify_latest_update_script echo "" +echo "Generating new repo archive" +generate_and_clean_tarballs +if [ -f /usr/sbin/so-image-common ]; then + . /usr/sbin/so-image-common +else +add_common +fi + echo "Let's see if we need to update Security Onion." upgrade_check space_check @@ -444,6 +502,15 @@ if [ $is_airgap -eq 0 ]; then else update_registry update_docker_containers "soup" + FEATURESCHECK=$(lookup_pillar features elastic) + if [[ "$FEATURESCHECK" == "True" ]]; then + TRUSTED_CONTAINERS=( \ + "so-elasticsearch" \ + "so-filebeat" \ + "so-kibana" \ + "so-logstash" ) + update_docker_containers "features" "-features" + fi fi echo "" echo "Stopping Salt Minion service." @@ -537,9 +604,9 @@ if [ "$UPGRADESALT" == "1" ]; then echo "" echo "Upgrading Salt on the remaining Security Onion nodes from $INSTALLEDSALTVERSION to $NEWSALTVERSION." if [ $is_airgap -eq 0 ]; then - salt -C 'not *_eval and not *_helix and not *_manager and not *_managersearch and not *_standalone' cmd.run "yum clean all" + salt -C 'not *_eval and not *_helixsensor and not *_manager and not *_managersearch and not *_standalone' cmd.run "yum clean all" fi - salt -C 'not *_eval and not *_helix and not *_manager and not *_managersearch and not *_standalone' -b $BATCHSIZE state.apply salt.minion queue=True + salt -C 'not *_eval and not *_helixsensor and not *_manager and not *_managersearch and not *_standalone' -b $BATCHSIZE state.apply salt.minion queue=True echo "" fi diff --git a/salt/docker_clean/init.sls b/salt/docker_clean/init.sls index 61499cdb5..9c5ce0d17 100644 --- a/salt/docker_clean/init.sls +++ b/salt/docker_clean/init.sls @@ -1,6 +1,6 @@ {% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} {% set MANAGER = salt['grains.get']('master') %} -{% set OLDVERSIONS = ['2.0.0-rc.1','2.0.1-rc.1','2.0.2-rc.1','2.0.3-rc.1','2.1.0-rc.2','2.2.0-rc.3','2.3.0','2.3.1']%} +{% set OLDVERSIONS = ['2.0.0-rc.1','2.0.1-rc.1','2.0.2-rc.1','2.0.3-rc.1','2.1.0-rc.2','2.2.0-rc.3','2.3.0','2.3.1','2.3.2']%} {% for VERSION in OLDVERSIONS %} remove_images_{{ VERSION }}: diff --git a/salt/logstash/init.sls b/salt/logstash/init.sls index e23e4eef2..d332f737a 100644 --- a/salt/logstash/init.sls +++ b/salt/logstash/init.sls @@ -45,8 +45,10 @@ {% set DOCKER_OPTIONS = salt['pillar.get']('logstash:docker_options', {}) %} {% set TEMPLATES = salt['pillar.get']('elasticsearch:templates', {}) %} +{% if grains['role'] != 'so-helix' %} include: - elasticsearch +{% endif %} # Create the logstash group logstashgroup: diff --git a/salt/ssl/init.sls b/salt/ssl/init.sls index 49e87f784..221c58c93 100644 --- a/salt/ssl/init.sls +++ b/salt/ssl/init.sls @@ -12,7 +12,7 @@ {% set MAINIP = salt['grains.get']('ip_interfaces').get(MAININT)[0] %} {% set CUSTOM_FLEET_HOSTNAME = 
salt['pillar.get']('global:fleet_custom_hostname', None) %} -{% if grains.id.split('_')|last in ['manager', 'eval', 'standalone', 'import'] %} +{% if grains.id.split('_')|last in ['manager', 'eval', 'standalone', 'import', 'helixsensor'] %} {% set trusttheca_text = salt['cp.get_file_str']('/etc/pki/ca.crt')|replace('\n', '') %} {% set ca_server = grains.id %} {% else %} diff --git a/salt/suricata/suricata_config.map.jinja b/salt/suricata/suricata_config.map.jinja index d8669c231..8c11901d0 100644 --- a/salt/suricata/suricata_config.map.jinja +++ b/salt/suricata/suricata_config.map.jinja @@ -20,7 +20,7 @@ HOME_NET: "[{{salt['pillar.get']('global:hnmanager', '')}}]" '*_eval': { 'default-packet-size': salt['pillar.get']('sensor:mtu', 1500) + hardware_header, }, - '*_helix': { + '*_helixsensor': { 'default-packet-size': salt['pillar.get']('sensor:mtu', 9000) + hardware_header, }, '*': { diff --git a/salt/top.sls b/salt/top.sls index 9d41481fe..b6913895d 100644 --- a/salt/top.sls +++ b/salt/top.sls @@ -47,7 +47,7 @@ base: - sensoroni - salt.lasthighstate - '*_helix and G@saltversion:{{saltversion}}': + '*_helixsensor and G@saltversion:{{saltversion}}': - match: compound - salt.master - ca @@ -61,9 +61,7 @@ base: - suricata - zeek - redis - {%- if LOGSTASH %} - logstash - {%- endif %} {%- if FILEBEAT %} - filebeat {%- endif %} diff --git a/setup/so-functions b/setup/so-functions index 3ff66be30..2cf1b28cf 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -557,6 +557,19 @@ check_requirements() { fi } +compare_versions() { + manager_ver=$(ssh -i /root/.ssh/so.key soremote@"$MSRV" cat /etc/soversion) + + if [[ $manager_ver == "" ]]; then + rm /root/install_opt + echo "Could not determine version of Security Onion running on manager $MSRV. Please check your network settings and run setup again." | tee -a "$setup_log" + kill -SIGUSR1 "$(ps --pid $$ -oppid=)"; exit 1 + fi + + [[ "$manager_ver" == "$SOVERSION" ]] + return +} + configure_network_sensor() { echo "Setting up sensor interface" >> "$setup_log" 2>&1 local nic_error=0 @@ -692,7 +705,7 @@ copy_ssh_key() { chown -R "$SUDO_USER":"$SUDO_USER" /root/.ssh echo "Removing old entry for manager from known_hosts if it exists" - sed -i "/${MSRV}/d" /root/.ssh/known_hosts + grep -q "$MSRV" /root/.ssh/known_hosts && sed -i "/${MSRV}/d" /root/.ssh/known_hosts echo "Copying the SSH key to the manager" #Copy the key over to the manager @@ -898,6 +911,7 @@ docker_registry() { echo "Setting up Docker Registry" >> "$setup_log" 2>&1 mkdir -p /etc/docker >> "$setup_log" 2>&1 + # This will get applied so docker can attempt to start if [ -z "$DOCKERNET" ]; then DOCKERNET=172.17.0.0 fi @@ -952,6 +966,28 @@ docker_seed_registry() { } +download_repo_tarball() { + mkdir -p /root/manager_setup/securityonion + { + local manager_ver + manager_ver=$(ssh -i /root/.ssh/so.key soremote@"$MSRV" cat /etc/soversion) + scp -i /root/.ssh/so.key soremote@"$MSRV":/opt/so/repo/"$manager_ver".tar.gz /root/manager_setup + } >> "$setup_log" 2>&1 + + # Fail if the file doesn't download + if ! [ -f /root/manager_setup/"$manager_ver".tar.gz ]; then + rm /root/install_opt + local message="Could not download $manager_ver.tar.gz from manager, please check your network settings and verify the file /opt/so/repo/$manager_ver.tar.gz exists on the manager." 
+ echo "$message" | tee -a "$setup_log" + kill -SIGUSR1 "$(ps --pid $$ -oppid=)"; exit 1 + fi + + { + tar -xzf /root/manager_setup/"$manager_ver".tar.gz -C /root/manager_setup/securityonion + rm -rf /root/manager_setup/"$manager_ver".tar.gz + } >> "$setup_log" 2>&1 +} + fireeye_pillar() { local fireeye_pillar_path=$local_salt_dir/pillar/fireeye @@ -960,8 +996,8 @@ fireeye_pillar() { printf '%s\n'\ "fireeye:"\ " helix:"\ - " api_key: '$HELIXAPIKEY'" "" > "$fireeye_pillar_path"/init.sls + " api_key: '$HELIXAPIKEY'" \ } @@ -1011,6 +1047,11 @@ generate_passwords(){ KRATOSKEY=$(get_random_value) } +generate_repo_tarball() { + mkdir /opt/so/repo + tar -czf /opt/so/repo/"$SOVERSION".tar.gz ../. +} + get_redirect() { whiptail_set_redirect if [ "$REDIRECTINFO" = "OTHER" ]; then @@ -1042,22 +1083,29 @@ host_pillar() { printf '%s\n'\ "host:"\ " mainint: '$MNIC'"\ + "sensoroni:"\ + " node_address: '$MAINIP'"\ + " node_description: '$NODE_DESCRIPTION'"\ "" > "$pillar_file" } install_cleanup() { - echo "Installer removing the following files:" - ls -lR "$temp_install_dir" + if [ -f "$temp_install_dir" ]; then + echo "Installer removing the following files:" + ls -lR "$temp_install_dir" - # Clean up after ourselves - rm -rf "$temp_install_dir" + # Clean up after ourselves + rm -rf "$temp_install_dir" + fi # All cleanup prior to this statement must be compatible with automated testing. Cleanup # that will disrupt automated tests should be placed beneath this statement. [ -n "$TESTING" ] && return # If Mysql is running stop it - /usr/sbin/so-mysql-stop + if docker ps --format "{{.Names}}" 2>&1 | grep -q "so-mysql"; then + /usr/sbin/so-mysql-stop + fi if [[ $setup_type == 'iso' ]]; then info "Removing so-setup permission entry from sudoers file" @@ -1205,8 +1253,6 @@ manager_global() { " imagerepo: '$IMAGEREPO'"\ " pipeline: 'redis'"\ "sensoroni:"\ - " node_address: '$MAINIP'"\ - " node_description: '$NODE_DESCRIPTION'"\ " node_checkin_interval_ms: $NODE_CHECKIN_INTERVAL_MS"\ "strelka:"\ " enabled: $STRELKA"\ @@ -1327,12 +1373,10 @@ network_setup() { disable_misc_network_features; echo "... Setting ONBOOT for management interface"; - if ! netplan > /dev/null 2>&1; then - nmcli con mod "$MNIC" connection.autoconnect "yes"; - fi + command -v netplan &> /dev/null || nmcli con mod "$MNIC" connection.autoconnect "yes" - echo "... Copying 99-so-checksum-offload-disable"; - cp ./install_scripts/99-so-checksum-offload-disable /etc/NetworkManager/dispatcher.d/pre-up.d/99-so-checksum-offload-disable ; + echo "... Copying 99-so-checksum-offload-disable"; + cp ./install_scripts/99-so-checksum-offload-disable /etc/NetworkManager/dispatcher.d/pre-up.d/99-so-checksum-offload-disable ; echo "... 
Modifying 99-so-checksum-offload-disable"; sed -i "s/\$MNIC/${MNIC}/g" /etc/NetworkManager/dispatcher.d/pre-up.d/99-so-checksum-offload-disable; @@ -1375,7 +1419,7 @@ elasticsearch_pillar() { parse_install_username() { # parse out the install username so things copy correctly - INSTALLUSERNAME=$(pwd | sed -E 's/\// /g' | awk '{ print $2 }') + INSTALLUSERNAME=${SUDO_USER:-${USER}} } patch_pillar() { @@ -1481,8 +1525,10 @@ reinstall_init() { if command -v docker &> /dev/null; then # Stop and remove all so-* containers so files can be changed with more safety - docker stop $(docker ps -a -q --filter "name=so-") - docker rm -f $(docker ps -a -q --filter "name=so-") + if [ $(docker ps -a -q --filter "name=so-") -gt 0 ]; then + docker stop $(docker ps -a -q --filter "name=so-") + docker rm -f $(docker ps -a -q --filter "name=so-") + fi fi local date_string @@ -1629,7 +1675,7 @@ saltify() { 'FLEET') if [ "$OSVER" != 'xenial' ]; then apt-get -y install python3-mysqldb >> "$setup_log" 2>&1; else apt-get -y install python-mysqldb >> "$setup_log" 2>&1; fi ;; - 'MANAGER' | 'EVAL' | 'MANAGERSEARCH' | 'STANDALONE' | 'IMPORT') # TODO: should this also be HELIXSENSOR? + 'MANAGER' | 'EVAL' | 'MANAGERSEARCH' | 'STANDALONE' | 'IMPORT' | 'HELIXSENSOR') # Add saltstack repo(s) wget -q --inet4-only -O - https://repo.saltstack.com"$py_ver_url_path"/ubuntu/"$ubuntu_version"/amd64/archive/3002.2/SALTSTACK-GPG-KEY.pub | apt-key add - >> "$setup_log" 2>&1 @@ -1667,7 +1713,7 @@ saltify() { apt-key add "$temp_install_dir"/gpg/GPG-KEY-WAZUH >> "$setup_log" 2>&1 echo "deb http://repo.saltstack.com$py_ver_url_path/ubuntu/$ubuntu_version/amd64/archive/3002.2/ $OSVER main" > /etc/apt/sources.list.d/saltstack.list 2>> "$setup_log" echo "deb https://packages.wazuh.com/3.x/apt/ stable main" > /etc/apt/sources.list.d/wazuh.list 2>> "$setup_log" - ;; + ;; esac apt-get update >> "$setup_log" 2>&1 set_progress_str 8 'Installing salt-minion & python modules' @@ -1797,6 +1843,19 @@ set_network_dev_status_list() { set_main_ip() { MAINIP=$(ip route get 1 | awk '{print $7;exit}') + MNIC_IP=$(ip a s "$MNIC" | grep -oE 'inet [0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}' | cut -d' ' -f2) +} + +compare_main_nic_ip() { + if [[ "$MAINIP" != "$MNIC_IP" ]]; then + read -r -d '' message <<- EOM + The IP being routed by Linux is not the IP address assigned to the management interface ($MNIC). + + This is not a supported configuration, please remediate and rerun setup. + EOM + whiptail --title "Security Onion Setup" --msgbox "$message" 10 75 + kill -SIGINT "$(ps --pid $$ -oppid=)"; exit 1 + fi } # Add /usr/sbin to everyone's path diff --git a/setup/so-setup b/setup/so-setup index 73e66d058..3c59c59cb 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -22,6 +22,9 @@ if [ "$uid" -ne 0 ]; then exit 1 fi +# Save the original argument array since we modify it +readarray -t original_args <<< "$@" + cd "$(dirname "$0")" || exit 255 # Source the generic function libraries that are also used by the product after @@ -64,32 +67,31 @@ while [[ $# -gt 0 ]]; do esac done +if ! 
[ -f /root/install_opt ] && [ -d /root/manager_setup/securityonion ] && [[ $(pwd) != /root/manager_setup/securityonion/setup ]]; then + exec bash /root/manager_setup/securityonion/setup/so-setup "${original_args[@]}" +fi + if [[ -f /root/accept_changes ]]; then is_reinstall=true # Move last setup log to backup mv "$setup_log" "$setup_log.bak" + [ -f "$error_log" ] && mv "$error_log" "$error_log.bak" fi -# Begin Installation pre-processing parse_install_username -title "Initializing Setup" -info "Installing as the $INSTALLUSERNAME user" +if ! [ -f /root/install_opt ]; then + # Begin Installation pre-processing + title "Initializing Setup" + info "Installing as the $INSTALLUSERNAME user" -analyze_system + analyze_system +fi automated=no function progress() { local title='Security Onion Install' - if grep -q -E "ERROR|Result: False" $setup_log || [[ -s /var/spool/mail/root ]]; then - if [[ -s /var/spool/mail/root ]]; then - echo '[ ERROR ] /var/spool/mail/root grew unexpectedly' >> $setup_log 2>&1 - fi - - export SO_ERROR=1 - title="Error found, please check $setup_log" - fi if [ $automated == no ]; then whiptail --title "$title" --gauge 'Please wait while installing...' 6 60 0 # append to text @@ -125,7 +127,7 @@ case "$setup_type" in echo "Beginning Security Onion $setup_type install" >> $setup_log 2>&1 ;; *) - echo "Invalid install type, must be 'iso' or 'network'" | tee $setup_log + echo "Invalid install type, must be 'iso' or 'network'" | tee -a $setup_log exit 1 ;; esac @@ -162,14 +164,18 @@ if [ "$automated" == no ]; then fi fi -if (whiptail_you_sure); then - true -else - echo "User cancelled setup." | tee $setup_log - whiptail_cancel -fi +if ! [ -f /root/install_opt ]; then + if (whiptail_you_sure); then + true + else + echo "User cancelled setup." | tee -a "$setup_log" + whiptail_cancel + fi -whiptail_install_type + whiptail_install_type +else + source /root/install_opt +fi if [ "$install_type" = 'EVAL' ]; then is_node=true @@ -182,7 +188,6 @@ elif [ "$install_type" = 'STANDALONE' ]; then is_distmanager=true is_node=true is_sensor=true - is_smooshed=true elif [ "$install_type" = 'MANAGERSEARCH' ]; then is_manager=true is_distmanager=true @@ -200,7 +205,6 @@ elif [ "$install_type" = 'HEAVYNODE' ]; then is_node=true is_minion=true is_sensor=true - is_smooshed=true elif [ "$install_type" = 'FLEET' ]; then is_minion=true is_fleet_standalone=true @@ -210,9 +214,7 @@ elif [ "$install_type" = 'HELIXSENSOR' ]; then elif [ "$install_type" = 'IMPORT' ]; then is_import=true elif [ "$install_type" = 'ANALYST' ]; then - cd .. || exit 255 - ./so-analyst-install - exit 0 + is_analyst=true fi # Say yes to the dress if its an ISO install @@ -221,56 +223,96 @@ if [[ "$setup_type" == 'iso' ]]; then fi # Check if this is an airgap install - -if [[ $is_manager ]]; then - if [[ $is_iso ]]; then - whiptail_airgap - if [[ "$INTERWEBS" == 'AIRGAP' ]]; then - is_airgap=true - fi - fi +if [[ ( $is_manager || $is_import ) && $is_iso ]]; then + whiptail_airgap + if [[ "$INTERWEBS" == 'AIRGAP' ]]; then + is_airgap=true + fi fi -if [[ $is_manager && $is_sensor ]]; then - check_requirements "standalone" -elif [[ $is_fleet_standalone ]]; then - check_requirements "dist" "fleet" -elif [[ $is_sensor && ! $is_eval ]]; then - check_requirements "dist" "sensor" -elif [[ $is_distmanager || $is_minion ]] && [[ ! $is_import ]]; then - check_requirements "dist" -elif [[ $is_import ]]; then - check_requirements "import" +if ! 
[ -f /root/install_opt ]; then + if [[ $is_manager && $is_sensor ]]; then + check_requirements "standalone" + elif [[ $is_fleet_standalone ]]; then + check_requirements "dist" "fleet" + elif [[ $is_sensor && ! $is_eval ]]; then + check_requirements "dist" "sensor" + elif [[ $is_distmanager || $is_minion ]] && [[ ! $is_import ]]; then + check_requirements "dist" + elif [[ $is_import ]]; then + check_requirements "import" + fi + + case "$setup_type" in + 'iso') + whiptail_set_hostname + whiptail_management_nic + whiptail_dhcp_or_static + + if [ "$address_type" != 'DHCP' ]; then + whiptail_management_interface_ip + whiptail_management_interface_mask + whiptail_management_interface_gateway + whiptail_management_interface_dns + whiptail_management_interface_dns_search + fi + ;; + 'network') + whiptail_network_notice + whiptail_dhcp_warn + whiptail_set_hostname + whiptail_management_nic + ;; + esac + + if [[ $is_minion ]]; then + whiptail_management_server + fi + + if [[ $is_minion || $is_iso ]]; then + whiptail_management_interface_setup + fi + + # Init networking so rest of install works + disable_ipv6 + set_hostname + if [[ "$setup_type" == 'iso' ]]; then + set_management_interface + fi + + if [[ -n "$TURBO" ]]; then + use_turbo_proxy + fi + + if [[ $is_minion ]]; then + add_mngr_ip_to_hosts + fi + + if [[ $is_minion ]]; then + [ "$automated" == no ] && copy_ssh_key >> $setup_log 2>&1 + fi + + if [[ $is_minion ]] && ! (compare_versions); then + info "Installer version mismatch, downloading correct version from manager" + printf '%s\n' \ + "install_type=$install_type" \ + "MNIC=$MNIC" \ + "HOSTNAME=$HOSTNAME" \ + "MSRV=$MSRV"\ + "MSRVIP=$MSRVIP" > /root/install_opt + download_repo_tarball + exec bash /root/manager_setup/securityonion/setup/so-setup "${original_args[@]}" + fi + + if [[ $is_analyst ]]; then + cd .. || exit 255 + exec bash so-analyst-install + fi + +else + rm -rf /root/install_opt >> "$setup_log" 2>&1 fi -if [[ ! $is_import ]]; then - whiptail_patch_schedule -fi - -case "$setup_type" in - 'iso') - whiptail_set_hostname - whiptail_management_nic - whiptail_dhcp_or_static - - if [ "$address_type" != 'DHCP' ]; then - whiptail_management_interface_ip - whiptail_management_interface_mask - whiptail_management_interface_gateway - whiptail_management_interface_dns - whiptail_management_interface_dns_search - fi - - #collect_adminuser_inputs - ;; - 'network') - whiptail_network_notice - whiptail_dhcp_warn - whiptail_set_hostname - whiptail_management_nic - ;; -esac - short_name=$(echo "$HOSTNAME" | awk -F. '{print $1}') MINION_ID=$(echo "${short_name}_${install_type}" | tr '[:upper:]' '[:lower:]') @@ -337,8 +379,11 @@ if [[ $is_helix || $is_sensor || $is_import ]]; then calculate_useable_cores fi +if [[ ! $is_import ]]; then + whiptail_patch_schedule +fi + whiptail_homenet_manager -whiptail_dockernet_check if [[ $is_helix || $is_manager || $is_node || $is_import ]]; then set_base_heapsizes @@ -348,7 +393,7 @@ if [[ $is_manager && ! $is_eval ]]; then whiptail_manager_adv if [ "$MANAGERADV" = 'ADVANCED' ]; then if [ "$install_type" = 'MANAGER' ] || [ "$install_type" = 'MANAGERSEARCH' ]; then - whiptail_manager_adv_escluster + whiptail_manager_adv_escluster fi fi whiptail_zeek_version @@ -361,10 +406,6 @@ if [[ $is_manager && ! 
$is_eval ]]; then whiptail_oinkcode fi - if [[ "$STRELKA" == 1 ]]; then - STRELKARULES=1 - fi - if [ "$MANAGERADV" = 'ADVANCED' ] && [ "$ZEEKVERSION" != 'SURICATA' ]; then whiptail_manager_adv_service_zeeklogs fi @@ -373,6 +414,15 @@ fi if [[ $is_manager ]]; then whiptail_components_adv_warning whiptail_enable_components + + if [[ "$STRELKA" = 1 ]]; then + info "Enabling Strelka rules" + STRELKARULES=1 + else + info "Disabling Strelka rules: STRELKA='$STRELKA'" + fi + + whiptail_dockernet_check fi if [[ $is_manager || $is_import ]]; then @@ -387,10 +437,6 @@ if [[ $is_distmanager || ( $is_sensor || $is_node || $is_fleet_standalone ) && ! fi fi -if [[ $is_minion ]]; then - whiptail_management_server -fi - if [[ $is_distmanager ]]; then collect_soremote_inputs fi @@ -448,35 +494,15 @@ trap 'catch $LINENO' SIGUSR1 catch() { info "Fatal error occurred at $1 in so-setup, failing setup." + grep --color=never "ERROR" "$setup_log" > "$error_log" whiptail_setup_failed exit } -# Init networking so rest of install works -if [[ -n "$TURBO" ]]; then - use_turbo_proxy -fi - -if [[ "$setup_type" == 'iso' ]]; then - set_hostname >> $setup_log 2>&1 - set_management_interface -fi - -disable_ipv6 - -if [[ "$setup_type" != 'iso' ]]; then - set_hostname >> $setup_log 2>&1 -fi - -if [[ $is_minion ]]; then - add_mngr_ip_to_hosts -fi - # This block sets REDIRECTIT which is used by a function outside the below subshell -{ - set_main_ip; - set_redirect; -} >> $setup_log 2>&1 +set_main_ip >> $setup_log 2>&1 +compare_main_nic_ip +set_redirect >> $setup_log 2>&1 # Begin install { @@ -504,10 +530,13 @@ fi { generate_passwords; secrets_pillar; - add_socore_user_manager; } >> $setup_log 2>&1 fi + if [[ $is_manager || $is_import || $is_helix ]]; then + add_socore_user_manager >> $setup_log 2>&1 + fi + if [[ $is_manager && ! $is_eval ]]; then add_soremote_user_manager >> $setup_log 2>&1 fi @@ -516,7 +545,6 @@ fi if [[ $is_minion || $is_import ]]; then set_updates >> $setup_log 2>&1 - [ "$automated" == no ] && copy_ssh_key >> $setup_log 2>&1 fi if [[ $is_manager && $is_airgap ]]; then @@ -603,7 +631,7 @@ fi accept_salt_key_remote >> $setup_log 2>&1 fi - if [[ $is_manager || $is_import ]]; then + if [[ $is_manager || $is_import || $is_helix ]]; then set_progress_str 20 'Accepting Salt key' salt-key -ya "$MINION_ID" >> $setup_log 2>&1 fi @@ -659,8 +687,10 @@ fi set_progress_str 63 "$(print_salt_state_apply 'common')" salt-call state.apply -l info common >> $setup_log 2>&1 - set_progress_str 64 "$(print_salt_state_apply 'nginx')" - salt-call state.apply -l info nginx >> $setup_log 2>&1 + if [[ ! 
$is_helix ]]; then + set_progress_str 64 "$(print_salt_state_apply 'nginx')" + salt-call state.apply -l info nginx >> $setup_log 2>&1 + fi if [[ $is_manager || $is_node || $is_import ]]; then set_progress_str 64 "$(print_salt_state_apply 'elasticsearch')" @@ -756,12 +786,14 @@ fi set_progress_str 81 "$(print_salt_state_apply 'strelka')" salt-call state.apply -l info strelka >> $setup_log 2>&1 fi - if [[ "$STRELKARULES" == 1 ]]; then - /usr/sbin/so-yara-update >> $setup_log 2>&1 + if [[ "$STRELKARULES" = 1 ]]; then + logCmd /usr/sbin/so-yara-update + else + info "Skipping running yara update: STRELKARULES='$STRELKARULES'" fi fi - if [[ $is_manager || $is_helix || $is_import ]]; then + if [[ $is_manager || $is_import ]]; then set_progress_str 82 "$(print_salt_state_apply 'utility')" salt-call state.apply -l info utility >> $setup_log 2>&1 fi @@ -795,21 +827,32 @@ success=$(tail -10 $setup_log | grep Failed | awk '{ print $2}') if [[ $success != 0 ]]; then SO_ERROR=1; fi # Check entire setup log for errors or unexpected salt states and ensure cron jobs are not reporting errors to root's mailbox -if grep -q -E "ERROR|Result: False" $setup_log || [[ -s /var/spool/mail/root && "$setup_type" == "iso" ]]; then SO_ERROR=1; fi +if grep -q -E "ERROR|Result: False" $setup_log || [[ -s /var/spool/mail/root && "$setup_type" == "iso" ]]; then + SO_ERROR=1 + + grep --color=never "ERROR" "$setup_log" > "$error_log" +fi if [[ -n $SO_ERROR ]]; then echo "Errors detected during setup; skipping post-setup steps to allow for analysis of failures." >> $setup_log 2>&1 + SKIP_REBOOT=1 whiptail_setup_failed + else echo "Successfully completed setup! Continuing with post-installation steps" >> $setup_log 2>&1 { export percentage=95 # set to last percentage used in previous subshell if [[ -n $ALLOW_ROLE && -n $ALLOW_CIDR ]]; then - set_progress_str 98 "Running so-allow -${ALLOW_ROLE} for ${ALLOW_CIDR}" + set_progress_str 97 "Running so-allow -${ALLOW_ROLE} for ${ALLOW_CIDR}" IP=$ALLOW_CIDR so-allow -$ALLOW_ROLE >> $setup_log 2>&1 fi + if [[ $is_manager ]]; then + set_progress_str 98 "Generating archive for setup directory" + generate_repo_tarball >> "$setup_log" 2>&1 + fi + if [[ $THEHIVE == 1 ]]; then set_progress_str 99 'Waiting for TheHive to start up' check_hive_init >> $setup_log 2>&1 @@ -820,6 +863,6 @@ else echo "Post-installation steps have completed." >> $setup_log 2>&1 fi -install_cleanup >> $setup_log 2>&1 +install_cleanup >> "$setup_log" 2>&1 if [[ -z $SKIP_REBOOT ]]; then shutdown -r now; else exit; fi diff --git a/setup/so-variables b/setup/so-variables index 09e0ebc46..1f154a5c0 100644 --- a/setup/so-variables +++ b/setup/so-variables @@ -23,6 +23,9 @@ export node_es_port setup_log="/root/sosetup.log" export setup_log +error_log="/root/errors.log" +export error_log + filesystem_root=$(df / | awk '$3 ~ /[0-9]+/ { print $2 * 1000 }') export filesystem_root @@ -61,5 +64,5 @@ mkdir -p "$default_salt_dir" export local_salt_dir=/opt/so/saltstack/local mkdir -p "$local_salt_dir" -SCRIPTDIR=$(cd "$(dirname "$0")" && pwd) +SCRIPTDIR=$(pwd) export SCRIPTDIR diff --git a/setup/so-whiptail b/setup/so-whiptail index 791cceb76..af7f6da2f 100755 --- a/setup/so-whiptail +++ b/setup/so-whiptail @@ -32,7 +32,7 @@ whiptail_basic_suri() { [ -n "$TESTING" ] && return - if [[ $is_smooshed ]]; then + if [[ $is_node && $is_sensor && ! 
$is_eval ]]; then local PROCS=$(expr $lb_procs / 2) if [ "$PROCS" -lt 1 ]; then PROCS=1; else PROCS=$PROCS; fi else @@ -51,7 +51,7 @@ whiptail_basic_zeek() { [ -n "$TESTING" ] && return - if [[ $is_smooshed ]]; then + if [[ $is_node && $is_sensor && ! $is_eval ]]; then local PROCS=$(expr $lb_procs / 2) if [ "$PROCS" -lt 1 ]; then PROCS=1; else PROCS=$PROCS; fi else @@ -276,7 +276,7 @@ whiptail_requirements_error() { if [[ $(echo "$requirement_needed" | tr '[:upper:]' '[:lower:]') == 'nics' ]]; then whiptail --title "Security Onion Setup" \ - --msgbox "This machine currently has $current_val $requirement_needed, but needs $needed_val to meet minimum requirements. Press OK to exit setup and reconfigure the machine." 10 75 + --msgbox "This machine currently has $current_val $requirement_needed, but needs $needed_val to meet minimum requirements. Select OK to exit setup and reconfigure the machine." 10 75 # Same as whiptail_cancel, but changed the wording to exit instead of cancel. whiptail --title "Security Onion Setup" --msgbox "Exiting Setup. No changes have been made." 8 75 @@ -290,7 +290,7 @@ whiptail_requirements_error() { exit else whiptail --title "Security Onion Setup" \ - --yesno "This machine currently has $current_val $requirement_needed, but needs $needed_val to meet minimum requirements. Press YES to continue anyway, or press NO to cancel." 10 75 + --yesno "This machine currently has $current_val $requirement_needed, but needs $needed_val to meet minimum requirements. Select YES to continue anyway, or select NO to cancel." 10 75 local exitstatus=$? whiptail_check_exitstatus $exitstatus @@ -311,7 +311,7 @@ whiptail_storage_requirements() { Visit https://docs.securityonion.net/en/2.1/hardware.html for more information. - Press YES to continue anyway, or press NO to cancel. + Select YES to continue anyway, or select NO to cancel. EOM whiptail \ @@ -372,7 +372,7 @@ whiptail_dhcp_warn() { [ -n "$TESTING" ] && return if [[ $setup_type == "iso" ]]; then - local interaction_text="Press YES to keep DHCP or NO to go back." + local interaction_text="Select YES to keep DHCP or NO to go back." local window_type="yesno" else local interaction_text="Press ENTER to continue." @@ -426,7 +426,7 @@ whiptail_dockernet_net() { [ -n "$TESTING" ] && return DOCKERNET=$(whiptail --title "Security Onion Setup" --inputbox \ - "\nEnter a /24 network range for docker to use: \nThe same range MUST be used on ALL nodes \n(Default value is pre-populated.)" 10 75 172.17.0.0 3>&1 1>&2 2>&3) + "\nEnter a /24 size network range for docker to use WITHOUT the /24 notation: \nThis range will be used on ALL nodes \n(Default value is pre-populated.)" 10 75 172.17.0.0 3>&1 1>&2 2>&3) local exitstatus=$? whiptail_check_exitstatus $exitstatus @@ -743,6 +743,44 @@ whiptail_management_nic() { } +whiptail_management_interface_setup() { + [ -n "$TESTING" ] && return + + local minion_msg + local msg + local line_count + + if [[ $is_minion ]]; then + line_count=11 + minion_msg="copy the ssh key for soremote to the manager. This will bring you to the command line temporarily to accept the manager's ECDSA certificate and enter the password for soremote" + else + line_count=9 + minion_msg="" + fi + + if [[ $is_iso ]]; then + if [[ $minion_msg != "" ]]; then + msg="initialize networking and $minion_msg" + else + msg="initialize networking" + fi + else + msg=$minion_msg + fi + + read -r -d '' message <<- EOM + Setup will now $msg. + + Select OK to continue. 
+ EOM + + whiptail --title "Security Onion Setup" --msgbox "$message" $line_count 75 + local exitstatus=$? + whiptail_check_exitstatus $exitstatus +} + + + whiptail_management_server() { [ -n "$TESTING" ] && return @@ -922,7 +960,7 @@ whiptail_network_notice() { [ -n "$TESTING" ] && return - whiptail --title "Security Onion Setup" --yesno "Since this is a network install we assume the management interface, DNS, Hostname, etc are already set up. Press YES to continue." 8 75 + whiptail --title "Security Onion Setup" --yesno "Since this is a network install we assume the management interface, DNS, Hostname, etc are already set up. Select YES to continue." 8 75 local exitstatus=$? whiptail_check_exitstatus $exitstatus @@ -1339,7 +1377,20 @@ whiptail_setup_failed() { [ -n "$TESTING" ] && return - whiptail --title "Security Onion Setup" --msgbox "Install had a problem. Please see $setup_log for details. Press Ok to exit." 8 75 + local check_err_msg + local height + + [ -f "$error_log" ] && check_err_msg="A summary of errors can be found in $error_log.\n" + + if [[ -n $check_err_msg ]]; then height=11; else height=10; fi + + read -r -d '' message <<- EOM + Install had a problem. Please see $setup_log for details.\n + $check_err_msg + Press Ok to exit. + EOM + + whiptail --title "Security Onion Setup" --msgbox "$message" $height 75 } whiptail_shard_count() { @@ -1420,11 +1471,11 @@ whiptail_suricata_pins() { readarray -t filtered_core_list <<< "$(echo "${cpu_core_list[@]}" "${ZEEKPINS[@]}" | xargs -n1 | sort | uniq -u | awk '{print $1}')" local filtered_core_str=() - for item in "${filtered_core_list[@]}"; do - filtered_core_str+=("$item" "") - done + for item in "${filtered_core_list[@]}"; do + filtered_core_str+=("$item" "") + done - if [[ $is_smooshed ]]; then + if [[ $is_node && $is_sensor && ! $is_eval ]]; then local PROCS=$(expr $lb_procs / 2) if [ "$PROCS" -lt 1 ]; then PROCS=1; else PROCS=$PROCS; fi else
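# Reviewer note (not part of the patch): with the fallback pillar values that the
# 2.3.0_to_2.3.20 migration in soup writes when no existing address pool is found in
# /etc/docker/daemon.json (docker:range '172.17.0.0/24', docker:bip '172.17.0.1/24'),
# the new salt/common/files/daemon.json template introduced above renders roughly as
# the JSON below; the empty registry-mirrors host is left exactly as it appears in
# the template. Non-default pillar values follow the same shape, with docker:range
# filling "base" and docker:bip filling "bip".
#
# {
#   "registry-mirrors": [ "https://:5000" ],
#   "bip": "172.17.0.1/24",
#   "default-address-pools": [
#     {
#       "base" : "172.17.0.0/24",
#       "size" : 24
#     }
#   ]
# }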