#!/bin/bash

# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.


# Pull in shared helpers (fail, get_random_value, systemctl_func, check_salt_master_status, ...).
. /usr/sbin/so-common
. /usr/sbin/so-image-common

UPDATE_DIR=/tmp/sogh/securityonion    # update source; repointed to the mounted ISO on airgap installs
DEFAULT_SALT_DIR=/opt/so/saltstack/default    # shipped (non-local) salt states and pillars
INSTALLEDVERSION=$(cat /etc/soversion)    # currently installed version; advanced by the up_to_* helpers
POSTVERSION=$INSTALLEDVERSION    # advanced by the post_to_* helpers after the upgrade completes
INSTALLEDSALTVERSION=$(salt --versions-report | grep Salt: | awk '{print $2}')
BATCHSIZE=5    # salt batch size for grid-wide operations
SOUP_LOG=/root/soup.log
WHATWOULDYOUSAYYAHDOHERE=soup
whiptail_title='Security Onion UPdater'
NOTIFYCUSTOMELASTICCONFIG=false
TOPFILE=/opt/so/saltstack/default/salt/top.sls    # swapped out by masterlock/masterunlock
BACKUPTOPFILE=/opt/so/saltstack/default/salt/top.sls.backup
SALTUPGRADED=false
SALT_CLOUD_INSTALLED=false
SALT_CLOUD_CONFIGURED=false
# used to display messages to the user at the end of soup
declare -a FINAL_MESSAGE_QUEUE=()


check_err() {
  # Translate an exit code into a human-readable message and exit with it.
  # Intended to be called from an EXIT/ERR trap with the failing status.
  # Globals: ERR_HANDLED (skip messaging if already handled), SOUP_LOG,
  #          DOC_BASE_URL, next_step_so_version, originally_requested_so_version.
  # Arguments: $1 - exit code of the failed command.
  local exit_code=$1
  # Fix: "occured" -> "occurred" in the user-facing message.
  local err_msg="Unhandled error occurred, please check $SOUP_LOG for details."

  # Another code path already explained the failure; just propagate the status.
  [[ $ERR_HANDLED == true ]] && exit "$exit_code"

  if [[ $exit_code -ne 0 ]]; then

    # Best-effort restore; do not let cleanup failures mask the original error.
    set +e
    failed_soup_restore_items

    printf '%s' "Soup failed with error $exit_code: "
    case $exit_code in
      2)
        echo 'No such file or directory'
      ;;
      5)
        echo 'Interrupted system call'
      ;;
      12)
        echo 'Out of memory'
      ;;
      28)
        echo 'No space left on device'
        echo "Likely ran out of space on disk, please review hardware requirements for Security Onion: $DOC_BASE_URL/hardware"
      ;;
      30)
        echo 'Read-only file system'
      ;;
      35)
        echo 'Resource temporarily unavailable'
      ;;
      64)
        echo 'Machine is not on the network'
      ;;
      67)
        echo 'Link has been severed'
      ;;
      100)
        echo 'Network is down'
      ;;
      101)
        echo 'Network is unreachable'
      ;;
      102)
        echo 'Network reset'
      ;;
      110)
        echo 'Connection timed out'
      ;;
      111)
        echo 'Connection refused'
      ;;
      112)
        echo 'Host is down'
      ;;
      113)
        echo 'No route to host'
      ;;
      160)
        # Fix: "Incompatiable" -> "Incompatible".
        echo 'Incompatible Elasticsearch upgrade'
      ;;
      161)
        echo 'Required intermediate Elasticsearch upgrade not complete'
      ;;
      170)
        echo "Intermediate upgrade completed successfully to $next_step_so_version, but next soup to Security Onion $originally_requested_so_version could not be started automatically."
        echo "Start soup again manually to continue the upgrade to Security Onion $originally_requested_so_version."
      ;;
      *)
        echo 'Unhandled error'
        echo "$err_msg"
      ;;
    esac
    # Codes 64-113 are network-ish errno values; point the user at the log too.
    if [[ $exit_code -ge 64 && $exit_code -le 113 ]]; then
      echo "$err_msg"
    fi

    exit "$exit_code"
  fi

}

add_common() {
  # Refresh the shared helper scripts from the freshly cloned repo, re-apply
  # the common state, then stop so the user re-runs soup with the new helpers.
  local src="$UPDATE_DIR/salt/common/tools/sbin"
  local dst="$DEFAULT_SALT_DIR/salt/common/tools/sbin"
  cp "$src/so-common" "$dst/"
  cp "$src/so-image-common" "$dst/"
  salt-call state.apply common queue=True
  echo "Run soup one more time"
  exit 0
}

airgap_mounted() {
  # Ensure the airgap update content is available under /tmp/soagupdate.
  # Accepts (via $ISOLOC) an ISO file, a directory already containing the
  # extracted content, or a block device; prompts interactively if unset.
  # NOTE(review): the failure paths below 'exit 0' rather than non-zero —
  # presumably so check_err does not fire; confirm that is intentional.
  # Let's see if the ISO is already mounted.
  if [[ -f /tmp/soagupdate/SecurityOnion/VERSION ]]; then
    echo "The ISO is already mounted"
  else
    if [[ -z $ISOLOC ]]; then
      echo "This is airgap. Ask for a location."
      echo ""
      cat << EOF
In order for soup to proceed, the path to the downloaded Security Onion ISO file, or the path to the CD-ROM or equivalent device containing the ISO media must be provided.
For example, if you have copied the new Security Onion ISO file to your home directory, then the path might look like /home/myuser/securityonion-2.x.y.iso.
Or, if you have burned the new ISO onto an optical disk then the path might look like /dev/cdrom.

EOF
      read -rp 'Enter the path to the new Security Onion ISO content: ' ISOLOC
    fi
    # Regular file: treat it as an ISO image and loop-mount it.
    if [[ -f $ISOLOC ]]; then
      # Mounting the ISO image
      mkdir -p /tmp/soagupdate
      mount -t iso9660 -o loop $ISOLOC /tmp/soagupdate
      # Make sure mounting was successful
      if [ ! -f /tmp/soagupdate/SecurityOnion/VERSION ]; then
        echo "Something went wrong trying to mount the ISO."
        echo "Ensure you verify the ISO that you downloaded."
        exit 0
      else
        echo "ISO has been mounted!"
      fi
    # Directory with extracted content: just symlink it into place.
    elif [[ -f $ISOLOC/SecurityOnion/VERSION ]]; then
      ln -s $ISOLOC /tmp/soagupdate
      echo "Found the update content"
    # Block device (e.g. /dev/cdrom): mount it directly.
    elif [[ -b $ISOLOC ]]; then
      mkdir -p /tmp/soagupdate
      mount $ISOLOC /tmp/soagupdate
      if [ ! -f /tmp/soagupdate/SecurityOnion/VERSION ]; then
        echo "Something went wrong trying to mount the device."
        echo "Ensure you verify the ISO that you downloaded."
        exit 0
      else
        echo "Device has been mounted! $(cat /tmp/soagupdate/SecurityOnion/VERSION)"
      fi
    else
      echo "Could not find Security Onion ISO content at ${ISOLOC}"
      echo "Ensure the path you entered is correct, and that you verify the ISO that you downloaded."
      exit 0
    fi
  fi
}

airgap_update_dockers() {
  # On airgap installs (or when an ISO path was supplied), replace the local
  # docker registry contents with the images shipped on the ISO.
  # Globals: is_airgap (0 = airgap), ISOLOC, AGDOCKER (set by check_airgap).
  if [[ $is_airgap -eq 0 ]] || [[ ! -z "$ISOLOC" ]]; then
    # Let's copy the tarball
    if [[ ! -f $AGDOCKER/registry.tar ]]; then
      # NOTE(review): exits 0 on a missing registry tarball — confirm intent.
      echo "Unable to locate registry. Exiting"
      exit 0
    else
      echo "Stopping the registry docker"
      docker stop so-dockerregistry
      docker rm so-dockerregistry
      echo "Copying the new dockers over"
      tar xf "$AGDOCKER/registry.tar" -C /nsm/docker-registry/docker
      echo "Add Registry back"
      docker load -i "$AGDOCKER/registry_image.tar"
      echo "Restart registry container"
      salt-call state.apply registry queue=True
    fi
  fi
}

backup_old_states_pillars() {
  # Archive the default and local salt trees into /nsm/backup before soup
  # modifies them, tagging each archive with the installed version and time.
  # Fix: drop the useless $(echo ...) substitutions, quote the archive paths,
  # and take the timestamp once so both archives share the same name stamp.
  local ts
  ts=$(date +%Y%m%d-%H%M%S)

  tar czf "/nsm/backup/${INSTALLEDVERSION}_${ts}_soup_default_states_pillars.tar.gz" /opt/so/saltstack/default/
  tar czf "/nsm/backup/${INSTALLEDVERSION}_${ts}_soup_local_states_pillars.tar.gz" /opt/so/saltstack/local/
}

update_registry() {
  # Bounce the local docker registry container and re-apply its salt state.
  local registry_name=so-dockerregistry
  docker stop "$registry_name"
  docker rm "$registry_name"
  salt-call state.apply registry queue=True
}

check_airgap() {
  # Determine whether this is an airgap install by reading the global pillar.
  # If so, point the update, docker, and package-repo paths at the mounted ISO.
  # Sets: AIRGAP, is_airgap (0 = airgap, 1 = not), UPDATE_DIR, AGDOCKER, AGREPO.
  # Fix: grep the pillar file directly instead of piping cat into grep.
  # See if this is an airgap install
  AIRGAP=$(grep airgap: /opt/so/saltstack/local/pillar/global/soc_global.sls | awk '{print $2}' | tr '[:upper:]' '[:lower:]')
  if [[ "$AIRGAP" == "true" ]]; then
      is_airgap=0
      UPDATE_DIR=/tmp/soagupdate/SecurityOnion
      AGDOCKER=/tmp/soagupdate/docker
      AGREPO=/tmp/soagupdate/minimal/Packages
  else
      is_airgap=1
  fi
}

# {% raw %}

check_local_mods() {
  # Compare every file under the local salt tree against its counterpart in
  # the default tree and list local modifications that soup might clobber.
  local salt_local=/opt/so/saltstack/local

  local_mod_arr=()

  while IFS= read -r -d '' local_file; do
    # Map /opt/so/saltstack/local/<path> onto ${DEFAULT_SALT_DIR}/<path>.
    stripped_path=${local_file#"$salt_local"}
    default_file="${DEFAULT_SALT_DIR}${stripped_path}"
    if [[ -f $default_file ]]; then
      file_diff=$(diff "$default_file" "$local_file" )
      # Only count real content changes ("<"/">" diff lines).
      if [[ $(echo "$file_diff" | grep -Ec "^[<>]") -gt 0 ]]; then
        local_mod_arr+=( "$local_file" )
      fi
    fi
  done< <(find "$salt_local" -type f -print0)

  # Fix: ${#local_mod_arr} is the string length of element 0, not the number
  # of modified files; ${#local_mod_arr[@]} is the element count we want.
  if [[ ${#local_mod_arr[@]} -gt 0 ]]; then
    echo "Potentially breaking changes found in the following files (check ${DEFAULT_SALT_DIR} for original copy):"
    for file_str in "${local_mod_arr[@]}"; do
      echo "  $file_str"
    done
    echo ""
    echo "To reference this list later, check $SOUP_LOG"
    sleep 10
  fi
}

# {% endraw %}

check_pillar_items() {
  # Verify the manager's pillars render cleanly before upgrading; a pillar
  # rendering error mid-upgrade would leave the grid in a broken state.
  # NOTE(review): exits 0 on failure — presumably so check_err does not
  # report a spurious error code; confirm that is intentional.
  local pillar_output=$(salt-call pillar.items -lerror --out=json)

  # salt-call puts rendering problems in .local._errors.
  cond=$(jq '.local | has("_errors")' <<< "$pillar_output")
  if [[ "$cond" == "true" ]]; then
    printf "\nThere is an issue rendering the manager's pillars. Please correct the issues in the sls files mentioned below before running SOUP again.\n\n"
    jq '.local._errors[]' <<< "$pillar_output"
    exit 0
  else
    printf "\nThe manager's pillars can be rendered. We can proceed with SOUP.\n\n"
  fi
}

check_saltmaster_status() {
  # Block until the salt-master is reachable and ready; abort soup otherwise.
  # check_salt_master_status and fail are provided by so-common.
  set +e
  echo "Waiting on the Salt Master service to be ready."
  check_salt_master_status || fail  "Can't access salt master or it is not ready. Check $SOUP_LOG for details."
  set -e
}

check_sudoers() {
  # Warn if the installer's so-setup entry is still present in sudoers;
  # it is no longer needed after setup completes.
  grep -q "so-setup" /etc/sudoers || return 0
  echo "There is an entry for so-setup in the sudoers file, this can be safely deleted using \"visudo\"."
}

check_os_updates() {
  # Offer to install pending OS package updates (excluding docker/containerd/
  # salt, which soup manages separately). In unattended mode, always update.
  # Check to see if there are OS updates
  echo "Checking for OS updates."
  NEEDUPDATES="We have detected missing operating system (OS) updates. Do you want to install these OS updates now? This could take a while depending on the size of your grid and how many packages are missing, but it is recommended to keep your system updated."
  # Count candidate packages, filtering out the ones soup pins/handles itself.
  OSUPDATES=$(dnf -q list updates | grep -v docker | grep -v containerd | grep -v salt | grep -v Available | wc -l)
  if [[ "$OSUPDATES" -gt 0 ]]; then
      if [[ -z $UNATTENDED ]]; then
        echo "$NEEDUPDATES"
        echo ""
        read -rp "Press U to update OS packages (recommended), C to continue without updates, or E to exit: " confirm
        if [[ "$confirm" == [cC] ]]; then
          echo "Continuing without updating packages"
        elif [[ "$confirm" == [uU] ]]; then
          echo "Applying Grid Updates. The following patch.os salt state may take a while depending on how many packages need to be updated."
          update_flag=true
        else
          # Any other key (including E) aborts soup cleanly.
          echo "Exiting soup"
          exit 0
        fi
      else
        update_flag=true
      fi
  else
    echo "Looks like you have an updated OS"
  fi

  if [[ $update_flag == true ]]; then
    # Patch the whole grid in batches of 5; tolerate per-node failures here.
    set +e
    run_check_net_err "salt '*' -b 5 state.apply patch.os queue=True" 'Could not apply OS updates, please check your network connection.'
    set -e
  fi
}

clean_dockers() {
  # Reclaim disk space by pruning all unused docker images, containers,
  # networks, and volumes.
  echo "Trying to clean up old dockers."
  docker system prune --all --force --volumes
}

clone_to_tmp() {
  # Clone the securityonion repo (branch $BRANCH, default 2.4/main) into
  # /tmp/sogh to serve as the update source ($UPDATE_DIR points inside it).
  # Clean old files
  rm -rf /tmp/sogh
  # Make a temp location for the files
  mkdir -p /tmp/sogh
  cd /tmp/sogh
  SOUP_BRANCH="-b 2.4/main"
  if [ -n "$BRANCH" ]; then
    SOUP_BRANCH="-b $BRANCH"
  fi
  # SOUP_BRANCH is intentionally unquoted so "-b <branch>" splits into two args.
  git clone $SOUP_BRANCH https://github.com/Security-Onion-Solutions/securityonion.git
  cd /tmp
  if [ ! -f $UPDATE_DIR/VERSION ]; then
    # NOTE(review): exits 0 on clone failure — confirm that is intentional.
    echo "Update was unable to pull from Github. Please check your Internet access."
    exit 0
  fi
}

# there is a function like this in so-minion, but we cannot source it since args required for so-minion
# there is a function like this in so-minion, but we cannot source it since args required for so-minion
create_ca_pillar() {
  # Create (or overwrite) the local CA pillar and record this manager
  # ($MINIONID) as the CA server.
  local ca_pillar_dir="/opt/so/saltstack/local/pillar/ca"
  local ca_pillar_file="${ca_pillar_dir}/init.sls"

  echo "Updating CA pillar configuration"
  mkdir -p "$ca_pillar_dir"
  # Start from an empty mapping so so-yaml.py has a document to add into.
  echo "ca: {}" > "$ca_pillar_file"

  so-yaml.py add "$ca_pillar_file" ca.server "$MINIONID"
  chown -R socore:socore "$ca_pillar_dir"
}

disable_logstash_heavynodes() {
  # Disable Logstash in every non-advanced heavynode minion pillar and queue
  # a single user-facing notice if any were found.
  # Fix: the counter is now 'local' (it previously leaked into the global
  # scope), matching the sibling disable_redis_heavynodes.
  local c=0
  printf "\nChecking for heavynodes and disabling Logstash if they exist\n"
  for file in /opt/so/saltstack/local/pillar/minions/*.sls; do
    if [[ "$file" =~ "_heavynode.sls" && ! "$file" =~ "/opt/so/saltstack/local/pillar/minions/adv_" ]]; then
      # Queue the notice only once, on the first matching node.
      if [ "$c" -eq 0 ]; then
        c=$((c + 1))
        FINAL_MESSAGE_QUEUE+=("Logstash has been disabled on all heavynodes. It can be re-enabled via Grid Configuration in SOC.")
      fi
      echo "Disabling Logstash for: $file"
      so-yaml.py replace "$file" logstash.enabled False
    fi
  done
}

disable_redis_heavynodes() {
  # Disable Redis in every non-advanced heavynode minion pillar and queue a
  # single user-facing notice if any were found.
  local found=0
  local minion_dir=/opt/so/saltstack/local/pillar/minions

  printf "\nChecking for heavynodes and disabling Redis if they exist\n"
  for pillar_file in "$minion_dir"/*.sls; do
    if [[ "$pillar_file" =~ "_heavynode.sls" ]] && [[ ! "$pillar_file" =~ "/opt/so/saltstack/local/pillar/minions/adv_" ]]; then
      found=1
      echo "Disabling Redis for: $pillar_file"
      so-yaml.py replace "$pillar_file" redis.enabled False
    fi
  done

  if (( found != 0 )); then
    FINAL_MESSAGE_QUEUE+=("Redis has been disabled on all heavynodes.")
  fi
}

enable_highstate() {
    # Re-enable the scheduled highstate that stop_salt_minion disabled.
    echo "Enabling highstate."
    salt-call state.enable highstate -l info --local
    printf '\n'
}

get_soup_script_hashes() {
  # Hash the installed helper scripts and their counterparts in the update
  # source so the caller can detect when soup itself changed and must be
  # refreshed (and soup re-run) before upgrading the rest of the system.
  CURRENTSOUP=$(md5sum /usr/sbin/soup | awk '{print $1}')
  GITSOUP=$(md5sum $UPDATE_DIR/salt/manager/tools/sbin/soup | awk '{print $1}')
  CURRENTCMN=$(md5sum /usr/sbin/so-common | awk '{print $1}')
  GITCMN=$(md5sum $UPDATE_DIR/salt/common/tools/sbin/so-common | awk '{print $1}')
  CURRENTIMGCMN=$(md5sum /usr/sbin/so-image-common | awk '{print $1}')
  GITIMGCMN=$(md5sum $UPDATE_DIR/salt/common/tools/sbin/so-image-common | awk '{print $1}')
  CURRENTSOFIREWALL=$(md5sum /usr/sbin/so-firewall | awk '{print $1}')
  GITSOFIREWALL=$(md5sum $UPDATE_DIR/salt/manager/tools/sbin/so-firewall | awk '{print $1}')
}

highstate() {
  # Apply the full local highstate, queueing behind any running salt jobs.
  salt-call state.highstate -l info queue=True
}

masterlock() {
  # Swap the salt top file for a minimal one so that, while soup works, only
  # the ca and elasticsearch states can run on this manager ($MINIONID).
  # Restored later by masterunlock. Fix: quote path expansions and write the
  # replacement top file with a single grouped redirect.
  echo "Locking Salt Master"
  mv -v "$TOPFILE" "$BACKUPTOPFILE"
  {
    echo "base:"
    echo "  $MINIONID:"
    echo "    - ca"
    echo "    - elasticsearch"
  } > "$TOPFILE"
}

masterunlock() {
  # Restore the original top file saved by masterlock, if one exists.
  if [[ -f "$BACKUPTOPFILE" ]]; then
    echo "Unlocking Salt Master"
    mv -v "$BACKUPTOPFILE" "$TOPFILE"
  else
    echo "Salt Master does not need unlocked."
  fi
}

phases_pillar_2_4_80() {
  # 2.4.80 migration: the index_template.phases pillar key is unused; if
  # present, remove it and queue a notice pointing users at the replacement
  # (policy.phases in the SOC Grid Configuration UI).
  echo "Checking if pillar value: elasticsearch.index_settings.global_overrides.index_template.phases exists"
  set +e
  PHASES=$(so-yaml.py get /opt/so/saltstack/local/pillar/elasticsearch/soc_elasticsearch.sls elasticsearch.index_settings.global_overrides.index_template.phases)
  # so-yaml.py get: exit 0 = key found, 2 = key missing.
  case $? in
    0)
    so-yaml.py remove /opt/so/saltstack/local/pillar/elasticsearch/soc_elasticsearch.sls elasticsearch.index_settings.global_overrides.index_template.phases
    # read -d '' returns non-zero at EOF, which is why set +e is in effect.
    read -r -d '' msg <<- EOF
Found elasticsearch.index_settings.global_overrides.index_template.phases was set to:
${PHASES}

Removed unused pillar value: elasticsearch.index_settings.global_overrides.index_template.phases
To set policies, navigate to the SOC Grid Configuration UI at elasticsearch.index_settings.global_overrides.policy.phases
A backup of all pillar files was saved to /nsm/backup/
EOF
    FINAL_MESSAGE_QUEUE+=("$msg")
    ;;
    2) echo "Pillar elasticsearch.index_settings.global_overrides.index_template.phases does not exist. No action taken." ;;
    *) echo "so-yaml.py returned something other than 0 or 2 exit code" ;; # we shouldn't see this
  esac
  set -e
}

preupgrade_changes() {
    # This function is to add any new pillar items if needed.
    # Each up_to_X helper advances INSTALLEDVERSION to X, so starting from
    # the installed version every later migration runs in order (the guards
    # chain one into the next).
    echo "Checking to see if changes are needed."

    [[ "$INSTALLEDVERSION" == 2.4.2 ]] && up_to_2.4.3
    [[ "$INSTALLEDVERSION" == 2.4.3 ]] && up_to_2.4.4
    [[ "$INSTALLEDVERSION" == 2.4.4 ]] && up_to_2.4.5
    [[ "$INSTALLEDVERSION" == 2.4.5 ]] && up_to_2.4.10
    [[ "$INSTALLEDVERSION" == 2.4.10 ]] && up_to_2.4.20
    [[ "$INSTALLEDVERSION" == 2.4.20 ]] && up_to_2.4.30
    [[ "$INSTALLEDVERSION" == 2.4.30 ]] && up_to_2.4.40
    [[ "$INSTALLEDVERSION" == 2.4.40 ]] && up_to_2.4.50
    [[ "$INSTALLEDVERSION" == 2.4.50 ]] && up_to_2.4.60
    [[ "$INSTALLEDVERSION" == 2.4.60 ]] && up_to_2.4.70
    [[ "$INSTALLEDVERSION" == 2.4.70 ]] && up_to_2.4.80
    [[ "$INSTALLEDVERSION" == 2.4.80 ]] && up_to_2.4.90
    [[ "$INSTALLEDVERSION" == 2.4.90 ]] && up_to_2.4.100
    [[ "$INSTALLEDVERSION" == 2.4.100 ]] && up_to_2.4.110
    [[ "$INSTALLEDVERSION" == 2.4.110 ]] && up_to_2.4.111
    [[ "$INSTALLEDVERSION" == 2.4.111 ]] && up_to_2.4.120
    [[ "$INSTALLEDVERSION" == 2.4.120 ]] && up_to_2.4.130
    [[ "$INSTALLEDVERSION" == 2.4.130 ]] && up_to_2.4.140
    [[ "$INSTALLEDVERSION" == 2.4.140 ]] && up_to_2.4.141
    [[ "$INSTALLEDVERSION" == 2.4.141 ]] && up_to_2.4.150
    [[ "$INSTALLEDVERSION" == 2.4.150 ]] && up_to_2.4.160
    [[ "$INSTALLEDVERSION" == 2.4.160 ]] && up_to_2.4.170
    [[ "$INSTALLEDVERSION" == 2.4.170 ]] && up_to_2.4.180
    [[ "$INSTALLEDVERSION" == 2.4.180 ]] && up_to_2.4.190
    [[ "$INSTALLEDVERSION" == 2.4.190 ]] && up_to_2.4.200
    [[ "$INSTALLEDVERSION" == 2.4.200 ]] && up_to_2.4.201
    [[ "$INSTALLEDVERSION" == 2.4.201 ]] && up_to_2.4.210
    [[ "$INSTALLEDVERSION" == 2.4.210 ]] && up_to_2.4.211
    # Ensure a zero exit status when the final guard does not match, so a
    # caller running under 'set -e' is not aborted here.
    true
}

postupgrade_changes() {
    # This function is to add any new pillar items if needed.
    # Mirror of preupgrade_changes: each post_to_X helper advances
    # POSTVERSION to X, so the guards chain and every later post-upgrade
    # step runs in order.
    echo "Running post upgrade processes."

    [[ "$POSTVERSION" == 2.4.2 ]] && post_to_2.4.3
    [[ "$POSTVERSION" == 2.4.3 ]] && post_to_2.4.4
    [[ "$POSTVERSION" == 2.4.4 ]] && post_to_2.4.5
    [[ "$POSTVERSION" == 2.4.5 ]] && post_to_2.4.10
    [[ "$POSTVERSION" == 2.4.10 ]] && post_to_2.4.20
    [[ "$POSTVERSION" == 2.4.20 ]] && post_to_2.4.30
    [[ "$POSTVERSION" == 2.4.30 ]] && post_to_2.4.40
    [[ "$POSTVERSION" == 2.4.40 ]] && post_to_2.4.50
    [[ "$POSTVERSION" == 2.4.50 ]] && post_to_2.4.60
    [[ "$POSTVERSION" == 2.4.60 ]] && post_to_2.4.70
    [[ "$POSTVERSION" == 2.4.70 ]] && post_to_2.4.80
    [[ "$POSTVERSION" == 2.4.80 ]] && post_to_2.4.90
    [[ "$POSTVERSION" == 2.4.90 ]] && post_to_2.4.100
    [[ "$POSTVERSION" == 2.4.100 ]] && post_to_2.4.110
    [[ "$POSTVERSION" == 2.4.110 ]] && post_to_2.4.111
    [[ "$POSTVERSION" == 2.4.111 ]] && post_to_2.4.120
    [[ "$POSTVERSION" == 2.4.120 ]] && post_to_2.4.130
    [[ "$POSTVERSION" == 2.4.130 ]] && post_to_2.4.140
    [[ "$POSTVERSION" == 2.4.140 ]] && post_to_2.4.141
    [[ "$POSTVERSION" == 2.4.141 ]] && post_to_2.4.150
    [[ "$POSTVERSION" == 2.4.150 ]] && post_to_2.4.160
    [[ "$POSTVERSION" == 2.4.160 ]] && post_to_2.4.170
    [[ "$POSTVERSION" == 2.4.170 ]] && post_to_2.4.180
    [[ "$POSTVERSION" == 2.4.180 ]] && post_to_2.4.190
    [[ "$POSTVERSION" == 2.4.190 ]] && post_to_2.4.200
    [[ "$POSTVERSION" == 2.4.200 ]] && post_to_2.4.201
    [[ "$POSTVERSION" == 2.4.201 ]] && post_to_2.4.210
    [[ "$POSTVERSION" == 2.4.210 ]] && post_to_2.4.211
    # Ensure a zero exit status when the final guard does not match, so a
    # caller running under 'set -e' is not aborted here.
    true
}

post_to_2.4.3() {
  # No post-upgrade work for this release; just advance the marker.
  printf '%s\n' "Nothing to apply"
  POSTVERSION=2.4.3
}

post_to_2.4.4() {
  # No post-upgrade work for this release; just advance the marker.
  printf '%s\n' "Nothing to apply"
  POSTVERSION=2.4.4
}

post_to_2.4.5() {
  # No post-upgrade work for this release; just advance the marker.
  printf '%s\n' "Nothing to apply"
  POSTVERSION=2.4.5
}

post_to_2.4.10() {
  # Point existing Elastic Fleet policies at the current Elasticsearch URLs.
  echo "Updating Elastic Fleet ES URLs...."
  /sbin/so-elastic-fleet-es-url-update --force
  POSTVERSION=2.4.10
}

post_to_2.4.20() {
  # Fire-and-forget docker volume prune on every minion in the grid.
  echo "Pruning unused docker volumes on all nodes - This process will run in the background."
  salt --async \* cmd.run "docker volume prune -f"
  POSTVERSION=2.4.20
}

post_to_2.4.30() {
  # Regenerate the manager's web SSL material: re-apply the ca state, retire
  # the old managerssl key/cert, then let the nginx state mint replacements.
  # there is an occasional error with this state: pki_public_ca_crt: TypeError: list indices must be integers or slices, not str
  set +e
  salt-call state.apply ca queue=True
  set -e
  # Stop the minion first so nothing re-creates the certs mid-move.
  stop_salt_minion
  mv /etc/pki/managerssl.crt /etc/pki/managerssl.crt.old
  mv /etc/pki/managerssl.key /etc/pki/managerssl.key.old
  systemctl_func "start" "salt-minion"
  salt-call state.apply nginx queue=True
  # stop_salt_minion disabled the scheduled highstate; turn it back on.
  enable_highstate
  POSTVERSION=2.4.30
}

post_to_2.4.40() {
  # No post-upgrade work for this release; just advance the marker.
  printf '%s\n' "Nothing to apply"
  POSTVERSION=2.4.40
}

post_to_2.4.50() {
  # No post-upgrade work for this release; just advance the marker.
  printf '%s\n' "Nothing to apply"
  POSTVERSION=2.4.50
}

post_to_2.4.60() {
  # No post-upgrade work for this release; just advance the marker.
  printf '%s\n' "Nothing to apply"
  POSTVERSION=2.4.60
}

post_to_2.4.70() {
  # Drop the obsolete idh.services key from non-advanced IDH minion pillars.
  printf "\nRemoving idh.services from any existing IDH node pillar files\n"
  for pillar_file in /opt/so/saltstack/local/pillar/minions/*.sls; do
    if [[ $pillar_file =~ "_idh.sls" ]] && [[ ! $pillar_file =~ "/opt/so/saltstack/local/pillar/minions/adv_" ]]; then
      echo "Removing idh.services from: $pillar_file"
      so-yaml.py remove "$pillar_file" idh.services
    fi
  done
  POSTVERSION=2.4.70
}

post_to_2.4.80() {
  # Re-evaluate the Elastic Fleet output policy (Kafka vs. default output).
  printf '\n%s\n\n' "Checking if update to Elastic Fleet output policy is required"
  so-kafka-fleet-output-policy
  POSTVERSION=2.4.80
}

post_to_2.4.90() {
  # Heavynodes no longer run Logstash by default.
  disable_logstash_heavynodes
  POSTVERSION=2.4.90
}

post_to_2.4.100() {
  # No post-upgrade work for this release; just advance the marker.
  printf '%s\n' "Nothing to apply"
  POSTVERSION=2.4.100
}

post_to_2.4.110() {
  # No post-upgrade work for this release; just advance the marker.
  printf '%s\n' "Nothing to apply"
  POSTVERSION=2.4.110
}

post_to_2.4.111() {
  # No post-upgrade work for this release; just advance the marker.
  printf '%s\n' "Nothing to apply"
  POSTVERSION=2.4.111
}

post_to_2.4.120() {
  # Push updated index settings, then force a rollover so the new suricata
  # alert mapping takes effect on a fresh backing index.
  update_elasticsearch_index_settings

  # Manually rollover suricata alerts index to ensure data_stream.dataset expected mapping is set to 'suricata'
  rollover_index "logs-suricata.alerts-so"

  POSTVERSION=2.4.120
}

post_to_2.4.130() {
  # Optional integrations are loaded AFTER initial successful load of core ES templates (/opt/so/state/estemplates.txt)
  #   Dynamic templates are created in elasticsearch.enabled for every optional integration based on output of so-elastic-fleet-optional-integrations-load script
  echo "Ensuring Elasticsearch templates are up to date after updating package registry"
  salt-call state.apply elasticsearch queue=True

  # Update kibana default space
  salt-call state.apply kibana.config queue=True
  echo "Updating Kibana default space"
  /usr/sbin/so-kibana-space-defaults

  POSTVERSION=2.4.130
}

post_to_2.4.140() {
  # No post-upgrade work for this release; just advance the marker.
  printf '%s\n' "Nothing to apply"
  POSTVERSION=2.4.140
}

post_to_2.4.141() {
  # No post-upgrade work for this release; just advance the marker.
  printf '%s\n' "Nothing to apply"
  POSTVERSION=2.4.141
}

post_to_2.4.150() {
  # No post-upgrade work for this release; just advance the marker.
  printf '%s\n' "Nothing to apply"
  POSTVERSION=2.4.150
}

post_to_2.4.160() {
  # No post-upgrade work for this release; just advance the marker.
  printf '%s\n' "Nothing to apply"
  POSTVERSION=2.4.160
}

post_to_2.4.170() {
  # Re-apply the Kibana config state and reset the default space.
  salt-call state.apply kibana.config queue=True
  echo "Updating Kibana default space"
  /usr/sbin/so-kibana-space-defaults

  POSTVERSION=2.4.170
}

post_to_2.4.180() {
  # Force-refresh the Kafka output policy regardless of current state.
  /usr/sbin/so-kafka-fleet-output-policy --force

  POSTVERSION=2.4.180
}

post_to_2.4.190() {
    # Repair Elastic Fleet outputs: import/eval nodes get a dedicated fleet
    # output update, and grids on the default (logstash) pipeline get their
    # output policy rebuilt if it is showing the known ssl-key corruption.
    # Only need to update import / eval nodes
    if [[ "$MINION_ROLE" == "import" ]] || [[ "$MINION_ROLE" == "eval" ]]; then
        update_import_fleet_output
    fi

    # Check if expected default policy is logstash (global.pipeline is REDIS or "")
    pipeline=$(lookup_pillar "pipeline" "global")
    if [[ -z "$pipeline" ]] || [[ "$pipeline" == "REDIS" ]]; then
        # Check if this grid is currently affected by corrupt fleet output policy
        if elastic-agent status | grep "config: key file not configured" > /dev/null 2>&1; then
            echo "Elastic Agent shows an ssl error connecting to logstash output. Updating output policy..."
            update_default_logstash_output
        fi
    fi
    # Apply new elasticsearch.server index template
    rollover_index "logs-elasticsearch.server-default"

  POSTVERSION=2.4.190
}

post_to_2.4.200() {
  # Finish the idstools -> built-in Suricata rule-management migration.
  echo "Initiating Suricata idstools migration..."
  suricata_idstools_removal_post

  POSTVERSION=2.4.200
}

post_to_2.4.201() {
  # No post-upgrade work for this release; just advance the marker.
  printf '%s\n' "Nothing to apply"
  POSTVERSION=2.4.201
}

post_to_2.4.210() {
  # 2.4.210 post-upgrade: rollover kratos, disable Redis on heavynodes,
  # seed new case/assistant indices, rebuild agent installers, and move the
  # managed_integrations pillar from elasticsearch.* to manager.*.
  echo "Rolling over Kratos index to apply new index template"

  rollover_index "logs-kratos-so"

  disable_redis_heavynodes

  initialize_elasticsearch_indices "so-case so-casehistory so-assistant-session so-assistant-chat"

  echo "Regenerating Elastic Agent Installers"
  /sbin/so-elastic-agent-gen-installers

  # migrate elasticsearch:managed_integrations pillar to manager:managed_integrations
  # (only when the old key exists; so-yaml.py get exits non-zero otherwise)
  if managed_integrations=$(/usr/sbin/so-yaml.py get /opt/so/saltstack/local/pillar/elasticsearch/soc_elasticsearch.sls elasticsearch.managed_integrations 2>/dev/null); then
    local managed_integrations_old_pillar="/tmp/elasticsearch-managed_integrations.yaml"

    echo "Migrating managed_integrations pillar"
    # Stage the extracted YAML in a temp file, add it under manager.*, then
    # remove the old elasticsearch.* key.
    echo -e "$managed_integrations" > "$managed_integrations_old_pillar"

    /usr/sbin/so-yaml.py add /opt/so/saltstack/local/pillar/manager/soc_manager.sls manager.managed_integrations file:$managed_integrations_old_pillar > /dev/null 2>&1

    /usr/sbin/so-yaml.py remove /opt/so/saltstack/local/pillar/elasticsearch/soc_elasticsearch.sls elasticsearch.managed_integrations
  fi

  # Remove so-rule-update script left behind by the idstools removal in 2.4.200
  rm -f /usr/sbin/so-rule-update

  POSTVERSION=2.4.210
}

post_to_2.4.211() {
  # No post-upgrade work for this release; just advance the marker.
  printf '%s\n' "Nothing to apply"
  POSTVERSION=2.4.211
}

repo_sync() {
  # Sync the local OS package repo as the unprivileged socore user.
  # fail (from so-common) aborts soup with the given message.
  echo "Sync the local repo."
  su socore -c '/usr/sbin/so-repo-sync' || fail "Unable to complete so-repo-sync."
}

stop_salt_master() {
    # kill all salt jobs across the grid because they hang indefinitely if they are queued and salt-master restarts
    set +e
    echo ""
    echo "Killing all Salt jobs across the grid."
    salt \* saltutil.kill_all_jobs >> $SOUP_LOG 2>&1
    echo ""
    echo "Killing any queued Salt jobs on the manager."
    pkill -9 -ef "/usr/bin/python3 /bin/salt" >> $SOUP_LOG 2>&1

    echo ""
    echo "Storing salt-master PID."
    MASTERPID=$(pgrep -f '/opt/saltstack/salt/bin/python3.10 /usr/bin/salt-master MainProcess')
    if [ ! -z "$MASTERPID" ]; then
      echo "Found salt-master PID $MASTERPID"
      systemctl_func "stop" "salt-master"
      # Wait up to 30s for the stopped process to actually exit; systemd's
      # restart policy means we must not force-kill it ourselves.
      if ps -p "$MASTERPID" > /dev/null 2>&1; then
        timeout 30 tail --pid=$MASTERPID -f /dev/null || echo "salt-master still running at $(date +"%T.%6N") after waiting 30s. We cannot kill due to systemd restart option."
      fi
    else
      echo "The salt-master PID was not found. The process '/usr/bin/salt-master MainProcess' is not running."
    fi
    set -e
}

stop_salt_minion() {
    # Disable the scheduled highstate, kill local salt jobs, stop the
    # salt-minion service, and wait (up to 30s) for the process to exit,
    # force-killing it only if the wait times out.
    echo "Disabling highstate to prevent from running if salt-minion restarts."
    salt-call state.disable highstate -l info --local
    echo ""

    # kill all salt jobs before stopping salt-minion
    set +e
    echo ""
    echo "Killing Salt jobs on this node."
    salt-call saltutil.kill_all_jobs --local

    echo "Storing salt-minion pid."
    MINIONPID=$(pgrep -f '/opt/saltstack/salt/bin/python3.10 /usr/bin/salt-minion' | head -1)
    echo "Found salt-minion PID $MINIONPID"
    systemctl_func "stop" "salt-minion"

    # Fix: the old 'timeout ... || echo ... && pkill' parsed as
    # '(timeout || echo) && pkill', so pkill ran even when the minion exited
    # cleanly. Only force-kill on an actual timeout, and skip the wait when
    # no PID was found (tail --pid= with an empty value is an error).
    if [ -n "$MINIONPID" ]; then
      if ! timeout 30 tail --pid=$MINIONPID -f /dev/null; then
        echo "Killing salt-minion at $(date +"%T.%6N") after waiting 30s"
        pkill -9 -ef /usr/bin/salt-minion
      fi
    fi
    set -e
}


up_to_2.4.3() {
  # No pre-upgrade work for this release; just advance the marker.
  printf '%s\n' "Nothing to do for 2.4.3"

  INSTALLEDVERSION=2.4.3
}

up_to_2.4.4() {
  # No pre-upgrade work for this release; just advance the marker.
  printf '%s\n' "Nothing to do for 2.4.4"

  INSTALLEDVERSION=2.4.4
}

up_to_2.4.5() {
  # No pre-upgrade work for this release; just advance the marker.
  printf '%s\n' "Nothing to do for 2.4.5"

  INSTALLEDVERSION=2.4.5
}

up_to_2.4.10() {
  # No pre-upgrade work for this release; just advance the marker.
  printf '%s\n' "Nothing to do for 2.4.10"

  INSTALLEDVERSION=2.4.10
}

up_to_2.4.20() {
  # No pre-upgrade work for this release; just advance the marker.
  printf '%s\n' "Nothing to do for 2.4.20"

  INSTALLEDVERSION=2.4.20
}

up_to_2.4.30() {
  # No pre-upgrade work for this release; just advance the marker.
  printf '%s\n' "Nothing to do for 2.4.30"

  INSTALLEDVERSION=2.4.30
}

up_to_2.4.40() {
  # These static layer files are no longer shipped; remove stale copies.
  echo "Removing old ATT&CK Navigator Layers..."
  local layer_dir=/opt/so/conf/navigator/layers
  rm -f "$layer_dir/enterprise-attack.json" "$layer_dir/nav_layer_playbook.json"

  INSTALLEDVERSION=2.4.40
}

up_to_2.4.50() {
  # 2.4.50 migration: create STIG pillar dirs, install the new so-yaml,
  # back up the salt-master config, add new file_roots entries, and move
  # Suricata rules into their own subdirectory.
  echo "Creating additional pillars.."
  mkdir -p /opt/so/saltstack/local/pillar/stig/
  mkdir -p /opt/so/saltstack/local/salt/stig/
  chown socore:socore /opt/so/saltstack/local/salt/stig/
  touch /opt/so/saltstack/local/pillar/stig/adv_stig.sls
  touch /opt/so/saltstack/local/pillar/stig/soc_stig.sls

  # the file_roots need to be update due to salt 3006.6 upgrade not allowing symlinks outside the file_roots
  # put new so-yaml in place
  echo "Updating so-yaml"
  # \cp bypasses any cp alias so the copy is never interactive.
  \cp -v "$UPDATE_DIR/salt/manager/tools/sbin/so-yaml.py" "$DEFAULT_SALT_DIR/salt/manager/tools/sbin/"
  \cp -v "$UPDATE_DIR/salt/manager/tools/sbin/so-yaml.py" /usr/sbin/
  echo "Creating a backup of the salt-master config."
  # INSTALLEDVERSION is 2.4.40 at this point, but we want the backup to have the version
  # so was at prior to starting upgrade. use POSTVERSION here since it doesnt change until
  # post upgrade changes. POSTVERSION set to INSTALLEDVERSION at start of soup
  cp -v /etc/salt/master "/etc/salt/master.so-$POSTVERSION.bak"
  echo "Adding /opt/so/rules to file_roots in /etc/salt/master using so-yaml"
  so-yaml.py append /etc/salt/master file_roots.base /opt/so/rules/nids
  echo "Moving Suricata rules"
  mkdir /opt/so/rules/nids/suri
  chown socore:socore /opt/so/rules/nids/suri
  mv -v /opt/so/rules/nids/*.rules /opt/so/rules/nids/suri/.

  echo "Adding /nsm/elastic-fleet/artifacts to file_roots in /etc/salt/master using so-yaml"
  so-yaml.py append /etc/salt/master file_roots.base  /nsm/elastic-fleet/artifacts

  INSTALLEDVERSION=2.4.50
}

up_to_2.4.60() {
  # Suricata's classification.config now lives in the local salt tree.
  local class_dir=/opt/so/saltstack/local/salt/suricata/classification
  echo "Creating directory to store Suricata classification.config"
  mkdir -vp "$class_dir"
  chown socore:socore "$class_dir"

  INSTALLEDVERSION=2.4.60
}

up_to_2.4.70() {
  # 2.4.70 migrations, each implemented by a dedicated helper:
  # Playbook -> Detections, idstools -> built-in Suricata rule management,
  # telemetry opt-in/out, and (testing only) detection test pillar values.
  playbook_migration
  suricata_idstools_migration
  toggle_telemetry
  add_detection_test_pillars

  INSTALLEDVERSION=2.4.70
}

up_to_2.4.80() {
  # 2.4.80 migration: clean up the unused phases pillar and bootstrap the
  # Kafka pillar files with a generated cluster id and password.
  phases_pillar_2_4_80
  # Kafka configuration changes

  # Global pipeline changes to REDIS or KAFKA
  echo "Removing global.pipeline pillar configuration"
  sed -i '/pipeline:/d' /opt/so/saltstack/local/pillar/global/soc_global.sls
  # Kafka pillars
  mkdir -p /opt/so/saltstack/local/pillar/kafka
  touch /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls
  touch /opt/so/saltstack/local/pillar/kafka/adv_kafka.sls
  # get_random_value comes from so-common.
  echo 'kafka: ' > /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls
  kafka_cluster_id=$(get_random_value 22)
  echo '  cluster_id: '$kafka_cluster_id >> /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls
  kafkapass=$(get_random_value)
  echo '  password: '$kafkapass >> /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls

  INSTALLEDVERSION=2.4.80
}

up_to_2.4.90() {
  # 2.4.90 migration: move kafka.password under kafka.config.password and
  # add a generated truststore password, tidying the SOC UI layout.
  kafkatrust=$(get_random_value)
  # rearranging the kafka pillar to reduce clutter in SOC UI
  kafkasavedpass=$(so-yaml.py get /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.password)
  # Keep only the first line of the extracted value.
  kafkatrimpass=$(echo "$kafkasavedpass" | sed -n '1 p' )
  echo "Making changes to the Kafka pillar layout"
  so-yaml.py remove /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.password
  so-yaml.py add /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.config.password "$kafkatrimpass"
  so-yaml.py add /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.config.trustpass "$kafkatrust"

  INSTALLEDVERSION=2.4.90
}

up_to_2.4.100() {
  # No pre-upgrade work for this release; just advance the marker.
  printf '%s\n' "Nothing to do for 2.4.100"

  INSTALLEDVERSION=2.4.100
}

up_to_2.4.110() {
  # No pre-upgrade work for this release; just advance the marker.
  printf '%s\n' "Nothing to do for 2.4.110"

  INSTALLEDVERSION=2.4.110
}

up_to_2.4.111() {
  # No pre-upgrade work for this release; just advance the marker.
  printf '%s\n' "Nothing to do for 2.4.111"

  INSTALLEDVERSION=2.4.111
}

up_to_2.4.120() {
  # 2.4.120 migration: seed the hydra pillar (see add_hydra_pillars) and
  # create empty pillar files for the new versionlock state.
  add_hydra_pillars

  # this is needed for the new versionlock state
  mkdir -p /opt/so/saltstack/local/pillar/versionlock
  touch /opt/so/saltstack/local/pillar/versionlock/adv_versionlock.sls /opt/so/saltstack/local/pillar/versionlock/soc_versionlock.sls


  INSTALLEDVERSION=2.4.120
}

up_to_2.4.130() {
  # Drop the stale Elastic Defend bootstrap config and make sure the
  # NetworkManager override allowing nmcli access to other devices exists.
  local defend_cfg=/opt/so/conf/elastic-fleet/integrations/endpoints-initial/elastic-defend-endpoints.json
  local nm_override=/etc/NetworkManager/conf.d/10-globally-managed-devices.conf

  rm -f -- "$defend_cfg"
  touch "$nm_override"

  INSTALLEDVERSION=2.4.130
}

up_to_2.4.140() {
  # No pre-upgrade work for this release; just advance the marker.
  printf '%s\n' "Nothing to do for 2.4.140"

  INSTALLEDVERSION=2.4.140
}

up_to_2.4.141() {
  # No pre-upgrade work for this release; just advance the marker.
  printf '%s\n' "Nothing to do for 2.4.141"

  INSTALLEDVERSION=2.4.141
}

up_to_2.4.150() {
  # Lower the refresh interval on any existing Detection indices.
  echo "If the Detection indices exists, update the refresh_interval"
  so-elasticsearch-query so-detection*/_settings -X PUT -d '{"index":{"refresh_interval":"1s"}}'

  INSTALLEDVERSION=2.4.150
}

up_to_2.4.160() {
  # No pre-upgrade work for this release; just advance the marker.
  printf '%s\n' "Nothing to do for 2.4.160"

  INSTALLEDVERSION=2.4.160
}

up_to_2.4.170() {
  # Seed empty advanced/SOC pillar files for the new virtualization states.
  echo "Creating pillar files for virtualization feature"

  states=("hypervisor" "vm" "libvirt")

  local pillar_root=/opt/so/saltstack/local/pillar
  for state in "${states[@]}"; do
    mkdir -p "$pillar_root/$state"
    touch "$pillar_root/$state/adv_$state.sls" "$pillar_root/$state/soc_$state.sls"
  done

  INSTALLEDVERSION=2.4.170
}

up_to_2.4.180() {
  # No pre-upgrade work for this release; just advance the marker.
  printf '%s\n' "Nothing to do for 2.4.180"
  INSTALLEDVERSION=2.4.180
}

up_to_2.4.190() {
  # No pre-upgrade work for this release; just advance the marker.
  printf '%s\n' "Nothing to do for 2.4.190"
  INSTALLEDVERSION=2.4.190
}

up_to_2.4.200() {
  # Preserve the idstools configuration before the 2.4.200 removal.
  echo "Backing up idstools config..."
  suricata_idstools_removal_pre

  # Marker consumed later to retain the fleet logstash config pillar.
  touch /opt/so/state/esfleet_logstash_config_pillar

  INSTALLEDVERSION=2.4.200
}

up_to_2.4.201() {
  # No pre-upgrade work for this release; just advance the marker.
  printf '%s\n' "Nothing to do for 2.4.201"

  INSTALLEDVERSION=2.4.201
}

up_to_2.4.210() {
  # 2.4.210 migration: fetch new Elastic Agent files, create the CA pillar,
  # and apply the salt master auth-version workaround from the update source.
  # Elastic Update for this release, so download Elastic Agent files
  determine_elastic_agent_upgrade
  create_ca_pillar
  # This state is used to deal with the breaking change introduced in 3006.17 - https://docs.saltproject.io/en/3006/topics/releases/3006.17.html
  # This is the only way the state is called so we can use concurrent=True
  salt-call state.apply salt.master.add_minimum_auth_version --file-root=$UPDATE_DIR/salt --local concurrent=True
  INSTALLEDVERSION=2.4.210
}

up_to_2.4.211() {
  # No migration actions are required for this release; just advance the
  # version marker used by the sequential upgrade loop.
  printf '%s\n' "Nothing to do for 2.4.211"
  INSTALLEDVERSION=2.4.211
}

add_hydra_pillars() {
  # Create the hydra pillar files and seed soc_hydra.sls with a freshly
  # generated system secret and pairwise-subject salt.
  local pillar_dir=/opt/so/saltstack/local/pillar/hydra
  mkdir -p "$pillar_dir"
  touch "$pillar_dir/soc_hydra.sls"
  # soc_hydra.sls holds secrets, so restrict its permissions.
  chmod 660 "$pillar_dir/soc_hydra.sls"
  touch "$pillar_dir/adv_hydra.sls"
  HYDRAKEY=$(get_random_value)
  HYDRASALT=$(get_random_value)
  cat > "$pillar_dir/soc_hydra.sls" << EOF
hydra:
  config:
    secrets:
      system:
        - '$HYDRAKEY'
    oidc:
        subject_identifiers:
            pairwise:
                salt: '$HYDRASALT'

EOF
}

add_detection_test_pillars() {
  # Seed detection-engine pillar overrides used by automated testing.
  # No-op unless SOUP_INTERNAL_TESTING is set in the environment.
  if [[ -z "$SOUP_INTERNAL_TESTING" ]]; then
    return 0
  fi
  echo "Adding detection pillar values for automated testing"
  local soc_pillar=/opt/so/saltstack/local/pillar/soc/soc_soc.sls
  so-yaml.py add "$soc_pillar" soc.config.server.modules.elastalertengine.allowRegex SecurityOnion
  so-yaml.py add "$soc_pillar" soc.config.server.modules.elastalertengine.failAfterConsecutiveErrorCount 1
  so-yaml.py add "$soc_pillar" soc.config.server.modules.strelkaengine.allowRegex "EquationGroup_Toolset_Apr17__ELV_.*"
  so-yaml.py add "$soc_pillar" soc.config.server.modules.strelkaengine.failAfterConsecutiveErrorCount 1
  so-yaml.py add "$soc_pillar" soc.config.server.modules.suricataengine.allowRegex "(200033\\d|2100538|2102466)"
  so-yaml.py add "$soc_pillar" soc.config.server.modules.suricataengine.failAfterConsecutiveErrorCount 1
}

toggle_telemetry() {
  # Interactively offer to keep SOC telemetry enabled during the upgrade.
  # Skipped entirely for unattended runs ($UNATTENDED set) and for airgap
  # installs (is_airgap == 0), where telemetry does not apply.
  if [[ -z $UNATTENDED && $is_airgap -ne 0 ]]; then
    cat << ASSIST_EOF

--------------- SOC Telemetry ---------------

The Security Onion development team could use your help! Enabling SOC
Telemetry will help the team understand which UI features are being
used and enables informed prioritization of future development.

Adjust this setting at anytime via the SOC Configuration screen.

Documentation: https://securityonion.net/docs/telemetry

ASSIST_EOF

    echo -n "Continue the upgrade with SOC Telemetry enabled [Y/n]? "

    read -r input
    # Lowercase the answer and trim surrounding whitespace (xargs echo -n).
    input=$(echo "${input,,}" | xargs echo -n)
    echo ""
    # An empty answer defaults to "yes" (telemetry stays enabled).
    if [[ ${#input} -eq 0 || "$input" == "yes" || "$input" == "y" || "$input" == "yy" ]]; then
      echo "Thank you for helping improve Security Onion!"
    else
      # Persist the opt-out in the SOC pillar; abort the upgrade if that fails.
      if so-yaml.py replace /opt/so/saltstack/local/pillar/soc/soc_soc.sls soc.telemetryEnabled false; then
        echo "Disabled SOC Telemetry."
      else
        fail "Failed to disable SOC Telemetry; aborting."
      fi
    fi
    echo ""
  fi
}

rollover_index() {
  # Trigger an index rollover for the given index/alias if it exists.
  # $1 - index name or alias to roll over
  # Outputs a status message for each outcome; never returns non-zero.
  local idx=$1
  local exists rollover

  # so-elasticsearch-query is invoked with -w "%{http_code}" so it prints only
  # the HTTP status. Quote "$idx" so a name containing glob characters is not
  # expanded against the current directory, and string-compare the code so an
  # empty response (curl failure) cannot break the test.
  exists=$(so-elasticsearch-query "$idx" -o /dev/null -w "%{http_code}")
  if [[ "$exists" == "200" ]]; then
    rollover=$(so-elasticsearch-query "$idx/_rollover" -o /dev/null -w "%{http_code}" -XPOST)

    if [[ "$rollover" == "200" ]]; then
      echo "Successfully triggered rollover for $idx..."
    else
      echo "Could not trigger rollover for $idx..."
    fi
  else
    echo "Could not find index $idx..."
  fi
}

suricata_idstools_migration() {
  # For 2.4.70: back up idstools/suricata configuration and signal SOC to
  # migrate NIDS rule management into the new Detections module.
  #
  # Bug fix: the original pattern `rsync ...; if [[ $? -eq 0 ]]` never reached
  # the friendly fail() message under `set -e` (the failing rsync aborted soup
  # first). Testing the rsync directly in the `if` keeps -e from firing and
  # lets fail() report a useful error.

  # Back up the pillars for idstools.
  mkdir -p /nsm/backup/detections-migration/idstools
  if rsync -av /opt/so/saltstack/local/pillar/idstools/* /nsm/backup/detections-migration/idstools; then
    echo "IDStools configuration has been backed up."
  else
    fail "Error: rsync failed to copy the files. IDStools configuration has not been backed up."
  fi

  # Back up thresholds.
  mkdir -p /nsm/backup/detections-migration/suricata
  if rsync -av /opt/so/saltstack/local/salt/suricata/thresholding /nsm/backup/detections-migration/suricata; then
    echo "Suricata thresholds have been backed up."
  else
    fail "Error: rsync failed to copy the files. Thresholds have not been backed up."
  fi

  # Back up local rules, plus any legacy idstools local.rules if present.
  mkdir -p /nsm/backup/detections-migration/suricata/local-rules
  rsync -av /opt/so/rules/nids/suri/local.rules /nsm/backup/detections-migration/suricata/local-rules
  if [[ -f /opt/so/saltstack/local/salt/idstools/rules/local.rules ]]; then
      rsync -av /opt/so/saltstack/local/salt/idstools/rules/local.rules /nsm/backup/detections-migration/suricata/local-rules/local.rules.bak
  fi

  # Tell SOC to migrate: SOC watches for this marker file.
  mkdir -p /opt/so/conf/soc/migrations
  echo "0" > /opt/so/conf/soc/migrations/suricata-migration-2.4.70
  chown -R socore:socore /opt/so/conf/soc/migrations
}

playbook_migration() {
  # Migrate legacy Playbook (Redmine/MySQL/Soctopus) content into the new SOC
  # Detections module: back up active Elastalert rules, export Sigma rules and
  # filters from the Playbook database, then stop and remove the Playbook
  # services and their helper scripts.
  # Start SOC Detections migration
  mkdir -p /nsm/backup/detections-migration/{suricata,sigma/rules,elastalert}

  # Remove cronjobs
  crontab -l | grep -v 'so-playbook-sync_cron' | crontab -
  crontab -l | grep -v 'so-playbook-ruleupdate_cron' | crontab -

  # Only export content if Playbook was enabled on at least one minion.
  if grep -A 1 'playbook:'  /opt/so/saltstack/local/pillar/minions/* | grep -q 'enabled: True'; then

    # Check for active Elastalert rules
    active_rules_count=$(find /opt/so/rules/elastalert/playbook/ -type f \( -name "*.yaml" -o -name "*.yml" \) | wc -l)

    if [[ "$active_rules_count" -gt 0 ]]; then
        # Prompt the user to press ENTER if active Elastalert rules found
        echo
        echo "$active_rules_count Active Elastalert/Playbook rules found."
        echo "In preparation for the new Detections module, they will be backed up and then disabled."
        echo
        echo "Press ENTER to proceed."
        echo
        # Read user input
        read -r

        echo "Backing up the Elastalert rules..."
        rsync -av --ignore-missing-args --stats /opt/so/rules/elastalert/playbook/*.{yaml,yml} /nsm/backup/detections-migration/elastalert/

        # Verify that rsync completed successfully
        if [[ $? -eq 0 ]]; then
            # Delete the Elastlaert rules
            rm -f /opt/so/rules/elastalert/playbook/*.yaml
            echo "Active Elastalert rules have been backed up."
        else
            fail "Error: rsync failed to copy the files. Active Elastalert rules have not been backed up."
        fi
    fi

    echo
    echo "Exporting Sigma rules from Playbook..."
    # Root DB password lives in the secrets pillar as "mysql: <password>".
    MYSQLPW=$(awk '/mysql:/ {print $2}' /opt/so/saltstack/local/pillar/secrets.sls)

    # NOTE(review): the while loop is the last stage of a pipeline, so it runs
    # in a subshell; `|| fail` fires on the loop's exit status — a docker exec
    # failure is presumed to surface via pipefail, confirm pipefail is enabled.
    docker exec so-mysql sh -c "exec mysql -uroot -p${MYSQLPW} -D playbook -sN -e \"SELECT id, value FROM custom_values WHERE value LIKE '%View Sigma%'\"" | while read -r id value; do
        echo -e "$value" > "/nsm/backup/detections-migration/sigma/rules/$id.yaml"
    done || fail "Failed to export Sigma rules..."

    echo
    echo "Exporting Sigma Filters from Playbook..."
    docker exec so-mysql sh -c "exec mysql -uroot -p${MYSQLPW} -D playbook -sN -e \"SELECT issues.subject as title, custom_values.value as filter FROM issues JOIN custom_values ON issues.id = custom_values.customized_id WHERE custom_values.value LIKE '%sofilter%'\"" > /nsm/backup/detections-migration/sigma/custom-filters.txt || fail "Failed to export Custom Sigma Filters."

    echo
    echo "Backing up Playbook database..."
    docker exec so-mysql sh -c "mysqldump -uroot -p${MYSQLPW} --databases playbook > /tmp/playbook-dump" || fail "Failed to dump Playbook database."
    docker cp so-mysql:/tmp/playbook-dump /nsm/backup/detections-migration/sigma/playbook-dump.sql || fail "Failed to backup Playbook database."
  fi

  echo
  echo "Stopping Playbook services & cleaning up..."
  # Stop each Playbook-related container only if it is currently running.
  for container in so-playbook so-mysql so-soctopus; do
      if [ -n "$(docker ps -q -f name=^${container}$)" ]; then
          docker stop $container
      fi
  done
  # Drop the retired services from so-status and remove their helper scripts.
  sed -i '/so-playbook\|so-soctopus\|so-mysql/d' /opt/so/conf/so-status/so-status.conf
  rm -f /usr/sbin/so-playbook-* /usr/sbin/so-soctopus-* /usr/sbin/so-mysql-*

  echo
  echo "Playbook Migration is complete...."
}

suricata_idstools_removal_pre() {
# For SOUPs beginning with 2.4.200 - pre SOUP checks
# Blocks Suricata ruleset syncing until the operator has migrated any custom
# rulesets, prepares the local rules directory, backs up the legacy idstools
# config files, and snapshots the so-detection index via reindex.

# Create syncBlock file (uid/gid 939 is presumed to be socore — confirm)
install -d -o 939 -g 939 -m 755 /opt/so/conf/soc/fingerprints
install -o 939 -g 939 -m 644 /dev/null /opt/so/conf/soc/fingerprints/suricataengine.syncBlock
cat > /opt/so/conf/soc/fingerprints/suricataengine.syncBlock << EOF
Suricata ruleset sync is blocked until this file is removed. **CRITICAL** Make sure that you have manually added any custom Suricata rulesets via SOC config before removing this file - review the documentation for more details: https://securityonion.net/docs/nids
EOF

# Remove possible symlink & create salt local rules dir
[ -L /opt/so/saltstack/local/salt/suricata/rules ] && rm -f /opt/so/saltstack/local/salt/suricata/rules
install -d -o 939 -g 939 /opt/so/saltstack/local/salt/suricata/rules/ || echo "Failed to create Suricata local rules directory"

# Backup custom rules & overrides
mkdir -p /nsm/backup/detections-migration/2-4-200
cp /usr/sbin/so-rule-update /nsm/backup/detections-migration/2-4-200
cp /opt/so/conf/idstools/etc/rulecat.conf /nsm/backup/detections-migration/2-4-200

# Backup so-detection index via reindex
# The sos-backup template pins single-shard/no-replica settings for the backup.
echo "Creating sos-backup index template..."
template_result=$(/sbin/so-elasticsearch-query '_index_template/sos-backup' -X PUT \
    --retry 5 --retry-delay 15 --retry-all-errors \
    -d '{"index_patterns":["sos-backup-*"],"priority":501,"template":{"settings":{"index":{"number_of_replicas":0,"number_of_shards":1}}}}')

if [[ -z "$template_result" ]] || ! echo "$template_result" | jq -e '.acknowledged == true' > /dev/null 2>&1; then
    echo "Error: Failed to create sos-backup index template"
    echo "$template_result"
    exit 1
fi

# Timestamped destination index so repeated soup runs never collide.
BACKUP_INDEX="sos-backup-detection-$(date +%Y%m%d-%H%M%S)"
echo "Backing up so-detection index to $BACKUP_INDEX..."
reindex_result=$(/sbin/so-elasticsearch-query '_reindex?wait_for_completion=true' \
    --retry 5 --retry-delay 15 --retry-all-errors \
    -X POST -d "{\"source\": {\"index\": \"so-detection\"}, \"dest\": {\"index\": \"$BACKUP_INDEX\"}}")

# Three outcomes: success (.created present), source index missing (OK to
# skip), anything else is fatal.
if [[ -z "$reindex_result" ]]; then
    echo "Error: Backup of detections failed - no response from Elasticsearch"
    exit 1
elif echo "$reindex_result" | jq -e '.created >= 0' > /dev/null 2>&1; then
    echo "Backup complete: $(echo "$reindex_result" | jq -r '.created') documents copied"
elif echo "$reindex_result" | grep -q "index_not_found_exception"; then
    echo "so-detection index does not exist, skipping backup"
else
    echo "Error: Backup of detections failed"
    echo "$reindex_result"
    exit 1
fi

}

suricata_idstools_removal_post() {
# For SOUPs beginning with 2.4.200 - post SOUP checks
# Fingerprints the legacy idstools config files (so-rule-update, rulecat.conf)
# against known-default hashes. If everything matches a default, the Suricata
# engine syncBlock created pre-soup is removed; otherwise it stays in place
# with notes for the operator. Finally the idstools container, scripts, cron
# entry and pillar/file_roots references are removed.
# NOTE(review): the helper functions defined below become globally visible
# after this function runs — bash has no function-local functions.

echo "Checking idstools configuration for custom modifications..."

# Normalize and hash file content for consistent comparison
# Args: $1 - file path
# Outputs: SHA256 hash to stdout
# Returns: 0 on success, 1 on failure
hash_normalized_file() {
    local file="$1"

    if [[ ! -r "$file" ]]; then
        return 1
    fi

    # Normalization: strip leading/trailing whitespace, drop blank lines, and
    # canonicalize the manager hostname in idstools --url arguments so the
    # same logical config hashes identically on every grid.
    # Ensure trailing newline for consistent hashing regardless of source file
    { sed -E \
        -e 's/^[[:space:]]+//; s/[[:space:]]+$//' \
        -e '/^$/d' \
        -e 's|--url=http://[^:]+:7788|--url=http://MANAGER:7788|' \
        "$file"; echo; } | sed '/^$/d' | sha256sum | awk '{print $1}'
}

# Known-default hashes for so-rule-update (ETOPEN ruleset)
KNOWN_SO_RULE_UPDATE_HASHES=(
    # 2.4.100+ (suricata 7.0.3, non-airgap)
    "5fbd067ced86c8ec72ffb7e1798aa624123b536fb9d78f4b3ad8d3b45db1eae7"  # 2.4.100-2.4.190 non-Airgap
    # 2.4.90+ airgap (same for 2.4.90 and 2.4.100+)
    "61f632c55791338c438c071040f1490066769bcce808b595b5cc7974a90e653a"  # 2.4.90+ Airgap
    # 2.4.90 (suricata 6.0, non-airgap, comment inside proxy block)
    "0380ec52a05933244ab0f0bc506576e1d838483647b40612d5fe4b378e47aedd"  # 2.4.90 non-Airgap
    # 2.4.10-2.4.80 (suricata 6.0, non-airgap, comment outside proxy block)
    "b6e4d1b5a78d57880ad038a9cd2cc6978aeb2dd27d48ea1a44dd866a2aee7ff4"  # 2.4.10-2.4.80 non-Airgap
    # 2.4.10-2.4.80 airgap
    "b20146526ace2b142fde4664f1386a9a1defa319b3a1d113600ad33a1b037dad"  # 2.4.10-2.4.80 Airgap
    # 2.4.5 and earlier (no pidof check, non-airgap)
    "d04f5e4015c348133d28a7840839e82d60009781eaaa1c66f7f67747703590dc"  # 2.4.5 non-Airgap
)

# Known-default hashes for rulecat.conf
KNOWN_RULECAT_CONF_HASHES=(
    # 2.4.100+ (suricata 7.0.3)
    "302e75dca9110807f09ade2eec3be1fcfc8b2bf6cf2252b0269bb72efeefe67e"  # 2.4.100-2.4.190 without SURICATA md_engine
    "8029b7718c324a9afa06a5cf180afde703da1277af4bdd30310a6cfa3d6398cb"  # 2.4.100-2.4.190 with SURICATA md_engine
    # 2.4.80-2.4.90 (suricata 6.0, with --suricata-version and --output)
    "4d8b318e6950a6f60b02f307cf27c929efd39652990c1bd0c8820aa8a307e1e7"  # 2.4.80-2.4.90 without SURICATA md_engine
    "a1ddf264c86c4e91c81c5a317f745a19466d4311e4533ec3a3c91fed04c11678"  # 2.4.80-2.4.90 with SURICATA md_engine
    # 2.4.50-2.4.70 (/suri/ path, no --suricata-version)
    "86e3afb8d0f00c62337195602636864c98580a13ca9cc85029661a539deae6ae"  # 2.4.50-2.4.70 without SURICATA md_engine
    "5a97604ca5b820a10273a2d6546bb5e00c5122ca5a7dfe0ba0bfbce5fc026f4b"  # 2.4.50-2.4.70 with SURICATA md_engine
    # 2.4.20-2.4.40 (/nids/ path without /suri/)
    "d098ea9ecd94b5cca35bf33543f8ea8f48066a0785221fabda7fef43d2462c29"  # 2.4.20-2.4.40 without SURICATA md_engine
    "9dbc60df22ae20d65738ba42e620392577857038ba92278e23ec182081d191cd"  # 2.4.20-2.4.40 with SURICATA md_engine
    # 2.4.5-2.4.10 (/sorules/ path for extraction/filters)
    "490f6843d9fca759ee74db3ada9c702e2440b8393f2cfaf07bbe41aaa6d955c3"  # 2.4.5-2.4.10 with SURICATA md_engine
    # Note: 2.4.5-2.4.10 without SURICATA md_engine has same hash as 2.4.20-2.4.40 without SURICATA md_engine
)

# Check a config file against known hashes
# Args: $1 - file path, $2 - array name of known hashes
# Returns: 0 when the file matches a known default, 1 otherwise (missing,
# unreadable, or customized); non-default cases are appended to the syncBlock.
check_config_file() {
    local file="$1"
    local known_hashes_array="$2"
    local file_display_name=$(basename "$file")

    if [[ ! -f "$file" ]]; then
        echo "Warning: $file not found"
        echo "$file_display_name not found - manual verification required" >> /opt/so/conf/soc/fingerprints/suricataengine.syncBlock
        return 1
    fi

    echo "Hashing $file..."
    local file_hash
    if ! file_hash=$(hash_normalized_file "$file"); then
        echo "Warning: Could not read $file"
        echo "$file_display_name not readable - manual verification required" >> /opt/so/conf/soc/fingerprints/suricataengine.syncBlock
        return 1
    fi

    echo "  Hash: $file_hash"

    # Check if hash matches any known default
    # (nameref: iterate the caller-supplied array by name; requires bash 4.3+)
    local -n known_hashes=$known_hashes_array
    for known_hash in "${known_hashes[@]}"; do
        if [[ "$file_hash" == "$known_hash" ]]; then
            echo "  Matches known default configuration"
            return 0
        fi
    done

    # No match - custom configuration detected
    echo "Does not match known default - custom configuration detected"
    echo "Custom $file_display_name detected (hash: $file_hash)" >> /opt/so/conf/soc/fingerprints/suricataengine.syncBlock

    # If this is so-rule-update, check for ETPRO license code and write out to the syncBlock file
    # If ETPRO is enabled, the license code already exists in the so-rule-update script, this is just making it easier to migrate
    if [[ "$file_display_name" == "so-rule-update" ]]; then
        local etpro_code
        etpro_code=$(grep -oP '\-\-etpro=\K[0-9a-fA-F]+' "$file" 2>/dev/null) || true
        if [[ -n "$etpro_code" ]]; then
            echo "ETPRO code found: $etpro_code" >> /opt/so/conf/soc/fingerprints/suricataengine.syncBlock
        fi
    fi

    return 1
}

# Check so-rule-update and rulecat.conf
SO_RULE_UPDATE="/usr/sbin/so-rule-update"
RULECAT_CONF="/opt/so/conf/idstools/etc/rulecat.conf"

custom_found=0

check_config_file "$SO_RULE_UPDATE" "KNOWN_SO_RULE_UPDATE_HASHES" || custom_found=1
check_config_file "$RULECAT_CONF" "KNOWN_RULECAT_CONF_HASHES" || custom_found=1

# Check for ETPRO rules on airgap systems
if [[ $is_airgap -eq 0 ]] && grep -q 'ETPRO ' /nsm/rules/suricata/emerging-all.rules 2>/dev/null; then
    echo "ETPRO rules detected on airgap system - custom configuration"
    echo "ETPRO rules detected on Airgap in /nsm/rules/suricata/emerging-all.rules" >> /opt/so/conf/soc/fingerprints/suricataengine.syncBlock
    custom_found=1
fi

# If no custom configs found, remove syncBlock
if [[ $custom_found -eq 0 ]]; then
    echo "idstools migration completed successfully - removing Suricata engine syncBlock"
    rm -f /opt/so/conf/soc/fingerprints/suricataengine.syncBlock
else
    echo "Custom idstools configuration detected - syncBlock remains in place"
    echo "Review /opt/so/conf/soc/fingerprints/suricataengine.syncBlock for details"
fi

echo "Cleaning up idstools"
echo "Stopping and removing the idstools container..."
if [ -n "$(docker ps -q -f name=^so-idstools$)" ]; then
    # Capture the image name before removing the container so we can rmi it.
    image_name=$(docker ps -a --filter name=^so-idstools$ --format '{{.Image}}' 2>/dev/null || true)
    docker stop so-idstools || echo "Warning: failed to stop so-idstools container"
    docker rm so-idstools || echo "Warning: failed to remove so-idstools container"

    if [[ -n "$image_name" ]]; then
        echo "Removing idstools image: $image_name"
        docker rmi "$image_name" || echo "Warning: failed to remove image $image_name"
    fi
fi

echo "Removing idstools symlink and scripts..."
rm -rf /usr/sbin/so-idstools*
sed -i '/^#\?so-idstools$/d' /opt/so/conf/so-status/so-status.conf
crontab -l | grep -v 'so-rule-update' | crontab -

# Backup the salt master config & manager pillar before editing it
# NOTE(review): $MINIONID is presumed set by so-common — confirm.
cp /opt/so/saltstack/local/pillar/minions/$MINIONID.sls /nsm/backup/detections-migration/2-4-200/
cp /etc/salt/master  /nsm/backup/detections-migration/2-4-200/
so-yaml.py remove /opt/so/saltstack/local/pillar/minions/$MINIONID.sls idstools
so-yaml.py removelistitem /etc/salt/master file_roots.base /opt/so/rules/nids

}

determine_elastic_agent_upgrade() {
  # Airgap grids stage Elastic Agent artifacts from the mounted update media;
  # everything else downloads them.
  if [[ $is_airgap -eq 0 ]]; then
    update_elastic_agent_airgap
    return
  fi
  # Best-effort download: the new elasticsearch defaults.yaml is not yet in
  # /opt/so/saltstack/default/salt/elasticsearch, so tolerate failures here.
  set +e
  update_elastic_agent "$UPDATE_DIR"
  set -e
}

update_elastic_agent_airgap() {
  # Stage Elastic Agent artifacts from the mounted airgap update media.
  # NOTE(review): get_elastic_agent_vars is presumed to set ELASTIC_AGENT_FILE
  # and ELASTIC_AGENT_EXPANSION_DIR — confirm in so-image-common.
  get_elastic_agent_vars "/tmp/soagupdate/SecurityOnion"
  rsync -av /tmp/soagupdate/fleet/* /nsm/elastic-fleet/artifacts/
  tar -xf "$ELASTIC_AGENT_FILE" -C "$ELASTIC_AGENT_EXPANSION_DIR"
}

verify_upgradespace() {
  # Require at least 10G free on the root filesystem.
  # df -BG reports in whole gigabytes; drop the header row (contains "Avail")
  # and the trailing unit character from the Avail column.
  CURRENTSPACE=$(df -BG / | awk '!/Avail/ {print substr($4, 1, length($4)-1)}')
  if [ "$CURRENTSPACE" -ge "10" ]; then
    return 0
  fi
  echo "You are low on disk space."
  return 1
}

upgrade_space() {
  # Verify free space, trying a docker image cleanup once before giving up.
  if verify_upgradespace; then
    echo "You have enough space for upgrade. Proceeding with soup."
    return
  fi
  clean_dockers
  if ! verify_upgradespace; then
    echo "There is not enough space to perform the upgrade. Please free up space and try again"
    exit 0
  fi
}

unmount_update() {
  # Leave the mount point before unmounting so our own working directory
  # cannot keep /tmp/soagupdate busy.
  cd /tmp
  umount /tmp/soagupdate
}

update_airgap_rules() {
  # Copy the rules over to update them for airgap.
  rsync -a --delete $UPDATE_DIR/agrules/suricata/ /nsm/rules/suricata/etopen/
  rsync -a $UPDATE_DIR/agrules/detect-sigma/* /nsm/rules/detect-sigma/
  rsync -a $UPDATE_DIR/agrules/detect-yara/* /nsm/rules/detect-yara/
  # Copy the securityonion-resources repo over for SOC Detection Summaries and checkout the published summaries branch
  rsync -a --delete --chown=socore:socore $UPDATE_DIR/agrules/securityonion-resources /opt/so/conf/soc/ai_summary_repos
  # Mark the repo safe for git since it is owned by socore but read as root.
  git config --global --add safe.directory /opt/so/conf/soc/ai_summary_repos/securityonion-resources
  git -C /opt/so/conf/soc/ai_summary_repos/securityonion-resources checkout generated-summaries-published
  # Copy the securityonion-resources repo over to nsm
  rsync -a $UPDATE_DIR/agrules/securityonion-resources/* /nsm/securityonion-resources/
}

update_airgap_repo() {
  # Sync the bundled package repo onto the manager and rebuild repo metadata.
  # NOTE(review): $AGREPO is presumed set elsewhere (so-common or caller) to
  # the mounted airgap repo path — confirm.
  # Update the files in the repo
  echo "Syncing new updates to /nsm/repo"
  rsync -a $AGREPO/* /nsm/repo/
  echo "Creating repo"
  dnf -y install yum-utils createrepo_c
  createrepo /nsm/repo
}

update_elasticsearch_index_settings() {
  # Update managed SOC indices so their live settings match the latest index
  # template: replica auto-expansion plus the correct ILM lifecycle policy.
  for idx in "so-detection" "so-detectionhistory" "so-case" "so-casehistory"; do
    # History indices share their parent index's ILM policy.
    ilm_name=$idx
    if [ "$idx" = "so-detectionhistory" ]; then
      ilm_name="so-detection"
    elif [ "$idx" = "so-casehistory" ]; then
      ilm_name="so-case"
    fi
    JSON_STRING=$( jq -n --arg ILM_NAME "$ilm_name" '{"settings": {"index.auto_expand_replicas":"0-2","index.lifecycle.name":($ILM_NAME + "-logs")}}')

    echo "Checking if index \"$idx\" exists"
    exists=$(curl -K /opt/so/conf/elasticsearch/curl.config -s -o /dev/null -w "%{http_code}" -k -L -H "Content-Type: application/json" "https://localhost:9200/$idx")
    # Bug fix: quote and string-compare the HTTP code. The original unquoted
    # numeric test ([ $exists -eq 200 ]) became a test syntax error whenever
    # curl produced no output (e.g. connection failure).
    if [ "$exists" = "200" ]; then
      echo "$idx index found..."
      echo "Updating $idx index settings"
      curl -K /opt/so/conf/elasticsearch/curl.config -s -k -L -H "Content-Type: application/json" "https://localhost:9200/$idx/_settings" -d "$JSON_STRING" -XPUT
      echo -e "\n"
    else
      echo -e "Skipping $idx... index does not exist\n"
    fi
  done
}

update_import_fleet_output() {
    # Refresh the so-manager_elasticsearch Fleet output policy with the current
    # internal CA fingerprint. If the policy cannot be fetched (e.g. Fleet not
    # configured on this node), the update is silently skipped.
    if output=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/outputs/so-manager_elasticsearch" --retry 3 --fail 2>/dev/null); then
        # Update the current config of so-manager_elasticsearch output policy in place (leaving any customizations like having changed the preset value from 'balanced' to 'performance')
        # DER-encode the internal CA cert and take its SHA256 as uppercase hex,
        # the format Fleet expects for ca_trusted_fingerprint.
        CAFINGERPRINT=$(openssl x509 -in /etc/pki/tls/certs/intca.crt -outform DER | sha256sum | cut -d' ' -f1 | tr '[:lower:]' '[:upper:]')
        # Drop the read-only id field and inject the fingerprint before PUT-ing back.
        updated_policy=$(jq --arg CAFINGERPRINT "$CAFINGERPRINT" '.item | (del(.id) | .ca_trusted_fingerprint = $CAFINGERPRINT)' <<< "$output")
        if curl -sK /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/outputs/so-manager_elasticsearch" -XPUT -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$updated_policy" --retry 3 --fail 2>/dev/null; then
            echo "Successfully updated so-manager_elasticsearch fleet output policy"
        else
            fail "Failed to update so-manager_elasticsearch fleet output policy"
        fi
    fi
}

update_default_logstash_output() {
    # Rebuild the grid-logstash Fleet output policy, preserving the currently
    # configured hosts/default flags while refreshing the TLS material.
    echo "Updating fleet logstash output policy grid-logstash"
    local JSON_STRING=""
    if logstash_policy=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs/so-manager_logstash" --retry 3 --retry-delay 10 --fail 2>/dev/null); then
        # Keep already configured hosts for this update, subsequent host updates come from so-elastic-fleet-outputs-update
        HOSTS=$(echo "$logstash_policy" | jq -r '.item.hosts')
        DEFAULT_ENABLED=$(echo "$logstash_policy" | jq -r '.item.is_default')
        DEFAULT_MONITORING_ENABLED=$(echo "$logstash_policy" | jq -r '.item.is_default_monitoring')
        LOGSTASHKEY=$(openssl rsa -in  /etc/pki/elasticfleet-logstash.key)
        LOGSTASHCRT=$(openssl x509 -in /etc/pki/elasticfleet-logstash.crt)
        LOGSTASHCA=$(openssl x509 -in  /etc/pki/tls/certs/intca.crt)
        JSON_STRING=$(jq -n \
            --argjson HOSTS "$HOSTS" \
            --arg DEFAULT_ENABLED "$DEFAULT_ENABLED" \
            --arg DEFAULT_MONITORING_ENABLED "$DEFAULT_MONITORING_ENABLED" \
            --arg LOGSTASHKEY "$LOGSTASHKEY" \
            --arg LOGSTASHCRT "$LOGSTASHCRT" \
            --arg LOGSTASHCA "$LOGSTASHCA" \
            '{"name":"grid-logstash","type":"logstash","hosts": $HOSTS,"is_default": $DEFAULT_ENABLED,"is_default_monitoring": $DEFAULT_MONITORING_ENABLED,"config_yaml":"","ssl":{"certificate": $LOGSTASHCRT,"certificate_authorities":[ $LOGSTASHCA ]},"secrets":{"ssl":{"key": $LOGSTASHKEY }}}')
    fi

    # Bug fix: the PUT previously ran even when the GET above failed, sending
    # an empty (or stale) body to Kibana. Skip the update instead; returning 0
    # keeps this non-fatal for the overall soup run.
    if [[ -z "$JSON_STRING" ]]; then
        echo "Could not retrieve existing grid-logstash output policy; skipping update"
        return 0
    fi

    if curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_logstash" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" --retry 3 --retry-delay 10 --fail; then
        echo "Successfully updated grid-logstash fleet output policy"
    fi
}

update_salt_mine() {
    # Refresh mine_functions data on every minion, in batches of 50 so the
    # master is not overwhelmed. Best-effort: unreachable minions are ignored.
    echo "Populating the mine with mine_functions for each host."
    set +e
    salt '*' mine.update -b 50
    set -e
}

update_version() {
  # Record the new grid version and hotfix in the OS state files and in the
  # global SOC pillar. Expects NEWVERSION/HOTFIXVERSION to be set by
  # upgrade_check.
  # Update the version to the latest
  echo "Updating the Security Onion version file."
  # Quote the expansions so an unexpected value cannot word-split or glob.
  echo "$NEWVERSION" > /etc/soversion
  echo "$HOTFIXVERSION" > /etc/sohotfix
  sed -i "s/soversion:.*/soversion: $NEWVERSION/" /opt/so/saltstack/local/pillar/global/soc_global.sls
}

upgrade_check() {
  # Decide whether this run is a full upgrade, a hotfix, or nothing at all.
  # Sets NEWVERSION/HOTFIXVERSION/CURRENTHOTFIX/is_hotfix; exits 0 when the
  # grid is already fully up to date.
  NEWVERSION=$(cat "$UPDATE_DIR/VERSION")
  HOTFIXVERSION=$(cat "$UPDATE_DIR/HOTFIX")
  # Make sure the hotfix state file exists before reading it.
  if [ ! -f /etc/sohotfix ]; then
    touch /etc/sohotfix
  fi
  [[ -f /etc/sohotfix ]] && CURRENTHOTFIX=$(cat /etc/sohotfix)
  # A version mismatch means a full upgrade; hotfix logic only applies when
  # the base version already matches.
  if [ "$INSTALLEDVERSION" != "$NEWVERSION" ]; then
    is_hotfix=false
    return 0
  fi
  echo "Checking to see if there are hotfixes needed"
  if [ "$HOTFIXVERSION" == "$CURRENTHOTFIX" ]; then
    echo "You are already running the latest version of Security Onion."
    exit 0
  fi
  echo "We need to apply a hotfix"
  is_hotfix=true
}

upgrade_check_salt() {
  # Compare the running Salt version against the one pinned in the update's
  # master defaults; set UPGRADESALT=1 when they differ.
  NEWSALTVERSION=$(grep "version:" "$UPDATE_DIR/salt/salt/master.defaults.yaml" | grep -o "[0-9]\+\.[0-9]\+")
  if [ "$INSTALLEDSALTVERSION" != "$NEWSALTVERSION" ]; then
    echo "Salt needs to be upgraded to $NEWSALTVERSION."
    UPGRADESALT=1
  else
    echo "You are already running the correct version of Salt for Security Onion."
  fi
}

upgrade_salt() {
  # Upgrade Salt master/minion packages to $NEWSALTVERSION via the bundled
  # bootstrap script, removing and re-applying package version locks around
  # the upgrade. Exits soup if the post-upgrade version check fails.
  # NOTE(review): is_rpm/is_deb and $OS are presumed set by so-common — confirm.
  echo "Performing upgrade of Salt from $INSTALLEDSALTVERSION to $NEWSALTVERSION."
  echo ""
  # If rhel family
  if [[ $is_rpm ]]; then
    # Check if salt-cloud is installed
    if rpm -q salt-cloud &>/dev/null; then
      SALT_CLOUD_INSTALLED=true
    fi
    # Check if salt-cloud is configured
    if [[ -f /etc/salt/cloud.profiles.d/socloud.conf ]]; then
      SALT_CLOUD_CONFIGURED=true
    fi

    # Locks must be dropped first or the package update below is a no-op.
    echo "Removing yum versionlock for Salt."
    echo ""
    yum versionlock delete "salt"
    yum versionlock delete "salt-minion"
    yum versionlock delete "salt-master"
    # Remove salt-cloud versionlock if installed
    if [[ $SALT_CLOUD_INSTALLED == true ]]; then
      yum versionlock delete "salt-cloud"
    fi
    echo "Updating Salt packages."
    echo ""
    set +e
    # if oracle run with -r to ignore repos set by bootstrap
    if [[ $OS == 'oracle' ]]; then
      # Add -L flag only if salt-cloud is already installed
      if [[ $SALT_CLOUD_INSTALLED == true ]]; then
        run_check_net_err \
        "sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -r -L -F -M stable \"$NEWSALTVERSION\"" \
        "Could not update salt, please check $SOUP_LOG for details."
      else
        run_check_net_err \
        "sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -r -F -M stable \"$NEWSALTVERSION\"" \
        "Could not update salt, please check $SOUP_LOG for details."
      fi
    # if another rhel family variant we want to run without -r to allow the bootstrap script to manage repos
    else
      run_check_net_err \
      "sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -F -M stable \"$NEWSALTVERSION\"" \
      "Could not update salt, please check $SOUP_LOG for details."
    fi
    set -e
    # Re-pin the packages so routine yum updates cannot move Salt.
    echo "Applying yum versionlock for Salt."
    echo ""
    yum versionlock add "salt-0:$NEWSALTVERSION-0.*"
    yum versionlock add "salt-minion-0:$NEWSALTVERSION-0.*"
    yum versionlock add "salt-master-0:$NEWSALTVERSION-0.*"
    # Add salt-cloud versionlock if installed
    if [[ $SALT_CLOUD_INSTALLED == true ]]; then
      yum versionlock add "salt-cloud-0:$NEWSALTVERSION-0.*"
    fi
  # Else do Ubuntu things
  elif [[ $is_deb ]]; then
    # ensure these files don't exist when upgrading from 3006.9 to 3006.16
    rm -f /etc/apt/keyrings/salt-archive-keyring-2023.pgp /etc/apt/sources.list.d/salt.list
    echo "Removing apt hold for Salt."
    echo ""
    apt-mark unhold "salt-common"
    apt-mark unhold "salt-master"
    apt-mark unhold "salt-minion"
    echo "Updating Salt packages."
    echo ""
    set +e
    run_check_net_err \
    "sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -F -M stable \"$NEWSALTVERSION\"" \
    "Could not update salt, please check $SOUP_LOG for details."
    set -e
    echo "Applying apt hold for Salt."
    echo ""
    apt-mark hold "salt-common"
    apt-mark hold "salt-master"
    apt-mark hold "salt-minion"
  fi

  echo "Checking if Salt was upgraded."
  echo ""
  # Check that Salt was upgraded
  SALTVERSIONPOSTUPGRADE=$(salt --versions-report | grep Salt: | awk '{print $2}')
  if [[ "$SALTVERSIONPOSTUPGRADE" != "$NEWSALTVERSION" ]]; then
    echo "Salt upgrade failed. Check of indicators of failure in $SOUP_LOG."
    echo "Once the issue is resolved, run soup again."
    echo "Exiting."
    echo ""
    exit 1
  else
    # SALTUPGRADED is consulted later in soup to decide on post-upgrade steps.
    SALTUPGRADED=true
    echo "Salt upgrade success."
    echo ""
  fi

}

verify_latest_update_script() {
  # Make sure soup itself and its helper scripts match the versions shipped in
  # the update before continuing; otherwise update them and ask the user to
  # rerun soup.
  # NOTE(review): get_soup_script_hashes is presumed to set CURRENTSOUP/GITSOUP,
  # CURRENTCMN/GITCMN, CURRENTIMGCMN/GITIMGCMN and CURRENTSOFIREWALL/GITSOFIREWALL
  # — confirm in so-common.
  get_soup_script_hashes
  if [[ "$CURRENTSOUP" == "$GITSOUP" && "$CURRENTCMN" == "$GITCMN" && "$CURRENTIMGCMN" == "$GITIMGCMN" && "$CURRENTSOFIREWALL" == "$GITSOFIREWALL" ]]; then
    echo "This version of the soup script is up to date. Proceeding."
  else
    echo "You are not running the latest soup version. Updating soup and its components. This might take multiple runs to complete."

    salt-call state.apply common.soup_scripts queue=True -lerror --file-root=$UPDATE_DIR/salt --local --out-file=/dev/null

    # Verify that soup scripts updated as expected
    get_soup_script_hashes
    if [[ "$CURRENTSOUP" == "$GITSOUP" && "$CURRENTCMN" == "$GITCMN" && "$CURRENTIMGCMN" == "$GITIMGCMN" && "$CURRENTSOFIREWALL" == "$GITSOFIREWALL" ]]; then
      echo "Succesfully updated soup scripts."
    else
      # Retry once with more verbose logging so the failure is visible.
      echo "There was a problem updating soup scripts. Trying to rerun script update."
      salt-call state.apply common.soup_scripts queue=True -linfo --file-root=$UPDATE_DIR/salt --local
    fi

    echo ""
    echo "The soup script has been modified. Please run soup again to continue the upgrade."
    exit 0
  fi

}

verify_es_version_compatibility() {
    # Ensure the currently running Elasticsearch version can upgrade directly to
    # the ES version shipped with the target SO release. If a direct upgrade is
    # not supported, record the required stepping-stone ES version in a state
    # file and drive an intermediate SO upgrade (airgap or network) before the
    # originally requested upgrade is allowed to proceed.
    #
    # Exit codes:
    #   160 - current/target ES version could not be determined, or user aborted
    #   161 - a previously required intermediate ES upgrade has not yet
    #         completed across the grid

    local es_required_version_statefile_base="/opt/so/state/so_es_required_upgrade_version"
    local es_verification_script="/tmp/so_intermediate_upgrade_verification.sh"
    local is_active_intermediate_upgrade=1
    # supported upgrade paths for SO-ES versions
    declare -A es_upgrade_map=(
        ["8.14.3"]="8.17.3 8.18.4 8.18.6 8.18.8"
        ["8.17.3"]="8.18.4 8.18.6 8.18.8"
        ["8.18.4"]="8.18.6 8.18.8 9.0.8"
        ["8.18.6"]="8.18.8 9.0.8"
        ["8.18.8"]="9.0.8"
    )

    # Elasticsearch MUST upgrade through these versions.
    # Maps a required intermediate ES version to the SO release that ships it.
    declare -A es_to_so_version=(
        ["8.18.8"]="2.4.190-20251024"
    )

    # Get current Elasticsearch version
    if es_version_raw=$(so-elasticsearch-query / --fail --retry 5 --retry-delay 10); then
        es_version=$(echo "$es_version_raw" | jq -r '.version.number' )
    else
        echo "Could not determine current Elasticsearch version to validate compatibility with post soup Elasticsearch version."

        exit 160
    fi

    if ! target_es_version_raw=$(so-yaml.py get $UPDATE_DIR/salt/elasticsearch/defaults.yaml elasticsearch.version); then
        # so-yaml.py failed to get the ES version from upgrade versions elasticsearch/defaults.yaml file. Likely they are upgrading to an SO version older than 2.4.110 prior to the ES version pinning and should be OKAY to continue with the upgrade.

        # if so-yaml.py failed to get the ES version AND the version we are upgrading to is newer than 2.4.110 then we should bail.
        # Compare the patch component numerically: strip any hotfix suffix
        # (e.g. "190-20251024" -> "190") and use -gt. A lexical '>' comparison
        # here would mis-order versions such as "90" vs "110".
        local target_patch_version
        target_patch_version=$(cut -d'.' -f3 < "$UPDATE_DIR/VERSION" | cut -d'-' -f1)
        if [[ "$target_patch_version" -gt 110 ]]; then
            echo "Couldn't determine the target Elasticsearch version (post soup version) to ensure compatibility with current Elasticsearch version. Exiting"

            exit 160
        fi

        # allow upgrade to version < 2.4.110 without checking ES version compatibility
        return 0
    else
        # so-yaml.py may emit extra output; the version is on the first line
        target_es_version=$(sed -n '1p' <<< "$target_es_version_raw")
    fi

    # Handle state files left behind by earlier soup runs that required an
    # intermediate ES upgrade.
    for statefile in "${es_required_version_statefile_base}"-*; do
        [[ -f $statefile ]] || continue

        local es_required_version_statefile_value=$(cat "$statefile")

        if [[ "$es_required_version_statefile_value" == "$target_es_version" ]]; then
            echo "Intermediate upgrade to ES $target_es_version is in progress. Skipping Elasticsearch version compatibility check."
            is_active_intermediate_upgrade=0
            continue
        fi

        # use sort -V to check if es_required_statefile_value is < the current
        # es_version; if the grid already moved past it, the state file is stale.
        if [[ "$(printf '%s\n' "$es_required_version_statefile_value" "$es_version" | sort -V | head -n1)" == "$es_required_version_statefile_value" ]]; then
            rm -f "$statefile"
            continue
        fi

        if [[ ! -f "$es_verification_script" ]]; then
            create_intermediate_upgrade_verification_script "$es_verification_script"
        fi

        echo -e "\n##############################################################################################################################\n"
        echo "A previously required intermediate Elasticsearch upgrade was detected. Verifying that all Searchnodes/Heavynodes have successfully upgraded Elasticsearch to $es_required_version_statefile_value before proceeding with soup to avoid potential data loss! This command can take up to an hour to complete."
        timeout --foreground 4000 bash "$es_verification_script" "$es_required_version_statefile_value" "$statefile"
        if [[ $? -ne 0 ]]; then
            echo -e "\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"

            echo "A previous required intermediate Elasticsearch upgrade to $es_required_version_statefile_value has yet to successfully complete across the grid. Please allow time for all Searchnodes/Heavynodes to have upgraded Elasticsearch to $es_required_version_statefile_value before running soup again to avoid potential data loss!"

            echo -e "\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"

            exit 161
        fi
        echo -e "\n##############################################################################################################################\n"
    done

    # if current soup is an intermediate upgrade we can skip the upgrade map check below
    if [[ $is_active_intermediate_upgrade -eq 0 ]]; then
        return 0
    fi

    if [[ " ${es_upgrade_map[$es_version]} " =~ " $target_es_version " || "$es_version" == "$target_es_version" ]]; then
        # supported upgrade
        return 0
    else
        compatible_versions=${es_upgrade_map[$es_version]}
        if [[ -z "$compatible_versions" ]]; then
            # If current ES version is not explicitly defined in the upgrade map, we know they have an intermediate upgrade to do.
            # We default to the lowest ES version defined in es_to_so_version as $first_es_required_version
            local first_es_required_version=$(printf '%s\n' "${!es_to_so_version[@]}" | sort -V | head -n1)
            next_step_so_version=${es_to_so_version[$first_es_required_version]}
            required_es_upgrade_version="$first_es_required_version"
        else
            # Require the highest (last listed) version reachable from the current one
            next_step_so_version=${es_to_so_version[${compatible_versions##* }]}
            required_es_upgrade_version="${compatible_versions##* }"
        fi
        echo -e "\n##############################################################################################################################\n"
        echo -e "You are currently running Security Onion $INSTALLEDVERSION. You will need to update to version $next_step_so_version before updating to $(cat $UPDATE_DIR/VERSION).\n"

        es_required_version_statefile="${es_required_version_statefile_base}-${required_es_upgrade_version}"
        echo "$required_es_upgrade_version" > "$es_required_version_statefile"

        # We expect to upgrade to the latest compatible minor version of ES
        create_intermediate_upgrade_verification_script "$es_verification_script"

        if [[ $is_airgap -eq 0 ]]; then
            run_airgap_intermediate_upgrade
        else
            if [[ ! -z $ISOLOC ]]; then
                originally_requested_iso_location="$ISOLOC"
            fi
            # Make sure ISOLOC is not set. Network installs that used soup -f would have ISOLOC set.
            unset ISOLOC

            run_network_intermediate_upgrade
        fi
    fi

}

wait_for_salt_minion_with_restart() {
    # Wait for the given salt minion to become ready. If the initial wait
    # fails, restart the salt-minion service once and wait a second time.
    # Returns the exit status of the last wait_for_salt_minion attempt.
    local target_minion="$1"
    local timeout_secs="${2:-60}"
    local poll_secs="${3:-3}"
    local log_dest="$4"
    local status=0

    wait_for_salt_minion "$target_minion" "$timeout_secs" "$poll_secs" "$log_dest" || status=$?

    if [[ $status -eq 0 ]]; then
        return 0
    fi

    # First wait timed out; bounce the service and try once more.
    echo "$(date '+%a %d %b %Y %H:%M:%S.%6N') - salt-minion not ready, attempting restart..."
    systemctl_func "restart" "salt-minion"

    status=0
    wait_for_salt_minion "$target_minion" "$timeout_secs" "$poll_secs" "$log_dest" || status=$?
    return $status
}

run_airgap_intermediate_upgrade() {
    # Prompt the operator for the intermediate-release ISO/device, then replace
    # this process (exec) with a generated script that runs soup twice against
    # that ISO, verifies the grid's ES version, and — when possible — resumes
    # the originally requested upgrade automatically.
    # Relies on globals set by verify_es_version_compatibility:
    #   next_step_so_version, required_es_upgrade_version, es_required_version_statefile
    local originally_requested_so_version=$(cat $UPDATE_DIR/VERSION)
    # preserve ISOLOC value, so we can try to use it post intermediate upgrade
    local originally_requested_iso_location="$ISOLOC"

    # make sure a fresh ISO gets mounted
    unmount_update

    echo "You can download the $next_step_so_version ISO image from https://download.securityonion.net/file/securityonion/securityonion-$next_step_so_version.iso"
    echo -e "\nIf you have the next ISO / USB ready, enter the path now eg. /dev/sdd, /home/onion/securityonion-$next_step_so_version.iso:"

    # Loop until the operator supplies an existing regular file or block
    # device, or types 'exit' to abandon the upgrade.
    while [[ -z "$next_iso_location" ]] || [[ ! -f "$next_iso_location" && ! -b "$next_iso_location" ]]; do
        # List removable devices if any are present
        local removable_devices=$(lsblk -no PATH,SIZE,TYPE,MOUNTPOINTS,RM | awk '$NF==1')
        if [[ -n "$removable_devices" ]]; then
            echo "PATH        SIZE    TYPE    MOUNTPOINTS    RM"
            echo "$removable_devices"
        fi

        read -rp "Device/ISO Path (or 'exit' to quit): " next_iso_location
        if [[ "${next_iso_location,,}" == "exit" ]]; then
            echo "Exiting soup. Before reattempting to upgrade to $originally_requested_so_version, please first upgrade to $next_step_so_version to ensure Elasticsearch can properly update through the required versions."

            exit 160
        fi

        if [[ ! -f "$next_iso_location" && ! -b "$next_iso_location" ]]; then
            echo "$next_iso_location is not a valid file or block device."
            next_iso_location=""
        fi
    done

    echo "Using $next_iso_location for required intermediary upgrade."
    # NOTE: the heredoc below is UNQUOTED, so every $var and $(cmd) — including
    # $(cat /etc/soversion) — expands NOW, while building the script, not when
    # the exec'd bash runs it. Soup is run twice intentionally (the first run
    # updates soup itself and exits; the second performs the upgrade).
    exec bash <<EOF
        ISOLOC=$next_iso_location soup -y && \
        ISOLOC=$next_iso_location soup -y && \

        echo -e "\n##############################################################################################################################\n" && \
        echo -e "Verifying Elasticsearch was successfully upgraded to $required_es_upgrade_version across the grid. This part can take a while as Searchnodes/Heavynodes sync up with the Manager! \n\nOnce verification completes the next soup will begin automatically. If verification takes longer than 1 hour it will stop waiting and your grid will remain at $next_step_so_version. Allowing for all Searchnodes/Heavynodes to upgrade Elasticsearch to the required version on their own time.\n" && \

        timeout --foreground 4000 bash /tmp/so_intermediate_upgrade_verification.sh $required_es_upgrade_version $es_required_version_statefile && \

        echo -e "\n##############################################################################################################################\n" && \

        # automatically start the next soup if the original ISO isn't using the same block device we just used
        if [[ -n "$originally_requested_iso_location" ]] && [[ "$originally_requested_iso_location" != "$next_iso_location" ]]; then
            umount /tmp/soagupdate
            ISOLOC=$originally_requested_iso_location soup -y && \
            ISOLOC=$originally_requested_iso_location soup -y
        else
            echo "Could not automatically start next soup to $originally_requested_so_version. Soup will now exit here at $(cat /etc/soversion)" && \

            exit 170
        fi

        echo -e "\n##############################################################################################################################\n"
EOF
}

run_network_intermediate_upgrade() {
    # Replace this process (exec) with a generated script that runs soup twice
    # against the intermediate release's branch, verifies the grid's ES
    # version, then resumes the originally requested upgrade (original ISO if
    # soup -f was used on this network install, otherwise the original branch).
    # Relies on globals set by verify_es_version_compatibility:
    #   next_step_so_version, required_es_upgrade_version,
    #   es_required_version_statefile, originally_requested_iso_location
    # preserve BRANCH value if set originally
    if [[ -n "$BRANCH" ]]; then
        local originally_requested_so_branch="$BRANCH"
    else
        local originally_requested_so_branch="2.4/main"
    fi

    echo "Starting automated intermediate upgrade to $next_step_so_version."
    echo "After completion, the system will automatically attempt to upgrade to the latest version."
    echo -e "\n##############################################################################################################################\n"
    # NOTE: unquoted heredoc — all $vars and $(cmds) expand now, while building
    # the script, not when the exec'd bash runs it. Soup is run twice
    # intentionally (first run updates soup itself and exits; second upgrades).
    exec bash << EOF
        BRANCH=$next_step_so_version soup -y && \
        BRANCH=$next_step_so_version soup -y && \

        echo -e "\n##############################################################################################################################\n" && \
        echo -e "Verifying Elasticsearch was successfully upgraded to $required_es_upgrade_version across the grid. This part can take a while as Searchnodes/Heavynodes sync up with the Manager! \n\nOnce verification completes the next soup will begin automatically. If verification takes longer than 1 hour it will stop waiting and your grid will remain at $next_step_so_version. Allowing for all Searchnodes/Heavynodes to upgrade Elasticsearch to the required version on their own time.\n" && \

        timeout --foreground 4000 bash /tmp/so_intermediate_upgrade_verification.sh $required_es_upgrade_version $es_required_version_statefile && \

        echo -e "\n##############################################################################################################################\n" && \
        if [[ -n "$originally_requested_iso_location" ]]; then
            # nonairgap soup that used -f originally, runs intermediate upgrade using network + BRANCH, later coming back to the original ISO for the last soup
            ISOLOC=$originally_requested_iso_location soup -y && \
            ISOLOC=$originally_requested_iso_location soup -y
        else
            BRANCH=$originally_requested_so_branch soup -y && \
            BRANCH=$originally_requested_so_branch soup -y
        fi
        echo -e "\n##############################################################################################################################\n"
EOF
}

create_intermediate_upgrade_verification_script() {
    # After an intermediate upgrade, verify that ALL nodes running Elasticsearch are at the expected version BEFORE proceeding to the next upgrade step. This is a CRITICAL step
    #
    # Writes a standalone verification script to the path given in $1. The
    # generated script takes <expected_es_version> [statefile]; it polls the
    # search cluster and any heavynodes (up to 20 attempts, 180s apart each),
    # exits 0 and removes the statefile when every node reports the expected
    # ES version, exits 1 otherwise (logging failures to
    # /root/so_intermediate_upgrade_verification_failures.log).
    #
    # The heredoc delimiter is quoted ('EOF'), so nothing below expands here —
    # the content is written verbatim and only evaluated when the generated
    # script is run with bash.
    local verification_script="$1"

    cat << 'EOF' > "$verification_script"
    #!/bin/bash

    SOUP_INTERMEDIATE_UPGRADE_FAILURES_LOG_FILE="/root/so_intermediate_upgrade_verification_failures.log"
    CURRENT_TIME=$(date +%Y%m%d.%H%M%S)
    EXPECTED_ES_VERSION="$1"

    if [[ -z "$EXPECTED_ES_VERSION" ]]; then
        echo -e "\nExpected Elasticsearch version not provided. Usage: $0 <expected_es_version>"
        exit 1
    fi

    if [[ -f "$SOUP_INTERMEDIATE_UPGRADE_FAILURES_LOG_FILE" ]]; then
        mv "$SOUP_INTERMEDIATE_UPGRADE_FAILURES_LOG_FILE" "$SOUP_INTERMEDIATE_UPGRADE_FAILURES_LOG_FILE.$CURRENT_TIME"
    fi

    check_heavynodes_es_version() {
        # Check if heavynodes are in this grid
        if ! salt-key -l accepted | grep -q 'heavynode$'; then

            # No heavynodes, skip version check
            echo "No heavynodes detected in this Security Onion deployment. Skipping heavynode Elasticsearch version verification."
            return 0
        fi

        echo -e "\nOne or more heavynodes detected. Verifying their Elasticsearch versions."

        local retries=20
        local retry_count=0
        local delay=180

        while [[ $retry_count -lt $retries ]]; do
            # keep stderr with variable for logging
            heavynode_versions=$(salt -C 'G@role:so-heavynode' cmd.run 'so-elasticsearch-query / --retry 3 --retry-delay 10 | jq ".version.number"' shell=/bin/bash --out=json 2> /dev/null)
            local exit_status=$?

            # Check that all heavynodes returned good data
            if [[ $exit_status -ne 0 ]]; then
                echo "Failed to retrieve Elasticsearch version from one or more heavynodes... Retrying in $delay seconds. Attempt $((retry_count + 1)) of $retries."
                ((retry_count++))
                sleep $delay

                continue
            else
                if echo "$heavynode_versions" | jq -s --arg expected "\"$EXPECTED_ES_VERSION\"" --exit-status 'all(.[]; . | to_entries | all(.[]; .value == $expected))' > /dev/null; then
                    echo -e "\nAll heavynodes are at the expected Elasticsearch version $EXPECTED_ES_VERSION."

                    return 0
                else
                    echo "One or more heavynodes are not at the expected Elasticsearch version $EXPECTED_ES_VERSION. Rechecking in $delay seconds. Attempt $((retry_count + 1)) of $retries."
                    ((retry_count++))
                    sleep $delay

                    continue
                fi
            fi
        done

        echo -e "\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
        echo "One or more heavynodes is not at the expected Elasticsearch version $EXPECTED_ES_VERSION."
        echo "Current versions:"
        echo "$heavynode_versions" | jq -s 'add'
        echo "$heavynode_versions" | jq -s 'add' >> "$SOUP_INTERMEDIATE_UPGRADE_FAILURES_LOG_FILE"
        echo -e "\n Stopping automatic upgrade to latest Security Onion version. Heavynodes must ALL be at Elasticsearch version $EXPECTED_ES_VERSION before proceeding with the next upgrade step to avoid potential data loss!"
        echo -e "\n Heavynodes will upgrade themselves to Elasticsearch $EXPECTED_ES_VERSION on their own, but this process can take a long time depending on network link between Manager and Heavynodes."
        echo -e "\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"

        return 1
    }

    check_searchnodes_es_version() {
        local retries=20
        local retry_count=0
        local delay=180

        while [[ $retry_count -lt $retries ]]; do
            # keep stderr with variable for logging
            cluster_versions=$(so-elasticsearch-query _nodes/_all/version --retry 5 --retry-delay 10 --fail 2>&1)
            local exit_status=$?

            if [[ $exit_status -ne 0 ]]; then
                echo "Failed to retrieve Elasticsearch versions from searchnodes... Retrying in $delay seconds. Attempt $((retry_count + 1)) of $retries."
                ((retry_count++))
                sleep $delay

                continue
            else
                if echo "$cluster_versions" | jq --arg expected "$EXPECTED_ES_VERSION" --exit-status '.nodes | to_entries | all(.[].value.version; . == $expected)' > /dev/null; then
                    echo "All Searchnodes are at the expected Elasticsearch version $EXPECTED_ES_VERSION."

                    return 0
                else
                    echo "One or more Searchnodes is not at the expected Elasticsearch version $EXPECTED_ES_VERSION. Rechecking in $delay seconds. Attempt $((retry_count + 1)) of $retries."
                    ((retry_count++))
                    sleep $delay

                    continue
                fi
            fi
        done

        echo -e "\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
        echo "One or more Searchnodes is not at the expected Elasticsearch version $EXPECTED_ES_VERSION."
        echo "Current versions:"
        echo "$cluster_versions" | jq '.nodes | to_entries | map({(.value.name): .value.version}) | sort | add'
        echo "$cluster_versions" >> "$SOUP_INTERMEDIATE_UPGRADE_FAILURES_LOG_FILE"
        echo -e "\nStopping automatic upgrade to latest version. Searchnodes must ALL be at Elasticsearch version $EXPECTED_ES_VERSION before proceeding with the next upgrade step to avoid potential data loss!"
        echo -e "\nSearchnodes will upgrade themselves to Elasticsearch $EXPECTED_ES_VERSION on their own, but this process can take a while depending on cluster size / network link between Manager and Searchnodes."
        echo -e "\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"

        echo "$cluster_versions" > "$SOUP_INTERMEDIATE_UPGRADE_FAILURES_LOG_FILE"

        return 1

    }

    # Need to add a check for heavynodes and ensure all heavynodes get their own "cluster" upgraded before moving on to final upgrade.
    check_searchnodes_es_version || exit 1
    check_heavynodes_es_version || exit 1

    # Remove required version state file after successful verification
    rm -f "$2"

    exit 0

EOF
}

# Keeping this block in case we need to do a hotfix that requires salt update
apply_hotfix() {
  if [[ "$INSTALLEDVERSION" == "2.4.20" ]] ; then
    salt-call state.apply elasticfleet -l info queue=True
    . /usr/sbin/so-elastic-fleet-common
    elastic_fleet_integration_remove endpoints-initial elastic-defend-endpoints
    /usr/sbin/so-elastic-fleet-integration-policy-elastic-defend
  elif [[ "$INSTALLEDVERSION" == "2.4.30" ]] ; then
    if [[ $is_airgap -eq 0 ]]; then
      update_airgap_rules
    fi
    if [[ -f /etc/pki/managerssl.key.old ]]; then
      echo "Skipping Certificate Generation"
    else
      rm -f /opt/so/conf/elastic-fleet/integrations/endpoints-initial/elastic-defend-endpoints.json
      so-kibana-restart --force
      so-kibana-api-check
      . /usr/sbin/so-elastic-fleet-common

      elastic_fleet_integration_remove endpoints-initial elastic-defend-endpoints
      rm -f /opt/so/state/eaintegrations.txt
      salt-call state.apply ca queue=True
      stop_salt_minion
      mv /etc/pki/managerssl.crt /etc/pki/managerssl.crt.old
      mv /etc/pki/managerssl.key /etc/pki/managerssl.key.old
      systemctl_func "start" "salt-minion"
       wait_for_salt_minion_with_restart "$MINIONID" "60" "3" "$SOUP_LOG" || fail "Salt minion was not running or ready."
    fi
  else
   echo "No actions required. ($INSTALLEDVERSION/$HOTFIXVERSION)"
  fi
}

failed_soup_restore_items() {
  # After a failed (or before a fresh) soup run: make sure the cron service
  # and both salt daemons are running, re-enable scheduled highstates, and
  # release the master lock.
  local svc
  for svc in "$cron_service_name" salt-master salt-minion; do
    systemctl is-active --quiet "$svc" || systemctl_func "start" "$svc"
  done
  enable_highstate
  masterunlock
}

# Top-level soup driver: preflight checks, source/ISO preparation, optional
# salt upgrade, file deployment, highstates, and post-upgrade reporting.
main() {
  # Route every exit (normal or error) through check_err for cleanup/reporting
  trap 'check_err $?' EXIT

  # If running 3.X.X, we need to fetch the correct soup and supporting scripts
  # from the 3/main branch before proceeding, otherwise we'll clone 2.4/main
  # and end up with incompatible scripts.
  if [[ "$INSTALLEDVERSION" == 3.* && "$BRANCH" != "3/main" ]]; then
    echo "Detected Security Onion $INSTALLEDVERSION. Fetching soup from 3/main branch."
    rm -rf /tmp/sogh
    mkdir -p /tmp/sogh
    cd /tmp/sogh
    git clone -b 3/main https://github.com/Security-Onion-Solutions/securityonion.git
    if [ ! -f "$UPDATE_DIR/VERSION" ]; then
      echo "Unable to clone 3/main branch from Github. Please check your Internet access."
      exit 1
    fi
    cp "$UPDATE_DIR/salt/manager/tools/sbin/soup" /usr/sbin/soup
    cp "$UPDATE_DIR/salt/common/tools/sbin/so-common" /usr/sbin/so-common
    cp "$UPDATE_DIR/salt/common/tools/sbin/so-image-common" /usr/sbin/so-image-common
    echo "Updated soup scripts from 3/main. Restarting soup."
    # Replace this process with the freshly fetched soup, pinned to 3/main
    exec env BRANCH=3/main soup "$@"
  fi

  if [ -n "$BRANCH" ]; then
    echo "SOUP will use the $BRANCH branch."
    echo ""
  fi

  echo "### Preparing soup at $(date) ###"
  echo ""
  set_os

  check_salt_master_status 1 || fail  "Could not talk to salt master: Please run 'systemctl status salt-master' to ensure the salt-master service is running and check the log at /opt/so/log/salt/master."

  echo "Checking to see if this is a manager."
  echo ""
  require_manager

  # Ensure services/highstate/locks are in a sane state before starting
  failed_soup_restore_items

  check_pillar_items

  echo "Checking to see if this is an airgap install."
  echo ""
  check_airgap
  # NOTE(review): exiting 0 here reports success for what is an error
  # condition (unattended airgap without -f) — confirm this is intentional
  # (possibly to avoid triggering the check_err EXIT trap's failure path).
  if [[ $is_airgap -eq 0 && $UNATTENDED == true && -z $ISOLOC ]]; then
    echo "Missing file argument (-f <FILENAME>) for unattended airgap upgrade."
    exit 0
  fi

  set_minionid
  MINION_ROLE=$(lookup_role)
  echo "Found that Security Onion $INSTALLEDVERSION is currently installed."
  echo ""
  if [[ $is_airgap -eq 0 ]]; then
    # Let's mount the ISO since this is airgap
    airgap_mounted
  else
    # if not airgap but -f was used
    if [[ ! -z "$ISOLOC" ]]; then
      airgap_mounted
      AGDOCKER=/tmp/soagupdate/docker
    fi
    echo "Cloning Security Onion github repo into $UPDATE_DIR."
    echo "Removing previous upgrade sources."
    rm -rf $UPDATE_DIR
    echo "Cloning the Security Onion Repo."
    clone_to_tmp
  fi
  echo "Verifying we have the latest soup script."
  verify_latest_update_script

  echo "Verifying Elasticsearch version compatibility before upgrading."
  verify_es_version_compatibility

  echo "Let's see if we need to update Security Onion."
  upgrade_check
  upgrade_space

  echo "Checking for Salt Master and Minion updates."
  upgrade_check_salt
  # From here on, unhandled command failures abort soup (handled by check_err)
  set -e

  if [[ $is_airgap -eq 0 ]]; then
    update_airgap_repo
    dnf clean all
    check_os_updates
  elif [[ $OS == 'oracle' ]]; then
    # sync remote repo down to local if not airgap
    repo_sync
    dnf clean all
    check_os_updates
  fi

  if [ "$is_hotfix" == "true" ]; then
    # Hotfix path: no docker/salt upgrade, just new files + targeted fixes
    echo "Applying $HOTFIXVERSION hotfix"
    # since we don't run the backup.config_backup state on import we wont snapshot previous version states and pillars
    if [[ ! "$MINION_ROLE" == "import" ]]; then
      backup_old_states_pillars
    fi
    copy_new_files
    create_local_directories "/opt/so/saltstack/default"
    apply_hotfix
    echo "Hotfix applied"
    update_version
    enable_highstate
    highstate
  else
    # Full upgrade path
    echo ""
    echo "Performing upgrade from Security Onion $INSTALLEDVERSION to Security Onion $NEWVERSION."
    echo ""

    # Prevent scheduled jobs from interfering during the upgrade
    systemctl_func "stop" "$cron_service_name"

    echo "Updating dockers to $NEWVERSION."
    if [[ $is_airgap -eq 0 ]]; then
      airgap_update_dockers
    # if not airgap but -f was used
    elif [[ ! -z "$ISOLOC" ]]; then
      airgap_update_dockers
      unmount_update
    else
      update_registry
      set +e
      update_docker_containers 'soup' '' '' '/dev/stdout' 2>&1
      set -e
    fi

    stop_salt_minion

    stop_salt_master

    #update_repo

    # Does salt need upgraded. If so update it.
    if [[ $UPGRADESALT -eq 1 ]]; then
      echo "Upgrading Salt"
      # Update the repo files so it can actually upgrade
      upgrade_salt

      # for Debian based distro, we need to stop salt again after upgrade output below is from bootstrap-salt
      # *  WARN: Not starting daemons on Debian based distributions
      #    is not working mostly because starting them is the default behaviour.
      if [[ $is_deb ]]; then
        stop_salt_minion
        stop_salt_master
      fi
    fi

    preupgrade_changes
    echo ""

    if [[ $is_airgap -eq 0 ]]; then
      echo "Updating Rule Files to the Latest."
      update_airgap_rules
      echo "Updating Playbooks to the Latest."
      airgap_playbooks "$UPDATE_DIR"
    fi

    # since we don't run the backup.config_backup state on import we wont snapshot previous version states and pillars
    if [[ ! "$MINION_ROLE" == "import" ]]; then
      echo ""
      echo "Creating snapshots of default and local Salt states and pillars and saving to /nsm/backup/"
      backup_old_states_pillars
    fi

    echo ""
    echo "Copying new Security Onion code from $UPDATE_DIR to $DEFAULT_SALT_DIR."
    copy_new_files
    echo ""
    create_local_directories "/opt/so/saltstack/default"
    update_version

    echo ""
    echo "Locking down Salt Master for upgrade at $(date +"%T.%6N")."
    masterlock

    systemctl_func "start" "salt-master"

    # Testing that salt-master is up by checking that is it connected to itself
    check_saltmaster_status

    # update the salt-minion configs here and start the minion
    # since highstate are disabled above, minion start should not trigger a highstate
    echo ""
    echo "Ensuring salt-minion configs are up-to-date."
    salt-call state.apply salt.minion -l info queue=True
    echo ""

    # ensure the mine is updated and populated before highstates run, following the salt-master restart
    update_salt_mine

    if [[ $SALT_CLOUD_CONFIGURED == true && $SALTUPGRADED == true ]]; then
      echo "Updating salt-cloud config to use the new Salt version"
      salt-call state.apply salt.cloud.config concurrent=True
    fi

    enable_highstate

    echo ""
    echo "Running a highstate. This could take several minutes."
    set +e
    wait_for_salt_minion_with_restart "$MINIONID" "60" "3" "$SOUP_LOG" || fail "Salt minion was not running or ready."
    highstate
    set -e

    # Restart the master to unlock it and pick up the upgraded config
    stop_salt_master

    masterunlock

    systemctl_func "start" "salt-master"

    check_saltmaster_status

    echo "Running a highstate to complete the Security Onion upgrade on this manager. This could take several minutes."
    wait_for_salt_minion_with_restart "$MINIONID" "60" "3" "$SOUP_LOG" || fail "Salt minion was not running or ready."

    # Stop long-running scripts to allow potentially updated scripts to load on the next execution.
    if pgrep salt-relay.sh > /dev/null 2>&1; then
        echo "Stopping salt-relay.sh"
        killall salt-relay.sh
    else
        echo "salt-relay.sh is not running"
    fi

    # ensure the mine is updated and populated before highstates run, following the salt-master restart
    update_salt_mine

    highstate
    check_saltmaster_status
    postupgrade_changes
    [[ $is_airgap -eq 0 ]] && unmount_update

    echo ""
    echo "Upgrade to $NEWVERSION complete."

    # Everything beyond this is post-upgrade checking, don't fail past this point if something here causes an error
    set +e

    echo "Checking the number of minions."
    NUM_MINIONS=$(ls /opt/so/saltstack/local/pillar/minions/*_*.sls | grep -v adv_ | wc -l)
    if [[ $UPGRADESALT -eq 1 ]] && [[ $NUM_MINIONS -gt 1 ]]; then
      if [[ $is_airgap -eq 0 ]]; then
        echo ""
        echo "Cleaning repos on remote Security Onion nodes."
        salt -C 'not *_eval and not *_manager* and not *_standalone and G@os:OEL' cmd.run "dnf clean all"
        echo ""
      fi
    fi

    #echo "Checking for local modifications."
    #check_local_mods

    echo "Checking sudoers file."
    check_sudoers

    systemctl_func "start" "$cron_service_name"

    # Report any log_size_limit advisories gathered earlier in the run
    if [[ -n $lsl_msg ]]; then
      case $lsl_msg in
        'distributed')
          echo "[INFO] The value of log_size_limit in any heavy node minion pillars may be incorrect."
          echo " -> We recommend checking and adjusting the values as necessary."
          echo " -> Minion pillar directory: /opt/so/saltstack/local/pillar/minions/"
        ;;
        'single-node')
          # We can assume the lsl_details array has been set if lsl_msg has this value
          echo "[WARNING] The value of log_size_limit (${lsl_details[0]}) does not match the recommended value of ${lsl_details[1]}."
          echo " -> We recommend checking and adjusting the value as necessary."
          echo " -> File: /opt/so/saltstack/local/pillar/minions/${lsl_details[2]}.sls"
        ;;
      esac
    fi

    if [[ $NUM_MINIONS -gt 1 ]]; then

      cat << EOF



This appears to be a distributed deployment. Other nodes should update themselves at the next Salt highstate (typically within 15 minutes). Do not manually restart anything until you know that all the search/heavy nodes in your deployment are updated. This is especially important if you are using true clustering for Elasticsearch.

Each minion is on a random 15 minute check-in period and things like network bandwidth can be a factor in how long the actual upgrade takes. If you have a heavy node on a slow link, it is going to take a while to get the containers to it. Depending on what changes happened between the versions, Elasticsearch might not be able to talk to said heavy node until the update is complete.

If it looks like you’re missing data after the upgrade, please avoid restarting services and instead make sure at least one search node has completed its upgrade. The best way to do this is to run 'sudo salt-call state.highstate' from a search node and make sure there are no errors. Typically if it works on one node it will work on the rest. Sensor nodes are less complex and will update as they check in so you can monitor those from the Grid section of SOC.

For more information, please see $DOC_BASE_URL/soup#distributed-deployments.

EOF

    fi
  fi

  if [ "$NOTIFYCUSTOMELASTICCONFIG" = true ] ; then

    cat << EOF


A custom Elasticsearch configuration has been found at /opt/so/saltstack/local/elasticsearch/files/elasticsearch.yml. This file is no longer referenced in Security Onion versions >= 2.3.80.

If you still need those customizations, you'll need to manually migrate them to the new Elasticsearch config as shown at $DOC_BASE_URL/elasticsearch.

EOF

  fi

# check if the FINAL_MESSAGE_QUEUE is not empty
if (( ${#FINAL_MESSAGE_QUEUE[@]} != 0 )); then
  echo "The following additional information applies specifically to your grid:"
  for m in "${FINAL_MESSAGE_QUEUE[@]}"; do
    echo "$m"
    echo
  done
fi

  echo "### soup has been served at $(date) ###"
}

# Parse command-line options:
#   -b <n>    batch size for salt operations (positive integer)
#   -y        unattended mode (requires previously accepted Elastic License)
#   -f <path> path to an ISO/device for airgap or offline upgrades
while getopts ":b:f:y" opt; do
  case ${opt} in
    b )
      BATCHSIZE="$OPTARG"
      if ! [[ "$BATCHSIZE" =~ ^[1-9][0-9]*$ ]]; then
        echo "Batch size must be a number greater than 0."
        exit 1
      fi
    ;;
    y )
      # Unattended runs are only allowed once the Elastic License has been
      # accepted during a prior interactive run.
      if [[ ! -f /opt/so/state/yeselastic.txt ]]; then
        echo "Cannot run soup in unattended mode. You must run soup manually to accept the Elastic License."
        exit 1
      else
        UNATTENDED=true
      fi
    ;;
    f )
      ISOLOC="$OPTARG"
    ;;
    \? )
      # -b requires an argument; show it in the usage text
      echo "Usage: soup [-b <batch size>] [-y] [-f <iso location>]"
      exit 1
    ;;
    : )
      echo "Invalid option: $OPTARG requires an argument"
      exit 1
    ;;
  esac
done
shift $((OPTIND - 1))

# Rotate any previous soup log so this run appends to a fresh file
if [ -f $SOUP_LOG ]; then
  CURRENT_TIME=$(date +%Y%m%d.%H%M%S)
  mv $SOUP_LOG $SOUP_LOG.$INSTALLEDVERSION.$CURRENT_TIME
fi

if [[ -z $UNATTENDED ]]; then
  cat << EOF

SOUP - Security Onion UPdater

Please review the following for more information about the update process and recent updates:
$DOC_BASE_URL/soup
https://blog.securityonion.net

WARNING: If you run soup via an SSH session and that SSH session terminates, then any processes running in that session would terminate. You should avoid leaving soup unattended especially if the machine you are SSHing from is configured to sleep after a period of time. You might also consider using something like screen or tmux so that if your SSH session terminates, the processes will continue running on the server.

EOF

  cat << EOF
Press Enter to continue or Ctrl-C to cancel.
EOF

  read -r input
fi

main "$@" | tee -a $SOUP_LOG
# The pipeline's exit status is tee's, which would mask a failed upgrade;
# propagate main's real exit status instead.
exit "${PIPESTATUS[0]}"
