diff --git a/salt/elastalert/enabled.sls b/salt/elastalert/enabled.sls index 6a1ff1440..e28a55958 100644 --- a/salt/elastalert/enabled.sls +++ b/salt/elastalert/enabled.sls @@ -60,7 +60,7 @@ so-elastalert: - watch: - file: elastaconf - onlyif: - - "so-elasticsearch-query / | jq -r '.version.number[0:1]' | grep -q 8" {# only run this state if elasticsearch is version 8 #} + - "so-elasticsearch-query / | jq -r '.version.number[0:1]' | grep -q 9" {# only run this state if elasticsearch is version 9 #} delete_so-elastalert_so-status.disabled: file.uncomment: diff --git a/salt/elasticfleet/files/integrations/elastic-defend/elastic-defend-endpoints.json b/salt/elasticfleet/files/integrations/elastic-defend/elastic-defend-endpoints.json index fb9069e83..debfc73a3 100644 --- a/salt/elasticfleet/files/integrations/elastic-defend/elastic-defend-endpoints.json +++ b/salt/elasticfleet/files/integrations/elastic-defend/elastic-defend-endpoints.json @@ -5,7 +5,7 @@ "package": { "name": "endpoint", "title": "Elastic Defend", - "version": "8.18.1", + "version": "9.0.2", "requires_root": true }, "enabled": true, diff --git a/salt/elasticfleet/integration-defaults.map.jinja b/salt/elasticfleet/integration-defaults.map.jinja index 69ce7f3af..f85a95ec9 100644 --- a/salt/elasticfleet/integration-defaults.map.jinja +++ b/salt/elasticfleet/integration-defaults.map.jinja @@ -21,6 +21,7 @@ 'azure_application_insights.app_state': 'azure.app_state', 'azure_billing.billing': 'azure.billing', 'azure_functions.metrics': 'azure.function', + 'azure_ai_foundry.metrics': 'azure.ai_foundry', 'azure_metrics.compute_vm_scaleset': 'azure.compute_vm_scaleset', 'azure_metrics.compute_vm': 'azure.compute_vm', 'azure_metrics.container_instance': 'azure.container_instance', diff --git a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-optional-integrations-load b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-optional-integrations-load index 01777e5da..8c0f627ef 100644 --- 
a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-optional-integrations-load +++ b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-optional-integrations-load @@ -86,7 +86,7 @@ if [[ -f $STATE_FILE_SUCCESS ]]; then latest_package_list=$(/usr/sbin/so-elastic-fleet-package-list) echo '{ "packages" : []}' > $BULK_INSTALL_PACKAGE_LIST rm -f $INSTALLED_PACKAGE_LIST - echo $latest_package_list | jq '{packages: [.items[] | {name: .name, latest_version: .version, installed_version: .savedObject.attributes.install_version, subscription: .conditions.elastic.subscription }]}' >> $INSTALLED_PACKAGE_LIST + echo $latest_package_list | jq '{packages: [.items[] | {name: .name, latest_version: .version, installed_version: .installationInfo.version, subscription: .conditions.elastic.subscription }]}' >> $INSTALLED_PACKAGE_LIST while read -r package; do # get package details diff --git a/salt/elasticfleet/tools/sbin_jinja/so-kafka-fleet-output-policy b/salt/elasticfleet/tools/sbin_jinja/so-kafka-fleet-output-policy index d44a5cb6c..b44b467bc 100644 --- a/salt/elasticfleet/tools/sbin_jinja/so-kafka-fleet-output-policy +++ b/salt/elasticfleet/tools/sbin_jinja/so-kafka-fleet-output-policy @@ -47,7 +47,7 @@ if ! 
kafka_output=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L "http://l --arg KAFKACA "$KAFKACA" \ --arg MANAGER_IP "{{ GLOBALS.manager_ip }}:9092" \ --arg KAFKA_OUTPUT_VERSION "$KAFKA_OUTPUT_VERSION" \ - '{"name":"grid-kafka", "id":"so-manager_kafka","type":"kafka","hosts":[ $MANAGER_IP ],"is_default":false,"is_default_monitoring":false,"config_yaml":"","ssl":{"certificate_authorities":[ $KAFKACA ],"certificate": $KAFKACRT ,"key":"","verification_mode":"full"},"proxy_id":null,"client_id":"Elastic","version": $KAFKA_OUTPUT_VERSION ,"compression":"none","auth_type":"ssl","partition":"round_robin","round_robin":{"group_events":10},"topics":[{"topic":"default-securityonion"}],"headers":[{"key":"","value":""}],"timeout":30,"broker_timeout":30,"required_acks":1,"secrets":{"ssl":{"key": $KAFKAKEY }}}' + '{"name":"grid-kafka", "id":"so-manager_kafka","type":"kafka","hosts":[ $MANAGER_IP ],"is_default":false,"is_default_monitoring":false,"config_yaml":"","ssl":{"certificate_authorities":[ $KAFKACA ],"certificate": $KAFKACRT ,"key":"","verification_mode":"full"},"proxy_id":null,"client_id":"Elastic","version": $KAFKA_OUTPUT_VERSION ,"compression":"none","auth_type":"ssl","partition":"round_robin","round_robin":{"group_events":10},"topic":"default-securityonion","headers":[{"key":"","value":""}],"timeout":30,"broker_timeout":30,"required_acks":1,"secrets":{"ssl":{"key": $KAFKAKEY }}}' ) if ! 
response=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/outputs" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" --fail 2>/dev/null); then echo -e "\nFailed to setup Elastic Fleet output policy for Kafka...\n" @@ -67,7 +67,7 @@ elif kafka_output=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L "http://l --arg ENABLED_DISABLED "$ENABLED_DISABLED"\ --arg KAFKA_OUTPUT_VERSION "$KAFKA_OUTPUT_VERSION" \ --argjson HOSTS "$HOSTS" \ - '{"name":"grid-kafka","type":"kafka","hosts":$HOSTS,"is_default":$ENABLED_DISABLED,"is_default_monitoring":$ENABLED_DISABLED,"config_yaml":"","ssl":{"certificate_authorities":[ $KAFKACA ],"certificate": $KAFKACRT ,"key":"","verification_mode":"full"},"proxy_id":null,"client_id":"Elastic","version": $KAFKA_OUTPUT_VERSION ,"compression":"none","auth_type":"ssl","partition":"round_robin","round_robin":{"group_events":10},"topics":[{"topic":"default-securityonion"}],"headers":[{"key":"","value":""}],"timeout":30,"broker_timeout":30,"required_acks":1,"secrets":{"ssl":{"key": $KAFKAKEY }}}' + '{"name":"grid-kafka","type":"kafka","hosts":$HOSTS,"is_default":$ENABLED_DISABLED,"is_default_monitoring":$ENABLED_DISABLED,"config_yaml":"","ssl":{"certificate_authorities":[ $KAFKACA ],"certificate": $KAFKACRT ,"key":"","verification_mode":"full"},"proxy_id":null,"client_id":"Elastic","version": $KAFKA_OUTPUT_VERSION ,"compression":"none","auth_type":"ssl","partition":"round_robin","round_robin":{"group_events":10},"topic":"default-securityonion","headers":[{"key":"","value":""}],"timeout":30,"broker_timeout":30,"required_acks":1,"secrets":{"ssl":{"key": $KAFKAKEY }}}' ) if ! 
response=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_kafka" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" --fail 2>/dev/null); then echo -e "\nFailed to force update to Elastic Fleet output policy for Kafka...\n" diff --git a/salt/elasticsearch/defaults.yaml b/salt/elasticsearch/defaults.yaml index 38559e68c..99403d9b8 100644 --- a/salt/elasticsearch/defaults.yaml +++ b/salt/elasticsearch/defaults.yaml @@ -1,6 +1,6 @@ elasticsearch: enabled: false - version: 8.18.8 + version: 9.0.8 index_clean: true config: action: diff --git a/salt/elasticsearch/tools/sbin_jinja/so-catrust b/salt/elasticsearch/tools/sbin_jinja/so-catrust index 16fd3ffdb..89cd9147d 100644 --- a/salt/elasticsearch/tools/sbin_jinja/so-catrust +++ b/salt/elasticsearch/tools/sbin_jinja/so-catrust @@ -15,7 +15,7 @@ set -e if [ ! -f /opt/so/saltstack/local/salt/elasticsearch/cacerts ]; then docker run -v /etc/pki/ca.crt:/etc/ssl/ca.crt --name so-elasticsearchca --user root --entrypoint jdk/bin/keytool {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-elasticsearch:$ELASTIC_AGENT_TARBALL_VERSION -keystore /usr/share/elasticsearch/jdk/lib/security/cacerts -alias SOSCA -import -file /etc/ssl/ca.crt -storepass changeit -noprompt docker cp so-elasticsearchca:/usr/share/elasticsearch/jdk/lib/security/cacerts /opt/so/saltstack/local/salt/elasticsearch/cacerts - docker cp so-elasticsearchca:/etc/ssl/certs/ca-certificates.crt /opt/so/saltstack/local/salt/elasticsearch/tls-ca-bundle.pem + docker cp so-elasticsearchca:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem /opt/so/saltstack/local/salt/elasticsearch/tls-ca-bundle.pem docker rm so-elasticsearchca echo "" >> /opt/so/saltstack/local/salt/elasticsearch/tls-ca-bundle.pem echo "sosca" >> /opt/so/saltstack/local/salt/elasticsearch/tls-ca-bundle.pem diff --git a/salt/logstash/defaults.yaml b/salt/logstash/defaults.yaml index 5af366459..520182555 100644 --- 
a/salt/logstash/defaults.yaml +++ b/salt/logstash/defaults.yaml @@ -63,7 +63,7 @@ logstash: settings: lsheap: 500m config: - http_x_host: 0.0.0.0 + api_x_http_x_host: 0.0.0.0 path_x_logs: /var/log/logstash pipeline_x_workers: 1 pipeline_x_batch_x_size: 125 diff --git a/salt/logstash/pipelines/config/so/0011_input_endgame.conf b/salt/logstash/pipelines/config/so/0011_input_endgame.conf index 375585957..c6f9c59e8 100644 --- a/salt/logstash/pipelines/config/so/0011_input_endgame.conf +++ b/salt/logstash/pipelines/config/so/0011_input_endgame.conf @@ -5,10 +5,10 @@ input { codec => es_bulk request_headers_target_field => client_headers remote_host_target_field => client_host - ssl => true + ssl_enabled => true ssl_certificate_authorities => ["/usr/share/filebeat/ca.crt"] ssl_certificate => "/usr/share/logstash/filebeat.crt" ssl_key => "/usr/share/logstash/filebeat.key" - ssl_verify_mode => "peer" + ssl_client_authentication => "required" } } diff --git a/salt/logstash/pipelines/config/so/0012_input_elastic_agent.conf.jinja b/salt/logstash/pipelines/config/so/0012_input_elastic_agent.conf.jinja index 6ba29f8e5..a4d699aff 100644 --- a/salt/logstash/pipelines/config/so/0012_input_elastic_agent.conf.jinja +++ b/salt/logstash/pipelines/config/so/0012_input_elastic_agent.conf.jinja @@ -2,11 +2,11 @@ input { elastic_agent { port => 5055 tags => [ "elastic-agent", "input-{{ GLOBALS.hostname }}" ] - ssl => true + ssl_enabled => true ssl_certificate_authorities => ["/usr/share/filebeat/ca.crt"] ssl_certificate => "/usr/share/logstash/elasticfleet-logstash.crt" ssl_key => "/usr/share/logstash/elasticfleet-logstash.key" - ssl_verify_mode => "force_peer" + ssl_client_authentication => "required" ecs_compatibility => v8 } } diff --git a/salt/logstash/pipelines/config/so/0013_input_lumberjack_fleet.conf b/salt/logstash/pipelines/config/so/0013_input_lumberjack_fleet.conf index fd9a87a22..b31ffee8d 100644 --- a/salt/logstash/pipelines/config/so/0013_input_lumberjack_fleet.conf +++ 
b/salt/logstash/pipelines/config/so/0013_input_lumberjack_fleet.conf @@ -2,7 +2,7 @@ input { elastic_agent { port => 5056 tags => [ "elastic-agent", "fleet-lumberjack-input" ] - ssl => true + ssl_enabled => true ssl_certificate => "/usr/share/logstash/elasticfleet-lumberjack.crt" ssl_key => "/usr/share/logstash/elasticfleet-lumberjack.key" ecs_compatibility => v8 diff --git a/salt/logstash/pipelines/config/so/9805_output_elastic_agent.conf.jinja b/salt/logstash/pipelines/config/so/9805_output_elastic_agent.conf.jinja index be7ec6898..4fe138dd8 100644 --- a/salt/logstash/pipelines/config/so/9805_output_elastic_agent.conf.jinja +++ b/salt/logstash/pipelines/config/so/9805_output_elastic_agent.conf.jinja @@ -8,8 +8,8 @@ output { document_id => "%{[metadata][_id]}" index => "so-ip-mappings" silence_errors_in_log => ["version_conflict_engine_exception"] - ssl => true - ssl_certificate_verification => false + ssl_enabled => true + ssl_verification_mode => "none" } } else { @@ -25,8 +25,8 @@ output { document_id => "%{[metadata][_id]}" pipeline => "%{[metadata][pipeline]}" silence_errors_in_log => ["version_conflict_engine_exception"] - ssl => true - ssl_certificate_verification => false + ssl_enabled => true + ssl_verification_mode => "none" } } else { @@ -37,8 +37,8 @@ output { user => "{{ ES_USER }}" password => "{{ ES_PASS }}" pipeline => "%{[metadata][pipeline]}" - ssl => true - ssl_certificate_verification => false + ssl_enabled => true + ssl_verification_mode => "none" } } } @@ -49,8 +49,8 @@ output { data_stream => true user => "{{ ES_USER }}" password => "{{ ES_PASS }}" - ssl => true - ssl_certificate_verification => false + ssl_enabled => true + ssl_verification_mode=> "none" } } } diff --git a/salt/logstash/pipelines/config/so/9900_output_endgame.conf.jinja b/salt/logstash/pipelines/config/so/9900_output_endgame.conf.jinja index c056f5774..27e311fc4 100644 --- a/salt/logstash/pipelines/config/so/9900_output_endgame.conf.jinja +++ 
b/salt/logstash/pipelines/config/so/9900_output_endgame.conf.jinja @@ -13,8 +13,8 @@ output { user => "{{ ES_USER }}" password => "{{ ES_PASS }}" index => "endgame-%{+YYYY.MM.dd}" - ssl => true - ssl_certificate_verification => false + ssl_enabled => true + ssl_verification_mode => "none" } } } diff --git a/salt/logstash/soc_logstash.yaml b/salt/logstash/soc_logstash.yaml index b617abfdd..9560b5c36 100644 --- a/salt/logstash/soc_logstash.yaml +++ b/salt/logstash/soc_logstash.yaml @@ -56,7 +56,7 @@ logstash: helpLink: logstash.html global: False config: - http_x_host: + api_x_http_x_host: description: Host interface to listen to connections. helpLink: logstash.html readonly: True diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 9516acdd1..894cf14f5 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -87,6 +87,12 @@ check_err() { 113) echo 'No route to host' ;; + 160) + echo 'Incompatible Elasticsearch upgrade' + ;; + 161) + echo 'Required intermediate Elasticsearch upgrade not complete' + ;; *) echo 'Unhandled error' echo "$err_msg" @@ -631,9 +637,6 @@ post_to_2.4.180() { } post_to_2.4.190() { - echo "Regenerating Elastic Agent Installers" - /sbin/so-elastic-agent-gen-installers - # Only need to update import / eval nodes if [[ "$MINION_ROLE" == "import" ]] || [[ "$MINION_ROLE" == "eval" ]]; then update_import_fleet_output @@ -666,6 +669,9 @@ post_to_2.4.210() { rollover_index "logs-kratos-so" + echo "Regenerating Elastic Agent Installers" + /sbin/so-elastic-agent-gen-installers + POSTVERSION=2.4.210 } @@ -935,9 +941,7 @@ up_to_2.4.180() { } up_to_2.4.190() { - # Elastic Update for this release, so download Elastic Agent files - determine_elastic_agent_upgrade - + echo "Nothing to do for 2.4.190" INSTALLEDVERSION=2.4.190 } @@ -951,7 +955,8 @@ up_to_2.4.200() { } up_to_2.4.210() { - echo "Nothing to do for 2.4.210" + # Elastic Update for this release, so download Elastic Agent files + 
determine_elastic_agent_upgrade INSTALLEDVERSION=2.4.210 } @@ -1653,6 +1658,243 @@ verify_latest_update_script() { fi } + +verify_es_version_compatibility() { + + local es_required_version_statefile="/opt/so/state/so_es_required_upgrade_version.txt" + local es_verification_script="/tmp/so_intermediate_upgrade_verification.sh" + # supported upgrade paths for SO-ES versions + declare -A es_upgrade_map=( + ["8.14.3"]="8.17.3 8.18.4 8.18.6 8.18.8" + ["8.17.3"]="8.18.4 8.18.6 8.18.8" + ["8.18.4"]="8.18.6 8.18.8 9.0.8" + ["8.18.6"]="8.18.8 9.0.8" + ["8.18.8"]="9.0.8" + ) + + # Elasticsearch MUST upgrade through these versions + declare -A es_to_so_version=( + ["8.18.8"]="2.4.190-20251024" + ) + + # Get current Elasticsearch version + if es_version_raw=$(so-elasticsearch-query / --fail --retry 5 --retry-delay 10); then + es_version=$(echo "$es_version_raw" | jq -r '.version.number' ) + else + echo "Could not determine current Elasticsearch version to validate compatibility with post soup Elasticsearch version." + exit 160 + fi + + if ! target_es_version=$(so-yaml.py get $UPDATE_DIR/salt/elasticsearch/defaults.yaml elasticsearch.version | sed -n '1p'); then + # so-yaml.py failed to get the ES version from upgrade versions elasticsearch/defaults.yaml file. Likely they are upgrading to an SO version older than 2.4.110 prior to the ES version pinning and should be OKAY to continue with the upgrade. + + # if so-yaml.py failed to get the ES version AND the version we are upgrading to is newer than 2.4.110 then we should bail + if [[ $(cat $UPDATE_DIR/VERSION | cut -d'.' -f3) > 110 ]]; then + echo "Couldn't determine the target Elasticsearch version (post soup version) to ensure compatibility with current Elasticsearch version. 
Exiting" + exit 160 + fi + + # allow upgrade to version < 2.4.110 without checking ES version compatibility + return 0 + + fi + + # if this statefile exists then we have done an intermediate upgrade and we need to ensure that ALL ES nodes have been upgraded to the version in the statefile before allowing soup to continue + if [[ -f "$es_required_version_statefile" ]]; then + # required so verification script should have already been created + if [[ ! -f "$es_verification_script" ]]; then + create_intermediate_upgrade_verification_script $es_verification_script + fi + + local es_required_version_statefile_value=$(cat $es_required_version_statefile) + echo -e "\n##############################################################################################################################\n" + echo "A previously required intermediate Elasticsearch upgrade was detected. Verifying that all Searchnodes/Heavynodes have successfully upgraded Elasticsearch to $es_required_version_statefile_value before proceeding with soup to avoid potential data loss!" + # create script using version in statefile + timeout --foreground 4000 bash "$es_verification_script" "$es_required_version_statefile_value" "$es_required_version_statefile" + if [[ $? -ne 0 ]]; then + echo -e "\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n" + + echo "A previous required intermediate Elasticsearch upgrade to $es_required_version_statefile_value has yet to successfully complete across the grid. Please allow time for all Searchnodes/Heavynodes to have upgraded Elasticsearch to $es_required_version_statefile_value before running soup again to avoid potential data loss!" 
+ + echo -e "\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n" + exit 161 + fi + echo -e "\n##############################################################################################################################\n" + fi + + if [[ " ${es_upgrade_map[$es_version]} " =~ " $target_es_version " || "$es_version" == "$target_es_version" ]]; then + # supported upgrade + return 0 + else + compatible_versions=${es_upgrade_map[$es_version]} + next_step_so_version=${es_to_so_version[${compatible_versions##* }]} + echo -e "\n##############################################################################################################################\n" + echo -e "You are currently running Security Onion $INSTALLEDVERSION. You will need to update to version $next_step_so_version before updating to $(cat $UPDATE_DIR/VERSION).\n" + + echo "${compatible_versions##* }" > "$es_required_version_statefile" + + # We expect to upgrade to the latest compatible minor version of ES + create_intermediate_upgrade_verification_script $es_verification_script + + if [[ $is_airgap -eq 0 ]]; then + echo "You can download the $next_step_so_version ISO image from https://download.securityonion.net/file/securityonion/securityonion-$next_step_so_version.iso" + echo "*** Once you have updated to $next_step_so_version, you can then run soup again to update to $(cat $UPDATE_DIR/VERSION). ***" + echo -e "\n##############################################################################################################################\n" + exit 160 + else + # preserve BRANCH value if set originally + if [[ -n "$BRANCH" ]]; then + local originally_requested_so_version="$BRANCH" + else + local originally_requested_so_version="2.4/main" + fi + + echo "Starting automated intermediate upgrade to $next_step_so_version." + echo "After completion, the system will automatically attempt to upgrade to the latest version." 
+ echo -e "\n##############################################################################################################################\n" + exec bash -c "BRANCH=$next_step_so_version soup -y && BRANCH=$next_step_so_version soup -y && \ + echo -e \"\n##############################################################################################################################\n\" && \ + echo -e \"Verifying Elasticsearch was successfully upgraded to ${compatible_versions##* } across the grid. This part can take a while as Searchnodes/Heavynodes sync up with the Manager! \n\nOnce verification completes the next soup will begin automatically. If verification takes longer than 1 hour it will stop waiting and your grid will remain at $next_step_so_version. Allowing for all Searchnodes/Heavynodes to upgrade Elasticsearch to the required version on their own time.\n\" \ + && timeout --foreground 4000 bash /tmp/so_intermediate_upgrade_verification.sh ${compatible_versions##* } $es_required_version_statefile && \ + echo -e \"\n##############################################################################################################################\n\" \ + && BRANCH=$originally_requested_so_version soup -y && BRANCH=$originally_requested_so_version soup -y" + fi + fi + +} + +create_intermediate_upgrade_verification_script() { + # After an intermediate upgrade, verify that ALL nodes running Elasticsearch are at the expected version BEFORE proceeding to the next upgrade step. This is a CRITICAL step + local verification_script="$1" + + cat << 'EOF' > "$verification_script" + #!/bin/bash + + SOUP_INTERMEDIATE_UPGRADE_FAILURES_LOG_FILE="/root/so_intermediate_upgrade_verification_failures.log" + CURRENT_TIME=$(date +%Y%m%d.%H%M%S) + EXPECTED_ES_VERSION="$1" + + if [[ -z "$EXPECTED_ES_VERSION" ]]; then + echo -e "\nExpected Elasticsearch version not provided. 
Usage: $0 " + exit 1 + fi + + if [[ -f "$SOUP_INTERMEDIATE_UPGRADE_FAILURES_LOG_FILE" ]]; then + mv "$SOUP_INTERMEDIATE_UPGRADE_FAILURES_LOG_FILE" "$SOUP_INTERMEDIATE_UPGRADE_FAILURES_LOG_FILE.$CURRENT_TIME" + fi + + check_heavynodes_es_version() { + # Check if heavynodes are in this grid + if ! salt-key -l accepted | grep -q 'heavynode$'; then + + # No heavynodes, skip version check + echo "No heavynodes detected in this Security Onion deployment. Skipping heavynode Elasticsearch version verification." + return 0 + fi + + echo -e "\nOne or more heavynodes detected. Verifying their Elasticsearch versions." + + local retries=20 + local retry_count=0 + local delay=180 + + while [[ $retry_count -lt $retries ]]; do + # keep stderr with variable for logging + heavynode_versions=$(salt -C 'G@role:so-heavynode' cmd.run 'so-elasticsearch-query / --retry 3 --retry-delay 10 | jq ".version.number"' shell=/bin/bash --out=json 2> /dev/null) + local exit_status=$? + + # Check that all heavynodes returned good data + if [[ $exit_status -ne 0 ]]; then + echo "Failed to retrieve Elasticsearch version from one or more heavynodes... Retrying in $delay seconds. Attempt $((retry_count + 1)) of $retries." + ((retry_count++)) + sleep $delay + + continue + else + if echo "$heavynode_versions" | jq -s --arg expected "\"$EXPECTED_ES_VERSION\"" --exit-status 'all(.[]; . | to_entries | all(.[]; .value == $expected))' > /dev/null; then + echo -e "\nAll heavynodes are at the expected Elasticsearch version $EXPECTED_ES_VERSION." + + return 0 + else + echo "One or more heavynodes are not at the expected Elasticsearch version $EXPECTED_ES_VERSION. Rechecking in $delay seconds. Attempt $((retry_count + 1)) of $retries." 
+ ((retry_count++)) + sleep $delay + + continue + fi + fi + done + + echo -e "\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n" + echo "One or more heavynodes is not at the expected Elasticsearch version $EXPECTED_ES_VERSION." + echo "Current versions:" + echo "$heavynode_versions" | jq -s 'add' + echo "$heavynode_versions" | jq -s 'add' >> "$SOUP_INTERMEDIATE_UPGRADE_FAILURES_LOG_FILE" + echo -e "\n Stopping automatic upgrade to latest Security Onion version. Heavynodes must ALL be at Elasticsearch version $EXPECTED_ES_VERSION before proceeding with the next upgrade step to avoid potential data loss!" + echo -e "\n Heavynodes will upgrade themselves to Elasticsearch $EXPECTED_ES_VERSION on their own, but this process can take a long time depending on network link between Manager and Heavynodes." + echo -e "\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n" + + return 1 + } + + check_searchnodes_es_version() { + local retries=20 + local retry_count=0 + local delay=180 + + while [[ $retry_count -lt $retries ]]; do + # keep stderr with variable for logging + cluster_versions=$(so-elasticsearch-query _nodes/_all/version --retry 5 --retry-delay 10 --fail 2>&1) + local exit_status=$? + + if [[ $exit_status -ne 0 ]]; then + echo "Failed to retrieve Elasticsearch versions from searchnodes... Retrying in $delay seconds. Attempt $((retry_count + 1)) of $retries." + ((retry_count++)) + sleep $delay + + continue + else + if echo "$cluster_versions" | jq --arg expected "$EXPECTED_ES_VERSION" --exit-status '.nodes | to_entries | all(.[].value.version; . == $expected)' > /dev/null; then + echo "All Searchnodes are at the expected Elasticsearch version $EXPECTED_ES_VERSION." + + return 0 + else + echo "One or more Searchnodes is not at the expected Elasticsearch version $EXPECTED_ES_VERSION. 
Rechecking in $delay seconds. Attempt $((retry_count + 1)) of $retries." + ((retry_count++)) + sleep $delay + + continue + fi + fi + done + + echo -e "\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n" + echo "One or more Searchnodes is not at the expected Elasticsearch version $EXPECTED_ES_VERSION." + echo "Current versions:" + echo "$cluster_versions" | jq '.nodes | to_entries | map({(.value.name): .value.version}) | sort | add' + echo "$cluster_versions" >> "$SOUP_INTERMEDIATE_UPGRADE_FAILURES_LOG_FILE" + echo -e "\nStopping automatic upgrade to latest version. Searchnodes must ALL be at Elasticsearch version $EXPECTED_ES_VERSION before proceeding with the next upgrade step to avoid potential data loss!" + echo -e "\nSearchnodes will upgrade themselves to Elasticsearch $EXPECTED_ES_VERSION on their own, but this process can take a while depending on cluster size / network link between Manager and Searchnodes." + echo -e "\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n" + + echo "$cluster_versions" > "$SOUP_INTERMEDIATE_UPGRADE_FAILURES_LOG_FILE" + + return 1 + + } + + # Need to add a check for heavynodes and ensure all heavynodes get their own "cluster" upgraded before moving on to final upgrade. + check_searchnodes_es_version || exit 1 + check_heavynodes_es_version || exit 1 + + # Remove required version state file after successful verification + rm -f "$2" + + exit 0 + +EOF +} + # Keeping this block in case we need to do a hotfix that requires salt update apply_hotfix() { if [[ "$INSTALLEDVERSION" == "2.4.20" ]] ; then @@ -1749,6 +1991,8 @@ main() { echo "Verifying we have the latest soup script." verify_latest_update_script + verify_es_version_compatibility + echo "Let's see if we need to update Security Onion." 
upgrade_check upgrade_space diff --git a/salt/sensoroni/files/templates/reports/standard/case_report.md b/salt/sensoroni/files/templates/reports/standard/case_report.md index 49f18e7c6..76a166f3f 100644 --- a/salt/sensoroni/files/templates/reports/standard/case_report.md +++ b/salt/sensoroni/files/templates/reports/standard/case_report.md @@ -130,4 +130,42 @@ Security Onion Case Report | ---- | ---- | ------ | --------- | {{ range sortHistory "CreateTime" "asc" .History -}} | {{formatDateTime "Mon Jan 02 15:04:05 -0700 2006" .CreateTime}} | {{getUserDetail "email" .UserId}} | {{.Kind}} | {{.Operation}} | +{{end}} + +## Attached Onion AI Sessions + +{{ range $idx, $session := sortAssistantSessionDetails "CreateTime" "desc" .AssistantSessions }} + +#### Session {{ add $idx 1 }} + +**Session ID:** {{$session.Session.SessionId}} + +**Title:** {{$session.Session.Title}} + +**User ID:** {{getUserDetail "email" $session.Session.UserId}} + +**Created:** {{formatDateTime "Mon Jan 02 15:04:05 -0700 2006" $session.Session.CreateTime}} + +**Updated:** {{formatDateTime "Mon Jan 02 15:04:05 -0700 2006" $session.Session.UpdateTime}} + +{{ if $session.Session.DeleteTime }} +**Deleted:** {{ formatDateTime "Mon Jan 02 15:04:05 -0700 2006" $session.Session.DeleteTime}} +{{ end }} + +#### Messages + +{{ range $index, $msg := sortAssistantMessages "CreateTime" "asc" $session.History }} +{{ range $i, $block := $msg.Message.ContentBlocks }} + +{{ if eq $block.Type "text" }} + +**Role:** {{$msg.Message.Role}} + +{{ stripEmoji $block.Text }} + +--- + +{{ end }}{{ end }} + +{{end}} {{end}} \ No newline at end of file