Compare commits

4 Commits

Author       SHA1        Message                                                                     Date
Mike Reeves  3fb153c43e  Add support for version 2.4.201 upgrades                                   2026-01-13 16:41:39 -05:00
Mike Reeves  6de20c63d4  Update VERSION                                                              2026-01-13 16:20:57 -05:00
Mike Reeves  8ff0c6828b  Merge pull request #15319 from Security-Onion-Solutions/2.4/dev (2.4.200)  2025-12-16 11:10:30 -05:00
Jason Ertel  33ada95bbc  Merge pull request #15167 from Security-Onion-Solutions/2.4/dev (2.4.190)  2025-10-24 16:01:05 -04:00
26 changed files with 100 additions and 491 deletions

View File

@@ -33,7 +33,6 @@ body:
         - 2.4.180
         - 2.4.190
         - 2.4.200
-        - 2.4.210
         - Other (please provide detail below)
     validations:
       required: true

View File

@@ -1 +1 @@
-2.4.0-foxtrot
+2.4.201

View File

@@ -129,7 +129,6 @@ if [[ $EXCLUDE_STARTUP_ERRORS == 'Y' ]]; then
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|responded with status-code 503" # telegraf getting 503 from ES during startup EXCLUDED_ERRORS="$EXCLUDED_ERRORS|responded with status-code 503" # telegraf getting 503 from ES during startup
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|process_cluster_event_timeout_exception" # logstash waiting for elasticsearch to start EXCLUDED_ERRORS="$EXCLUDED_ERRORS|process_cluster_event_timeout_exception" # logstash waiting for elasticsearch to start
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|not configured for GeoIP" # SO does not bundle the maxminddb with Zeek EXCLUDED_ERRORS="$EXCLUDED_ERRORS|not configured for GeoIP" # SO does not bundle the maxminddb with Zeek
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|HTTP 404: Not Found" # Salt loops until Kratos returns 200, during startup Kratos may not be ready
fi fi
if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then

View File

@@ -60,7 +60,7 @@ so-elastalert:
     - watch:
       - file: elastaconf
     - onlyif:
-      - "so-elasticsearch-query / | jq -r '.version.number[0:1]' | grep -q 9" {# only run this state if elasticsearch is version 9 #}
+      - "so-elasticsearch-query / | jq -r '.version.number[0:1]' | grep -q 8" {# only run this state if elasticsearch is version 8 #}
 delete_so-elastalert_so-status.disabled:
   file.uncomment:

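The onlyif guard above gates the state on the Elasticsearch major version: so-elasticsearch-query / returns the cluster root document, and the jq slice takes the first character of .version.number. A minimal sketch of the same probe against a hand-written sample response (the JSON here is invented, not captured from a live node):

    # crude major-version probe, as used by the onlyif guard above
    echo '{"version":{"number":"8.18.8"}}' | jq -r '.version.number[0:1]' | grep -q 8 && echo "Elasticsearch 8.x"
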
View File

@@ -5,7 +5,7 @@
"package": { "package": {
"name": "endpoint", "name": "endpoint",
"title": "Elastic Defend", "title": "Elastic Defend",
"version": "9.0.2", "version": "8.18.1",
"requires_root": true "requires_root": true
}, },
"enabled": true, "enabled": true,

View File

@@ -21,7 +21,6 @@
   'azure_application_insights.app_state': 'azure.app_state',
   'azure_billing.billing': 'azure.billing',
   'azure_functions.metrics': 'azure.function',
-  'azure_ai_foundry.metrics': 'azure.ai_foundry',
   'azure_metrics.compute_vm_scaleset': 'azure.compute_vm_scaleset',
   'azure_metrics.compute_vm': 'azure.compute_vm',
   'azure_metrics.container_instance': 'azure.container_instance',

View File

@@ -86,7 +86,7 @@ if [[ -f $STATE_FILE_SUCCESS ]]; then
   latest_package_list=$(/usr/sbin/so-elastic-fleet-package-list)
   echo '{ "packages" : []}' > $BULK_INSTALL_PACKAGE_LIST
   rm -f $INSTALLED_PACKAGE_LIST
-  echo $latest_package_list | jq '{packages: [.items[] | {name: .name, latest_version: .version, installed_version: .installationInfo.version, subscription: .conditions.elastic.subscription }]}' >> $INSTALLED_PACKAGE_LIST
+  echo $latest_package_list | jq '{packages: [.items[] | {name: .name, latest_version: .version, installed_version: .savedObject.attributes.install_version, subscription: .conditions.elastic.subscription }]}' >> $INSTALLED_PACKAGE_LIST
   while read -r package; do
     # get package details

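For context on the jq change above: the filter reshapes the Fleet package-list response into a packages array, and the new side reads the installed version from .savedObject.attributes.install_version rather than .installationInfo.version. A sketch of the revised filter over a hand-written sample item (values invented; the real list comes from /usr/sbin/so-elastic-fleet-package-list):

    echo '{"items":[{"name":"endpoint","version":"8.18.1","savedObject":{"attributes":{"install_version":"8.17.0"}},"conditions":{"elastic":{"subscription":"basic"}}}]}' \
      | jq '{packages: [.items[] | {name: .name, latest_version: .version, installed_version: .savedObject.attributes.install_version, subscription: .conditions.elastic.subscription}]}'
    # => {"packages":[{"name":"endpoint","latest_version":"8.18.1","installed_version":"8.17.0","subscription":"basic"}]}
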
View File

@@ -47,7 +47,7 @@ if ! kafka_output=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L "http://l
     --arg KAFKACA "$KAFKACA" \
     --arg MANAGER_IP "{{ GLOBALS.manager_ip }}:9092" \
     --arg KAFKA_OUTPUT_VERSION "$KAFKA_OUTPUT_VERSION" \
-    '{"name":"grid-kafka", "id":"so-manager_kafka","type":"kafka","hosts":[ $MANAGER_IP ],"is_default":false,"is_default_monitoring":false,"config_yaml":"","ssl":{"certificate_authorities":[ $KAFKACA ],"certificate": $KAFKACRT ,"key":"","verification_mode":"full"},"proxy_id":null,"client_id":"Elastic","version": $KAFKA_OUTPUT_VERSION ,"compression":"none","auth_type":"ssl","partition":"round_robin","round_robin":{"group_events":10},"topic":"default-securityonion","headers":[{"key":"","value":""}],"timeout":30,"broker_timeout":30,"required_acks":1,"secrets":{"ssl":{"key": $KAFKAKEY }}}'
+    '{"name":"grid-kafka", "id":"so-manager_kafka","type":"kafka","hosts":[ $MANAGER_IP ],"is_default":false,"is_default_monitoring":false,"config_yaml":"","ssl":{"certificate_authorities":[ $KAFKACA ],"certificate": $KAFKACRT ,"key":"","verification_mode":"full"},"proxy_id":null,"client_id":"Elastic","version": $KAFKA_OUTPUT_VERSION ,"compression":"none","auth_type":"ssl","partition":"round_robin","round_robin":{"group_events":10},"topics":[{"topic":"default-securityonion"}],"headers":[{"key":"","value":""}],"timeout":30,"broker_timeout":30,"required_acks":1,"secrets":{"ssl":{"key": $KAFKAKEY }}}'
   )
   if ! response=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/outputs" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" --fail 2>/dev/null); then
     echo -e "\nFailed to setup Elastic Fleet output policy for Kafka...\n"
@@ -67,7 +67,7 @@ elif kafka_output=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L "http://l
     --arg ENABLED_DISABLED "$ENABLED_DISABLED"\
     --arg KAFKA_OUTPUT_VERSION "$KAFKA_OUTPUT_VERSION" \
     --argjson HOSTS "$HOSTS" \
-    '{"name":"grid-kafka","type":"kafka","hosts":$HOSTS,"is_default":$ENABLED_DISABLED,"is_default_monitoring":$ENABLED_DISABLED,"config_yaml":"","ssl":{"certificate_authorities":[ $KAFKACA ],"certificate": $KAFKACRT ,"key":"","verification_mode":"full"},"proxy_id":null,"client_id":"Elastic","version": $KAFKA_OUTPUT_VERSION ,"compression":"none","auth_type":"ssl","partition":"round_robin","round_robin":{"group_events":10},"topic":"default-securityonion","headers":[{"key":"","value":""}],"timeout":30,"broker_timeout":30,"required_acks":1,"secrets":{"ssl":{"key": $KAFKAKEY }}}'
+    '{"name":"grid-kafka","type":"kafka","hosts":$HOSTS,"is_default":$ENABLED_DISABLED,"is_default_monitoring":$ENABLED_DISABLED,"config_yaml":"","ssl":{"certificate_authorities":[ $KAFKACA ],"certificate": $KAFKACRT ,"key":"","verification_mode":"full"},"proxy_id":null,"client_id":"Elastic","version": $KAFKA_OUTPUT_VERSION ,"compression":"none","auth_type":"ssl","partition":"round_robin","round_robin":{"group_events":10},"topics":[{"topic":"default-securityonion"}],"headers":[{"key":"","value":""}],"timeout":30,"broker_timeout":30,"required_acks":1,"secrets":{"ssl":{"key": $KAFKAKEY }}}'
   )
   if ! response=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_kafka" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" --fail 2>/dev/null); then
     echo -e "\nFailed to force update to Elastic Fleet output policy for Kafka...\n"

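The functional change in both Kafka hunks is the topic field shape in the Fleet output body: a single topic string becomes a topics array of objects. In miniature, as a jq transform over an invented sample:

    echo '{"topic":"default-securityonion"}' | jq '{topics: [{topic: .topic}]}'
    # => {"topics":[{"topic":"default-securityonion"}]}
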
View File

@@ -1,6 +1,6 @@
 elasticsearch:
   enabled: false
-  version: 9.0.8
+  version: 8.18.8
   index_clean: true
   config:
     action:
@@ -857,11 +857,53 @@ elasticsearch:
       composed_of:
         - agent-mappings
         - dtc-agent-mappings
+        - base-mappings
+        - dtc-base-mappings
+        - client-mappings
+        - dtc-client-mappings
+        - container-mappings
+        - destination-mappings
+        - dtc-destination-mappings
+        - pb-override-destination-mappings
+        - dll-mappings
+        - dns-mappings
+        - dtc-dns-mappings
+        - ecs-mappings
+        - dtc-ecs-mappings
+        - error-mappings
+        - event-mappings
+        - dtc-event-mappings
+        - file-mappings
+        - dtc-file-mappings
+        - group-mappings
         - host-mappings
         - dtc-host-mappings
         - http-mappings
         - dtc-http-mappings
+        - log-mappings
         - metadata-mappings
+        - network-mappings
+        - dtc-network-mappings
+        - observer-mappings
+        - dtc-observer-mappings
+        - organization-mappings
+        - package-mappings
+        - process-mappings
+        - dtc-process-mappings
+        - related-mappings
+        - rule-mappings
+        - dtc-rule-mappings
+        - server-mappings
+        - service-mappings
+        - dtc-service-mappings
+        - source-mappings
+        - dtc-source-mappings
+        - pb-override-source-mappings
+        - threat-mappings
+        - tls-mappings
+        - url-mappings
+        - user_agent-mappings
+        - dtc-user_agent-mappings
         - common-settings
         - common-dynamic-mappings
       data_stream:

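For readers unfamiliar with composed_of: each entry names an Elasticsearch component template, and the enclosing index template merges them in list order. A minimal sketch of defining one component and referencing it (template names taken from the list above; the body and index pattern are invented, and a real grid would go through so-elasticsearch-query rather than bare curl):

    # define a component template (illustrative body)
    curl -s -XPUT 'localhost:9200/_component_template/base-mappings' \
      -H 'Content-Type: application/json' \
      -d '{"template":{"mappings":{"properties":{"message":{"type":"text"}}}}}'

    # merge it into an index template via composed_of
    curl -s -XPUT 'localhost:9200/_index_template/so-logs-example' \
      -H 'Content-Type: application/json' \
      -d '{"index_patterns":["logs-example-*"],"composed_of":["base-mappings"]}'
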
View File

@@ -1,90 +1,9 @@
 {
-  "description": "kratos",
-  "processors": [
-    {
-      "set": {
-        "field": "audience",
-        "value": "access",
-        "override": false,
-        "ignore_failure": true
-      }
-    },
-    {
-      "set": {
-        "field": "event.dataset",
-        "ignore_empty_value": true,
-        "ignore_failure": true,
-        "value": "kratos.{{{audience}}}",
-        "media_type": "text/plain"
-      }
-    },
-    {
-      "set": {
-        "field": "event.action",
-        "ignore_failure": true,
-        "copy_from": "msg"
-      }
-    },
-    {
-      "rename": {
-        "field": "http_request",
-        "target_field": "http.request",
-        "ignore_failure": true,
-        "ignore_missing": true
-      }
-    },
-    {
-      "rename": {
-        "field": "http_response",
-        "target_field": "http.response",
-        "ignore_failure": true,
-        "ignore_missing": true
-      }
-    },
-    {
-      "rename": {
-        "field": "http.request.path",
-        "target_field": "http.uri",
-        "ignore_failure": true,
-        "ignore_missing": true
-      }
-    },
-    {
-      "rename": {
-        "field": "http.request.method",
-        "target_field": "http.method",
-        "ignore_failure": true,
-        "ignore_missing": true
-      }
-    },
-    {
-      "rename": {
-        "field": "http.request.method",
-        "target_field": "http.method",
-        "ignore_failure": true,
-        "ignore_missing": true
-      }
-    },
-    {
-      "rename": {
-        "field": "http.request.query",
-        "target_field": "http.query",
-        "ignore_failure": true,
-        "ignore_missing": true
-      }
-    },
-    {
-      "rename": {
-        "field": "http.request.headers.user-agent",
-        "target_field": "http.useragent",
-        "ignore_failure": true,
-        "ignore_missing": true
-      }
-    },
-    {
-      "pipeline": {
-        "name": "common"
-      }
-    }
-  ]
+  "description" : "kratos",
+  "processors" : [
+    {"set":{"field":"audience","value":"access","override":false,"ignore_failure":true}},
+    {"set":{"field":"event.dataset","ignore_empty_value":true,"ignore_failure":true,"value":"kratos.{{{audience}}}","media_type":"text/plain"}},
+    {"set":{"field":"event.action","ignore_failure":true,"copy_from":"msg" }},
+    { "pipeline": { "name": "common" } }
+  ]
 }

View File

@@ -15,7 +15,7 @@ set -e
 if [ ! -f /opt/so/saltstack/local/salt/elasticsearch/cacerts ]; then
   docker run -v /etc/pki/ca.crt:/etc/ssl/ca.crt --name so-elasticsearchca --user root --entrypoint jdk/bin/keytool {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-elasticsearch:$ELASTIC_AGENT_TARBALL_VERSION -keystore /usr/share/elasticsearch/jdk/lib/security/cacerts -alias SOSCA -import -file /etc/ssl/ca.crt -storepass changeit -noprompt
   docker cp so-elasticsearchca:/usr/share/elasticsearch/jdk/lib/security/cacerts /opt/so/saltstack/local/salt/elasticsearch/cacerts
-  docker cp so-elasticsearchca:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem /opt/so/saltstack/local/salt/elasticsearch/tls-ca-bundle.pem
+  docker cp so-elasticsearchca:/etc/ssl/certs/ca-certificates.crt /opt/so/saltstack/local/salt/elasticsearch/tls-ca-bundle.pem
   docker rm so-elasticsearchca
   echo "" >> /opt/so/saltstack/local/salt/elasticsearch/tls-ca-bundle.pem
   echo "sosca" >> /opt/so/saltstack/local/salt/elasticsearch/tls-ca-bundle.pem

View File

@@ -75,7 +75,6 @@ kratosconfig:
     - group: 928
     - mode: 600
     - template: jinja
-    - show_changes: False
     - defaults:
         KRATOSMERGED: {{ KRATOSMERGED }}

View File

@@ -46,7 +46,6 @@ kratos:
       ui_url: https://URL_BASE/
     login:
       ui_url: https://URL_BASE/login/
-      lifespan: 60m
     error:
       ui_url: https://URL_BASE/login/
     registration:

View File

@@ -182,10 +182,6 @@ kratos:
         global: True
         advanced: True
         helpLink: kratos.html
-      lifespan:
-        description: Defines the duration that a login form will remain valid.
-        global: True
-        helpLink: kratos.html
     error:
       ui_url:
         description: User accessible URL containing the Security Onion login page. Leave as default to ensure proper operation.

View File

@@ -63,7 +63,7 @@ logstash:
   settings:
     lsheap: 500m
   config:
-    api_x_http_x_host: 0.0.0.0
+    http_x_host: 0.0.0.0
     path_x_logs: /var/log/logstash
     pipeline_x_workers: 1
     pipeline_x_batch_x_size: 125

View File

@@ -5,10 +5,10 @@ input {
     codec => es_bulk
     request_headers_target_field => client_headers
     remote_host_target_field => client_host
-    ssl_enabled => true
+    ssl => true
     ssl_certificate_authorities => ["/usr/share/filebeat/ca.crt"]
     ssl_certificate => "/usr/share/logstash/filebeat.crt"
     ssl_key => "/usr/share/logstash/filebeat.key"
-    ssl_client_authentication => "required"
+    ssl_verify_mode => "peer"
   }
 }

View File

@@ -2,11 +2,11 @@ input {
   elastic_agent {
     port => 5055
     tags => [ "elastic-agent", "input-{{ GLOBALS.hostname }}" ]
-    ssl_enabled => true
+    ssl => true
     ssl_certificate_authorities => ["/usr/share/filebeat/ca.crt"]
     ssl_certificate => "/usr/share/logstash/elasticfleet-logstash.crt"
     ssl_key => "/usr/share/logstash/elasticfleet-logstash.key"
-    ssl_client_authentication => "required"
+    ssl_verify_mode => "force_peer"
     ecs_compatibility => v8
   }
 }

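On the verify-mode values in these two inputs: with the older option names, ssl_verify_mode => "peer" validates a client certificate when one is presented, while "force_peer" also rejects clients that present none, so the agent input on 5055 keeps the stricter behavior. One way to exercise the stricter input by hand (paths are hypothetical; any certificate signed by the grid CA would do):

    openssl s_client -connect localhost:5055 \
      -cert /etc/pki/filebeat.crt -key /etc/pki/filebeat.key -CAfile /etc/pki/ca.crt
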
View File

@@ -2,7 +2,7 @@ input {
   elastic_agent {
     port => 5056
     tags => [ "elastic-agent", "fleet-lumberjack-input" ]
-    ssl_enabled => true
+    ssl => true
     ssl_certificate => "/usr/share/logstash/elasticfleet-lumberjack.crt"
     ssl_key => "/usr/share/logstash/elasticfleet-lumberjack.key"
     ecs_compatibility => v8

View File

@@ -8,8 +8,8 @@ output {
       document_id => "%{[metadata][_id]}"
       index => "so-ip-mappings"
       silence_errors_in_log => ["version_conflict_engine_exception"]
-      ssl_enabled => true
-      ssl_verification_mode => "none"
+      ssl => true
+      ssl_certificate_verification => false
     }
   }
   else {
@@ -25,8 +25,8 @@ output {
       document_id => "%{[metadata][_id]}"
       pipeline => "%{[metadata][pipeline]}"
       silence_errors_in_log => ["version_conflict_engine_exception"]
-      ssl_enabled => true
-      ssl_verification_mode => "none"
+      ssl => true
+      ssl_certificate_verification => false
     }
   }
   else {
@@ -37,8 +37,8 @@ output {
       user => "{{ ES_USER }}"
       password => "{{ ES_PASS }}"
       pipeline => "%{[metadata][pipeline]}"
-      ssl_enabled => true
-      ssl_verification_mode => "none"
+      ssl => true
+      ssl_certificate_verification => false
     }
   }
 }
@@ -49,8 +49,8 @@ output {
       data_stream => true
       user => "{{ ES_USER }}"
       password => "{{ ES_PASS }}"
-      ssl_enabled => true
-      ssl_verification_mode=> "none"
+      ssl => true
+      ssl_certificate_verification => false
     }
   }
 }

View File

@@ -13,8 +13,8 @@ output {
       user => "{{ ES_USER }}"
       password => "{{ ES_PASS }}"
       index => "endgame-%{+YYYY.MM.dd}"
-      ssl_enabled => true
-      ssl_verification_mode => "none"
+      ssl => true
+      ssl_certificate_verification => false
     }
   }
 }

View File

@@ -56,7 +56,7 @@ logstash:
       helpLink: logstash.html
       global: False
   config:
-    api_x_http_x_host:
+    http_x_host:
       description: Host interface to listen to connections.
       helpLink: logstash.html
       readonly: True

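A note on the key names in this hunk and the matching defaults change earlier: in these pillar files, _x_ stands in for a dot in the rendered logstash.yml, so http_x_host becomes http.host and the old api_x_http_x_host becomes api.http.host. A one-line sketch of that substitution (assuming a plain key rewrite, which is how the convention is documented):

    echo "http_x_host: 0.0.0.0" | sed 's/_x_/./g'
    # => http.host: 0.0.0.0
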
View File

@@ -87,12 +87,6 @@ check_err() {
     113)
       echo 'No route to host'
       ;;
-    160)
-      echo 'Incompatiable Elasticsearch upgrade'
-      ;;
-    161)
-      echo 'Required intermediate Elasticsearch upgrade not complete'
-      ;;
     *)
       echo 'Unhandled error'
       echo "$err_msg"
@@ -433,7 +427,7 @@ preupgrade_changes() {
[[ "$INSTALLEDVERSION" == 2.4.170 ]] && up_to_2.4.180 [[ "$INSTALLEDVERSION" == 2.4.170 ]] && up_to_2.4.180
[[ "$INSTALLEDVERSION" == 2.4.180 ]] && up_to_2.4.190 [[ "$INSTALLEDVERSION" == 2.4.180 ]] && up_to_2.4.190
[[ "$INSTALLEDVERSION" == 2.4.190 ]] && up_to_2.4.200 [[ "$INSTALLEDVERSION" == 2.4.190 ]] && up_to_2.4.200
[[ "$INSTALLEDVERSION" == 2.4.200 ]] && up_to_2.4.210 [[ "$INSTALLEDVERSION" == 2.4.200 ]] && up_to_2.4.201
true true
} }
@@ -466,7 +460,7 @@ postupgrade_changes() {
[[ "$POSTVERSION" == 2.4.170 ]] && post_to_2.4.180 [[ "$POSTVERSION" == 2.4.170 ]] && post_to_2.4.180
[[ "$POSTVERSION" == 2.4.180 ]] && post_to_2.4.190 [[ "$POSTVERSION" == 2.4.180 ]] && post_to_2.4.190
[[ "$POSTVERSION" == 2.4.190 ]] && post_to_2.4.200 [[ "$POSTVERSION" == 2.4.190 ]] && post_to_2.4.200
[[ "$POSTVERSION" == 2.4.200 ]] && post_to_2.4.210 [[ "$POSTVERSION" == 2.4.200 ]] && post_to_2.4.201
true true
} }
@@ -623,6 +617,9 @@ post_to_2.4.180() {
 }
 post_to_2.4.190() {
+  echo "Regenerating Elastic Agent Installers"
+  /sbin/so-elastic-agent-gen-installers
   # Only need to update import / eval nodes
   if [[ "$MINION_ROLE" == "import" ]] || [[ "$MINION_ROLE" == "eval" ]]; then
     update_import_fleet_output
@@ -650,15 +647,9 @@ post_to_2.4.200() {
   POSTVERSION=2.4.200
 }
-post_to_2.4.210() {
-  echo "Rolling over Kratos index to apply new index template"
-  rollover_index "logs-kratos-so"
-  echo "Regenerating Elastic Agent Installers"
-  /sbin/so-elastic-agent-gen-installers
-  POSTVERSION=2.4.210
+post_to_2.4.201() {
+  echo "Nothing to apply"
+  POSTVERSION=2.4.201
 }
 repo_sync() {
@@ -922,7 +913,9 @@ up_to_2.4.180() {
 }
 up_to_2.4.190() {
-  echo "Nothing to do for 2.4.190"
+  # Elastic Update for this release, so download Elastic Agent files
+  determine_elastic_agent_upgrade
   INSTALLEDVERSION=2.4.190
 }
@@ -935,13 +928,6 @@ up_to_2.4.200() {
   INSTALLEDVERSION=2.4.200
 }
-up_to_2.4.210() {
-  # Elastic Update for this release, so download Elastic Agent files
-  determine_elastic_agent_upgrade
-  INSTALLEDVERSION=2.4.210
-}
 add_hydra_pillars() {
   mkdir -p /opt/so/saltstack/local/pillar/hydra
   touch /opt/so/saltstack/local/pillar/hydra/soc_hydra.sls
@@ -1338,6 +1324,12 @@ so-yaml.py removelistitem /etc/salt/master file_roots.base /opt/so/rules/nids
 }
+up_to_2.4.201() {
+  echo "Nothing to do for 2.4.201"
+  INSTALLEDVERSION=2.4.201
+}
 determine_elastic_agent_upgrade() {
   if [[ $is_airgap -eq 0 ]]; then
     update_elastic_agent_airgap
@@ -1633,242 +1625,6 @@ verify_latest_update_script() {
   fi
 }
-verify_es_version_compatibility() {
-  local es_required_version_statefile="/opt/so/state/so_es_required_upgrade_version.txt"
-  local es_verification_script="/tmp/so_intermediate_upgrade_verification.sh"
-  # supported upgrade paths for SO-ES versions
-  declare -A es_upgrade_map=(
-    ["8.14.3"]="8.17.3 8.18.4 8.18.6 8.18.8"
-    ["8.17.3"]="8.18.4 8.18.6 8.18.8"
-    ["8.18.4"]="8.18.6 8.18.8 9.0.8"
-    ["8.18.6"]="8.18.8 9.0.8"
-    ["8.18.8"]="9.0.8"
-  )
-  # Elasticsearch MUST upgrade through these versions
-  declare -A es_to_so_version=(
-    ["8.18.8"]="2.4.190-20251024"
-  )
-  # Get current Elasticsearch version
-  if es_version_raw=$(so-elasticsearch-query / --fail --retry 5 --retry-delay 10); then
-    es_version=$(echo "$es_version_raw" | jq -r '.version.number' )
-  else
-    echo "Could not determine current Elasticsearch version to validate compatibility with post soup Elasticsearch version."
-    exit 160
-  fi
-  if ! target_es_version=$(so-yaml.py get $UPDATE_DIR/salt/elasticsearch/defaults.yaml elasticsearch.version | sed -n '1p'); then
-    # so-yaml.py failed to get the ES version from upgrade versions elasticsearch/defaults.yaml file. Likely they are upgrading to an SO version older than 2.4.110 prior to the ES version pinning and should be OKAY to continue with the upgrade.
-    # if so-yaml.py failed to get the ES version AND the version we are upgrading to is newer than 2.4.110 then we should bail
-    if [[ $(cat $UPDATE_DIR/VERSION | cut -d'.' -f3) > 110 ]]; then
-      echo "Couldn't determine the target Elasticsearch version (post soup version) to ensure compatibility with current Elasticsearch version. Exiting"
-      exit 160
-    fi
-    # allow upgrade to version < 2.4.110 without checking ES version compatibility
-    return 0
-  fi
-  # if this statefile exists then we have done an intermediate upgrade and we need to ensure that ALL ES nodes have been upgraded to the version in the statefile before allowing soup to continue
-  if [[ -f "$es_required_version_statefile" ]]; then
-    # required so verification script should have already been created
-    if [[ ! -f "$es_verification_script" ]]; then
-      create_intermediate_upgrade_verification_script $es_verification_script
-    fi
-    # create script using version in statefile
-    local es_required_version_statefile_value=$(cat $es_required_version_statefile)
-    timeout --foreground 3600 bash "$es_verification_script" "$es_required_version_statefile_value" "$es_required_version_statefile"
-    if [[ $? -ne 0 ]]; then
-      echo -e "\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
-      echo "A previous required intermediate Elasticsearch upgrade to $es_required_version_statefile_value has yet to successfully complete across the grid. Please allow time for all Searchnodes/Heavynodes to have upgraded Elasticsearch to $es_required_version_statefile_value before running soup again to avoid potential data loss!"
-      echo -e "\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
-      exit 161
-    fi
-  fi
-  if [[ " ${es_upgrade_map[$es_version]} " =~ " $target_es_version " || "$es_version" == "$target_es_version" ]]; then
-    # supported upgrade
-    return 0
-  else
-    compatible_versions=${es_upgrade_map[$es_version]}
-    next_step_so_version=${es_to_so_version[${compatible_versions##* }]}
-    echo -e "\n##############################################################################################################################\n"
-    echo -e "You are currently running Security Onion $INSTALLEDVERSION. You will need to update to version $next_step_so_version before updating to $(cat $UPDATE_DIR/VERSION).\n"
-    echo "${compatible_versions##* }" > "$es_required_version_statefile"
-    # We expect to upgrade to the latest compatiable minor version of ES
-    create_intermediate_upgrade_verification_script $es_verification_script
-    if [[ $is_airgap -eq 0 ]]; then
-      echo "You can download the $next_step_so_version ISO image from https://download.securityonion.net/file/securityonion/securityonion-$next_step_so_version.iso"
-      echo "*** Once you have updated to $next_step_so_version, you can then run soup again to update to $(cat $UPDATE_DIR/VERSION). ***"
-      echo -e "\n##############################################################################################################################\n"
-      exit 160
-    else
-      # preserve BRANCH value if set originally
-      if [[ -n "$BRANCH" ]]; then
-        local originally_requested_so_version="$BRANCH"
-      else
-        local originally_requested_so_version="2.4/main"
-      fi
-      echo "Starting automated intermediate upgrade to $next_step_so_version."
-      echo "After completion, the system will automatically attempt to upgrade to the latest version."
-      echo -e "\n##############################################################################################################################\n"
-      exec bash -c "BRANCH=$next_step_so_version soup -y && BRANCH=$next_step_so_version soup -y && \
-        echo -e \"\n##############################################################################################################################\n\" && \
-        echo -e \"Verifying Elasticsearch was successfully upgraded to ${compatible_versions##* } across the grid. This part can take a while as Searchnodes/Heavynodes sync up with the Manager! \n\nOnce verification completes the next soup will begin automatically. If verification takes longer than 1 hour it will stop waiting and your grid will remain at $next_step_so_version. Allowing for all Searchnodes/Heavynodes to upgrade Elasticsearch to the required version on their own time.\n\" \
-        && timeout --foreground 3600 bash /tmp/so_intermediate_upgrade_verification.sh ${compatible_versions##* } $es_required_version_statefile && \
-        echo -e \"\n##############################################################################################################################\n\" \
-        && BRANCH=$originally_requested_so_version soup -y && BRANCH=$originally_requested_so_version soup -y"
-    fi
-  fi
-}
-create_intermediate_upgrade_verification_script() {
-  # After an intermediate upgrade, verify that ALL nodes running Elasticsearch are at the expected version BEFORE proceeding to the next upgrade step. This is a CRITICAL step
-  local verification_script="$1"
-  cat << 'EOF' > "$verification_script"
-#!/bin/bash
-SOUP_INTERMEDIATE_UPGRADE_FAILURES_LOG_FILE="/root/so_intermediate_upgrade_verification_failures.log"
-CURRENT_TIME=$(date +%Y%m%d.%H%M%S)
-EXPECTED_ES_VERSION="$1"
-if [[ -z "$EXPECTED_ES_VERSION" ]]; then
-  echo -e "\nExpected Elasticsearch version not provided. Usage: $0 <expected_es_version>"
-  exit 1
-fi
-if [[ -f "$SOUP_INTERMEDIATE_UPGRADE_FAILURES_LOG_FILE" ]]; then
-  mv "$SOUP_INTERMEDIATE_UPGRADE_FAILURES_LOG_FILE" "$SOUP_INTERMEDIATE_UPGRADE_FAILURES_LOG_FILE.$CURRENT_TIME"
-fi
-check_heavynodes_es_version() {
-  # Check if heavynodes are in this grid
-  if ! salt-key -l accepted | grep -q 'heavynode$'; then
-    # No heavynodes, skip version check
-    echo "No heavynodes detected in this Security Onion deployment. Skipping heavynode Elasticsearch version verification."
-    return 0
-  fi
-  echo -e "\nOne or more heavynodes detected. Verifying their Elasticsearch versions."
-  local retries=20
-  local retry_count=0
-  local delay=180
-  local success=1
-  while [[ $retry_count -lt $retries ]]; do
-    # keep stderr with variable for logging
-    heavynode_versions=$(salt -C 'G@role:so-heavynode' cmd.run 'so-elasticsearch-query / --retry 3 --retry-delay 10 | jq ".version.number"' shell=/bin/bash --out=json 2>&1)
-    local exit_status=$?
-    # Check that all heavynodes returned good data
-    if [[ $exit_status -ne 0 ]]; then
-      echo "Failed to retrieve Elasticsearch version from one or more heavynodes... Retrying in $delay seconds. Attempt $((retry_count + 1)) of $retries."
-      ((retry_count++))
-      sleep $delay
-      continue
-    else
-      if echo "$heavynode_versions" | jq -s --arg expected "\"$EXPECTED_ES_VERSION\"" --exit-status 'all(.[]; . | to_entries | all(.[]; .value == $expected))' > /dev/null; then
-        echo -e "\nAll heavynodes are at the expected Elasticsearch version $EXPECTED_ES_VERSION."
-        return 0
-      else
-        echo "One or more heavynodes is not at the expected Elasticsearch version $EXPECTED_ES_VERSION. Rechecking in $delay seconds. Attempt $((retry_count + 1)) of $retries."
-        ((retry_count++))
-        sleep $delay
-        continue
-      fi
-    fi
-  done
-  echo -e "\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
-  echo "One or more heavynodes is not at the expected Elasticsearch version $EXPECTED_ES_VERSION."
-  echo "Current versions:"
-  echo "$heavynode_versions" | jq -s 'add'
-  echo "$heavynode_versions" | jq -s 'add' >> "$SOUP_INTERMEDIATE_UPGRADE_FAILURES_LOG_FILE"
-  echo -e "\n Stopping automatic upgrade to latest Security Onion version. Heavynodes must ALL be at Elasticsearch version $EXPECTED_ES_VERSION before proceeding with the next upgrade step to avoid potential data loss!"
-  echo -e "\n Heavynodes will upgrade themselves to Elasticsearch $EXPECTED_ES_VERSION on their own, but this process can take a long time depending on network link between Manager and Heavynodes."
-  echo -e "\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
-  return 1
-}
-check_searchnodes_es_version() {
-  local retries=20
-  local retry_count=0
-  local delay=180
-  local success=1
-  while [[ $retry_count -lt $retries ]]; do
-    # keep stderr with variable for logging
-    cluster_versions=$(so-elasticsearch-query _nodes/_all/version --retry 5 --retry-delay 10 2>&1)
-    local exit_status=$?
-    if [[ $exit_status -ne 0 ]]; then
-      echo "Failed to retrieve Elasticsearch versions from searchnodes... Retrying in $delay seconds. Attempt $((retry_count + 1)) of $retries."
-      ((retry_count++))
-      sleep $delay
-      continue
-    else
-      if echo "$cluster_versions" | jq --arg expected "$EXPECTED_ES_VERSION" --exit-status '.nodes | to_entries | all(.[].value.version; . == $expected)' > /dev/null; then
-        echo "All Searchnodes are at the expected Elasticsearch version $EXPECTED_ES_VERSION."
-        return 0
-      else
-        echo "One or more Searchnodes is not at the expected Elasticsearch version $EXPECTED_ES_VERSION. Rechecking in $delay seconds. Attempt $((retry_count + 1)) of $retries."
-        ((retry_count++))
-        sleep $delay
-        continue
-      fi
-    fi
-  done
-  echo -e "\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
-  echo "One or more Searchnodes is not at the expected Elasticsearch version $EXPECTED_ES_VERSION."
-  echo "Current versions:"
-  echo "$cluster_versions" | jq '.nodes | to_entries | map({(.value.name): .value.version}) | sort | add'
-  echo "$cluster_versions" >> "$SOUP_INTERMEDIATE_UPGRADE_FAILURES_LOG_FILE"
-  echo -e "\nStopping automatic upgrade to latest version. Searchnodes must ALL be at Elasticsearch version $EXPECTED_ES_VERSION before proceeding with the next upgrade step to avoid potential data loss!"
-  echo -e "\nSearchnodes will upgrade themselves to Elasticsearch $EXPECTED_ES_VERSION on their own, but this process can take a while depending on cluster size / network link between Manager and Searchnodes."
-  echo -e "\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
-  echo "$cluster_versions" > "$SOUP_INTERMEDIATE_UPGRADE_FAILURES_LOG_FILE"
-  return 1
-}
-# Need to add a check for heavynodes and ensure all heavynodes get their own "cluster" upgraded before moving on to final upgrade.
-check_searchnodes_es_version || exit 1
-check_heavynodes_es_version || exit 1
-# Remove required version state file after successful verification
-rm -f "$2"
-exit 0
-EOF
-}
 # Keeping this block in case we need to do a hotfix that requires salt update
 apply_hotfix() {
   if [[ "$INSTALLEDVERSION" == "2.4.20" ]] ; then
@@ -1965,8 +1721,6 @@ main() {
echo "Verifying we have the latest soup script." echo "Verifying we have the latest soup script."
verify_latest_update_script verify_latest_update_script
verify_es_version_compatibility
echo "Let's see if we need to update Security Onion." echo "Let's see if we need to update Security Onion."
upgrade_check upgrade_check
upgrade_space upgrade_space

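One idiom worth noting in the deleted verify_es_version_compatibility function above: supported upgrade paths live in a bash associative array of space-separated version strings, and membership is tested by padding both sides with spaces before the regex match. A standalone sketch of the pattern (versions copied from the deleted map; the padding prevents partial matches like 8.18.4 matching inside 8.18.40):

    #!/bin/bash
    # whitespace-padded membership test, as used by the deleted compatibility check
    declare -A es_upgrade_map=( ["8.18.6"]="8.18.8 9.0.8" )
    current="8.18.6" target="9.0.8"
    if [[ " ${es_upgrade_map[$current]} " =~ " $target " ]]; then
      echo "supported upgrade path: $current -> $target"
    fi
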
View File

@@ -1,91 +0,0 @@
-Onion AI Session Report
-==========================
-## Session Details
-**Session ID:** {{.Session.SessionId}}
-**Title:** {{.Session.Title}}
-**Created:** {{formatDateTime "Mon Jan 02 15:04:05 -0700 2006" .Session.CreateTime}}
-**Updated:** {{formatDateTime "Mon Jan 02 15:04:05 -0700 2006" .Session.UpdateTime}}
-{{ if .Session.DeleteTime }}
-**Deleted:** {{ formatDateTime "Mon Jan 02 15:04:05 -0700 2006" .Session.DeleteTime}}
-{{ end }}
-**User ID:** {{getUserDetail "email" .Session.UserId}}
-## Session Usage
-**Total Input Tokens** {{.Session.Usage.TotalInputTokens}}
-**Total Output Tokens** {{.Session.Usage.TotalOutputTokens}}
-**Total Credits:** {{.Session.Usage.TotalCredits}}
-**Total Messages:** {{.Session.Usage.TotalMessages}}
-## Messages
-{{ range $index, $msg := sortAssistantMessages "CreateTime" "asc" .History }}
-#### Message {{ add $index 1 }}
-**Created:** {{formatDateTime "Mon Jan 02 15:04:05 -0700 2006" $msg.CreateTime}}
-**User ID:** {{getUserDetail "email" $msg.UserId}}
-**Role:** {{$msg.Message.Role}}
-{{ range $i, $block := $msg.Message.ContentBlocks }}
----
-{{ if eq $block.Type "text" }}
-**Text:** {{ stripEmoji $block.Text }}
-{{ else if eq $block.Type "tool_use" }}
-**Tool:** {{ $block.Name }}
-{{ if $block.Input }}
-**Parameters:**
-{{ range $key, $value := parseJSON $block.Input }}
-{{ if eq $key "limit" }}- {{ $key }}: {{ $value }}
-{{ else }}- {{ $key }}: "{{ $value }}"
-{{ end }}{{ end }}{{ end }}
-{{ else if $block.ToolResult }}
-**Tool Result:**
-{{ if $block.ToolResult.Content }}
-{{ range $j, $contentBlock := $block.ToolResult.Content }}
-{{ if gt $j 0 }}
----
-{{ end }}
-{{ if $contentBlock.Text }}
-{{ if $block.ToolResult.IsError }}
-**Error:** {{ $contentBlock.Text }}
-{{ else }}
-{{ $contentBlock.Text }}
-{{ end }}
-{{ else if $contentBlock.Json }}
-```json
-{{ toJSON $contentBlock.Json }}
-```
-{{ end }}{{ end }}
-{{ end }}{{ end }}{{ end }}
-{{ if eq $msg.Message.Role "assistant" }}{{ if $msg.Message.Usage }}
----
-**Message Usage:**
-- Input Tokens: {{$msg.Message.Usage.InputTokens}}
-- Output Tokens: {{$msg.Message.Usage.OutputTokens}}
-- Credits: {{$msg.Message.Usage.Credits}}
-{{end}}{{end}}
----
-{{end}}

View File

@@ -357,7 +357,7 @@ sensoroni:
   reports:
     standard:
       case_report__md:
-        title: Case Report Template
+        title: Case report Template
         description: The template used when generating a case report. Supports markdown format.
         file: True
         global: True
@@ -370,13 +370,6 @@ sensoroni:
         global: True
         syntax: md
         helpLink: reports.html
-      assistant_session_report__md:
-        title: Assistant Session Report Template
-        description: The template used when generating an assistant session report. Supports markdown format.
-        file: True
-        global: True
-        syntax: md
-        helplink: reports.html
     custom:
       generic_report1__md:
         title: Custom Report 1

View File

@@ -115,16 +115,16 @@ soc:
     ':kratos:':
       - soc_timestamp
       - event.dataset
-      - http.request.headers.x-real-ip
+      - http_request.headers.x-real-ip
       - user.name
-      - http.useragent
+      - http_request.headers.user-agent
       - msg
     ':hydra:':
       - soc_timestamp
       - event.dataset
-      - http.request.headers.x-real-ip
+      - http_request.headers.x-real-ip
       - user.name
-      - http.useragent
+      - http_request.headers.user-agent
       - msg
     '::conn':
       - soc_timestamp
@@ -1747,7 +1747,7 @@ soc:
         showSubtitle: true
       - name: SOC - Auth
         description: Users authenticated to SOC grouped by IP address and identity
-        query: 'event.dataset:kratos.audit AND msg:*authenticated* | groupby http.request.headers.x-real-ip user.name'
+        query: 'event.dataset:kratos.audit AND msg:*authenticated* | groupby http_request.headers.x-real-ip user.name'
         showSubtitle: true
       - name: SOC - App
         description: Logs generated by the Security Onion Console (SOC) server and modules
@@ -2027,10 +2027,10 @@ soc:
         query: '* | groupby event.category | groupby -sankey event.category event.module | groupby event.module | groupby -sankey event.module event.dataset | groupby event.dataset | groupby observer.name | groupby host.name | groupby source.ip | groupby destination.ip | groupby destination.port'
       - name: SOC Logins
         description: SOC (Security Onion Console) logins
-        query: 'event.dataset:kratos.audit AND msg:*authenticated* | groupby http.request.headers.x-real-ip | groupby -sankey http.request.headers.x-real-ip user.name | groupby user.name | groupby http.useragent'
+        query: 'event.dataset:kratos.audit AND msg:*authenticated* | groupby http_request.headers.x-real-ip | groupby -sankey http_request.headers.x-real-ip user.name | groupby user.name | groupby http_request.headers.user-agent'
       - name: SOC Login Failures
         description: SOC (Security Onion Console) login failures
-        query: 'event.dataset:kratos.audit AND msg:*Encountered*self-service*login*error* | groupby user.name | groupby http.request.headers.x-real-ip | groupby -sankey http.request.headers.x-real-ip http.useragent | groupby http.useragent'
+        query: 'event.dataset:kratos.audit AND msg:*Encountered*self-service*login*error* | groupby user.name | groupby http_request.headers.x-real-ip | groupby -sankey http_request.headers.x-real-ip http_request.headers.user-agent | groupby http_request.headers.user-agent'
       - name: Alerts
         description: Overview of all alerts
         query: 'tags:alert | groupby event.module* | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby rule.name | groupby event.severity | groupby destination.as.organization.name'

View File

@@ -652,6 +652,7 @@ soc:
     assistant:
       apiUrl:
         description: The URL of the AI gateway.
+        advanced: True
         global: True
       healthTimeoutSeconds:
         description: Timeout in seconds for the Onion AI health check.