Compare commits


82 Commits

Author SHA1 Message Date
Matthew Wright
c99dd4e44f Merge pull request #15367 from Security-Onion-Solutions/mwright/assistant-case-reports 2026-01-08 15:33:53 -05:00
Jorge Reyes
541b8b288d Merge pull request #15363 from Security-Onion-Solutions/reyesj2/elastic9-autosoup
ES 9.0.8
2026-01-08 14:19:19 -06:00
Matthew Wright
db168a0452 update case report for attached ai sessions 2026-01-08 13:59:51 -05:00
reyesj2
aa96cf44d4 increase timeout commands timeout to account for time taken by salt minions to return data.
add note informing user a previously required ES upgrade was detected and being verified before soup continues
2026-01-07 19:26:46 -06:00
reyesj2
0d59c35d2a phrasing/typo 2026-01-07 19:20:27 -06:00
reyesj2
8463bde90d dont capture stderr from salt command failure 'ERROR: Minions returned with non-zero exit code' 2026-01-07 19:19:26 -06:00
reyesj2
150c31009e make sure so-elasticsearch-query exits non-zero on failure 2026-01-07 19:18:20 -06:00
reyesj2
ee66d6c7d1 Merge branch 'reyesj2/elastic9-autosoup' of github.com:Security-Onion-Solutions/securityonion into reyesj2/elastic9-autosoup 2026-01-07 14:50:21 -06:00
reyesj2
3effd30f7e unused var 2026-01-07 14:49:19 -06:00
Jorge Reyes
c075b5a1a7 Merge branch '2.4/dev' into reyesj2/elastic9-autosoup 2026-01-07 10:33:25 -06:00
reyesj2
cb1e59fa49 Merge branch '2.4/dev' of github.com:Security-Onion-Solutions/securityonion into reyesj2/elastic9-autosoup 2026-01-07 10:30:45 -06:00
reyesj2
588aa435ec update version 2026-01-07 10:21:36 -06:00
reyesj2
752c764066 autosoup preserve branch setting if set originally 2026-01-07 10:03:46 -06:00
reyesj2
af604c2ea8 autosoup functionality for non-airgap 2026-01-07 09:45:26 -06:00
Matthew Wright
605797c86a Merge pull request #15355 from Security-Onion-Solutions/mwright/session-reports
Assistant: Session Report Template
2026-01-06 13:58:18 -05:00
Jason Ertel
1ee5b1611a Merge pull request #15359 from Security-Onion-Solutions/jertel/wip
suppress config diffs to avoid false positive errors
2026-01-06 12:52:59 -05:00
Jason Ertel
5028729e4c suppress config diffs to avoid false positive errors 2026-01-06 12:50:18 -05:00
Jason Ertel
ab00fa8809 Merge pull request #15358 from Security-Onion-Solutions/jertel/wip
exempt kratos online check
2026-01-06 09:50:03 -05:00
Jason Ertel
2d705e7caa exempt kratos online check 2026-01-06 09:47:35 -05:00
Jorge Reyes
3b349b9803 Merge pull request #15353 from Security-Onion-Solutions/reyesj2/kratos
update kratos index template
2026-01-05 14:56:08 -06:00
reyesj2
f2b7ffe0eb align with ECS fieldnames 2026-01-05 14:48:10 -06:00
Matthew Wright
3a410eed1a assistant session reports 2026-01-05 14:45:02 -05:00
reyesj2
a53619f10f update kratos index template 2026-01-05 12:22:01 -06:00
reyesj2
893aaafa1b foxtrot 2025-12-29 15:54:06 -06:00
reyesj2
33c34cdeca Merge branch '2.4/dev' of github.com:Security-Onion-Solutions/securityonion into reyesj2/elastic9-autosoup 2025-12-29 15:49:49 -06:00
reyesj2
9b411867df update version 2025-12-29 10:27:38 -06:00
Jason Ertel
fd1596b3a0 Merge pull request #15347 from Security-Onion-Solutions/jertel/wip
expose login form lifespan in config scr
2025-12-24 15:09:36 -05:00
Jason Ertel
b05de22f58 expose login form lifespan in config scr 2025-12-24 14:39:55 -05:00
reyesj2
f666ad600f accept same version 'upgrades' 2025-12-23 16:27:22 -06:00
reyesj2
9345718967 verify pre-soup ES version is directly upgradable to post-soup ES version. 2025-12-19 16:15:05 -06:00
reyesj2
6c879cbd13 soup changes 2025-12-17 19:08:21 -06:00
reyesj2
089b5aaf44 Merge branch 'reyesj2/elastic9' of github.com:Security-Onion-Solutions/securityonion into reyesj2/elastic9 2025-12-17 16:03:18 -06:00
reyesj2
b61885add5 Fix Kafka output policy - singular topic key 2025-12-17 16:03:12 -06:00
Jorge Reyes
5cb1e284af Update VERSION 2025-12-17 06:54:32 -06:00
reyesj2
e3a4f0873e update expected version for elastalert state 2025-12-17 06:53:08 -06:00
reyesj2
7977a020ac elasticsearch 9.0.8 2025-12-16 16:03:47 -06:00
coreyogburn
1d63269883 Merge pull request #15323 from Security-Onion-Solutions/cogburn/non-advanced-apiurl
Un-Advanced Assistant ApiUrl
2025-12-16 12:08:14 -07:00
Corey Ogburn
dd8027480b Un-Advanced Assistant ApiUrl 2025-12-16 12:02:01 -07:00
Mike Reeves
c45bd77e44 Merge pull request #15320 from Security-Onion-Solutions/TOoSmOotH-patch-1
Update VERSION
2025-12-16 11:25:35 -05:00
Mike Reeves
032e0abd61 Update 2-4.yml 2025-12-16 11:23:53 -05:00
Mike Reeves
8509d1e454 Update VERSION 2025-12-16 11:23:12 -05:00
Mike Reeves
ddd6935e50 Merge pull request #15318 from Security-Onion-Solutions/2.4.200
2.4.200
2025-12-16 09:15:32 -05:00
Mike Reeves
5588a56b24 2.4.200 2025-12-16 09:07:29 -05:00
Mike Reeves
12aed6e280 Merge pull request #15311 from Security-Onion-Solutions/TOoSmOotH-patch-5
Update so-minion
2025-12-15 12:07:37 -05:00
Mike Reeves
b2a469e08c Update so-minion 2025-12-15 11:56:23 -05:00
Jason Ertel
285b0e4af9 Merge pull request #15308 from Security-Onion-Solutions/idstools-refactor
Add trailing nl if it doesnt already exist
2025-12-14 15:35:24 -05:00
DefensiveDepth
f9edfd6391 Add trailing nl if it doesnt already exist 2025-12-14 12:03:44 -05:00
Josh Patterson
f6301bc3e5 Merge pull request #15304 from Security-Onion-Solutions/ggjorge
fix cleaning repos on remote nodes if airgap
2025-12-12 14:22:21 -05:00
Josh Patterson
6c5c176b7d fix cleaning repos on remote nodes if airgap 2025-12-12 14:18:54 -05:00
Josh Brower
c6d52b5eb1 Merge pull request #15303 from Security-Onion-Solutions/idstools-refactor
Add Airgap check
2025-12-12 09:59:19 -05:00
DefensiveDepth
7cac528389 Add Airgap check 2025-12-12 09:52:01 -05:00
reyesj2
d518f75468 update deprecated config items 2025-12-11 20:07:06 -06:00
Josh Brower
6fe817ca4a Merge pull request #15301 from Security-Onion-Solutions/idstools-refactor
Rework backup
2025-12-11 13:57:25 -05:00
DefensiveDepth
cb9a6fac25 Update tests for rework 2025-12-11 12:14:37 -05:00
DefensiveDepth
a945768251 Refactor backup 2025-12-11 11:15:30 -05:00
Mike Reeves
c6646e3821 Merge pull request #15289 from Security-Onion-Solutions/TOoSmOotH-patch-3
Update Assistant Models
2025-12-10 17:22:13 -05:00
Mike Reeves
99dc72cece Merge branch '2.4/dev' into TOoSmOotH-patch-3 2025-12-10 17:19:32 -05:00
Josh Brower
04d6cca204 Merge pull request #15298 from Security-Onion-Solutions/idstools-refactor
Fixup logic
2025-12-10 17:18:59 -05:00
DefensiveDepth
5ab6bda639 Fixup logic 2025-12-10 17:16:35 -05:00
Josh Brower
f433de7e12 Merge pull request #15297 from Security-Onion-Solutions/idstools-refactor
small fixes
2025-12-10 15:23:12 -05:00
DefensiveDepth
8ef6c2f91d small fixes 2025-12-10 15:19:44 -05:00
Mike Reeves
7575218697 Merge pull request #15293 from Security-Onion-Solutions/TOoSmOotH-patch-4
Remove Claude Sonnet 4 model configuration
2025-12-09 11:04:38 -05:00
Mike Reeves
dc945dad00 Remove Claude Sonnet 4 model configuration
Removed configuration for Claude Sonnet 4 model.
2025-12-09 11:00:53 -05:00
Josh Brower
ddcd74ffd2 Merge pull request #15292 from Security-Onion-Solutions/idstools-refactor
Fix custom name
2025-12-09 10:12:41 -05:00
DefensiveDepth
e105bd12e6 Fix custom name 2025-12-09 09:49:27 -05:00
Josh Brower
f5688175b6 Merge pull request #15290 from Security-Onion-Solutions/idstools-refactor
match correct custom ruleset name
2025-12-08 18:25:46 -05:00
DefensiveDepth
72a4ba405f match correct custom ruleset name 2025-12-08 16:45:40 -05:00
Mike Reeves
94694d394e Add origin field to model training configuration 2025-12-08 16:36:09 -05:00
Mike Reeves
03dd746601 Add origin field to model configurations 2025-12-08 16:34:19 -05:00
Mike Reeves
eec3373ae7 Update display name for Claude Sonnet 4 2025-12-08 16:30:50 -05:00
Mike Reeves
db45ce07ed Modify model display names and remove GPT-OSS 120B
Updated display names for models and removed GPT-OSS 120B.
2025-12-08 16:26:45 -05:00
Josh Brower
ba49765312 Merge pull request #15287 from Security-Onion-Solutions/idstools-refactor
Rework ordering
2025-12-08 12:42:48 -05:00
DefensiveDepth
72c8c2371e Rework ordering 2025-12-08 12:39:30 -05:00
Josh Brower
80411ab6cf Merge pull request #15286 from Security-Onion-Solutions/idstools-refactor
be more verbose
2025-12-08 10:31:39 -05:00
DefensiveDepth
0ff8fa57e7 be more verbose 2025-12-08 10:29:24 -05:00
Josh Brower
411f28a049 Merge pull request #15284 from Security-Onion-Solutions/idstools-refactor
Make sure local salt dir is created
2025-12-07 17:49:56 -05:00
DefensiveDepth
0f42233092 Make sure local salt dir is created 2025-12-07 16:13:55 -05:00
Josh Brower
2dd49f6d9b Merge pull request #15283 from Security-Onion-Solutions/idstools-refactor
Fixup Airgap
2025-12-06 16:06:57 -05:00
DefensiveDepth
271f545f4f Fixup Airgap 2025-12-06 15:26:44 -05:00
Josh Brower
c4a70b540e Merge pull request #15232 from Security-Onion-Solutions/idstools-refactor
Idstools refactor
2025-12-05 12:58:10 -05:00
DefensiveDepth
bef85772e3 Merge branch 'idstools-refactor' of https://github.com/Security-Onion-Solutions/securityonion into idstools-refactor 2025-12-05 12:17:06 -05:00
DefensiveDepth
a6b19c4a6c Remove idstools config from manager pillar file 2025-12-05 12:13:05 -05:00
36 changed files with 756 additions and 235 deletions

View File

@@ -33,6 +33,7 @@ body:
- 2.4.180
- 2.4.190
- 2.4.200
- 2.4.210
- Other (please provide detail below)
validations:
required: true

View File

@@ -1,17 +1,17 @@
### 2.4.190-20251024 ISO image released on 2025/10/24
### 2.4.200-20251216 ISO image released on 2025/12/16
### Download and Verify
2.4.190-20251024 ISO image:
https://download.securityonion.net/file/securityonion/securityonion-2.4.190-20251024.iso
2.4.200-20251216 ISO image:
https://download.securityonion.net/file/securityonion/securityonion-2.4.200-20251216.iso
MD5: 25358481FB876226499C011FC0710358
SHA1: 0B26173C0CE136F2CA40A15046D1DFB78BCA1165
SHA256: 4FD9F62EDA672408828B3C0C446FE5EA9FF3C4EE8488A7AB1101544A3C487872
MD5: 07B38499952D1F2FD7B5AF10096D0043
SHA1: 7F3A26839CA3CAEC2D90BB73D229D55E04C7D370
SHA256: 8D3AC735873A2EA8527E16A6A08C34BD5018CBC0925AC4096E15A0C99F591D5F
Signature for ISO image:
https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.190-20251024.iso.sig
https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.200-20251216.iso.sig
Signing key:
https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.4/main/KEYS
@@ -25,22 +25,22 @@ wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.
Download the signature file for the ISO:
```
wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.190-20251024.iso.sig
wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.200-20251216.iso.sig
```
Download the ISO image:
```
wget https://download.securityonion.net/file/securityonion/securityonion-2.4.190-20251024.iso
wget https://download.securityonion.net/file/securityonion/securityonion-2.4.200-20251216.iso
```
Verify the downloaded ISO image using the signature file:
```
gpg --verify securityonion-2.4.190-20251024.iso.sig securityonion-2.4.190-20251024.iso
gpg --verify securityonion-2.4.200-20251216.iso.sig securityonion-2.4.200-20251216.iso
```
The output should show "Good signature" and the Primary key fingerprint should match what's shown below:
```
gpg: Signature made Thu 23 Oct 2025 07:21:46 AM EDT using RSA key ID FE507013
gpg: Signature made Mon 15 Dec 2025 05:24:11 PM EST using RSA key ID FE507013
gpg: Good signature from "Security Onion Solutions, LLC <info@securityonionsolutions.com>"
gpg: WARNING: This key is not certified with a trusted signature!
gpg: There is no indication that the signature belongs to the owner.
```
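As an additional check, the downloaded ISO can be compared against the SHA256 value published above; a minimal sketch, with the filename and hash taken from this page:

```bash
# Compare the 2.4.200-20251216 ISO against the SHA256 listed above.
echo "8D3AC735873A2EA8527E16A6A08C34BD5018CBC0925AC4096E15A0C99F591D5F  securityonion-2.4.200-20251216.iso" \
  | sha256sum -c -
```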

View File

@@ -1 +1 @@
2.4.200
2.4.210

View File

@@ -129,6 +129,7 @@ if [[ $EXCLUDE_STARTUP_ERRORS == 'Y' ]]; then
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|responded with status-code 503" # telegraf getting 503 from ES during startup
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|process_cluster_event_timeout_exception" # logstash waiting for elasticsearch to start
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|not configured for GeoIP" # SO does not bundle the maxminddb with Zeek
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|HTTP 404: Not Found" # Salt loops until Kratos returns 200, during startup Kratos may not be ready
fi
if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then
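For context, EXCLUDED_ERRORS is an alternation pattern meant to be fed to a filtering command; a minimal sketch of how it might be applied, assuming a grep -vE consumer and a hypothetical log path (the actual filtering command sits outside this hunk):

```bash
# Sketch only: apply the alternation list with grep -vE to drop known startup noise.
# The log path below is hypothetical; the real script defines its own inputs.
EXCLUDED_ERRORS="responded with status-code 503"
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|HTTP 404: Not Found"   # Kratos may not be ready during startup

grep -iE 'error' /opt/so/log/salt/minion 2>/dev/null | grep -vE "$EXCLUDED_ERRORS" || true
```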

View File

@@ -60,7 +60,7 @@ so-elastalert:
- watch:
- file: elastaconf
- onlyif:
- "so-elasticsearch-query / | jq -r '.version.number[0:1]' | grep -q 8" {# only run this state if elasticsearch is version 8 #}
- "so-elasticsearch-query / | jq -r '.version.number[0:1]' | grep -q 9" {# only run this state if elasticsearch is version 9 #}
delete_so-elastalert_so-status.disabled:
file.uncomment:
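The onlyif change above gates the elastalert state on the Elasticsearch major version; a standalone sketch of that gate, using the same so-elasticsearch-query and jq calls:

```bash
# Run a step only when the local Elasticsearch reports a 9.x version,
# mirroring the onlyif condition in the state above.
if so-elasticsearch-query / | jq -r '.version.number[0:1]' | grep -q 9; then
  echo "Elasticsearch 9.x detected - elastalert state may run"
fi
```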

View File

@@ -5,7 +5,7 @@
"package": {
"name": "endpoint",
"title": "Elastic Defend",
"version": "8.18.1",
"version": "9.0.2",
"requires_root": true
},
"enabled": true,

View File

@@ -21,6 +21,7 @@
'azure_application_insights.app_state': 'azure.app_state',
'azure_billing.billing': 'azure.billing',
'azure_functions.metrics': 'azure.function',
'azure_ai_foundry.metrics': 'azure.ai_foundry',
'azure_metrics.compute_vm_scaleset': 'azure.compute_vm_scaleset',
'azure_metrics.compute_vm': 'azure.compute_vm',
'azure_metrics.container_instance': 'azure.container_instance',

View File

@@ -86,7 +86,7 @@ if [[ -f $STATE_FILE_SUCCESS ]]; then
latest_package_list=$(/usr/sbin/so-elastic-fleet-package-list)
echo '{ "packages" : []}' > $BULK_INSTALL_PACKAGE_LIST
rm -f $INSTALLED_PACKAGE_LIST
echo $latest_package_list | jq '{packages: [.items[] | {name: .name, latest_version: .version, installed_version: .savedObject.attributes.install_version, subscription: .conditions.elastic.subscription }]}' >> $INSTALLED_PACKAGE_LIST
echo $latest_package_list | jq '{packages: [.items[] | {name: .name, latest_version: .version, installed_version: .installationInfo.version, subscription: .conditions.elastic.subscription }]}' >> $INSTALLED_PACKAGE_LIST
while read -r package; do
# get package details
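The jq rewrite above now reads the installed version from .installationInfo.version; a hedged sketch of consuming the resulting package list to spot pending upgrades (the file path is illustrative only):

```bash
# List packages whose installed version differs from the latest available version.
# Field names mirror the jq projection above; the path is a hypothetical example.
INSTALLED_PACKAGE_LIST=/tmp/installed_packages.json
jq -r '.packages[]
       | select(.installed_version != null and .installed_version != .latest_version)
       | "\(.name): \(.installed_version) -> \(.latest_version)"' "$INSTALLED_PACKAGE_LIST"
```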

View File

@@ -47,7 +47,7 @@ if ! kafka_output=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L "http://l
--arg KAFKACA "$KAFKACA" \
--arg MANAGER_IP "{{ GLOBALS.manager_ip }}:9092" \
--arg KAFKA_OUTPUT_VERSION "$KAFKA_OUTPUT_VERSION" \
'{"name":"grid-kafka", "id":"so-manager_kafka","type":"kafka","hosts":[ $MANAGER_IP ],"is_default":false,"is_default_monitoring":false,"config_yaml":"","ssl":{"certificate_authorities":[ $KAFKACA ],"certificate": $KAFKACRT ,"key":"","verification_mode":"full"},"proxy_id":null,"client_id":"Elastic","version": $KAFKA_OUTPUT_VERSION ,"compression":"none","auth_type":"ssl","partition":"round_robin","round_robin":{"group_events":10},"topics":[{"topic":"default-securityonion"}],"headers":[{"key":"","value":""}],"timeout":30,"broker_timeout":30,"required_acks":1,"secrets":{"ssl":{"key": $KAFKAKEY }}}'
'{"name":"grid-kafka", "id":"so-manager_kafka","type":"kafka","hosts":[ $MANAGER_IP ],"is_default":false,"is_default_monitoring":false,"config_yaml":"","ssl":{"certificate_authorities":[ $KAFKACA ],"certificate": $KAFKACRT ,"key":"","verification_mode":"full"},"proxy_id":null,"client_id":"Elastic","version": $KAFKA_OUTPUT_VERSION ,"compression":"none","auth_type":"ssl","partition":"round_robin","round_robin":{"group_events":10},"topic":"default-securityonion","headers":[{"key":"","value":""}],"timeout":30,"broker_timeout":30,"required_acks":1,"secrets":{"ssl":{"key": $KAFKAKEY }}}'
)
if ! response=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/outputs" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" --fail 2>/dev/null); then
echo -e "\nFailed to setup Elastic Fleet output policy for Kafka...\n"
@@ -67,7 +67,7 @@ elif kafka_output=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L "http://l
--arg ENABLED_DISABLED "$ENABLED_DISABLED"\
--arg KAFKA_OUTPUT_VERSION "$KAFKA_OUTPUT_VERSION" \
--argjson HOSTS "$HOSTS" \
'{"name":"grid-kafka","type":"kafka","hosts":$HOSTS,"is_default":$ENABLED_DISABLED,"is_default_monitoring":$ENABLED_DISABLED,"config_yaml":"","ssl":{"certificate_authorities":[ $KAFKACA ],"certificate": $KAFKACRT ,"key":"","verification_mode":"full"},"proxy_id":null,"client_id":"Elastic","version": $KAFKA_OUTPUT_VERSION ,"compression":"none","auth_type":"ssl","partition":"round_robin","round_robin":{"group_events":10},"topics":[{"topic":"default-securityonion"}],"headers":[{"key":"","value":""}],"timeout":30,"broker_timeout":30,"required_acks":1,"secrets":{"ssl":{"key": $KAFKAKEY }}}'
'{"name":"grid-kafka","type":"kafka","hosts":$HOSTS,"is_default":$ENABLED_DISABLED,"is_default_monitoring":$ENABLED_DISABLED,"config_yaml":"","ssl":{"certificate_authorities":[ $KAFKACA ],"certificate": $KAFKACRT ,"key":"","verification_mode":"full"},"proxy_id":null,"client_id":"Elastic","version": $KAFKA_OUTPUT_VERSION ,"compression":"none","auth_type":"ssl","partition":"round_robin","round_robin":{"group_events":10},"topic":"default-securityonion","headers":[{"key":"","value":""}],"timeout":30,"broker_timeout":30,"required_acks":1,"secrets":{"ssl":{"key": $KAFKAKEY }}}'
)
if ! response=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_kafka" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" --fail 2>/dev/null); then
echo -e "\nFailed to force update to Elastic Fleet output policy for Kafka...\n"

View File

@@ -1,6 +1,6 @@
elasticsearch:
enabled: false
version: 8.18.8
version: 9.0.8
index_clean: true
config:
action:
@@ -299,6 +299,19 @@ elasticsearch:
hot:
actions: {}
min_age: 0ms
sos-backup:
index_sorting: false
index_template:
composed_of: []
ignore_missing_component_templates: []
index_patterns:
- sos-backup-*
priority: 501
template:
settings:
index:
number_of_replicas: 0
number_of_shards: 1
so-assistant-chat:
index_sorting: false
index_template:
@@ -844,53 +857,11 @@ elasticsearch:
composed_of:
- agent-mappings
- dtc-agent-mappings
- base-mappings
- dtc-base-mappings
- client-mappings
- dtc-client-mappings
- container-mappings
- destination-mappings
- dtc-destination-mappings
- pb-override-destination-mappings
- dll-mappings
- dns-mappings
- dtc-dns-mappings
- ecs-mappings
- dtc-ecs-mappings
- error-mappings
- event-mappings
- dtc-event-mappings
- file-mappings
- dtc-file-mappings
- group-mappings
- host-mappings
- dtc-host-mappings
- http-mappings
- dtc-http-mappings
- log-mappings
- metadata-mappings
- network-mappings
- dtc-network-mappings
- observer-mappings
- dtc-observer-mappings
- organization-mappings
- package-mappings
- process-mappings
- dtc-process-mappings
- related-mappings
- rule-mappings
- dtc-rule-mappings
- server-mappings
- service-mappings
- dtc-service-mappings
- source-mappings
- dtc-source-mappings
- pb-override-source-mappings
- threat-mappings
- tls-mappings
- url-mappings
- user_agent-mappings
- dtc-user_agent-mappings
- common-settings
- common-dynamic-mappings
data_stream:

View File

@@ -1,9 +1,90 @@
{
"description" : "kratos",
"processors" : [
{"set":{"field":"audience","value":"access","override":false,"ignore_failure":true}},
{"set":{"field":"event.dataset","ignore_empty_value":true,"ignore_failure":true,"value":"kratos.{{{audience}}}","media_type":"text/plain"}},
{"set":{"field":"event.action","ignore_failure":true,"copy_from":"msg" }},
{ "pipeline": { "name": "common" } }
]
"description": "kratos",
"processors": [
{
"set": {
"field": "audience",
"value": "access",
"override": false,
"ignore_failure": true
}
},
{
"set": {
"field": "event.dataset",
"ignore_empty_value": true,
"ignore_failure": true,
"value": "kratos.{{{audience}}}",
"media_type": "text/plain"
}
},
{
"set": {
"field": "event.action",
"ignore_failure": true,
"copy_from": "msg"
}
},
{
"rename": {
"field": "http_request",
"target_field": "http.request",
"ignore_failure": true,
"ignore_missing": true
}
},
{
"rename": {
"field": "http_response",
"target_field": "http.response",
"ignore_failure": true,
"ignore_missing": true
}
},
{
"rename": {
"field": "http.request.path",
"target_field": "http.uri",
"ignore_failure": true,
"ignore_missing": true
}
},
{
"rename": {
"field": "http.request.method",
"target_field": "http.method",
"ignore_failure": true,
"ignore_missing": true
}
},
{
"rename": {
"field": "http.request.method",
"target_field": "http.method",
"ignore_failure": true,
"ignore_missing": true
}
},
{
"rename": {
"field": "http.request.query",
"target_field": "http.query",
"ignore_failure": true,
"ignore_missing": true
}
},
{
"rename": {
"field": "http.request.headers.user-agent",
"target_field": "http.useragent",
"ignore_failure": true,
"ignore_missing": true
}
},
{
"pipeline": {
"name": "common"
}
}
]
}
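A hedged way to exercise the new rename processors is the ingest simulate API, assuming the pipeline is loaded under the name kratos and that the referenced common pipeline exists; the sample document is illustrative only:

```bash
# Simulate the kratos pipeline against a fabricated document to confirm that
# http_request.* fields are renamed to http.* as defined above.
so-elasticsearch-query '_ingest/pipeline/kratos/_simulate' -X POST -d '{
  "docs": [
    { "_source": { "msg": "authenticated",
                   "http_request": { "method": "POST", "path": "/login",
                                     "headers": { "user-agent": "curl/8.0" } } } }
  ]
}'
```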

View File

@@ -15,7 +15,7 @@ set -e
if [ ! -f /opt/so/saltstack/local/salt/elasticsearch/cacerts ]; then
docker run -v /etc/pki/ca.crt:/etc/ssl/ca.crt --name so-elasticsearchca --user root --entrypoint jdk/bin/keytool {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-elasticsearch:$ELASTIC_AGENT_TARBALL_VERSION -keystore /usr/share/elasticsearch/jdk/lib/security/cacerts -alias SOSCA -import -file /etc/ssl/ca.crt -storepass changeit -noprompt
docker cp so-elasticsearchca:/usr/share/elasticsearch/jdk/lib/security/cacerts /opt/so/saltstack/local/salt/elasticsearch/cacerts
docker cp so-elasticsearchca:/etc/ssl/certs/ca-certificates.crt /opt/so/saltstack/local/salt/elasticsearch/tls-ca-bundle.pem
docker cp so-elasticsearchca:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem /opt/so/saltstack/local/salt/elasticsearch/tls-ca-bundle.pem
docker rm so-elasticsearchca
echo "" >> /opt/so/saltstack/local/salt/elasticsearch/tls-ca-bundle.pem
echo "sosca" >> /opt/so/saltstack/local/salt/elasticsearch/tls-ca-bundle.pem

View File

@@ -75,6 +75,7 @@ kratosconfig:
- group: 928
- mode: 600
- template: jinja
- show_changes: False
- defaults:
KRATOSMERGED: {{ KRATOSMERGED }}

View File

@@ -46,6 +46,7 @@ kratos:
ui_url: https://URL_BASE/
login:
ui_url: https://URL_BASE/login/
lifespan: 60m
error:
ui_url: https://URL_BASE/login/
registration:

View File

@@ -182,6 +182,10 @@ kratos:
global: True
advanced: True
helpLink: kratos.html
lifespan:
description: Defines the duration that a login form will remain valid.
global: True
helpLink: kratos.html
error:
ui_url:
description: User accessible URL containing the Security Onion login page. Leave as default to ensure proper operation.

View File

@@ -63,7 +63,7 @@ logstash:
settings:
lsheap: 500m
config:
http_x_host: 0.0.0.0
api_x_http_x_host: 0.0.0.0
path_x_logs: /var/log/logstash
pipeline_x_workers: 1
pipeline_x_batch_x_size: 125

View File

@@ -5,10 +5,10 @@ input {
codec => es_bulk
request_headers_target_field => client_headers
remote_host_target_field => client_host
ssl => true
ssl_enabled => true
ssl_certificate_authorities => ["/usr/share/filebeat/ca.crt"]
ssl_certificate => "/usr/share/logstash/filebeat.crt"
ssl_key => "/usr/share/logstash/filebeat.key"
ssl_verify_mode => "peer"
ssl_client_authentication => "required"
}
}

View File

@@ -2,11 +2,11 @@ input {
elastic_agent {
port => 5055
tags => [ "elastic-agent", "input-{{ GLOBALS.hostname }}" ]
ssl => true
ssl_enabled => true
ssl_certificate_authorities => ["/usr/share/filebeat/ca.crt"]
ssl_certificate => "/usr/share/logstash/elasticfleet-logstash.crt"
ssl_key => "/usr/share/logstash/elasticfleet-logstash.key"
ssl_verify_mode => "force_peer"
ssl_client_authentication => "required"
ecs_compatibility => v8
}
}

View File

@@ -2,7 +2,7 @@ input {
elastic_agent {
port => 5056
tags => [ "elastic-agent", "fleet-lumberjack-input" ]
ssl => true
ssl_enabled => true
ssl_certificate => "/usr/share/logstash/elasticfleet-lumberjack.crt"
ssl_key => "/usr/share/logstash/elasticfleet-lumberjack.key"
ecs_compatibility => v8

View File

@@ -8,8 +8,8 @@ output {
document_id => "%{[metadata][_id]}"
index => "so-ip-mappings"
silence_errors_in_log => ["version_conflict_engine_exception"]
ssl => true
ssl_certificate_verification => false
ssl_enabled => true
ssl_verification_mode => "none"
}
}
else {
@@ -25,8 +25,8 @@ output {
document_id => "%{[metadata][_id]}"
pipeline => "%{[metadata][pipeline]}"
silence_errors_in_log => ["version_conflict_engine_exception"]
ssl => true
ssl_certificate_verification => false
ssl_enabled => true
ssl_verification_mode => "none"
}
}
else {
@@ -37,8 +37,8 @@ output {
user => "{{ ES_USER }}"
password => "{{ ES_PASS }}"
pipeline => "%{[metadata][pipeline]}"
ssl => true
ssl_certificate_verification => false
ssl_enabled => true
ssl_verification_mode => "none"
}
}
}
@@ -49,8 +49,8 @@ output {
data_stream => true
user => "{{ ES_USER }}"
password => "{{ ES_PASS }}"
ssl => true
ssl_certificate_verification => false
ssl_enabled => true
ssl_verification_mode=> "none"
}
}
}

View File

@@ -13,8 +13,8 @@ output {
user => "{{ ES_USER }}"
password => "{{ ES_PASS }}"
index => "endgame-%{+YYYY.MM.dd}"
ssl => true
ssl_certificate_verification => false
ssl_enabled => true
ssl_verification_mode => "none"
}
}
}
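The Logstash pipeline changes above consistently rename ssl to ssl_enabled, ssl_verify_mode to ssl_client_authentication, and ssl_certificate_verification to ssl_verification_mode. A hedged cleanup sketch for any remaining local pipeline overrides, assuming they live under the local Salt tree:

```bash
# Rename deprecated Logstash 8 SSL options to their Logstash 9 equivalents in local
# pipeline overrides. The directory is an assumption; adjust to wherever overrides live.
for f in /opt/so/saltstack/local/salt/logstash/pipelines/*/*.conf; do
  [ -f "$f" ] || continue
  sed -i \
    -e 's/ssl => true/ssl_enabled => true/' \
    -e 's/ssl_verify_mode => "force_peer"/ssl_client_authentication => "required"/' \
    -e 's/ssl_verify_mode => "peer"/ssl_client_authentication => "required"/' \
    -e 's/ssl_certificate_verification => false/ssl_verification_mode => "none"/' \
    "$f"
done
```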

View File

@@ -56,7 +56,7 @@ logstash:
helpLink: logstash.html
global: False
config:
http_x_host:
api_x_http_x_host:
description: Host interface to listen to connections.
helpLink: logstash.html
readonly: True

View File

@@ -214,7 +214,7 @@ git_config_set_safe_dirs:
surinsmrulesdir:
file.directory:
- name: /nsm/rules/suricata
- name: /nsm/rules/suricata/etopen
- user: 939
- group: 939
- makedirs: True

View File

@@ -133,7 +133,7 @@ function getinstallinfo() {
return 1
fi
source <(echo $INSTALLVARS)
export $(echo "$INSTALLVARS" | xargs)
if [ $? -ne 0 ]; then
log "ERROR" "Failed to source install variables"
return 1
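The change above swaps `source <(echo $INSTALLVARS)` for `export $(echo "$INSTALLVARS" | xargs)`, which exports each KEY=VALUE pair so child processes also see them; a minimal sketch of the difference, assuming INSTALLVARS holds simple space-free KEY=VALUE pairs:

```bash
# Illustrative values only; the real INSTALLVARS comes from the install info query.
INSTALLVARS='MANAGER=so-manager INSTALLTYPE=STANDALONE'

source <(echo "$INSTALLVARS")                         # sets vars in this shell only
bash -c 'echo "child sees: ${INSTALLTYPE:-<unset>}"'  # prints <unset>

export $(echo "$INSTALLVARS" | xargs)                 # exports each KEY=VALUE pair
bash -c 'echo "child sees: ${INSTALLTYPE:-<unset>}"'  # prints STANDALONE
```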

View File

@@ -87,6 +87,12 @@ check_err() {
113)
echo 'No route to host'
;;
160)
echo 'Incompatible Elasticsearch upgrade'
;;
161)
echo 'Required intermediate Elasticsearch upgrade not complete'
;;
*)
echo 'Unhandled error'
echo "$err_msg"
@@ -427,6 +433,7 @@ preupgrade_changes() {
[[ "$INSTALLEDVERSION" == 2.4.170 ]] && up_to_2.4.180
[[ "$INSTALLEDVERSION" == 2.4.180 ]] && up_to_2.4.190
[[ "$INSTALLEDVERSION" == 2.4.190 ]] && up_to_2.4.200
[[ "$INSTALLEDVERSION" == 2.4.200 ]] && up_to_2.4.210
true
}
@@ -459,6 +466,7 @@ postupgrade_changes() {
[[ "$POSTVERSION" == 2.4.170 ]] && post_to_2.4.180
[[ "$POSTVERSION" == 2.4.180 ]] && post_to_2.4.190
[[ "$POSTVERSION" == 2.4.190 ]] && post_to_2.4.200
[[ "$POSTVERSION" == 2.4.200 ]] && post_to_2.4.210
true
}
@@ -615,9 +623,6 @@ post_to_2.4.180() {
}
post_to_2.4.190() {
echo "Regenerating Elastic Agent Installers"
/sbin/so-elastic-agent-gen-installers
# Only need to update import / eval nodes
if [[ "$MINION_ROLE" == "import" ]] || [[ "$MINION_ROLE" == "eval" ]]; then
update_import_fleet_output
@@ -645,6 +650,17 @@ post_to_2.4.200() {
POSTVERSION=2.4.200
}
post_to_2.4.210() {
echo "Rolling over Kratos index to apply new index template"
rollover_index "logs-kratos-so"
echo "Regenerating Elastic Agent Installers"
/sbin/so-elastic-agent-gen-installers
POSTVERSION=2.4.210
}
repo_sync() {
echo "Sync the local repo."
su socore -c '/usr/sbin/so-repo-sync' || fail "Unable to complete so-repo-sync."
@@ -906,9 +922,7 @@ up_to_2.4.180() {
}
up_to_2.4.190() {
# Elastic Update for this release, so download Elastic Agent files
determine_elastic_agent_upgrade
echo "Nothing to do for 2.4.190"
INSTALLEDVERSION=2.4.190
}
@@ -921,6 +935,13 @@ up_to_2.4.200() {
INSTALLEDVERSION=2.4.200
}
up_to_2.4.210() {
# Elastic Update for this release, so download Elastic Agent files
determine_elastic_agent_upgrade
INSTALLEDVERSION=2.4.210
}
add_hydra_pillars() {
mkdir -p /opt/so/saltstack/local/pillar/hydra
touch /opt/so/saltstack/local/pillar/hydra/soc_hydra.sls
@@ -1113,47 +1134,47 @@ suricata_idstools_removal_pre() {
install -d -o 939 -g 939 -m 755 /opt/so/conf/soc/fingerprints
install -o 939 -g 939 -m 644 /dev/null /opt/so/conf/soc/fingerprints/suricataengine.syncBlock
cat > /opt/so/conf/soc/fingerprints/suricataengine.syncBlock << EOF
Suricata ruleset sync is blocked until this file is removed. Make sure that you have manually added any custom Suricata rulesets via SOC config - review the documentation for more details: securityonion.net/docs
Suricata ruleset sync is blocked until this file is removed. **CRITICAL** Make sure that you have manually added any custom Suricata rulesets via SOC config before removing this file - review the documentation for more details: https://docs.securityonion.net/en/2.4/nids.html#sync-block
EOF
# Remove possible symlink & create salt local rules dir
[ -L /opt/so/saltstack/local/salt/suricata/rules ] && rm -f /opt/so/saltstack/local/salt/suricata/rules
install -d -o 939 -g 939 /opt/so/saltstack/local/salt/suricata/rules/ || echo "Failed to create Suricata local rules directory"
# Backup custom rules & overrides
mkdir -p /nsm/backup/detections-migration/2-4-200
cp /usr/sbin/so-rule-update /nsm/backup/detections-migration/2-4-200
cp /opt/so/conf/idstools/etc/rulecat.conf /nsm/backup/detections-migration/2-4-200
if [[ -f /opt/so/conf/soc/so-detections-backup.py ]]; then
python3 /opt/so/conf/soc/so-detections-backup.py
# Backup so-detection index via reindex
echo "Creating sos-backup index template..."
template_result=$(/sbin/so-elasticsearch-query '_index_template/sos-backup' -X PUT \
--retry 5 --retry-delay 15 --retry-all-errors \
-d '{"index_patterns":["sos-backup-*"],"priority":501,"template":{"settings":{"index":{"number_of_replicas":0,"number_of_shards":1}}}}')
# Verify backup by comparing counts
echo "Verifying detection overrides backup..."
es_override_count=$(/sbin/so-elasticsearch-query 'so-detection/_count' \
-d '{"query": {"bool": {"must": [{"exists": {"field": "so_detection.overrides"}}]}}}' | jq -r '.count') || {
echo " Error: Failed to query Elasticsearch for override count"
exit 1
}
if [[ -z "$template_result" ]] || ! echo "$template_result" | jq -e '.acknowledged == true' > /dev/null 2>&1; then
echo "Error: Failed to create sos-backup index template"
echo "$template_result"
exit 1
fi
if [[ ! "$es_override_count" =~ ^[0-9]+$ ]]; then
echo " Error: Invalid override count from Elasticsearch: '$es_override_count'"
exit 1
fi
BACKUP_INDEX="sos-backup-detection-$(date +%Y%m%d-%H%M%S)"
echo "Backing up so-detection index to $BACKUP_INDEX..."
reindex_result=$(/sbin/so-elasticsearch-query '_reindex?wait_for_completion=true' \
--retry 5 --retry-delay 15 --retry-all-errors \
-X POST -d "{\"source\": {\"index\": \"so-detection\"}, \"dest\": {\"index\": \"$BACKUP_INDEX\"}}")
backup_override_count=$(find /nsm/backup/detections/repo/*/overrides -type f 2>/dev/null | wc -l)
echo " Elasticsearch overrides: $es_override_count"
echo " Backed up overrides: $backup_override_count"
if [[ "$es_override_count" -gt 0 ]]; then
if [[ "$backup_override_count" -gt 0 ]]; then
echo " Override backup verified successfully"
else
echo " Error: Elasticsearch has $es_override_count overrides but backup has 0 files"
exit 1
fi
else
echo " No overrides to backup"
fi
if [[ -z "$reindex_result" ]]; then
echo "Error: Backup of detections failed - no response from Elasticsearch"
exit 1
elif echo "$reindex_result" | jq -e '.created >= 0' > /dev/null 2>&1; then
echo "Backup complete: $(echo "$reindex_result" | jq -r '.created') documents copied"
elif echo "$reindex_result" | grep -q "index_not_found_exception"; then
echo "so-detection index does not exist, skipping backup"
else
echo "SOC Detections backup script not found, skipping detection backup"
echo "Error: Backup of detections failed"
echo "$reindex_result"
exit 1
fi
}
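A hedged follow-up check after the reindex above: compare document counts between so-detection and the newest sos-backup index, using the same so-elasticsearch-query and jq tooling:

```bash
# Compare live vs backup document counts; the newest backup index is picked by sorting
# the timestamped index names created above.
src_count=$(/sbin/so-elasticsearch-query 'so-detection/_count' | jq -r '.count')
backup_index=$(/sbin/so-elasticsearch-query '_cat/indices/sos-backup-detection-*?h=index&s=index' | tail -n 1)
dst_count=$(/sbin/so-elasticsearch-query "$backup_index/_count" | jq -r '.count')
echo "so-detection: $src_count docs, $backup_index: $dst_count docs"
[[ "$src_count" == "$dst_count" ]] || echo "WARNING: backup document count does not match"
```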
@@ -1174,11 +1195,12 @@ hash_normalized_file() {
return 1
fi
sed -E \
# Ensure trailing newline for consistent hashing regardless of source file
{ sed -E \
-e 's/^[[:space:]]+//; s/[[:space:]]+$//' \
-e '/^$/d' \
-e 's|--url=http://[^:]+:7788|--url=http://MANAGER:7788|' \
"$file" | sha256sum | awk '{print $1}'
"$file"; echo; } | sed '/^$/d' | sha256sum | awk '{print $1}'
}
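The wrapper added above guarantees a trailing newline before hashing, so files that differ only in whether they end with a newline normalize to the same digest; a small demonstration of that property:

```bash
# Two rule files identical except for the final newline hash the same once an
# 'echo' is appended and empty lines are dropped, as in hash_normalized_file above.
printf 'alert http any any -> any any (sid:1;)\n' > /tmp/with_nl.rules
printf 'alert http any any -> any any (sid:1;)'   > /tmp/without_nl.rules

for f in /tmp/with_nl.rules /tmp/without_nl.rules; do
  { cat "$f"; echo; } | sed '/^$/d' | sha256sum | awk -v f="$f" '{print f": "$1}'
done
```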
# Known-default hashes for so-rule-update (ETOPEN ruleset)
@@ -1274,6 +1296,13 @@ custom_found=0
check_config_file "$SO_RULE_UPDATE" "KNOWN_SO_RULE_UPDATE_HASHES" || custom_found=1
check_config_file "$RULECAT_CONF" "KNOWN_RULECAT_CONF_HASHES" || custom_found=1
# Check for ETPRO rules on airgap systems
if [[ $is_airgap -eq 0 ]] && grep -q 'ETPRO ' /nsm/rules/suricata/emerging-all.rules 2>/dev/null; then
echo "ETPRO rules detected on airgap system - custom configuration"
echo "ETPRO rules detected on Airgap in /nsm/rules/suricata/emerging-all.rules" >> /opt/so/conf/soc/fingerprints/suricataengine.syncBlock
custom_found=1
fi
# If no custom configs found, remove syncBlock
if [[ $custom_found -eq 0 ]]; then
echo "idstools migration completed successfully - removing Suricata engine syncBlock"
@@ -1297,12 +1326,14 @@ if [ -n "$(docker ps -q -f name=^so-idstools$)" ]; then
fi
echo "Removing idstools symlink and scripts..."
rm /opt/so/saltstack/local/salt/suricata/rules
rm -rf /usr/sbin/so-idstools*
sed -i '/^#\?so-idstools$/d' /opt/so/conf/so-status/so-status.conf
crontab -l | grep -v 'so-rule-update' | crontab -
# Backup the salt master config before editing it
cp /etc/salt/master /nsm/backup/detections-migration/2-4-200
# Backup the salt master config & manager pillar before editing it
cp /opt/so/saltstack/local/pillar/minions/$MINIONID.sls /nsm/backup/detections-migration/2-4-200/
cp /etc/salt/master /nsm/backup/detections-migration/2-4-200/
so-yaml.py remove /opt/so/saltstack/local/pillar/minions/$MINIONID.sls idstools
so-yaml.py removelistitem /etc/salt/master file_roots.base /opt/so/rules/nids
}
@@ -1353,7 +1384,7 @@ unmount_update() {
update_airgap_rules() {
# Copy the rules over to update them for airgap.
rsync -a $UPDATE_DIR/agrules/suricata/* /nsm/rules/suricata/
rsync -a --delete $UPDATE_DIR/agrules/suricata/ /nsm/rules/suricata/etopen/
rsync -a $UPDATE_DIR/agrules/detect-sigma/* /nsm/rules/detect-sigma/
rsync -a $UPDATE_DIR/agrules/detect-yara/* /nsm/rules/detect-yara/
# Copy the securityonion-resorces repo over for SOC Detection Summaries and checkout the published summaries branch
@@ -1602,6 +1633,243 @@ verify_latest_update_script() {
fi
}
verify_es_version_compatibility() {
local es_required_version_statefile="/opt/so/state/so_es_required_upgrade_version.txt"
local es_verification_script="/tmp/so_intermediate_upgrade_verification.sh"
# supported upgrade paths for SO-ES versions
declare -A es_upgrade_map=(
["8.14.3"]="8.17.3 8.18.4 8.18.6 8.18.8"
["8.17.3"]="8.18.4 8.18.6 8.18.8"
["8.18.4"]="8.18.6 8.18.8 9.0.8"
["8.18.6"]="8.18.8 9.0.8"
["8.18.8"]="9.0.8"
)
# Elasticsearch MUST upgrade through these versions
declare -A es_to_so_version=(
["8.18.8"]="2.4.190-20251024"
)
# Get current Elasticsearch version
if es_version_raw=$(so-elasticsearch-query / --fail --retry 5 --retry-delay 10); then
es_version=$(echo "$es_version_raw" | jq -r '.version.number' )
else
echo "Could not determine current Elasticsearch version to validate compatibility with post soup Elasticsearch version."
exit 160
fi
if ! target_es_version=$(so-yaml.py get $UPDATE_DIR/salt/elasticsearch/defaults.yaml elasticsearch.version | sed -n '1p'); then
# so-yaml.py failed to get the ES version from upgrade versions elasticsearch/defaults.yaml file. Likely they are upgrading to an SO version older than 2.4.110 prior to the ES version pinning and should be OKAY to continue with the upgrade.
# if so-yaml.py failed to get the ES version AND the version we are upgrading to is newer than 2.4.110 then we should bail
if [[ $(cat $UPDATE_DIR/VERSION | cut -d'.' -f3) > 110 ]]; then
echo "Couldn't determine the target Elasticsearch version (post soup version) to ensure compatibility with current Elasticsearch version. Exiting"
exit 160
fi
# allow upgrade to version < 2.4.110 without checking ES version compatibility
return 0
fi
# if this statefile exists then we have done an intermediate upgrade and we need to ensure that ALL ES nodes have been upgraded to the version in the statefile before allowing soup to continue
if [[ -f "$es_required_version_statefile" ]]; then
# required so verification script should have already been created
if [[ ! -f "$es_verification_script" ]]; then
create_intermediate_upgrade_verification_script $es_verification_script
fi
local es_required_version_statefile_value=$(cat $es_required_version_statefile)
echo -e "\n##############################################################################################################################\n"
echo "A previously required intermediate Elasticsearch upgrade was detected. Verifying that all Searchnodes/Heavynodes have successfully upgraded Elasticsearch to $es_required_version_statefile_value before proceeding with soup to avoid potential data loss!"
# create script using version in statefile
timeout --foreground 4000 bash "$es_verification_script" "$es_required_version_statefile_value" "$es_required_version_statefile"
if [[ $? -ne 0 ]]; then
echo -e "\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
echo "A previous required intermediate Elasticsearch upgrade to $es_required_version_statefile_value has yet to successfully complete across the grid. Please allow time for all Searchnodes/Heavynodes to have upgraded Elasticsearch to $es_required_version_statefile_value before running soup again to avoid potential data loss!"
echo -e "\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
exit 161
fi
echo -e "\n##############################################################################################################################\n"
fi
if [[ " ${es_upgrade_map[$es_version]} " =~ " $target_es_version " || "$es_version" == "$target_es_version" ]]; then
# supported upgrade
return 0
else
compatible_versions=${es_upgrade_map[$es_version]}
next_step_so_version=${es_to_so_version[${compatible_versions##* }]}
echo -e "\n##############################################################################################################################\n"
echo -e "You are currently running Security Onion $INSTALLEDVERSION. You will need to update to version $next_step_so_version before updating to $(cat $UPDATE_DIR/VERSION).\n"
echo "${compatible_versions##* }" > "$es_required_version_statefile"
# We expect to upgrade to the latest compatible minor version of ES
create_intermediate_upgrade_verification_script $es_verification_script
if [[ $is_airgap -eq 0 ]]; then
echo "You can download the $next_step_so_version ISO image from https://download.securityonion.net/file/securityonion/securityonion-$next_step_so_version.iso"
echo "*** Once you have updated to $next_step_so_version, you can then run soup again to update to $(cat $UPDATE_DIR/VERSION). ***"
echo -e "\n##############################################################################################################################\n"
exit 160
else
# preserve BRANCH value if set originally
if [[ -n "$BRANCH" ]]; then
local originally_requested_so_version="$BRANCH"
else
local originally_requested_so_version="2.4/main"
fi
echo "Starting automated intermediate upgrade to $next_step_so_version."
echo "After completion, the system will automatically attempt to upgrade to the latest version."
echo -e "\n##############################################################################################################################\n"
exec bash -c "BRANCH=$next_step_so_version soup -y && BRANCH=$next_step_so_version soup -y && \
echo -e \"\n##############################################################################################################################\n\" && \
echo -e \"Verifying Elasticsearch was successfully upgraded to ${compatible_versions##* } across the grid. This part can take a while as Searchnodes/Heavynodes sync up with the Manager! \n\nOnce verification completes the next soup will begin automatically. If verification takes longer than 1 hour it will stop waiting and your grid will remain at $next_step_so_version. Allowing for all Searchnodes/Heavynodes to upgrade Elasticsearch to the required version on their own time.\n\" \
&& timeout --foreground 4000 bash /tmp/so_intermediate_upgrade_verification.sh ${compatible_versions##* } $es_required_version_statefile && \
echo -e \"\n##############################################################################################################################\n\" \
&& BRANCH=$originally_requested_so_version soup -y && BRANCH=$originally_requested_so_version soup -y"
fi
fi
}
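At the core of verify_es_version_compatibility is an associative-array lookup of supported upgrade paths; a standalone sketch of that decision, with versions copied from the map above and example inputs:

```bash
# Decide whether a direct Elasticsearch upgrade is supported or an intermediate
# step is required, mirroring the es_upgrade_map test in the function above.
declare -A es_upgrade_map=(
  ["8.18.4"]="8.18.6 8.18.8 9.0.8"
  ["8.18.6"]="8.18.8 9.0.8"
  ["8.18.8"]="9.0.8"
)
es_version="8.18.4"; target_es_version="9.0.8"   # example inputs
if [[ " ${es_upgrade_map[$es_version]} " =~ " $target_es_version " || "$es_version" == "$target_es_version" ]]; then
  echo "direct upgrade $es_version -> $target_es_version is supported"
else
  echo "intermediate upgrade required; last supported step from $es_version: ${es_upgrade_map[$es_version]##* }"
fi
```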
create_intermediate_upgrade_verification_script() {
# After an intermediate upgrade, verify that ALL nodes running Elasticsearch are at the expected version BEFORE proceeding to the next upgrade step. This is a CRITICAL step
local verification_script="$1"
cat << 'EOF' > "$verification_script"
#!/bin/bash
SOUP_INTERMEDIATE_UPGRADE_FAILURES_LOG_FILE="/root/so_intermediate_upgrade_verification_failures.log"
CURRENT_TIME=$(date +%Y%m%d.%H%M%S)
EXPECTED_ES_VERSION="$1"
if [[ -z "$EXPECTED_ES_VERSION" ]]; then
echo -e "\nExpected Elasticsearch version not provided. Usage: $0 <expected_es_version>"
exit 1
fi
if [[ -f "$SOUP_INTERMEDIATE_UPGRADE_FAILURES_LOG_FILE" ]]; then
mv "$SOUP_INTERMEDIATE_UPGRADE_FAILURES_LOG_FILE" "$SOUP_INTERMEDIATE_UPGRADE_FAILURES_LOG_FILE.$CURRENT_TIME"
fi
check_heavynodes_es_version() {
# Check if heavynodes are in this grid
if ! salt-key -l accepted | grep -q 'heavynode$'; then
# No heavynodes, skip version check
echo "No heavynodes detected in this Security Onion deployment. Skipping heavynode Elasticsearch version verification."
return 0
fi
echo -e "\nOne or more heavynodes detected. Verifying their Elasticsearch versions."
local retries=20
local retry_count=0
local delay=180
while [[ $retry_count -lt $retries ]]; do
# keep stderr with variable for logging
heavynode_versions=$(salt -C 'G@role:so-heavynode' cmd.run 'so-elasticsearch-query / --retry 3 --retry-delay 10 | jq ".version.number"' shell=/bin/bash --out=json 2> /dev/null)
local exit_status=$?
# Check that all heavynodes returned good data
if [[ $exit_status -ne 0 ]]; then
echo "Failed to retrieve Elasticsearch version from one or more heavynodes... Retrying in $delay seconds. Attempt $((retry_count + 1)) of $retries."
((retry_count++))
sleep $delay
continue
else
if echo "$heavynode_versions" | jq -s --arg expected "\"$EXPECTED_ES_VERSION\"" --exit-status 'all(.[]; . | to_entries | all(.[]; .value == $expected))' > /dev/null; then
echo -e "\nAll heavynodes are at the expected Elasticsearch version $EXPECTED_ES_VERSION."
return 0
else
echo "One or more heavynodes are not at the expected Elasticsearch version $EXPECTED_ES_VERSION. Rechecking in $delay seconds. Attempt $((retry_count + 1)) of $retries."
((retry_count++))
sleep $delay
continue
fi
fi
done
echo -e "\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
echo "One or more heavynodes is not at the expected Elasticsearch version $EXPECTED_ES_VERSION."
echo "Current versions:"
echo "$heavynode_versions" | jq -s 'add'
echo "$heavynode_versions" | jq -s 'add' >> "$SOUP_INTERMEDIATE_UPGRADE_FAILURES_LOG_FILE"
echo -e "\n Stopping automatic upgrade to latest Security Onion version. Heavynodes must ALL be at Elasticsearch version $EXPECTED_ES_VERSION before proceeding with the next upgrade step to avoid potential data loss!"
echo -e "\n Heavynodes will upgrade themselves to Elasticsearch $EXPECTED_ES_VERSION on their own, but this process can take a long time depending on network link between Manager and Heavynodes."
echo -e "\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
return 1
}
check_searchnodes_es_version() {
local retries=20
local retry_count=0
local delay=180
while [[ $retry_count -lt $retries ]]; do
# keep stderr with variable for logging
cluster_versions=$(so-elasticsearch-query _nodes/_all/version --retry 5 --retry-delay 10 --fail 2>&1)
local exit_status=$?
if [[ $exit_status -ne 0 ]]; then
echo "Failed to retrieve Elasticsearch versions from searchnodes... Retrying in $delay seconds. Attempt $((retry_count + 1)) of $retries."
((retry_count++))
sleep $delay
continue
else
if echo "$cluster_versions" | jq --arg expected "$EXPECTED_ES_VERSION" --exit-status '.nodes | to_entries | all(.[].value.version; . == $expected)' > /dev/null; then
echo "All Searchnodes are at the expected Elasticsearch version $EXPECTED_ES_VERSION."
return 0
else
echo "One or more Searchnodes is not at the expected Elasticsearch version $EXPECTED_ES_VERSION. Rechecking in $delay seconds. Attempt $((retry_count + 1)) of $retries."
((retry_count++))
sleep $delay
continue
fi
fi
done
echo -e "\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
echo "One or more Searchnodes is not at the expected Elasticsearch version $EXPECTED_ES_VERSION."
echo "Current versions:"
echo "$cluster_versions" | jq '.nodes | to_entries | map({(.value.name): .value.version}) | sort | add'
echo "$cluster_versions" >> "$SOUP_INTERMEDIATE_UPGRADE_FAILURES_LOG_FILE"
echo -e "\nStopping automatic upgrade to latest version. Searchnodes must ALL be at Elasticsearch version $EXPECTED_ES_VERSION before proceeding with the next upgrade step to avoid potential data loss!"
echo -e "\nSearchnodes will upgrade themselves to Elasticsearch $EXPECTED_ES_VERSION on their own, but this process can take a while depending on cluster size / network link between Manager and Searchnodes."
echo -e "\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
echo "$cluster_versions" > "$SOUP_INTERMEDIATE_UPGRADE_FAILURES_LOG_FILE"
return 1
}
# Need to add a check for heavynodes and ensure all heavynodes get their own "cluster" upgraded before moving on to final upgrade.
check_searchnodes_es_version || exit 1
check_heavynodes_es_version || exit 1
# Remove required version state file after successful verification
rm -f "$2"
exit 0
EOF
}
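The generated verification script leans on one jq test per node type; a standalone sketch of the searchnode variant, reusing the exact query and jq expression from the script above:

```bash
# Succeed only if every node reported by the cluster is at the expected version.
EXPECTED_ES_VERSION="9.0.8"
if so-elasticsearch-query _nodes/_all/version --fail \
     | jq --arg expected "$EXPECTED_ES_VERSION" --exit-status \
          '.nodes | to_entries | all(.[].value.version; . == $expected)' > /dev/null; then
  echo "all Elasticsearch nodes are at $EXPECTED_ES_VERSION"
else
  echo "one or more nodes are not yet at $EXPECTED_ES_VERSION"
fi
```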
# Keeping this block in case we need to do a hotfix that requires salt update
apply_hotfix() {
if [[ "$INSTALLEDVERSION" == "2.4.20" ]] ; then
@@ -1698,6 +1966,8 @@ main() {
echo "Verifying we have the latest soup script."
verify_latest_update_script
verify_es_version_compatibility
echo "Let's see if we need to update Security Onion."
upgrade_check
upgrade_space
@@ -1863,7 +2133,7 @@ main() {
if [[ $is_airgap -eq 0 ]]; then
echo ""
echo "Cleaning repos on remote Security Onion nodes."
salt -C 'not *_eval and not *_manager and not *_managersearch and not *_standalone and G@os:CentOS' cmd.run "yum clean all"
salt -C 'not *_eval and not *_manager* and not *_standalone and G@os:OEL' cmd.run "dnf clean all"
echo ""
fi
fi

View File

@@ -0,0 +1,91 @@
Onion AI Session Report
==========================
## Session Details
**Session ID:** {{.Session.SessionId}}
**Title:** {{.Session.Title}}
**Created:** {{formatDateTime "Mon Jan 02 15:04:05 -0700 2006" .Session.CreateTime}}
**Updated:** {{formatDateTime "Mon Jan 02 15:04:05 -0700 2006" .Session.UpdateTime}}
{{ if .Session.DeleteTime }}
**Deleted:** {{ formatDateTime "Mon Jan 02 15:04:05 -0700 2006" .Session.DeleteTime}}
{{ end }}
**User ID:** {{getUserDetail "email" .Session.UserId}}
## Session Usage
**Total Input Tokens** {{.Session.Usage.TotalInputTokens}}
**Total Output Tokens** {{.Session.Usage.TotalOutputTokens}}
**Total Credits:** {{.Session.Usage.TotalCredits}}
**Total Messages:** {{.Session.Usage.TotalMessages}}
## Messages
{{ range $index, $msg := sortAssistantMessages "CreateTime" "asc" .History }}
#### Message {{ add $index 1 }}
**Created:** {{formatDateTime "Mon Jan 02 15:04:05 -0700 2006" $msg.CreateTime}}
**User ID:** {{getUserDetail "email" $msg.UserId}}
**Role:** {{$msg.Message.Role}}
{{ range $i, $block := $msg.Message.ContentBlocks }}
---
{{ if eq $block.Type "text" }}
**Text:** {{ stripEmoji $block.Text }}
{{ else if eq $block.Type "tool_use" }}
**Tool:** {{ $block.Name }}
{{ if $block.Input }}
**Parameters:**
{{ range $key, $value := parseJSON $block.Input }}
{{ if eq $key "limit" }}- {{ $key }}: {{ $value }}
{{ else }}- {{ $key }}: "{{ $value }}"
{{ end }}{{ end }}{{ end }}
{{ else if $block.ToolResult }}
**Tool Result:**
{{ if $block.ToolResult.Content }}
{{ range $j, $contentBlock := $block.ToolResult.Content }}
{{ if gt $j 0 }}
---
{{ end }}
{{ if $contentBlock.Text }}
{{ if $block.ToolResult.IsError }}
**Error:** {{ $contentBlock.Text }}
{{ else }}
{{ $contentBlock.Text }}
{{ end }}
{{ else if $contentBlock.Json }}
```json
{{ toJSON $contentBlock.Json }}
```
{{ end }}{{ end }}
{{ end }}{{ end }}{{ end }}
{{ if eq $msg.Message.Role "assistant" }}{{ if $msg.Message.Usage }}
---
**Message Usage:**
- Input Tokens: {{$msg.Message.Usage.InputTokens}}
- Output Tokens: {{$msg.Message.Usage.OutputTokens}}
- Credits: {{$msg.Message.Usage.Credits}}
{{end}}{{end}}
---
{{end}}

View File

@@ -130,4 +130,42 @@ Security Onion Case Report
| ---- | ---- | ------ | --------- |
{{ range sortHistory "CreateTime" "asc" .History -}}
| {{formatDateTime "Mon Jan 02 15:04:05 -0700 2006" .CreateTime}} | {{getUserDetail "email" .UserId}} | {{.Kind}} | {{.Operation}} |
{{end}}
## Attached Onion AI Sessions
{{ range $idx, $session := sortAssistantSessionDetails "CreateTime" "desc" .AssistantSessions }}
#### Session {{ add $idx 1 }}
**Session ID:** {{$session.Session.SessionId}}
**Title:** {{$session.Session.Title}}
**User ID:** {{getUserDetail "email" $session.Session.UserId}}
**Created:** {{formatDateTime "Mon Jan 02 15:04:05 -0700 2006" $session.Session.CreateTime}}
**Updated:** {{formatDateTime "Mon Jan 02 15:04:05 -0700 2006" $session.Session.UpdateTime}}
{{ if $session.Session.DeleteTime }}
**Deleted:** {{ formatDateTime "Mon Jan 02 15:04:05 -0700 2006" $session.Session.DeleteTime}}
{{ end }}
#### Messages
{{ range $index, $msg := sortAssistantMessages "CreateTime" "asc" $session.History }}
{{ range $i, $block := $msg.Message.ContentBlocks }}
{{ if eq $block.Type "text" }}
**Role:** {{$msg.Message.Role}}
{{ stripEmoji $block.Text }}
---
{{ end }}{{ end }}
{{end}}
{{end}}

View File

@@ -357,7 +357,7 @@ sensoroni:
reports:
standard:
case_report__md:
title: Case report Template
title: Case Report Template
description: The template used when generating a case report. Supports markdown format.
file: True
global: True
@@ -370,6 +370,13 @@ sensoroni:
global: True
syntax: md
helpLink: reports.html
assistant_session_report__md:
title: Assistant Session Report Template
description: The template used when generating an assistant session report. Supports markdown format.
file: True
global: True
syntax: md
helplink: reports.html
custom:
generic_report1__md:
title: Custom Report 1

View File

@@ -115,16 +115,16 @@ soc:
':kratos:':
- soc_timestamp
- event.dataset
- http_request.headers.x-real-ip
- http.request.headers.x-real-ip
- user.name
- http_request.headers.user-agent
- http.useragent
- msg
':hydra:':
- soc_timestamp
- event.dataset
- http_request.headers.x-real-ip
- http.request.headers.x-real-ip
- user.name
- http_request.headers.user-agent
- http.useragent
- msg
'::conn':
- soc_timestamp
@@ -1622,12 +1622,11 @@ soc:
sourceType: directory
airgap:
- name: Emerging-Threats
description: "Emerging Threats ruleset - To enable ET Pro, enter your license key below. Leave empty for ET Open (free) rules."
description: "Emerging Threats ruleset - To enable ET Pro on Airgap, review the documentation at https://docs.securityonion.net/suricata"
licenseKey: ""
enabled: true
sourceType: url
sourcePath: 'https://rules.emergingthreats.net/open/suricata/emerging.rules.tar.gz'
urlHash: "https://rules.emergingthreats.net/open/suricata/emerging.rules.tar.gz.md5"
sourceType: directory
sourcePath: /nsm/rules/suricata/etopen/
license: "BSD"
excludeFiles:
- "*deleted*"
@@ -1748,7 +1747,7 @@ soc:
showSubtitle: true
- name: SOC - Auth
description: Users authenticated to SOC grouped by IP address and identity
query: 'event.dataset:kratos.audit AND msg:*authenticated* | groupby http_request.headers.x-real-ip user.name'
query: 'event.dataset:kratos.audit AND msg:*authenticated* | groupby http.request.headers.x-real-ip user.name'
showSubtitle: true
- name: SOC - App
description: Logs generated by the Security Onion Console (SOC) server and modules
@@ -2028,10 +2027,10 @@ soc:
query: '* | groupby event.category | groupby -sankey event.category event.module | groupby event.module | groupby -sankey event.module event.dataset | groupby event.dataset | groupby observer.name | groupby host.name | groupby source.ip | groupby destination.ip | groupby destination.port'
- name: SOC Logins
description: SOC (Security Onion Console) logins
query: 'event.dataset:kratos.audit AND msg:*authenticated* | groupby http_request.headers.x-real-ip | groupby -sankey http_request.headers.x-real-ip user.name | groupby user.name | groupby http_request.headers.user-agent'
query: 'event.dataset:kratos.audit AND msg:*authenticated* | groupby http.request.headers.x-real-ip | groupby -sankey http.request.headers.x-real-ip user.name | groupby user.name | groupby http.useragent'
- name: SOC Login Failures
description: SOC (Security Onion Console) login failures
query: 'event.dataset:kratos.audit AND msg:*Encountered*self-service*login*error* | groupby user.name | groupby http_request.headers.x-real-ip | groupby -sankey http_request.headers.x-real-ip http_request.headers.user-agent | groupby http_request.headers.user-agent'
query: 'event.dataset:kratos.audit AND msg:*Encountered*self-service*login*error* | groupby user.name | groupby http.request.headers.x-real-ip | groupby -sankey http.request.headers.x-real-ip http.useragent | groupby http.useragent'
- name: Alerts
description: Overview of all alerts
query: 'tags:alert | groupby event.module* | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby rule.name | groupby event.severity | groupby destination.as.organization.name'
@@ -2653,26 +2652,16 @@ soc:
thresholdColorRatioMed: 0.75
thresholdColorRatioMax: 1
availableModels:
- id: sonnet-4
displayName: Claude Sonnet 4
contextLimitSmall: 200000
contextLimitLarge: 1000000
lowBalanceColorAlert: 500000
enabled: true
- id: sonnet-4.5
displayName: Claude Sonnet 4.5
displayName: Claude Sonnet 4.5 ($$$)
origin: USA
contextLimitSmall: 200000
contextLimitLarge: 1000000
lowBalanceColorAlert: 500000
enabled: true
- id: gptoss-120b
displayName: GPT-OSS 120B
contextLimitSmall: 128000
contextLimitLarge: 128000
lowBalanceColorAlert: 500000
enabled: true
- id: qwen-235b
displayName: QWEN 235B
displayName: QWEN 235B ($)
origin: China
contextLimitSmall: 256000
contextLimitLarge: 256000
lowBalanceColorAlert: 500000

View File

@@ -6,6 +6,7 @@
# This script queries Elasticsearch for Custom Detections and all Overrides,
# and git commits them to disk at $OUTPUT_DIR
import argparse
import os
import subprocess
import json
@@ -18,10 +19,10 @@ from datetime import datetime
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# Constants
ES_URL = "https://localhost:9200/so-detection/_search"
DEFAULT_INDEX = "so-detection"
DEFAULT_OUTPUT_DIR = "/nsm/backup/detections/repo"
QUERY_DETECTIONS = '{"query": {"bool": {"must": [{"match_all": {}}, {"term": {"so_detection.ruleset": "__custom__"}}]}},"size": 10000}'
QUERY_OVERRIDES = '{"query": {"bool": {"must": [{"exists": {"field": "so_detection.overrides"}}]}},"size": 10000}'
OUTPUT_DIR = "/nsm/backup/detections/repo"
AUTH_FILE = "/opt/so/conf/elasticsearch/curl.config"
def get_auth_credentials(auth_file):
@@ -30,9 +31,10 @@ def get_auth_credentials(auth_file):
if line.startswith('user ='):
return line.split('=', 1)[1].strip().replace('"', '')
def query_elasticsearch(query, auth):
def query_elasticsearch(query, auth, index):
url = f"https://localhost:9200/{index}/_search"
headers = {"Content-Type": "application/json"}
response = requests.get(ES_URL, headers=headers, data=query, auth=auth, verify=False)
response = requests.get(url, headers=headers, data=query, auth=auth, verify=False)
response.raise_for_status()
return response.json()
@@ -47,12 +49,12 @@ def save_content(hit, base_folder, subfolder="", extension="txt"):
f.write(content)
return file_path
def save_overrides(hit):
def save_overrides(hit, output_dir):
so_detection = hit["_source"]["so_detection"]
public_id = so_detection["publicId"]
overrides = so_detection["overrides"]
language = so_detection["language"]
folder = os.path.join(OUTPUT_DIR, language, "overrides")
folder = os.path.join(output_dir, language, "overrides")
os.makedirs(folder, exist_ok=True)
extension = "yaml" if language == "sigma" else "txt"
file_path = os.path.join(folder, f"{public_id}.{extension}")
@@ -60,20 +62,20 @@ def save_overrides(hit):
f.write('\n'.join(json.dumps(override) for override in overrides) if isinstance(overrides, list) else overrides)
return file_path
def ensure_git_repo():
if not os.path.isdir(os.path.join(OUTPUT_DIR, '.git')):
def ensure_git_repo(output_dir):
if not os.path.isdir(os.path.join(output_dir, '.git')):
subprocess.run(["git", "config", "--global", "init.defaultBranch", "main"], check=True)
subprocess.run(["git", "-C", OUTPUT_DIR, "init"], check=True)
subprocess.run(["git", "-C", OUTPUT_DIR, "remote", "add", "origin", "default"], check=True)
subprocess.run(["git", "-C", output_dir, "init"], check=True)
subprocess.run(["git", "-C", output_dir, "remote", "add", "origin", "default"], check=True)
def commit_changes():
ensure_git_repo()
subprocess.run(["git", "-C", OUTPUT_DIR, "config", "user.email", "securityonion@local.invalid"], check=True)
subprocess.run(["git", "-C", OUTPUT_DIR, "config", "user.name", "securityonion"], check=True)
subprocess.run(["git", "-C", OUTPUT_DIR, "add", "."], check=True)
status_result = subprocess.run(["git", "-C", OUTPUT_DIR, "status"], capture_output=True, text=True)
def commit_changes(output_dir):
ensure_git_repo(output_dir)
subprocess.run(["git", "-C", output_dir, "config", "user.email", "securityonion@local.invalid"], check=True)
subprocess.run(["git", "-C", output_dir, "config", "user.name", "securityonion"], check=True)
subprocess.run(["git", "-C", output_dir, "add", "."], check=True)
status_result = subprocess.run(["git", "-C", output_dir, "status"], capture_output=True, text=True)
print(status_result.stdout)
commit_result = subprocess.run(["git", "-C", OUTPUT_DIR, "commit", "-m", "Update detections and overrides"], check=False, capture_output=True)
commit_result = subprocess.run(["git", "-C", output_dir, "commit", "-m", "Update detections and overrides"], check=False, capture_output=True)
if commit_result.returncode == 1:
print("No changes to commit.")
elif commit_result.returncode == 0:
@@ -81,29 +83,41 @@ def commit_changes():
else:
commit_result.check_returncode()
def parse_args():
parser = argparse.ArgumentParser(description="Backup custom detections and overrides from Elasticsearch")
parser.add_argument("--output", "-o", default=DEFAULT_OUTPUT_DIR,
help=f"Output directory for backups (default: {DEFAULT_OUTPUT_DIR})")
parser.add_argument("--index", "-i", default=DEFAULT_INDEX,
help=f"Elasticsearch index to query (default: {DEFAULT_INDEX})")
return parser.parse_args()
def main():
args = parse_args()
output_dir = args.output
index = args.index
try:
timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
print(f"Backing up Custom Detections and all Overrides to {OUTPUT_DIR} - {timestamp}\n")
os.makedirs(OUTPUT_DIR, exist_ok=True)
print(f"Backing up Custom Detections and all Overrides to {output_dir} - {timestamp}\n")
os.makedirs(output_dir, exist_ok=True)
auth_credentials = get_auth_credentials(AUTH_FILE)
username, password = auth_credentials.split(':', 1)
auth = HTTPBasicAuth(username, password)
# Query and save custom detections
detections = query_elasticsearch(QUERY_DETECTIONS, auth)["hits"]["hits"]
detections = query_elasticsearch(QUERY_DETECTIONS, auth, index)["hits"]["hits"]
for hit in detections:
save_content(hit, OUTPUT_DIR, hit["_source"]["so_detection"]["language"], "yaml" if hit["_source"]["so_detection"]["language"] == "sigma" else "txt")
save_content(hit, output_dir, hit["_source"]["so_detection"]["language"], "yaml" if hit["_source"]["so_detection"]["language"] == "sigma" else "txt")
# Query and save overrides
overrides = query_elasticsearch(QUERY_OVERRIDES, auth)["hits"]["hits"]
overrides = query_elasticsearch(QUERY_OVERRIDES, auth, index)["hits"]["hits"]
for hit in overrides:
save_overrides(hit)
commit_changes()
save_overrides(hit, output_dir)
commit_changes(output_dir)
timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
print(f"Backup Completed - {timestamp}")
except Exception as e:
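
A minimal, self-contained sketch of the parameterized query path after this change: the index is now a plain argument instead of the old ES_URL constant, so the same helper can back up an alternate index. The credentials and the alternate index name below are placeholders, not values from this changeset.

import requests
import urllib3
from requests.auth import HTTPBasicAuth

urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

def query_elasticsearch(query, auth, index):
    # The caller now chooses the index; previously the URL was fixed to so-detection.
    url = f"https://localhost:9200/{index}/_search"
    headers = {"Content-Type": "application/json"}
    response = requests.get(url, headers=headers, data=query, auth=auth, verify=False)
    response.raise_for_status()
    return response.json()

if __name__ == "__main__":
    auth = HTTPBasicAuth("elastic_user", "elastic_pass")  # placeholder; the script parses these from curl.config
    query = '{"query": {"term": {"so_detection.ruleset": "__custom__"}}, "size": 10000}'
    hits = query_elasticsearch(query, auth, "so-detection-archive")["hits"]["hits"]  # hypothetical index name
    print(f"{len(hits)} custom detections returned")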


@@ -57,12 +57,12 @@ class TestBackupScript(unittest.TestCase):
mock_response.json.return_value = {'hits': {'hits': []}}
mock_response.raise_for_status = MagicMock()
mock_get.return_value = mock_response
response = ds.query_elasticsearch(ds.QUERY_DETECTIONS, self.auth)
response = ds.query_elasticsearch(ds.QUERY_DETECTIONS, self.auth, ds.DEFAULT_INDEX)
self.assertEqual(response, {'hits': {'hits': []}})
mock_get.assert_called_once_with(
ds.ES_URL,
f"https://localhost:9200/{ds.DEFAULT_INDEX}/_search",
headers={"Content-Type": "application/json"},
data=ds.QUERY_DETECTIONS,
auth=self.auth,
@@ -81,7 +81,7 @@ class TestBackupScript(unittest.TestCase):
@patch('os.makedirs')
@patch('builtins.open', new_callable=mock_open)
def test_save_overrides(self, mock_file, mock_makedirs):
file_path = ds.save_overrides(self.mock_override_hit)
file_path = ds.save_overrides(self.mock_override_hit, self.output_dir)
expected_path = f'{self.output_dir}/sigma/overrides/test_id.yaml'
self.assertEqual(file_path, expected_path)
mock_makedirs.assert_called_once_with(f'{self.output_dir}/sigma/overrides', exist_ok=True)
@@ -90,9 +90,9 @@ class TestBackupScript(unittest.TestCase):
@patch('subprocess.run')
def test_ensure_git_repo(self, mock_run):
mock_run.return_value = MagicMock(returncode=0)
ds.ensure_git_repo()
ds.ensure_git_repo(self.output_dir)
mock_run.assert_has_calls([
call(["git", "config", "--global", "init.defaultBranch", "main"], check=True),
call(["git", "-C", self.output_dir, "init"], check=True),
@@ -106,9 +106,9 @@ class TestBackupScript(unittest.TestCase):
mock_commit_result = MagicMock(returncode=1)
# Ensure sufficient number of MagicMock instances for each subprocess.run call
mock_run.side_effect = [mock_status_result, mock_commit_result, MagicMock(returncode=0), MagicMock(returncode=0), MagicMock(returncode=0), MagicMock(returncode=0), MagicMock(returncode=0), MagicMock(returncode=0)]
print("Running test_commit_changes...")
ds.commit_changes()
ds.commit_changes(self.output_dir)
print("Finished test_commit_changes.")
mock_run.assert_has_calls([
@@ -120,39 +120,45 @@ class TestBackupScript(unittest.TestCase):
])
@patch('builtins.print')
@patch('so-detections-backup.commit_changes')
@patch('so-detections-backup.save_overrides')
@patch('so-detections-backup.save_content')
@patch('so-detections-backup.query_elasticsearch')
@patch('so-detections-backup.get_auth_credentials')
@patch.object(ds, 'commit_changes')
@patch.object(ds, 'save_overrides')
@patch.object(ds, 'save_content')
@patch.object(ds, 'query_elasticsearch')
@patch.object(ds, 'get_auth_credentials')
@patch('os.makedirs')
def test_main(self, mock_makedirs, mock_get_auth, mock_query, mock_save_content, mock_save_overrides, mock_commit, mock_print):
@patch.object(ds, 'parse_args')
def test_main(self, mock_parse_args, mock_makedirs, mock_get_auth, mock_query, mock_save_content, mock_save_overrides, mock_commit, mock_print):
mock_args = MagicMock()
mock_args.output = self.output_dir
mock_args.index = ds.DEFAULT_INDEX
mock_parse_args.return_value = mock_args
mock_get_auth.return_value = self.auth_credentials
mock_query.side_effect = [
{'hits': {'hits': [{"_source": {"so_detection": {"publicId": "1", "content": "content1", "language": "sigma"}}}]}},
{'hits': {'hits': [{"_source": {"so_detection": {"publicId": "2", "overrides": [{"key": "value"}], "language": "suricata"}}}]}}
]
with patch('datetime.datetime') as mock_datetime:
mock_datetime.now.return_value.strftime.return_value = "2024-05-23 20:49:44"
ds.main()
mock_makedirs.assert_called_once_with(self.output_dir, exist_ok=True)
mock_get_auth.assert_called_once_with(ds.AUTH_FILE)
mock_query.assert_has_calls([
call(ds.QUERY_DETECTIONS, self.auth),
call(ds.QUERY_OVERRIDES, self.auth)
call(ds.QUERY_DETECTIONS, self.auth, ds.DEFAULT_INDEX),
call(ds.QUERY_OVERRIDES, self.auth, ds.DEFAULT_INDEX)
])
mock_save_content.assert_called_once_with(
{"_source": {"so_detection": {"publicId": "1", "content": "content1", "language": "sigma"}}},
self.output_dir,
"sigma",
{"_source": {"so_detection": {"publicId": "1", "content": "content1", "language": "sigma"}}},
self.output_dir,
"sigma",
"yaml"
)
mock_save_overrides.assert_called_once_with(
{"_source": {"so_detection": {"publicId": "2", "overrides": [{"key": "value"}], "language": "suricata"}}}
{"_source": {"so_detection": {"publicId": "2", "overrides": [{"key": "value"}], "language": "suricata"}}},
self.output_dir
)
mock_commit.assert_called_once()
mock_commit.assert_called_once_with(self.output_dir)
mock_print.assert_called()
if __name__ == '__main__':
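
The move from string patch targets to patch.object matters because the script's filename contains dashes: patch('so-detections-backup.commit_changes') has to import a module literally named so-detections-backup (or find it in sys.modules), which normally fails, while patch.object works on the module object the test file has already loaded. A rough sketch of that pattern, with the by-path loader being an assumption about how the tests import the script:

import importlib.util
from unittest.mock import patch

# Assumed loader: bring in the dash-named script by file path under the alias "ds".
spec = importlib.util.spec_from_file_location("ds", "so-detections-backup.py")
ds = importlib.util.module_from_spec(spec)
spec.loader.exec_module(ds)

# Patching the attribute on the loaded module object avoids any import-by-name lookup.
with patch.object(ds, "query_elasticsearch", return_value={"hits": {"hits": []}}) as mock_query:
    result = ds.query_elasticsearch("{}", None, ds.DEFAULT_INDEX)
    assert result == {"hits": {"hits": []}}
    mock_query.assert_called_once_with("{}", None, ds.DEFAULT_INDEX)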


@@ -70,7 +70,7 @@
{# Define the Detections custom ruleset that should always be present #}
{% set CUSTOM_RULESET = {
'name': 'custom',
'name': '__custom__',
'description': 'User-created custom rules created via the Detections module in the SOC UI',
'sourceType': 'elasticsearch',
'sourcePath': 'so_detection.ruleset:__custom__',
@@ -83,7 +83,7 @@
{# Always append the custom ruleset to suricataengine.rulesetSources if not already present #}
{% if SOCMERGED.config.server.modules.suricataengine is defined and SOCMERGED.config.server.modules.suricataengine.rulesetSources is defined %}
{% if SOCMERGED.config.server.modules.suricataengine.rulesetSources is not mapping %}
{% set custom_names = SOCMERGED.config.server.modules.suricataengine.rulesetSources | selectattr('name', 'equalto', 'custom') | list %}
{% set custom_names = SOCMERGED.config.server.modules.suricataengine.rulesetSources | selectattr('name', 'equalto', '__custom__') | list %}
{% if custom_names | length == 0 %}
{% do SOCMERGED.config.server.modules.suricataengine.rulesetSources.append(CUSTOM_RULESET) %}
{% endif %}
@@ -108,21 +108,39 @@
{% if ruleset.name == 'Emerging-Threats' %}
{% if ruleset.licenseKey and ruleset.licenseKey != '' %}
{# License key is defined - transform to ETPRO #}
{# Engine Version is hardcoded in the URL - this does not change often: https://community.emergingthreats.net/t/supported-engines/71 #}
{% do ruleset.update({
'name': 'ETPRO',
'sourcePath': 'https://rules.emergingthreatspro.com/' ~ ruleset.licenseKey ~ '/suricata-7.0.3/etpro.rules.tar.gz',
'urlHash': 'https://rules.emergingthreatspro.com/' ~ ruleset.licenseKey ~ '/suricata-7.0.3/etpro.rules.tar.gz.md5',
'license': 'Commercial'
}) %}
{% if ruleset.sourceType == 'directory' %}
{# Airgap mode - update directory path #}
{% do ruleset.update({
'name': 'ETPRO',
'sourcePath': '/nsm/rules/custom-local-repos/local-etpro-suricata/etpro.rules.tar.gz',
'license': 'Commercial'
}) %}
{% else %}
{# Engine Version is hardcoded in the URL - this does not change often: https://community.emergingthreats.net/t/supported-engines/71 #}
{% do ruleset.update({
'name': 'ETPRO',
'sourcePath': 'https://rules.emergingthreatspro.com/' ~ ruleset.licenseKey ~ '/suricata-7.0.3/etpro.rules.tar.gz',
'urlHash': 'https://rules.emergingthreatspro.com/' ~ ruleset.licenseKey ~ '/suricata-7.0.3/etpro.rules.tar.gz.md5',
'license': 'Commercial'
}) %}
{% endif %}
{% else %}
{# No license key - explicitly set to ETOPEN #}
{% do ruleset.update({
'name': 'ETOPEN',
'sourcePath': 'https://rules.emergingthreats.net/open/suricata-7.0.3/emerging.rules.tar.gz',
'urlHash': 'https://rules.emergingthreats.net/open/suricata-7.0.3/emerging.rules.tar.gz.md5',
'license': 'BSD'
}) %}
{% if ruleset.sourceType == 'directory' %}
{# Airgap mode - update directory path #}
{% do ruleset.update({
'name': 'ETOPEN',
'sourcePath': '/nsm/rules/suricata/etopen/',
'license': 'BSD'
}) %}
{% else %}
{% do ruleset.update({
'name': 'ETOPEN',
'sourcePath': 'https://rules.emergingthreats.net/open/suricata-7.0.3/emerging.rules.tar.gz',
'urlHash': 'https://rules.emergingthreats.net/open/suricata-7.0.3/emerging.rules.tar.gz.md5',
'license': 'BSD'
}) %}
{% endif %}
{% endif %}
{% endif %}
{% endfor %}
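
A rough Python rendering of the branching above, to make the four outcomes explicit (licensed vs. unlicensed, airgap directory source vs. download); the URLs and paths are taken from the template, the helper itself is only illustrative:

def resolve_et_ruleset(ruleset):
    # License key selects ETPRO vs ETOPEN; sourceType == 'directory' (airgap) selects a local path over a download URL.
    licensed = bool(ruleset.get("licenseKey"))
    airgap = ruleset.get("sourceType") == "directory"
    if licensed:
        if airgap:
            ruleset.update({"name": "ETPRO",
                            "sourcePath": "/nsm/rules/custom-local-repos/local-etpro-suricata/etpro.rules.tar.gz",
                            "license": "Commercial"})
        else:
            url = f"https://rules.emergingthreatspro.com/{ruleset['licenseKey']}/suricata-7.0.3/etpro.rules.tar.gz"
            ruleset.update({"name": "ETPRO", "sourcePath": url, "urlHash": url + ".md5", "license": "Commercial"})
    else:
        if airgap:
            ruleset.update({"name": "ETOPEN", "sourcePath": "/nsm/rules/suricata/etopen/", "license": "BSD"})
        else:
            url = "https://rules.emergingthreats.net/open/suricata-7.0.3/emerging.rules.tar.gz"
            ruleset.update({"name": "ETOPEN", "sourcePath": url, "urlHash": url + ".md5", "license": "BSD"})
    return ruleset

# Example: an unlicensed Emerging-Threats source on an airgapped grid resolves to the local ETOPEN directory.
print(resolve_et_ruleset({"name": "Emerging-Threats", "sourceType": "directory", "licenseKey": ""}))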


@@ -608,6 +608,18 @@ soc:
label: Delete Unreferenced (Deletes rules that are no longer referenced by ruleset source)
forcedType: bool
required: False
- field: proxyURL
label: HTTP/HTTPS proxy URL for downloading the ruleset.
required: False
- field: proxyUsername
label: Proxy authentication username.
required: False
- field: proxyPassword
label: Proxy authentication password.
required: False
- field: proxyCACert
label: Path to CA certificate file for MITM proxy verification.
required: False
airgap: *serulesetSources
navigator:
intervalMinutes:
@@ -640,7 +652,6 @@ soc:
assistant:
apiUrl:
description: The URL of the AI gateway.
advanced: True
global: True
healthTimeoutSeconds:
description: Timeout in seconds for the Onion AI health check.
@@ -696,6 +707,9 @@ soc:
- field: displayName
label: Display Name
required: True
- field: origin
label: Country of Origin for the Model Training
required: false
- field: contextLimitSmall
label: Context Limit (Small)
forcedType: int
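
The new origin annotation corresponds to the per-model origin key added in the availableModels diff earlier in this compare. Written out as a plain mapping, an entry carries roughly the following (values copied from the qwen-235b entry above; the enabled flag is assumed since that hunk is truncated before it):

model_entry = {
    "id": "qwen-235b",
    "displayName": "QWEN 235B ($)",
    "origin": "China",              # new optional field surfaced by this annotation
    "contextLimitSmall": 256000,
    "contextLimitLarge": 256000,
    "lowBalanceColorAlert": 500000,
    "enabled": True,                # assumed; not shown in the truncated hunk
}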


@@ -17,14 +17,23 @@ query() {
STATS=$(query "ruleset-stats")
RELOAD=$(query "ruleset-reload-time")
[ -z "$RELOAD" ] && RELOAD='{}'
if echo "$STATS" | jq -e '.return == "OK"' > /dev/null 2>&1; then
LOADED=$(echo "$STATS" | jq -r '.message[0].rules_loaded')
FAILED=$(echo "$STATS" | jq -r '.message[0].rules_failed')
LAST_RELOAD=$(echo "$RELOAD" | jq -r '.message[0].last_reload')
# Outputs valid JSON on success, empty on failure
OUTPUT=$(jq -n \
--argjson stats "$STATS" \
--argjson reload "$RELOAD" \
'if $stats.return == "OK" and ($stats.message[0].rules_loaded | type) == "number" and ($stats.message[0].rules_failed | type) == "number" then
{
rules_loaded: $stats.message[0].rules_loaded,
rules_failed: $stats.message[0].rules_failed,
last_reload: ($reload.message[0].last_reload // ""),
return: "OK"
}
else empty end' 2>/dev/null)
jq -n --argjson loaded "$LOADED" --argjson failed "$FAILED" --arg reload "$LAST_RELOAD" \
'{rules_loaded: $loaded, rules_failed: $failed, last_reload: $reload, return: "OK"}' > "$OUTFILE"
if [ -n "$OUTPUT" ]; then
echo "$OUTPUT" > "$OUTFILE"
else
echo '{"return":"FAIL"}' > "$OUTFILE"
fi
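
For readers less comfortable with jq, a Python paraphrase of the guard above: a merged record is produced only when the salt ruleset-stats payload is well-formed, and anything else falls through to the FAIL record. Field names match the script; the helper itself is illustrative.

import json

def build_stats_record(stats, reload_info):
    # Mirrors the jq filter: require return == "OK" and numeric rules_loaded/rules_failed.
    msg = (stats.get("message") or [{}])[0]
    if stats.get("return") != "OK" \
            or not isinstance(msg.get("rules_loaded"), (int, float)) \
            or not isinstance(msg.get("rules_failed"), (int, float)):
        return None  # caller writes {"return": "FAIL"} instead, as the script does
    reload_msg = (reload_info.get("message") or [{}])[0]
    return {
        "rules_loaded": msg["rules_loaded"],
        "rules_failed": msg["rules_failed"],
        "last_reload": reload_msg.get("last_reload") or "",
        "return": "OK",
    }

# Example: a malformed stats payload (rules_loaded is a string) now yields the FAIL record, not broken JSON.
record = build_stats_record({"return": "OK", "message": [{"rules_loaded": "N/A", "rules_failed": 0}]}, {})
print(json.dumps(record or {"return": "FAIL"}))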


@@ -18,11 +18,15 @@ if [[ ! "`pidof -x $(basename $0) -o %PPID`" ]]; then
if [ -f "$STATSFILE" ] && [ $(($(date +%s) - $(stat -c %Y "$STATSFILE"))) -lt 90 ] && jq -e '.return == "OK" and .rules_loaded != null and .rules_failed != null' "$STATSFILE" > /dev/null 2>&1; then
LOADED=$(jq -r '.rules_loaded' "$STATSFILE")
FAILED=$(jq -r '.rules_failed' "$STATSFILE")
RELOAD_TIME=$(jq -r '.last_reload // ""' "$STATSFILE")
RELOAD_TIME=$(jq -r 'if .last_reload then .last_reload else "" end' "$STATSFILE")
echo "surirules loaded=${LOADED}i,failed=${FAILED}i,reload_time=\"${RELOAD_TIME}\",status=\"ok\""
if [ -n "$RELOAD_TIME" ]; then
echo "surirules loaded=${LOADED}i,failed=${FAILED}i,reload_time=\"${RELOAD_TIME}\",status=\"ok\""
else
echo "surirules loaded=${LOADED}i,failed=${FAILED}i,status=\"ok\""
fi
else
echo "surirules loaded=0i,failed=0i,reload_time=\"\",status=\"unknown\""
echo "surirules loaded=0i,failed=0i,status=\"unknown\""
fi
fi
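
The reader now drops the reload_time field when the cached value is empty, and from the unknown fallback line, presumably so the influx line protocol never carries an empty-string field. A small Python sketch of the same formatting rule (the sample values are made up):

def influx_line(loaded, failed, reload_time="", status="ok"):
    # Only include reload_time when it is non-empty, matching the script above.
    fields = [f"loaded={loaded}i", f"failed={failed}i"]
    if reload_time:
        fields.append(f'reload_time="{reload_time}"')
    fields.append(f'status="{status}"')
    return "surirules " + ",".join(fields)

print(influx_line(41000, 2, "2026-01-07T19:00:00.000000+0000"))
print(influx_line(0, 0, status="unknown"))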
