Merge pull request #13576 from Security-Onion-Solutions/2.4/dev

2.4.100
This commit is contained in:
Mike Reeves
2024-08-29 16:18:20 -04:00
committed by GitHub
68 changed files with 2648 additions and 1340 deletions

View File

@@ -1,17 +1,17 @@
### 2.4.90-20240729 ISO image released on 2024/07/29
### 2.4.100-20240829 ISO image released on 2024/08/29
### Download and Verify
2.4.90-20240729 ISO image:
https://download.securityonion.net/file/securityonion/securityonion-2.4.90-20240729.iso
2.4.100-20240829 ISO image:
https://download.securityonion.net/file/securityonion/securityonion-2.4.100-20240829.iso
MD5: 9A7714F5922EE555F08675D25E6237D5
SHA1: D3B331452627DB716906BA9F3922574DFA3852DC
SHA256: 5B0CE32543944DBC50C4E906857384211E1BE83EF409619778F18FC62017E0E0
MD5: 377586C143FABD662DB414DEA49D46B7
SHA1: 69D4B94522789AF47075A9FF1354B069679AC366
SHA256: 52FBA5C8762B8DCF2945AD2837B3A19E63ADCC209AB510D7FD0F86AE713AA153
Signature for ISO image:
https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.90-20240729.iso.sig
https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.100-20240829.iso.sig
Signing key:
https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.4/main/KEYS
@@ -25,22 +25,22 @@ wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.
Download the signature file for the ISO:
```
wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.90-20240729.iso.sig
wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.100-20240829.iso.sig
```
Download the ISO image:
```
wget https://download.securityonion.net/file/securityonion/securityonion-2.4.90-20240729.iso
wget https://download.securityonion.net/file/securityonion/securityonion-2.4.100-20240829.iso
```
Verify the downloaded ISO image using the signature file:
```
gpg --verify securityonion-2.4.90-20240729.iso.sig securityonion-2.4.90-20240729.iso
gpg --verify securityonion-2.4.100-20240829.iso.sig securityonion-2.4.100-20240829.iso
```
The output should show "Good signature" and the Primary key fingerprint should match what's shown below:
```
gpg: Signature made Thu 25 Jul 2024 06:51:11 PM EDT using RSA key ID FE507013
gpg: Signature made Thu 29 Aug 2024 12:02:55 PM EDT using RSA key ID FE507013
gpg: Good signature from "Security Onion Solutions, LLC <info@securityonionsolutions.com>"
gpg: WARNING: This key is not certified with a trusted signature!
gpg: There is no indication that the signature belongs to the owner.
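
# In addition to the GPG signature check, the ISO can be compared against the
# published SHA256 checksum listed above; a minimal check:
sha256sum securityonion-2.4.100-20240829.iso
# Expected digest: 52FBA5C8762B8DCF2945AD2837B3A19E63ADCC209AB510D7FD0F86AE713AA153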

View File

@@ -5,9 +5,11 @@
| Version | Supported |
| ------- | ------------------ |
| 2.4.x | :white_check_mark: |
| 2.3.x | :white_check_mark: |
| 2.3.x | :x: |
| 16.04.x | :x: |
Security Onion 2.3 has reached End Of Life and is no longer supported.
Security Onion 16.04 has reached End Of Life and is no longer supported.
## Reporting a Vulnerability

View File

@@ -1 +1 @@
2.4.90
2.4.100

View File

@@ -14,6 +14,11 @@ net.core.wmem_default:
sysctl.present:
- value: 26214400
# Users are not a fan of console messages
kernel.printk:
sysctl.present:
- value: "3 4 1 3"
# Remove variables.txt from /tmp - This is temp
rmvariablesfile:
file.absent:
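
The kernel.printk value added above maps to the four console logging thresholds (console, default message, minimum console, and default console log levels). A hedged sketch of how to inspect and apply the same setting by hand:
```
# Show the current console log level settings
cat /proc/sys/kernel/printk

# Apply the same value the Salt state enforces (quiets most console messages)
sudo sysctl -w kernel.printk="3 4 1 3"
```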

View File

@@ -8,7 +8,7 @@
# Elastic agent is not managed by salt. Because of this we must store this base information in a
# script that accompanies the soup system. Since so-common is one of those special soup files,
# and since this same logic is required during installation, it's included in this file.
ELASTIC_AGENT_TARBALL_VERSION="8.10.4"
ELASTIC_AGENT_TARBALL_VERSION="8.14.3"
ELASTIC_AGENT_URL="https://repo.securityonion.net/file/so-repo/prod/2.4/elasticagent/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.tar.gz"
ELASTIC_AGENT_MD5_URL="https://repo.securityonion.net/file/so-repo/prod/2.4/elasticagent/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.md5"
ELASTIC_AGENT_FILE="/nsm/elastic-fleet/artifacts/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.tar.gz"
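
The MD5 URL above suggests the downloaded agent tarball can be checked against its published hash. A hedged sketch using the variables defined in so-common; the download and comparison logic here is illustrative, not the actual soup implementation:
```
# Illustrative only: fetch the agent tarball and compare it to the published MD5
curl -sSL -o "$ELASTIC_AGENT_FILE" "$ELASTIC_AGENT_URL"
expected=$(curl -sSL "$ELASTIC_AGENT_MD5_URL" | awk '{print $1}')
actual=$(md5sum "$ELASTIC_AGENT_FILE" | awk '{print $1}')
[ "$expected" = "$actual" ] && echo "MD5 OK" || echo "MD5 mismatch"
```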

View File

@@ -95,6 +95,8 @@ if [[ $EXCLUDE_STARTUP_ERRORS == 'Y' ]]; then
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|shutdown process" # server not yet ready (logstash waiting on elastic)
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|contain valid certificates" # server not yet ready (logstash waiting on elastic)
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|failedaction" # server not yet ready (logstash waiting on elastic)
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|block in start_workers" # server not yet ready (logstash waiting on elastic)
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|block in buffer_initialize" # server not yet ready (logstash waiting on elastic)
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|no route to host" # server not yet ready
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|not running" # server not yet ready
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|unavailable" # server not yet ready
@@ -147,6 +149,7 @@ if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|status 200" # false positive (request successful, contained error string in content)
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|app_layer.error" # false positive (suricata 7) in stats.log e.g. app_layer.error.imap.parser | Total | 0
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|is not an ip string literal" # false positive (Open Canary logging out blank IP addresses)
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|syncing rule" # false positive (rule sync log line includes rule name which can contain 'error')
fi
if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then
@@ -170,6 +173,7 @@ if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|cannot join on an empty table" # InfluxDB flux query, import nodes
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|exhausting result iterator" # InfluxDB flux query mismatched table results (temporary data issue)
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|failed to finish run" # InfluxDB rare error, self-recoverable
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Unable to gather disk name" # InfluxDB known error, can't read disks because the container doesn't have them mounted
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|iteration"
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|communication packets"
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|use of closed"
@@ -205,6 +209,7 @@ if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|detect-parse" # Suricata encountering a malformed rule
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|integrity check failed" # Detections: Exclude false positive due to automated testing
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|syncErrors" # Detections: Not an actual error
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Initialized license manager" # SOC log: before fields.status was changed to fields.licenseStatus
fi
RESULT=0
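
The EXCLUDED_ERRORS entries above are pipe-delimited, which suggests they are consumed as an extended regular expression. A hedged sketch of how such a pattern can filter a log down to unexpected errors; the log path is illustrative:
```
# Illustrative filter: keep lines containing "error" that match none of the excluded patterns
grep -iE "error" /opt/so/log/soc/soc.log | grep -viE "$EXCLUDED_ERRORS"
```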

View File

@@ -9,6 +9,9 @@
. /usr/sbin/so-common
software_raid=("SOSMN" "SOSMN-DE02" "SOSSNNV" "SOSSNNV-DE02" "SOS10k-DE02" "SOS10KNV" "SOS10KNV-DE02" "SOS10KNV-DE02" "SOS2000-DE02" "SOS-GOFAST-LT-DE02" "SOS-GOFAST-MD-DE02" "SOS-GOFAST-HV-DE02")
hardware_raid=("SOS1000" "SOS1000F" "SOSSN7200" "SOS5000" "SOS4000")
{%- if salt['grains.get']('sosmodel', '') %}
{%- set model = salt['grains.get']('sosmodel') %}
model={{ model }}
@@ -16,25 +19,35 @@ model={{ model }}
if [[ $model =~ ^(SO2AMI01|SO2AZI01|SO2GCI01)$ ]]; then
exit 0
fi
for i in "${software_raid[@]}"; do
if [[ "$model" == $i ]]; then
is_softwareraid=true
is_hwraid=false
break
fi
done
for i in "${hardware_raid[@]}"; do
if [[ "$model" == $i ]]; then
is_softwareraid=false
is_hwraid=true
break
fi
done
{%- else %}
echo "This is not an appliance"
exit 0
{%- endif %}
if [[ $model =~ ^(SOS10K|SOS500|SOS1000|SOS1000F|SOS4000|SOSSN7200|SOSSNNV|SOSMN)$ ]]; then
is_bossraid=true
fi
if [[ $model =~ ^(SOSSNNV|SOSMN)$ ]]; then
is_swraid=true
fi
if [[ $model =~ ^(SOS10K|SOS500|SOS1000|SOS1000F|SOS4000|SOSSN7200)$ ]]; then
is_hwraid=true
fi
check_nsm_raid() {
PERCCLI=$(/opt/raidtools/perccli/perccli64 /c0/v0 show|grep RAID|grep Optl)
MEGACTL=$(/opt/raidtools/megasasctl |grep optimal)
if [[ $APPLIANCE == '1' ]]; then
if [[ "$model" == "SOS500" || "$model" == "SOS500-DE02" ]]; then
#This doesn't have raid
HWRAID=0
else
if [[ -n $PERCCLI ]]; then
HWRAID=0
elif [[ -n $MEGACTL ]]; then
@@ -42,7 +55,6 @@ check_nsm_raid() {
else
HWRAID=1
fi
fi
}
@@ -50,7 +62,16 @@ check_nsm_raid() {
check_boss_raid() {
MVCLI=$(/usr/local/bin/mvcli info -o vd |grep status |grep functional)
MVTEST=$(/usr/local/bin/mvcli info -o vd | grep "No adapter")
BOSSNVMECLI=$(/usr/local/bin/mnv_cli info -o vd -i 0 | grep Functional)
# Is this NVMe Boss Raid?
if [[ "$model" =~ "-DE02" ]]; then
if [[ -n $BOSSNVMECLI ]]; then
BOSSRAID=0
else
BOSSRAID=1
fi
else
# Check to see if this is a SM based system
if [[ -z $MVTEST ]]; then
if [[ -n $MVCLI ]]; then
@@ -62,6 +83,7 @@ check_boss_raid() {
# This doesn't have boss raid so let's make it 0
BOSSRAID=0
fi
fi
}
check_software_raid() {
@@ -79,14 +101,13 @@ SWRAID=0
BOSSRAID=0
HWRAID=0
if [[ $is_hwraid ]]; then
if [[ "$is_hwraid" == "true" ]]; then
check_nsm_raid
fi
if [[ $is_bossraid ]]; then
check_boss_raid
fi
if [[ $is_swraid ]]; then
if [[ "$is_softwareraid" == "true" ]]; then
check_software_raid
check_boss_raid
fi
sum=$(($SWRAID + $BOSSRAID + $HWRAID))
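
With the updated checks, each status variable is 0 when its RAID type reports healthy, so the sum acts as an overall indicator. A hedged sketch of how that sum might be interpreted (not taken from the script itself):
```
# Hedged interpretation: 0 means every applicable RAID check reported healthy
if [[ $sum -eq 0 ]]; then
    echo "RAID status: healthy"
else
    echo "RAID status: degraded or check failed"
fi
```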

View File

@@ -3,8 +3,8 @@ elastalert:
description: You can enable or disable Elastalert.
helpLink: elastalert.html
alerter_parameters:
title: Alerter Parameters
description: Optional configuration parameters for additional alerters that can be enabled for all Sigma rules. Filter for 'Alerter' in this Configuration screen to find the setting that allows these alerters to be enabled within the SOC ElastAlert module. Use YAML format for these parameters, and reference the ElastAlert 2 documentation, located at https://elastalert2.readthedocs.io, for available alerters and their required configuration parameters. A full update of the ElastAlert rule engine, via the Detections screen, is required in order to apply these changes. Requires a valid Security Onion license key.
title: Custom Configuration Parameters
description: Optional configuration parameters made available as defaults for all rules and alerters. Use YAML format for these parameters, and reference the ElastAlert 2 documentation, located at https://elastalert2.readthedocs.io, for available configuration parameters. Requires a valid Security Onion license key.
global: True
multiline: True
syntax: yaml

View File

@@ -97,6 +97,7 @@ elasticfleet:
- symantec_endpoint
- system
- tcp
- tenable_io
- tenable_sc
- ti_abusech
- ti_anomali

View File

@@ -0,0 +1,19 @@
{
"package": {
"name": "fleet_server",
"version": ""
},
"name": "fleet_server-1",
"namespace": "default",
"policy_id": "FleetServer_hostname",
"vars": {},
"inputs": {
"fleet_server-fleet-server": {
"enabled": true,
"vars": {
"custom": "server.ssl.supported_protocols: [\"TLSv1.2\", \"TLSv1.3\"]\nserver.ssl.cipher_suites: [ \"ECDHE-RSA-AES-128-GCM-SHA256\", \"ECDHE-RSA-AES-256-GCM-SHA384\", \"ECDHE-RSA-AES-128-CBC-SHA\", \"ECDHE-RSA-AES-256-CBC-SHA\", \"RSA-AES-128-GCM-SHA256\", \"RSA-AES-256-GCM-SHA384\"]"
},
"streams": {}
}
}
}
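
The custom variable in this new fleet_server integration template carries the TLS protocol and cipher restrictions as an embedded YAML string. It can be pulled out for review with jq; the file path is the one referenced by the scripts below:
```
# Print the embedded server.ssl settings from the fleet_server integration template
jq -r '.inputs["fleet_server-fleet-server"].vars.custom' \
  /opt/so/conf/elastic-fleet/integrations/fleet-server/fleet-server.json
```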

View File

@@ -5,7 +5,7 @@
"package": {
"name": "endpoint",
"title": "Elastic Defend",
"version": "8.10.2"
"version": "8.14.0"
},
"enabled": true,
"policy_id": "endpoints-initial",

View File

@@ -11,7 +11,7 @@
"winlogs-winlog": {
"enabled": true,
"streams": {
"winlog.winlog": {
"winlog.winlogs": {
"enabled": true,
"vars": {
"channel": "Microsoft-Windows-Windows Defender/Operational",

View File

@@ -20,7 +20,7 @@
],
"data_stream.dataset": "import",
"custom": "",
"processors": "- dissect:\n tokenizer: \"/nsm/import/%{import.id}/evtx/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n- drop_fields:\n fields: [\"host\"]\n ignore_missing: true\n- add_fields:\n target: data_stream\n fields:\n type: logs\n dataset: system.security\n- add_fields:\n target: event\n fields:\n dataset: system.security\n module: system\n imported: true\n- add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.security-1.43.0\n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-Sysmon/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.sysmon_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.sysmon_operational\n module: windows\n imported: true\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.sysmon_operational-1.38.0\n- if:\n equals:\n winlog.channel: 'Application'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.application\n - add_fields:\n target: event\n fields:\n dataset: system.application\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.application-1.43.0\n- if:\n equals:\n winlog.channel: 'System'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.system\n - add_fields:\n target: event\n fields:\n dataset: system.system\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.system-1.43.0\n \n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-PowerShell/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.powershell_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.powershell_operational\n module: windows\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.powershell_operational-1.38.0\n- add_fields:\n target: data_stream\n fields:\n dataset: import",
"processors": "- dissect:\n tokenizer: \"/nsm/import/%{import.id}/evtx/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n- drop_fields:\n fields: [\"host\"]\n ignore_missing: true\n- add_fields:\n target: data_stream\n fields:\n type: logs\n dataset: system.security\n- add_fields:\n target: event\n fields:\n dataset: system.security\n module: system\n imported: true\n- add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.security-1.59.0\n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-Sysmon/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.sysmon_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.sysmon_operational\n module: windows\n imported: true\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.sysmon_operational-1.45.1\n- if:\n equals:\n winlog.channel: 'Application'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.application\n - add_fields:\n target: event\n fields:\n dataset: system.application\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.application-1.59.0\n- if:\n equals:\n winlog.channel: 'System'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.system\n - add_fields:\n target: event\n fields:\n dataset: system.system\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.system-1.59.0\n \n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-PowerShell/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.powershell_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.powershell_operational\n module: windows\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.powershell_operational-1.45.1\n- add_fields:\n target: data_stream\n fields:\n dataset: import",
"tags": [
"import"
]

View File

@@ -0,0 +1,29 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
. /usr/sbin/so-elastic-fleet-common
# Get all the fleet policies
json_output=$(curl -s -K /opt/so/conf/elasticsearch/curl.config -L -X GET "localhost:5601/api/fleet/agent_policies" -H 'kbn-xsrf: true')
# Extract the IDs that start with "FleetServer_"
POLICY=$(echo "$json_output" | jq -r '.items[] | select(.id | startswith("FleetServer_")) | .id')
# Iterate over each ID in the POLICY variable
for POLICYNAME in $POLICY; do
printf "\nUpdating Policy: $POLICYNAME\n"
# First get the Integration ID
INTEGRATION_ID=$(/usr/sbin/so-elastic-fleet-agent-policy-view "$POLICYNAME" | jq -r '.item.package_policies[] | select(.package.name == "fleet_server") | .id')
# Modify the default integration policy to update the policy_id and name with the correct naming
UPDATED_INTEGRATION_POLICY=$(jq --arg policy_id "$POLICYNAME" --arg name "fleet_server-$POLICYNAME" '
.policy_id = $policy_id |
.name = $name' /opt/so/conf/elastic-fleet/integrations/fleet-server/fleet-server.json)
# Now update the integration policy using the modified JSON
elastic_fleet_integration_update "$INTEGRATION_ID" "$UPDATED_INTEGRATION_POLICY"
done
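
A usage note: this helper is invoked from the integration-setup script shown next, but it can also be run on demand to push the updated fleet_server integration to every FleetServer_* policy. A hedged example, assuming it is run on the manager:
```
# Hedged example: re-run the Fleet Server policy update by hand
sudo /sbin/so-elastic-fleet-integration-policy-elastic-fleet-server
```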

View File

@@ -12,7 +12,10 @@ if [ ! -f /opt/so/state/eaintegrations.txt ]; then
# First, check for any package upgrades
/usr/sbin/so-elastic-fleet-package-upgrade
# Second, configure Elastic Defend Integration separately
# Second, update Fleet Server policies
/sbin/so-elastic-fleet-integration-policy-elastic-fleet-server
# Third, configure Elastic Defend Integration separately
/usr/sbin/so-elastic-fleet-integration-policy-elastic-defend
# Initial Endpoints

View File

@@ -53,7 +53,8 @@ fi
printf "\n### Create ES Token ###\n"
ESTOKEN=$(curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/service_tokens" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq -r .value)
### Create Outputs & Fleet URLs ###
### Create Outputs, Fleet Policy and Fleet URLs ###
# Create the Manager Elasticsearch Output first and set it as the default output
printf "\nAdd Manager Elasticsearch Output...\n"
ESCACRT=$(openssl x509 -in $INTCA)
JSON_STRING=$( jq -n \
@@ -62,7 +63,21 @@ JSON_STRING=$( jq -n \
curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/outputs" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
printf "\n\n"
printf "\nCreate Logstash Output Config if node is not an Import or Eval install\n"
# Create the Manager Fleet Server Host Agent Policy
# This has to be done while the Elasticsearch Output is set to the default Output
printf "Create Manager Fleet Server Policy...\n"
elastic_fleet_policy_create "FleetServer_{{ GLOBALS.hostname }}" "Fleet Server - {{ GLOBALS.hostname }}" "false" "120"
# Modify the default integration policy to update the policy_id with the correct naming
UPDATED_INTEGRATION_POLICY=$(jq --arg policy_id "FleetServer_{{ GLOBALS.hostname }}" --arg name "fleet_server-{{ GLOBALS.hostname }}" '
.policy_id = $policy_id |
.name = $name' /opt/so/conf/elastic-fleet/integrations/fleet-server/fleet-server.json)
# Add the Fleet Server Integration to the new Fleet Policy
elastic_fleet_integration_create "$UPDATED_INTEGRATION_POLICY"
# Now we can create the Logstash Output and set it to be the default Output
printf "\n\nCreate Logstash Output Config if node is not an Import or Eval install\n"
{% if grains.role not in ['so-import', 'so-eval'] %}
LOGSTASHCRT=$(openssl x509 -in /etc/pki/elasticfleet-logstash.crt)
LOGSTASHKEY=$(openssl rsa -in /etc/pki/elasticfleet-logstash.key)
@@ -101,16 +116,6 @@ printf "\n\n"
# Load Elasticsearch templates
/usr/sbin/so-elasticsearch-templates-load
# Manager Fleet Server Host
elastic_fleet_policy_create "FleetServer_{{ GLOBALS.hostname }}" "Fleet Server - {{ GLOBALS.hostname }}" "true" "120"
#Temp Fixup for ES Output bug
JSON_STRING=$( jq -n \
--arg NAME "FleetServer_{{ GLOBALS.hostname }}" \
'{"name": $NAME,"description": $NAME,"namespace":"default","monitoring_enabled":["logs"],"inactivity_timeout":120,"data_output_id":"so-manager_elasticsearch"}'
)
curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/agent_policies/FleetServer_{{ GLOBALS.hostname }}" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
# Initial Endpoints Policy
elastic_fleet_policy_create "endpoints-initial" "Initial Endpoint Policy" "false" "1209600"

View File

@@ -118,6 +118,11 @@ esingestconf:
- user: 930
- group: 939
# Remove .fleet_final_pipeline-1 because we are using global@custom now
so-fleet-final-pipeline-remove:
file.absent:
- name: /opt/so/conf/elasticsearch/ingest/.fleet_final_pipeline-1
# Auto-generate Elasticsearch ingest node pipelines from pillar
{% for pipeline, config in ELASTICSEARCHMERGED.pipelines.items() %}
es_ingest_conf_{{pipeline}}:

File diff suppressed because it is too large

View File

@@ -62,6 +62,7 @@
{ "split": { "if": "ctx.event?.dataset != null && ctx.event.dataset.contains('.')", "field": "event.dataset", "separator": "\\.", "target_field": "dataset_tag_temp" } },
{ "append": { "if": "ctx.dataset_tag_temp != null", "field": "tags", "value": "{{dataset_tag_temp.1}}" } },
{ "grok": { "if": "ctx.http?.response?.status_code != null", "field": "http.response.status_code", "patterns": ["%{NUMBER:http.response.status_code:long} %{GREEDYDATA}"]} },
{ "set": { "if": "ctx?.metadata?.kafka != null" , "field": "kafka.id", "value": "{{metadata.kafka.partition}}{{metadata.kafka.offset}}{{metadata.kafka.timestamp}}", "ignore_failure": true } },
{ "remove": { "field": [ "message2", "type", "fields", "category", "module", "dataset", "dataset_tag_temp", "event.dataset_temp" ], "ignore_missing": true, "ignore_failure": true } }
{%- endraw %}
{%- if HIGHLANDER %}
@@ -73,6 +74,8 @@
}
{%- endif %}
{%- raw %}
,
{ "pipeline": { "name": "global@custom", "ignore_missing_pipeline": true, "description": "[Fleet] Global pipeline for all data streams" } }
]
}
{% endraw %}

View File

@@ -1,107 +0,0 @@
{
"version": 3,
"_meta": {
"managed_by": "fleet",
"managed": true
},
"description": "Final pipeline for processing all incoming Fleet Agent documents. \n",
"processors": [
{
"date": {
"description": "Add time when event was ingested (and remove sub-seconds to improve storage efficiency)",
"tag": "truncate-subseconds-event-ingested",
"field": "_ingest.timestamp",
"target_field": "event.ingested",
"formats": [
"ISO8601"
],
"output_format": "date_time_no_millis",
"ignore_failure": true
}
},
{
"remove": {
"description": "Remove any pre-existing untrusted values.",
"field": [
"event.agent_id_status",
"_security"
],
"ignore_missing": true
}
},
{
"set_security_user": {
"field": "_security",
"properties": [
"authentication_type",
"username",
"realm",
"api_key"
]
}
},
{
"script": {
"description": "Add event.agent_id_status based on the API key metadata and the agent.id contained in the event.\n",
"tag": "agent-id-status",
"source": "boolean is_user_trusted(def ctx, def users) {\n if (ctx?._security?.username == null) {\n return false;\n }\n\n def user = null;\n for (def item : users) {\n if (item?.username == ctx._security.username) {\n user = item;\n break;\n }\n }\n\n if (user == null || user?.realm == null || ctx?._security?.realm?.name == null) {\n return false;\n }\n\n if (ctx._security.realm.name != user.realm) {\n return false;\n }\n\n return true;\n}\n\nString verified(def ctx, def params) {\n // No agent.id field to validate.\n if (ctx?.agent?.id == null) {\n return \"missing\";\n }\n\n // Check auth metadata from API key.\n if (ctx?._security?.authentication_type == null\n // Agents only use API keys.\n || ctx._security.authentication_type != 'API_KEY'\n // Verify the API key owner before trusting any metadata it contains.\n || !is_user_trusted(ctx, params.trusted_users)\n // Verify the API key has metadata indicating the assigned agent ID.\n || ctx?._security?.api_key?.metadata?.agent_id == null) {\n return \"auth_metadata_missing\";\n }\n\n // The API key can only be used represent the agent.id it was issued to.\n if (ctx._security.api_key.metadata.agent_id != ctx.agent.id) {\n // Potential masquerade attempt.\n return \"mismatch\";\n }\n\n return \"verified\";\n}\n\nif (ctx?.event == null) {\n ctx.event = [:];\n}\n\nctx.event.agent_id_status = verified(ctx, params);",
"params": {
"trusted_users": [
{
"username": "elastic/fleet-server",
"realm": "_service_account"
},
{
"username": "cloud-internal-agent-server",
"realm": "found"
},
{
"username": "elastic",
"realm": "reserved"
}
]
}
}
},
{
"remove": {
"field": "_security",
"ignore_missing": true
}
},
{ "set": { "ignore_failure": true, "field": "event.module", "value": "elastic_agent" } },
{ "split": { "if": "ctx.event?.dataset != null && ctx.event.dataset.contains('.')", "field": "event.dataset", "separator": "\\.", "target_field": "module_temp" } },
{ "set": { "if": "ctx.module_temp != null", "override": true, "field": "event.module", "value": "{{module_temp.0}}" } },
{ "gsub": { "if": "ctx.event?.dataset != null && ctx.event.dataset.contains('.')", "field": "event.dataset", "pattern": "^[^.]*.", "replacement": "", "target_field": "dataset_tag_temp" } },
{ "append": { "if": "ctx.dataset_tag_temp != null", "field": "tags", "value": "{{dataset_tag_temp}}" } },
{ "set": { "if": "ctx.network?.direction == 'egress'", "override": true, "field": "network.initiated", "value": "true" } },
{ "set": { "if": "ctx.network?.direction == 'ingress'", "override": true, "field": "network.initiated", "value": "false" } },
{ "set": { "if": "ctx.network?.type == 'ipv4'", "override": true, "field": "destination.ipv6", "value": "false" } },
{ "set": { "if": "ctx.network?.type == 'ipv6'", "override": true, "field": "destination.ipv6", "value": "true" } },
{ "set": { "if": "ctx.tags.0 == 'import'", "override": true, "field": "data_stream.dataset", "value": "import" } },
{ "set": { "if": "ctx.tags.0 == 'import'", "override": true, "field": "data_stream.namespace", "value": "so" } },
{ "date": { "if": "ctx.event?.module == 'system'", "field": "event.created", "target_field": "@timestamp","ignore_failure": true, "formats": ["yyyy-MM-dd'T'HH:mm:ss.SSSX","yyyy-MM-dd'T'HH:mm:ss.SSSSSS'Z'"] } },
{ "community_id":{ "if": "ctx.event?.dataset == 'endpoint.events.network'", "ignore_failure":true } },
{ "set": { "if": "ctx.event?.module == 'fim'", "override": true, "field": "event.module", "value": "file_integrity" } },
{ "rename": { "if": "ctx.winlog?.provider_name == 'Microsoft-Windows-Windows Defender'", "ignore_missing": true, "field": "winlog.event_data.Threat Name", "target_field": "winlog.event_data.threat_name" } },
{ "set": { "if": "ctx?.metadata?.kafka != null" , "field": "kafka.id", "value": "{{metadata.kafka.partition}}{{metadata.kafka.offset}}{{metadata.kafka.timestamp}}", "ignore_failure": true } },
{ "remove": { "field": [ "message2", "type", "fields", "category", "module", "dataset", "event.dataset_temp", "dataset_tag_temp", "module_temp" ], "ignore_missing": true, "ignore_failure": true } }
],
"on_failure": [
{
"remove": {
"field": "_security",
"ignore_missing": true,
"ignore_failure": true
}
},
{
"append": {
"field": "error.message",
"value": [
"failed in Fleet agent final_pipeline: {{ _ingest.on_failure_message }}"
]
}
}
]
}

View File

@@ -0,0 +1,27 @@
{
"version": 3,
"_meta": {
"managed_by": "securityonion",
"managed": true
},
"description": "Custom pipeline for processing all incoming Fleet Agent documents. \n",
"processors": [
{ "set": { "ignore_failure": true, "field": "event.module", "value": "elastic_agent" } },
{ "split": { "if": "ctx.event?.dataset != null && ctx.event.dataset.contains('.')", "field": "event.dataset", "separator": "\\.", "target_field": "module_temp" } },
{ "set": { "if": "ctx.module_temp != null", "override": true, "field": "event.module", "value": "{{module_temp.0}}" } },
{ "gsub": { "if": "ctx.event?.dataset != null && ctx.event.dataset.contains('.')", "field": "event.dataset", "pattern": "^[^.]*.", "replacement": "", "target_field": "dataset_tag_temp" } },
{ "append": { "if": "ctx.dataset_tag_temp != null", "field": "tags", "value": "{{dataset_tag_temp}}" } },
{ "set": { "if": "ctx.network?.direction == 'egress'", "override": true, "field": "network.initiated", "value": "true" } },
{ "set": { "if": "ctx.network?.direction == 'ingress'", "override": true, "field": "network.initiated", "value": "false" } },
{ "set": { "if": "ctx.network?.type == 'ipv4'", "override": true, "field": "destination.ipv6", "value": "false" } },
{ "set": { "if": "ctx.network?.type == 'ipv6'", "override": true, "field": "destination.ipv6", "value": "true" } },
{ "set": { "if": "ctx.tags.0 == 'import'", "override": true, "field": "data_stream.dataset", "value": "import" } },
{ "set": { "if": "ctx.tags.0 == 'import'", "override": true, "field": "data_stream.namespace", "value": "so" } },
{ "date": { "if": "ctx.event?.module == 'system'", "field": "event.created", "target_field": "@timestamp","ignore_failure": true, "formats": ["yyyy-MM-dd'T'HH:mm:ss.SSSX","yyyy-MM-dd'T'HH:mm:ss.SSSSSS'Z'"] } },
{ "community_id":{ "if": "ctx.event?.dataset == 'endpoint.events.network'", "ignore_failure":true } },
{ "set": { "if": "ctx.event?.module == 'fim'", "override": true, "field": "event.module", "value": "file_integrity" } },
{ "rename": { "if": "ctx.winlog?.provider_name == 'Microsoft-Windows-Windows Defender'", "ignore_missing": true, "field": "winlog.event_data.Threat Name", "target_field": "winlog.event_data.threat_name" } },
{ "set": { "if": "ctx?.metadata?.kafka != null" , "field": "kafka.id", "value": "{{metadata.kafka.partition}}{{metadata.kafka.offset}}{{metadata.kafka.timestamp}}", "ignore_failure": true } },
{ "remove": { "field": [ "message2", "type", "fields", "category", "module", "dataset", "event.dataset_temp", "dataset_tag_temp", "module_temp" ], "ignore_missing": true, "ignore_failure": true } }
]
}
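
To confirm the new custom pipeline behaves like the final pipeline it replaces, Elasticsearch's simulate API can be run against a sample document. A hedged sketch, assuming the pipeline is loaded under the global@custom name referenced earlier:
```
# Hedged check: simulate the custom pipeline against a minimal sample document
# (pipeline id is an assumption based on the global@custom reference above)
so-elasticsearch-query _ingest/pipeline/global@custom/_simulate -XPOST -d '{
  "docs": [ { "_source": { "event": { "dataset": "system.security" }, "network": { "direction": "egress" } } } ]
}'
```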

View File

@@ -466,6 +466,13 @@ elasticsearch:
so-logs-sonicwall_firewall_x_log: *indexSettings
so-logs-snort_x_log: *indexSettings
so-logs-symantec_endpoint_x_log: *indexSettings
so-logs-tenable_io_x_asset: *indexSettings
so-logs-tenable_io_x_plugin: *indexSettings
so-logs-tenable_io_x_scan: *indexSettings
so-logs-tenable_io_x_vulnerability: *indexSettings
so-logs-tenable_sc_x_asset: *indexSettings
so-logs-tenable_sc_x_plugin: *indexSettings
so-logs-tenable_sc_x_vulnerability: *indexSettings
so-logs-ti_abusech_x_malware: *indexSettings
so-logs-ti_abusech_x_malwarebazaar: *indexSettings
so-logs-ti_abusech_x_threatfox: *indexSettings
@@ -530,6 +537,58 @@ elasticsearch:
so-strelka: *indexSettings
so-syslog: *indexSettings
so-zeek: *indexSettings
so-metrics-fleet_server_x_agent_status: &fleetMetricsSettings
index_sorting:
description: Sorts the index by event time, at the cost of additional processing resource consumption.
advanced: True
readonly: True
helpLink: elasticsearch.html
index_template:
ignore_missing_component_templates:
description: Ignore component templates if they aren't in Elasticsearch.
advanced: True
readonly: True
helpLink: elasticsearch.html
index_patterns:
description: Patterns for matching multiple indices or tables.
advanced: True
readonly: True
helpLink: elasticsearch.html
template:
settings:
index:
mode:
description: Type of mode used for this index. Time series indices can be used for metrics to reduce necessary storage.
advanced: True
readonly: True
helpLink: elasticsearch.html
number_of_replicas:
description: Number of replicas required for this index. Multiple replicas protects against data loss, but also increases storage costs.
advanced: True
readonly: True
helpLink: elasticsearch.html
composed_of:
description: The index template is composed of these component templates.
advanced: True
readonly: True
helpLink: elasticsearch.html
priority:
description: The priority of the index template.
advanced: True
readonly: True
helpLink: elasticsearch.html
data_stream:
hidden:
description: Hide the data stream.
advanced: True
readonly: True
helpLink: elasticsearch.html
allow_custom_routing:
description: Allow custom routing for the data stream.
advanced: True
readonly: True
helpLink: elasticsearch.html
so-metrics-fleet_server_x_agent_versions: *fleetMetricsSettings
so_roles:
so-manager: &soroleSettings
config:

View File

@@ -6,7 +6,7 @@
"name": "logs"
},
"codec": "best_compression",
"default_pipeline": "logs-elastic_agent-1.13.1",
"default_pipeline": "logs-elastic_agent-1.20.0",
"mapping": {
"total_fields": {
"limit": "10000"

View File

@@ -0,0 +1,201 @@
{
"template": {
"settings": {
"index": {
"lifecycle": {
"name": "metrics"
},
"default_pipeline": "metrics-fleet_server.agent_status-1.5.0",
"mapping": {
"total_fields": {
"limit": "1000"
}
}
}
},
"mappings": {
"dynamic": false,
"_source": {
"mode": "synthetic"
},
"properties": {
"cluster": {
"properties": {
"id": {
"time_series_dimension": true,
"type": "keyword"
}
}
},
"fleet": {
"properties": {
"agents": {
"properties": {
"offline": {
"time_series_metric": "gauge",
"meta": {},
"type": "long"
},
"total": {
"time_series_metric": "gauge",
"meta": {},
"type": "long"
},
"updating": {
"time_series_metric": "gauge",
"meta": {},
"type": "long"
},
"inactive": {
"time_series_metric": "gauge",
"meta": {},
"type": "long"
},
"healthy": {
"time_series_metric": "gauge",
"meta": {},
"type": "long"
},
"unhealthy": {
"time_series_metric": "gauge",
"meta": {},
"type": "long"
},
"unenrolled": {
"time_series_metric": "gauge",
"meta": {},
"type": "long"
},
"enrolled": {
"time_series_metric": "gauge",
"meta": {},
"type": "long"
},
"unhealthy_reason": {
"properties": {
"output": {
"time_series_metric": "gauge",
"meta": {},
"type": "long"
},
"input": {
"time_series_metric": "gauge",
"meta": {},
"type": "long"
},
"other": {
"time_series_metric": "gauge",
"meta": {},
"type": "long"
}
}
},
"upgrading_step": {
"properties": {
"rollback": {
"time_series_metric": "gauge",
"meta": {},
"type": "long"
},
"requested": {
"time_series_metric": "gauge",
"meta": {},
"type": "long"
},
"restarting": {
"time_series_metric": "gauge",
"meta": {},
"type": "long"
},
"downloading": {
"time_series_metric": "gauge",
"meta": {},
"type": "long"
},
"scheduled": {
"time_series_metric": "gauge",
"meta": {},
"type": "long"
},
"extracting": {
"time_series_metric": "gauge",
"meta": {},
"type": "long"
},
"replacing": {
"time_series_metric": "gauge",
"meta": {},
"type": "long"
},
"failed": {
"time_series_metric": "gauge",
"meta": {},
"type": "long"
},
"watching": {
"time_series_metric": "gauge",
"meta": {},
"type": "long"
}
}
}
}
}
}
},
"agent": {
"properties": {
"id": {
"ignore_above": 1024,
"type": "keyword"
},
"type": {
"ignore_above": 1024,
"type": "keyword"
},
"version": {
"ignore_above": 1024,
"type": "keyword"
}
}
},
"@timestamp": {
"ignore_malformed": false,
"type": "date"
},
"data_stream": {
"properties": {
"namespace": {
"type": "constant_keyword"
},
"type": {
"type": "constant_keyword"
},
"dataset": {
"type": "constant_keyword"
}
}
},
"kibana": {
"properties": {
"uuid": {
"path": "agent.id",
"type": "alias"
},
"version": {
"path": "agent.version",
"type": "alias"
}
}
}
}
}
},
"_meta": {
"package": {
"name": "fleet_server"
},
"managed_by": "fleet",
"managed": true
}
}

View File

@@ -0,0 +1,102 @@
{
"template": {
"settings": {
"index": {
"lifecycle": {
"name": "metrics"
},
"default_pipeline": "metrics-fleet_server.agent_versions-1.5.0",
"mapping": {
"total_fields": {
"limit": "1000"
}
}
}
},
"mappings": {
"dynamic": false,
"_source": {
"mode": "synthetic"
},
"properties": {
"cluster": {
"properties": {
"id": {
"time_series_dimension": true,
"type": "keyword"
}
}
},
"fleet": {
"properties": {
"agent": {
"properties": {
"count": {
"time_series_metric": "gauge",
"meta": {},
"type": "long"
},
"version": {
"time_series_dimension": true,
"type": "keyword"
}
}
}
}
},
"agent": {
"properties": {
"id": {
"ignore_above": 1024,
"type": "keyword"
},
"type": {
"ignore_above": 1024,
"type": "keyword"
},
"version": {
"ignore_above": 1024,
"type": "keyword"
}
}
},
"@timestamp": {
"ignore_malformed": false,
"type": "date"
},
"data_stream": {
"properties": {
"namespace": {
"type": "constant_keyword"
},
"type": {
"type": "constant_keyword"
},
"dataset": {
"type": "constant_keyword"
}
}
},
"kibana": {
"properties": {
"uuid": {
"path": "agent.id",
"type": "alias"
},
"version": {
"path": "agent.version",
"type": "alias"
}
}
}
}
}
},
"_meta": {
"package": {
"name": "fleet_server"
},
"managed_by": "fleet",
"managed": true
}
}

View File

@@ -0,0 +1,29 @@
{
"template": {
"mappings": {
"properties": {
"host": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"related": {
"properties":{
"ip": {
"type": "ip"
}
}
},
"source": {
"properties":{
"ip": {
"type": "ip"
}
}
}
}
}
}
}

View File

@@ -20,7 +20,7 @@ if [ ! -f /opt/so/state/espipelines.txt ]; then
cd ${ELASTICSEARCH_INGEST_PIPELINES}
echo "Loading pipelines..."
for i in .[a-z]* *;
for i in *;
do
echo $i;
retry 5 5 "so-elasticsearch-query _ingest/pipeline/$i -d@$i -XPUT | grep '{\"acknowledged\":true}'" || fail "Could not load pipeline: $i"

View File

@@ -40,9 +40,9 @@ fi
# Iterate through the output of _cat/allocation for each node in the cluster to determine the total available space
{% if GLOBALS.role == 'so-manager' %}
for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | grep -v "{{ GLOBALS.manager }}$" | awk '{print $5}'); do
for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | grep -v "{{ GLOBALS.manager }}$" | awk '{print $8}'); do
{% else %}
for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | awk '{print $5}'); do
for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | awk '{print $8}'); do
{% endif %}
size=$(echo $i | grep -oE '[0-9].*' | awk '{print int($1+0.5)}')
unit=$(echo $i | grep -oE '[A-Za-z]+')
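
The awk field changes above ($5 to $8, with matching shifts in the next file) track a change in the _cat/allocation column layout in newer Elasticsearch releases. A hedged way to confirm which column holds disk.avail or disk.total before relying on a position:
```
# Print _cat/allocation with headers so the column positions can be confirmed
so-elasticsearch-query "_cat/allocation?v" | head -5
```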

View File

@@ -13,10 +13,10 @@ TOTAL_USED_SPACE=0
# Iterate through the output of _cat/allocation for each node in the cluster to determine the total used space
{% if GLOBALS.role == 'so-manager' %}
# Get total disk space - disk.total
for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | grep -v "{{ GLOBALS.manager }}$" | awk '{print $3}'); do
for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | grep -v "{{ GLOBALS.manager }}$" | awk '{print $6}'); do
{% else %}
# Get disk space taken up by indices - disk.indices
for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | awk '{print $2}'); do
for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | awk '{print $5}'); do
{% endif %}
size=$(echo $i | grep -oE '[0-9].*' | awk '{print int($1+0.5)}')
unit=$(echo $i | grep -oE '[A-Za-z]+')

View File

@@ -10,10 +10,26 @@
{%- for index, settings in ES_INDEX_SETTINGS.items() %}
{%- if settings.policy is defined %}
{%- if index == 'so-logs-detections.alerts' %}
echo
echo "Setting up so-logs-detections.alerts-so policy..."
curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -s -k -L -X PUT "https://localhost:9200/_ilm/policy/{{ index }}-so" -H 'Content-Type: application/json' -d'{ "policy": {{ settings.policy | tojson(true) }} }'
echo
{%- elif index == 'so-logs-soc' %}
echo
echo "Setting up so-soc-logs policy..."
curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -s -k -L -X PUT "https://localhost:9200/_ilm/policy/so-soc-logs" -H 'Content-Type: application/json' -d'{ "policy": {{ settings.policy | tojson(true) }} }'
echo
echo
echo "Setting up {{ index }}-logs policy..."
curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -s -k -L -X PUT "https://localhost:9200/_ilm/policy/{{ index }}-logs" -H 'Content-Type: application/json' -d'{ "policy": {{ settings.policy | tojson(true) }} }'
echo
{%- else %}
echo
echo "Setting up {{ index }}-logs policy..."
curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -s -k -L -X PUT "https://localhost:9200/_ilm/policy/{{ index }}-logs" -H 'Content-Type: application/json' -d'{ "policy": {{ settings.policy | tojson(true) }} }'
echo
{%- endif %}
{%- endif %}
{%- endfor %}
echo
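
After the loop runs, a policy can be fetched back from Elasticsearch to confirm it was created. A hedged check using the detections alerts policy name produced by the branch above:
```
# Hedged check: confirm the ILM policy exists
curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -s -k -L \
  "https://localhost:9200/_ilm/policy/so-logs-detections.alerts-so" | jq 'keys'
```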

View File

@@ -5,7 +5,6 @@
# Elastic License 2.0.
{%- import_yaml 'elasticfleet/defaults.yaml' as ELASTICFLEETDEFAULTS %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{%- set SUPPORTED_PACKAGES = salt['pillar.get']('elasticfleet:packages', default=ELASTICFLEETDEFAULTS.elasticfleet.packages, merge=True) %}
STATE_FILE_INITIAL=/opt/so/state/estemplates_initial_load_attempt.txt
STATE_FILE_SUCCESS=/opt/so/state/estemplates.txt
@@ -68,9 +67,9 @@ if [ ! -f $STATE_FILE_SUCCESS ]; then
echo -n "Waiting for ElasticSearch..."
retry 240 1 "so-elasticsearch-query / -k --output /dev/null --silent --head --fail" || fail "Connection attempt timed out. Unable to connect to ElasticSearch. \nPlease try: \n -checking log(s) in /var/log/elasticsearch/\n -running 'sudo docker ps' \n -running 'sudo so-elastic-restart'"
{% if GLOBALS.role != 'so-heavynode' %}
SESSIONCOOKIE=$(curl -s -K /opt/so/conf/elasticsearch/curl.config -c - -X GET http://localhost:5601/ | grep sid | awk '{print $7}')
INSTALLED=$(elastic_fleet_package_is_installed {{ SUPPORTED_PACKAGES[0] }} )
if [ "$INSTALLED" != "installed" ]; then
TEMPLATE="logs-endpoint.alerts@package"
INSTALLED=$(so-elasticsearch-query _component_template/$TEMPLATE | jq -r .component_templates[0].name)
if [ "$INSTALLED" != "$TEMPLATE" ]; then
echo
echo "Packages not yet installed."
echo
@@ -134,7 +133,7 @@ if [ ! -f $STATE_FILE_SUCCESS ]; then
TEMPLATE=${i::-14}
COMPONENT_PATTERN=${TEMPLATE:3}
MATCH=$(echo "$TEMPLATE" | grep -E "^so-logs-|^so-metrics" | grep -vE "detections|osquery")
if [[ -n "$MATCH" && ! "$COMPONENT_LIST" =~ "$COMPONENT_PATTERN" ]]; then
if [[ -n "$MATCH" && ! "$COMPONENT_LIST" =~ "$COMPONENT_PATTERN" && ! "$COMPONENT_PATTERN" =~ logs-http_endpoint\.generic|logs-winlog\.winlog ]]; then
load_failures=$((load_failures+1))
echo "Component template does not exist for $COMPONENT_PATTERN. The index template will not be loaded. Load failures: $load_failures"
else
@@ -153,7 +152,7 @@ if [ ! -f $STATE_FILE_SUCCESS ]; then
cd - >/dev/null
if [[ $load_failures -eq 0 ]]; then
echo "All template loaded successfully"
echo "All templates loaded successfully"
touch $STATE_FILE_SUCCESS
else
echo "Encountered $load_failures templates that were unable to load, likely due to missing dependencies that will be available later; will retry on next highstate"

View File

@@ -120,7 +120,10 @@ firewall:
influxdb:
tcp: *tcpsettings
udp: *udpsettings
kafka:
kafka_controller:
tcp: *tcpsettings
udp: *udpsettings
kafka_data:
tcp: *tcpsettings
udp: *udpsettings
kibana:

View File

@@ -1,6 +1,6 @@
{%- from 'vars/globals.map.jinja' import GLOBALS -%}
{%- from 'soc/merged.map.jinja' import SOCMERGED -%}
--suricata-version=6.0
--suricata-version=7.0.3
--merged=/opt/so/rules/nids/suri/all.rules
--output=/nsm/rules/detect-suricata/custom_temp
--local=/opt/so/rules/nids/suri/local.rules

View File

@@ -23,9 +23,9 @@ if [[ ! "`pidof -x $(basename $0) -o %PPID`" ]]; then
{%- if not GLOBALS.airgap %}
# Download the rules from the internet
{%- if IDSTOOLSMERGED.config.ruleset == 'ETOPEN' %}
docker exec so-idstools idstools-rulecat -v --suricata-version 6.0 -o /nsm/rules/suricata/ --merged=/nsm/rules/suricata/emerging-all.rules --force
docker exec so-idstools idstools-rulecat -v --suricata-version 7.0.3 -o /nsm/rules/suricata/ --merged=/nsm/rules/suricata/emerging-all.rules --force
{%- elif IDSTOOLSMERGED.config.ruleset == 'ETPRO' %}
docker exec so-idstools idstools-rulecat -v --suricata-version 6.0 -o /nsm/rules/suricata/ --merged=/nsm/rules/suricata/emerging-all.rules --force --etpro={{ IDSTOOLSMERGED.config.oinkcode }}
docker exec so-idstools idstools-rulecat -v --suricata-version 7.0.3 -o /nsm/rules/suricata/ --merged=/nsm/rules/suricata/emerging-all.rules --force --etpro={{ IDSTOOLSMERGED.config.oinkcode }}
{%- endif %}
{%- endif %}

View File

@@ -1 +1,2 @@
{"attributes": {"buildNum": 39457,"defaultIndex": "logs-*","defaultRoute": "/app/dashboards#/view/a8411b30-6d03-11ea-b301-3d6c35840645","discover:sampleSize": 100,"theme:darkMode": true,"timepicker:timeDefaults": "{\n \"from\": \"now-24h\",\n \"to\": \"now\"\n}"},"coreMigrationVersion": "8.10.4","id": "8.10.4","references": [],"type": "config","updated_at": "2021-10-10T10:10:10.105Z","version": "WzI5NzUsMl0="}
{"attributes": {"buildNum": 39457,"defaultIndex": "logs-*","defaultRoute": "/app/dashboards#/view/a8411b30-6d03-11ea-b301-3d6c35840645","discover:sampleSize": 100,"theme:darkMode": true,"timepicker:timeDefaults": "{\n \"from\": \"now-24h\",\n \"to\": \"now\"\n}"},"coreMigrationVersion": "8.14.3","id": "8.14.3","references": [],"type": "config","updated_at": "2021-10-10T10:10:10.105Z","version": "WzI5NzUsMl0="}

View File

@@ -63,7 +63,7 @@ update() {
IFS=$'\r\n' GLOBIGNORE='*' command eval 'LINES=($(cat $1))'
for i in "${LINES[@]}"; do
RESPONSE=$(curl -K /opt/so/conf/elasticsearch/curl.config -X PUT "localhost:5601/api/saved_objects/config/8.10.4" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d " $i ")
RESPONSE=$(curl -K /opt/so/conf/elasticsearch/curl.config -X PUT "localhost:5601/api/saved_objects/config/8.14.3" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d " $i ")
echo $RESPONSE; if [[ "$RESPONSE" != *"\"success\":true"* ]] && [[ "$RESPONSE" != *"updated_at"* ]] ; then RETURN_CODE=1;fi
done

View File

@@ -9,6 +9,10 @@ if [ -f /usr/sbin/so-common ]; then
. /usr/sbin/so-common
fi
if [ -f /usr/sbin/so-elastic-fleet-common ]; then
. /usr/sbin/so-elastic-fleet-common
fi
function usage() {
echo "Usage: $0 -o=<operation> -m=[id]"
echo ""
@@ -380,23 +384,31 @@ function add_elastic_fleet_package_registry_to_minion() {
function create_fleet_policy() {
# First, set the default output to Elasticsearch
# This is required because of the license output bug
JSON_STRING=$(jq -n \
--arg NAME "FleetServer_$LSHOSTNAME" \
--arg DESC "Fleet Server - $LSHOSTNAME" \
'{"name": $NAME,"id":$NAME,"description":$DESC,"namespace":"default","monitoring_enabled":["logs"],"inactivity_timeout":1209600,"has_fleet_server":true}'
)
'{
"name": "so-manager_elasticsearch",
"type": "elasticsearch",
"is_default": true,
"is_default_monitoring": false
}')
# Create Fleet Server Policy
curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/agent_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_elasticsearch" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
JSON_STRING_UPDATE=$( jq -n \
--arg NAME "FleetServer_$LSHOSTNAME" \
--arg DESC "Fleet Server - $LSHOSTNAME" \
'{"name":$NAME,"description":$DESC,"namespace":"default","monitoring_enabled":["logs"],"inactivity_timeout":120,"data_output_id":"so-manager_elasticsearch"}'
)
# Create the Fleet Server Policy
elastic_fleet_policy_create "FleetServer_$LSHOSTNAME" "Fleet Server - $LSHOSTNAME" "false" "120"
# Update Fleet Policy - ES Output
curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/agent_policies/FleetServer_$LSHOSTNAME" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING_UPDATE"
# Modify the default integration policy to update the policy_id with the correct naming
UPDATED_INTEGRATION_POLICY=$(jq --arg policy_id "FleetServer_$LSHOSTNAME" --arg name "fleet_server-$LSHOSTNAME" '
.policy_id = $policy_id |
.name = $name' /opt/so/conf/elastic-fleet/integrations/fleet-server/fleet-server.json)
# Add the Fleet Server Integration to the new Fleet Policy
elastic_fleet_integration_create "$UPDATED_INTEGRATION_POLICY"
# Set the default output back to the default
/sbin/so-elastic-fleet-outputs-update
}
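
Since create_fleet_policy temporarily flips the default output to Elasticsearch and then calls so-elastic-fleet-outputs-update to restore it, it can be useful to confirm which output ends up as the default. A hedged check against the Fleet API:
```
# Hedged check: list Fleet outputs and show which one is currently the default
curl -K /opt/so/conf/elasticsearch/curl.config -s -L "localhost:5601/api/fleet/outputs" \
  -H 'kbn-xsrf: true' | jq -r '.items[] | "\(.id) default=\(.is_default)"'
```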
function update_fleet_host_urls() {

View File

@@ -234,10 +234,14 @@ function updatePassword() {
passwordHash=$(hashPassword "$password")
# Update DB with new hash
echo "update identity_credentials set config=CAST('{\"hashed_password\":\"$passwordHash\"}' as BLOB), created_at=datetime('now'), updated_at=datetime('now') where identity_id='${identityId}' and identity_credential_type_id=(select id from identity_credential_types where name='password');" | sqlite3 -cmd ".timeout ${databaseTimeout}" "$databasePath"
# Deactivate MFA
echo "delete from identity_credential_identifiers where identity_credential_id=(select id from identity_credentials where identity_id='${identityId}' and identity_credential_type_id=(select id from identity_credential_types where name in ('totp', 'webauthn', 'oidc')));" | sqlite3 -cmd ".timeout ${databaseTimeout}" "$databasePath"
echo "delete from identity_credentials where identity_id='${identityId}' and identity_credential_type_id=(select id from identity_credential_types where name in ('totp', 'webauthn', 'oidc'));" | sqlite3 -cmd ".timeout ${databaseTimeout}" "$databasePath"
[[ $? != 0 ]] && fail "Unable to update password"
# Deactivate MFA
echo "delete from identity_credential_identifiers where identity_credential_id in (select id from identity_credentials where identity_id='${identityId}' and identity_credential_type_id in (select id from identity_credential_types where name in ('totp', 'webauthn', 'oidc')));" | sqlite3 -cmd ".timeout ${databaseTimeout}" "$databasePath"
[[ $? != 0 ]] && fail "Unable to clear aal2 identity IDs"
echo "delete from identity_credentials where identity_id='${identityId}' and identity_credential_type_id in (select id from identity_credential_types where name in ('totp', 'webauthn', 'oidc'));" | sqlite3 -cmd ".timeout ${databaseTimeout}" "$databasePath"
[[ $? != 0 ]] && fail "Unable to clear aal2 identity credentials"
echo "update identities set available_aal='aal1' where id='${identityId}';" | sqlite3 -cmd ".timeout ${databaseTimeout}" "$databasePath"
[[ $? != 0 ]] && fail "Unable to reset aal"
fi
}
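
The added statements clear TOTP/webauthn/oidc credentials and reset the account's available AAL. A hedged sketch for confirming the reset afterwards; the identity ID placeholder is illustrative:
```
# Hedged check: confirm the identity dropped back to aal1 after MFA was cleared
echo "select id, available_aal from identities where id='IDENTITY_ID';" | \
  sqlite3 -cmd ".timeout ${databaseTimeout}" "$databasePath"
```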

View File

@@ -401,6 +401,7 @@ preupgrade_changes() {
[[ "$INSTALLEDVERSION" == 2.4.60 ]] && up_to_2.4.70
[[ "$INSTALLEDVERSION" == 2.4.70 ]] && up_to_2.4.80
[[ "$INSTALLEDVERSION" == 2.4.80 ]] && up_to_2.4.90
[[ "$INSTALLEDVERSION" == 2.4.90 ]] && up_to_2.4.100
true
}
@@ -420,6 +421,7 @@ postupgrade_changes() {
[[ "$POSTVERSION" == 2.4.60 ]] && post_to_2.4.70
[[ "$POSTVERSION" == 2.4.70 ]] && post_to_2.4.80
[[ "$POSTVERSION" == 2.4.80 ]] && post_to_2.4.90
[[ "$POSTVERSION" == 2.4.90 ]] && post_to_2.4.100
true
}
@@ -451,8 +453,6 @@ post_to_2.4.20() {
}
post_to_2.4.30() {
echo "Regenerating Elastic Agent Installers"
/sbin/so-elastic-agent-gen-installers
# there is an occasional error with this state: pki_public_ca_crt: TypeError: list indices must be integers or slices, not str
set +e
salt-call state.apply ca queue=True
@@ -477,8 +477,7 @@ post_to_2.4.50() {
}
post_to_2.4.60() {
echo "Regenerating Elastic Agent Installers..."
so-elastic-agent-gen-installers
echo "Nothing to apply"
POSTVERSION=2.4.60
}
@@ -504,6 +503,12 @@ post_to_2.4.90() {
POSTVERSION=2.4.90
}
post_to_2.4.100() {
echo "Regenerating Elastic Agent Installers"
/sbin/so-elastic-agent-gen-installers
POSTVERSION=2.4.100
}
repo_sync() {
echo "Sync the local repo."
su socore -c '/usr/sbin/so-repo-sync' || fail "Unable to complete so-repo-sync."
@@ -580,18 +585,7 @@ up_to_2.4.20() {
}
up_to_2.4.30() {
# Remove older defend integration json & installed integration
rm -f /opt/so/conf/elastic-fleet/integrations/endpoints-initial/elastic-defend-endpoints.json
. $UPDATE_DIR/salt/elasticfleet/tools/sbin/so-elastic-fleet-common
elastic_fleet_integration_remove endpoints-initial elastic-defend-endpoints
rm -f /opt/so/state/eaintegrations.txt
# Elastic Update for this release, so download Elastic Agent files
determine_elastic_agent_upgrade
rm -f /opt/so/state/estemplates*.txt
echo "Nothing to do for 2.4.30"
INSTALLEDVERSION=2.4.30
}
@@ -681,10 +675,16 @@ up_to_2.4.90() {
so-yaml.py remove /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.password
so-yaml.py add /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.config.password "$kafkatrimpass"
so-yaml.py add /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.config.trustpass "$kafkatrust"
echo "If the Detection index exists, update the refresh_interval"
so-elasticsearch-query so-detection*/_settings -X PUT -d '{"index":{"refresh_interval":"1s"}}'
INSTALLEDVERSION=2.4.90
}
up_to_2.4.100() {
# Elastic Update for this release, so download Elastic Agent files
determine_elastic_agent_upgrade
INSTALLEDVERSION=2.4.100
}
add_detection_test_pillars() {
if [[ -n "$SOUP_INTERNAL_TESTING" ]]; then
@@ -945,7 +945,9 @@ upgrade_salt() {
if [[ $is_rpm ]]; then
echo "Removing yum versionlock for Salt."
echo ""
yum versionlock delete "salt-*"
yum versionlock delete "salt"
yum versionlock delete "salt-minion"
yum versionlock delete "salt-master"
echo "Updating Salt packages."
echo ""
set +e
@@ -963,7 +965,9 @@ upgrade_salt() {
set -e
echo "Applying yum versionlock for Salt."
echo ""
yum versionlock add "salt-*"
yum versionlock add "salt-0:$NEWSALTVERSION-0.*"
yum versionlock add "salt-minion-0:$NEWSALTVERSION-0.*"
yum versionlock add "salt-master-0:$NEWSALTVERSION-0.*"
# Else do Ubuntu things
elif [[ $is_deb ]]; then
echo "Removing apt hold for Salt."
@@ -1404,6 +1408,8 @@ Please review the following for more information about the update process and re
$DOC_BASE_URL/soup.html
https://blog.securityonion.net
WARNING: If you run soup via an SSH session and that SSH session terminates, any processes running in that session will terminate as well. Avoid leaving soup unattended, especially if the machine you are SSHing from is configured to sleep after a period of time. Consider using something like screen or tmux so that if your SSH session terminates, the processes will continue running on the server.
EOF
cat << EOF
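
After soup finishes on an RPM-based install, the tightened versionlock entries added above can be confirmed; a hedged check (output format varies by versionlock plugin version):
```
# Hedged check: list the pinned Salt packages after the upgrade
yum versionlock list | grep salt
```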

View File

@@ -14,7 +14,7 @@ include:
# Install the registry container
so-dockerregistry:
docker_container.running:
- image: ghcr.io/security-onion-solutions/registry:2.8.2
- image: ghcr.io/security-onion-solutions/registry:2.8.3
- hostname: so-registry
- networks:
- sobridge:

View File

@@ -3,12 +3,10 @@
{% if grains.os_family == 'Debian' %}
{% set SPLITCHAR = '+' %}
{% set SALTNOTHELD = salt['cmd.run']('apt-mark showhold | grep -q salt ; echo $?', python_shell=True) %}
{% set SALTPACKAGES = ['salt-common', 'salt-master', 'salt-minion'] %}
{% set SYSTEMD_UNIT_FILE = '/lib/systemd/system/salt-minion.service' %}
{% else %}
{% set SPLITCHAR = '-' %}
{% set SALTNOTHELD = salt['cmd.run']('yum versionlock list | grep -q salt ; echo $?', python_shell=True) %}
{% set SALTPACKAGES = ['salt', 'salt-master', 'salt-minion'] %}
{% set SYSTEMD_UNIT_FILE = '/usr/lib/systemd/system/salt-minion.service' %}
{% endif %}

View File

@@ -1,4 +1,4 @@
# version cannot be used elsewhere in this pillar as soup is grepping for it to determine if Salt needs to be patched
salt:
master:
version: 3006.6
version: 3006.9

View File

@@ -1,16 +1,13 @@
{% from 'salt/map.jinja' import SALTNOTHELD %}
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %}
include:
- salt.minion
{% if SALTNOTHELD == 1 %}
hold_salt_master_package:
module.run:
- pkg.hold:
- name: salt-master
{% endif %}
# prior to 2.4.30 this engine ran on the manager with salt-minion
# this has changed to running with the salt-master in 2.4.30

View File

@@ -1,6 +1,6 @@
# version cannot be used elsewhere in this pillar as soup is grepping for it to determine if Salt needs to be patched
salt:
minion:
version: 3006.6
version: 3006.9
check_threshold: 3600 # in seconds, threshold used for so-salt-minion-check. any value less than 600 seconds may cause a lot of salt-minion restarts since the job to touch the file occurs every 5-8 minutes by default
service_start_delay: 30 # in seconds.

View File

@@ -2,13 +2,13 @@
{% from 'salt/map.jinja' import UPGRADECOMMAND with context %}
{% from 'salt/map.jinja' import SALTVERSION %}
{% from 'salt/map.jinja' import INSTALLEDSALTVERSION %}
{% from 'salt/map.jinja' import SALTNOTHELD %}
{% from 'salt/map.jinja' import SALTPACKAGES %}
{% from 'salt/map.jinja' import SYSTEMD_UNIT_FILE %}
{% import_yaml 'salt/minion.defaults.yaml' as SALTMINION %}
{% set service_start_delay = SALTMINION.salt.minion.service_start_delay %}
include:
- salt.python_modules
- salt
- systemd.reload
- repo.client
@@ -19,15 +19,12 @@ include:
{% if INSTALLEDSALTVERSION|string != SALTVERSION|string %}
{% if SALTNOTHELD | int == 0 %}
unhold_salt_packages:
module.run:
- pkg.unhold:
pkg.unheld:
- pkgs:
{% for package in SALTPACKAGES %}
- {{ package }}
{% endfor %}
{% endif %}
install_salt_minion:
cmd.run:
@@ -41,15 +38,12 @@ install_salt_minion:
{% if INSTALLEDSALTVERSION|string == SALTVERSION|string %}
{% if SALTNOTHELD | int == 1 %}
hold_salt_packages:
module.run:
- pkg.hold:
pkg.held:
- pkgs:
{% for package in SALTPACKAGES %}
- {{ package }}
- {{ package }}: {{SALTVERSION}}-0.*
{% endfor %}
{% endif %}
remove_error_log_level_logfile:
file.line:

View File

@@ -0,0 +1,21 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
docker_module_package:
file.recurse:
- name: /opt/so/conf/salt/module_packages/docker
- source: salt://salt/module_packages/docker
- clean: True
- makedirs: True
# fail hard on this state so that soup is cancelled on a manager (even though salt itself would have already updated)
# on a non-manager, failing hard here prevents the minion from upgrading
# we want to fail hard here to keep the minion from upgrading and then potentially being unable to manage docker containers due to a dependency mismatch
docker_python_module_install:
cmd.run:
- name: /opt/saltstack/salt/bin/python3.10 -m pip install docker --no-index --find-links=/opt/so/conf/salt/module_packages/docker/ --upgrade
- onchanges:
- file: docker_module_package
- failhard: True
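Once this state has run, a quick way to confirm the module is importable by Salt's bundled interpreter (same path as above) is:

```
/opt/saltstack/salt/bin/python3.10 -c "import docker; print(docker.__version__)"
```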

View File

@@ -190,6 +190,14 @@ socsigmarepo:
- group: 939
- mode: 775
socsensoronirepos:
file.directory:
- name: /opt/so/conf/soc/ai_summary_repos
- user: 939
- group: 939
- mode: 775
- makedirs: True
{% else %}
{{sls}}_state_not_allowed:

View File

@@ -1304,6 +1304,7 @@ soc:
maxPacketCount: 5000
htmlDir: html
importUploadDir: /nsm/soc/uploads
forceUserOtp: false
modules:
cases: soc
filedatastore:
@@ -1311,6 +1312,10 @@ soc:
kratos:
hostUrl:
elastalertengine:
aiRepoUrl: https://github.com/Security-Onion-Solutions/securityonion-resources
aiRepoBranch: generated-summaries-stable
aiRepoPath: /opt/sensoroni/ai_summary_repos
showAiSummaries: true
autoUpdateEnabled: true
autoEnabledSigmaRules:
default:
@@ -1390,6 +1395,10 @@ soc:
userFiles:
- rbac/users_roles
strelkaengine:
aiRepoUrl: https://github.com/Security-Onion-Solutions/securityonion-resources
aiRepoBranch: generated-summaries-stable
aiRepoPath: /opt/sensoroni/ai_summary_repos
showAiSummaries: true
autoEnabledYaraRules:
- securityonion-yara
autoUpdateEnabled: true
@@ -1411,6 +1420,10 @@ soc:
stateFilePath: /opt/sensoroni/fingerprints/strelkaengine.state
integrityCheckFrequencySeconds: 1200
suricataengine:
aiRepoUrl: https://github.com/Security-Onion-Solutions/securityonion-resources
aiRepoBranch: generated-summaries-stable
aiRepoPath: /opt/sensoroni/ai_summary_repos
showAiSummaries: true
autoUpdateEnabled: true
communityRulesImportFrequencySeconds: 86400
communityRulesImportErrorSeconds: 300

View File

@@ -33,6 +33,7 @@ so-soc:
- /nsm/soc/uploads:/nsm/soc/uploads:rw
- /opt/so/log/soc/:/opt/sensoroni/logs/:rw
- /opt/so/conf/soc/soc.json:/opt/sensoroni/sensoroni.json:ro
- /opt/so/conf/soc/ai_summary_repos:/opt/sensoroni/ai_summary_repos:rw
{% if SOCMERGED.telemetryEnabled and not GLOBALS.airgap %}
- /opt/so/conf/soc/analytics.js:/opt/sensoroni/html/js/analytics.js:ro
{% endif %}

View File

@@ -81,15 +81,133 @@ soc:
description: Maximum number of packets to show in the PCAP viewer. Larger values can cause more resource utilization on both the SOC server and the browser.
global: True
advanced: True
forceUserOtp:
title: Require TOTP
description: Require all users to enable Time-based One-Time Passwords (MFA) upon login to SOC.
global: True
modules:
elastalertengine:
additionalAlerters:
title: Additional Alerters
description: Specify additional alerters to enable for all Sigma rules, one alerter name per line. Alerters refers to ElastAlert 2 alerters, as documented at https://elastalert2.readthedocs.io. Note that the configuration parameters for these alerters must be provided in the ElastAlert configuration section. Filter for 'Alerter' to find this related setting. A full update of the ElastAlert rule engine, via the Detections screen, is required in order to apply these changes. Requires a valid Security Onion license key.
aiRepoUrl:
description: URL of the AI repository. This is used to pull in AI-generated summaries for ElastAlert rules.
global: True
helpLink: sigma.html
advanced: True
aiRepoBranch:
description: The branch to pull from the AI repository. Leaving this blank will pull the default branch.
global: True
advanced: True
aiRepoPath:
description: Local path to the AI repository. This is used to pull in AI-generated summaries for ElastAlert rules.
global: True
advanced: True
showAiSummaries:
description: Show AI summaries for ElastAlert rules.
global: True
additionalAlerters:
title: "Notifications: Sev 0/Default Alerters"
description: "Specify default alerters to enable for outbound notifications. These alerters will be used unless overridden by higher severity alerter settings. Specify one alerter name (Ex: 'email') per line. Alerters refers to ElastAlert 2 alerters, as documented at https://elastalert2.readthedocs.io. A full update of the ElastAlert rule engine, via the Detections screen, is required in order to apply these changes. Requires a valid Security Onion license key."
global: True
helpLink: notifications.html
forcedType: "[]string"
multiline: True
additionalSev0AlertersParams:
title: "Notifications: Sev 0/Default Parameters"
description: Optional configuration parameters for default alerters. Use YAML format for these parameters, and reference the ElastAlert 2 documentation, located at https://elastalert2.readthedocs.io, for available alerters and their required configuration parameters. A full update of the ElastAlert rule engine, via the Detections screen, is required in order to apply these changes. Requires a valid Security Onion license key.
global: True
multiline: True
syntax: yaml
helpLink: notifications.html
forcedType: string
additionalSev1Alerters:
title: "Notifications: Sev 1/Informational Alerters"
description: "Specify specific alerters to use when alerting at the info severity level or higher. These alerters will be used unless overridden by higher severity alerter settings. Specify one alerter name (Ex: 'email') per line. Alerters refers to ElastAlert 2 alerters, as documented at https://elastalert2.readthedocs.io. A full update of the ElastAlert rule engine, via the Detections screen, is required in order to apply these changes. Requires a valid Security Onion license key."
global: True
helpLink: notifications.html
forcedType: "[]string"
multiline: True
additionalSev1AlertersParams:
title: "Notifications: Sev 1/Informational Parameters"
description: Optional configuration parameters for informational severity alerters. Info level is less severe than 'Low Severity'. Use YAML format for these parameters, and reference the ElastAlert 2 documentation, located at https://elastalert2.readthedocs.io, for available alerters and their required configuration parameters. A full update of the ElastAlert rule engine, via the Detections screen, is required in order to apply these changes. Requires a valid Security Onion license key.
global: True
multiline: True
syntax: yaml
helpLink: notifications.html
forcedType: string
additionalSev2Alerters:
title: "Notifications: Sev 2/Low Alerters"
description: "Specify specific alerters to use when alerting at the low severity level or higher. These alerters will be used unless overridden by higher severity alerter settings. Specify one alerter name (Ex: 'email') per line. Alerters refers to ElastAlert 2 alerters, as documented at https://elastalert2.readthedocs.io. A full update of the ElastAlert rule engine, via the Detections screen, is required in order to apply these changes. Requires a valid Security Onion license key."
global: True
helpLink: notifications.html
forcedType: "[]string"
multiline: True
additionalSev2AlertersParams:
title: "Notifications: Sev 2/Low Parameters"
description: Optional configuration parameters for low severity alerters. Use YAML format for these parameters, and reference the ElastAlert 2 documentation, located at https://elastalert2.readthedocs.io, for available alerters and their required configuration parameters. A full update of the ElastAlert rule engine, via the Detections screen, is required in order to apply these changes. Requires a valid Security Onion license key.
global: True
multiline: True
syntax: yaml
helpLink: notifications.html
forcedType: string
additionalSev3Alerters:
title: "Notifications: Sev 3/Medium Alerters"
description: "Specify specific alerters to use when alerting at the medium severity level or higher. These alerters will be used unless overridden by higher severity alerter settings. Specify one alerter name (Ex: 'email') per line. Alerters refers to ElastAlert 2 alerters, as documented at https://elastalert2.readthedocs.io. A full update of the ElastAlert rule engine, via the Detections screen, is required in order to apply these changes. Requires a valid Security Onion license key."
global: True
helpLink: notifications.html
forcedType: "[]string"
multiline: True
additionalSev3AlertersParams:
title: "Notifications: Sev 3/Medium Parameters"
description: Optional configuration parameters for medium severity alerters. Use YAML format for these parameters, and reference the ElastAlert 2 documentation, located at https://elastalert2.readthedocs.io, for available alerters and their required configuration parameters. A full update of the ElastAlert rule engine, via the Detections screen, is required in order to apply these changes. Requires a valid Security Onion license key.
global: True
multiline: True
syntax: yaml
helpLink: notifications.html
forcedType: string
additionalSev4Alerters:
title: "Notifications: Sev 4/High Alerters"
description: "Specify specific alerters to use when alerting at the high severity level or critical severity level. These alerters will be used unless overridden by critical severity alerter settings. Specify one alerter name (Ex: 'email') per line. Alerters refers to ElastAlert 2 alerters, as documented at https://elastalert2.readthedocs.io. A full update of the ElastAlert rule engine, via the Detections screen, is required in order to apply these changes. Requires a valid Security Onion license key."
global: True
helpLink: notifications.html
forcedType: "[]string"
multiline: True
additionalSev4AlertersParams:
title: "Notifications: Sev 4/High Parameters"
description: Optional configuration parameters for high severity alerters. Use YAML format for these parameters, and reference the ElastAlert 2 documentation, located at https://elastalert2.readthedocs.io, for available alerters and their required configuration parameters. A full update of the ElastAlert rule engine, via the Detections screen, is required in order to apply these changes. Requires a valid Security Onion license key.
global: True
multiline: True
syntax: yaml
helpLink: notifications.html
forcedType: string
additionalSev5Alerters:
title: "Notifications: Sev 5/Critical Alerters"
description: "Specify specific alerters to use when alerting at the critical severity level. Specify one alerter name (Ex: 'email') per line. Alerters refers to ElastAlert 2 alerters, as documented at https://elastalert2.readthedocs.io. A full update of the ElastAlert rule engine, via the Detections screen, is required in order to apply these changes. Requires a valid Security Onion license key."
global: True
helpLink: notifications.html
forcedType: "[]string"
multiline: True
additionalSev5AlertersParams:
title: "Notifications: Sev 5/Critical Parameters"
description: Optional configuration parameters for critical severity alerters. Use YAML format for these parameters, and reference the ElastAlert 2 documentation, located at https://elastalert2.readthedocs.io, for available alerters and their required configuration parameters. A full update of the ElastAlert rule engine, via the Detections screen, is required in order to apply these changes. Requires a valid Security Onion license key.
global: True
multiline: True
syntax: yaml
helpLink: notifications.html
forcedType: string
additionalUserDefinedNotifications:
customAlerters:
description: "Specify custom notification alerters to use when the Sigma rule contains the following tag: so.alerters.customAlerters. This setting can be duplicated to create new custom alerter configurations. Specify one alerter name (Ex: 'email') per line. Alerters refers to ElastAlert 2 alerters, as documented at https://elastalert2.readthedocs.io. A full update of the ElastAlert rule engine, via the Detections screen, is required in order to apply these changes. Requires a valid Security Onion license key."
global: True
helpLink: notifications.html
forcedType: "[]string"
duplicates: True
multiline: True
customAlertersParams:
description: "Optional configuration parameters for custom notification alerters, used when the Sigma rule contains the following tag: so.params.customAlertersParams. This setting can be duplicated to create new custom alerter configurations. Use YAML format for these parameters, and reference the ElastAlert 2 documentation, located at https://elastalert2.readthedocs.io, for available alerters and their required configuration parameters. A full update of the ElastAlert rule engine, via the Detections screen, is required in order to apply these changes. Requires a valid Security Onion license key."
global: True
multiline: True
syntax: yaml
helpLink: notifications.html
duplicates: True
forcedType: string
autoEnabledSigmaRules:
default: &autoEnabledSigmaRules
description: 'Sigma rules to automatically enable on initial import. Format is $Ruleset+$Level - for example, for the core community ruleset and critical level rules: core+critical. These will be applied based on role if defined and default if not.'
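As a concrete sketch of how the alerter settings above pair up (additionalAlerters plus its matching parameters setting): the values below are hypothetical, the alerter name and its options come from the ElastAlert 2 email alerter documentation, and in practice these settings are entered via the SOC Configuration interface.

```
additionalAlerters:
  - email
additionalSev0AlertersParams: |
  smtp_host: mail.example.com
  from_addr: securityonion@example.com
  email:
    - soc-team@example.com
```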
@@ -189,6 +307,21 @@ soc:
advanced: True
forcedType: int
strelkaengine:
aiRepoUrl:
description: URL of the AI repository. This is used to pull in AI-generated summaries for Strelka rules.
global: True
advanced: True
aiRepoBranch:
description: The branch to pull from the AI repository. Leaving this blank will pull the default branch.
global: True
advanced: True
aiRepoPath:
description: Local path to the AI repository. This is used to pull in AI-generated summaries for Strelka rules.
global: True
advanced: True
showAiSummaries:
description: Show AI summaries for Strelka rules.
global: True
autoEnabledYaraRules:
description: 'YARA rules to automatically enable on initial import. Format is $Ruleset - for example, for the default shipped ruleset: securityonion-yara'
global: True
@@ -212,6 +345,21 @@ soc:
helpLink: yara.html
airgap: *serulesRepos
suricataengine:
aiRepoUrl:
description: URL of the AI repository. This is used to pull in AI-generated summaries for Suricata rules.
global: True
advanced: True
aiRepoBranch:
description: The branch to pull from the AI repository. Leaving this blank will pull the default branch.
global: True
advanced: True
aiRepoPath:
description: Local path to the AI repository. This is used to pull in AI-generated summaries for Suricata rules.
global: True
advanced: True
showAiSummaries:
description: Show AI summaries for Suricata rules.
global: True
communityRulesImportFrequencySeconds:
description: 'How often to check for new Suricata rules (in seconds).'
global: True

View File

@@ -101,20 +101,20 @@
{# change address-groups vars from list to comma-separated string #}
{% for k, v in SURICATAMERGED.config.vars['address-groups'].items() %}
{% if v is string %}
{% do SURICATAMERGED.config.vars['address-groups'].update({k: '[' ~ v ~ ']'}) %}
{# if address-group value is a list #}
{% if v is iterable and (v is not string and v is not mapping and v | length > 1) %}
{% elif v is iterable and v is not mapping %}
{% do SURICATAMERGED.config.vars['address-groups'].update({k: '[' ~ v | join(',') ~ ']'}) %}
{% else %}
{% do SURICATAMERGED.config.vars['address-groups'].update({k: v[0]}) %}
{% endif %}
{% endfor %}
{# change port-groups vars from list to comma-separated string #}
{% for k, v in SURICATAMERGED.config.vars['port-groups'].items() %}
{% if v is string %}
{% do SURICATAMERGED.config.vars['port-groups'].update({k: '[' ~ v ~ ']'}) %}
{# if port-group value is a list #}
{% if v is iterable and (v is not string and v is not mapping and v | length > 1) %}
{% elif v is iterable and v is not mapping %}
{% do SURICATAMERGED.config.vars['port-groups'].update({k: '[' ~ v | join(',') ~ ']'}) %}
{% else %}
{% do SURICATAMERGED.config.vars['port-groups'].update({k: v[0]}) %}
{% endif %}
{% endfor %}
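As a sketch of what this conversion produces (hypothetical values): a list-valued group is joined into a single bracketed string before being rendered into suricata.yaml, while a plain string is simply wrapped in brackets.

```
# Hypothetical pillar values
address-groups:
  HOME_NET:
    - 10.0.0.0/8
    - 192.168.0.0/16
  EXTERNAL_NET: "!$HOME_NET"

# After the loop above, the values written into suricata.yaml become:
#   HOME_NET:     [10.0.0.0/8,192.168.0.0/16]
#   EXTERNAL_NET: [!$HOME_NET]
```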

View File

@@ -1814,7 +1814,7 @@ repo_sync_local() {
if [[ ! $is_airgap ]]; then
curl --retry 5 --retry-delay 60 -A "netinstall/$SOVERSION/$OS/$(uname -r)/1" https://sigs.securityonion.net/checkup --output /tmp/install
logCmd "dnf reposync --norepopath -g --delete -m -c /opt/so/conf/reposync/repodownload.conf --repoid=securityonionsync --download-metadata -p /nsm/repo/"
retry 5 60 "dnf reposync --norepopath -g --delete -m -c /opt/so/conf/reposync/repodownload.conf --repoid=securityonionsync --download-metadata -p /nsm/repo/" >> "$setup_log" 2>&1 || fail_setup
# After the download is complete run createrepo
create_repo
fi
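The retry wrapper used here takes an attempt count, a delay, and the command to run. A minimal sketch of that behavior follows; the actual helper is defined elsewhere in so-functions and may differ.

```
# retry <attempts> <delay> <command>
retry() {
  local attempts=$1 delay=$2 cmd=$3
  local i
  for ((i = 1; i <= attempts; i++)); do
    eval "$cmd" && return 0
    [ "$i" -lt "$attempts" ] && sleep "$delay"
  done
  return 1
}
```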
@@ -1931,7 +1931,7 @@ saltify() {
}
salt_install_module_deps() {
logCmd "salt-pip install docker --no-index --only-binary=:all: --find-links files/salt_module_deps/docker/"
logCmd "salt-call state.apply salt.python_modules --local --file-root=../salt/"
}
salt_patch_x509_v2() {

Binary file not shown.