Merge remote-tracking branch 'origin/2.4/dev' into reyesj2/pypy

This commit is contained in:
reyesj2
2025-09-12 11:05:54 -05:00
51 changed files with 1606 additions and 89 deletions

View File

@@ -262,6 +262,9 @@ base:
- minions.adv_{{ grains.id }} - minions.adv_{{ grains.id }}
- kafka.nodes - kafka.nodes
- kafka.soc_kafka - kafka.soc_kafka
- stig.soc_stig
- elasticfleet.soc_elasticfleet
- elasticfleet.adv_elasticfleet
'*_import': '*_import':
- node_data.ips - node_data.ips
@@ -319,10 +322,12 @@ base:
- elasticfleet.adv_elasticfleet - elasticfleet.adv_elasticfleet
- minions.{{ grains.id }} - minions.{{ grains.id }}
- minions.adv_{{ grains.id }} - minions.adv_{{ grains.id }}
- stig.soc_stig
'*_hypervisor': '*_hypervisor':
- minions.{{ grains.id }} - minions.{{ grains.id }}
- minions.adv_{{ grains.id }} - minions.adv_{{ grains.id }}
- stig.soc_stig
'*_desktop': '*_desktop':
- minions.{{ grains.id }} - minions.{{ grains.id }}

View File

@@ -38,6 +38,7 @@ elasticfleet:
- elasticsearch - elasticsearch
- endpoint - endpoint
- fleet_server - fleet_server
- filestream
- http_endpoint - http_endpoint
- httpjson - httpjson
- log - log

View File

@@ -0,0 +1,48 @@
{
"package": {
"name": "filestream",
"version": ""
},
"name": "agent-monitor",
"namespace": "",
"description": "",
"policy_ids": [
"so-grid-nodes_general"
],
"output_id": null,
"vars": {},
"inputs": {
"filestream-filestream": {
"enabled": true,
"streams": {
"filestream.generic": {
"enabled": true,
"vars": {
"paths": [
"/opt/so/log/agents/agent-monitor.log"
],
"data_stream.dataset": "agent-monitor",
"pipeline": "elasticagent.monitor",
"parsers": "",
"exclude_files": [
"\\.gz$"
],
"include_files": [],
"processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n- add_fields:\n target: event\n fields:\n module: gridmetrics",
"tags": [],
"recursive_glob": true,
"ignore_older": "72h",
"clean_inactive": -1,
"harvester_limit": 0,
"fingerprint": true,
"fingerprint_offset": 0,
"fingerprint_length": 64,
"file_identity_native": false,
"exclude_lines": [],
"include_lines": []
}
}
}
}
}
}

View File

@@ -20,7 +20,7 @@
], ],
"data_stream.dataset": "import", "data_stream.dataset": "import",
"custom": "", "custom": "",
"processors": "- dissect:\n tokenizer: \"/nsm/import/%{import.id}/evtx/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n- drop_fields:\n fields: [\"host\"]\n ignore_missing: true\n- add_fields:\n target: data_stream\n fields:\n type: logs\n dataset: system.security\n- add_fields:\n target: event\n fields:\n dataset: system.security\n module: system\n imported: true\n- add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.security-2.3.3\n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-Sysmon/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.sysmon_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.sysmon_operational\n module: windows\n imported: true\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.sysmon_operational-3.1.0\n- if:\n equals:\n winlog.channel: 'Application'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.application\n - add_fields:\n target: event\n fields:\n dataset: system.application\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.application-2.3.3\n- if:\n equals:\n winlog.channel: 'System'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.system\n - add_fields:\n target: event\n fields:\n dataset: system.system\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.system-2.3.3\n \n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-PowerShell/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.powershell_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.powershell_operational\n module: windows\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.powershell_operational-3.1.0\n- add_fields:\n target: data_stream\n fields:\n dataset: import", "processors": "- dissect:\n tokenizer: 
\"/nsm/import/%{import.id}/evtx/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n- drop_fields:\n fields: [\"host\"]\n ignore_missing: true\n- add_fields:\n target: data_stream\n fields:\n type: logs\n dataset: system.security\n- add_fields:\n target: event\n fields:\n dataset: system.security\n module: system\n imported: true\n- add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.security-2.5.4\n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-Sysmon/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.sysmon_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.sysmon_operational\n module: windows\n imported: true\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.sysmon_operational-3.1.2\n- if:\n equals:\n winlog.channel: 'Application'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.application\n - add_fields:\n target: event\n fields:\n dataset: system.application\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.application-2.5.4\n- if:\n equals:\n winlog.channel: 'System'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.system\n - add_fields:\n target: event\n fields:\n dataset: system.system\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.system-2.5.4\n \n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-PowerShell/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.powershell_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.powershell_operational\n module: windows\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.powershell_operational-3.1.2\n- add_fields:\n target: data_stream\n fields:\n dataset: import",
"tags": [ "tags": [
"import" "import"
] ]

View File

@@ -23,14 +23,20 @@ function update_logstash_outputs() {
} }
function update_kafka_outputs() { function update_kafka_outputs() {
# Make sure SSL configuration is included in policy updates for Kafka output. SSL is configured in so-elastic-fleet-setup # Make sure SSL configuration is included in policy updates for Kafka output. SSL is configured in so-elastic-fleet-setup
SSL_CONFIG=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs/so-manager_kafka" | jq -r '.item.ssl') if kafka_policy=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs/so-manager_kafka" --fail 2>/dev/null); then
SSL_CONFIG=$(echo "$kafka_policy" | jq -r '.item.ssl')
JSON_STRING=$(jq -n \ SECRETS=$(echo "$kafka_policy" | jq -r '.item.secrets')
--arg UPDATEDLIST "$NEW_LIST_JSON" \ JSON_STRING=$(jq -n \
--argjson SSL_CONFIG "$SSL_CONFIG" \ --arg UPDATEDLIST "$NEW_LIST_JSON" \
'{"name": "grid-kafka","type": "kafka","hosts": $UPDATEDLIST,"is_default": true,"is_default_monitoring": true,"config_yaml": "","ssl": $SSL_CONFIG}') --argjson SSL_CONFIG "$SSL_CONFIG" \
# Update Kafka outputs --argjson SECRETS "$SECRETS" \
curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_kafka" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" | jq '{"name": "grid-kafka","type": "kafka","hosts": $UPDATEDLIST,"is_default": true,"is_default_monitoring": true,"config_yaml": "","ssl": $SSL_CONFIG,"secrets": $SECRETS}')
# Update Kafka outputs
curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_kafka" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" | jq
else
printf "Failed to get current Kafka output policy..."
exit 1
fi
} }
{% if GLOBALS.pipeline == "KAFKA" %} {% if GLOBALS.pipeline == "KAFKA" %}

View File

@@ -5,46 +5,78 @@
# Elastic License 2.0. # Elastic License 2.0.
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
{% if GLOBALS.role in ['so-manager', 'so-standalone', 'so-managersearch'] %} {% if GLOBALS.role in ['so-manager', 'so-standalone', 'so-managersearch', 'so-managerhype'] %}
. /usr/sbin/so-common . /usr/sbin/so-common
force=false
while [[ $# -gt 0 ]]; do
case $1 in
-f|--force)
force=true
shift
;;
*)
echo "Unknown option $1"
echo "Usage: $0 [-f|--force]"
exit 1
;;
esac
done
# Check to make sure that Kibana API is up & ready # Check to make sure that Kibana API is up & ready
RETURN_CODE=0 RETURN_CODE=0
wait_for_web_response "http://localhost:5601/api/fleet/settings" "fleet" 300 "curl -K /opt/so/conf/elasticsearch/curl.config" wait_for_web_response "http://localhost:5601/api/fleet/settings" "fleet" 300 "curl -K /opt/so/conf/elasticsearch/curl.config"
RETURN_CODE=$? RETURN_CODE=$?
if [[ "$RETURN_CODE" != "0" ]]; then if [[ "$RETURN_CODE" != "0" ]]; then
printf "Kibana API not accessible, can't setup Elastic Fleet output policy for Kafka..." echo -e "\nKibana API not accessible, can't setup Elastic Fleet output policy for Kafka...\n"
exit 1 exit 1
fi fi
output=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs" | jq -r .items[].id) KAFKACRT=$(openssl x509 -in /etc/pki/elasticfleet-kafka.crt)
KAFKAKEY=$(openssl rsa -in /etc/pki/elasticfleet-kafka.key)
KAFKACA=$(openssl x509 -in /etc/pki/tls/certs/intca.crt)
KAFKA_OUTPUT_VERSION="2.6.0"
if ! echo "$output" | grep -q "so-manager_kafka"; then if ! kafka_output=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs/so-manager_kafka" --fail 2>/dev/null); then
KAFKACRT=$(openssl x509 -in /etc/pki/elasticfleet-kafka.crt) # Create a new output policy for Kafka. Default is disabled 'is_default: false & is_default_monitoring: false'
KAFKAKEY=$(openssl rsa -in /etc/pki/elasticfleet-kafka.key)
KAFKACA=$(openssl x509 -in /etc/pki/tls/certs/intca.crt)
KAFKA_OUTPUT_VERSION="2.6.0"
JSON_STRING=$( jq -n \ JSON_STRING=$( jq -n \
--arg KAFKACRT "$KAFKACRT" \ --arg KAFKACRT "$KAFKACRT" \
--arg KAFKAKEY "$KAFKAKEY" \ --arg KAFKAKEY "$KAFKAKEY" \
--arg KAFKACA "$KAFKACA" \ --arg KAFKACA "$KAFKACA" \
--arg MANAGER_IP "{{ GLOBALS.manager_ip }}:9092" \ --arg MANAGER_IP "{{ GLOBALS.manager_ip }}:9092" \
--arg KAFKA_OUTPUT_VERSION "$KAFKA_OUTPUT_VERSION" \ --arg KAFKA_OUTPUT_VERSION "$KAFKA_OUTPUT_VERSION" \
'{ "name": "grid-kafka", "id": "so-manager_kafka", "type": "kafka", "hosts": [ $MANAGER_IP ], "is_default": false, "is_default_monitoring": false, "config_yaml": "", "ssl": { "certificate_authorities": [ $KAFKACA ], "certificate": $KAFKACRT, "key": $KAFKAKEY, "verification_mode": "full" }, "proxy_id": null, "client_id": "Elastic", "version": $KAFKA_OUTPUT_VERSION, "compression": "none", "auth_type": "ssl", "partition": "round_robin", "round_robin": { "group_events": 10 }, "topics":[{"topic":"default-securityonion"}], "headers": [ { "key": "", "value": "" } ], "timeout": 30, "broker_timeout": 30, "required_acks": 1 }' '{"name":"grid-kafka", "id":"so-manager_kafka","type":"kafka","hosts":[ $MANAGER_IP ],"is_default":false,"is_default_monitoring":false,"config_yaml":"","ssl":{"certificate_authorities":[ $KAFKACA ],"certificate": $KAFKACRT ,"key":"","verification_mode":"full"},"proxy_id":null,"client_id":"Elastic","version": $KAFKA_OUTPUT_VERSION ,"compression":"none","auth_type":"ssl","partition":"round_robin","round_robin":{"group_events":10},"topics":[{"topic":"default-securityonion"}],"headers":[{"key":"","value":""}],"timeout":30,"broker_timeout":30,"required_acks":1,"secrets":{"ssl":{"key": $KAFKAKEY }}}'
) )
curl -sK /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/outputs" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" -o /dev/null if ! response=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/outputs" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" --fail 2>/dev/null); then
refresh_output=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs" | jq -r .items[].id) echo -e "\nFailed to setup Elastic Fleet output policy for Kafka...\n"
exit 1
if ! echo "$refresh_output" | grep -q "so-manager_kafka"; then else
echo -e "\nFailed to setup Elastic Fleet output policy for Kafka...\n" echo -e "\nSuccessfully setup Elastic Fleet output policy for Kafka...\n"
exit 0
fi
elif kafka_output=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs/so-manager_kafka" --fail 2>/dev/null) && [[ "$force" == "true" ]]; then
# force an update to Kafka policy. Keep the current value of Kafka output policy (enabled/disabled).
ENABLED_DISABLED=$(echo "$kafka_output" | jq -e .item.is_default)
HOSTS=$(echo "$kafka_output" | jq -r '.item.hosts')
JSON_STRING=$( jq -n \
--arg KAFKACRT "$KAFKACRT" \
--arg KAFKAKEY "$KAFKAKEY" \
--arg KAFKACA "$KAFKACA" \
--arg ENABLED_DISABLED "$ENABLED_DISABLED"\
--arg KAFKA_OUTPUT_VERSION "$KAFKA_OUTPUT_VERSION" \
--argjson HOSTS "$HOSTS" \
'{"name":"grid-kafka","type":"kafka","hosts":$HOSTS,"is_default":$ENABLED_DISABLED,"is_default_monitoring":$ENABLED_DISABLED,"config_yaml":"","ssl":{"certificate_authorities":[ $KAFKACA ],"certificate": $KAFKACRT ,"key":"","verification_mode":"full"},"proxy_id":null,"client_id":"Elastic","version": $KAFKA_OUTPUT_VERSION ,"compression":"none","auth_type":"ssl","partition":"round_robin","round_robin":{"group_events":10},"topics":[{"topic":"default-securityonion"}],"headers":[{"key":"","value":""}],"timeout":30,"broker_timeout":30,"required_acks":1,"secrets":{"ssl":{"key": $KAFKAKEY }}}'
)
if ! response=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_kafka" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" --fail 2>/dev/null); then
echo -e "\nFailed to force update to Elastic Fleet output policy for Kafka...\n"
exit 1 exit 1
elif echo "$refresh_output" | grep -q "so-manager_kafka"; then else
echo -e "\nSuccessfully setup Elastic Fleet output policy for Kafka...\n" echo -e "\nForced update to Elastic Fleet output policy for Kafka...\n"
fi fi
elif echo "$output" | grep -q "so-manager_kafka"; then else
echo -e "\nElastic Fleet output policy for Kafka already exists...\n" echo -e "\nElastic Fleet output policy for Kafka already exists...\n"
fi fi
{% else %} {% else %}

View File

@@ -1,6 +1,6 @@
elasticsearch: elasticsearch:
enabled: false enabled: false
version: 8.18.4 version: 8.18.6
index_clean: true index_clean: true
config: config:
action: action:
@@ -284,6 +284,86 @@ elasticsearch:
hot: hot:
actions: {} actions: {}
min_age: 0ms min_age: 0ms
so-assistant-chat:
index_sorting: false
index_template:
composed_of:
- assistant-chat-mappings
- assistant-chat-settings
data_stream:
allow_custom_routing: false
hidden: false
ignore_missing_component_templates: []
index_patterns:
- so-assistant-chat*
priority: 501
template:
mappings:
date_detection: false
dynamic_templates:
- strings_as_keyword:
mapping:
ignore_above: 1024
type: keyword
match_mapping_type: string
settings:
index:
lifecycle:
name: so-assistant-chat-logs
mapping:
total_fields:
limit: 1500
number_of_replicas: 0
number_of_shards: 1
refresh_interval: 1s
sort:
field: '@timestamp'
order: desc
policy:
phases:
hot:
actions: {}
min_age: 0ms
so-assistant-session:
index_sorting: false
index_template:
composed_of:
- assistant-session-mappings
- assistant-session-settings
data_stream:
allow_custom_routing: false
hidden: false
ignore_missing_component_templates: []
index_patterns:
- so-assistant-session*
priority: 501
template:
mappings:
date_detection: false
dynamic_templates:
- strings_as_keyword:
mapping:
ignore_above: 1024
type: keyword
match_mapping_type: string
settings:
index:
lifecycle:
name: so-assistant-session-logs
mapping:
total_fields:
limit: 1500
number_of_replicas: 0
number_of_shards: 1
refresh_interval: 1s
sort:
field: '@timestamp'
order: desc
policy:
phases:
hot:
actions: {}
min_age: 0ms
so-endgame: so-endgame:
index_sorting: false index_sorting: false
index_template: index_template:
@@ -1243,6 +1323,70 @@ elasticsearch:
set_priority: set_priority:
priority: 50 priority: 50
min_age: 30d min_age: 30d
so-logs-agent-monitor:
index_sorting: false
index_template:
composed_of:
- event-mappings
- so-elastic-agent-monitor
- so-fleet_integrations.ip_mappings-1
- so-fleet_globals-1
- so-fleet_agent_id_verification-1
data_stream:
allow_custom_routing: false
hidden: false
ignore_missing_component_templates:
- logs-agent-monitor@custom
index_patterns:
- logs-agent-monitor-*
priority: 501
template:
mappings:
_meta:
managed: true
managed_by: security_onion
package:
name: elastic_agent
settings:
index:
lifecycle:
name: so-logs-agent-monitor-logs
mapping:
total_fields:
limit: 5000
number_of_replicas: 0
sort:
field: '@timestamp'
order: desc
policy:
_meta:
managed: true
managed_by: security_onion
package:
name: elastic_agent
phases:
cold:
actions:
set_priority:
priority: 0
min_age: 60d
delete:
actions:
delete: {}
min_age: 365d
hot:
actions:
rollover:
max_age: 30d
max_primary_shard_size: 50gb
set_priority:
priority: 100
min_age: 0ms
warm:
actions:
set_priority:
priority: 50
min_age: 30d
so-logs-elastic_agent_x_apm_server: so-logs-elastic_agent_x_apm_server:
index_sorting: false index_sorting: false
index_template: index_template:
@@ -4031,7 +4175,7 @@ elasticsearch:
hot: hot:
actions: actions:
rollover: rollover:
max_age: 1d max_age: 30d
max_primary_shard_size: 50gb max_primary_shard_size: 50gb
set_priority: set_priority:
priority: 100 priority: 100

View File

@@ -0,0 +1,36 @@
{
"processors": [
{
"set": {
"field": "event.dataset",
"value": "gridmetrics.agents",
"ignore_failure": true
}
},
{
"set": {
"field": "event.module",
"value": "gridmetrics",
"ignore_failure": true
}
},
{
"remove": {
"field": [
"host",
"elastic_agent",
"agent"
],
"ignore_missing": true,
"ignore_failure": true
}
},
{
"json": {
"field": "message",
"add_to_root": true,
"ignore_failure": true
}
}
]
}

View File

@@ -107,61 +107,61 @@
}, },
{ {
"pipeline": { "pipeline": {
"name": "logs-pfsense.log-1.23.0-firewall", "name": "logs-pfsense.log-1.23.1-firewall",
"if": "ctx.event.provider == 'filterlog'" "if": "ctx.event.provider == 'filterlog'"
} }
}, },
{ {
"pipeline": { "pipeline": {
"name": "logs-pfsense.log-1.23.0-openvpn", "name": "logs-pfsense.log-1.23.1-openvpn",
"if": "ctx.event.provider == 'openvpn'" "if": "ctx.event.provider == 'openvpn'"
} }
}, },
{ {
"pipeline": { "pipeline": {
"name": "logs-pfsense.log-1.23.0-ipsec", "name": "logs-pfsense.log-1.23.1-ipsec",
"if": "ctx.event.provider == 'charon'" "if": "ctx.event.provider == 'charon'"
} }
}, },
{ {
"pipeline": { "pipeline": {
"name": "logs-pfsense.log-1.23.0-dhcp", "name": "logs-pfsense.log-1.23.1-dhcp",
"if": "[\"dhcpd\", \"dhclient\", \"dhcp6c\"].contains(ctx.event.provider)" "if": "[\"dhcpd\", \"dhclient\", \"dhcp6c\"].contains(ctx.event.provider)"
} }
}, },
{ {
"pipeline": { "pipeline": {
"name": "logs-pfsense.log-1.23.0-unbound", "name": "logs-pfsense.log-1.23.1-unbound",
"if": "ctx.event.provider == 'unbound'" "if": "ctx.event.provider == 'unbound'"
} }
}, },
{ {
"pipeline": { "pipeline": {
"name": "logs-pfsense.log-1.23.0-haproxy", "name": "logs-pfsense.log-1.23.1-haproxy",
"if": "ctx.event.provider == 'haproxy'" "if": "ctx.event.provider == 'haproxy'"
} }
}, },
{ {
"pipeline": { "pipeline": {
"name": "logs-pfsense.log-1.23.0-php-fpm", "name": "logs-pfsense.log-1.23.1-php-fpm",
"if": "ctx.event.provider == 'php-fpm'" "if": "ctx.event.provider == 'php-fpm'"
} }
}, },
{ {
"pipeline": { "pipeline": {
"name": "logs-pfsense.log-1.23.0-squid", "name": "logs-pfsense.log-1.23.1-squid",
"if": "ctx.event.provider == 'squid'" "if": "ctx.event.provider == 'squid'"
} }
}, },
{ {
"pipeline": { "pipeline": {
"name": "logs-pfsense.log-1.23.0-snort", "name": "logs-pfsense.log-1.23.1-snort",
"if": "ctx.event.provider == 'snort'" "if": "ctx.event.provider == 'snort'"
} }
}, },
{ {
"pipeline": { "pipeline": {
"name": "logs-pfsense.log-1.23.0-suricata", "name": "logs-pfsense.log-1.23.1-suricata",
"if": "ctx.event.provider == 'suricata'" "if": "ctx.event.provider == 'suricata'"
} }
}, },

View File

@@ -0,0 +1,43 @@
{
"template": {
"mappings": {
"properties": {
"agent": {
"type": "object",
"properties": {
"hostname": {
"ignore_above": 1024,
"type": "keyword"
},
"id": {
"ignore_above": 1024,
"type": "keyword"
},
"last_checkin_status": {
"ignore_above": 1024,
"type": "keyword"
},
"last_checkin": {
"type": "date"
},
"name": {
"ignore_above": 1024,
"type": "keyword"
},
"offline_duration_hours": {
"type": "integer"
},
"policy_id": {
"ignore_above": 1024,
"type": "keyword"
},
"status": {
"ignore_above": 1024,
"type": "keyword"
}
}
}
}
}
}
}

View File

@@ -0,0 +1,104 @@
{
"template": {
"mappings": {
"properties": {
"@timestamp": {
"type": "date"
},
"so_kind": {
"ignore_above": 1024,
"type": "keyword"
},
"so_operation": {
"ignore_above": 1024,
"type": "keyword"
},
"so_chat": {
"properties": {
"role": {
"ignore_above": 1024,
"type": "keyword"
},
"content": {
"type": "object",
"enabled": false
},
"sessionId": {
"ignore_above": 1024,
"type": "keyword"
},
"createTime": {
"type": "date"
},
"deletedAt": {
"type": "date"
},
"tags": {
"ignore_above": 1024,
"type": "keyword"
},
"tool_use_id": {
"ignore_above": 1024,
"type": "keyword"
},
"userId": {
"ignore_above": 1024,
"type": "keyword"
},
"message": {
"properties": {
"id": {
"ignore_above": 1024,
"type": "keyword"
},
"type": {
"ignore_above": 1024,
"type": "keyword"
},
"role": {
"ignore_above": 1024,
"type": "keyword"
},
"model": {
"ignore_above": 1024,
"type": "keyword"
},
"contentStr": {
"type": "text"
},
"contentBlocks": {
"type": "nested",
"enabled": false
},
"stopReason": {
"ignore_above": 1024,
"type": "keyword"
},
"stopSequence": {
"ignore_above": 1024,
"type": "keyword"
},
"usage": {
"properties": {
"input_tokens": {
"type": "long"
},
"output_tokens": {
"type": "long"
},
"credits": {
"type": "long"
}
}
}
}
}
}
}
}
}
},
"_meta": {
"ecs_version": "1.12.2"
}
}

View File

@@ -0,0 +1,7 @@
{
"template": {},
"version": 1,
"_meta": {
"description": "default settings for common Security Onion Assistant indices"
}
}

View File

@@ -0,0 +1,44 @@
{
"template": {
"mappings": {
"properties": {
"@timestamp": {
"type": "date"
},
"so_kind": {
"ignore_above": 1024,
"type": "keyword"
},
"so_session": {
"properties": {
"title": {
"ignore_above": 1024,
"type": "keyword"
},
"sessionId": {
"ignore_above": 1024,
"type": "keyword"
},
"createTime": {
"type": "date"
},
"deleteTime": {
"type": "date"
},
"tags": {
"ignore_above": 1024,
"type": "keyword"
},
"userId": {
"ignore_above": 1024,
"type": "keyword"
}
}
}
}
}
},
"_meta": {
"ecs_version": "1.12.2"
}
}

View File

@@ -0,0 +1,7 @@
{
"template": {},
"version": 1,
"_meta": {
"description": "default settings for common Security Onion Assistant indices"
}
}

View File

@@ -1230,6 +1230,10 @@ firewall:
portgroups: portgroups:
- elasticsearch_node - elasticsearch_node
- elasticsearch_rest - elasticsearch_rest
managerhype:
portgroups:
- elasticsearch_node
- elasticsearch_rest
standalone: standalone:
portgroups: portgroups:
- elasticsearch_node - elasticsearch_node
@@ -1377,6 +1381,10 @@ firewall:
portgroups: portgroups:
- elasticsearch_node - elasticsearch_node
- elasticsearch_rest - elasticsearch_rest
managerhype:
portgroups:
- elasticsearch_node
- elasticsearch_rest
standalone: standalone:
portgroups: portgroups:
- elasticsearch_node - elasticsearch_node
@@ -1579,6 +1587,9 @@ firewall:
portgroups: portgroups:
- redis - redis
- elastic_agent_data - elastic_agent_data
managerhype:
portgroups:
- elastic_agent_data
self: self:
portgroups: portgroups:
- redis - redis
@@ -1696,6 +1707,9 @@ firewall:
managersearch: managersearch:
portgroups: portgroups:
- openssh - openssh
managerhype:
portgroups:
- openssh
standalone: standalone:
portgroups: portgroups:
- openssh - openssh
@@ -1758,6 +1772,8 @@ firewall:
portgroups: [] portgroups: []
managersearch: managersearch:
portgroups: [] portgroups: []
managerhype:
portgroups: []
standalone: standalone:
portgroups: [] portgroups: []
customhostgroup0: customhostgroup0:

View File

@@ -25,7 +25,7 @@
{% set KAFKA_EXTERNAL_ACCESS = salt['pillar.get']('kafka:config:external_access:enabled', default=False) %} {% set KAFKA_EXTERNAL_ACCESS = salt['pillar.get']('kafka:config:external_access:enabled', default=False) %}
{% set kafka_node_type = salt['pillar.get']('kafka:nodes:'+ GLOBALS.hostname + ':role') %} {% set kafka_node_type = salt['pillar.get']('kafka:nodes:'+ GLOBALS.hostname + ':role') %}
{% if role in ['manager', 'managersearch', 'standalone'] %} {% if role.startswith('manager') or role == 'standalone' %}
{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups[role].portgroups.append('kafka_controller') %} {% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups[role].portgroups.append('kafka_controller') %}
{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups.receiver.portgroups.append('kafka_controller') %} {% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups.receiver.portgroups.append('kafka_controller') %}
{% endif %} {% endif %}
@@ -38,8 +38,8 @@
{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups.receiver.portgroups.append('kafka_controller') %} {% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups.receiver.portgroups.append('kafka_controller') %}
{% endif %} {% endif %}
{% if role in ['manager', 'managersearch', 'standalone', 'receiver'] %} {% if role.startswith('manager') or role in ['standalone', 'receiver'] %}
{% for r in ['manager', 'managersearch', 'standalone', 'receiver', 'fleet', 'idh', 'sensor', 'searchnode','heavynode', 'elastic_agent_endpoint', 'desktop'] %} {% for r in ['manager', 'managersearch', 'managerhype', 'standalone', 'receiver', 'fleet', 'idh', 'sensor', 'searchnode','heavynode', 'elastic_agent_endpoint', 'desktop'] %}
{% if FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups[r] is defined %} {% if FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups[r] is defined %}
{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups[r].portgroups.append('kafka_data') %} {% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups[r].portgroups.append('kafka_data') %}
{% endif %} {% endif %}
@@ -48,7 +48,7 @@
{% if KAFKA_EXTERNAL_ACCESS %} {% if KAFKA_EXTERNAL_ACCESS %}
{# Kafka external access only applies for Kafka nodes with the broker role. #} {# Kafka external access only applies for Kafka nodes with the broker role. #}
{% if role in ['manager', 'managersearch', 'standalone', 'receiver'] and 'broker' in kafka_node_type %} {% if role.startswith('manager') or role in ['standalone', 'receiver'] and 'broker' in kafka_node_type %}
{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups.external_kafka.portgroups.append('kafka_external_access') %} {% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups.external_kafka.portgroups.append('kafka_external_access') %}
{% endif %} {% endif %}
{% endif %} {% endif %}

View File

@@ -22,7 +22,7 @@ kibana:
- default - default
- file - file
migrations: migrations:
discardCorruptObjects: "8.18.4" discardCorruptObjects: "8.18.6"
telemetry: telemetry:
enabled: False enabled: False
security: security:

View File

@@ -3,8 +3,7 @@
# https://securityonion.net/license; you may not use this file except in compliance with the # https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0. # Elastic License 2.0.
{% from 'libvirt/map.jinja' import LIBVIRTMERGED %} # We do not import GLOBALS in this state because it is called during setup
{% from 'salt/map.jinja' import SYSTEMD_UNIT_FILE %}
down_original_mgmt_interface: down_original_mgmt_interface:
cmd.run: cmd.run:
@@ -30,6 +29,8 @@ wait_for_br0_ip:
- onchanges: - onchanges:
- cmd: down_original_mgmt_interface - cmd: down_original_mgmt_interface
{% if grains.role == 'so-hypervisor' %}
update_mine_functions: update_mine_functions:
file.managed: file.managed:
- name: /etc/salt/minion.d/mine_functions.conf - name: /etc/salt/minion.d/mine_functions.conf
@@ -38,6 +39,10 @@ update_mine_functions:
mine_functions: mine_functions:
network.ip_addrs: network.ip_addrs:
- interface: br0 - interface: br0
{%- if role in ['so-eval','so-import','so-manager','so-managerhype','so-managersearch','so-standalone'] %}
x509.get_pem_entries:
- glob_path: '/etc/pki/ca.crt'
{% endif %}
- onchanges: - onchanges:
- cmd: wait_for_br0_ip - cmd: wait_for_br0_ip
@@ -47,3 +52,5 @@ restart_salt_minion_service:
- enable: True - enable: True
- listen: - listen:
- file: update_mine_functions - file: update_mine_functions
{% endif %}

View File

@@ -268,3 +268,12 @@ logrotate:
- nocompress - nocompress
- create - create
- sharedscripts - sharedscripts
/opt/so/log/agents/agent-monitor*_x_log:
- daily
- rotate 14
- missingok
- compress
- create
- extension .log
- dateext
- dateyesterday

View File

@@ -175,3 +175,10 @@ logrotate:
multiline: True multiline: True
global: True global: True
forcedType: "[]string" forcedType: "[]string"
"/opt/so/log/agents/agent-monitor*_x_log":
description: List of logrotate options for this file.
title: /opt/so/log/agents/agent-monitor*.log
advanced: True
multiline: True
global: True
forcedType: "[]string"

View File

@@ -17,7 +17,7 @@
{% for node_type, node_details in redis_node_data.items() | sort %} {% for node_type, node_details in redis_node_data.items() | sort %}
{% if GLOBALS.role in ['so-searchnode', 'so-standalone', 'so-managersearch', 'so-fleet'] %} {% if GLOBALS.role in ['so-searchnode', 'so-standalone', 'so-managersearch', 'so-fleet'] %}
{% if node_type in ['manager', 'managersearch', 'standalone', 'receiver' ] %} {% if node_type.startswith('manager') or node_type in ['standalone', 'receiver'] %}
{% for hostname in redis_node_data[node_type].keys() %} {% for hostname in redis_node_data[node_type].keys() %}
{% do LOGSTASH_REDIS_NODES.append({hostname:node_details[hostname].ip}) %} {% do LOGSTASH_REDIS_NODES.append({hostname:node_details[hostname].ip}) %}
{% endfor %} {% endfor %}
@@ -47,7 +47,7 @@
{% endif %} {% endif %}
{# Disable logstash on manager & receiver nodes unless it has an override configured #} {# Disable logstash on manager & receiver nodes unless it has an override configured #}
{% if not KAFKA_LOGSTASH %} {% if not KAFKA_LOGSTASH %}
{% if GLOBALS.role in ['so-manager', 'so-receiver'] and GLOBALS.hostname not in KAFKA_LOGSTASH %} {% if GLOBALS.role in ['so-manager', 'so-managerhype', 'so-receiver'] and GLOBALS.hostname not in KAFKA_LOGSTASH %}
{% do LOGSTASH_MERGED.update({'enabled': False}) %} {% do LOGSTASH_MERGED.update({'enabled': False}) %}
{% endif %} {% endif %}
{% endif %} {% endif %}

View File

@@ -5,3 +5,12 @@ manager:
minute: 0 minute: 0
additionalCA: '' additionalCA: ''
insecureSkipVerify: False insecureSkipVerify: False
agent_monitoring:
enabled: False
config:
critical_agents: []
custom_kquery:
offline_threshold: 5
realert_threshold: 5
page_size: 250
run_interval: 5

View File

@@ -34,6 +34,26 @@ agents_log_dir:
- user - user
- group - group
agents_conf_dir:
file.directory:
- name: /opt/so/conf/agents
- user: root
- group: root
- recurse:
- user
- group
{% if MANAGERMERGED.agent_monitoring.config.critical_agents | length > 0 %}
critical_agents_patterns:
file.managed:
- name: /opt/so/conf/agents/critical-agents.txt
- contents: {{ MANAGERMERGED.agent_monitoring.config.critical_agents }}
{% else %}
remove_critical_agents_config:
file.absent:
- name: /opt/so/conf/agents/critical-agents.txt
{% endif %}
yara_log_dir: yara_log_dir:
file.directory: file.directory:
- name: /opt/so/log/yarasync - name: /opt/so/log/yarasync
@@ -127,6 +147,21 @@ so_fleetagent_status:
- month: '*' - month: '*'
- dayweek: '*' - dayweek: '*'
so_fleetagent_monitor:
{% if MANAGERMERGED.agent_monitoring.enabled %}
cron.present:
{% else %}
cron.absent:
{% endif %}
- name: /bin/flock -n /opt/so/log/agents/agent-monitor.lock /usr/sbin/so-elastic-agent-monitor
- identifier: so_fleetagent_monitor
- user: root
- minute: '*/{{ MANAGERMERGED.agent_monitoring.config.run_interval }}'
- hour: '*'
- daymonth: '*'
- month: '*'
- dayweek: '*'
socore_own_saltstack_default: socore_own_saltstack_default:
file.directory: file.directory:
- name: /opt/so/saltstack/default - name: /opt/so/saltstack/default

View File

@@ -37,3 +37,44 @@ manager:
forcedType: bool forcedType: bool
global: True global: True
helpLink: proxy.html helpLink: proxy.html
agent_monitoring:
enabled:
description: Enable monitoring elastic agents for health issues. Can be used to trigger an alert when a 'critical' agent hasn't checked in with fleet for longer than the configured offline threshold.
global: True
helpLink: elastic-fleet.html
forcedType: bool
config:
critical_agents:
description: List of 'critical' agents to log when they haven't checked in for longer than the maximum allowed time. If no 'critical' agents are specified, all offline agents will be logged once they reach the offline threshold.
global: True
multiline: True
helpLink: elastic-fleet.html
forcedType: "[]string"
custom_kquery:
description: For more granular control over what agents to monitor for offline|degraded status add a kquery here. It is recommended to create & test within Elastic Fleet first to ensure your agents are targeted correctly using the query. eg 'status:offline AND tags:INFRA'
global: True
helpLink: elastic-fleet.html
forcedType: string
advanced: True
offline_threshold:
description: The maximum allowed time in hours that a 'critical' agent can be offline before being logged.
global: True
helpLink: elastic-fleet.html
forcedType: int
realert_threshold:
description: The time in hours that must pass before another alert is generated for an offline agent that exceeds the offline_threshold.
global: True
helpLink: elastic-fleet.html
forcedType: int
page_size:
description: The amount of agents that can be processed per API request to fleet.
global: True
helpLink: elastic-fleet.html
forcedType: int
advanced: True
run_interval:
description: The time in minutes between checking fleet agent statuses.
global: True
advanced: True
helpLink: elastic-fleet.html
forcedType: int

View File

@@ -454,6 +454,7 @@ function add_sensor_to_minion() {
echo "sensor:" echo "sensor:"
echo " interface: '$INTERFACE'" echo " interface: '$INTERFACE'"
echo " mtu: 9000" echo " mtu: 9000"
echo " channels: 1"
echo "zeek:" echo "zeek:"
echo " enabled: True" echo " enabled: True"
echo " config:" echo " config:"

View File

@@ -419,6 +419,7 @@ preupgrade_changes() {
[[ "$INSTALLEDVERSION" == 2.4.141 ]] && up_to_2.4.150 [[ "$INSTALLEDVERSION" == 2.4.141 ]] && up_to_2.4.150
[[ "$INSTALLEDVERSION" == 2.4.150 ]] && up_to_2.4.160 [[ "$INSTALLEDVERSION" == 2.4.150 ]] && up_to_2.4.160
[[ "$INSTALLEDVERSION" == 2.4.160 ]] && up_to_2.4.170 [[ "$INSTALLEDVERSION" == 2.4.160 ]] && up_to_2.4.170
[[ "$INSTALLEDVERSION" == 2.4.170 ]] && up_to_2.4.180
true true
} }
@@ -448,6 +449,7 @@ postupgrade_changes() {
[[ "$POSTVERSION" == 2.4.141 ]] && post_to_2.4.150 [[ "$POSTVERSION" == 2.4.141 ]] && post_to_2.4.150
[[ "$POSTVERSION" == 2.4.150 ]] && post_to_2.4.160 [[ "$POSTVERSION" == 2.4.150 ]] && post_to_2.4.160
[[ "$POSTVERSION" == 2.4.160 ]] && post_to_2.4.170 [[ "$POSTVERSION" == 2.4.160 ]] && post_to_2.4.170
[[ "$POSTVERSION" == 2.4.170 ]] && post_to_2.4.180
true true
} }
@@ -599,6 +601,13 @@ post_to_2.4.170() {
POSTVERSION=2.4.170 POSTVERSION=2.4.170
} }
post_to_2.4.180() {
# Force update to Kafka output policy
/usr/sbin/so-kafka-fleet-output-policy --force
POSTVERSION=2.4.180
}
repo_sync() { repo_sync() {
echo "Sync the local repo." echo "Sync the local repo."
su socore -c '/usr/sbin/so-repo-sync' || fail "Unable to complete so-repo-sync." su socore -c '/usr/sbin/so-repo-sync' || fail "Unable to complete so-repo-sync."
@@ -856,6 +865,12 @@ up_to_2.4.170() {
INSTALLEDVERSION=2.4.170 INSTALLEDVERSION=2.4.170
} }
up_to_2.4.180() {
echo "Nothing to do for 2.4.180"
INSTALLEDVERSION=2.4.180
}
add_hydra_pillars() { add_hydra_pillars() {
mkdir -p /opt/so/saltstack/local/pillar/hydra mkdir -p /opt/so/saltstack/local/pillar/hydra
touch /opt/so/saltstack/local/pillar/hydra/soc_hydra.sls touch /opt/so/saltstack/local/pillar/hydra/soc_hydra.sls

View File

@@ -0,0 +1,254 @@
{%- from 'manager/map.jinja' import MANAGERMERGED -%}
{%- set OFFLINE_THRESHOLD_HOURS = MANAGERMERGED.agent_monitoring.config.offline_threshold -%}
{%- set PAGE_SIZE = MANAGERMERGED.agent_monitoring.config.page_size -%}
{%- set CUSTOM_KQUERY = MANAGERMERGED.agent_monitoring.config.custom_kquery -%}
{%- set REALERT_THRESHOLD = MANAGERMERGED.agent_monitoring.config.realert_threshold -%}
#!/bin/bash
set -euo pipefail
LOG_DIR="/opt/so/log/agents"
LOG_FILE="$LOG_DIR/agent-monitor.log"
CURL_CONFIG="/opt/so/conf/elasticsearch/curl.config"
FLEET_API="http://localhost:5601/api/fleet/agents"
{#- When using custom kquery ignore critical agents patterns. Since we want all the results of custom query logged #}
{%- if CUSTOM_KQUERY != None and CUSTOM_KQUERY | length > 0 %}
CRITICAL_AGENTS_FILE="/dev/null"
{%- else %}
CRITICAL_AGENTS_FILE="/opt/so/conf/agents/critical-agents.txt"
{%- endif %}
OFFLINE_THRESHOLD_HOURS={{ OFFLINE_THRESHOLD_HOURS }}
REALERT_THRESHOLD={{ REALERT_THRESHOLD }}
PAGE_SIZE="{{ PAGE_SIZE }}"
# Emit a timestamped status line to stderr.
# stderr is used so operational chatter never mixes with the JSON alert
# records that main() appends to $LOG_FILE.
# $1 = severity label (INFO/WARN/ERROR), $2 = message text.
log_message() {
local level="$1"
local message="$2"
echo "$(date -u +"%Y-%m-%dT%H:%M:%SZ") [$level] $message" >&2
}
# Decide whether a hostname counts as a "critical" agent.
# $1 = agent hostname, $2 = pattern file (one glob-style pattern per line;
# blank lines and '#' comments are ignored). Returns 0 on match, 1 otherwise.
# Matching is case-insensitive; a missing or empty pattern file matches all.
matches_critical_pattern() {
local hostname="$1"
local pattern_file="$2"
# If critical agents file doesn't exist or is empty, match all
if [ ! -f "$pattern_file" ] || [ ! -s "$pattern_file" ]; then
return 0
fi
local hostname_lower=$(echo "$hostname" | tr '[:upper:]' '[:lower:]')
# The `|| [ -n "$pattern" ]` clause also processes a final line that
# lacks a trailing newline.
while IFS= read -r pattern || [ -n "$pattern" ]; do
# empty lines and comments
[[ -z "$pattern" || "$pattern" =~ ^[[:space:]]*# ]] && continue
# cut whitespace
pattern=$(echo "$pattern" | xargs)
local pattern_lower=$(echo "$pattern" | tr '[:upper:]' '[:lower:]')
# Replace * with bash wildcard
# NOTE(review): only '*' is translated into regex; other metacharacters
# (notably '.') are left unescaped, so a '.' in a pattern matches ANY
# character in dotted hostnames -- confirm this looseness is intended.
local bash_pattern="${pattern_lower//\*/.*}"
# Check if hostname matches the pattern
if [[ "$hostname_lower" =~ ^${bash_pattern}$ ]]; then
return 0
fi
done < "$pattern_file"
return 1
}
# Compute the whole hours elapsed since an agent's last check-in.
# $1 = last check-in timestamp (RFC 3339 string from the Fleet API).
# Prints an integer hour count; unknown/unparseable/future timestamps
# print "0" so callers treat the agent as not-yet-over-threshold rather
# than aborting the run (script runs under `set -euo pipefail`).
calculate_offline_hours() {
    local last_checkin="$1"
    # Guard empty input explicitly: GNU `date -d ""` SUCCEEDS (it returns
    # today at midnight), which would silently report hours-since-midnight
    # instead of "unknown".
    if [ -z "$last_checkin" ]; then
        echo "0"
        return
    fi
    local current_time
    current_time=$(date +%s)
    local checkin_time
    checkin_time=$(date -d "$last_checkin" +%s 2>/dev/null || echo "0")
    # Unparseable timestamp: report 0 rather than failing the whole run.
    if [ "$checkin_time" -eq "0" ]; then
        echo "0"
        return
    fi
    local diff=$((current_time - checkin_time))
    # Clock skew or a future timestamp must not yield negative hours.
    if [ "$diff" -lt 0 ]; then
        echo "0"
        return
    fi
    echo $((diff / 3600))
}
# Return 0 if agent $1 already has an entry newer than REALERT_THRESHOLD
# hours in $LOG_FILE (meaning: skip, already alerted recently), 1 otherwise.
# Hostname comparison is case-insensitive; only the newest 1000 log lines
# are scanned to bound the cost on large log files.
check_recent_log_entries() {
local agent_hostname="$1"
# No log file yet means nothing has ever been alerted on.
if [ ! -f "$LOG_FILE" ]; then
return 1
fi
local current_time=$(date +%s)
# NOTE(review): threshold_seconds is computed but never used; the
# comparison below works in whole hours via hours_diff.
local threshold_seconds=$((REALERT_THRESHOLD * 3600))
local agent_hostname_lower=$(echo "$agent_hostname" | tr '[:upper:]' '[:lower:]')
local most_recent_timestamp=""
# Walk the recent JSON lines oldest-to-newest, keeping the timestamp of
# the LAST (newest) entry that belongs to this hostname.
while IFS= read -r line; do
[ -z "$line" ] && continue
local logged_hostname=$(echo "$line" | jq -r '.["agent.hostname"] // empty' 2>/dev/null)
local logged_timestamp=$(echo "$line" | jq -r '.["@timestamp"] // empty' 2>/dev/null)
[ -z "$logged_hostname" ] || [ -z "$logged_timestamp" ] && continue
local logged_hostname_lower=$(echo "$logged_hostname" | tr '[:upper:]' '[:lower:]')
if [ "$logged_hostname_lower" = "$agent_hostname_lower" ]; then
most_recent_timestamp="$logged_timestamp"
fi
done < <(tail -n 1000 "$LOG_FILE" 2>/dev/null)
# If there is agent entry (within last 1000), check the time difference
if [ -n "$most_recent_timestamp" ]; then
local logged_time=$(date -d "$most_recent_timestamp" +%s 2>/dev/null || echo "0")
if [ "$logged_time" -ne "0" ]; then
local time_diff=$((current_time - logged_time))
local hours_diff=$((time_diff / 3600))
# Skip if last agent timestamp was more recent than realert threshold
if ((hours_diff < REALERT_THRESHOLD)); then
return 0
fi
fi
fi
# Agent has not been logged within realert threshold
return 1
}
# Query the Fleet API for offline/degraded agents, apply the critical-agent
# filter plus the offline/realert thresholds, and append one JSON line per
# alertable agent to $LOG_FILE. Exits non-zero if a Fleet API call fails.
main() {
    log_message "INFO" "Starting Fleet agent status check"

    # Report whether a critical-agents filter is in effect for this run.
    if [ -f "$CRITICAL_AGENTS_FILE" ] && [ -s "$CRITICAL_AGENTS_FILE" ]; then
        log_message "INFO" "Using critical agents filter from: $CRITICAL_AGENTS_FILE"
        log_message "INFO" "Patterns: $(grep -v '^#' "$CRITICAL_AGENTS_FILE" 2>/dev/null | xargs | tr ' ' ',')"
    else
        log_message "INFO" "No critical agents filter found, monitoring all agents"
    fi

    log_message "INFO" "Querying Fleet API"

    local page=1
    local total_agents=0
    local processed_agents=0
    local current_timestamp=$(date -u +"%Y-%m-%dT%H:%M:%SZ")

{%- if CUSTOM_KQUERY != None and CUSTOM_KQUERY | length > 0 %}
    log_message "INFO" "Using custom kquery: {{ CUSTOM_KQUERY }}"
    local kuery="{{ CUSTOM_KQUERY | urlencode }}"
{%- else %}
    log_message "INFO" "Using default query (all offline or degraded agents)"
    local kuery="status%3Aoffline%20OR%20status%3Adegraded"
{%- endif %}

    while true; do
        # Build the request URL inside the loop so the page parameter
        # actually advances. (Building it once before the loop baked in
        # page=1, so every iteration re-fetched the first page and agents
        # beyond it were never processed.)
        FLEET_QUERY="${FLEET_API}?kuery=${kuery}&perPage=${PAGE_SIZE}&page=${page}"

        log_message "INFO" "Fetching page $page (${PAGE_SIZE} agents per page)"

        if ! response_body=$(curl -K "$CURL_CONFIG" \
            -s --fail \
            "$FLEET_QUERY" \
            -H 'kbn-xsrf: true' 2>/dev/null); then
            log_message "ERROR" "Failed to query Fleet API (page $page)"
            exit 1
        fi

        # pagination info
        current_total=$(echo "$response_body" | jq -r '.total // 0')
        current_page=$(echo "$response_body" | jq -r '.page // 1')
        agents_in_page=$(echo "$response_body" | jq -r '.list | length')

        # The total is only announced once, from the first page.
        if [ "$page" -eq 1 ]; then
            total_agents="$current_total"
            log_message "INFO" "Found $total_agents total agents across all pages"
        fi

        log_message "INFO" "Processing page $current_page with $agents_in_page agents"

        # Process agents from current page
        mapfile -t agents < <(echo "$response_body" | jq -c '.list[]')
        for agent in "${agents[@]}"; do
            # Grab agent details
            agent_id=$(echo "$agent" | jq -r '.id // "unknown"')
            agent_hostname=$(echo "$agent" | jq -r '.local_metadata.host.hostname // "unknown"')
            agent_name=$(echo "$agent" | jq -r '.local_metadata.host.name // "unknown"')
            agent_status=$(echo "$agent" | jq -r '.status // "unknown"')
            last_checkin=$(echo "$agent" | jq -r '.last_checkin // ""')
            last_checkin_status=$(echo "$agent" | jq -r '.last_checkin_status // "unknown"')
            policy_id=$(echo "$agent" | jq -r '.policy_id // "unknown"')

            # Only log agents that are offline or degraded (skip inactive agents)
            # Fleetserver agents can show multiple versions as 'inactive'
            if [ "$agent_status" = "offline" ] || [ "$agent_status" = "degraded" ]; then
                # Check if agent matches critical agent patterns (if configured)
                if ! matches_critical_pattern "$agent_hostname" "$CRITICAL_AGENTS_FILE"; then
                    log_message "WARN" "${agent_hostname^^} is ${agent_status^^}, but does not match configured critical agents patterns. Not logging ${agent_status^^} agent"
                    continue # Skip this agent if it doesn't match any critical agent pattern
                fi

                offline_hours=$(calculate_offline_hours "$last_checkin")
                if [ "$offline_hours" -lt "$OFFLINE_THRESHOLD_HOURS" ]; then
                    log_message "INFO" "${agent_hostname^^} has been offline for ${offline_hours}h (threshold: ${OFFLINE_THRESHOLD_HOURS}h). Not logging ${agent_status^^} agent until it reaches threshold"
                    continue
                fi

                # Check if this agent was already logged within the realert_threshold
                if check_recent_log_entries "$agent_hostname"; then
                    log_message "INFO" "Skipping $agent_hostname (status: $agent_status) - already logged within last ${REALERT_THRESHOLD}h"
                    continue
                fi

                # Emit one flat JSON object per alertable agent; keys use
                # dotted ECS-style names consumed by the filestream input.
                log_entry=$(echo 'null' | jq -c \
                    --arg ts "$current_timestamp" \
                    --arg id "$agent_id" \
                    --arg hostname "$agent_hostname" \
                    --arg name "$agent_name" \
                    --arg status "$agent_status" \
                    --arg last_checkin "$last_checkin" \
                    --arg last_checkin_status "$last_checkin_status" \
                    --arg policy_id "$policy_id" \
                    --arg offline_hours "$offline_hours" \
                    '{
                        "@timestamp": $ts,
                        "agent.id": $id,
                        "agent.hostname": $hostname,
                        "agent.name": $name,
                        "agent.status": $status,
                        "agent.last_checkin": $last_checkin,
                        "agent.last_checkin_status": $last_checkin_status,
                        "agent.policy_id": $policy_id,
                        "agent.offline_duration_hours": ($offline_hours | tonumber)
                    }')

                echo "$log_entry" >> "$LOG_FILE"
                log_message "INFO" "Logged offline agent: $agent_hostname (status: $agent_status, offline: ${offline_hours}h)"
            fi
        done

        processed_agents=$((processed_agents + agents_in_page))

        if [ "$agents_in_page" -eq 0 ] || [ "$processed_agents" -ge "$total_agents" ]; then
            log_message "INFO" "Completed processing all pages. Total processed: $processed_agents agents"
            break
        fi

        page=$((page + 1))

        # Limit pagination loops in case of any issues. If agent count is high enough increase page_size in SOC manager.agent_monitoring.config.page_size
        if [ "$page" -gt 100 ]; then
            log_message "ERROR" "Reached maximum page limit (100). Issue with script or extremely large fleet deployment. Consider increasing page_size in SOC -> manager.agent_monitoring.config.page_size"
            break
        fi
    done

    log_message "INFO" "Fleet agent status check completed. Processed $processed_agents out of $total_agents agents"
}
main "$@"

View File

@@ -15,6 +15,7 @@ require_manager
echo echo
echo "This script will remove the current Elastic Fleet install and all of its data and then rerun Elastic Fleet setup." echo "This script will remove the current Elastic Fleet install and all of its data and then rerun Elastic Fleet setup."
echo "Deployed Elastic Agents will no longer be enrolled and will need to be reinstalled." echo "Deployed Elastic Agents will no longer be enrolled and will need to be reinstalled."
echo "Only the Elastic Fleet instance on the Manager will be reinstalled - dedicated Fleet node config will removed and will need to be reinstalled."
echo "This script should only be used as a last resort to reinstall Elastic Fleet." echo "This script should only be used as a last resort to reinstall Elastic Fleet."
echo echo
echo "If you would like to proceed, then type AGREE and press ENTER." echo "If you would like to proceed, then type AGREE and press ENTER."

View File

@@ -196,19 +196,23 @@ http {
} }
location / { location / {
auth_request /auth/sessions/whoami; auth_request /auth/sessions/whoami;
auth_request_set $userid $upstream_http_x_kratos_authenticated_identity_id; auth_request_set $userid $upstream_http_x_kratos_authenticated_identity_id;
proxy_set_header x-user-id $userid; proxy_set_header x-user-id $userid;
proxy_pass http://{{ GLOBALS.manager }}:9822/; proxy_pass http://{{ GLOBALS.manager }}:9822/;
proxy_read_timeout 300; proxy_read_timeout 300;
proxy_connect_timeout 300; proxy_connect_timeout 300;
proxy_set_header Host $host; proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy ""; proxy_set_header Proxy "";
proxy_set_header Upgrade $http_upgrade; proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "Upgrade"; proxy_set_header Connection "Upgrade";
proxy_set_header X-Forwarded-Proto $scheme; proxy_set_header X-Forwarded-Proto $scheme;
proxy_buffering off;
proxy_cache off;
proxy_request_buffering off;
} }
location ~ ^/auth/.*?(login|oidc/callback) { location ~ ^/auth/.*?(login|oidc/callback) {

View File

@@ -23,7 +23,7 @@ include:
# /nsm/pcap is empty until stenographer is used as pcap engine # /nsm/pcap is empty until stenographer is used as pcap engine
{% set pcap_id = 941 %} {% set pcap_id = 941 %}
{% set user_list = salt['user.list_users']() %} {% set user_list = salt['user.list_users']() %}
{% if 'stenographer' not in user_list %} {% if GLOBALS.pcap_engine == "SURICATA" and 'stenographer' not in user_list %}
{% set pcap_id = 939 %} {% set pcap_id = 939 %}
{% endif %} {% endif %}
pcapdir: pcapdir:

View File

@@ -26,9 +26,9 @@
'rocky-devel.repo', 'rocky-devel.repo',
'rocky-extras.repo', 'rocky-extras.repo',
'rocky.repo', 'rocky.repo',
'oracle-linux-ol9', 'oracle-linux-ol9.repo',
'uek-ol9', 'uek-ol9.repo',
'virt-oll9' 'virt-ol9.repo'
] ]
%} %}
{% else %} {% else %}

View File

@@ -95,7 +95,7 @@ enable_startup_states:
- unless: pgrep so-setup - unless: pgrep so-setup
# prior to 2.4.30 this managed file would restart the salt-minion service when updated # prior to 2.4.30 this managed file would restart the salt-minion service when updated
# since this file is currently only adding a sleep timer on service start # since this file is currently only adding a delay service start
# it is not required to restart the service # it is not required to restart the service
salt_minion_service_unit_file: salt_minion_service_unit_file:
file.managed: file.managed:

View File

@@ -0,0 +1,4 @@
sensor:
interface: bond0
mtu: 9000
channels: 1

View File

@@ -9,6 +9,8 @@
# in the software, and you may not remove or obscure any functionality in the # in the software, and you may not remove or obscure any functionality in the
# software that is protected by the license key." # software that is protected by the license key."
{% from 'sensor/map.jinja' import SENSORMERGED %}
{% if 'vrt' in salt['pillar.get']('features') and salt['grains.get']('salt-cloud', {}) %} {% if 'vrt' in salt['pillar.get']('features') and salt['grains.get']('salt-cloud', {}) %}
include: include:
@@ -28,3 +30,18 @@ execute_checksum:
- name: /etc/NetworkManager/dispatcher.d/pre-up.d/99-so-checksum-offload-disable - name: /etc/NetworkManager/dispatcher.d/pre-up.d/99-so-checksum-offload-disable
- onchanges: - onchanges:
- file: offload_script - file: offload_script
combine_bond_script:
file.managed:
- name: /usr/sbin/so-combine-bond
- source: salt://sensor/tools/sbin_jinja/so-combine-bond
- mode: 755
- template: jinja
- defaults:
CHANNELS: {{ SENSORMERGED.channels }}
execute_combine_bond:
cmd.run:
- name: /usr/sbin/so-combine-bond
- onlyif:
- ip link show bond0

7
salt/sensor/map.jinja Normal file
View File

@@ -0,0 +1,7 @@
{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
https://securityonion.net/license; you may not use this file except in compliance with the
Elastic License 2.0. #}
{% import_yaml 'sensor/defaults.yaml' as SENSORDEFAULTS %}
{% set SENSORMERGED = salt['pillar.get']('sensor', SENSORDEFAULTS.sensor, merge=True) %}

View File

@@ -7,3 +7,9 @@ sensor:
description: Maximum Transmission Unit (MTU) of the sensor monitoring interface. description: Maximum Transmission Unit (MTU) of the sensor monitoring interface.
helpLink: network.html helpLink: network.html
readonly: True readonly: True
channels:
description: Set the number of NIC channels. This is rarely changed from 1.
helpLink: network.html
forcedType: int
node: True
advanced: True

View File

@@ -0,0 +1,70 @@
#!/bin/bash
# Script to find all interfaces of bond0 and set channel parameters
# Compatible with Oracle Linux 9, Ubuntu, and Debian
# Rendered by Salt from sbin_jinja; CHANNELS is injected at render time.
. /usr/sbin/so-common
# Number of channels to set
CHANNELS={{ CHANNELS }}
# Exit on any error
set -e
# Check if running as root
# (ethtool channel changes require root; bail quietly otherwise)
if [[ $EUID -ne 0 ]]; then
exit 1
fi
# Check if bond0 exists
# Nothing to do on hosts without a monitoring bond.
if ! ip link show bond0 &>/dev/null; then
exit 0
fi
# Function to get slave interfaces - works across distributions
# $1 = bond device name; prints a space-separated list of member NICs
# (empty string if none could be discovered).
get_bond_slaves() {
local bond_name="$1"
local slaves=""
# Method 1: Try /sys/class/net first (most reliable)
if [ -f "/sys/class/net/$bond_name/bonding/slaves" ]; then
slaves=$(cat "/sys/class/net/$bond_name/bonding/slaves" 2>/dev/null)
fi
# Method 2: Try /proc/net/bonding (older systems)
if [ -z "$slaves" ] && [ -f "/proc/net/bonding/$bond_name" ]; then
slaves=$(grep "Slave Interface:" "/proc/net/bonding/$bond_name" 2>/dev/null | awk '{print $3}' | tr '\n' ' ')
fi
# Method 3: Parse ip link output (universal fallback)
if [ -z "$slaves" ]; then
slaves=$(ip -o link show | grep "master $bond_name" | awk -F': ' '{print $2}' | cut -d'@' -f1 | tr '\n' ' ')
fi
echo "$slaves"
}
# Get slave interfaces
SLAVES=$(get_bond_slaves bond0)
if [ -z "$SLAVES" ]; then
exit 0
fi
# Process each slave interface
# (intentional word-splitting: SLAVES is a space-separated list)
for interface in $SLAVES; do
# Skip if interface doesn't exist
if ! ip link show "$interface" &>/dev/null; then
continue
fi
# Try combined mode first
if ethtool -L "$interface" combined $CHANNELS &>/dev/null; then
continue
fi
# Fall back to separate rx/tx
# Best-effort: some drivers support neither mode, so never fail the run.
ethtool -L "$interface" rx $CHANNELS tx $CHANNELS &>/dev/null || true
done
exit 0

View File

@@ -18,6 +18,7 @@ sensoroniagentconf:
- group: 939 - group: 939
- mode: 600 - mode: 600
- template: jinja - template: jinja
- show_changes: False
analyzersdir: analyzersdir:
file.directory: file.directory:
@@ -43,6 +44,22 @@ analyzerscripts:
- source: salt://sensoroni/files/analyzers - source: salt://sensoroni/files/analyzers
- show_changes: False - show_changes: False
templatesdir:
file.directory:
- name: /opt/so/conf/sensoroni/templates
- user: 939
- group: 939
- makedirs: True
sensoronitemplates:
file.recurse:
- name: /opt/so/conf/sensoroni/templates
- source: salt://sensoroni/files/templates
- user: 939
- group: 939
- file_mode: 664
- show_changes: False
sensoroni_sbin: sensoroni_sbin:
file.recurse: file.recurse:
- name: /usr/sbin - name: /usr/sbin

View File

@@ -22,6 +22,7 @@ so-sensoroni:
- /nsm/pcapout:/nsm/pcapout:rw - /nsm/pcapout:/nsm/pcapout:rw
- /opt/so/conf/sensoroni/sensoroni.json:/opt/sensoroni/sensoroni.json:ro - /opt/so/conf/sensoroni/sensoroni.json:/opt/sensoroni/sensoroni.json:ro
- /opt/so/conf/sensoroni/analyzers:/opt/sensoroni/analyzers:rw - /opt/so/conf/sensoroni/analyzers:/opt/sensoroni/analyzers:rw
- /opt/so/conf/sensoroni/templates:/opt/sensoroni/templates:ro
- /opt/so/log/sensoroni:/opt/sensoroni/logs:rw - /opt/so/log/sensoroni:/opt/sensoroni/logs:rw
- /nsm/suripcap/:/nsm/suripcap:rw - /nsm/suripcap/:/nsm/suripcap:rw
{% if DOCKER.containers['so-sensoroni'].custom_bind_mounts %} {% if DOCKER.containers['so-sensoroni'].custom_bind_mounts %}

View File

@@ -21,6 +21,7 @@
}, },
{%- endif %} {%- endif %}
"importer": {}, "importer": {},
"export": {},
"statickeyauth": { "statickeyauth": {
"apiKey": "{{ GLOBALS.sensoroni_key }}" "apiKey": "{{ GLOBALS.sensoroni_key }}"
{% if GLOBALS.is_sensor %} {% if GLOBALS.is_sensor %}

View File

@@ -0,0 +1,39 @@
{{- /* query.myDocEvents.Oql = metadata.type: _doc | groupby event.module, event.dataset | sortby @timestamp desc */ -}}
{{- /* query.myDocEvents.MetricLimit = 10 */ -}}
{{- /* query.myDocEvents.EventLimit = 100 */ -}}
Security Onion Custom Report
============================
{{ if .Error }}
**NOTE: This report encountered a problem extracting the relevant data and may not be complete.**
**Error:** {{.Error}}
{{ end }}
Records must have been created or updated during the following time frame in order to be reflected in this report.
**Report Start Date:** {{formatDateTime "Mon Jan 02 15:04:05 -0700 2006" .BeginDate}}
**Report End Date:** {{formatDateTime "Mon Jan 02 15:04:05 -0700 2006" .EndDate}}
## Sample Doc Events
**Total Events:** {{ formatNumber "%d" "en" .Results.myDocEvents.TotalEvents}}
### Event Counts By Module and Dataset
| Count | Proportion | Module | Dataset |
| ----- | ---------- | ------ | ------- |
{{ range sortMetrics "Value" "desc" .Results.myDocEvents.Metrics.groupby_0_event_module_event_dataset -}}
| {{ formatNumber "%.0f" "en" .Value}} | {{ formatNumber "%.1f" "en" .Percentage}}% | {{index .Keys 0}} | {{index .Keys 1}} |
{{end}}
### Individual Events (Limited to first {{.Results.myDocEvents.Criteria.EventLimit}})
| Event Time | Module | Dataset | Category |
| ---------- | ------ | ------- | -------- |
{{ range .Results.myDocEvents.Events -}}
| {{.Timestamp}} | {{.Payload.event_module}} | {{.Payload.event_dataset}} | {{.Payload.event_category}} |
{{end}}

View File

@@ -0,0 +1,133 @@
Security Onion Case Report
==========================
## Case Details
**Case ID:** {{.Case.Id}}
**Title:** {{.Case.Title}}
## Description
{{.Case.Description}}
## Details
**Created:** {{formatDateTime "Mon Jan 02 15:04:05 -0700 2006" .Case.CreateTime}}
**Updated:** {{formatDateTime "Mon Jan 02 15:04:05 -0700 2006" .Case.UpdateTime}}
**Author:** {{getUserDetail "email" .Case.UserId}}
**Status:** {{.Case.Status}}
**TLP:** {{.Case.Tlp}}
**PAP:** {{.Case.Pap}}
**Severity:** {{.Case.Severity}}
**Priority:** {{.Case.Priority}}
**Category:** {{.Case.Category}}
**Tags:** {{join .Case.Tags ", " }}
**Assignee:** {{getUserDetail "email" .Case.AssigneeId}}
**Hours Logged:** {{ formatNumber "%.2f" "en" .TotalHours}}
## Comments
{{ range sortComments "CreateTime" "asc" .Comments }}
**Created:** {{formatDateTime "Mon Jan 02 15:04:05 -0700 2006" .CreateTime}}
**Updated:** {{formatDateTime "Mon Jan 02 15:04:05 -0700 2006" .UpdateTime}}
**Author:** {{getUserDetail "email" .UserId}}
**Hours Logged:** {{ formatNumber "%.2f" "en" .Hours}}
{{.Description}}
---
{{end}}
## Detections
{{ range sortDetections "Title" "asc" .Detections }}
**Title:** {{.Title}}
**Description:** {{.Description}}
**Severity:** {{.Severity}}
**Rule Engine:** {{.Engine}}
**Rule Set:** {{.Ruleset}}
**Community Rule:** {{.IsCommunity}}
**Tags:** {{.Tags}}
{{.Content}}
---
{{end}}
## Attachments
{{ range sortArtifacts "CreateTime" "asc" .Attachments }}
**Added:** {{formatDateTime "Mon Jan 02 15:04:05 -0700 2006" .CreateTime}}
**Updated:** {{formatDateTime "Mon Jan 02 15:04:05 -0700 2006" .UpdateTime}}
**Added By:** {{getUserDetail "email" .UserId}}
**TLP:** {{.Tlp}}
**Filename:** {{.Value}}
**Size:** {{ formatNumber "%.0d" "en" .StreamLen}} bytes
**SHA256:** {{.Sha256}}
**SHA1:** {{.Sha1}}
**MD5:** {{.Md5}}
**Tags:** {{.Tags}}
**Protected (Zipped):** {{.Protected}}
{{.Description}}
---
{{end}}
## Observables
| Date Added | Tlp | Type | IOC | Value | Description |
| ---------- | --- | ---- | --- | ----- | ----------- |
{{ range sortArtifacts "CreateTime" "asc" .Observables -}}
| {{formatDateTime "Mon Jan 02 15:04:05 -0700 2006" .CreateTime}} | {{.Tlp}} | {{.ArtifactType}} | {{.Ioc}} | {{.Value}} | {{.Description}} |
{{end}}
## Related Events
| Event Time | Log ID | Source IP | Destination IP |
| ---------- | ------ | --------- | -------------- |
{{ range sortRelatedEvents "fields:soc_timestamp" "asc" .RelatedEvents -}}
| {{.Fields.soc_timestamp}} | {{.Fields.log_id_uid}} | {{.Fields.source_ip}} | {{.Fields.destination_ip}} |
{{end}}
## Case History
| Date | User | Object | Operation |
| ---- | ---- | ------ | --------- |
{{ range sortHistory "CreateTime" "asc" .History -}}
| {{formatDateTime "Mon Jan 02 15:04:05 -0700 2006" .CreateTime}} | {{getUserDetail "email" .UserId}} | {{.Kind}} | {{.Operation}} |
{{end}}

View File

@@ -0,0 +1,189 @@
Security Onion Productivity Report
==================================
{{ if .Error }}
**NOTE: This report encountered a problem extracting the relevant data and may not be complete.**
**Error:** {{.Error}}
{{ end }}
Records must have been created or updated during the following time frame in order to be reflected in this report.
**Report Start Date:** {{formatDateTime "Mon Jan 02 15:04:05 -0700 2006" .BeginDate}}
**Report End Date:** {{formatDateTime "Mon Jan 02 15:04:05 -0700 2006" .EndDate}}
## Ingested Events
**Total Events:** {{ formatNumber "%d" "en" .TotalEvents}}
### Events By Module
| Count | Proportion | Module |
| ----- | ---------- | ------ |
{{ range sortMetrics "Value" "desc" .TotalEventsByModule -}}
| {{ formatNumber "%.0f" "en" .Value}} | {{ formatNumber "%.1f" "en" .Percentage}}% | {{index .Keys 0}} |
{{end}}
### Events By Module and Severity Label
| Count | Proportion | Module | Severity |
| ----- | ---------- | ------ | -------- |
{{ range sortMetrics "Value" "desc" .TotalEventsByModuleDataset -}}
| {{ formatNumber "%.0f" "en" .Value}} | {{ formatNumber "%.1f" "en" .Percentage}}% | {{index .Keys 0}} | {{index .Keys 1}} |
{{end}}
## Alerts
**Total Alerts:** {{ formatNumber "%d" "en" .TotalAlerts}}
{{ range sortMetrics "Value" "desc" .TotalAlertsByAcknowledged -}}
{{ if index .Keys 0 | eq "true" }}
**Acknowledged Alerts:** {{ formatNumber "%.0f" "en" .Value}} ({{ formatNumber "%.1f" "en" .Percentage}}%)
{{ end }}
{{end}}
{{ range sortMetrics "Value" "desc" .TotalAlertsByEscalated -}}
{{ if index .Keys 0 | eq "true" }}
**Escalated Alerts:** {{ formatNumber "%.0f" "en" .Value}} ({{ formatNumber "%.1f" "en" .Percentage}}%)
{{ end }}
{{end}}
### Alerts By Severity
| Count | Proportion | Severity |
| ----- | ---------- | -------- |
{{ range sortMetrics "Value" "desc" .TotalAlertsBySeverityLabel -}}
| {{ formatNumber "%.0f" "en" .Value}} | {{ formatNumber "%.1f" "en" .Percentage}}% | {{index .Keys 0}} |
{{end}}
### Alerts By Module
| Count | Proportion | Module |
| ----- | ---------- | ------ |
{{ range sortMetrics "Value" "desc" .TotalAlertsByModule -}}
| {{ formatNumber "%.0f" "en" .Value}} | {{ formatNumber "%.1f" "en" .Percentage}}% | {{index .Keys 0}} |
{{end}}
### Alerts By Module and Severity Label
| Count | Proportion | Module | Severity |
| ----- | ---------- | ------ | -------- |
{{ range sortMetrics "Value" "desc" .TotalAlertsByModuleSeverityLabel -}}
| {{ formatNumber "%.0f" "en" .Value}} | {{ formatNumber "%.1f" "en" .Percentage}}% | {{index .Keys 0}} | {{index .Keys 1}} |
{{end}}
### Alerts By Ruleset
| Count | Proportion | Ruleset |
| ----- | ---------- | ------- |
{{ range sortMetrics "Value" "desc" .TotalAlertsByRuleset -}}
| {{ formatNumber "%.0f" "en" .Value}} | {{ formatNumber "%.1f" "en" .Percentage}}% | {{index .Keys 0}} |
{{end}}
### Alerts By Rule Category
| Count | Proportion | Category |
| ----- | ---------- | -------- |
{{ range sortMetrics "Value" "desc" .TotalAlertsByCategory -}}
| {{ formatNumber "%.0f" "en" .Value}} | {{ formatNumber "%.1f" "en" .Percentage}}% | {{index .Keys 0}} |
{{end}}
## Cases
**Total Cases:** {{ formatNumber "%d" "en" .TotalCases}}
**Average Elapsed Time To Complete:** {{ formatNumber "%.1f" "en" .AverageHoursToComplete }} hours
### Cases By Status
| Count | Proportion | Status |
| ----- | ---------- | ------ |
{{ range sortMetrics "Value" "desc" .TotalCasesByStatus -}}
| {{ formatNumber "%.0f" "en" .Value}} | {{ formatNumber "%.1f" "en" .Percentage}}% | {{index .Keys 0}} |
{{end}}
### Cases By Assignee
| Count | Proportion | Assignee |
| ----- | ---------- | -------- |
{{ range sortMetrics "Value" "desc" .TotalCasesByAssignee -}}
| {{ formatNumber "%.0f" "en" .Value}} | {{ formatNumber "%.1f" "en" .Percentage}}% | {{index .Keys 0 | getUserDetail "email"}} |
{{end}}
### Cases By Status and Assignee
| Count | Proportion | Status | Assignee |
| ----- | ---------- | ------ | -------- |
{{ range sortMetrics "Value" "desc" .TotalCasesByStatusAssignee -}}
| {{ formatNumber "%.0f" "en" .Value}} | {{ formatNumber "%.1f" "en" .Percentage}}% | {{index .Keys 0}} | {{index .Keys 1 | getUserDetail "email"}} |
{{end}}
### Cases By Severity
| Count | Proportion | Severity |
| ----- | ---------- | -------- |
{{ range sortMetrics "Value" "desc" .TotalCasesBySeverity -}}
| {{ formatNumber "%.0f" "en" .Value}} | {{ formatNumber "%.1f" "en" .Percentage}}% | {{index .Keys 0}} |
{{end}}
### Cases By Priority
| Count | Proportion | Priority |
| ----- | ---------- | -------- |
{{ range sortMetrics "Value" "desc" .TotalCasesByPriority -}}
| {{ formatNumber "%.0f" "en" .Value}} | {{ formatNumber "%.1f" "en" .Percentage}}% | {{index .Keys 0}} |
{{end}}
### Cases By Traffic Light Protocol (TLP)
| Count | Proportion | TLP |
| ----- | ---------- | ----|
{{ range sortMetrics "Value" "desc" .TotalCasesByTlp -}}
| {{ formatNumber "%.0f" "en" .Value}} | {{ formatNumber "%.1f" "en" .Percentage}}% | {{index .Keys 0}} |
{{end}}
### Cases By Permissible Actions Protocol (PAP)
| Count | Proportion | PAP |
| ----- | ---------- | --- |
{{ range sortMetrics "Value" "desc" .TotalCasesByPap -}}
| {{ formatNumber "%.0f" "en" .Value}} | {{ formatNumber "%.1f" "en" .Percentage}}% | {{index .Keys 0}} |
{{end}}
### Cases By Category
| Count | Proportion | Category |
| ----- | ---------- | -------- |
{{ range sortMetrics "Value" "desc" .TotalCasesByCategory -}}
| {{ formatNumber "%.0f" "en" .Value}} | {{ formatNumber "%.1f" "en" .Percentage}}% | {{index .Keys 0}} |
{{end}}
### Cases By Tags
| Count | Proportion | Tags |
| ----- | ---------- | ---- |
{{ range sortMetrics "Value" "desc" .TotalCasesByTags -}}
| {{ formatNumber "%.0f" "en" .Value}} | {{ formatNumber "%.1f" "en" .Percentage}}% | {{index .Keys 0}} |
{{end}}
### Comments By User
| Count | Proportion | User |
| ----- | ---------- | ---- |
{{ range sortMetrics "Value" "desc" .TotalCommentsByUserId -}}
| {{ formatNumber "%.0f" "en" .Value}} | {{ formatNumber "%.1f" "en" .Percentage}}% | {{index .Keys 0 | getUserDetail "email"}} |
{{end}}
## Time Tracking
**Total Hours:** {{ formatNumber "%.2f" "en" .TotalHours}}
### Hours By User
| Hours | Proportion | User |
| ----- | ---------- | ---- |
{{ range sortMetrics "Value" "desc" .TotalHoursByUserId -}}
| {{ formatNumber "%.2f" "en" .Value}} | {{ formatNumber "%.1f" "en" .Percentage}}% | {{index .Keys 0 | getUserDetail "email"}} |
{{end}}

View File

@@ -331,3 +331,94 @@ sensoroni:
sensitive: False sensitive: False
advanced: True advanced: True
forcedType: string forcedType: string
files:
templates:
reports:
standard:
case_report__md:
title: Case report Template
description: The template used when generating a case report. Supports markdown format.
file: True
global: True
syntax: md
helpLink: reports.html
productivity_report__md:
title: Productivity Report Template
description: The template used when generating a comprehensive productivity report. Supports markdown format.
file: True
global: True
syntax: md
helpLink: reports.html
custom:
generic_report1__md:
title: Custom Report 1
description: A custom, user-defined report. Supports markdown format. The report title inside the file, typically near the top, will be shown in the SOC reporting UI.
file: True
global: True
syntax: md
helpLink: reports.html
generic_report2__md:
title: Custom Report 2
description: A custom, user-defined report. Supports markdown format. The report title inside the file, typically near the top, will be shown in the SOC reporting UI.
file: True
global: True
syntax: md
helpLink: reports.html
generic_report3__md:
title: Custom Report 3
description: A custom, user-defined report. Supports markdown format. The report title inside the file, typically near the top, will be shown in the SOC reporting UI.
file: True
global: True
syntax: md
helpLink: reports.html
generic_report4__md:
title: Custom Report 4
description: A custom, user-defined report. Supports markdown format. The report title inside the file, typically near the top, will be shown in the SOC reporting UI.
file: True
global: True
syntax: md
helpLink: reports.html
generic_report5__md:
title: Custom Report 5
description: A custom, user-defined report. Supports markdown format. The report title inside the file, typically near the top, will be shown in the SOC reporting UI.
file: True
global: True
syntax: md
helpLink: reports.html
generic_report6__md:
title: Custom Report 6
description: A custom, user-defined report. Supports markdown format. The report title inside the file, typically near the top, will be shown in the SOC reporting UI.
file: True
global: True
syntax: md
helpLink: reports.html
generic_report7__md:
title: Custom Report 7
description: A custom, user-defined report. Supports markdown format. The report title inside the file, typically near the top, will be shown in the SOC reporting UI.
file: True
global: True
syntax: md
helpLink: reports.html
generic_report8__md:
title: Custom Report 8
description: A custom, user-defined report. Supports markdown format. The report title inside the file, typically near the top, will be shown in the SOC reporting UI.
file: True
global: True
syntax: md
helpLink: reports.html
generic_report9__md:
title: Custom Report 9
description: A custom, user-defined report. Supports markdown format. The report title inside the file, typically near the top, will be shown in the SOC reporting UI.
file: True
global: True
syntax: md
helpLink: reports.html
addl_generic_report__md:
title: Additional Custom Report
description: A duplicatable custom, user-defined report. Supports markdown format. The report title inside the file, typically near the top, will be shown in the SOC reporting UI. This is an unsupported feature due to the inability to edit duplicated reports via the SOC app.
advanced: True
file: True
global: True
syntax: md
duplicates: True
helpLink: reports.html

View File

@@ -35,5 +35,6 @@
{% do SOCDEFAULTS.soc.config.server.modules.statickeyauth.update({'anonymousCidr': DOCKER.range, 'apiKey': pillar.sensoroni.config.sensoronikey}) %} {% do SOCDEFAULTS.soc.config.server.modules.statickeyauth.update({'anonymousCidr': DOCKER.range, 'apiKey': pillar.sensoroni.config.sensoronikey}) %}
{% do SOCDEFAULTS.soc.config.server.client.case.update({'analyzerNodeId': GLOBALS.hostname}) %} {% do SOCDEFAULTS.soc.config.server.client.case.update({'analyzerNodeId': GLOBALS.hostname}) %}
{% do SOCDEFAULTS.soc.config.server.client.update({'exportNodeId': GLOBALS.hostname}) %}
{% set SOCDEFAULTS = SOCDEFAULTS.soc %} {% set SOCDEFAULTS = SOCDEFAULTS.soc %}

View File

@@ -1358,6 +1358,8 @@ soc:
htmlDir: html htmlDir: html
importUploadDir: /nsm/soc/uploads importUploadDir: /nsm/soc/uploads
forceUserOtp: false forceUserOtp: false
customReportsPath: /opt/sensoroni/templates/reports/custom
enableReverseLookup: false
modules: modules:
cases: soc cases: soc
filedatastore: filedatastore:
@@ -1489,6 +1491,8 @@ soc:
- repo: file:///nsm/airgap-resources/playbooks/securityonion-resources-playbooks - repo: file:///nsm/airgap-resources/playbooks/securityonion-resources-playbooks
branch: main branch: main
folder: securityonion-normalized folder: securityonion-normalized
assistant:
apiUrl: https://onionai.securityonion.net
salt: salt:
queueDir: /opt/sensoroni/queue queueDir: /opt/sensoroni/queue
timeoutMs: 45000 timeoutMs: 45000
@@ -1565,7 +1569,6 @@ soc:
outputPath: /opt/sensoroni/navigator outputPath: /opt/sensoroni/navigator
lookbackDays: 3 lookbackDays: 3
client: client:
enableReverseLookup: false
docsUrl: /docs/ docsUrl: /docs/
cheatsheetUrl: /docs/cheatsheet.pdf cheatsheetUrl: /docs/cheatsheet.pdf
releaseNotesUrl: /docs/release-notes.html releaseNotesUrl: /docs/release-notes.html
@@ -1576,6 +1579,7 @@ soc:
casesEnabled: true casesEnabled: true
detectionsEnabled: true detectionsEnabled: true
inactiveTools: ['toolUnused'] inactiveTools: ['toolUnused']
exportNodeId:
tools: tools:
- name: toolKibana - name: toolKibana
description: toolKibanaHelp description: toolKibanaHelp
@@ -2539,3 +2543,12 @@ soc:
- ' -priv' - ' -priv'
condition: all of selection_* condition: all of selection_*
level: 'high' # info | low | medium | high | critical level: 'high' # info | low | medium | high | critical
assistant:
enabled: false
investigationPrompt: Investigate Alert ID {socid}
contextLimitSmall: 200000
contextLimitLarge: 1000000
thresholdColorRatioLow: 0.5
thresholdColorRatioMed: 0.75
thresholdColorRatioMax: 1
lowBalanceColorAlert: 500000

View File

@@ -48,6 +48,7 @@ so-soc:
- /opt/so/conf/soc/custom_roles:/opt/sensoroni/rbac/custom_roles:ro - /opt/so/conf/soc/custom_roles:/opt/sensoroni/rbac/custom_roles:ro
- /opt/so/conf/soc/soc_users_roles:/opt/sensoroni/rbac/users_roles:rw - /opt/so/conf/soc/soc_users_roles:/opt/sensoroni/rbac/users_roles:rw
- /opt/so/conf/soc/soc_clients_roles:/opt/sensoroni/rbac/clients_roles:rw - /opt/so/conf/soc/soc_clients_roles:/opt/sensoroni/rbac/clients_roles:rw
- /opt/so/conf/sensoroni/templates:/opt/sensoroni/templates:ro
- /opt/so/conf/soc/queue:/opt/sensoroni/queue:rw - /opt/so/conf/soc/queue:/opt/sensoroni/queue:rw
- /opt/so/saltstack:/opt/so/saltstack:rw - /opt/so/saltstack:/opt/so/saltstack:rw
- /opt/so/conf/soc/migrations:/opt/so/conf/soc/migrations:rw - /opt/so/conf/soc/migrations:/opt/so/conf/soc/migrations:rw

View File

@@ -138,6 +138,11 @@ soc:
title: Require TOTP title: Require TOTP
description: Require all users to enable Time-based One Time Passwords (MFA) upon login to SOC. description: Require all users to enable Time-based One Time Passwords (MFA) upon login to SOC.
global: True global: True
customReportsPath:
title: Custom Reports Path
description: Path to custom markdown templates for PDF report generation. All markdown files in this directory will be available as custom reports in the SOC Reports interface.
global: True
advanced: True
subgrids: subgrids:
title: Subordinate Grids title: Subordinate Grids
description: | description: |
@@ -175,6 +180,10 @@ soc:
label: Subgrid Enabled label: Subgrid Enabled
forcedType: bool forcedType: bool
default: false default: false
enableReverseLookup:
description: "Set to true to enable reverse DNS lookups for IP addresses in the SOC UI. To add your own local lookups, create a CSV file at /nsm/custom-mappings/ip-descriptions.csv on your Manager and populate the file with IP addresses and descriptions as follows: IP, Description. Elasticsearch will then ingest the CSV during the next high state."
global: True
helpLink: soc-customization.html#reverse-dns
modules: modules:
elastalertengine: elastalertengine:
aiRepoUrl: aiRepoUrl:
@@ -561,6 +570,8 @@ soc:
forcedType: "[]{}" forcedType: "[]{}"
syntax: json syntax: json
uiElements: uiElements:
- field: rulesetName
label: Playbook Source Name
- field: repo - field: repo
label: Repo URL label: Repo URL
required: True required: True
@@ -569,10 +580,42 @@ soc:
- field: folder - field: folder
label: Folder label: Folder
airgap: *pbRepos airgap: *pbRepos
assistant:
apiUrl:
description: The URL of the AI gateway.
advanced: True
global: True
client: client:
enableReverseLookup: assistant:
description: Set to true to enable reverse DNS lookups for IP addresses in the SOC UI. enabled:
global: True description: Set to true to enable the Onion AI assistant in SOC.
global: True
investigationPrompt:
description: Prompt given to Onion AI when beginning an investigation.
global: True
contextLimitSmall:
description: Smaller context limit for Onion AI.
global: True
advanced: True
contextLimitLarge:
description: Larger context limit for Onion AI.
global: True
advanced: True
thresholdColorRatioLow:
description: Lower visual context color change threshold.
global: True
advanced: True
thresholdColorRatioMed:
description: Middle visual context color change threshold.
global: True
advanced: True
thresholdColorRatioMax:
description: Max visual context color change threshold.
global: True
advanced: True
lowBalanceColorAlert:
description: Onion AI credit amount at which balance turns red.
advanced: True
apiTimeoutMs: apiTimeoutMs:
description: Duration (in milliseconds) to wait for a response from the SOC server API before giving up and showing an error on the SOC UI. description: Duration (in milliseconds) to wait for a response from the SOC server API before giving up and showing an error on the SOC UI.
global: True global: True
@@ -606,6 +649,10 @@ soc:
global: True global: True
advanced: True advanced: True
forcedType: "[]{}" forcedType: "[]{}"
exportNodeId:
description: The node ID on which export jobs will be executed.
global: True
advanced: True
hunt: &appSettings hunt: &appSettings
groupItemsPerPage: groupItemsPerPage:
description: Default number of aggregations to show per page. Larger values consume more vertical area in the SOC UI. description: Default number of aggregations to show per page. Larger values consume more vertical area in the SOC UI.

View File

@@ -29,7 +29,7 @@ title() {
} }
fail_setup() { fail_setup() {
error "Setup encounted an unrecoverable failure, exiting" error "Setup encountered an unrecoverable failure, exiting"
touch /root/failure touch /root/failure
exit 1 exit 1
} }
@@ -1187,15 +1187,18 @@ get_minion_type() {
} }
hypervisor_local_states() { hypervisor_local_states() {
# these states need to run before the first highstate so that we dont deal with the salt-minion restarting # these states need to run before the first highstate so that we dont deal with the salt-minion restarting
# and we need these setup prior to the highstate # and we need these setup prior to the highstate
info "Check if hypervisor or managerhype" info "Check if hypervisor or managerhype"
if [ $is_hypervisor ] || [ $is_managerhype ]; then if [ $is_hypervisor ] || [ $is_managerhype ]; then
info "Running libvirt states for hypervisor" info "Running libvirt states for hypervisor"
logCmd "salt-call state.apply libvirt.64962 --local --file-root=../salt/ -l info" logCmd "salt-call state.apply libvirt.64962 --local --file-root=../salt/ -l info queue=True"
info "Setting up bridge for $MNIC" info "Setting up bridge for $MNIC"
salt-call state.apply libvirt.bridge --local --file-root=../salt/ -l info pillar="{\"host\": {\"mainint\": \"$MNIC\"}}" salt-call state.apply libvirt.bridge --local --file-root=../salt/ -l info pillar='{"host": {"mainint": "'$MNIC'"}}' queue=True
fi if [ $is_managerhype ]; then
logCmd "salt-call state.apply salt.minion queue=True"
fi
fi
} }
install_cleanup() { install_cleanup() {
@@ -1642,7 +1645,7 @@ reserve_ports() {
reinstall_init() { reinstall_init() {
info "Putting system in state to run setup again" info "Putting system in state to run setup again"
if [[ $install_type =~ ^(MANAGER|EVAL|MANAGERSEARCH|STANDALONE|FLEET|IMPORT)$ ]]; then if [[ $install_type =~ ^(MANAGER|EVAL|MANAGERSEARCH|MANAGERHYPE|STANDALONE|FLEET|IMPORT)$ ]]; then
local salt_services=( "salt-master" "salt-minion" ) local salt_services=( "salt-master" "salt-minion" )
else else
local salt_services=( "salt-minion" ) local salt_services=( "salt-minion" )

View File

@@ -654,9 +654,10 @@ whiptail_install_type_dist_new() {
Note: MANAGER is the recommended option for most users. MANAGERSEARCH should only be used in very specific situations. Note: MANAGER is the recommended option for most users. MANAGERSEARCH should only be used in very specific situations.
EOM EOM
install_type=$(whiptail --title "$whiptail_title" --menu "$mngr_msg" 20 75 2 \ install_type=$(whiptail --title "$whiptail_title" --menu "$mngr_msg" 20 75 3 \
"MANAGER" "New grid, requires separate search node(s) " \ "MANAGER" "New grid, requires separate search node(s) " \
"MANAGERSEARCH" "New grid, separate search node(s) are optional " \ "MANAGERSEARCH" "New grid, separate search node(s) are optional " \
"MANAGERHYPE" "Manager with hypervisor - Security Onion Pro required " \
3>&1 1>&2 2>&3 3>&1 1>&2 2>&3
) )