Merge remote-tracking branch 'remotes/origin/2.4/dev' into reyesj2/kafka

This commit is contained in:
reyesj2
2024-05-02 15:12:27 -04:00
53 changed files with 1317 additions and 440 deletions

View File

@@ -15,6 +15,7 @@ concurrency:
 jobs:
   close-threads:
+    if: github.repository_owner == 'security-onion-solutions'
     runs-on: ubuntu-latest
     permissions:
       issues: write

View File

@@ -15,6 +15,7 @@ concurrency:
 jobs:
   lock-threads:
+    if: github.repository_owner == 'security-onion-solutions'
     runs-on: ubuntu-latest
     steps:
       - uses: jertel/lock-threads@main

View File

@@ -5,8 +5,13 @@
 # https://securityonion.net/license; you may not use this file except in compliance with the
 # Elastic License 2.0.

 . /usr/sbin/so-common

-salt-call state.highstate -l info
+cat << EOF
+so-checkin will run a full salt highstate to apply all salt states. If a highstate is already running, this request will be queued and so it may pause for a few minutes before you see any more output. For more information about so-checkin and salt, please see:
+https://docs.securityonion.net/en/2.4/salt.html
+EOF
+
+salt-call state.highstate -l info queue=True
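
Salt's queue=True flag is what makes the new banner accurate: instead of aborting when another state run is already in progress, the request waits its turn. A quick way to see what so-checkin may be waiting on (a sketch using stock Salt modules, not part of this diff):

salt-call saltutil.is_running state.highstate   # list any highstate already in progress
salt-call state.highstate -l info queue=True    # then queue this run behind it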

View File

@@ -236,6 +236,7 @@ exclude_log "playbook.log" # Playbook is removed as of 2.4.70, logs may still be
exclude_log "mysqld.log" # MySQL is removed as of 2.4.70, logs may still be on disk exclude_log "mysqld.log" # MySQL is removed as of 2.4.70, logs may still be on disk
exclude_log "soctopus.log" # Soctopus is removed as of 2.4.70, logs may still be on disk exclude_log "soctopus.log" # Soctopus is removed as of 2.4.70, logs may still be on disk
exclude_log "agentstatus.log" # ignore this log since it tracks agents in error state exclude_log "agentstatus.log" # ignore this log since it tracks agents in error state
exclude_log "detections_runtime-status_yara.log" # temporarily ignore this log until Detections is more stable
for log_file in $(cat /tmp/log_check_files); do for log_file in $(cat /tmp/log_check_files); do
status "Checking log file $log_file" status "Checking log file $log_file"

View File

@@ -180,6 +180,8 @@ docker:
     custom_bind_mounts: []
     extra_hosts: []
     extra_env: []
+    ulimits:
+      - memlock=524288000
   'so-zeek':
     final_octet: 99
     custom_bind_mounts: []
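
The new memlock default of 524288000 bytes (~500 MB) is the amount of memory the container may lock into RAM. A rough Docker CLI equivalent, for comparison only (soft and hard limits assumed equal):

docker run --ulimit memlock=524288000:524288000 ...
docker inspect so-suricata --format '{{ .HostConfig.Ulimits }}'   # verify on a running container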

View File

@@ -63,6 +63,42 @@ docker:
   so-elastic-agent: *dockerOptions
   so-telegraf: *dockerOptions
   so-steno: *dockerOptions
-  so-suricata: *dockerOptions
+  so-suricata:
+    final_octet:
+      description: Last octet of the container IP address.
+      helpLink: docker.html
+      readonly: True
+      advanced: True
+      global: True
+    port_bindings:
+      description: List of port bindings for the container.
+      helpLink: docker.html
+      advanced: True
+      multiline: True
+      forcedType: "[]string"
+    custom_bind_mounts:
+      description: List of custom local volume bindings.
+      advanced: True
+      helpLink: docker.html
+      multiline: True
+      forcedType: "[]string"
+    extra_hosts:
+      description: List of additional host entries for the container.
+      advanced: True
+      helpLink: docker.html
+      multiline: True
+      forcedType: "[]string"
+    extra_env:
+      description: List of additional ENV entries for the container.
+      advanced: True
+      helpLink: docker.html
+      multiline: True
+      forcedType: "[]string"
+    ulimits:
+      description: Ulimits for the container, in bytes.
+      advanced: True
+      helpLink: docker.html
+      multiline: True
+      forcedType: "[]string"
   so-zeek: *dockerOptions
   so-kafka: *dockerOptions

View File

@@ -1,38 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
from time import gmtime, strftime
import requests,json
from elastalert.alerts import Alerter
import urllib3

urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

class PlaybookESAlerter(Alerter):
    """
    Use matched data to create alerts in elasticsearch
    """
    required_options = set(['play_title','play_url','sigma_level'])

    def alert(self, matches):
        for match in matches:
            today = strftime("%Y.%m.%d", gmtime())
            timestamp = strftime("%Y-%m-%d"'T'"%H:%M:%S"'.000Z', gmtime())
            headers = {"Content-Type": "application/json"}
            creds = None
            if 'es_username' in self.rule and 'es_password' in self.rule:
                creds = (self.rule['es_username'], self.rule['es_password'])
            payload = {"tags":"alert","rule": { "name": self.rule['play_title'],"case_template": self.rule['play_id'],"uuid": self.rule['play_id'],"category": self.rule['rule.category']},"event":{ "severity": self.rule['event.severity'],"module": self.rule['event.module'],"dataset": self.rule['event.dataset'],"severity_label": self.rule['sigma_level']},"kibana_pivot": self.rule['kibana_pivot'],"soc_pivot": self.rule['soc_pivot'],"play_url": self.rule['play_url'],"sigma_level": self.rule['sigma_level'],"event_data": match, "@timestamp": timestamp}
            url = f"https://{self.rule['es_host']}:{self.rule['es_port']}/logs-playbook.alerts-so/_doc/"
            requests.post(url, data=json.dumps(payload), headers=headers, verify=False, auth=creds)

    def get_info(self):
        return {'type': 'PlaybookESAlerter'}

View File

@@ -0,0 +1,63 @@
# -*- coding: utf-8 -*-
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
from time import gmtime, strftime
import requests,json
from elastalert.alerts import Alerter
import urllib3

urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

class SecurityOnionESAlerter(Alerter):
    """
    Use matched data to create alerts in Elasticsearch.
    """
    required_options = set(['detection_title', 'sigma_level'])
    optional_fields = ['sigma_category', 'sigma_product', 'sigma_service']

    def alert(self, matches):
        for match in matches:
            timestamp = strftime("%Y-%m-%d"'T'"%H:%M:%S"'.000Z', gmtime())
            headers = {"Content-Type": "application/json"}
            creds = None
            if 'es_username' in self.rule and 'es_password' in self.rule:
                creds = (self.rule['es_username'], self.rule['es_password'])
            # Start building the rule dict
            rule_info = {
                "name": self.rule['detection_title'],
                "uuid": self.rule['detection_public_id']
            }
            # Add optional fields if they are present in the rule
            for field in self.optional_fields:
                rule_key = field.split('_')[-1]  # Assumes field format "sigma_<key>"
                if field in self.rule:
                    rule_info[rule_key] = self.rule[field]
            # Construct the payload with the conditional rule_info
            payload = {
                "tags": "alert",
                "rule": rule_info,
                "event": {
                    "severity": self.rule['event.severity'],
                    "module": self.rule['event.module'],
                    "dataset": self.rule['event.dataset'],
                    "severity_label": self.rule['sigma_level']
                },
                "sigma_level": self.rule['sigma_level'],
                "event_data": match,
                "@timestamp": timestamp
            }
            url = f"https://{self.rule['es_host']}:{self.rule['es_port']}/logs-playbook.alerts-so/_doc/"
            requests.post(url, data=json.dumps(payload), headers=headers, verify=False, auth=creds)

    def get_info(self):
        return {'type': 'SecurityOnionESAlerter'}
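
Each match handled by SecurityOnionESAlerter becomes one document in the logs-playbook.alerts-so index. A minimal sketch for confirming alerts are landing, reusing the so-elasticsearch-query helper that appears elsewhere in this diff:

# Count alert documents written in the last hour
/usr/sbin/so-elasticsearch-query 'logs-playbook.alerts-so/_count' -d '{"query":{"range":{"@timestamp":{"gte":"now-1h"}}}}'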

View File

@@ -118,3 +118,8 @@ elasticfleet:
       base_url: https://api.platform.sublimesecurity.com
       poll_interval: 5m
       limit: 100
+    kismet:
+      base_url: http://localhost:2501
+      poll_interval: 1m
+      api_key:
+      enabled_nodes: []

View File

@@ -0,0 +1,36 @@
{% from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED %}
{% raw %}
{
"package": {
"name": "httpjson",
"version": ""
},
"name": "kismet-logs",
"namespace": "so",
"description": "Kismet Logs",
"policy_id": "FleetServer_{% endraw %}{{ NAME }}{% raw %}",
"inputs": {
"generic-httpjson": {
"enabled": true,
"streams": {
"httpjson.generic": {
"enabled": true,
"vars": {
"data_stream.dataset": "kismet",
"request_url": "{% endraw %}{{ ELASTICFLEETMERGED.optional_integrations.kismet.base_url }}{% raw %}/devices/last-time/-600/devices.tjson",
"request_interval": "{% endraw %}{{ ELASTICFLEETMERGED.optional_integrations.kismet.poll_interval }}{% raw %}",
"request_method": "GET",
"request_transforms": "- set:\r\n target: header.Cookie\r\n value: 'KISMET={% endraw %}{{ ELASTICFLEETMERGED.optional_integrations.kismet.api_key }}{% raw %}'",
"request_redirect_headers_ban_list": [],
"oauth_scopes": [],
"processors": "",
"tags": [],
"pipeline": "kismet.common"
}
}
}
}
},
"force": true
}
{% endraw %}
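
The request_url and request_transforms above amount to a cookie-authenticated poll of Kismet's REST API, where -600 asks for devices active within the last 600 seconds. A hand-rolled equivalent for testing connectivity (a sketch; KISMET_API_KEY is a placeholder for the configured key, and the default localhost base URL is assumed):

curl -s --cookie "KISMET=${KISMET_API_KEY}" \
  "http://localhost:2501/devices/last-time/-600/devices.tjson"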

View File

@@ -0,0 +1,35 @@
{
"policy_id": "so-grid-nodes_general",
"package": {
"name": "log",
"version": ""
},
"name": "soc-detections-logs",
"description": "Security Onion Console - Detections Logs",
"namespace": "so",
"inputs": {
"logs-logfile": {
"enabled": true,
"streams": {
"log.logs": {
"enabled": true,
"vars": {
"paths": [
"/opt/so/log/soc/detections_runtime-status_sigma.log",
"/opt/so/log/soc/detections_runtime-status_yara.log"
],
"exclude_files": [],
"ignore_older": "72h",
"data_stream.dataset": "soc",
"tags": [
"so-soc"
],
"processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"soc\"\n process_array: true\n max_depth: 2\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: detections\n- rename:\n fields:\n - from: \"soc.fields.sourceIp\"\n to: \"source.ip\"\n - from: \"soc.fields.status\"\n to: \"http.response.status_code\"\n - from: \"soc.fields.method\"\n to: \"http.request.method\"\n - from: \"soc.fields.path\"\n to: \"url.path\"\n - from: \"soc.message\"\n to: \"event.action\"\n - from: \"soc.level\"\n to: \"log.level\"\n ignore_missing: true",
"custom": "pipeline: common"
}
}
}
}
},
"force": true
}
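
Each line of these SOC logs is a JSON object in the message field; the decode_json_fields processor above unpacks it under soc before the renames run. To preview what the integration will ship (a sketch, run on the node holding the logs):

tail -n 1 /opt/so/log/soc/detections_runtime-status_sigma.log | jq .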

View File

@@ -79,3 +79,29 @@ elasticfleet:
       helpLink: elastic-fleet.html
       advanced: True
       forcedType: int
+    kismet:
+      base_url:
+        description: Base URL for Kismet.
+        global: True
+        helpLink: elastic-fleet.html
+        advanced: True
+        forcedType: string
+      poll_interval:
+        description: Poll interval for wireless device data from Kismet. Integration is currently configured to return devices seen as active by any Kismet sensor within the last 10 minutes.
+        global: True
+        helpLink: elastic-fleet.html
+        advanced: True
+        forcedType: string
+      api_key:
+        description: API key for Kismet.
+        global: True
+        helpLink: elastic-fleet.html
+        advanced: True
+        forcedType: string
+        sensitive: True
+      enabled_nodes:
+        description: Fleet nodes with the Kismet integration enabled. Enter one per line.
+        global: True
+        helpLink: elastic-fleet.html
+        advanced: True
+        forcedType: "[]string"

File diff suppressed because it is too large

View File

@@ -200,9 +200,15 @@ so-elasticsearch-roles-load:
   - require:
     - docker_container: so-elasticsearch
     - file: elasticsearch_sbin_jinja

 {% if grains.role in ['so-eval', 'so-standalone', 'so-managersearch', 'so-heavynode', 'so-manager'] %}
+{% if ELASTICSEARCHMERGED.index_clean %}
+{% set ap = "present" %}
+{% else %}
+{% set ap = "absent" %}
+{% endif %}
 so-elasticsearch-indices-delete:
-  cron.present:
+  cron.{{ap}}:
   - name: /usr/sbin/so-elasticsearch-indices-delete > /opt/so/log/elasticsearch/cron-elasticsearch-indices-delete.log 2>&1
   - identifier: so-elasticsearch-indices-delete
   - user: root

@@ -212,6 +218,7 @@ so-elasticsearch-indices-delete:
   - month: '*'
   - dayweek: '*'
 {% endif %}
+{% endif %}
 {% else %}
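
With this change, setting elasticsearch.index_clean to false renders the state as cron.absent, which actively removes the scheduled job rather than just skipping it. One way to confirm the result after a highstate (a sketch):

crontab -l -u root | grep -B1 -A1 so-elasticsearch-indices-delete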

View File

@@ -80,7 +80,7 @@
{ "set": { "if": "ctx.network?.type == 'ipv6'", "override": true, "field": "destination.ipv6", "value": "true" } }, { "set": { "if": "ctx.network?.type == 'ipv6'", "override": true, "field": "destination.ipv6", "value": "true" } },
{ "set": { "if": "ctx.tags.0 == 'import'", "override": true, "field": "data_stream.dataset", "value": "import" } }, { "set": { "if": "ctx.tags.0 == 'import'", "override": true, "field": "data_stream.dataset", "value": "import" } },
{ "set": { "if": "ctx.tags.0 == 'import'", "override": true, "field": "data_stream.namespace", "value": "so" } }, { "set": { "if": "ctx.tags.0 == 'import'", "override": true, "field": "data_stream.namespace", "value": "so" } },
{ "date": { "if": "ctx.event?.module == 'system'", "field": "event.created", "target_field": "@timestamp", "formats": ["yyyy-MM-dd'T'HH:mm:ss.SSSSSS'Z'"] } }, { "date": { "if": "ctx.event?.module == 'system'", "field": "event.created", "target_field": "@timestamp","ignore_failure": true, "formats": ["yyyy-MM-dd'T'HH:mm:ss.SSSX","yyyy-MM-dd'T'HH:mm:ss.SSSSSS'Z'"] } },
{ "community_id":{ "if": "ctx.event?.dataset == 'endpoint.events.network'", "ignore_failure":true } }, { "community_id":{ "if": "ctx.event?.dataset == 'endpoint.events.network'", "ignore_failure":true } },
{ "set": { "if": "ctx.event?.module == 'fim'", "override": true, "field": "event.module", "value": "file_integrity" } }, { "set": { "if": "ctx.event?.module == 'fim'", "override": true, "field": "event.module", "value": "file_integrity" } },
{ "rename": { "if": "ctx.winlog?.provider_name == 'Microsoft-Windows-Windows Defender'", "ignore_missing": true, "field": "winlog.event_data.Threat Name", "target_field": "winlog.event_data.threat_name" } }, { "rename": { "if": "ctx.winlog?.provider_name == 'Microsoft-Windows-Windows Defender'", "ignore_missing": true, "field": "winlog.event_data.Threat Name", "target_field": "winlog.event_data.threat_name" } },

View File

@@ -0,0 +1,10 @@
{
"processors": [
{
"rename": {
"field": "message2.kismet_device_base_macaddr",
"target_field": "network.wireless.bssid"
}
}
]
}

View File

@@ -0,0 +1,50 @@
{
"processors": [
{
"rename": {
"field": "message2.dot11_device.dot11_device_last_beaconed_ssid_record.dot11_advertisedssid_cloaked",
"target_field": "network.wireless.ssid_cloaked",
"if": "ctx?.message2?.dot11_device?.dot11_device_last_beaconed_ssid_record?.dot11_advertisedssid_cloaked != null"
}
},
{
"rename": {
"field": "message2.dot11_device.dot11_device_last_beaconed_ssid_record.dot11_advertisedssid_ssid",
"target_field": "network.wireless.ssid",
"if": "ctx?.message2?.dot11_device?.dot11_device_last_beaconed_ssid_record?.dot11_advertisedssid_ssid != null"
}
},
{
"set": {
"field": "network.wireless.ssid",
"value": "Hidden",
"if": "ctx?.network?.wireless?.ssid_cloaked != null && ctx?.network?.wireless?.ssid_cloaked == 1"
}
},
{
"rename": {
"field": "message2.dot11_device.dot11_device_last_beaconed_ssid_record.dot11_advertisedssid_dot11e_channel_utilization_perc",
"target_field": "network.wireless.channel_utilization",
"if": "ctx?.message2?.dot11_device?.dot11_device_last_beaconed_ssid_record?.dot11_advertisedssid_dot11e_channel_utilization_perc != null"
}
},
{
"rename": {
"field": "message2.dot11_device.dot11_device_last_bssid",
"target_field": "network.wireless.bssid"
}
},
{
"foreach": {
"field": "message2.dot11_device.dot11_device_associated_client_map",
"processor": {
"append": {
"field": "network.wireless.associated_clients",
"value": "{{_ingest._key}}"
}
},
"if": "ctx?.message2?.dot11_device?.dot11_device_associated_client_map != null"
}
}
]
}

View File

@@ -0,0 +1,16 @@
{
"processors": [
{
"rename": {
"field": "message2.kismet_device_base_macaddr",
"target_field": "client.mac"
}
},
{
"rename": {
"field": "message2.dot11_device.dot11_device_last_bssid",
"target_field": "network.wireless.bssid"
}
}
]
}

View File

@@ -0,0 +1,29 @@
{
"processors": [
{
"rename": {
"field": "message2.kismet_device_base_macaddr",
"target_field": "client.mac"
}
},
{
"rename": {
"field": "message2.dot11_device.dot11_device_last_bssid",
"target_field": "network.wireless.last_connected_bssid",
"if": "ctx?.message2?.dot11_device?.dot11_device_last_bssid != null"
}
},
{
"foreach": {
"field": "message2.dot11_device.dot11_device_client_map",
"processor": {
"append": {
"field": "network.wireless.known_connected_bssid",
"value": "{{_ingest._key}}"
}
},
"if": "ctx?.message2?.dot11_device?.dot11_device_client_map != null"
}
}
]
}

View File

@@ -0,0 +1,159 @@
{
"processors": [
{
"json": {
"field": "message",
"target_field": "message2"
}
},
{
"date": {
"field": "message2.kismet_device_base_mod_time",
"formats": [
"epoch_second"
],
"target_field": "@timestamp"
}
},
{
"set": {
"field": "event.category",
"value": "network"
}
},
{
"dissect": {
"field": "message2.kismet_device_base_type",
"pattern": "%{wifi} %{device_type}"
}
},
{
"lowercase": {
"field": "device_type"
}
},
{
"set": {
"field": "event.dataset",
"value": "kismet.{{device_type}}"
}
},
{
"set": {
"field": "event.dataset",
"value": "kismet.wds_ap",
"if": "ctx?.device_type == 'wds ap'"
}
},
{
"set": {
"field": "event.dataset",
"value": "kismet.ad_hoc",
"if": "ctx?.device_type == 'ad-hoc'"
}
},
{
"set": {
"field": "event.module",
"value": "kismet"
}
},
{
"rename": {
"field": "message2.kismet_device_base_packets_tx_total",
"target_field": "source.packets"
}
},
{
"rename": {
"field": "message2.kismet_device_base_num_alerts",
"target_field": "kismet.alerts.count"
}
},
{
"rename": {
"field": "message2.kismet_device_base_channel",
"target_field": "network.wireless.channel",
"if": "ctx?.message2?.kismet_device_base_channel != ''"
}
},
{
"rename": {
"field": "message2.kismet_device_base_frequency",
"target_field": "network.wireless.frequency",
"if": "ctx?.message2?.kismet_device_base_frequency != 0"
}
},
{
"rename": {
"field": "message2.kismet_device_base_last_time",
"target_field": "kismet.last_seen"
}
},
{
"date": {
"field": "kismet.last_seen",
"formats": [
"epoch_second"
],
"target_field": "kismet.last_seen"
}
},
{
"rename": {
"field": "message2.kismet_device_base_first_time",
"target_field": "kismet.first_seen"
}
},
{
"date": {
"field": "kismet.first_seen",
"formats": [
"epoch_second"
],
"target_field": "kismet.first_seen"
}
},
{
"rename": {
"field": "message2.kismet_device_base_seenby",
"target_field": "kismet.seenby"
}
},
{
"foreach": {
"field": "kismet.seenby",
"processor": {
"pipeline": {
"name": "kismet.seenby"
}
}
}
},
{
"rename": {
"field": "message2.kismet_device_base_manuf",
"target_field": "device.manufacturer"
}
},
{
"pipeline": {
"name": "{{event.dataset}}"
}
},
{
"remove": {
"field": [
"message2",
"message",
"device_type",
"wifi",
"agent",
"host",
"event.created"
],
"ignore_failure": true
}
}
]
}

View File

@@ -0,0 +1,9 @@
{
"processors": [
{
"pipeline": {
"name": "kismet.client"
}
}
]
}

View File

@@ -0,0 +1,52 @@
{
"processors": [
{
"rename": {
"field": "_ingest._value.kismet_common_seenby_num_packets",
"target_field": "_ingest._value.packets_seen",
"ignore_missing": true
}
},
{
"rename": {
"field": "_ingest._value.kismet_common_seenby_uuid",
"target_field": "_ingest._value.serial_number",
"ignore_missing": true
}
},
{
"rename": {
"field": "_ingest._value.kismet_common_seenby_first_time",
"target_field": "_ingest._value.first_seen",
"ignore_missing": true
}
},
{
"rename": {
"field": "_ingest._value.kismet_common_seenby_last_time",
"target_field": "_ingest._value.last_seen",
"ignore_missing": true
}
},
{
"date": {
"field": "_ingest._value.first_seen",
"formats": [
"epoch_second"
],
"target_field": "_ingest._value.first_seen",
"ignore_failure": true
}
},
{
"date": {
"field": "_ingest._value.last_seen",
"formats": [
"epoch_second"
],
"target_field": "_ingest._value.last_seen",
"ignore_failure": true
}
}
]
}

View File

@@ -0,0 +1,10 @@
{
"processors": [
{
"rename": {
"field": "message2.kismet_device_base_macaddr",
"target_field": "client.mac"
}
}
]
}
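
These single-purpose rename pipelines are easy to exercise with the ingest simulate API. A sketch, assuming this pipeline is loaded under the kismet.client name referenced by the wrapper pipeline above (the MAC address is made up):

/usr/sbin/so-elasticsearch-query '_ingest/pipeline/kismet.client/_simulate' -d '{"docs":[{"_source":{"message2":{"kismet_device_base_macaddr":"AA:BB:CC:DD:EE:FF"}}}]}'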

View File

@@ -0,0 +1,22 @@
{
"processors": [
{
"rename": {
"field": "message2.kismet_device_base_commonname",
"target_field": "network.wireless.bssid"
}
},
{
"foreach": {
"field": "message2.dot11_device.dot11_device_associated_client_map",
"processor": {
"append": {
"field": "network.wireless.associated_clients",
"value": "{{_ingest._key}}"
}
},
"if": "ctx?.message2?.dot11_device?.dot11_device_associated_client_map != null"
}
}
]
}

View File

@@ -27,7 +27,8 @@
"monitor", "monitor",
"read", "read",
"read_cross_cluster", "read_cross_cluster",
"view_index_metadata" "view_index_metadata",
"write"
] ]
} }
], ],

View File

@@ -13,7 +13,8 @@
"monitor", "monitor",
"read", "read",
"read_cross_cluster", "read_cross_cluster",
"view_index_metadata" "view_index_metadata",
"write"
] ]
} }
], ],

View File

@@ -5,6 +5,10 @@ elasticsearch:
   esheap:
     description: Specify the memory heap size in (m)egabytes for Elasticsearch.
     helpLink: elasticsearch.html
+  index_clean:
+    description: Determines if indices should be considered for deletion by available disk space in the cluster. Otherwise, indices will only be deleted by the age defined in the ILM settings.
+    forcedType: bool
+    helpLink: elasticsearch.html
   retention:
     retention_pct:
       decription: Total percentage of space used by Elasticsearch for multi node clusters
@@ -98,10 +102,6 @@ elasticsearch:
       policy:
         phases:
           hot:
-            max_age:
-              description: Maximum age of index. ex. 7d - This determines when the index should be moved out of the hot tier.
-              global: True
-              helpLink: elasticsearch.html
             actions:
               set_priority:
                 priority:
@@ -120,7 +120,9 @@ elasticsearch:
               helpLink: elasticsearch.html
           cold:
             min_age:
-              description: Minimum age of index. ex. 30d - This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed.
+              description: Minimum age of index. ex. 60d - This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed.
+              regex: ^[0-9]{1,5}d$
+              forcedType: string
               global: True
               helpLink: elasticsearch.html
             actions:
@@ -131,8 +133,8 @@ elasticsearch:
               helpLink: elasticsearch.html
           warm:
             min_age:
-              description: Minimum age of index. ex. 30d - This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed.
-              regex: ^\[0-9\]{1,5}d$
+              description: Minimum age of index. ex. 30d - This determines when the index should be moved to the warm tier. Nodes in the warm tier generally don't need to be as fast as those in the hot tier.
+              regex: ^[0-9]{1,5}d$
               forcedType: string
               global: True
             actions:
@@ -145,6 +147,8 @@ elasticsearch:
           delete:
             min_age:
               description: Minimum age of index. ex. 90d - This determines when the index should be deleted.
+              regex: ^[0-9]{1,5}d$
+              forcedType: string
               global: True
               helpLink: elasticsearch.html
   so-logs: &indexSettings
@@ -271,7 +275,9 @@ elasticsearch:
               helpLink: elasticsearch.html
           warm:
             min_age:
-              description: Minimum age of index. This determines when the index should be moved to the hot tier.
+              description: Minimum age of index. ex. 30d - This determines when the index should be moved to the warm tier. Nodes in the warm tier generally don't need to be as fast as those in the hot tier.
+              regex: ^[0-9]{1,5}d$
+              forcedType: string
               global: True
               advanced: True
               helpLink: elasticsearch.html
@@ -296,7 +302,9 @@ elasticsearch:
               helpLink: elasticsearch.html
           cold:
             min_age:
-              description: Minimum age of index. This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed.
+              description: Minimum age of index. ex. 60d - This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed.
+              regex: ^[0-9]{1,5}d$
+              forcedType: string
               global: True
               advanced: True
               helpLink: elasticsearch.html
@@ -311,6 +319,8 @@ elasticsearch:
           delete:
             min_age:
               description: Minimum age of index. This determines when the index should be deleted.
+              regex: ^[0-9]{1,5}d$
+              forcedType: string
               global: True
               advanced: True
               helpLink: elasticsearch.html
@@ -512,6 +522,7 @@ elasticsearch:
   so-suricata: *indexSettings
   so-import: *indexSettings
   so-kratos: *indexSettings
+  so-kismet: *indexSettings
   so-logstash: *indexSettings
   so-redis: *indexSettings
   so-strelka: *indexSettings
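
The regex added throughout these min_age settings accepts values like 30d or 365d and rejects anything without the day suffix. The same check, reproduced with grep for illustration:

echo "30d" | grep -qE '^[0-9]{1,5}d$' && echo valid
echo "30"  | grep -qE '^[0-9]{1,5}d$' || echo invalid   # missing the d suffix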

View File

@@ -0,0 +1,36 @@
{
"_meta": {
"documentation": "https://www.elastic.co/guide/en/ecs/current/ecs-device.html",
"ecs_version": "1.12.2"
},
"template": {
"mappings": {
"properties": {
"device": {
"properties": {
"id": {
"ignore_above": 1024,
"type": "keyword"
},
"manufacturer": {
"ignore_above": 1024,
"type": "keyword"
},
"model": {
"properties": {
"identifier": {
"ignore_above": 1024,
"type": "keyword"
},
"name": {
"ignore_above": 1024,
"type": "keyword"
}
}
}
}
}
}
}
}
}

View File

@@ -0,0 +1,32 @@
{
"_meta": {
"documentation": "https://www.elastic.co/guide/en/ecs/current/ecs-base.html",
"ecs_version": "1.12.2"
},
"template": {
"mappings": {
"properties": {
"kismet": {
"properties": {
"alerts": {
"properties": {
"count": {
"type": "long"
}
}
},
"first_seen": {
"type": "date"
},
"last_seen": {
"type": "date"
},
"seenby": {
"type": "nested"
}
}
}
}
}
}
}

View File

@@ -77,6 +77,43 @@
"type": "keyword" "type": "keyword"
} }
} }
},
"wireless": {
"properties": {
"associated_clients": {
"ignore_above": 1024,
"type": "keyword"
},
"bssid": {
"ignore_above": 1024,
"type": "keyword"
},
"channel": {
"ignore_above": 1024,
"type": "keyword"
},
"channel_utilization": {
"type": "float"
},
"frequency": {
"type": "double"
},
"ssid": {
"ignore_above": 1024,
"type": "keyword"
},
"ssid_cloaked": {
"type": "integer"
},
"known_connected_bssid": {
"ignore_above": 1024,
"type": "keyword"
},
"last_connected_bssid": {
"ignore_above": 1024,
"type": "keyword"
}
}
} }
} }
} }

View File

@@ -20,10 +20,12 @@
"so_detection": { "so_detection": {
"properties": { "properties": {
"publicId": { "publicId": {
"type": "text" "ignore_above": 1024,
"type": "keyword"
}, },
"title": { "title": {
"type": "text" "ignore_above": 1024,
"type": "keyword"
}, },
"severity": { "severity": {
"ignore_above": 1024, "ignore_above": 1024,
@@ -36,6 +38,18 @@
"description": { "description": {
"type": "text" "type": "text"
}, },
"category": {
"ignore_above": 1024,
"type": "keyword"
},
"product": {
"ignore_above": 1024,
"type": "keyword"
},
"service": {
"ignore_above": 1024,
"type": "keyword"
},
"content": { "content": {
"type": "text" "type": "text"
}, },
@@ -49,7 +63,8 @@
"type": "boolean" "type": "boolean"
}, },
"tags": { "tags": {
"type": "text" "ignore_above": 1024,
"type": "keyword"
}, },
"ruleset": { "ruleset": {
"ignore_above": 1024, "ignore_above": 1024,

View File

@@ -40,7 +40,7 @@ fi
 # Iterate through the output of _cat/allocation for each node in the cluster to determine the total available space
 {% if GLOBALS.role == 'so-manager' %}
-for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | grep -v {{ GLOBALS.manager }} | awk '{print $5}'); do
+for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | grep -v "{{ GLOBALS.manager }}$" | awk '{print $5}'); do
 {% else %}
 for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | awk '{print $5}'); do
 {% endif %}
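
Anchoring the grep with a trailing $ matters because the unanchored pattern would also exclude any node whose name merely begins with the manager's name. A standalone illustration:

printf 'manager\nmanager2\n' | grep -v manager      # drops both nodes
printf 'manager\nmanager2\n' | grep -v "manager$"   # drops only the manager itself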

View File

@@ -13,7 +13,7 @@ TOTAL_USED_SPACE=0
 # Iterate through the output of _cat/allocation for each node in the cluster to determine the total used space
 {% if GLOBALS.role == 'so-manager' %}
 # Get total disk space - disk.total
-for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | grep -v {{ GLOBALS.manager }} | awk '{print $3}'); do
+for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | grep -v "{{ GLOBALS.manager }}$" | awk '{print $3}'); do
 {% else %}
 # Get disk space taken up by indices - disk.indices
 for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | awk '{print $2}'); do
 {% endif %}

View File

@@ -27,6 +27,7 @@ overlimit() {
 # 2. Check if the maximum number of iterations - MAX_ITERATIONS - has been exceeded. If so, exit.
 # Closed indices will be deleted first. If we are able to bring disk space under LOG_SIZE_LIMIT, or the number of iterations has exceeded the maximum allowed number of iterations, we will break out of the loop.
 while overlimit && [[ $ITERATION -lt $MAX_ITERATIONS ]]; do
+  # If we can't query Elasticsearch, then immediately return false.
@@ -34,10 +35,17 @@ while overlimit && [[ $ITERATION -lt $MAX_ITERATIONS ]]; do
   [ $? -eq 1 ] && echo "$(date) - Could not query Elasticsearch." >> ${LOG} && exit
   # We iterate through the closed and open indices
-  CLOSED_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep 'close$' | awk '{print $1}' | grep -vE "playbook|so-case" | grep -E "(logstash-|so-|.ds-logs-)" | sort -t- -k3)
-  OPEN_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep 'open$' | awk '{print $1}' | grep -vE "playbook|so-case" | grep -E "(logstash-|so-|.ds-logs-)" | sort -t- -k3)
-  for INDEX in ${CLOSED_INDICES} ${OPEN_INDICES}; do
+  CLOSED_SO_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep 'close$' | awk '{print $1}' | grep -E "(^logstash-.*|^so-.*)" | grep -vE "so-case|so-detection" | sort -t- -k3)
+  CLOSED_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep 'close$' | awk '{print $1}' | grep -E "^.ds-logs-.*" | grep -v "suricata" | sort -t- -k4)
+  OPEN_SO_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep 'open$' | awk '{print $1}' | grep -E "(^logstash-.*|^so-.*)" | grep -vE "so-case|so-detection" | sort -t- -k3)
+  OPEN_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep 'open$' | awk '{print $1}' | grep -E "^.ds-logs-.*" | grep -v "suricata" | sort -t- -k4)
+  for INDEX in ${CLOSED_SO_INDICES} ${OPEN_SO_INDICES} ${CLOSED_INDICES} ${OPEN_INDICES}; do
+    # Check if index is an older index. If it is an older index, delete it before moving on to newer indices.
+    if [[ "$INDEX" =~ "^logstash-.*|so-.*" ]]; then
+      printf "\n$(date) - Used disk space exceeds LOG_SIZE_LIMIT (${LOG_SIZE_LIMIT_GB} GB) - Deleting ${INDEX} index...\n" >> ${LOG}
+      /usr/sbin/so-elasticsearch-query ${INDEX} -XDELETE >> ${LOG} 2>&1
+    else
       # Now that we've sorted the indices from oldest to newest, we need to check each index to see if it is assigned as the current write index for a data stream
       # To do so, we need to identify to which data stream this index is associated
       # We extract the data stream name using the pattern below
@@ -57,6 +65,7 @@ while overlimit && [[ $ITERATION -lt $MAX_ITERATIONS ]]; do
         printf "\n$(date) - Used disk space exceeds LOG_SIZE_LIMIT (${LOG_SIZE_LIMIT_GB} GB) - There is only one backing index (${INDEX}). Deleting ${DATASTREAM} data stream...\n" >> ${LOG}
         /usr/sbin/so-elasticsearch-query _data_stream/$DATASTREAM -XDELETE >> ${LOG} 2>&1
       fi
+    fi
     if ! overlimit ; then
       exit
     fi
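
Before the loop deletes a backing index it must know whether that index is the data stream's current write index. A sketch of the kind of lookup involved, using an assumed data stream name:

/usr/sbin/so-elasticsearch-query '_data_stream/logs-soc-so' | jq '.data_streams[].indices'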

View File

@@ -7,6 +7,7 @@ firewall:
     multiline: True
     regex: ^(([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))?)?$
     regexFailureMessage: You must enter a valid IP address or CIDR.
+    duplicates: True
   anywhere: &hostgroupsettingsadv
     description: List of IP or CIDR blocks to allow access to this hostgroup.
     forcedType: "[]string"
@@ -15,6 +16,7 @@ firewall:
     advanced: True
     regex: ^(([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))?)?$
     regexFailureMessage: You must enter a valid IP address or CIDR.
+    duplicates: True
   beats_endpoint: *hostgroupsettings
   beats_endpoint_ssl: *hostgroupsettings
   dockernet: &ROhostgroupsettingsadv
@@ -53,6 +55,7 @@ firewall:
     multiline: True
     regex: ^(([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))?)?$
     regexFailureMessage: You must enter a valid IP address or CIDR.
+    duplicates: True
   customhostgroup1: *customhostgroupsettings
   customhostgroup2: *customhostgroupsettings
   customhostgroup3: *customhostgroupsettings
@@ -70,12 +73,14 @@ firewall:
       helpLink: firewall.html
       advanced: True
       multiline: True
+      duplicates: True
     udp: &udpsettings
       description: List of UDP ports for this port group.
       forcedType: "[]string"
       helpLink: firewall.html
       advanced: True
       multiline: True
+      duplicates: True
   agrules:
     tcp: *tcpsettings
     udp: *udpsettings
@@ -190,6 +195,7 @@ firewall:
       multiline: True
       forcedType: "[]string"
       helpLink: firewall.html
+      duplicates: True
   sensor:
     portgroups: *portgroupsdocker
   searchnode:
@@ -243,6 +249,7 @@ firewall:
       multiline: True
       forcedType: "[]string"
       helpLink: firewall.html
+      duplicates: True
   dockernet:
     portgroups: *portgroupshost
   localhost:

View File

@@ -9,7 +9,7 @@ idstools:
   forcedType: string
   helpLink: rules.html
   ruleset:
-    description: 'Defines the ruleset you want to run. Options are ETOPEN or ETPRO. WARNING! Changing the ruleset will remove all existing Suricata rules of the previous ruleset and their associated overrides. This removal cannot be undone.'
+    description: 'Defines the ruleset you want to run. Options are ETOPEN or ETPRO. Once you have changed the ruleset here, you will need to wait for the rule update to take place (every 8 hours), or you can force the update by navigating to Detections --> Options dropdown menu --> Suricata --> Full Update. WARNING! Changing the ruleset will remove all existing Suricata rules of the previous ruleset and their associated overrides. This removal cannot be undone.'
     global: True
     regex: ETPRO\b|ETOPEN\b
     helpLink: rules.html
@@ -19,33 +19,40 @@ idstools:
     helpLink: rules.html
   sids:
     disabled:
-      description: Contains the list of NIDS rules manually disabled across the grid. To disable a rule, add its Signature ID (SID) to the Current Grid Value box, one entry per line. To disable multiple rules, you can use regular expressions.
+      description: Contains the list of NIDS rules (or regex patterns) disabled across the grid. This setting is readonly; use the Detections screen to disable rules.
       global: True
       multiline: True
       forcedType: "[]string"
       regex: \d*|re:.*
       helpLink: managing-alerts.html
+      readonlyUi: True
+      advanced: true
     enabled:
-      description: Contains the list of NIDS rules manually enabled across the grid. To enable a rule, add its Signature ID (SID) to the Current Grid Value box, one entry per line. To enable multiple rules, you can use regular expressions.
+      description: Contains the list of NIDS rules (or regex patterns) enabled across the grid. This setting is readonly; use the Detections screen to enable rules.
       global: True
       multiline: True
       forcedType: "[]string"
       regex: \d*|re:.*
       helpLink: managing-alerts.html
+      readonlyUi: True
+      advanced: true
     modify:
-      description: Contains the list of NIDS rules that were modified from their default values. Entries must adhere to the following format - SID "REGEX_SEARCH_TERM" "REGEX_REPLACE_TERM"
+      description: Contains the list of NIDS rules (SID "REGEX_SEARCH_TERM" "REGEX_REPLACE_TERM"). This setting is readonly; use the Detections screen to modify rules.
       global: True
       multiline: True
       forcedType: "[]string"
       helpLink: managing-alerts.html
+      readonlyUi: True
+      advanced: true
   rules:
     local__rules:
-      description: Contains the list of custom NIDS rules applied to the grid. To add custom NIDS rules to the grid, enter one rule per line in the Current Grid Value box.
+      description: Contains the list of custom NIDS rules applied to the grid. This setting is readonly; use the Detections screen to adjust rules.
       file: True
       global: True
       advanced: True
       title: Local Rules
       helpLink: local-rules.html
+      readonlyUi: True
     filters__rules:
       description: If you are using Suricata for metadata, then you can set custom filters for that metadata here.
       file: True

View File

@@ -10,6 +10,7 @@ logstash:
       helpLink: logstash.html
       multiline: True
       forcedType: "[]string"
+      duplicates: True
     receiver: *assigned_pipelines
     heavynode: *assigned_pipelines
     searchnode: *assigned_pipelines
@@ -23,6 +24,7 @@ logstash:
       helpLink: logstash.html
       multiline: True
       forcedType: "[]string"
+      duplicates: True
     fleet: *defined_pipelines
     manager: *defined_pipelines
     search: *defined_pipelines
@@ -38,6 +40,7 @@ logstash:
       multiline: True
       forcedType: string
       helpLink: logstash.html
+      duplicates: True
     custom002: *pipeline_config
     custom003: *pipeline_config
     custom004: *pipeline_config

View File

@@ -80,6 +80,17 @@ socmotd:
     - mode: 600
     - template: jinja

+crondetectionsruntime:
+  cron.present:
+    - name: /usr/local/bin/so-detections-runtime-status cron
+    - identifier: detections-runtime-status
+    - user: root
+    - minute: '*/10'
+    - hour: '*'
+    - daymonth: '*'
+    - month: '*'
+    - dayweek: '*'
+
 socsigmafinalpipeline:
   file.managed:
     - name: /opt/so/conf/soc/sigma_final_pipeline.yaml

View File

@@ -1211,6 +1211,13 @@ soc:
     - soc_timestamp
     - event.dataset
     - message
+    ':kismet:':
+    - soc_timestamp
+    - device.manufacturer
+    - client.mac
+    - network.wireless.ssid
+    - network.wireless.bssid
+    - event.dataset
     ':playbook:':
     - soc_timestamp
     - rule.name
@@ -1250,19 +1257,27 @@ soc:
         allowRegex: ''
         autoUpdateEnabled: true
         autoEnabledSigmaRules:
+          default:
           - core+critical
           - securityonion-resources+critical
           - securityonion-resources+high
+          so-eval:
+          - securityonion-resources+critical
+          - securityonion-resources+high
+          so-import:
+          - securityonion-resources+critical
+          - securityonion-resources+high
-        communityRulesImportFrequencySeconds: 86400
+        communityRulesImportFrequencySeconds: 28800
         denyRegex: ''
         elastAlertRulesFolder: /opt/sensoroni/elastalert
         reposFolder: /opt/sensoroni/sigma/repos
         rulesFingerprintFile: /opt/sensoroni/fingerprints/sigma.fingerprint
-        stateFilePath: /opt/so/conf/soc/fingerprints/elastalertengine.state
+        stateFilePath: /opt/sensoroni/fingerprints/elastalertengine.state
         rulesRepos:
           - repo: https://github.com/Security-Onion-Solutions/securityonion-resources
             license: Elastic-2.0
             folder: sigma/stable
+            community: true
         sigmaRulePackages:
           - core
           - emerging_threats_addon
@@ -1310,24 +1325,27 @@ soc:
             - rbac/users_roles
       strelkaengine:
         allowRegex: ''
+        autoEnabledYaraRules:
+        - securityonion-yara
         autoUpdateEnabled: true
-        communityRulesImportFrequencySeconds: 86400
+        communityRulesImportFrequencySeconds: 28800
-        compileYaraPythonScriptPath: /opt/so/conf/strelka/compile_yara.py
+        compileYaraPythonScriptPath: /opt/sensoroni/yara/compile_yara.py
         denyRegex: ''
         reposFolder: /opt/sensoroni/yara/repos
         rulesRepos:
           - repo: https://github.com/Security-Onion-Solutions/securityonion-yara
             license: DRL
+            community: true
         yaraRulesFolder: /opt/sensoroni/yara/rules
-        stateFilePath: /opt/so/conf/soc/fingerprints/strelkaengine.state
+        stateFilePath: /opt/sensoroni/fingerprints/strelkaengine.state
       suricataengine:
         allowRegex: ''
         autoUpdateEnabled: true
-        communityRulesImportFrequencySeconds: 86400
+        communityRulesImportFrequencySeconds: 28800
         communityRulesFile: /nsm/rules/suricata/emerging-all.rules
         denyRegex: ''
         rulesFingerprintFile: /opt/sensoroni/fingerprints/emerging-all.fingerprint
-        stateFilePath: /opt/so/conf/soc/fingerprints/suricataengine.state
+        stateFilePath: /opt/sensoroni/fingerprints/suricataengine.state
       client:
         enableReverseLookup: false
         docsUrl: /docs/
@@ -1714,16 +1732,16 @@ soc:
       - name: Host Registry Changes
         description: Windows Registry changes
         query: 'event.category: registry | groupby event.action | groupby -sankey event.action host.name | groupby host.name | groupby event.dataset event.action | groupby process.executable | groupby registry.path | groupby process.executable registry.path'
-      - name: Host DNS & Process Mappings
+      - name: Host DNS and Process Mappings
         description: DNS queries mapped to originating processes
         query: 'event.category: network AND _exists_:process.executable AND (_exists_:dns.question.name OR _exists_:dns.answers.data) | groupby host.name | groupby -sankey host.name dns.question.name | groupby dns.question.name | groupby event.dataset event.type | groupby process.executable | groupby dns.answers.data'
       - name: Host Process Activity
         description: Process activity captured on an endpoint
         query: 'event.category:process | groupby host.name | groupby -sankey host.name user.name* | groupby user.name | groupby event.dataset event.action | groupby process.working_directory | groupby process.executable | groupby process.command_line | groupby process.parent.executable | groupby process.parent.command_line | groupby -sankey process.parent.executable process.executable | table soc_timestamp host.name user.name process.parent.name process.name event.action process.working_directory event.dataset'
-      - name: Host File Activity
-        description: File activity captured on an endpoint
-        query: 'event.category: file AND _exists_:process.executable | groupby host.name | groupby -sankey host.name process.executable | groupby process.executable | groupby event.dataset event.action event.type | groupby file.name'
+      - name: Host File and Process Mappings
+        description: File activity mapped to originating processes
+        query: 'event.category: file AND _exists_:process.name AND _exists_:process.executable | groupby host.name | groupby -sankey host.name process.name | groupby process.name | groupby process.executable | groupby event.dataset event.action event.type | groupby file.name'
-      - name: Host Network & Process Mappings
+      - name: Host Network and Process Mappings
         description: Network activity mapped to originating processes
         query: 'event.category: network AND _exists_:process.executable | groupby event.action | groupby -sankey event.action host.name | groupby host.name | groupby -sankey host.name user.name | groupby user.name | groupby event.dataset* event.type* event.action* | groupby dns.question.name | groupby process.executable | groupby process.name | groupby source.ip | groupby destination.ip | groupby destination.port'
       - name: Sysmon Overview
@@ -1900,6 +1918,15 @@ soc:
       - name: GeoIP - Source Organizations
         description: GeoIP tagged logs visualized by source organizations
         query: '* AND _exists_:source_geo.organization_name | groupby source_geo.organization_name | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby source.geo.country_name | groupby event.dataset | groupby event.module'
+      - name: Kismet - WiFi Devices
+        description: WiFi devices seen by Kismet sensors
+        query: 'event.module: kismet | groupby network.wireless.ssid | groupby device.manufacturer | groupby -pie device.manufacturer | groupby event.dataset'
+      - name: SOC Detections - Runtime Status
+        description: Runtime Status of Detections
+        query: 'event.dataset:soc.detections | groupby soc.detection_type soc.error_type | groupby soc.error_analysis | groupby soc.rule.name | groupby soc.error_message'
     job:
       alerts:
         advanced: false
@@ -1943,6 +1970,7 @@ soc:
         - rule.name
         - event.severity_label
         - event_data.event.dataset
+        - rule.category
         - event_data.source.ip
         - event_data.source.port
         - event_data.destination.host
@@ -2088,6 +2116,7 @@ soc:
         - red
       customEnabled: false
     detections:
+      advanced: true
       viewEnabled: true
       createLink: /detection/create
       eventFetchLimit: 500
@@ -2113,23 +2142,32 @@ soc:
         - soc_timestamp
       queries:
         - name: "All Detections"
-          query: "_id:*"
+          query: "_id:* | groupby so_detection.language | groupby so_detection.ruleset so_detection.isEnabled"
+          description: Show all Detections, community and custom
         - name: "Custom Detections"
-          query: "so_detection.isCommunity:false"
+          query: "so_detection.isCommunity:false AND NOT so_detection.ruleset: securityonion-resources"
+          description: Show all custom Detections
         - name: "All Detections - Enabled"
-          query: "so_detection.isEnabled:true"
+          query: "so_detection.isEnabled:true | groupby so_detection.language | groupby so_detection.ruleset so_detection.severity"
+          description: Show all enabled Detections
         - name: "All Detections - Disabled"
-          query: "so_detection.isEnabled:false"
+          query: "so_detection.isEnabled:false | groupby so_detection.language | groupby so_detection.ruleset so_detection.severity"
+          description: Show all disabled Detections
         - name: "Detection Type - Suricata (NIDS)"
-          query: "so_detection.language:suricata"
+          query: "so_detection.language:suricata | groupby so_detection.ruleset so_detection.isEnabled | groupby so_detection.category"
+          description: Show all NIDS Detections, which are run with Suricata
         - name: "Detection Type - Sigma (Elastalert) - All"
-          query: "so_detection.language:sigma"
+          query: "so_detection.language:sigma | groupby so_detection.ruleset so_detection.isEnabled | groupby so_detection.category | groupby so_detection.product"
+          description: Show all Sigma Detections, which are run with Elastalert
-        - name: "Detection Type - Sigma (Elastalert) - Windows"
-          query: 'so_detection.language:sigma AND so_detection.content: "*product: windows*"'
         - name: "Detection Type - YARA (Strelka)"
-          query: "so_detection.language:yara"
+          query: "so_detection.language:yara | groupby so_detection.ruleset so_detection.isEnabled"
+          description: Show all YARA detections, which are used by Strelka
         - name: "Security Onion - Grid Detections"
           query: "so_detection.ruleset:securityonion-resources"
+          description: Show Detections for this Security Onion Grid
+        - name: "Detections with Overrides"
+          query: "_exists_:so_detection.overrides | groupby so_detection.language | groupby so_detection.ruleset so_detection.isEnabled"
+          description: Show Detections that have Overrides
     detection:
       presets:
         severity:

View File

@@ -1,14 +0,0 @@
import os
import yara
import glob
import sys

def compile_yara_rules(rules_dir: str) -> None:
    compiled_rules_path: str = os.path.join(rules_dir, "rules.yar.compiled")
    rule_files: list[str] = glob.glob(os.path.join(rules_dir, '**/*.yar'), recursive=True)
    if rule_files:
        rules: yara.Rules = yara.compile(filepaths={os.path.basename(f): f for f in rule_files})
        rules.save(compiled_rules_path)

compile_yara_rules(sys.argv[1])

View File

@@ -30,9 +30,11 @@
 {# since cases is not a valid soc config item and only used for the map files, remove it from being placed in the config #}
 {% do SOCMERGED.config.server.modules.pop('cases') %}

-{# do not automatically enable Sigma rules if install is Eval or Import #}
-{% if grains['role'] in ['so-eval', 'so-import'] %}
-{% do SOCMERGED.config.server.modules.elastalertengine.update({'autoEnabledSigmaRules': []}) %}
+{# set Sigma rules based on role if defined and default if not #}
+{% if GLOBALS.role in SOCMERGED.config.server.modules.elastalertengine.autoEnabledSigmaRules %}
+{% do SOCMERGED.config.server.modules.elastalertengine.update({'autoEnabledSigmaRules': SOCMERGED.config.server.modules.elastalertengine.autoEnabledSigmaRules[GLOBALS.role]}) %}
+{% else %}
+{% do SOCMERGED.config.server.modules.elastalertengine.update({'autoEnabledSigmaRules': SOCMERGED.config.server.modules.elastalertengine.autoEnabledSigmaRules.default}) %}
 {% endif %}

 {# remove these modules if detections is disabled #}

View File

@@ -89,10 +89,13 @@ soc:
       advanced: True
       helpLink: sigma.html
     autoEnabledSigmaRules:
-      description: 'Sigma rules to automatically enable on initial import. Format is $Ruleset+$Level - for example, for the core community ruleset and critical level rules: core+critical'
+      default: &autoEnabledSigmaRules
+        description: 'Sigma rules to automatically enable on initial import. Format is $Ruleset+$Level - for example, for the core community ruleset and critical level rules: core+critical. These will be applied based on role if defined and default if not.'
         global: True
         advanced: True
         helpLink: sigma.html
+      so-eval: *autoEnabledSigmaRules
+      so-import: *autoEnabledSigmaRules
     denyRegex:
       description: 'Regex used to filter imported Sigma rules. Deny regex takes precedence over the Allow regex setting.'
       global: True
@@ -110,7 +113,7 @@ soc:
          forcedType: "[]{}"
          helpLink: sigma.html
        sigmaRulePackages:
-         description: 'Defines the Sigma Community Ruleset you want to run. One of these (core | core+ | core++ | all ) as well as an optional Add-on (emerging_threats_addon). WARNING! Changing the ruleset will remove all existing Sigma rules of the previous ruleset and their associated overrides. This removal cannot be undone.'
+         description: 'Defines the Sigma Community Ruleset you want to run. One of these (core | core+ | core++ | all ) as well as an optional Add-on (emerging_threats_addon). Once you have changed the ruleset here, you will need to wait for the rule update to take place (every 8 hours), or you can force the update by navigating to Detections --> Options dropdown menu --> Elastalert --> Full Update. WARNING! Changing the ruleset will remove all existing Sigma rules of the previous ruleset and their associated overrides. This removal cannot be undone.'
          global: True
          advanced: False
          helpLink: sigma.html
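
As a concrete illustration of that description, selecting the core+ ruleset together with the Emerging Threats add-on would plausibly look like this in the pillar (a sketch; the list values come from the option names in the description above):

soc:
  config:
    server:
      modules:
        elastalertengine:
          sigmaRulePackages:
            - core+
            - emerging_threats_addon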
@@ -186,6 +189,11 @@ soc:
          global: True
          advanced: True
          helpLink: yara.html
+       autoEnabledYaraRules:
+         description: 'Yara rules to automatically enable on initial import. Format is $Ruleset - for example, for the default shipped ruleset: securityonion-yara'
+         global: True
+         advanced: True
+         helpLink: yara.html
        autoUpdateEnabled:
          description: 'Set to true to enable automatic Internet-connected updates of the Yara rulesets. If this is an Airgap system, this setting will be overridden and set to false.'
          global: True
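
Following the stated format, enabling the default shipped ruleset would reduce to a one-item list, nested under the same module as the other YARA settings (module path omitted here; this is only a sketch):

          autoEnabledYaraRules:
            - securityonion-yara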
@@ -293,6 +301,7 @@ soc:
      alerts: *appSettings
      cases: *appSettings
      dashboards: *appSettings
+     detections: *appSettings
      grid:
        maxUploadSize:
          description: The maximum number of bytes for an uploaded PCAP import file.

View File

@@ -0,0 +1,33 @@
#!/bin/bash
#
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
# Set the default output destination to stdout
output_dest="/dev/stdout"
# If the "cron" flag is passed, change the output destination to the log file
if [ "$1" = "cron" ]; then
output_dest="/opt/so/log/soc/detections_runtime-status_sigma.log"
fi
# Run the query and output based on the output_dest value
/sbin/so-elasticsearch-query '*:elastalert_error*/_search' -d '{"query":{"range":{"@timestamp":{"gte":"now-11m","lte":"now"}}},"size": 50}' | \
jq --compact-output '.hits.hits[] | {
_timestamp: ._source["@timestamp"],
"rule.name": ._source.data.rule,
error_type: "runtime_status",
error_message: ._source.message,
detection_type: "sigma",
event_module: "soc",
event_dataset: "soc.detections",
error_analysis: (
if ._source.message | contains("Unknown column [winlog.channel]") then "Target logsource never seen"
elif ._source.message | contains("parsing_exception") then "Syntax Error"
else "Unknown"
end
)
}' >> $output_dest
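
For reference, one record emitted by the jq transform above would take roughly this shape (every value below is invented for illustration; only the field names and the error_analysis buckets are defined by the script):

{"_timestamp":"2024-05-01T12:00:00.000Z","rule.name":"Example Sigma Rule","error_type":"runtime_status","error_message":"parsing_exception: mismatched bracket","detection_type":"sigma","event_module":"soc","event_dataset":"soc.detections","error_analysis":"Syntax Error"}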

View File

@@ -17,7 +17,7 @@ strelka_backend:
- image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-strelka-backend:{{ GLOBALS.so_version }} - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-strelka-backend:{{ GLOBALS.so_version }}
- binds: - binds:
- /opt/so/conf/strelka/backend/:/etc/strelka/:ro - /opt/so/conf/strelka/backend/:/etc/strelka/:ro
- /opt/so/conf/strelka/rules/:/etc/yara/:ro - /opt/so/conf/strelka/rules/compiled/:/etc/yara/:ro
{% if DOCKER.containers['so-strelka-backend'].custom_bind_mounts %} {% if DOCKER.containers['so-strelka-backend'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-strelka-backend'].custom_bind_mounts %} {% for BIND in DOCKER.containers['so-strelka-backend'].custom_bind_mounts %}
- {{ BIND }} - {{ BIND }}

View File

@@ -0,0 +1,66 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
import os
import yara
import glob
import json
from concurrent.futures import ThreadPoolExecutor
def check_syntax(rule_file):
try:
# Testing if compilation throws a syntax error, don't save the result
yara.compile(filepath=rule_file)
return (True, rule_file, None)
except yara.SyntaxError as e:
# Return the error message for logging purposes
return (False, rule_file, str(e))
def compile_yara_rules(rules_dir):
compiled_dir = os.path.join(rules_dir, "compiled")
compiled_rules_path = os.path.join(compiled_dir, "rules.compiled")
rule_files = glob.glob(os.path.join(rules_dir, '**/*.yar'), recursive=True)
files_to_compile = {}
removed_count = 0
success_count = 0
# Use ThreadPoolExecutor to parallelize syntax checks
with ThreadPoolExecutor() as executor:
results = executor.map(check_syntax, rule_files)
# Collect yara files and prepare for batch compilation
for success, rule_file, error_message in results:
if success:
files_to_compile[os.path.basename(rule_file)] = rule_file
success_count += 1
else:
# Extract just the UUID from the rule file name
rule_id = os.path.splitext(os.path.basename(rule_file))[0]
log_entry = {
"event_module": "soc",
"event_dataset": "soc.detections",
"log.level": "error",
"error_message": error_message,
"error_analysis": "Syntax Error",
"detection_type": "YARA",
"rule_uuid": rule_id,
"error_type": "runtime_status"
}
with open('/opt/sensoroni/logs/detections_runtime-status_yara.log', 'a') as log_file:
json.dump(log_entry, log_file)
log_file.write('\n') # Ensure new entries start on new lines
os.remove(rule_file)
removed_count += 1
# Compile all remaining valid rules into a single file
if files_to_compile:
compiled_rules = yara.compile(filepaths=files_to_compile)
compiled_rules.save(compiled_rules_path)
print(f"All remaining rules compiled and saved into {compiled_rules_path}")
# Print summary of compilation results
print(f"Summary: {success_count} rules compiled successfully, {removed_count} rules removed due to errors.")
compile_yara_rules("/opt/sensoroni/yara/rules/")

View File

@@ -9,7 +9,15 @@
# Strelka config
strelkaconfdir:
  file.directory:
-   - name: /opt/so/conf/strelka
+   - name: /opt/so/conf/strelka/rules/compiled/
+   - user: 939
+   - group: 939
+   - makedirs: True
+
+strelkacompileyara:
+  file.managed:
+   - name: /opt/so/conf/strelka/compile_yara.py
+   - source: salt://strelka/compile_yara/compile_yara.py
    - user: 939
    - group: 939
    - makedirs: True

View File

@@ -563,7 +563,7 @@ strelka:
        options:
          location: '/etc/yara/'
          compiled:
-           enabled: False
+           enabled: True
            filename: "rules.compiled"
            store_offset: True
            offset_meta_key: "StrelkaHexDump"

View File

@@ -30,6 +30,7 @@ suricata:
      cluster-type: cluster_flow
      defrag: "yes"
      use-mmap: "yes"
+     mmap-locked: "no"
      threads: 1
      tpacket-v3: "yes"
      ring-size: 5000

View File

@@ -7,6 +7,7 @@
{% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'docker/docker.map.jinja' import DOCKER %}
+{% from 'suricata/map.jinja' import SURICATAMERGED %}

include:
@@ -24,6 +25,13 @@ so-suricata:
        - {{ XTRAENV }}
      {% endfor %}
    {% endif %}
+   {# we look at SURICATAMERGED.config['af-packet'][0] since we only allow one interface and therefore always the first list item #}
+   {% if SURICATAMERGED.config['af-packet'][0]['mmap-locked'] == "yes" and DOCKER.containers['so-suricata'].ulimits %}
+   - ulimits:
+     {% for ULIMIT in DOCKER.containers['so-suricata'].ulimits %}
+     - {{ ULIMIT }}
+     {% endfor %}
+   {% endif %}
    - binds:
      - /opt/so/conf/suricata/suricata.yaml:/etc/suricata/suricata.yaml:ro
      - /opt/so/conf/suricata/threshold.conf:/etc/suricata/threshold.conf:ro
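
Taken together with the mmap-locked default shown earlier, a sensor operator could opt in via pillar; a minimal sketch follows (the exact pillar paths are inferred from the defaults and annotation files in this diff, and the memlock value is illustrative only):

suricata:
  config:
    af-packet:
      mmap-locked: "yes"
docker:
  so-suricata:
    ulimits:
      - memlock=1073741824

When mmap-locked is "yes" and ulimits are defined for so-suricata, the conditional above renders the ulimits option into the container definition so Suricata can lock its packet ring in memory without hitting the default memlock limit.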

View File

@@ -34,6 +34,7 @@
      cluster-type: {{ SURICATAMERGED.config['af-packet']['cluster-type'] }}
      defrag: "{{ SURICATAMERGED.config['af-packet'].defrag }}"
      use-mmap: "{{ SURICATAMERGED.config['af-packet']['use-mmap'] }}"
+     mmap-locked: "{{ SURICATAMERGED.config['af-packet']['mmap-locked'] }}"
      threads: {{ SURICATAMERGED.config['af-packet'].threads }}
      tpacket-v3: "{{ SURICATAMERGED.config['af-packet']['tpacket-v3'] }}"
      ring-size: {{ SURICATAMERGED.config['af-packet']['ring-size'] }}

View File

@@ -4,13 +4,15 @@ suricata:
      helpLink: suricata.html
  thresholding:
    sids__yaml:
-     description: Threshold SIDS List
+     description: Threshold SIDS List. This setting is readonly; Use the Detections screen to modify rules.
      syntax: yaml
      file: True
      global: True
      multiline: True
      title: SIDS
      helpLink: suricata.html
+     readonlyUi: True
+     advanced: true
  classification:
    classification__config:
      description: Classifications config file.
@@ -83,6 +85,11 @@ suricata:
      use-mmap:
        advanced: True
        readonly: True
+     mmap-locked:
+       description: Prevent swapping by locking the memory map.
+       advanced: True
+       regex: ^(yes|no)$
+       helpLink: suricata.html
      threads:
        description: The amount of worker threads.
        helpLink: suricata.html
@@ -143,84 +150,40 @@ suricata:
      helpLink: suricata.html
    vars:
      address-groups:
-       HOME_NET:
-         description: List of hosts or networks.
+       HOME_NET: &suriaddressgroup
+         description: Assign a list of hosts, or networks, using CIDR notation, to this Suricata variable. The variable can then be re-used within Suricata rules. This allows for a single adjustment to the variable that will then affect all rules referencing the variable.
          regex: ^(((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\/([0-9]|[1-2][0-9]|3[0-2]))?$|^((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?))|:))|(([0-9A-Fa-f]{1,4}:){5}((:[0-9A-Fa-f]{1,4}){1,2}|:((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)|:))|(([0-9A-Fa-f]{1,4}:){4}((:[0-9A-Fa-f]{1,4}){1,3}|:((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)|:))|(([0-9A-Fa-f]{1,4}:){3}((:[0-9A-Fa-f]{1,4}){1,4}|:((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)|:))|(([0-9A-Fa-f]{1,4}:){2}((:[0-9A-Fa-f]{1,4}){1,5}|:((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)|:))|(([0-9A-Fa-f]{1,4}:){1}((:[0-9A-Fa-f]{1,4}){1,6}|:((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)|:))|(:((:[0-9A-Fa-f]{1,4}){1,7}|:)))(\/([0-9]|[1-9][0-9]|1[0-1][0-9]|12[0-8]))?$
          regexFailureMessage: You must enter a valid IP address or CIDR.
          helpLink: suricata.html
+         duplicates: True
-       EXTERNAL_NET:
-         description: List of hosts or networks.
-         helpLink: suricata.html
-       HTTP_SERVERS:
-         description: List of hosts or networks.
-         helpLink: suricata.html
-       SMTP_SERVERS:
-         description: List of hosts or networks.
-         helpLink: suricata.html
-       SQL_SERVERS:
-         description: List of hosts or networks.
-         helpLink: suricata.html
-       DNS_SERVERS:
-         description: List of hosts or networks.
-         helpLink: suricata.html
-       TELNET_SERVERS:
-         description: List of hosts or networks.
-         helpLink: suricata.html
-       AIM_SERVERS:
-         description: List of hosts or networks.
-         helpLink: suricata.html
-       DC_SERVERS:
-         description: List of hosts or networks.
-         helpLink: suricata.html
-       DNP3_SERVER:
-         description: List of hosts or networks.
-         helpLink: suricata.html
-       DNP3_CLIENT:
-         description: List of hosts or networks.
-         helpLink: suricata.html
-       MODBUS_CLIENT:
-         description: List of hosts or networks.
-         helpLink: suricata.html
-       MODBUS_SERVER:
-         description: List of hosts or networks.
-         helpLink: suricata.html
-       ENIP_CLIENT:
-         description: List of hosts or networks.
-         helpLink: suricata.html
-       ENIP_SERVER:
-         description: List of hosts or networks.
-         helpLink: suricata.html
+       EXTERNAL_NET: *suriaddressgroup
+       HTTP_SERVERS: *suriaddressgroup
+       SMTP_SERVERS: *suriaddressgroup
+       SQL_SERVERS: *suriaddressgroup
+       DNS_SERVERS: *suriaddressgroup
+       TELNET_SERVERS: *suriaddressgroup
+       AIM_SERVERS: *suriaddressgroup
+       DC_SERVERS: *suriaddressgroup
+       DNP3_SERVER: *suriaddressgroup
+       DNP3_CLIENT: *suriaddressgroup
+       MODBUS_CLIENT: *suriaddressgroup
+       MODBUS_SERVER: *suriaddressgroup
+       ENIP_CLIENT: *suriaddressgroup
+       ENIP_SERVER: *suriaddressgroup
      port-groups:
-       HTTP_PORTS:
-         description: List of ports to look for HTTP traffic on.
+       HTTP_PORTS: &suriportgroup
+         description: Assign a list of network port numbers to this Suricata variable. The variable can then be re-used within Suricata rules. This allows for a single adjustment to the variable that will then affect all rules referencing the variable.
          helpLink: suricata.html
+         duplicates: True
-       SHELLCODE_PORTS:
-         description: List of ports to look for SHELLCODE traffic on.
-         helpLink: suricata.html
-       ORACLE_PORTS:
-         description: List of ports to look for ORACLE traffic on.
-         helpLink: suricata.html
-       SSH_PORTS:
-         description: List of ports to look for SSH traffic on.
-         helpLink: suricata.html
-       DNP3_PORTS:
-         description: List of ports to look for DNP3 traffic on.
-         helpLink: suricata.html
-       MODBUS_PORTS:
-         description: List of ports to look for MODBUS traffic on.
-         helpLink: suricata.html
-       FILE_DATA_PORTS:
-         description: List of ports to look for FILE_DATA traffic on.
-         helpLink: suricata.html
-       FTP_PORTS:
-         description: List of ports to look for FTP traffic on.
-         helpLink: suricata.html
-       VXLAN_PORTS:
-         description: List of ports to look for VXLAN traffic on.
-         helpLink: suricata.html
-       TEREDO_PORTS:
-         description: List of ports to look for TEREDO traffic on.
-         helpLink: suricata.html
+       SHELLCODE_PORTS: *suriportgroup
+       ORACLE_PORTS: *suriportgroup
+       SSH_PORTS: *suriportgroup
+       DNP3_PORTS: *suriportgroup
+       MODBUS_PORTS: *suriportgroup
+       FILE_DATA_PORTS: *suriportgroup
+       FTP_PORTS: *suriportgroup
+       VXLAN_PORTS: *suriportgroup
+       TEREDO_PORTS: *suriportgroup
  outputs:
    eve-log:
      types:
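
The &suriaddressgroup and *suriaddressgroup notation above is plain YAML anchoring: the annotation block is written once and every alias re-uses it. A minimal self-contained sketch of the same pattern (names here are illustrative):

vars:
  address-groups:
    HOME_NET: &addressgroup
      description: List of hosts or networks.
      helpLink: suricata.html
    EXTERNAL_NET: *addressgroup
    HTTP_SERVERS: *addressgroup

Each alias resolves to the identical mapping, so a wording fix in the anchored block propagates to every variable that references it.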

View File

@@ -60,6 +60,7 @@ zeek:
          file: True
          global: True
          advanced: True
+         duplicates: True
        file_extraction:
          description: Contains a list of file or MIME types Zeek will extract from the network streams. Values must adhere to the following format - {"MIME_TYPE":"FILE_EXTENSION"}
          helpLink: zeek.html

View File

@@ -559,7 +559,7 @@ check_requirements() {
  local num_nics=${#nic_list[@]}
  if [[ $is_eval ]]; then
-   req_mem=12
+   req_mem=8
    req_cores=4
    req_nics=2
  elif [[ $is_standalone ]]; then
@@ -1342,6 +1342,10 @@ create_global() {
  echo "  influxdb_host: '$HOSTNAME'" >> $global_pillar_file
  echo "  registry_host: '$HOSTNAME'" >> $global_pillar_file
  echo "  endgamehost: '$ENDGAMEHOST'" >> $global_pillar_file
+ if [ "$install_type" = 'EVAL' ]; then
+   echo "  pcapengine: SURICATA" >> $global_pillar_file
+ fi
}
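
With the EVAL branch above, the tail of the generated global pillar file would plausibly read as follows (keys reproduced from the echo statements; the hostname and endgamehost values are illustrative only):

  influxdb_host: 'so-eval-host'
  registry_host: 'so-eval-host'
  endgamehost: ''
  pcapengine: SURICATA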
create_sensoroni_pillar() {
@@ -1596,7 +1600,9 @@ reinstall_init() {
  # Kill any salt processes (safely)
  for service in "${salt_services[@]}"; do
    # Stop the service in the background so we can exit after a certain amount of time
+   if check_service_status "$service"; then
      systemctl stop "$service" &
+   fi
    local pid=$!
    local count=0