diff --git a/.github/.gitleaks.toml b/.github/.gitleaks.toml index 32c27b355..009f1c613 100644 --- a/.github/.gitleaks.toml +++ b/.github/.gitleaks.toml @@ -541,6 +541,6 @@ paths = [ '''gitleaks.toml''', '''(.*?)(jpg|gif|doc|pdf|bin|svg|socket)$''', '''(go.mod|go.sum)$''', - '''salt/nginx/files/enterprise-attack.json''' - '''(.*?)whl$ + '''salt/nginx/files/enterprise-attack.json''', + '''(.*?)whl$''' ] diff --git a/.gitignore b/.gitignore index fc9d41531..fb154cae7 100644 --- a/.gitignore +++ b/.gitignore @@ -67,11 +67,3 @@ __pycache__ # Analyzer dev/test config files *_dev.yaml site-packages - -# Project Scope Directory -.projectScope/ -.clinerules -cline_docs/ - -# vscode settings -.vscode/ diff --git a/salt/_runners/setup_hypervisor.py b/salt/_runners/setup_hypervisor.py index 6ddd571c9..9d7116d59 100644 --- a/salt/_runners/setup_hypervisor.py +++ b/salt/_runners/setup_hypervisor.py @@ -165,7 +165,7 @@ def _validate_image_checksum(path, expected_sha256): return True # Constants -IMAGE_URL = "https://yum.oracle.com/templates/OracleLinux/OL9/u5/x86_64/OL9U5_x86_64-kvm-b253.qcow2" +IMAGE_URL = "https://download.securityonion.net/file/securityonion/OL9U5_x86_64-kvm-b253.qcow2" IMAGE_SHA256 = "3b00bbbefc8e78dd28d9f538834fb9e2a03d5ccdc2cadf2ffd0036c0a8f02021" IMAGE_PATH = "/nsm/libvirt/boot/OL9U5_x86_64-kvm-b253.qcow2" MANAGER_HOSTNAME = socket.gethostname() diff --git a/salt/allowed_states.map.jinja b/salt/allowed_states.map.jinja index 3edda8a4c..068722ca2 100644 --- a/salt/allowed_states.map.jinja +++ b/salt/allowed_states.map.jinja @@ -116,7 +116,7 @@ 'so-managersearch': ( ssl_states + manager_states + - ['strelka.manager'] + + ['salt.cloud', 'libvirt.packages', 'libvirt.ssh.users', 'strelka.manager'] + stig_states + kafka_states + elastic_stack_states @@ -129,6 +129,7 @@ 'so-standalone': ( ssl_states + manager_states + + ['salt.cloud', 'libvirt.packages', 'libvirt.ssh.users'] + sensor_states + stig_states + kafka_states + diff --git a/salt/backup/tools/sbin/so-config-backup.jinja b/salt/backup/tools/sbin/so-config-backup.jinja index 23e407653..7f65bbba3 100755 --- a/salt/backup/tools/sbin/so-config-backup.jinja +++ b/salt/backup/tools/sbin/so-config-backup.jinja @@ -11,6 +11,10 @@ TODAY=$(date '+%Y_%m_%d') BACKUPDIR={{ DESTINATION }} BACKUPFILE="$BACKUPDIR/so-config-backup-$TODAY.tar" MAXBACKUPS=7 +EXCLUSIONS=( + "--exclude=/opt/so/saltstack/local/salt/elasticfleet/files/so_agent-installers" +) + # Create backup dir if it does not exist mkdir -p /nsm/backup @@ -23,7 +27,7 @@ if [ ! 
-f $BACKUPFILE ]; then # Loop through all paths defined in global.sls, and append them to backup file {%- for LOCATION in BACKUPLOCATIONS %} - tar -rf $BACKUPFILE {{ LOCATION }} + tar -rf $BACKUPFILE "${EXCLUSIONS[@]}" {{ LOCATION }} {%- endfor %} fi diff --git a/salt/common/tools/sbin/so-log-check b/salt/common/tools/sbin/so-log-check index 96881aa56..72ece1919 100755 --- a/salt/common/tools/sbin/so-log-check +++ b/salt/common/tools/sbin/so-log-check @@ -158,6 +158,8 @@ if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then EXCLUDED_ERRORS="$EXCLUDED_ERRORS|adding index lifecycle policy" # false positive (elasticsearch policy names contain 'error') EXCLUDED_ERRORS="$EXCLUDED_ERRORS|adding ingest pipeline" # false positive (elasticsearch ingest pipeline names contain 'error') EXCLUDED_ERRORS="$EXCLUDED_ERRORS|updating index template" # false positive (elasticsearch index or template names contain 'error') + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|updating component template" # false positive (elasticsearch index or template names contain 'error') + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|upgrading composable template" # false positive (elasticsearch composable template names contain 'error') fi if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then diff --git a/salt/common/tools/sbin_jinja/so-import-pcap b/salt/common/tools/sbin_jinja/so-import-pcap index 5f2370a4a..e8c2b84c8 100755 --- a/salt/common/tools/sbin_jinja/so-import-pcap +++ b/salt/common/tools/sbin_jinja/so-import-pcap @@ -248,7 +248,7 @@ fi START_OLDEST_SLASH=$(echo $START_OLDEST | sed -e 's/-/%2F/g') END_NEWEST_SLASH=$(echo $END_NEWEST | sed -e 's/-/%2F/g') if [[ $VALID_PCAPS_COUNT -gt 0 ]] || [[ $SKIPPED_PCAPS_COUNT -gt 0 ]]; then - URL="https://{{ URLBASE }}/#/dashboards?q=$HASH_FILTERS%20%7C%20groupby%20event.module*%20%7C%20groupby%20-sankey%20event.module*%20event.dataset%20%7C%20groupby%20event.dataset%20%7C%20groupby%20source.ip%20%7C%20groupby%20destination.ip%20%7C%20groupby%20destination.port%20%7C%20groupby%20network.protocol%20%7C%20groupby%20rule.name%20rule.category%20event.severity_label%20%7C%20groupby%20dns.query.name%20%7C%20groupby%20file.mime_type%20%7C%20groupby%20http.virtual_host%20http.uri%20%7C%20groupby%20notice.note%20notice.message%20notice.sub_message%20%7C%20groupby%20ssl.server_name%20%7C%20groupby%20source_geo.organization_name%20source.geo.country_name%20%7C%20groupby%20destination_geo.organization_name%20destination.geo.country_name&t=${START_OLDEST_SLASH}%2000%3A00%3A00%20AM%20-%20${END_NEWEST_SLASH}%2000%3A00%3A00%20AM&z=UTC" + URL="https://{{ URLBASE }}/#/dashboards?q=$HASH_FILTERS%20%7C%20groupby%20event.module*%20%7C%20groupby%20-sankey%20event.module*%20event.dataset%20%7C%20groupby%20event.dataset%20%7C%20groupby%20source.ip%20%7C%20groupby%20destination.ip%20%7C%20groupby%20destination.port%20%7C%20groupby%20network.protocol%20%7C%20groupby%20rule.name%20rule.category%20event.severity_label%20%7C%20groupby%20dns.query.name%20%7C%20groupby%20file.mime_type%20%7C%20groupby%20http.virtual_host%20http.uri%20%7C%20groupby%20notice.note%20notice.message%20notice.sub_message%20%7C%20groupby%20ssl.server_name%20%7C%20groupby%20source.as.organization.name%20source.geo.country_name%20%7C%20groupby%20destination.as.organization.name%20destination.geo.country_name&t=${START_OLDEST_SLASH}%2000%3A00%3A00%20AM%20-%20${END_NEWEST_SLASH}%2000%3A00%3A00%20AM&z=UTC" status "Import complete!" 
status diff --git a/salt/elasticfleet/files/integrations/elastic-defend/elastic-defend-endpoints.json b/salt/elasticfleet/files/integrations/elastic-defend/elastic-defend-endpoints.json index 87870c7bc..fb9069e83 100644 --- a/salt/elasticfleet/files/integrations/elastic-defend/elastic-defend-endpoints.json +++ b/salt/elasticfleet/files/integrations/elastic-defend/elastic-defend-endpoints.json @@ -1,32 +1,33 @@ { - "name": "elastic-defend-endpoints", - "namespace": "default", - "description": "", - "package": { - "name": "endpoint", - "title": "Elastic Defend", - "version": "8.17.0", - "requires_root": true - }, - "enabled": true, - "policy_id": "endpoints-initial", - "vars": {}, - "inputs": [ - { - "type": "endpoint", - "enabled": true, - "config": { - "integration_config": { - "value": { - "type": "endpoint", - "endpointConfig": { - "preset": "DataCollection" - } - } - } - }, - "streams": [] - } - ] - } - \ No newline at end of file + "name": "elastic-defend-endpoints", + "namespace": "default", + "description": "", + "package": { + "name": "endpoint", + "title": "Elastic Defend", + "version": "8.18.1", + "requires_root": true + }, + "enabled": true, + "policy_ids": [ + "endpoints-initial" + ], + "vars": {}, + "inputs": [ + { + "type": "ENDPOINT_INTEGRATION_CONFIG", + "enabled": true, + "config": { + "_config": { + "value": { + "type": "endpoint", + "endpointConfig": { + "preset": "DataCollection" + } + } + } + }, + "streams": [] + } + ] +} \ No newline at end of file diff --git a/salt/elasticfleet/files/integrations/grid-nodes_general/idh-logs.json b/salt/elasticfleet/files/integrations/grid-nodes_general/idh-logs.json index db4b1a8f6..9f66c1937 100644 --- a/salt/elasticfleet/files/integrations/grid-nodes_general/idh-logs.json +++ b/salt/elasticfleet/files/integrations/grid-nodes_general/idh-logs.json @@ -19,7 +19,7 @@ ], "data_stream.dataset": "idh", "tags": [], - "processors": "\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true\n- drop_fields:\n when:\n equals:\n logtype: \"1001\"\n fields: [\"src_host\", \"src_port\", \"dst_host\", \"dst_port\" ]\n ignore_missing: true\n- rename:\n fields:\n - from: \"src_host\"\n to: \"source.ip\"\n - from: \"src_port\"\n to: \"source.port\"\n - from: \"dst_host\"\n to: \"destination.host\"\n - from: \"dst_port\"\n to: \"destination.port\"\n ignore_missing: true\n- convert:\n fields:\n - {from: \"logtype\", to: \"event.code\", type: \"string\"}\n ignore_missing: true\n- drop_fields:\n fields: '[\"prospector\", \"input\", \"offset\", \"beat\"]'\n- add_fields:\n target: event\n fields:\n category: host\n module: opencanary", + "processors": "\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true\n- convert:\n fields:\n - {from: \"logtype\", to: \"event.code\", type: \"string\"}\n- drop_fields:\n when:\n equals:\n event.code: \"1001\"\n fields: [\"src_host\", \"src_port\", \"dst_host\", \"dst_port\" ]\n ignore_missing: true\n- rename:\n fields:\n - from: \"src_host\"\n to: \"source.ip\"\n - from: \"src_port\"\n to: \"source.port\"\n - from: \"dst_host\"\n to: \"destination.host\"\n - from: \"dst_port\"\n to: \"destination.port\"\n ignore_missing: true\n- drop_fields:\n fields: '[\"prospector\", \"input\", \"offset\", \"beat\"]'\n- add_fields:\n target: event\n fields:\n category: host\n module: opencanary", "custom": "pipeline: common" } } diff --git a/salt/elasticfleet/files/integrations/grid-nodes_general/import-evtx-logs.json 
b/salt/elasticfleet/files/integrations/grid-nodes_general/import-evtx-logs.json index 46717f3e1..059e4b8cc 100644 --- a/salt/elasticfleet/files/integrations/grid-nodes_general/import-evtx-logs.json +++ b/salt/elasticfleet/files/integrations/grid-nodes_general/import-evtx-logs.json @@ -20,7 +20,7 @@ ], "data_stream.dataset": "import", "custom": "", - "processors": "- dissect:\n tokenizer: \"/nsm/import/%{import.id}/evtx/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n- drop_fields:\n fields: [\"host\"]\n ignore_missing: true\n- add_fields:\n target: data_stream\n fields:\n type: logs\n dataset: system.security\n- add_fields:\n target: event\n fields:\n dataset: system.security\n module: system\n imported: true\n- add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.security-1.67.0\n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-Sysmon/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.sysmon_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.sysmon_operational\n module: windows\n imported: true\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.sysmon_operational-2.5.0\n- if:\n equals:\n winlog.channel: 'Application'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.application\n - add_fields:\n target: event\n fields:\n dataset: system.application\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.application-1.67.0\n- if:\n equals:\n winlog.channel: 'System'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.system\n - add_fields:\n target: event\n fields:\n dataset: system.system\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.system-1.67.0\n \n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-PowerShell/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.powershell_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.powershell_operational\n module: windows\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.powershell_operational-2.5.0\n- add_fields:\n target: data_stream\n fields:\n dataset: import", + "processors": "- dissect:\n tokenizer: \"/nsm/import/%{import.id}/evtx/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n- drop_fields:\n fields: [\"host\"]\n ignore_missing: true\n- add_fields:\n target: data_stream\n fields:\n type: logs\n dataset: system.security\n- add_fields:\n target: event\n fields:\n dataset: system.security\n module: system\n imported: true\n- add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.security-2.3.3\n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-Sysmon/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.sysmon_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.sysmon_operational\n module: windows\n imported: true\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.sysmon_operational-3.1.0\n- if:\n equals:\n winlog.channel: 'Application'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.application\n - add_fields:\n target: event\n fields:\n dataset: system.application\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.application-2.3.3\n- if:\n equals:\n 
winlog.channel: 'System'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.system\n - add_fields:\n target: event\n fields:\n dataset: system.system\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.system-2.3.3\n \n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-PowerShell/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.powershell_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.powershell_operational\n module: windows\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.powershell_operational-3.1.0\n- add_fields:\n target: data_stream\n fields:\n dataset: import", "tags": [ "import" ] diff --git a/salt/elasticfleet/files/integrations/grid-nodes_general/syslog-tcp-514.json b/salt/elasticfleet/files/integrations/grid-nodes_general/syslog-tcp-514.json index 4088f5a87..f284ede06 100644 --- a/salt/elasticfleet/files/integrations/grid-nodes_general/syslog-tcp-514.json +++ b/salt/elasticfleet/files/integrations/grid-nodes_general/syslog-tcp-514.json @@ -11,7 +11,7 @@ "tcp-tcp": { "enabled": true, "streams": { - "tcp.generic": { + "tcp.tcp": { "enabled": true, "vars": { "listen_address": "0.0.0.0", @@ -23,7 +23,8 @@ "syslog" ], "syslog_options": "field: message\n#format: auto\n#timezone: Local", - "ssl": "" + "ssl": "", + "custom": "" } } } diff --git a/salt/elasticfleet/integration-defaults.map.jinja b/salt/elasticfleet/integration-defaults.map.jinja index 5449e791e..500a9e63c 100644 --- a/salt/elasticfleet/integration-defaults.map.jinja +++ b/salt/elasticfleet/integration-defaults.map.jinja @@ -4,6 +4,7 @@ {% import_json '/opt/so/state/esfleet_package_components.json' as ADDON_PACKAGE_COMPONENTS %} +{% import_json '/opt/so/state/esfleet_component_templates.json' as INSTALLED_COMPONENT_TEMPLATES %} {% import_yaml 'elasticfleet/defaults.yaml' as ELASTICFLEETDEFAULTS %} {% set CORE_ESFLEET_PACKAGES = ELASTICFLEETDEFAULTS.get('elasticfleet', {}).get('packages', {}) %} @@ -14,6 +15,7 @@ 'awsfirehose.logs': 'awsfirehose', 'awsfirehose.metrics': 'aws.cloudwatch', 'cribl.logs': 'cribl', + 'cribl.metrics': 'cribl', 'sentinel_one_cloud_funnel.logins': 'sentinel_one_cloud_funnel.login', 'azure_application_insights.app_insights': 'azure.app_insights', 'azure_application_insights.app_state': 'azure.app_state', @@ -45,7 +47,10 @@ 'synthetics.browser_screenshot': 'synthetics-browser.screenshot', 'synthetics.http': 'synthetics-http', 'synthetics.icmp': 'synthetics-icmp', - 'synthetics.tcp': 'synthetics-tcp' + 'synthetics.tcp': 'synthetics-tcp', + 'swimlane.swimlane_api': 'swimlane.api', + 'swimlane.tenant_api': 'swimlane.tenant', + 'swimlane.turbine_api': 'turbine.api' } %} {% for pkg in ADDON_PACKAGE_COMPONENTS %} @@ -62,70 +67,90 @@ {% else %} {% set integration_type = "" %} {% endif %} -{% set component_name = pkg.name ~ "." ~ pattern.title %} -{# fix weirdly named components #} -{% if component_name in WEIRD_INTEGRATIONS %} -{% set component_name = WEIRD_INTEGRATIONS[component_name] %} -{% endif %} +{% set component_name = pkg.name ~ "." ~ pattern.title %} +{% set index_pattern = pattern.name %} + +{# fix weirdly named components #} +{% if component_name in WEIRD_INTEGRATIONS %} +{% set component_name = WEIRD_INTEGRATIONS[component_name] %} +{% endif %} + +{# create duplicate of component_name, so we can split generics from @custom component templates in the index template below and overwrite the default @package when needed + eg. 
having to replace unifiedlogs.generic@package with filestream.generic@package, but keep the ability to customize unifiedlogs.generic@custom and its ILM policy #} +{% set custom_component_name = component_name %} + +{# duplicate integration_type to assist with sometimes needing to overwrite component templates with 'logs-filestream.generic@package' (there is no metrics-filestream.generic@package) #} +{% set generic_integration_type = integration_type %} + {# component_name_x maintains the functionality of merging local pillar changes with generated 'defaults' via SOC UI #} {% set component_name_x = component_name.replace(".","_x_") %} {# pillar overrides/merge expects the key names to follow the naming in elasticsearch/defaults.yaml eg. so-logs-1password_x_item_usages . The _x_ is replaced later on in elasticsearch/template.map.jinja #} {% set integration_key = "so-" ~ integration_type ~ component_name_x %} +{# if its a .generic template make sure that a .generic@package for the integration exists. Else default to logs-filestream.generic@package #} +{% if ".generic" in component_name and integration_type ~ component_name ~ "@package" not in INSTALLED_COMPONENT_TEMPLATES %} +{# these generic templates by default are directed to index_pattern of 'logs-generic-*', overwrite that here to point to eg gcp_pubsub.generic-* #} +{% set index_pattern = integration_type ~ component_name ~ "-*" %} +{# includes use of .generic component template, but it doesn't exist in installed component templates. Redirect it to filestream.generic@package #} +{% set component_name = "filestream.generic" %} +{% set generic_integration_type = "logs-" %} +{% endif %} + {# Default integration settings #} {% set integration_defaults = { - "index_sorting": false, - "index_template": { - "composed_of": [integration_type ~ component_name ~ "@package", integration_type ~ component_name ~ "@custom", "so-fleet_integrations.ip_mappings-1", "so-fleet_globals-1", "so-fleet_agent_id_verification-1"], - "data_stream": { - "allow_custom_routing": false, - "hidden": false - }, - "ignore_missing_component_templates": [integration_type ~ component_name ~ "@custom"], - "index_patterns": [pattern.name], - "priority": 501, - "template": { - "settings": { - "index": { - "lifecycle": {"name": "so-" ~ integration_type ~ component_name ~ "-logs"}, - "number_of_replicas": 0 - } - } - } - }, - "policy": { - "phases": { - "cold": { - "actions": { - "set_priority": {"priority": 0} - }, - "min_age": "60d" + "index_sorting": false, + "index_template": { + "composed_of": [generic_integration_type ~ component_name ~ "@package", integration_type ~ custom_component_name ~ "@custom", "so-fleet_integrations.ip_mappings-1", "so-fleet_globals-1", "so-fleet_agent_id_verification-1"], + "data_stream": { + "allow_custom_routing": false, + "hidden": false + }, + "ignore_missing_component_templates": [integration_type ~ custom_component_name ~ "@custom"], + "index_patterns": [index_pattern], + "priority": 501, + "template": { + "settings": { + "index": { + "lifecycle": {"name": "so-" ~ integration_type ~ custom_component_name ~ "-logs"}, + "number_of_replicas": 0 + } + } + } + }, + "policy": { + "phases": { + "cold": { + "actions": { + "set_priority": {"priority": 0} + }, + "min_age": "60d" + }, + "delete": { + "actions": { + "delete": {} + }, + "min_age": "365d" + }, + "hot": { + "actions": { + "rollover": { + "max_age": "30d", + "max_primary_shard_size": "50gb" + }, + "set_priority": {"priority": 100} }, - "delete": { - "actions": { - "delete": {} - }, - 
"min_age": "365d" - }, - "hot": { - "actions": { - "rollover": { - "max_age": "30d", - "max_primary_shard_size": "50gb" - }, - "set_priority": {"priority": 100} - }, - "min_age": "0ms" - }, - "warm": { - "actions": { - "set_priority": {"priority": 50} - }, - "min_age": "30d" - } - } - } - } %} + "min_age": "0ms" + }, + "warm": { + "actions": { + "set_priority": {"priority": 50} + }, + "min_age": "30d" + } + } + } + } %} + {% do ADDON_INTEGRATION_DEFAULTS.update({integration_key: integration_defaults}) %} {% endfor %} {% endif %} diff --git a/salt/elasticfleet/tools/sbin/so-elastic-fleet-common b/salt/elasticfleet/tools/sbin/so-elastic-fleet-common index d8d0bdb1e..9780c8b12 100644 --- a/salt/elasticfleet/tools/sbin/so-elastic-fleet-common +++ b/salt/elasticfleet/tools/sbin/so-elastic-fleet-common @@ -88,7 +88,13 @@ elastic_fleet_package_version_check() { elastic_fleet_package_latest_version_check() { PACKAGE=$1 - curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/epm/packages/$PACKAGE" | jq -r '.item.latestVersion' + if output=$(curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/epm/packages/$PACKAGE" --fail); then + if version=$(jq -e -r '.item.latestVersion' <<< $output); then + echo "$version" + fi + else + echo "Error: Failed to get latest version for $PACKAGE" + fi } elastic_fleet_package_install() { @@ -149,9 +155,13 @@ elastic_fleet_integration_policy_package_name() { elastic_fleet_integration_policy_package_version() { AGENT_POLICY=$1 INTEGRATION=$2 - curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/agent_policies/$AGENT_POLICY" | jq -r --arg INTEGRATION "$INTEGRATION" '.item.package_policies[] | select(.name==$INTEGRATION)| .package.version' - if [ $? -ne 0 ]; then - echo "Error: Failed to retrieve package version for '$INTEGRATION' in '$AGENT_POLICY'." 
+
+  if output=$(curl -s -K /opt/so/conf/elasticsearch/curl.config -L -X GET "localhost:5601/api/fleet/agent_policies/$AGENT_POLICY" --fail); then
+    if version=$(jq -e -r --arg INTEGRATION "$INTEGRATION" '.item.package_policies[] | select(.name==$INTEGRATION)| .package.version' <<< $output); then
+      echo "$version"
+    fi
+  else
+    echo "Error: Failed to retrieve agent policy $AGENT_POLICY"
     exit 1
   fi
 }
diff --git a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-integration-upgrade b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-integration-upgrade
index 54540ba33..68a644798 100644
--- a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-integration-upgrade
+++ b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-integration-upgrade
@@ -34,10 +34,18 @@ for AGENT_POLICY in $agent_policies; do
   if [[ " ${default_packages[@]} " =~ " $PACKAGE_NAME " ]]; then
 {%- endif %}
     # Get currently installed version of package
-    PACKAGE_VERSION=$(elastic_fleet_integration_policy_package_version "$AGENT_POLICY" "$INTEGRATION")
-
-    # Get latest available version of package
-    AVAILABLE_VERSION=$(elastic_fleet_package_latest_version_check "$PACKAGE_NAME")
+    attempt=0
+    max_attempts=3
+    while [ $attempt -lt $max_attempts ]; do
+      if PACKAGE_VERSION=$(elastic_fleet_integration_policy_package_version "$AGENT_POLICY" "$INTEGRATION") && AVAILABLE_VERSION=$(elastic_fleet_package_latest_version_check "$PACKAGE_NAME"); then
+        break
+      fi
+      attempt=$((attempt + 1))
+    done
+    if [ $attempt -eq $max_attempts ]; then
+      echo "Error: Failed getting PACKAGE_VERSION or AVAILABLE_VERSION for '$INTEGRATION' ($PACKAGE_NAME)"
+      exit 1
+    fi
 
     # Get integration ID
     INTEGRATION_ID=$(elastic_fleet_integration_id "$AGENT_POLICY" "$INTEGRATION")
diff --git a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-optional-integrations-load b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-optional-integrations-load
index 26d775e82..886bbf75c 100644
--- a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-optional-integrations-load
+++ b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-optional-integrations-load
@@ -19,6 +19,7 @@ BULK_INSTALL_PACKAGE_LIST=/tmp/esfleet_bulk_install.json
 BULK_INSTALL_PACKAGE_TMP=/tmp/esfleet_bulk_install_tmp.json
 BULK_INSTALL_OUTPUT=/opt/so/state/esfleet_bulk_install_results.json
 PACKAGE_COMPONENTS=/opt/so/state/esfleet_package_components.json
+COMPONENT_TEMPLATES=/opt/so/state/esfleet_component_templates.json
 
 PENDING_UPDATE=false
 
@@ -147,14 +148,33 @@ if [[ -f $STATE_FILE_SUCCESS ]]; then
   done <<< "$(jq -c '.packages[]' "$INSTALLED_PACKAGE_LIST")"
 
   if [ "$PENDING_UPDATE" = true ]; then
-    # Run bulk install of packages
-    elastic_fleet_bulk_package_install $BULK_INSTALL_PACKAGE_LIST > $BULK_INSTALL_OUTPUT
+    # Run chunked install of packages
+    echo "" > $BULK_INSTALL_OUTPUT
+    pkg_group=1
+    pkg_filename="${BULK_INSTALL_PACKAGE_LIST%.json}"
+
+    jq -c '.packages | _nwise(25)' $BULK_INSTALL_PACKAGE_LIST | while read -r line; do
+      echo "$line" | jq '{ "packages": . }' > "${pkg_filename}_${pkg_group}.json"
+      pkg_group=$((pkg_group + 1))
+    done
+
+    for file in "${pkg_filename}_"*.json; do
+      [ -e "$file" ] || continue
+      elastic_fleet_bulk_package_install $file >> $BULK_INSTALL_OUTPUT
+    done
+    # cleanup any temp files for chunked package install
+    rm -f ${pkg_filename}_*.json $BULK_INSTALL_PACKAGE_LIST
   else
     echo "Elastic integrations don't appear to need installation/updating..."
fi # Write out file for generating index/component/ilm templates latest_installed_package_list=$(elastic_fleet_installed_packages) echo $latest_installed_package_list | jq '[.items[] | {name: .name, es_index_patterns: .dataStreams}]' > $PACKAGE_COMPONENTS + if retry 3 1 "so-elasticsearch-query / --fail --output /dev/null"; then + # Refresh installed component template list + latest_component_templates_list=$(so-elasticsearch-query _component_template | jq '.component_templates[] | .name' | jq -s '.') + echo $latest_component_templates_list > $COMPONENT_TEMPLATES + fi else # This is the installation of add-on integrations and upgrade of existing integrations. Exiting without error, next highstate will attempt to re-run. diff --git a/salt/elasticsearch/defaults.yaml b/salt/elasticsearch/defaults.yaml index 1f81e95d2..e08978e0d 100644 --- a/salt/elasticsearch/defaults.yaml +++ b/salt/elasticsearch/defaults.yaml @@ -1,6 +1,6 @@ elasticsearch: enabled: false - version: 8.17.3 + version: 8.18.4 index_clean: true config: action: @@ -567,6 +567,7 @@ elasticsearch: - common-settings - common-dynamic-mappings - winlog-mappings + - hash-mappings data_stream: {} ignore_missing_component_templates: [] index_patterns: @@ -3874,6 +3875,7 @@ elasticsearch: - vulnerability-mappings - common-settings - common-dynamic-mappings + - hash-mappings data_stream: {} ignore_missing_component_templates: [] index_patterns: @@ -3987,6 +3989,7 @@ elasticsearch: - vulnerability-mappings - common-settings - common-dynamic-mappings + - hash-mappings data_stream: {} ignore_missing_component_templates: [] index_patterns: @@ -4100,6 +4103,7 @@ elasticsearch: - vulnerability-mappings - common-settings - common-dynamic-mappings + - hash-mappings data_stream: {} ignore_missing_component_templates: [] index_patterns: @@ -4329,6 +4333,7 @@ elasticsearch: - zeek-mappings - common-settings - common-dynamic-mappings + - hash-mappings data_stream: {} ignore_missing_component_templates: [] index_patterns: diff --git a/salt/elasticsearch/files/ingest-dynamic/common b/salt/elasticsearch/files/ingest-dynamic/common index e84702909..814d8d4d5 100644 --- a/salt/elasticsearch/files/ingest-dynamic/common +++ b/salt/elasticsearch/files/ingest-dynamic/common @@ -26,7 +26,7 @@ { "geoip": { "field": "destination.ip", - "target_field": "destination_geo", + "target_field": "destination.as", "database_file": "GeoLite2-ASN.mmdb", "ignore_missing": true, "ignore_failure": true, @@ -36,13 +36,17 @@ { "geoip": { "field": "source.ip", - "target_field": "source_geo", + "target_field": "source.as", "database_file": "GeoLite2-ASN.mmdb", "ignore_missing": true, "ignore_failure": true, "properties": ["ip", "asn", "organization_name", "network"] } }, + { "rename": { "field": "destination.as.organization_name", "target_field": "destination.as.organization.name", "ignore_failure": true, "ignore_missing": true } }, + { "rename": { "field": "source.as.organization_name", "target_field": "source.as.organization.name", "ignore_failure": true, "ignore_missing": true } }, + { "rename": { "field": "destination.as.asn", "target_field": "destination.as.number", "ignore_failure": true, "ignore_missing": true } }, + { "rename": { "field": "source.as.asn", "target_field": "source.as.number", "ignore_failure": true, "ignore_missing": true } }, { "set": { "if": "ctx.event?.severity == 1", "field": "event.severity_label", "value": "low", "override": true } }, { "set": { "if": "ctx.event?.severity == 2", "field": "event.severity_label", "value": "medium", "override": true } }, 
{ "set": { "if": "ctx.event?.severity == 3", "field": "event.severity_label", "value": "high", "override": true } }, diff --git a/salt/elasticsearch/files/ingest/logs-pfsense.log-1.21.0 b/salt/elasticsearch/files/ingest/logs-pfsense.log-1.23.0 similarity index 93% rename from salt/elasticsearch/files/ingest/logs-pfsense.log-1.21.0 rename to salt/elasticsearch/files/ingest/logs-pfsense.log-1.23.0 index 7c4f2575f..e79b91b26 100644 --- a/salt/elasticsearch/files/ingest/logs-pfsense.log-1.21.0 +++ b/salt/elasticsearch/files/ingest/logs-pfsense.log-1.23.0 @@ -107,61 +107,61 @@ }, { "pipeline": { - "name": "logs-pfsense.log-1.21.0-firewall", + "name": "logs-pfsense.log-1.23.0-firewall", "if": "ctx.event.provider == 'filterlog'" } }, { "pipeline": { - "name": "logs-pfsense.log-1.21.0-openvpn", + "name": "logs-pfsense.log-1.23.0-openvpn", "if": "ctx.event.provider == 'openvpn'" } }, { "pipeline": { - "name": "logs-pfsense.log-1.21.0-ipsec", + "name": "logs-pfsense.log-1.23.0-ipsec", "if": "ctx.event.provider == 'charon'" } }, { "pipeline": { - "name": "logs-pfsense.log-1.21.0-dhcp", + "name": "logs-pfsense.log-1.23.0-dhcp", "if": "[\"dhcpd\", \"dhclient\", \"dhcp6c\"].contains(ctx.event.provider)" } }, { "pipeline": { - "name": "logs-pfsense.log-1.21.0-unbound", + "name": "logs-pfsense.log-1.23.0-unbound", "if": "ctx.event.provider == 'unbound'" } }, { "pipeline": { - "name": "logs-pfsense.log-1.21.0-haproxy", + "name": "logs-pfsense.log-1.23.0-haproxy", "if": "ctx.event.provider == 'haproxy'" } }, { "pipeline": { - "name": "logs-pfsense.log-1.21.0-php-fpm", + "name": "logs-pfsense.log-1.23.0-php-fpm", "if": "ctx.event.provider == 'php-fpm'" } }, { "pipeline": { - "name": "logs-pfsense.log-1.21.0-squid", + "name": "logs-pfsense.log-1.23.0-squid", "if": "ctx.event.provider == 'squid'" } }, { "pipeline": { - "name": "logs-pfsense.log-1.21.0-snort", + "name": "logs-pfsense.log-1.23.0-snort", "if": "ctx.event.provider == 'snort'" } }, { "pipeline": { - "name": "logs-pfsense.log-1.21.0-suricata", + "name": "logs-pfsense.log-1.23.0-suricata", "if": "ctx.event.provider == 'suricata'" } }, @@ -358,14 +358,6 @@ "source": "void handleMap(Map map) {\n for (def x : map.values()) {\n if (x instanceof Map) {\n handleMap(x);\n } else if (x instanceof List) {\n handleList(x);\n }\n }\n map.values().removeIf(v -> v == null || (v instanceof String && v == \"-\"));\n}\nvoid handleList(List list) {\n for (def x : list) {\n if (x instanceof Map) {\n handleMap(x);\n } else if (x instanceof List) {\n handleList(x);\n }\n }\n}\nhandleMap(ctx);\n" } }, - { - "remove": { - "field": "event.original", - "if": "ctx.tags == null || !(ctx.tags.contains('preserve_original_event'))", - "ignore_failure": true, - "ignore_missing": true - } - }, { "pipeline": { "name": "global@custom", diff --git a/salt/elasticsearch/files/ingest/logs-pfsense.log-1.21.0-suricata b/salt/elasticsearch/files/ingest/logs-pfsense.log-1.23.0-suricata similarity index 100% rename from salt/elasticsearch/files/ingest/logs-pfsense.log-1.21.0-suricata rename to salt/elasticsearch/files/ingest/logs-pfsense.log-1.23.0-suricata diff --git a/salt/elasticsearch/files/ingest/zeek.conn b/salt/elasticsearch/files/ingest/zeek.conn index 6051d93a1..57558e863 100644 --- a/salt/elasticsearch/files/ingest/zeek.conn +++ b/salt/elasticsearch/files/ingest/zeek.conn @@ -24,6 +24,10 @@ { "rename": { "field": "message2.resp_cc", "target_field": "server.country_code", "ignore_missing": true } }, { "rename": { "field": "message2.sensorname", "target_field": "observer.name", 
"ignore_missing": true } }, { "rename": { "field": "message2.vlan", "target_field": "network.vlan.id", "ignore_missing": true } }, + { "rename": { "field": "message2.ja4l", "target_field": "hash.ja4l", "ignore_missing" : true, "if": "ctx.message2?.ja4l != null && ctx.message2.ja4l.length() > 0" }}, + { "rename": { "field": "message2.ja4ls", "target_field": "hash.ja4ls", "ignore_missing" : true, "if": "ctx.message2?.ja4ls != null && ctx.message2.ja4ls.length() > 0" }}, + { "rename": { "field": "message2.ja4t", "target_field": "hash.ja4t", "ignore_missing" : true, "if": "ctx.message2?.ja4t != null && ctx.message2.ja4t.length() > 0" }}, + { "rename": { "field": "message2.ja4ts", "target_field": "hash.ja4ts", "ignore_missing" : true, "if": "ctx.message2?.ja4ts != null && ctx.message2.ja4ts.length() > 0" }}, { "script": { "lang": "painless", "source": "ctx.network.bytes = (ctx.client.bytes + ctx.server.bytes)", "ignore_failure": true } }, { "set": { "if": "ctx.connection?.state == 'S0'", "field": "connection.state_description", "value": "Connection attempt seen, no reply" } }, { "set": { "if": "ctx.connection?.state == 'S1'", "field": "connection.state_description", "value": "Connection established, not terminated" } }, diff --git a/salt/elasticsearch/files/ingest/zeek.http b/salt/elasticsearch/files/ingest/zeek.http index 2224da2f7..40642052a 100644 --- a/salt/elasticsearch/files/ingest/zeek.http +++ b/salt/elasticsearch/files/ingest/zeek.http @@ -27,6 +27,7 @@ { "rename": { "field": "message2.resp_fuids", "target_field": "log.id.resp_fuids", "ignore_missing": true } }, { "rename": { "field": "message2.resp_filenames", "target_field": "file.resp_filenames", "ignore_missing": true } }, { "rename": { "field": "message2.resp_mime_types", "target_field": "file.resp_mime_types", "ignore_missing": true } }, + { "rename": { "field": "message2.ja4h", "target_field": "hash.ja4h", "ignore_missing": true, "if": "ctx?.message2?.ja4h != null && ctx.message2.ja4h.length() > 0" } }, { "script": { "lang": "painless", "source": "ctx.uri_length = ctx.uri.length()", "ignore_failure": true } }, { "script": { "lang": "painless", "source": "ctx.useragent_length = ctx.useragent.length()", "ignore_failure": true } }, { "script": { "lang": "painless", "source": "ctx.virtual_host_length = ctx.virtual_host.length()", "ignore_failure": true } }, diff --git a/salt/elasticsearch/files/ingest/zeek.http2 b/salt/elasticsearch/files/ingest/zeek.http2 index eeeecef8c..34cce0f7b 100644 --- a/salt/elasticsearch/files/ingest/zeek.http2 +++ b/salt/elasticsearch/files/ingest/zeek.http2 @@ -27,6 +27,7 @@ { "rename": { "field": "message2.resp_filenames", "target_field": "file.resp_filenames", "ignore_missing": true } }, { "rename": { "field": "message2.resp_mime_types", "target_field": "file.resp_mime_types", "ignore_missing": true } }, { "rename": { "field": "message2.stream_id", "target_field": "http2.stream_id", "ignore_missing": true } }, + { "rename": { "field": "message2.ja4h", "target_field": "hash.ja4h", "ignore_missing": true, "if": "ctx?.message2?.ja4h != null && ctx.message2.ja4h.length() > 0" } }, { "remove": { "field": "message2.tags", "ignore_failure": true } }, { "remove": { "field": ["host"], "ignore_failure": true } }, { "script": { "lang": "painless", "source": "ctx.uri_length = ctx.uri.length()", "ignore_failure": true } }, diff --git a/salt/elasticsearch/files/ingest/zeek.ja4ssh b/salt/elasticsearch/files/ingest/zeek.ja4ssh new file mode 100644 index 000000000..5901e65f1 --- /dev/null +++ 
b/salt/elasticsearch/files/ingest/zeek.ja4ssh @@ -0,0 +1,10 @@ +{ + "description": "zeek.ja4ssh", + "processors": [ + {"set": {"field": "event.dataset","value": "ja4ssh"}}, + {"remove": {"field": "host","ignore_missing": true,"ignore_failure": true}}, + {"json": {"field": "message","target_field": "message2","ignore_failure": true}}, + {"rename": {"field": "message2.ja4ssh", "target_field": "hash.ja4ssh", "ignore_missing": true, "if": "ctx?.message2?.ja4ssh != null && ctx.message2.ja4ssh.length() > 0" }}, + {"pipeline": {"name": "zeek.common"}} + ] +} \ No newline at end of file diff --git a/salt/elasticsearch/files/ingest/zeek.ssl b/salt/elasticsearch/files/ingest/zeek.ssl index 87174d3d2..0bd6fedb2 100644 --- a/salt/elasticsearch/files/ingest/zeek.ssl +++ b/salt/elasticsearch/files/ingest/zeek.ssl @@ -23,6 +23,8 @@ { "rename": { "field": "message2.validation_status","target_field": "ssl.validation_status", "ignore_missing": true } }, { "rename": { "field": "message2.ja3", "target_field": "hash.ja3", "ignore_missing": true } }, { "rename": { "field": "message2.ja3s", "target_field": "hash.ja3s", "ignore_missing": true } }, + { "rename": { "field": "message2.ja4", "target_field": "hash.ja4", "ignore_missing": true, "if": "ctx?.message2?.ja4 != null && ctx.message2.ja4.length() > 0" } }, + { "rename": { "field": "message2.ja4s", "target_field": "hash.ja4s", "ignore_missing": true, "if": "ctx?.message2?.ja4s != null && ctx.message2.ja4s.length() > 0" } }, { "foreach": { "if": "ctx?.tls?.client?.hash?.sha256 !=null", diff --git a/salt/elasticsearch/files/ingest/zeek.x509 b/salt/elasticsearch/files/ingest/zeek.x509 index 64d06131a..b639cb417 100644 --- a/salt/elasticsearch/files/ingest/zeek.x509 +++ b/salt/elasticsearch/files/ingest/zeek.x509 @@ -42,6 +42,7 @@ { "dot_expander": { "field": "basic_constraints.path_length", "path": "message2", "ignore_failure": true } }, { "rename": { "field": "message2.basic_constraints.path_length", "target_field": "x509.basic_constraints.path_length", "ignore_missing": true } }, { "rename": { "field": "message2.fingerprint", "target_field": "hash.sha256", "ignore_missing": true } }, + { "rename": { "field": "message2.ja4x", "target_field": "hash.ja4x", "ignore_missing": true, "if": "ctx?.message2?.ja4x != null && ctx.message2.ja4x.length() > 0" } }, { "pipeline": { "name": "zeek.common_ssl" } } ] } diff --git a/salt/elasticsearch/template.map.jinja b/salt/elasticsearch/template.map.jinja index aa90cb81b..414d8a6b4 100644 --- a/salt/elasticsearch/template.map.jinja +++ b/salt/elasticsearch/template.map.jinja @@ -15,7 +15,7 @@ {% set ES_INDEX_SETTINGS_ORIG = ELASTICSEARCHDEFAULTS.elasticsearch.index_settings %} {# start generation of integration default index_settings #} -{% if salt['file.file_exists']('/opt/so/state/esfleet_package_components.json') %} +{% if salt['file.file_exists']('/opt/so/state/esfleet_package_components.json') and salt['file.file_exists']('/opt/so/state/esfleet_component_templates.json') %} {% set check_package_components = salt['file.stats']('/opt/so/state/esfleet_package_components.json') %} {% if check_package_components.size > 1 %} {% from 'elasticfleet/integration-defaults.map.jinja' import ADDON_INTEGRATION_DEFAULTS %} diff --git a/salt/elasticsearch/templates/component/ecs/hash.json b/salt/elasticsearch/templates/component/ecs/hash.json new file mode 100644 index 000000000..c9d1f5a5d --- /dev/null +++ b/salt/elasticsearch/templates/component/ecs/hash.json @@ -0,0 +1,69 @@ +{ + "template": { + "mappings": { + "properties": { + 
"hash": { + "type": "object", + "properties": { + "ja3": { + "type": "keyword", + "ignore_above": 1024 + }, + "ja3s": { + "type": "keyword", + "ignore_above": 1024 + }, + "hassh": { + "type": "keyword", + "ignore_above": 1024 + }, + "md5": { + "type": "keyword", + "ignore_above": 1024 + }, + "sha1": { + "type": "keyword", + "ignore_above": 1024 + }, + "sha256": { + "type": "keyword", + "ignore_above": 1024 + }, + "ja4": { + "type": "keyword", + "ignore_above": 1024 + }, + "ja4l": { + "type": "keyword", + "ignore_above": 1024 + }, + "ja4ls": { + "type": "keyword", + "ignore_above": 1024 + }, + "ja4t": { + "type": "keyword", + "ignore_above": 1024 + }, + "ja4ts": { + "type": "keyword", + "ignore_above": 1024 + }, + "ja4ssh": { + "type": "keyword", + "ignore_above": 1024 + }, + "ja4h": { + "type": "keyword", + "ignore_above": 1024 + }, + "ja4x": { + "type": "keyword", + "ignore_above": 1024 + } + } + } + } + } + } +} \ No newline at end of file diff --git a/salt/elasticsearch/tools/sbin/so-elasticsearch-roles-load b/salt/elasticsearch/tools/sbin/so-elasticsearch-roles-load index 90b262989..d1111fe2f 100755 --- a/salt/elasticsearch/tools/sbin/so-elasticsearch-roles-load +++ b/salt/elasticsearch/tools/sbin/so-elasticsearch-roles-load @@ -21,7 +21,7 @@ while [[ "$COUNT" -le 240 ]]; do ELASTICSEARCH_CONNECTED="yes" echo "connected!" # Check cluster health once connected - so-elasticsearch-query _cluster/health?wait_for_status=yellow > /dev/null 2>&1 + so-elasticsearch-query _cluster/health?wait_for_status=yellow\&timeout=120s > /dev/null 2>&1 break else ((COUNT+=1)) diff --git a/salt/elasticsearch/tools/sbin/so-elasticsearch-troubleshoot b/salt/elasticsearch/tools/sbin/so-elasticsearch-troubleshoot new file mode 100644 index 000000000..1f59610e9 --- /dev/null +++ b/salt/elasticsearch/tools/sbin/so-elasticsearch-troubleshoot @@ -0,0 +1,195 @@ +#!/bin/bash + +. /usr/sbin/so-common + +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +BOLD='\033[1;37m' +NC='\033[0m' + +log_title() { + if [ $1 == "LOG" ]; then + echo -e "\n${BOLD}================ $2 ================${NC}\n" + elif [ $1 == "OK" ]; then + echo -e "${GREEN} $2 ${NC}" + elif [ $1 == "WARN" ]; then + echo -e "${YELLOW} $2 ${NC}" + elif [ $1 == "ERROR" ]; then + echo -e "${RED} $2 ${NC}" + fi +} + +health_report() { + if ! health_report_output=$(so-elasticsearch-query _health_report?format=json --fail 2>/dev/null); then + log_title "ERROR" "Failed to retrieve health report from Elasticsearch" + return 1 + fi + non_green_count=$(echo "$health_report_output" | jq '[.indicators | to_entries[] | select(.value.status != "green")] | length') + + if [ "$non_green_count" -gt 0 ]; then + echo "$health_report_output" | jq -r '.indicators | to_entries[] | select(.value.status != "green") | .key' | while read -r indicator_name; do + indicator=$(echo "$health_report_output" | jq -r ".indicators.\"$indicator_name\"") + status=$(echo "$indicator" | jq -r '.status') + symptom=$(echo "$indicator" | jq -r '.symptom // "No symptom available"') + + # reormat indicator name + display_name=$(echo "$indicator_name" | tr '_' ' ' | sed 's/\b\(.\)/\u\1/g') + + if [ "$status" = "yellow" ]; then + log_title "WARN" "$display_name: $symptom" + else + log_title "ERROR" "$display_name: $symptom" + fi + + # diagnosis if available + echo "$indicator" | jq -c '.diagnosis[]? 
// empty' | while read -r diagnosis; do + cause=$(echo "$diagnosis" | jq -r '.cause // "Unknown"') + action=$(echo "$diagnosis" | jq -r '.action // "No action specified"') + + echo -e " ${BOLD}Cause:${NC} $cause\n" + echo -e " ${BOLD}Action:${NC} $action\n" + + # Check for affected indices + affected_indices=$(echo "$diagnosis" | jq -r '.affected_resources.indices[]? // empty') + if [ -n "$affected_indices" ]; then + echo -e " ${BOLD}Affected indices:${NC}" + total_indices=$(echo "$affected_indices" | wc -l) + echo "$affected_indices" | head -10 | while read -r index; do + echo " - $index" + done + if [ "$total_indices" -gt 10 ]; then + remaining=$((total_indices - 10)) + echo " ... and $remaining more indices (truncated for readability)" + fi + fi + echo + done + done + else + log_title "OK" "All health indicators are green" + fi +} + +elasticsearch_status() { + log_title "LOG" "Elasticsearch Status" + if so-elasticsearch-query / --fail --output /dev/null; then + health_report + else + log_title "ERROR" "Elasticsearch API is not accessible" + so-status + log_title "ERROR" "Make sure Elasticsearch is running. Addtionally, check for startup errors in /opt/so/log/elasticsearch/securityonion.log${NC}\n" + + exit 1 + fi + +} + +indices_by_age() { + log_title "LOG" "Indices by Creation Date - Size > 1KB" + log_title "WARN" "Since high/flood watermark has been reached consider updating ILM policies.\n" + if ! indices_output=$(so-elasticsearch-query '_cat/indices?v&s=creation.date:asc&h=creation.date.string,index,status,health,docs.count,pri.store.size&bytes=b&format=json' --fail 2>/dev/null); then + log_title "ERROR" "Failed to retrieve indices list from Elasticsearch" + return 1 + fi + + # Filter for indices with size > 1KB (1024 bytes) and format output + echo -e "${BOLD}Creation Date Name Size${NC}" + echo -e "${BOLD}--------------------------------------------------------------------------------------------------------------${NC}" + + # Create list of indices excluding .internal, so-detection*, so-case* + echo "$indices_output" | jq -r '.[] | select((."pri.store.size" | tonumber) > 1024) | select(.index | (startswith(".internal") or startswith("so-detection") or startswith("so-case")) | not ) | "\(."creation.date.string") | \(.index) | \(."pri.store.size")"' | while IFS='|' read -r creation_date index_name size_bytes; do + # Convert bytes to GB / MB + if [ "$size_bytes" -gt 1073741824 ]; then + size_human=$(echo "scale=2; $size_bytes / 1073741824" | bc)GB + else + size_human=$(echo "scale=2; $size_bytes / 1048576" | bc)MB + fi + + creation_date=$(date -d "$creation_date" '+%Y-%m-%dT%H:%MZ' ) + + # Format output with spacing + printf "%-19s %-76s %10s\n" "$creation_date" "$index_name" "$size_human" + done +} + +watermark_settings() { + watermark_path=".defaults.cluster.routing.allocation.disk.watermark" + if ! watermark_output=$(so-elasticsearch-query _cluster/settings?include_defaults=true\&filter_path=*.cluster.routing.allocation.disk.* --fail 2>/dev/null); then + log_title "ERROR" "Failed to retrieve watermark settings from Elasticsearch" + return 1 + fi + + if ! 
disk_allocation_output=$(so-elasticsearch-query _cat/nodes?v\&h=name,ip,disk.used_percent,disk.avail,disk.total,node.role\&format=json --fail 2>/dev/null); then + log_title "ERROR" "Failed to retrieve disk allocation data from Elasticsearch" + return 1 + fi + + flood=$(echo $watermark_output | jq -r "$watermark_path.flood_stage" ) + high=$(echo $watermark_output | jq -r "$watermark_path.high" ) + low=$(echo $watermark_output | jq -r "$watermark_path.low" ) + + # Strip percentage signs for comparison + flood_num=${flood%\%} + high_num=${high%\%} + low_num=${low%\%} + + # Check each nodes disk usage + log_title "LOG" "Disk Usage Check" + echo -e "${BOLD}LOW:${GREEN}$low${NC}${BOLD} HIGH:${YELLOW}${high}${NC}${BOLD} FLOOD:${RED}${flood}${NC}\n" + + # Only show data nodes (d=data, h=hot, w=warm, c=cold, f=frozen, s=content) + echo "$disk_allocation_output" | jq -r '.[] | select(.["node.role"] | test("[dhwcfs]")) | "\(.name)|\(.["disk.used_percent"])"' | while IFS='|' read -r node_name disk_used; do + disk_used_num=$(echo $disk_used | bc) + + if (( $(echo "$disk_used_num >= $flood_num" | bc -l) )); then + log_title "ERROR" "$node_name is at or above the flood watermark ($flood)! Disk usage: ${disk_used}%" + touch /tmp/watermark_reached + elif (( $(echo "$disk_used_num >= $high_num" | bc -l) )); then + log_title "ERROR" "$node_name is at or above the high watermark ($high)! Disk usage: ${disk_used}%" + touch /tmp/watermark_reached + else + log_title "OK" "$node_name disk usage: ${disk_used}%" + fi + done + + # Check if we need to show indices by age + if [ -f /tmp/watermark_reached ]; then + indices_by_age + rm -f /tmp/watermark_reached + fi + +} + +unassigned_shards() { + + if ! unassigned_shards_output=$(so-elasticsearch-query _cat/shards?v\&h=index,shard,prirep,state,unassigned.reason,unassigned.details\&s=state\&format=json --fail 2>/dev/null); then + log_title "ERROR" "Failed to retrieve shard data from Elasticsearch" + return 1 + fi + + log_title "LOG" "Unassigned Shards Check" + # Check if there are any UNASSIGNED shards + unassigned_count=$(echo "$unassigned_shards_output" | jq '[.[] | select(.state == "UNASSIGNED")] | length') + + if [ "$unassigned_count" -gt 0 ]; then + echo "$unassigned_shards_output" | jq -r '.[] | select(.state == "UNASSIGNED") | "\(.index)|\(.shard)|\(.prirep)|\(."unassigned.reason")"' | while IFS='|' read -r index shard prirep reason; do + if [ "$prirep" = "r" ]; then + log_title "WARN" "Replica shard for index $index is unassigned. Reason: $reason" + elif [ "$prirep" = "p" ]; then + log_title "ERROR" "Primary shard for index $index is unassigned. Reason: $reason" + fi + done + else + log_title "OK" "All shards are assigned" + fi +} + +main() { + elasticsearch_status + watermark_settings + unassigned_shards +} + +main diff --git a/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-templates-load b/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-templates-load index 758f601eb..4ac1b4d5f 100755 --- a/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-templates-load +++ b/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-templates-load @@ -136,7 +136,7 @@ if [ ! -f $STATE_FILE_SUCCESS ]; then TEMPLATE=${i::-14} COMPONENT_PATTERN=${TEMPLATE:3} MATCH=$(echo "$TEMPLATE" | grep -E "^so-logs-|^so-metrics" | grep -vE "detections|osquery") - if [[ -n "$MATCH" && ! "$COMPONENT_LIST" =~ "$COMPONENT_PATTERN" && ! "$COMPONENT_PATTERN" =~ logs-http_endpoint\.generic|logs-winlog\.winlog ]]; then + if [[ -n "$MATCH" && ! "$COMPONENT_LIST" =~ "$COMPONENT_PATTERN" && ! 
"$COMPONENT_PATTERN" =~ \.generic|logs-winlog\.winlog ]]; then load_failures=$((load_failures+1)) echo "Component template does not exist for $COMPONENT_PATTERN. The index template will not be loaded. Load failures: $load_failures" else diff --git a/salt/hypervisor/defaults.yaml b/salt/hypervisor/defaults.yaml index 06509828c..938fa8bb2 100644 --- a/salt/hypervisor/defaults.yaml +++ b/salt/hypervisor/defaults.yaml @@ -17,42 +17,109 @@ hypervisor: 6: pci_0000_02_00_1 7: pci_0000_41_00_0 8: pci_0000_41_00_1 - model1: + SOSSNNV: hardware: cpu: 128 - memory: 128 + memory: 256 disk: - 1: pci_0000_c7_00_0 - 2: pci_0000_c8_00_0 + 1: pci_0000_42_00_0 + 2: pci_0000_43_00_0 + 3: pci_0000_44_00_0 + 4: pci_0000_45_00_0 copper: - 1: pci_0000_c4_00_0 - 2: pci_0000_c4_00_1 - 3: pci_0000_c4_00_2 - 4: pci_0000_c4_00_3 + sfp: + 1: pci_0000_02_00_0 + 2: pci_0000_02_00_1 + 3: pci_0000_41_00_0 + 4: pci_0000_41_00_1 + SOSSNNV-DE02: + hardware: + cpu: 128 + memory: 384 + disk: + 1: pci_0000_41_00_0 + 2: pci_0000_42_00_0 + 3: pci_0000_81_00_0 + 4: pci_0000_82_00_0 + 5: pci_0000_83_00_0 + 6: pci_0000_84_00_0 + copper: + 1: pci_0000_85_00_0 + 2: pci_0000_85_00_1 + 3: pci_0000_85_00_2 + 4: pci_0000_85_00_3 + sfp: + 5: pci_0000_c4_00_0 + 6: pci_0000_c4_00_1 + 7: pci_0000_c5_00_0 + 8: pci_0000_c5_00_1 + 9: pci_0000_c5_00_2 + 10: pci_0000_c5_00_3 + SOSSN7200: + hardware: + cpu: 128 + memory: 256 + copper: + 1: pci_0000_03_00_0 + 2: pci_0000_03_00_1 + 3: pci_0000_03_00_2 + 4: pci_0000_03_00_3 sfp: 5: pci_0000_02_00_0 6: pci_0000_02_00_1 - 7: pci_0000_41_00_0 - 8: pci_0000_41_00_1 - model2: - cpu: 256 - memory: 256 - disk: - 1: pci_0000_c7_00_0 - 2: pci_0000_c8_00_0 - 3: pci_0000_c9_00_0 - 4: pci_0000_c10_00_0 + 7: pci_0000_81_00_0 + 8: pci_0000_81_00_1 + 9: pci_0000_81_00_2 + 10: pci_0000_81_00_3 + SOSSN7200-DE02: + hardware: + cpu: 128 + memory: 384 copper: - 1: pci_0000_c4_00_0 - 2: pci_0000_c4_00_1 - 3: pci_0000_c4_00_2 - 4: pci_0000_c4_00_3 - 5: pci_0000_c5_00_0 - 6: pci_0000_c5_00_1 - 7: pci_0000_c5_00_2 - 8: pci_0000_c5_00_3 + 1: pci_0000_82_00_0 + 2: pci_0000_82_00_1 + 3: pci_0000_82_00_2 + 4: pci_0000_82_00_3 sfp: - 9: pci_0000_02_00_0 - 10: pci_0000_02_00_1 - 11: pci_0000_41_00_0 - 12: pci_0000_41_00_1 \ No newline at end of file + 5: pci_0000_c4_00_0 + 6: pci_0000_c4_00_1 + 7: pci_0000_c5_00_0 + 8: pci_0000_c5_00_1 + 9: pci_0000_c6_00_0 + 10: pci_0000_c6_00_1 + 11: pci_0000_c6_00_2 + 12: pci_0000_c6_00_3 + SOS4000: + hardware: + cpu: 128 + memory: 256 + copper: + 1: pci_0000_03_00_0 + 2: pci_0000_03_00_1 + 3: pci_0000_03_00_2 + 4: pci_0000_03_00_3 + sfp: + 5: pci_0000_02_00_0 + 6: pci_0000_02_00_1 + 7: pci_0000_81_00_0 + 8: pci_0000_81_00_1 + 9: pci_0000_81_00_2 + 10: pci_0000_81_00_3 + SOS5000-DE02: + hardware: + cpu: 128 + memory: 384 + copper: + 1: pci_0000_82_00_0 + 2: pci_0000_82_00_1 + 3: pci_0000_82_00_2 + 4: pci_0000_82_00_3 + sfp: + 5: pci_0000_c4_00_0 + 6: pci_0000_c4_00_1 + 7: pci_0000_c5_00_0 + 8: pci_0000_c5_00_1 + 9: pci_0000_c6_00_0 + 10: pci_0000_c6_00_1 + 11: pci_0000_c6_00_2 + 12: pci_0000_c6_00_3 diff --git a/salt/hypervisor/tools/sbin/so-nvme-raid1.sh b/salt/hypervisor/tools/sbin/so-nvme-raid1.sh index 79ccbb33c..ab97e3c88 100644 --- a/salt/hypervisor/tools/sbin/so-nvme-raid1.sh +++ b/salt/hypervisor/tools/sbin/so-nvme-raid1.sh @@ -12,7 +12,7 @@ # - Detects and reports existing RAID configurations # - Thoroughly cleans target drives of any existing data/configurations # - Creates GPT partition tables with RAID-type partitions -# - Establishes RAID-1 array (/dev/md0) for data redundancy +# - 
Establishes RAID-1 array (${RAID_DEVICE}) for data redundancy # - Formats the array with XFS filesystem for performance # - Automatically mounts at /nsm and configures for boot persistence # - Provides monitoring information for resync operations @@ -37,6 +37,11 @@ # Exit on any error set -e +# Configuration variables +RAID_ARRAY_NAME="md0" +RAID_DEVICE="/dev/${RAID_ARRAY_NAME}" +MOUNT_POINT="/nsm" + # Function to log messages log() { echo "[$(date '+%Y-%m-%d %H:%M:%S')] $1" @@ -50,36 +55,68 @@ check_root() { fi } +# Function to find MD arrays using specific devices +find_md_arrays_using_devices() { + local target_devices=("$@") + local found_arrays=() + + # Parse /proc/mdstat to find arrays using our target devices + if [ -f "/proc/mdstat" ]; then + while IFS= read -r line; do + if [[ $line =~ ^(md[0-9]+) ]]; then + local array_name="${BASH_REMATCH[1]}" + local array_path="/dev/$array_name" + + # Check if this array uses any of our target devices + for device in "${target_devices[@]}"; do + if echo "$line" | grep -q "${device##*/}"; then + found_arrays+=("$array_path") + break + fi + done + fi + done < /proc/mdstat + fi + + printf '%s\n' "${found_arrays[@]}" +} + # Function to check if RAID is already set up check_existing_raid() { - if [ -e "/dev/md0" ]; then - if mdadm --detail /dev/md0 &>/dev/null; then - local raid_state=$(mdadm --detail /dev/md0 | grep "State" | awk '{print $3}') - local mount_point="/nsm" - - log "Found existing RAID array /dev/md0 (State: $raid_state)" - - if mountpoint -q "$mount_point"; then - log "RAID is already mounted at $mount_point" - log "Current RAID details:" - mdadm --detail /dev/md0 + local target_devices=("/dev/nvme0n1p1" "/dev/nvme1n1p1") + local found_arrays=($(find_md_arrays_using_devices "${target_devices[@]}")) + + # Check if we found any arrays using our target devices + if [ ${#found_arrays[@]} -gt 0 ]; then + for array_path in "${found_arrays[@]}"; do + if mdadm --detail "$array_path" &>/dev/null; then + local raid_state=$(mdadm --detail "$array_path" | grep "State" | awk '{print $3}') + local mount_point="/nsm" - # Check if resyncing - if grep -q "resync" /proc/mdstat; then - log "RAID is currently resyncing:" - grep resync /proc/mdstat - log "You can monitor progress with: watch -n 60 cat /proc/mdstat" - else - log "RAID is fully synced and operational" + log "Found existing RAID array $array_path (State: $raid_state)" + + if mountpoint -q "$mount_point"; then + log "RAID is already mounted at $mount_point" + log "Current RAID details:" + mdadm --detail "$array_path" + + # Check if resyncing + if grep -q "resync" /proc/mdstat; then + log "RAID is currently resyncing:" + grep resync /proc/mdstat + log "You can monitor progress with: watch -n 60 cat /proc/mdstat" + else + log "RAID is fully synced and operational" + fi + + # Show disk usage + log "Current disk usage:" + df -h "$mount_point" + + exit 0 fi - - # Show disk usage - log "Current disk usage:" - df -h "$mount_point" - - exit 0 fi - fi + done fi # Check if any of the target devices are in use @@ -90,10 +127,29 @@ check_existing_raid() { fi if mdadm --examine "$device" &>/dev/null || mdadm --examine "${device}p1" &>/dev/null; then + # Find the actual array name for this device + local device_arrays=($(find_md_arrays_using_devices "${device}p1")) + local array_name="" + + if [ ${#device_arrays[@]} -gt 0 ]; then + array_name="${device_arrays[0]}" + else + # Fallback: try to find array name from /proc/mdstat + local partition_name="${device##*/}p1" + array_name=$(grep -l "$partition_name" 
/proc/mdstat 2>/dev/null | head -1) + if [ -n "$array_name" ]; then + array_name=$(grep "^md[0-9]" /proc/mdstat | grep "$partition_name" | awk '{print "/dev/" $1}' | head -1) + fi + # Final fallback + if [ -z "$array_name" ]; then + array_name="$RAID_DEVICE" + fi + fi + log "Error: $device appears to be part of an existing RAID array" log "To reuse this device, you must first:" log "1. Unmount any filesystems" - log "2. Stop the RAID array: mdadm --stop /dev/md0" + log "2. Stop the RAID array: mdadm --stop $array_name" log "3. Zero the superblock: mdadm --zero-superblock ${device}p1" exit 1 fi @@ -183,20 +239,20 @@ main() { fi log "Creating RAID array" - mdadm --create /dev/md0 --level=1 --raid-devices=2 \ + mdadm --create "$RAID_DEVICE" --level=1 --raid-devices=2 \ --metadata=1.2 \ /dev/nvme0n1p1 /dev/nvme1n1p1 \ --force --run log "Creating XFS filesystem" - mkfs.xfs -f /dev/md0 + mkfs.xfs -f "$RAID_DEVICE" log "Creating mount point" mkdir -p /nsm log "Updating fstab" - sed -i '/\/dev\/md0/d' /etc/fstab - echo "/dev/md0 /nsm xfs defaults,nofail 0 0" >> /etc/fstab + sed -i "\|${RAID_DEVICE}|d" /etc/fstab + echo "${RAID_DEVICE} ${MOUNT_POINT} xfs defaults,nofail 0 0" >> /etc/fstab log "Reloading systemd daemon" systemctl daemon-reload @@ -209,7 +265,7 @@ main() { log "RAID setup complete" log "RAID array details:" - mdadm --detail /dev/md0 + mdadm --detail "$RAID_DEVICE" if grep -q "resync" /proc/mdstat; then log "RAID is currently resyncing. You can monitor progress with:" diff --git a/salt/idh/config.sls b/salt/idh/config.sls index 91f809f9e..2e6315007 100644 --- a/salt/idh/config.sls +++ b/salt/idh/config.sls @@ -86,7 +86,7 @@ idh_sbin: file.recurse: - name: /usr/sbin - source: salt://idh/tools/sbin - - user: 934 + - user: 939 - group: 939 - file_mode: 755 diff --git a/salt/idstools/config.sls b/salt/idstools/config.sls index a44b02807..cea75ab9a 100644 --- a/salt/idstools/config.sls +++ b/salt/idstools/config.sls @@ -20,7 +20,7 @@ idstools_sbin: file.recurse: - name: /usr/sbin - source: salt://idstools/tools/sbin - - user: 934 + - user: 939 - group: 939 - file_mode: 755 @@ -29,7 +29,7 @@ idstools_sbin: # file.recurse: # - name: /usr/sbin # - source: salt://idstools/tools/sbin_jinja -# - user: 934 +# - user: 939 # - group: 939 # - file_mode: 755 # - template: jinja @@ -38,7 +38,7 @@ idstools_so-rule-update: file.managed: - name: /usr/sbin/so-rule-update - source: salt://idstools/tools/sbin_jinja/so-rule-update - - user: 934 + - user: 939 - group: 939 - mode: 755 - template: jinja diff --git a/salt/kibana/defaults.yaml b/salt/kibana/defaults.yaml index 2446821f1..29d9b9bf6 100644 --- a/salt/kibana/defaults.yaml +++ b/salt/kibana/defaults.yaml @@ -22,7 +22,7 @@ kibana: - default - file migrations: - discardCorruptObjects: "8.17.3" + discardCorruptObjects: "8.18.4" telemetry: enabled: False security: diff --git a/salt/kibana/files/config_saved_objects.ndjson.jinja b/salt/kibana/files/config_saved_objects.ndjson.jinja index 4902a1445..b1c2f0fb7 100644 --- a/salt/kibana/files/config_saved_objects.ndjson.jinja +++ b/salt/kibana/files/config_saved_objects.ndjson.jinja @@ -1,3 +1,3 @@ {% import_yaml 'elasticsearch/defaults.yaml' as ELASTICSEARCHDEFAULTS -%} -{"attributes": {"buildNum": 39457,"defaultIndex": "logs-*","defaultRoute": "/app/dashboards#/view/a8411b30-6d03-11ea-b301-3d6c35840645","discover:sampleSize": 100,"theme:darkMode": true,"timepicker:timeDefaults": "{\n \"from\": \"now-24h\",\n \"to\": \"now\"\n}"},"coreMigrationVersion": "{{ ELASTICSEARCHDEFAULTS.elasticsearch.version 
}}","id": "{{ ELASTICSEARCHDEFAULTS.elasticsearch.version }}","references": [],"type": "config","updated_at": "2021-10-10T10:10:10.105Z","version": "WzI5NzUsMl0="} +{"attributes": {"buildNum": 39457,"defaultIndex": "logs-*","defaultRoute": "/app/dashboards#/view/a8411b30-6d03-11ea-b301-3d6c35840645","discover:sampleSize": 100,"savedObjects:listingLimit":1500,"theme:darkMode": true,"timepicker:timeDefaults": "{\n \"from\": \"now-24h\",\n \"to\": \"now\"\n}"},"coreMigrationVersion": "{{ ELASTICSEARCHDEFAULTS.elasticsearch.version }}","id": "{{ ELASTICSEARCHDEFAULTS.elasticsearch.version }}","references": [],"type": "config","version": "WzI5NzUsMl0="} diff --git a/salt/kibana/tools/sbin_jinja/so-kibana-space-defaults b/salt/kibana/tools/sbin_jinja/so-kibana-space-defaults index a22aba066..cbd16a2de 100755 --- a/salt/kibana/tools/sbin_jinja/so-kibana-space-defaults +++ b/salt/kibana/tools/sbin_jinja/so-kibana-space-defaults @@ -13,6 +13,6 @@ echo "Setting up default Space:" {% if HIGHLANDER %} curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X PUT "localhost:5601/api/spaces/space/default" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d' {"id":"default","name":"Default","disabledFeatures":["enterpriseSearch"]} ' >> /opt/so/log/kibana/misc.log {% else %} -curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X PUT "localhost:5601/api/spaces/space/default" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d' {"id":"default","name":"Default","disabledFeatures":["ml","enterpriseSearch","logs","infrastructure","apm","uptime","monitoring","stackAlerts","actions","securitySolutionCasesV2","inventory","dataQuality","actions"]} ' >> /opt/so/log/kibana/misc.log +curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X PUT "localhost:5601/api/spaces/space/default" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d' {"id":"default","name":"Default","disabledFeatures":["ml","enterpriseSearch","logs","infrastructure","apm","uptime","monitoring","stackAlerts","actions","securitySolutionCasesV3","inventory","dataQuality","searchSynonyms","enterpriseSearchApplications","enterpriseSearchAnalytics","securitySolutionTimeline","securitySolutionNotes","entityManager"]} ' >> /opt/so/log/kibana/misc.log {% endif %} echo diff --git a/salt/libvirt/images/init.sls b/salt/libvirt/images/init.sls index b6a5baf04..21ab0553d 100644 --- a/salt/libvirt/images/init.sls +++ b/salt/libvirt/images/init.sls @@ -48,6 +48,7 @@ manage_userdata_sool9: file.managed: - name: /nsm/libvirt/images/sool9/user-data - source: salt://libvirt/images/sool9/user-data + - show_changes: False # Manage qcow2 image manage_qcow2_sool9: diff --git a/salt/libvirt/ssh/files/config b/salt/libvirt/ssh/files/config index 360d5c182..de6cb7b34 100644 --- a/salt/libvirt/ssh/files/config +++ b/salt/libvirt/ssh/files/config @@ -1,2 +1,2 @@ -Host * +Match user soqemussh IdentityFile /etc/ssh/auth_keys/soqemussh/id_ed25519 diff --git a/salt/libvirt/ssh/users.sls b/salt/libvirt/ssh/users.sls index 0e9c045a0..173a3e095 100644 --- a/salt/libvirt/ssh/users.sls +++ b/salt/libvirt/ssh/users.sls @@ -16,10 +16,17 @@ {% if GLOBALS.is_manager %} -qemu_ssh_client_config: - file.managed: +root_ssh_config: + file.touch: - name: /root/.ssh/config + +qemu_ssh_client_config: + file.blockreplace: + - name: /root/.ssh/config + - marker_start: "# START of block managed by Salt - soqemussh config" + - marker_end: "# END of block managed by Salt - soqemussh config" - source: 
salt://libvirt/ssh/files/config + - prepend_if_not_found: True {% endif %} diff --git a/salt/manager/managed_soc_annotations.sls b/salt/manager/managed_soc_annotations.sls index 17621f973..d8f175df6 100644 --- a/salt/manager/managed_soc_annotations.sls +++ b/salt/manager/managed_soc_annotations.sls @@ -5,7 +5,7 @@ {# Managed elasticsearch/soc_elasticsearch.yaml file for adding integration configuration items to UI #} {% set managed_integrations = salt['pillar.get']('elasticsearch:managed_integrations', []) %} -{% if managed_integrations %} +{% if managed_integrations and salt['file.file_exists']('/opt/so/state/esfleet_package_components.json') and salt['file.file_exists']('/opt/so/state/esfleet_component_templates.json') %} {% from 'elasticfleet/integration-defaults.map.jinja' import ADDON_INTEGRATION_DEFAULTS %} {% set addon_integration_keys = ADDON_INTEGRATION_DEFAULTS.keys() %} {% set matched_integration_names = [] %} diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index c8fb38969..e37ccbfda 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -578,8 +578,7 @@ post_to_2.4.141() { } post_to_2.4.150() { - echo "Regenerating Elastic Agent Installers" - /sbin/so-elastic-agent-gen-installers + echo "Nothing to apply" POSTVERSION=2.4.150 } @@ -589,7 +588,14 @@ post_to_2.4.160() { } post_to_2.4.170() { - echo "Nothing to apply" + echo "Regenerating Elastic Agent Installers" + /sbin/so-elastic-agent-gen-installers + + # Update kibana default space + salt-call state.apply kibana.config queue=True + echo "Updating Kibana default space" + /usr/sbin/so-kibana-space-defaults + POSTVERSION=2.4.170 } @@ -802,9 +808,6 @@ up_to_2.4.130() { # Remove any old Elastic Defend config files rm -f /opt/so/conf/elastic-fleet/integrations/endpoints-initial/elastic-defend-endpoints.json - # Elastic Update for this release, so download Elastic Agent files - determine_elastic_agent_upgrade - # Ensure override exists to allow nmcli access to other devices touch /etc/NetworkManager/conf.d/10-globally-managed-devices.conf @@ -847,6 +850,9 @@ up_to_2.4.170() { touch /opt/so/saltstack/local/pillar/$state/adv_$state.sls /opt/so/saltstack/local/pillar/$state/soc_$state.sls done + # Elastic Update for this release, so download Elastic Agent files + determine_elastic_agent_upgrade + INSTALLEDVERSION=2.4.170 } diff --git a/salt/orch/delete_hypervisor.sls b/salt/orch/delete_hypervisor.sls new file mode 100644 index 000000000..3f0bd02b6 --- /dev/null +++ b/salt/orch/delete_hypervisor.sls @@ -0,0 +1,22 @@ +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. 
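The new `salt/orch/delete_hypervisor.sls` orchestration below removes the deleted hypervisor's mine cache from every manager-role minion and re-renders the salt-cloud profile; the updated reactor further down wires it to key-deletion events. For testing, the same orchestration can be invoked by hand from the manager. A minimal sketch, assuming a hypothetical hypervisor minion ID of `hv01_hypervisor`:

```bash
# Run the delete-hypervisor orchestration directly, passing the same
# pillar the reactor would supply (the minion ID here is hypothetical).
salt-run state.orchestrate orch.delete_hypervisor pillar='{"minion_id": "hv01_hypervisor"}'

# In normal operation the reactor fires this flow when the key is deleted:
salt-key -y -d hv01_hypervisor
```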
+ +{% set hypervisor = pillar.minion_id %} + +ensure_hypervisor_mine_deleted: + salt.function: + - name: file.remove + - tgt: 'G@role:so-manager or G@role:so-managerhype or G@role:so-managersearch or G@role:so-standalone or G@role:so-eval' + - tgt_type: compound + - arg: + - /var/cache/salt/master/minions/{{hypervisor}} + +update_salt_cloud_profile: + salt.state: + - tgt: 'G@role:so-manager or G@role:so-managerhype or G@role:so-managersearch or G@role:so-standalone or G@role:so-eval' + - tgt_type: compound + - sls: + - salt.cloud.config + - concurrent: True diff --git a/salt/reactor/check_hypervisor.sls b/salt/reactor/check_hypervisor.sls index 889656b36..91b7c0c02 100644 --- a/salt/reactor/check_hypervisor.sls +++ b/salt/reactor/check_hypervisor.sls @@ -1,5 +1,24 @@ -{% if data['act'] == 'accept' and data['id'].endswith(('_hypervisor', '_managerhyper')) and data['result'] == True %} +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. + +{% if data['id'].endswith('_hypervisor') and data['result'] == True %} + +{% if data['act'] == 'accept' %} check_and_trigger: runner.setup_hypervisor.setup_environment: - minion_id: {{ data['id'] }} +{% endif %} + +{% if data['act'] == 'delete' %} +delete_hypervisor: + runner.state.orchestrate: + - args: + - mods: orch.delete_hypervisor + - pillar: + minion_id: {{ data['id'] }} +{% endif %} + {% endif %} + diff --git a/salt/salt/map.jinja b/salt/salt/map.jinja index 55b3a7ffb..1e3b200f4 100644 --- a/salt/salt/map.jinja +++ b/salt/salt/map.jinja @@ -26,9 +26,9 @@ {% if INSTALLEDSALTVERSION != SALTVERSION %} {% if grains.os_family|lower == 'redhat' %} - {% set UPGRADECOMMAND = 'yum clean all ; /usr/sbin/bootstrap-salt.sh -s 120 -r -F stable ' ~ SALTVERSION %} + {% set UPGRADECOMMAND = 'yum clean all ; /usr/sbin/bootstrap-salt.sh -X -r -F stable ' ~ SALTVERSION %} {% elif grains.os_family|lower == 'debian' %} - {% set UPGRADECOMMAND = '/usr/sbin/bootstrap-salt.sh -s 120 -F stable ' ~ SALTVERSION %} + {% set UPGRADECOMMAND = '/usr/sbin/bootstrap-salt.sh -X -F stable ' ~ SALTVERSION %} {% endif %} {% else %} {% set UPGRADECOMMAND = 'echo Already running Salt Minion version ' ~ SALTVERSION %} diff --git a/salt/salt/master.sls b/salt/salt/master.sls index fce702932..6486e9126 100644 --- a/salt/salt/master.sls +++ b/salt/salt/master.sls @@ -23,11 +23,6 @@ sync_runners: - name: saltutil.sync_runners {% endif %} -hold_salt_master_package: - module.run: - - pkg.hold: - - name: salt-master - # prior to 2.4.30 this engine ran on the manager with salt-minion # this has changed to running with the salt-master in 2.4.30 remove_engines_config: diff --git a/salt/salt/minion.sls b/salt/salt/minion.sls index 3ae7ded9a..b0e078e79 100644 --- a/salt/salt/minion.sls +++ b/salt/salt/minion.sls @@ -38,25 +38,36 @@ unhold_salt_packages: {% endfor %} install_salt_minion: + cmd.run: + - name: /bin/sh -c '{{ UPGRADECOMMAND }}' + +# minion service is in failed state after upgrade. 
this command will start it after the state run for the upgrade completes +start_minion_post_upgrade: cmd.run: - name: | exec 0>&- # close stdin exec 1>&- # close stdout exec 2>&- # close stderr - nohup /bin/sh -c '{{ UPGRADECOMMAND }}' & + nohup /bin/sh -c 'sleep 30; systemctl start salt-minion' & + - require: + - cmd: install_salt_minion + - watch: + - cmd: install_salt_minion + - order: last {% endif %} {% if INSTALLEDSALTVERSION|string == SALTVERSION|string %} + +{% for package in SALTPACKAGES %} # only hold the package if it is already installed -hold_salt_packages: +{% if salt['pkg.version'](package) %} +hold_{{ package }}_package: pkg.held: - - pkgs: -{% for package in SALTPACKAGES %} -{% if salt['pkg.version'](package) %} - - {{ package }}: {{SALTVERSION}}-0.* -{% endif %} -{% endfor %} + - name: {{ package }} + - version: {{SALTVERSION}}-0.* +{% endif %} +{% endfor %} remove_error_log_level_logfile: file.line: diff --git a/salt/schedule.sls b/salt/schedule.sls index 74c9acb56..c3b5d85ae 100644 --- a/salt/schedule.sls +++ b/salt/schedule.sls @@ -1,5 +1,10 @@ +{% from 'vars/globals.map.jinja' import GLOBALS %} + highstate_schedule: schedule.present: - function: state.highstate - minutes: 15 - maxrunning: 1 +{% if not GLOBALS.is_manager %} + - splay: 120 +{% endif %} diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 66355fa24..0e3e50240 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -10,7 +10,7 @@ soc: icon: fa-crosshairs target: links: - - '/#/hunt?q="{value|escape}" | groupby event.module* | groupby -sankey event.module* event.dataset | groupby event.dataset | groupby source.ip source.port destination.ip destination.port | groupby network.protocol | groupby source_geo.organization_name source.geo.country_name | groupby destination_geo.organization_name destination.geo.country_name | groupby rule.name rule.category event.severity_label | groupby dns.query.name | groupby file.mime_type | groupby http.virtual_host http.uri | groupby notice.note notice.message notice.sub_message | groupby ssl.server_name | groupby source.ip host.hostname user.name event.action event.type process.executable process.pid&gridId={gridId}' + - '/#/hunt?q="{value|escape}" | groupby event.module* | groupby -sankey event.module* event.dataset | groupby event.dataset | groupby source.ip source.port destination.ip destination.port | groupby network.protocol | groupby source.as.organization.name source.geo.country_name | groupby destination.as.organization.name destination.geo.country_name | groupby rule.name rule.category event.severity_label | groupby dns.query.name | groupby file.mime_type | groupby http.virtual_host http.uri | groupby notice.note notice.message notice.sub_message | groupby ssl.server_name | groupby source.ip host.hostname user.name event.action event.type process.executable process.pid&gridId={gridId}' - name: actionAddToCase description: actionAddToCaseHelp icon: fa-briefcase @@ -24,13 +24,13 @@ soc: icon: fa-magnifying-glass-arrow-right target: '' links: - - '/#/hunt?q=("{:log.id.fuid}" OR "{:log.id.uid}" OR "{:network.community_id}") | groupby event.module* | groupby -sankey event.module* event.dataset | groupby event.dataset | groupby source.ip source.port destination.ip destination.port | groupby network.protocol | groupby source_geo.organization_name source.geo.country_name | groupby destination_geo.organization_name destination.geo.country_name | groupby rule.name rule.category event.severity_label | groupby dns.query.name | groupby file.mime_type | 
groupby http.virtual_host http.uri | groupby notice.note notice.message notice.sub_message | groupby ssl.server_name | groupby source.ip host.hostname user.name event.action event.type process.executable process.pid&gridId={gridId}' - - '/#/hunt?q=("{:log.id.fuid}" OR "{:log.id.uid}") | groupby event.module* | groupby -sankey event.module* event.dataset | groupby event.dataset | groupby source.ip source.port destination.ip destination.port | groupby network.protocol | groupby source_geo.organization_name source.geo.country_name | groupby destination_geo.organization_name destination.geo.country_name | groupby rule.name rule.category event.severity_label | groupby dns.query.name | groupby file.mime_type | groupby http.virtual_host http.uri | groupby notice.note notice.message notice.sub_message | groupby ssl.server_name | groupby source.ip host.hostname user.name event.action event.type process.executable process.pid&gridId={gridId}' - - '/#/hunt?q=("{:log.id.fuid}" OR "{:network.community_id}") | groupby event.module* | groupby -sankey event.module* event.dataset | groupby event.dataset | groupby source.ip source.port destination.ip destination.port | groupby network.protocol | groupby source_geo.organization_name source.geo.country_name | groupby destination_geo.organization_name destination.geo.country_name | groupby rule.name rule.category event.severity_label | groupby dns.query.name | groupby file.mime_type | groupby http.virtual_host http.uri | groupby notice.note notice.message notice.sub_message | groupby ssl.server_name | groupby source.ip host.hostname user.name event.action event.type process.executable process.pid&gridId={gridId}' - - '/#/hunt?q=("{:log.id.uid}" OR "{:network.community_id}") | groupby event.module* | groupby -sankey event.module* event.dataset | groupby event.dataset | groupby source.ip source.port destination.ip destination.port | groupby network.protocol | groupby source_geo.organization_name source.geo.country_name | groupby destination_geo.organization_name destination.geo.country_name | groupby rule.name rule.category event.severity_label | groupby dns.query.name | groupby file.mime_type | groupby http.virtual_host http.uri | groupby notice.note notice.message notice.sub_message | groupby ssl.server_name | groupby source.ip host.hostname user.name event.action event.type process.executable process.pid&gridId={gridId}' - - '/#/hunt?q="{:log.id.fuid}" | groupby event.module* | groupby -sankey event.module* event.dataset | groupby event.dataset | groupby source.ip source.port destination.ip destination.port | groupby network.protocol | groupby source_geo.organization_name source.geo.country_name | groupby destination_geo.organization_name destination.geo.country_name | groupby rule.name rule.category event.severity_label | groupby dns.query.name | groupby file.mime_type | groupby http.virtual_host http.uri | groupby notice.note notice.message notice.sub_message | groupby ssl.server_name | groupby source.ip host.hostname user.name event.action event.type process.executable process.pid&gridId={gridId}' - - '/#/hunt?q="{:log.id.uid}" | groupby event.module* | groupby -sankey event.module* event.dataset | groupby event.dataset | groupby source.ip source.port destination.ip destination.port | groupby network.protocol | groupby source_geo.organization_name source.geo.country_name | groupby destination_geo.organization_name destination.geo.country_name | groupby rule.name rule.category event.severity_label | groupby dns.query.name | groupby file.mime_type | groupby 
http.virtual_host http.uri | groupby notice.note notice.message notice.sub_message | groupby ssl.server_name | groupby source.ip host.hostname user.name event.action event.type process.executable process.pid&gridId={gridId}' - - '/#/hunt?q="{:network.community_id}" | groupby event.module* | groupby -sankey event.module* event.dataset | groupby event.dataset | groupby source.ip source.port destination.ip destination.port | groupby network.protocol | groupby source_geo.organization_name source.geo.country_name | groupby destination_geo.organization_name destination.geo.country_name | groupby rule.name rule.category event.severity_label | groupby dns.query.name | groupby file.mime_type | groupby http.virtual_host http.uri | groupby notice.note notice.message notice.sub_message | groupby ssl.server_name | groupby source.ip host.hostname user.name event.action event.type process.executable process.pid&gridId={gridId}' + - '/#/hunt?q=("{:log.id.fuid}" OR "{:log.id.uid}" OR "{:network.community_id}") | groupby event.module* | groupby -sankey event.module* event.dataset | groupby event.dataset | groupby source.ip source.port destination.ip destination.port | groupby network.protocol | groupby source.as.organization.name source.geo.country_name | groupby destination.as.organization.name destination.geo.country_name | groupby rule.name rule.category event.severity_label | groupby dns.query.name | groupby file.mime_type | groupby http.virtual_host http.uri | groupby notice.note notice.message notice.sub_message | groupby ssl.server_name | groupby source.ip host.hostname user.name event.action event.type process.executable process.pid&gridId={gridId}' + - '/#/hunt?q=("{:log.id.fuid}" OR "{:log.id.uid}") | groupby event.module* | groupby -sankey event.module* event.dataset | groupby event.dataset | groupby source.ip source.port destination.ip destination.port | groupby network.protocol | groupby source.as.organization.name source.geo.country_name | groupby destination.as.organization.name destination.geo.country_name | groupby rule.name rule.category event.severity_label | groupby dns.query.name | groupby file.mime_type | groupby http.virtual_host http.uri | groupby notice.note notice.message notice.sub_message | groupby ssl.server_name | groupby source.ip host.hostname user.name event.action event.type process.executable process.pid&gridId={gridId}' + - '/#/hunt?q=("{:log.id.fuid}" OR "{:network.community_id}") | groupby event.module* | groupby -sankey event.module* event.dataset | groupby event.dataset | groupby source.ip source.port destination.ip destination.port | groupby network.protocol | groupby source.as.organization.name source.geo.country_name | groupby destination.as.organization.name destination.geo.country_name | groupby rule.name rule.category event.severity_label | groupby dns.query.name | groupby file.mime_type | groupby http.virtual_host http.uri | groupby notice.note notice.message notice.sub_message | groupby ssl.server_name | groupby source.ip host.hostname user.name event.action event.type process.executable process.pid&gridId={gridId}' + - '/#/hunt?q=("{:log.id.uid}" OR "{:network.community_id}") | groupby event.module* | groupby -sankey event.module* event.dataset | groupby event.dataset | groupby source.ip source.port destination.ip destination.port | groupby network.protocol | groupby source.as.organization.name source.geo.country_name | groupby destination.as.organization.name destination.geo.country_name | groupby rule.name rule.category event.severity_label | groupby 
dns.query.name | groupby file.mime_type | groupby http.virtual_host http.uri | groupby notice.note notice.message notice.sub_message | groupby ssl.server_name | groupby source.ip host.hostname user.name event.action event.type process.executable process.pid&gridId={gridId}' + - '/#/hunt?q="{:log.id.fuid}" | groupby event.module* | groupby -sankey event.module* event.dataset | groupby event.dataset | groupby source.ip source.port destination.ip destination.port | groupby network.protocol | groupby source.as.organization.name source.geo.country_name | groupby destination.as.organization.name destination.geo.country_name | groupby rule.name rule.category event.severity_label | groupby dns.query.name | groupby file.mime_type | groupby http.virtual_host http.uri | groupby notice.note notice.message notice.sub_message | groupby ssl.server_name | groupby source.ip host.hostname user.name event.action event.type process.executable process.pid&gridId={gridId}' + - '/#/hunt?q="{:log.id.uid}" | groupby event.module* | groupby -sankey event.module* event.dataset | groupby event.dataset | groupby source.ip source.port destination.ip destination.port | groupby network.protocol | groupby source.as.organization.name source.geo.country_name | groupby destination.as.organization.name destination.geo.country_name | groupby rule.name rule.category event.severity_label | groupby dns.query.name | groupby file.mime_type | groupby http.virtual_host http.uri | groupby notice.note notice.message notice.sub_message | groupby ssl.server_name | groupby source.ip host.hostname user.name event.action event.type process.executable process.pid&gridId={gridId}' + - '/#/hunt?q="{:network.community_id}" | groupby event.module* | groupby -sankey event.module* event.dataset | groupby event.dataset | groupby source.ip source.port destination.ip destination.port | groupby network.protocol | groupby source.as.organization.name source.geo.country_name | groupby destination.as.organization.name destination.geo.country_name | groupby rule.name rule.category event.severity_label | groupby dns.query.name | groupby file.mime_type | groupby http.virtual_host http.uri | groupby notice.note notice.message notice.sub_message | groupby ssl.server_name | groupby source.ip host.hostname user.name event.action event.type process.executable process.pid&gridId={gridId}' - name: actionPcap description: actionPcapHelp icon: fa-stream @@ -1336,6 +1336,21 @@ soc: - soc.fields.statusCode - event.action - soc.fields.error + ':iptables:': + - soc_timestamp + - source.ip + - source.port + - destination.ip + - destination.port + - message + ':cef:': + - soc_timestamp + - cef.device.event_class_id + - cef.device.vendor + - cef.device.product + - cef.device.version + - log.source.address + - message server: bindAddress: 0.0.0.0:9822 baseUrl: / @@ -1464,12 +1479,16 @@ soc: autoUpdateEnabled: true playbookImportFrequencySeconds: 86400 playbookImportErrorSeconds: 600 - playbookRepoUrl: - default: https://github.com/Security-Onion-Solutions/securityonion-resources-playbooks - airgap: file:///nsm/airgap-resources/playbooks/securityonion-resources-playbooks - playbookRepoBranch: main playbookRepoPath: /opt/sensoroni/playbooks/ - playbookPathInRepo: securityonion-normalized + playbookRepos: + default: + - repo: https://github.com/Security-Onion-Solutions/securityonion-resources-playbooks + branch: main + folder: securityonion-normalized + airgap: + - repo: file:///nsm/airgap-resources/playbooks/securityonion-resources-playbooks + branch: main + folder: 
securityonion-normalized salt: queueDir: /opt/sensoroni/queue timeoutMs: 45000 @@ -1906,13 +1925,13 @@ soc: query: 'event.dataset:kratos.audit AND msg:*authenticated* | groupby http_request.headers.x-real-ip | groupby -sankey http_request.headers.x-real-ip user.name | groupby user.name | groupby http_request.headers.user-agent' - name: SOC Login Failures description: SOC (Security Onion Console) login failures - query: 'event.dataset:kratos.audit AND msg:*Encountered*self-service*login*error* | groupby http_request.headers.x-real-ip | groupby -sankey http_request.headers.x-real-ip http_request.headers.user-agent | groupby http_request.headers.user-agent' + query: 'event.dataset:kratos.audit AND msg:*Encountered*self-service*login*error* | groupby user.name | groupby http_request.headers.x-real-ip | groupby -sankey http_request.headers.x-real-ip http_request.headers.user-agent | groupby http_request.headers.user-agent' - name: Alerts description: Overview of all alerts - query: 'tags:alert | groupby event.module* | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby rule.name | groupby event.severity | groupby destination_geo.organization_name' + query: 'tags:alert | groupby event.module* | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby rule.name | groupby event.severity | groupby destination.as.organization.name' - name: NIDS Alerts description: NIDS (Network Intrusion Detection System) alerts - query: 'event.category:network AND tags:alert | groupby rule.category | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby rule.name | groupby rule.uuid | groupby rule.gid | groupby destination_geo.organization_name' + query: 'event.category:network AND tags:alert | groupby rule.category | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby rule.name | groupby rule.uuid | groupby rule.gid | groupby destination.as.organization.name' - name: Elastic Agent Overview description: Overview of all events from Elastic Agents query: 'event.module:endpoint | groupby event.dataset | groupby host.name | groupby -sankey host.name user.name | groupby user.name | groupby -sankey user.name process.name | groupby process.name' @@ -1963,34 +1982,34 @@ soc: query: 'event.module:strelka | groupby file.mime_type | groupby -sankey file.mime_type file.source | groupby file.source | groupby -sankey file.source file.name | groupby file.name' - name: Zeek Notice description: Zeek notice logs - query: 'event.dataset:zeek.notice | groupby notice.note | groupby -sankey notice.note source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby notice.message | groupby notice.sub_message | groupby source_geo.organization_name | groupby destination_geo.organization_name' + query: 'event.dataset:zeek.notice | groupby notice.note | groupby -sankey notice.note source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby notice.message | groupby notice.sub_message | groupby source.as.organization.name | groupby destination.as.organization.name' - name: Connections and Metadata with Community ID description: Network connections that include network.community_id - query: '_exists_:network.community_id | 
groupby event.module* | groupby -sankey event.module* event.dataset | groupby event.dataset | groupby source.ip | groupby destination.ip | groupby destination.port | groupby network.protocol | groupby source_geo.organization_name | groupby source.geo.country_name | groupby destination_geo.organization_name | groupby destination.geo.country_name | groupby rule.name rule.category event.severity_label | groupby dns.query.name | groupby http.virtual_host http.uri | groupby notice.note notice.message notice.sub_message | groupby source.ip host.hostname user.name event.action event.type process.executable process.pid' + query: '_exists_:network.community_id | groupby event.module* | groupby -sankey event.module* event.dataset | groupby event.dataset | groupby source.ip | groupby destination.ip | groupby destination.port | groupby network.protocol | groupby source.as.organization.name | groupby source.geo.country_name | groupby destination.as.organization.name | groupby destination.geo.country_name | groupby rule.name rule.category event.severity_label | groupby dns.query.name | groupby http.virtual_host http.uri | groupby notice.note notice.message notice.sub_message | groupby source.ip host.hostname user.name event.action event.type process.executable process.pid' - name: Connections seen by Zeek or Suricata description: Network connections logged by Zeek or Suricata query: 'tags:conn | groupby source.ip | groupby destination.ip | groupby destination.port | groupby -sankey destination.port network.protocol | groupby network.protocol | groupby network.transport | groupby connection.history | groupby connection.state | groupby connection.state_description | groupby source.geo.country_name | groupby destination.geo.country_name | groupby client.ip_bytes | groupby server.ip_bytes | groupby client.oui' - name: DCE_RPC description: DCE_RPC (Distributed Computing Environment / Remote Procedure Calls) network metadata - query: 'tags:dce_rpc | groupby dce_rpc.endpoint | groupby -sankey dce_rpc.endpoint dce_rpc.operation | groupby dce_rpc.operation | groupby -sankey dce_rpc.operation dce_rpc.named_pipe | groupby dce_rpc.named_pipe | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name' + query: 'tags:dce_rpc | groupby dce_rpc.endpoint | groupby -sankey dce_rpc.endpoint dce_rpc.operation | groupby dce_rpc.operation | groupby -sankey dce_rpc.operation dce_rpc.named_pipe | groupby dce_rpc.named_pipe | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby destination.as.organization.name' - name: DHCP description: DHCP (Dynamic Host Configuration Protocol) leases query: 'tags:dhcp | groupby host.hostname | groupby -sankey host.hostname client.address | groupby client.address | groupby -sankey client.address server.address | groupby server.address | groupby dhcp.message_types | groupby host.domain' - name: DNS description: DNS (Domain Name System) queries - query: 'tags:dns | groupby dns.query.name | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby dns.highest_registered_domain | groupby dns.parent_domain | groupby dns.query.type_name | groupby dns.response.code_name | groupby dns.answers.name | groupby destination_geo.organization_name' + query: 'tags:dns | groupby dns.query.name | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip 
| groupby destination.port | groupby dns.highest_registered_domain | groupby dns.parent_domain | groupby dns.query.type_name | groupby dns.response.code_name | groupby dns.answers.name | groupby destination.as.organization.name' - name: DPD description: DPD (Dynamic Protocol Detection) errors - query: 'tags:dpd | groupby error.reason | groupby -sankey error.reason source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby network.protocol | groupby destination_geo.organization_name' + query: 'tags:dpd | groupby error.reason | groupby -sankey error.reason source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby network.protocol | groupby destination.as.organization.name' - name: Files description: Files seen in network traffic - query: 'tags:file | groupby file.mime_type | groupby -sankey file.mime_type file.source | groupby file.source | groupby file.bytes.total | groupby source.ip | groupby destination.ip | groupby destination_geo.organization_name' + query: 'tags:file | groupby file.mime_type | groupby -sankey file.mime_type file.source | groupby file.source | groupby file.bytes.total | groupby source.ip | groupby destination.ip | groupby destination.as.organization.name' - name: FTP description: FTP (File Transfer Protocol) network metadata - query: 'tags:ftp | groupby ftp.command | groupby -sankey ftp.command source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name | groupby ftp.argument | groupby ftp.user' + query: 'tags:ftp | groupby ftp.command | groupby -sankey ftp.command source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby destination.as.organization.name | groupby ftp.argument | groupby ftp.user' - name: HTTP description: HTTP (Hyper Text Transport Protocol) network metadata - query: '(tags:http OR tags:http2) | groupby http.method | groupby -sankey http.method http.virtual_host | groupby http.virtual_host | groupby http.uri | groupby http.useragent | groupby http.status_code | groupby http.status_message | groupby file.resp_mime_types | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name' + query: '(tags:http OR tags:http2) | groupby http.method | groupby -sankey http.method http.virtual_host | groupby http.virtual_host | groupby http.uri | groupby http.useragent | groupby http.status_code | groupby http.status_message | groupby file.resp_mime_types | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination.as.organization.name' - name: Intel description: Zeek Intel framework hits query: 'tags:intel | groupby intel.indicator | groupby -sankey intel.indicator source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby intel.indicator_type | groupby intel.seen_where' @@ -1999,7 +2018,7 @@ soc: query: 'tags:ipsec | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby destination.geo.country_name | groupby ipsec.version' - name: IRC description: IRC (Internet Relay Chat) network metadata - query: 'tags:irc | groupby irc.command.type | groupby -sankey irc.command.type irc.username | groupby irc.username 
| groupby irc.nickname | groupby irc.command.value | groupby irc.command.info | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name' + query: 'tags:irc | groupby irc.command.type | groupby -sankey irc.command.type irc.username | groupby irc.username | groupby irc.nickname | groupby irc.command.value | groupby irc.command.info | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination.as.organization.name' - name: Kerberos description: Kerberos network metadata query: 'tags:kerberos | groupby kerberos.service | groupby -sankey kerberos.service source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby kerberos.client | groupby kerberos.request_type' @@ -2023,22 +2042,22 @@ soc: query: 'tags:pe | groupby file.machine | groupby -sankey file.machine file.os | groupby file.os | groupby -sankey file.os file.subsystem | groupby file.subsystem | groupby file.section_names | groupby file.is_exe | groupby file.is_64bit' - name: QUIC description: QUIC network metadata - query: 'tags:quic | groupby quic.server_name | groupby -sankey quic.server_name source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name | groupby quic.server_scid | groupby quic.version | groupby quic.client_protocol' + query: 'tags:quic | groupby quic.server_name | groupby -sankey quic.server_name source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby destination.as.organization.name | groupby quic.server_scid | groupby quic.version | groupby quic.client_protocol' - name: RADIUS description: RADIUS (Remote Authentication Dial-In User Service) network metadata - query: 'tags:radius | groupby user.name | groupby -sankey user.name source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name' + query: 'tags:radius | groupby user.name | groupby -sankey user.name source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby destination.as.organization.name' - name: RDP description: RDP (Remote Desktop Protocol) network metadata - query: 'tags:rdp | groupby client.name | groupby -sankey client.name source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name' + query: 'tags:rdp | groupby client.name | groupby -sankey client.name source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby destination.as.organization.name' - name: RFB description: RFB (Remote Frame Buffer) network metadata - query: 'tags:rfb | groupby rfb.desktop.name | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name' + query: 'tags:rfb | groupby rfb.desktop.name | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby destination.as.organization.name' - name: Signatures description: Zeek signatures query: 'event.dataset:zeek.signatures | groupby signature_id' - name: SIP description: 
SIP (Session Initiation Protocol) network metadata - query: 'tags:sip | groupby sip.method | groupby -sankey sip.method source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name | groupby client.user_agent | groupby sip.method | groupby sip.uri' + query: 'tags:sip | groupby sip.method | groupby -sankey sip.method source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby destination.as.organization.name | groupby client.user_agent | groupby sip.method | groupby sip.uri' - name: SMB_Files description: Files transferred via SMB (Server Message Block) query: 'tags:smb_files | groupby file.action | groupby -sankey file.action source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby file.path | groupby file.name' @@ -2047,7 +2066,7 @@ soc: query: 'tags:smb_mapping | groupby smb.share_type | groupby -sankey smb.share_type smb.path | groupby smb.path | groupby -sankey smb.path smb.service | groupby smb.service | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port' - name: SMTP description: SMTP (Simple Mail Transfer Protocol) network metadata - query: 'tags:smtp | groupby smtp.mail_from | groupby -sankey smtp.mail_from smtp.recipient_to | groupby smtp.recipient_to | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby smtp.subject | groupby destination_geo.organization_name' + query: 'tags:smtp | groupby smtp.mail_from | groupby -sankey smtp.mail_from smtp.recipient_to | groupby smtp.recipient_to | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby smtp.subject | groupby destination.as.organization.name' - name: SNMP description: SNMP (Simple Network Management Protocol) network metadata query: 'tags:snmp | groupby snmp.community | groupby -sankey snmp.community source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby snmp.version' @@ -2056,16 +2075,16 @@ soc: query: 'tags:software | groupby software.type | groupby -sankey software.type source.ip | groupby source.ip | groupby software.name' - name: SSH description: SSH (Secure Shell) connections seen by Zeek - query: 'tags:ssh | groupby ssh.client | groupby -sankey ssh.client source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby ssh.server | groupby ssh.version | groupby ssh.hassh_version | groupby ssh.direction | groupby source_geo.organization_name | groupby destination_geo.organization_name' + query: 'tags:ssh | groupby ssh.client | groupby -sankey ssh.client source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby ssh.server | groupby ssh.version | groupby ssh.hassh_version | groupby ssh.direction | groupby source.as.organization.name | groupby destination.as.organization.name' - name: SSL description: SSL/TLS network metadata - query: 'tags:ssl | groupby ssl.version | groupby -sankey ssl.version ssl.server_name | groupby ssl.server_name | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name'
+ query: 'tags:ssl | groupby ssl.version | groupby -sankey ssl.version ssl.server_name | groupby ssl.server_name | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination.as.organization.name' - name: SSL - Suricata description: SSL/TLS network metadata from Suricata - query: 'event.dataset:suricata.ssl | groupby ssl.version | groupby -sankey ssl.version ssl.server_name | groupby ssl.server_name | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name | groupby ssl.certificate.issuer | groupby ssl.certificate.subject' + query: 'event.dataset:suricata.ssl | groupby ssl.version | groupby -sankey ssl.version ssl.server_name | groupby ssl.server_name | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination.as.organization.name | groupby ssl.certificate.issuer | groupby ssl.certificate.subject' - name: SSL - Zeek description: SSL/TLS network metadata from Zeek - query: 'event.dataset:zeek.ssl | groupby ssl.version | groupby ssl.validation_status | groupby -sankey ssl.validation_status ssl.server_name | groupby ssl.server_name | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name' + query: 'event.dataset:zeek.ssl | groupby ssl.version | groupby ssl.validation_status | groupby -sankey ssl.validation_status ssl.server_name | groupby ssl.server_name | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination.as.organization.name' - name: STUN description: STUN (Session Traversal Utilities for NAT) network metadata query: 'tags:stun* | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby destination.geo.country_name | groupby stun.class | groupby -sankey stun.class stun.method | groupby stun.method | groupby stun.attribute.types' @@ -2080,7 +2099,7 @@ soc: query: 'tags:tunnel | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby tunnel.type | groupby event.action | groupby destination.geo.country_name' - name: Weird description: Weird network traffic seen by Zeek - query: 'event.dataset:zeek.weird | groupby weird.name | groupby -sankey weird.name source.ip | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name' + query: 'event.dataset:zeek.weird | groupby weird.name | groupby -sankey weird.name source.ip | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination.as.organization.name' - name: WireGuard description: WireGuard VPN network metadata query: 'tags:wireguard | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby destination.geo.country_name' @@ -2123,39 +2142,54 @@ soc: - name: ICS S7 description: S7 (Siemens) network metadata query: 'tags:s7* | groupby event.dataset | groupby -sankey event.dataset source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port' - - name: NetFlow - description: NetFlow records - query: 'event.module:netflow | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby network.type | groupby network.transport | groupby network.direction | groupby netflow.type | groupby netflow.exporter.version | groupby 
observer.ip | groupby source.as.organization.name | groupby source.geo.country_name | groupby destination.as.organization.name | groupby destination.geo.country_name' - - name: Firewall - description: Firewall logs - query: 'observer.type:firewall | groupby event.action | groupby -sankey event.action observer.ingress.interface.name | groupby observer.ingress.interface.name | groupby network.type | groupby network.transport | groupby source.ip | groupby destination.ip | groupby destination.port' - - name: Firewall Auth - description: Firewall authentication logs - query: 'observer.type:firewall AND event.category:authentication | groupby user.name | groupby -sankey user.name source.ip | groupby source.ip | table soc_timestamp user.name source.ip message' - name: VLAN description: VLAN (Virtual Local Area Network) tagged logs query: '* AND _exists_:network.vlan.id | groupby network.vlan.id | groupby -sankey network.vlan.id source.ip | groupby source.ip | groupby destination.ip | groupby destination.port | groupby event.dataset | groupby event.module | groupby observer.name | groupby source.geo.country_name | groupby destination.geo.country_name' - name: GeoIP - Destination Countries description: GeoIP tagged logs visualized by destination countries - query: '* AND _exists_:destination.geo.country_name | groupby destination.geo.country_name | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name | groupby event.dataset | groupby event.module' + query: '* AND _exists_:destination.geo.country_name | groupby destination.geo.country_name | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby destination.as.organization.name | groupby event.dataset | groupby event.module' - name: GeoIP - Destination Organizations description: GeoIP tagged logs visualized by destination organizations - query: '* AND _exists_:destination_geo.organization_name | groupby destination_geo.organization_name | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby destination.geo.country_name | groupby event.dataset | groupby event.module' + query: '* AND _exists_:destination.as.organization.name | groupby destination.as.organization.name | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby destination.geo.country_name | groupby event.dataset | groupby event.module' - name: GeoIP - Source Countries description: GeoIP tagged logs visualized by source countries - query: '* AND _exists_:source.geo.country_name | groupby source.geo.country_name | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby source_geo.organization_name | groupby event.dataset | groupby event.module' + query: '* AND _exists_:source.geo.country_name | groupby source.geo.country_name | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby source.as.organization.name | groupby event.dataset | groupby event.module' - name: GeoIP - Source Organizations description: GeoIP tagged logs visualized by source organizations - query: '* AND _exists_:source_geo.organization_name | groupby source_geo.organization_name | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby 
destination.port | groupby source.geo.country_name | groupby event.dataset | groupby event.module' + query: '* AND _exists_:source.as.organization.name | groupby source.as.organization.name | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby source.geo.country_name | groupby event.dataset | groupby event.module' + - name: NetFlow + description: NetFlow records + query: 'event.module:netflow | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby network.type | groupby network.transport | groupby network.direction | groupby netflow.type | groupby netflow.exporter.version | groupby observer.ip | groupby source.as.organization.name | groupby source.geo.country_name | groupby destination.as.organization.name | groupby destination.geo.country_name' + - name: Firewall - pfSense/OPNsense + description: pfSense/OPNsense firewall logs + query: 'observer.type:firewall | groupby event.action | groupby -sankey event.action observer.ingress.interface.name | groupby observer.ingress.interface.name | groupby network.type | groupby network.transport | groupby source.ip | groupby destination.ip | groupby destination.port' + - name: Firewall - pfSense/OPNsense Auth + description: pfSense/OPNsense firewall authentication logs + query: 'observer.type:firewall AND event.category:authentication | groupby user.name | groupby -sankey user.name source.ip | groupby source.ip | table soc_timestamp user.name source.ip message' + - name: Firewall - iptables + description: All network traffic logged by Elastic integration for iptables + query: 'event.module:iptables AND event.type:connection | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby -sankey destination.ip destination.port | groupby destination.port' + - name: Firewall - UniFi Firewall Overview + description: All network traffic logged by UniFi firewall + query: 'event.module:iptables AND event.type:connection | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby -sankey destination.ip destination.port | groupby destination.port' + - name: Firewall - UniFi Firewall Blocks + description: Network traffic blocked by UniFi firewall + query: 'event.module:iptables AND event.type:connection AND message:block | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby -sankey destination.ip destination.port | groupby destination.port' + - name: Firewall - UniFi Firewall Allows + description: Network traffic allowed by UniFi firewall + query: 'event.module:iptables AND event.type:connection AND NOT message:block | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby -sankey destination.ip destination.port | groupby destination.port' + - name: Firewall - UniFi System + description: UniFi system logs + query: 'event.module:cef | groupby cef.device.event_class_id | groupby -sankey cef.device.event_class_id cef.device.vendor | groupby cef.device.vendor | groupby cef.device.product | groupby cef.device.version | groupby log.source.address' + - name: CEF + description: Logs handled by the Elastic integration for CEF + query: 'event.module:cef | groupby cef.device.event_class_id | groupby -sankey cef.device.event_class_id cef.device.vendor | groupby cef.device.vendor | groupby cef.device.product | groupby cef.device.version | groupby log.source.address' - name: Kismet 
- WiFi Devices description: WiFi devices seen by Kismet sensors query: 'event.module: kismet | groupby network.wireless.ssid | groupby device.manufacturer | groupby -pie device.manufacturer | groupby event.dataset' - name: SOC Detections - Runtime Status description: Runtime Status of Detections query: 'event.dataset:soc.detections | groupby soc.detection_type soc.error_type | groupby soc.error_analysis | groupby soc.rule.name | groupby soc.error_message' - - - job: alerts: advanced: false diff --git a/salt/soc/dyanno/hypervisor/soc_hypervisor.yaml.jinja b/salt/soc/dyanno/hypervisor/soc_hypervisor.yaml.jinja index 4222ff6b2..07577a1fb 100644 --- a/salt/soc/dyanno/hypervisor/soc_hypervisor.yaml.jinja +++ b/salt/soc/dyanno/hypervisor/soc_hypervisor.yaml.jinja @@ -22,7 +22,7 @@ | | CPU Cores | Memory (GB) | Disk | Copper | SFP | |-----------|-----------|-------------|-------------|-------------|-------------| | Available | {{ cpu_free }} | {{ mem_free }} | {{ disk_free | replace('\n', ',') if disk_free else 'None' }} | {{ copper_free | replace('\n', ',') if copper_free else 'None' }} | {{ sfp_free | replace('\n', ',') if sfp_free else 'None' }} | -| Total | {{ cpu_total }} | {{ mem_total }} | {{ disk_total | replace('\n', ',') }} | {{ copper_total | replace('\n', ',') }} | {{ sfp_total | replace('\n', ',') }} | +| Total | {{ cpu_total }} | {{ mem_total }} | {{ disk_total | replace('\n', ',') if disk_total else 'None' }} | {{ copper_total | replace('\n', ',') if copper_total else 'None' }} | {{ sfp_total | replace('\n', ',') if sfp_total else 'None' }} | {%- if baseDomainStatus == 'Initialized' %} {%- if vm_list %} @@ -60,7 +60,8 @@ Base domain has not been initialized. {%- macro get_available_pci(hw_config, device_type, used_indices) -%} {%- set available = [] -%} -{%- for idx in hw_config.get(device_type, {}).keys() -%} +{%- set device_config = hw_config.get(device_type, {}) or {} -%} +{%- for idx in device_config.keys() -%} {%- if idx | string not in used_indices -%} {%- do available.append(idx) -%} {%- endif -%} @@ -155,9 +156,9 @@ Base domain has not been initialized. {# Get total resources #} {%- set cpu_total = hw_config.cpu -%} {%- set mem_total = hw_config.memory -%} -{%- set disk_total = hw_config.disk.keys() | join('\n') -%} -{%- set copper_total = hw_config.copper.keys() | join('\n') -%} -{%- set sfp_total = hw_config.sfp.keys() | join('\n') -%} +{%- set disk_total = (hw_config.get('disk', {}) or {}).keys() | join('\n') if hw_config.get('disk', {}) else '' -%} +{%- set copper_total = (hw_config.get('copper', {}) or {}).keys() | join('\n') if hw_config.get('copper', {}) else '' -%} +{%- set sfp_total = (hw_config.get('sfp', {}) or {}).keys() | join('\n') if hw_config.get('sfp', {}) else '' -%} {# Update field labels with total and free values #} {%- set updated_template = TEMPLATE.copy() -%} @@ -170,20 +171,26 @@ Base domain has not been initialized. 
{%- do update_resource_field(updated_field, mem_free, mem_total, 'GB') -%} {%- elif field.field == 'disk' -%} {%- set disk_free_list = disk_free.split(',') if disk_free else [] -%} +{%- set disk_free_safe = disk_free if disk_free is defined else '' -%} +{%- set disk_total_safe = disk_total if disk_total is defined else '' -%} {%- do updated_field.update({ - 'label': field.label | replace('FREE', disk_free) | replace('TOTAL', disk_total | replace('\n', ',')), + 'label': field.label | replace('FREE', disk_free_safe) | replace('TOTAL', disk_total_safe | replace('\n', ',')), 'options': disk_free_list }) -%} {%- elif field.field == 'copper' -%} {%- set copper_free_list = copper_free.split(',') if copper_free else [] -%} +{%- set copper_free_safe = copper_free if copper_free is defined else '' -%} +{%- set copper_total_safe = copper_total if copper_total is defined else '' -%} {%- do updated_field.update({ - 'label': field.label | replace('FREE', copper_free) | replace('TOTAL', copper_total | replace('\n', ',')), + 'label': field.label | replace('FREE', copper_free_safe) | replace('TOTAL', copper_total_safe | replace('\n', ',')), 'options': copper_free_list }) -%} {%- elif field.field == 'sfp' -%} {%- set sfp_free_list = sfp_free.split(',') if sfp_free else [] -%} +{%- set sfp_free_safe = sfp_free if sfp_free is defined else '' -%} +{%- set sfp_total_safe = sfp_total if sfp_total is defined else '' -%} {%- do updated_field.update({ - 'label': field.label | replace('FREE', sfp_free) | replace('TOTAL', sfp_total | replace('\n', ',')), + 'label': field.label | replace('FREE', sfp_free_safe) | replace('TOTAL', sfp_total_safe | replace('\n', ',')), 'options': sfp_free_list }) -%} {%- endif -%} diff --git a/salt/soc/files/soc/motd.md b/salt/soc/files/soc/motd.md index 91c603851..69b3145fa 100644 --- a/salt/soc/files/soc/motd.md +++ b/salt/soc/files/soc/motd.md @@ -6,7 +6,7 @@ If you're ready to dive in, take a look at the [Alerts](/#/alerts) interface to Next, go to the [Dashboards](/#/dashboards) interface for a general overview of all logs collected. 
diff --git a/salt/soc/files/soc/motd.md b/salt/soc/files/soc/motd.md
index 91c603851..69b3145fa 100644
--- a/salt/soc/files/soc/motd.md
+++ b/salt/soc/files/soc/motd.md
@@ -6,7 +6,7 @@
 If you're ready to dive in, take a look at the [Alerts](/#/alerts) interface to see what Security Onion has detected so far.
 Next, go to the [Dashboards](/#/dashboards) interface for a general overview of all logs collected.
 
 Here are a few overview dashboards to get you started:
-[Overview Dashboard](/#/dashboards) | [Elastic Agent Overview](/#/dashboards?q=event.module%3Aendpoint%20%7C%20groupby%20event.dataset%20%7C%20groupby%20host.name%20%7C%20groupby%20-sankey%20host.name%20user.name%20%7C%20groupby%20user.name%20%7C%20groupby%20-sankey%20user.name%20process.name%20%7C%20groupby%20process.name) | [Network Connection Overview](/#/dashboards?q=tags%3Aconn%20%7C%20groupby%20source.ip%20%7C%20groupby%20destination.ip%20%7C%20groupby%20destination.port%20%7C%20groupby%20-sankey%20destination.port%20network.protocol%20%7C%20groupby%20network.protocol%20%7C%20groupby%20network.transport%20%7C%20groupby%20connection.history%20%7C%20groupby%20connection.state%20%7C%20groupby%20connection.state_description%20%7C%20groupby%20source.geo.country_name%20%7C%20groupby%20destination.geo.country_name%20%7C%20groupby%20client.ip_bytes%20%7C%20groupby%20server.ip_bytes%20%7C%20groupby%20client.oui) | [DNS](/#/dashboards?q=tags%3Adns%20%7C%20groupby%20dns.query.name%20%7C%20groupby%20source.ip%20%7C%20groupby%20-sankey%20source.ip%20destination.ip%20%7C%20groupby%20destination.ip%20%7C%20groupby%20destination.port%20%7C%20groupby%20dns.highest_registered_domain%20%7C%20groupby%20dns.parent_domain%20%7C%20groupby%20dns.query.type_name%20%7C%20groupby%20dns.response.code_name%20%7C%20groupby%20dns.answers.name%20%7C%20groupby%20destination_geo.organization_name) | [Files](/#/dashboards?q=tags%3Afile%20%7C%20groupby%20file.mime_type%20%7C%20groupby%20-sankey%20file.mime_type%20file.source%20%7C%20groupby%20file.source%20%7C%20groupby%20file.bytes.total%20%7C%20groupby%20source.ip%20%7C%20groupby%20destination.ip%20%7C%20groupby%20destination_geo.organization_name) | [HTTP](/#/dashboards?q=tags%3Ahttp%20%7C%20groupby%20http.method%20%7C%20groupby%20-sankey%20http.method%20http.virtual_host%20%7C%20groupby%20http.virtual_host%20%7C%20groupby%20http.uri%20%7C%20groupby%20http.useragent%20%7C%20groupby%20http.status_code%20%7C%20groupby%20http.status_message%20%7C%20groupby%20file.resp_mime_types%20%7C%20groupby%20source.ip%20%7C%20groupby%20destination.ip%20%7C%20groupby%20destination.port%20%7C%20groupby%20destination_geo.organization_name) | [SSL](/#/dashboards?q=tags%3Assl%20%7C%20groupby%20ssl.version%20%7C%20groupby%20-sankey%20ssl.version%20ssl.server_name%20%7C%20groupby%20ssl.server_name%20%7C%20groupby%20source.ip%20%7C%20groupby%20destination.ip%20%7C%20groupby%20destination.port%20%7C%20groupby%20destination_geo.organization_name)
+[Overview Dashboard](/#/dashboards) | [Elastic Agent Overview](/#/dashboards?q=event.module%3Aendpoint%20%7C%20groupby%20event.dataset%20%7C%20groupby%20host.name%20%7C%20groupby%20-sankey%20host.name%20user.name%20%7C%20groupby%20user.name%20%7C%20groupby%20-sankey%20user.name%20process.name%20%7C%20groupby%20process.name) | [Network Connection Overview](/#/dashboards?q=tags%3Aconn%20%7C%20groupby%20source.ip%20%7C%20groupby%20destination.ip%20%7C%20groupby%20destination.port%20%7C%20groupby%20-sankey%20destination.port%20network.protocol%20%7C%20groupby%20network.protocol%20%7C%20groupby%20network.transport%20%7C%20groupby%20connection.history%20%7C%20groupby%20connection.state%20%7C%20groupby%20connection.state_description%20%7C%20groupby%20source.geo.country_name%20%7C%20groupby%20destination.geo.country_name%20%7C%20groupby%20client.ip_bytes%20%7C%20groupby%20server.ip_bytes%20%7C%20groupby%20client.oui) | [DNS](/#/dashboards?q=tags%3Adns%20%7C%20groupby%20dns.query.name%20%7C%20groupby%20source.ip%20%7C%20groupby%20-sankey%20source.ip%20destination.ip%20%7C%20groupby%20destination.ip%20%7C%20groupby%20destination.port%20%7C%20groupby%20dns.highest_registered_domain%20%7C%20groupby%20dns.parent_domain%20%7C%20groupby%20dns.query.type_name%20%7C%20groupby%20dns.response.code_name%20%7C%20groupby%20dns.answers.name%20%7C%20groupby%20destination.as.organization.name) | [Files](/#/dashboards?q=tags%3Afile%20%7C%20groupby%20file.mime_type%20%7C%20groupby%20-sankey%20file.mime_type%20file.source%20%7C%20groupby%20file.source%20%7C%20groupby%20file.bytes.total%20%7C%20groupby%20source.ip%20%7C%20groupby%20destination.ip%20%7C%20groupby%20destination.as.organization.name) | [HTTP](/#/dashboards?q=tags%3Ahttp%20%7C%20groupby%20http.method%20%7C%20groupby%20-sankey%20http.method%20http.virtual_host%20%7C%20groupby%20http.virtual_host%20%7C%20groupby%20http.uri%20%7C%20groupby%20http.useragent%20%7C%20groupby%20http.status_code%20%7C%20groupby%20http.status_message%20%7C%20groupby%20file.resp_mime_types%20%7C%20groupby%20source.ip%20%7C%20groupby%20destination.ip%20%7C%20groupby%20destination.port%20%7C%20groupby%20destination.as.organization.name) | [SSL](/#/dashboards?q=tags%3Assl%20%7C%20groupby%20ssl.version%20%7C%20groupby%20-sankey%20ssl.version%20ssl.server_name%20%7C%20groupby%20ssl.server_name%20%7C%20groupby%20source.ip%20%7C%20groupby%20destination.ip%20%7C%20groupby%20destination.port%20%7C%20groupby%20destination.as.organization.name)
 Click the drop-down menu in Dashboards to find many more dashboards.
 You might also want to explore the [Hunt](/#/hunt) interface for more focused threat hunting.
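The dashboard links above are simply URL-encoded hunt queries: %20 is a space, %7C a pipe, and %3A a colon. Decoded, the SSL link's query parameter reads:

    tags:ssl | groupby ssl.version | groupby -sankey ssl.version ssl.server_name | groupby ssl.server_name | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination.as.organization.name

which makes the substance of this change easy to see: the older destination_geo.organization_name field gives way to the ECS-style destination.as.organization.name in each affected link.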
diff --git a/salt/soc/merged.map.jinja b/salt/soc/merged.map.jinja
index c950d8a60..e053ce63f 100644
--- a/salt/soc/merged.map.jinja
+++ b/salt/soc/merged.map.jinja
@@ -61,11 +61,11 @@
 {% do SOCMERGED.config.server.update({'airgapEnabled': false}) %}
 {% endif %}
 
-{# set playbookRepoUrl based on airgap or not #}
+{# set playbookRepos based on airgap or not #}
 {% if GLOBALS.airgap %}
-{% do SOCMERGED.config.server.modules.playbook.update({'playbookRepoUrl': SOCMERGED.config.server.modules.playbook.playbookRepoUrl.airgap}) %}
+{% do SOCMERGED.config.server.modules.playbook.update({'playbookRepos': SOCMERGED.config.server.modules.playbook.playbookRepos.airgap}) %}
 {% else %}
-{% do SOCMERGED.config.server.modules.playbook.update({'playbookRepoUrl': SOCMERGED.config.server.modules.playbook.playbookRepoUrl.default}) %}
+{% do SOCMERGED.config.server.modules.playbook.update({'playbookRepos': SOCMERGED.config.server.modules.playbook.playbookRepos.default}) %}
 {% endif %}
 
 {# remove these modules if detections is disabled #}
diff --git a/salt/soc/soc_soc.yaml b/salt/soc/soc_soc.yaml
index 58560e89e..b292d1460 100644
--- a/salt/soc/soc_soc.yaml
+++ b/salt/soc/soc_soc.yaml
@@ -552,6 +552,23 @@ soc:
       description: How far back to search for ATT&CK-tagged alerts. (days)
       global: True
       helpLink: attack-navigator.html
+    playbook:
+      playbookRepos:
+        default: &pbRepos
+          description: "Custom Git repositories to pull Playbooks from. Playbooks are pulled when SOC starts and automatically refreshed every 24 hours. If this grid is airgapped then edit the airgap repos. Otherwise edit the default repos."
+          global: True
+          advanced: True
+          forcedType: "[]{}"
+          syntax: json
+          uiElements:
+            - field: repo
+              label: Repo URL
+              required: True
+            - field: branch
+              label: Branch
+            - field: folder
+              label: Folder
+        airgap: *pbRepos
     client:
       enableReverseLookup:
         description: Set to true to enable reverse DNS lookups for IP addresses in the SOC UI.
@@ -712,7 +729,7 @@
       global: True
     status:
      labels:
-        description: List of available case statuses. Some statuses have specifial characteristics and related functionality built into SOC.
+        description: List of available case statuses. Note that some default statuses have special characteristics and related functionality built into SOC.
         global: True
      customEnabled:
        description: Set to true to allow users to add their own case statuses directly in the SOC UI.
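A note on the new playbookRepos annotation above: with syntax: json and forcedType "[]{}", the value SOC expects is a JSON array of objects whose keys match the uiElements fields (repo, branch, folder). A hypothetical entry, using a placeholder URL rather than any shipped default, might look like:

    [
      { "repo": "https://github.com/example/playbooks.git", "branch": "main", "folder": "detections" }
    ]

The YAML anchor is doing quiet work here as well: default: &pbRepos defines the annotation once and airgap: *pbRepos aliases it, so both settings share identical UI metadata while merged.map.jinja (previous hunk) picks whichever one applies at render time.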
diff --git a/salt/storage/init.sls b/salt/storage/init.sls
index 5bce7e71a..533366fd0 100644
--- a/salt/storage/init.sls
+++ b/salt/storage/init.sls
@@ -3,5 +3,11 @@
 # https://securityonion.net/license; you may not use this file except in compliance with the
 # Elastic License 2.0.
 
+
+{% set nvme_devices = salt['cmd.shell']("find /dev -name 'nvme*n1' 2>/dev/null") %}
+{% if nvme_devices %}
+
 include:
   - storage.nsm_mount
+
+{% endif %}
diff --git a/salt/storage/nsm_mount.sls b/salt/storage/nsm_mount.sls
index b0476e054..ed9e97c33 100644
--- a/salt/storage/nsm_mount.sls
+++ b/salt/storage/nsm_mount.sls
@@ -23,7 +23,7 @@ storage_nsm_mount_logdir:
 storage_nsm_mount_script:
   file.managed:
     - name: /usr/sbin/so-nsm-mount
-    - source: salt://storage/files/so-nsm-mount
+    - source: salt://storage/tools/sbin/so-nsm-mount
     - mode: 755
     - user: root
     - group: root
diff --git a/salt/storage/files/so-nsm-cleanup b/salt/storage/tools/sbin/so-nsm-cleanup
similarity index 100%
rename from salt/storage/files/so-nsm-cleanup
rename to salt/storage/tools/sbin/so-nsm-cleanup
diff --git a/salt/storage/files/so-nsm-mount b/salt/storage/tools/sbin/so-nsm-mount
similarity index 100%
rename from salt/storage/files/so-nsm-mount
rename to salt/storage/tools/sbin/so-nsm-mount
diff --git a/salt/zeek/config.sls b/salt/zeek/config.sls
index 7fdbd8560..761c6f7d3 100644
--- a/salt/zeek/config.sls
+++ b/salt/zeek/config.sls
@@ -150,6 +150,13 @@ plcronscript:
     - source: salt://zeek/cron/packetloss.sh
     - mode: 755
 
+zeekja4cfg:
+  file.managed:
+    - name: /opt/so/conf/zeek/config.zeek
+    - source: salt://zeek/files/config.zeek.ja4
+    - user: 937
+    - group: 939
+
 # BPF compilation and configuration
 {% if ZEEKBPF %}
 {% set BPF_CALC = salt['cmd.script']('salt://common/tools/sbin/so-bpf-compile', GLOBALS.sensor.interface + ' ' + ZEEKBPF|join(" "),cwd='/root') %}
diff --git a/salt/zeek/defaults.yaml b/salt/zeek/defaults.yaml
index 1daf77102..81bfa3d9d 100644
--- a/salt/zeek/defaults.yaml
+++ b/salt/zeek/defaults.yaml
@@ -52,6 +52,7 @@ zeek:
       - policy/frameworks/notice/community-id
       - policy/protocols/conn/community-id-logging
       - ja3
+      - ja4
       - hassh
       - intel
       - cve-2020-0601
@@ -115,7 +116,6 @@ zeek:
     excluded:
       - broker
       - capture_loss
-      - ecat_arp_info
       - known_hosts
       - known_services
       - loaded_scripts
diff --git a/salt/zeek/enabled.sls b/salt/zeek/enabled.sls
index 7d444ff43..ff090428f 100644
--- a/salt/zeek/enabled.sls
+++ b/salt/zeek/enabled.sls
@@ -34,6 +34,7 @@ so-zeek:
       - /opt/so/conf/zeek/policy/cve-2020-0601:/opt/zeek/share/zeek/policy/cve-2020-0601:ro
       - /opt/so/conf/zeek/policy/intel:/opt/zeek/share/zeek/policy/intel:rw
       - /opt/so/conf/zeek/bpf:/opt/zeek/etc/bpf:ro
+      - /opt/so/conf/zeek/config.zeek:/opt/zeek/share/zeek/site/packages/ja4/config.zeek:ro
 {% if DOCKER.containers['so-zeek'].custom_bind_mounts %}
   {% for BIND in DOCKER.containers['so-zeek'].custom_bind_mounts %}
       - {{ BIND }}
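One subtlety in the storage/init.sls change above: the find command runs through salt['cmd.shell'] while the template is rendered on the minion, so the decision to include storage.nsm_mount is made per host at render time, not at apply time. Roughly what the check sees on a host with NVMe storage (output illustrative):

    $ find /dev -name 'nvme*n1' 2>/dev/null
    /dev/nvme0n1

Any output at all makes nvme_devices truthy and pulls in the include; on hosts with no NVMe namespaces the state file renders with no include statement at all.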
diff --git a/salt/zeek/files/config.zeek.ja4 b/salt/zeek/files/config.zeek.ja4
new file mode 100644
index 000000000..e3dd08a48
--- /dev/null
+++ b/salt/zeek/files/config.zeek.ja4
@@ -0,0 +1,25 @@
+module FINGERPRINT;
+
+export {
+  option delimiter: string = "_";
+
+  # BSD licensed
+  option JA4_enabled: bool = T;
+  option JA4_raw: bool = F;
+
+  # FoxIO license required for JA4+
+  option JA4S_enabled: bool = F;
+  option JA4S_raw: bool = F;
+
+  option JA4H_enabled: bool = F;
+  option JA4H_raw: bool = F;
+
+  option JA4L_enabled: bool = F;
+
+  option JA4SSH_enabled: bool = F;
+
+  option JA4T_enabled: bool = F;
+  option JA4TS_enabled: bool = F;
+
+  option JA4X_enabled: bool = F;
+}
diff --git a/setup/so-functions b/setup/so-functions
index c2eb0b349..522446be4 100755
--- a/setup/so-functions
+++ b/setup/so-functions
@@ -1187,12 +1187,15 @@ get_minion_type() {
 }
 
 hypervisor_local_states() {
-  # these states need to run before the first highstate so that we dont deal with the salt-minion restarting
-  # and we need these setup prior to the highstate
-  if [ $is_hypervisor ] || [ $is_managerhype ]; then
-    salt-call state.apply libvirt.64962 --local --file-root=../salt/ -l info
-    salt-call state.apply libvirt.bridge --local --file-root=../salt/ -l info pillar='{"host": {"mainint": "enp1s0"}}'
-  fi
+  # these states need to run before the first highstate so that we don't deal with the salt-minion restarting,
+  # and we need these set up prior to the highstate
+  info "Check if hypervisor or managerhype"
+  if [ $is_hypervisor ] || [ $is_managerhype ]; then
+    info "Running libvirt states for hypervisor"
+    logCmd "salt-call state.apply libvirt.64962 --local --file-root=../salt/ -l info"
+    info "Setting up bridge for $MNIC"
+    salt-call state.apply libvirt.bridge --local --file-root=../salt/ -l info pillar="{\"host\": {\"mainint\": \"$MNIC\"}}"
+  fi
 }
 
 install_cleanup() {
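The quoting change in hypervisor_local_states is the functional fix here: the old line hard-coded enp1s0 inside a single-quoted pillar string, while the new line double-quotes the JSON and escapes the inner quotes so the shell expands $MNIC before salt-call ever parses it. A minimal sketch of the difference (interface name is illustrative):

    MNIC=eno1
    # single quotes: no expansion; Salt would receive the literal string $MNIC
    salt-call state.apply libvirt.bridge --local pillar='{"host": {"mainint": "$MNIC"}}'
    # double quotes with escaped inner quotes: Salt receives {"host": {"mainint": "eno1"}}
    salt-call state.apply libvirt.bridge --local pillar="{\"host\": {\"mainint\": \"$MNIC\"}}"

Wrapping the first state.apply in logCmd, which presumably records the command's output in the setup log, is consistent with the new info breadcrumbs added around it.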