Compare commits

1 Commit

Author: bryant-treacle | SHA1: e604ad5969 | Message: Update so-nsm-clear | Date: 2026-04-17 09:54:33 -04:00
61 changed files with 632 additions and 1372 deletions
Changed file: -1
@@ -10,7 +10,6 @@ body:
       options:
         -
         - 3.0.0
-        - 3.1.0
         - Other (please provide detail below)
     validations:
       required: true
Changed file: +1 -1
@@ -1 +1 @@
-3.1.0
+3.0.0
New file: +2
@@ -0,0 +1,2 @@
+elasticsearch:
+  index_settings:
Changed file: +3
@@ -97,6 +97,7 @@ base:
     - node_data.ips
     - secrets
     - healthcheck.eval
+    - elasticsearch.index_templates
 {% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/elasticsearch/auth.sls') %}
     - elasticsearch.auth
 {% endif %}
@@ -141,6 +142,7 @@ base:
     - logstash.nodes
     - logstash.soc_logstash
     - logstash.adv_logstash
+    - elasticsearch.index_templates
 {% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/elasticsearch/auth.sls') %}
     - elasticsearch.auth
 {% endif %}
@@ -254,6 +256,7 @@ base:
   '*_import':
     - node_data.ips
     - secrets
+    - elasticsearch.index_templates
 {% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/elasticsearch/auth.sls') %}
     - elasticsearch.auth
 {% endif %}
Changed file: -2
@@ -33,8 +33,6 @@
     'kratos',
     'hydra',
     'elasticfleet',
-    'elasticfleet.manager',
-    'elasticsearch.cluster',
     'elastic-fleet-package-registry',
     'utility'
 ] %}
Changed file: +2 -8
@@ -186,14 +186,8 @@ update_docker_containers() {
             if [ -z "$HOSTNAME" ]; then
                 HOSTNAME=$(hostname)
             fi
-            docker tag $CONTAINER_REGISTRY/$IMAGEREPO/$image $HOSTNAME:5000/$IMAGEREPO/$image >> "$LOG_FILE" 2>&1 || {
-                echo "Unable to tag $image" >> "$LOG_FILE" 2>&1
-                exit 1
-            }
-            docker push $HOSTNAME:5000/$IMAGEREPO/$image >> "$LOG_FILE" 2>&1 || {
-                echo "Unable to push $image" >> "$LOG_FILE" 2>&1
-                exit 1
-            }
+            docker tag $CONTAINER_REGISTRY/$IMAGEREPO/$image $HOSTNAME:5000/$IMAGEREPO/$image >> "$LOG_FILE" 2>&1
+            docker push $HOSTNAME:5000/$IMAGEREPO/$image >> "$LOG_FILE" 2>&1
         fi
     else
         echo "There is a problem downloading the $image image. Details: " >> "$LOG_FILE" 2>&1
Changed file: +13 -2
@@ -66,11 +66,22 @@ delete_zeek() {
   ZEEK_LOG="/nsm/zeek/logs/"
   [ -d $ZEEK_LOG ] && so-zeek-stop && rm -rf $ZEEK_LOG/* && so-zeek-start
 }
+delete_import() {
+  IMPORT_DATA="/nsm/import/"
+  [ -d $IMPORT_DATA ] && rm -rf $IMPORT_DATA/*
+}
+delete_strelka() {
+  STRELKA_HISTORY_DATA="/nsm/strelka/history/"
+  STRELKA_PROCESSED_DATA="/nsm/strelka/processed/"
+  [ -d $STRELKA_HISTORY_DATA ] && rm -rf $STRELKA_HISTORY_DATA/*
+  [ -d $STRELKA_PROCESSED_DATA ] && rm -rf $STRELKA_PROCESSED_DATA/*
+}
 so-suricata-stop
 delete_pcap
 delete_suricata
 delete_zeek
 so-suricata-start
+delete_import
+delete_strelka
Changed file: +2 -9
@@ -9,7 +9,7 @@
 . /usr/sbin/so-common
-software_raid=("SOSMN" "SOSMN-DE02" "SOSSNNV" "SOSSNNV-DE02" "SOS10k-DE02" "SOS10KNV" "SOS10KNV-DE02" "SOS10KNV-DE02" "SOS2000-DE02" "SOS-GOFAST-LT-DE02" "SOS-GOFAST-MD-DE02" "SOS-GOFAST-HV-DE02" "HVGUEST")
+software_raid=("SOSMN" "SOSMN-DE02" "SOSSNNV" "SOSSNNV-DE02" "SOS10k-DE02" "SOS10KNV" "SOS10KNV-DE02" "SOS10KNV-DE02" "SOS2000-DE02" "SOS-GOFAST-LT-DE02" "SOS-GOFAST-MD-DE02" "SOS-GOFAST-HV-DE02")
 hardware_raid=("SOS1000" "SOS1000F" "SOSSN7200" "SOS5000" "SOS4000")
 {%- if salt['grains.get']('sosmodel', '') %}
@@ -87,11 +87,6 @@ check_boss_raid() {
 }
 check_software_raid() {
-  if [[ ! -f /proc/mdstat ]]; then
-    SWRAID=0
-    return
-  fi
   SWRC=$(grep "_" /proc/mdstat)
   if [[ -n $SWRC ]]; then
     # RAID is failed in some way
@@ -112,9 +107,7 @@ if [[ "$is_hwraid" == "true" ]]; then
 fi
 if [[ "$is_softwareraid" == "true" ]]; then
   check_software_raid
-  if [ "$model" != "HVGUEST" ]; then
-    check_boss_raid
-  fi
+  check_boss_raid
 fi
 sum=$(($SWRAID + $BOSSRAID + $HWRAID))
@@ -1,123 +0,0 @@
{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
this file except in compliance with the Elastic License 2.0. #}
{% import_json '/opt/so/state/esfleet_content_package_components.json' as ADDON_CONTENT_PACKAGE_COMPONENTS %}
{% import_json '/opt/so/state/esfleet_component_templates.json' as INSTALLED_COMPONENT_TEMPLATES %}
{% import_yaml 'elasticfleet/defaults.yaml' as ELASTICFLEETDEFAULTS %}
{% set CORE_ESFLEET_PACKAGES = ELASTICFLEETDEFAULTS.get('elasticfleet', {}).get('packages', {}) %}
{% set ADDON_CONTENT_INTEGRATION_DEFAULTS = {} %}
{% set DEBUG_STUFF = {} %}
{% for pkg in ADDON_CONTENT_PACKAGE_COMPONENTS %}
{% if pkg.name in CORE_ESFLEET_PACKAGES %}
{# skip core content packages #}
{% elif pkg.name not in CORE_ESFLEET_PACKAGES %}
{# generate defaults for each content package #}
{% if pkg.dataStreams is defined and pkg.dataStreams is not none and pkg.dataStreams | length > 0%}
{% for pattern in pkg.dataStreams %}
{# in ES 9.3.2 'input' type integrations no longer create default component templates and instead they wait for user input during 'integration' setup (fleet ui config)
title: generic is an artifact of that and is not in use #}
{% if pattern.title == "generic" %}
{% continue %}
{% endif %}
{% if "metrics-" in pattern.name %}
{% set integration_type = "metrics-" %}
{% elif "logs-" in pattern.name %}
{% set integration_type = "logs-" %}
{% else %}
{% set integration_type = "" %}
{% endif %}
{# on content integrations the component name is user defined at the time it is added to an agent policy #}
{% set component_name = pattern.title %}
{% set index_pattern = pattern.name %}
{# component_name_x maintains the functionality of merging local pillar changes with generated 'defaults' via SOC UI #}
{% set component_name_x = component_name.replace(".","_x_") %}
{# pillar overrides/merge expects the key names to follow the naming in elasticsearch/defaults.yaml eg. so-logs-1password_x_item_usages . The _x_ is replaced later on in elasticsearch/template.map.jinja #}
{% set integration_key = "so-" ~ integration_type ~ pkg.name + '_x_' ~ component_name_x %}
{# Default integration settings #}
{% set integration_defaults = {
"index_sorting": false,
"index_template": {
"composed_of": [integration_type ~ component_name ~ "@package", integration_type ~ component_name ~ "@custom", "so-fleet_integrations.ip_mappings-1", "so-fleet_globals-1", "so-fleet_agent_id_verification-1"],
"data_stream": {
"allow_custom_routing": false,
"hidden": false
},
"ignore_missing_component_templates": [integration_type ~ component_name ~ "@custom"],
"index_patterns": [index_pattern],
"priority": 501,
"template": {
"settings": {
"index": {
"lifecycle": {"name": "so-" ~ integration_type ~ component_name ~ "-logs"},
"number_of_replicas": 0
}
}
}
},
"policy": {
"phases": {
"cold": {
"actions": {
"allocate":{
"number_of_replicas": ""
},
"set_priority": {"priority": 0}
},
"min_age": "60d"
},
"delete": {
"actions": {
"delete": {}
},
"min_age": "365d"
},
"hot": {
"actions": {
"rollover": {
"max_age": "30d",
"max_primary_shard_size": "50gb"
},
"forcemerge":{
"max_num_segments": ""
},
"shrink":{
"max_primary_shard_size": "",
"method": "COUNT",
"number_of_shards": ""
},
"set_priority": {"priority": 100}
},
"min_age": "0ms"
},
"warm": {
"actions": {
"allocate": {
"number_of_replicas": ""
},
"forcemerge": {
"max_num_segments": ""
},
"shrink":{
"max_primary_shard_size": "",
"method": "COUNT",
"number_of_shards": ""
},
"set_priority": {"priority": 50}
},
"min_age": "30d"
}
}
}
} %}
{% do ADDON_CONTENT_INTEGRATION_DEFAULTS.update({integration_key: integration_defaults}) %}
{% endfor %}
{% else %}
{% endif %}
{% endif %}
{% endfor %}
Changed file: -1
@@ -1,6 +1,5 @@
 elasticfleet:
   enabled: False
-  patch_version: 9.3.3+build202604082258 # Elastic Agent specific patch release.
   enable_manager_output: True
   config:
     server:
Changed file: +103 -4
@@ -17,17 +17,65 @@ include:
- logstash.ssl - logstash.ssl
- elasticfleet.config - elasticfleet.config
- elasticfleet.sostatus - elasticfleet.sostatus
{%- if GLOBALS.role != "so-fleet" %}
- elasticfleet.manager
{%- endif %}
{% if GLOBALS.role != "so-fleet" %} {% if grains.role not in ['so-fleet'] %}
# Wait for Elasticsearch to be ready - no reason to try running Elastic Fleet server if ES is not ready # Wait for Elasticsearch to be ready - no reason to try running Elastic Fleet server if ES is not ready
wait_for_elasticsearch_elasticfleet: wait_for_elasticsearch_elasticfleet:
cmd.run: cmd.run:
- name: so-elasticsearch-wait - name: so-elasticsearch-wait
{% endif %}
# If enabled, automatically update Fleet Logstash Outputs
{% if ELASTICFLEETMERGED.config.server.enable_auto_configuration and grains.role not in ['so-import', 'so-eval', 'so-fleet'] %}
so-elastic-fleet-auto-configure-logstash-outputs:
cmd.run:
- name: /usr/sbin/so-elastic-fleet-outputs-update
- retry:
attempts: 4
interval: 30
{# Separate from above in order to catch elasticfleet-logstash.crt changes and force update to fleet output policy #}
so-elastic-fleet-auto-configure-logstash-outputs-force:
cmd.run:
- name: /usr/sbin/so-elastic-fleet-outputs-update --certs
- retry:
attempts: 4
interval: 30
- onchanges:
- x509: etc_elasticfleet_logstash_crt
- x509: elasticfleet_kafka_crt
{% endif %}
# If enabled, automatically update Fleet Server URLs & ES Connection
{% if ELASTICFLEETMERGED.config.server.enable_auto_configuration and grains.role not in ['so-fleet'] %}
so-elastic-fleet-auto-configure-server-urls:
cmd.run:
- name: /usr/sbin/so-elastic-fleet-urls-update
- retry:
attempts: 4
interval: 30
{% endif %}
# Automatically update Fleet Server Elasticsearch URLs & Agent Artifact URLs
{% if grains.role not in ['so-fleet'] %}
so-elastic-fleet-auto-configure-elasticsearch-urls:
cmd.run:
- name: /usr/sbin/so-elastic-fleet-es-url-update
- retry:
attempts: 4
interval: 30
so-elastic-fleet-auto-configure-artifact-urls:
cmd.run:
- name: /usr/sbin/so-elastic-fleet-artifacts-url-update
- retry:
attempts: 4
interval: 30
{% endif %}
# Sync Elastic Agent artifacts to Fleet Node # Sync Elastic Agent artifacts to Fleet Node
{% if grains.role in ['so-fleet'] %}
elasticagent_syncartifacts: elasticagent_syncartifacts:
file.recurse: file.recurse:
- name: /nsm/elastic-fleet/artifacts/beats - name: /nsm/elastic-fleet/artifacts/beats
@@ -101,6 +149,57 @@ so-elastic-fleet:
- x509: etc_elasticfleet_crt - x509: etc_elasticfleet_crt
{% endif %} {% endif %}
{% if GLOBALS.role != "so-fleet" %}
so-elastic-fleet-package-statefile:
file.managed:
- name: /opt/so/state/elastic_fleet_packages.txt
- contents: {{ELASTICFLEETMERGED.packages}}
so-elastic-fleet-package-upgrade:
cmd.run:
- name: /usr/sbin/so-elastic-fleet-package-upgrade
- retry:
attempts: 3
interval: 10
- onchanges:
- file: /opt/so/state/elastic_fleet_packages.txt
so-elastic-fleet-integrations:
cmd.run:
- name: /usr/sbin/so-elastic-fleet-integration-policy-load
- retry:
attempts: 3
interval: 10
so-elastic-agent-grid-upgrade:
cmd.run:
- name: /usr/sbin/so-elastic-agent-grid-upgrade
- retry:
attempts: 12
interval: 5
so-elastic-fleet-integration-upgrade:
cmd.run:
- name: /usr/sbin/so-elastic-fleet-integration-upgrade
- retry:
attempts: 3
interval: 10
{# Optional integrations script doesn't need the retries like so-elastic-fleet-integration-upgrade which loads the default integrations #}
so-elastic-fleet-addon-integrations:
cmd.run:
- name: /usr/sbin/so-elastic-fleet-optional-integrations-load
{% if ELASTICFLEETMERGED.config.defend_filters.enable_auto_configuration %}
so-elastic-defend-manage-filters-file-watch:
cmd.run:
- name: python3 /sbin/so-elastic-defend-manage-filters.py -c /opt/so/conf/elasticsearch/curl.config -d /opt/so/conf/elastic-fleet/defend-exclusions/disabled-filters.yaml -i /nsm/securityonion-resources/event_filters/ -i /opt/so/conf/elastic-fleet/defend-exclusions/rulesets/custom-filters/ &>> /opt/so/log/elasticfleet/elastic-defend-manage-filters.log
- onchanges:
- file: elasticdefendcustom
- file: elasticdefenddisabled
{% endif %}
{% endif %}
delete_so-elastic-fleet_so-status.disabled: delete_so-elastic-fleet_so-status.disabled:
file.uncomment: file.uncomment:
- name: /opt/so/conf/so-status/so-status.conf - name: /opt/so/conf/so-status/so-status.conf
@@ -9,22 +9,16 @@
   "namespace": "so",
   "description": "Zeek Import logs",
   "policy_id": "so-grid-nodes_general",
-  "policy_ids": [
-    "so-grid-nodes_general"
-  ],
-  "vars": {},
   "inputs": {
     "filestream-filestream": {
       "enabled": true,
       "streams": {
-        "filestream.filestream": {
+        "filestream.generic": {
           "enabled": true,
           "vars": {
             "paths": [
               "/nsm/import/*/zeek/logs/*.log"
             ],
-            "compression_gzip": false,
-            "use_logs_stream": false,
             "data_stream.dataset": "import",
             "pipeline": "",
             "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
@@ -40,8 +34,7 @@
             "fingerprint_length": "64",
             "file_identity_native": true,
             "exclude_lines": [],
-            "include_lines": [],
-            "delete_enabled": false
+            "include_lines": []
           }
         }
       }
@@ -15,25 +15,19 @@
"version": "" "version": ""
}, },
"name": "kratos-logs", "name": "kratos-logs",
"namespace": "so",
"description": "Kratos logs", "description": "Kratos logs",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"policy_ids": [ "namespace": "so",
"so-grid-nodes_general"
],
"vars": {},
"inputs": { "inputs": {
"filestream-filestream": { "filestream-filestream": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.filestream": { "filestream.generic": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/opt/so/log/kratos/kratos.log" "/opt/so/log/kratos/kratos.log"
], ],
"compression_gzip": false,
"use_logs_stream": false,
"data_stream.dataset": "kratos", "data_stream.dataset": "kratos",
"pipeline": "kratos", "pipeline": "kratos",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n", "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
@@ -54,10 +48,10 @@
"harvester_limit": 0, "harvester_limit": 0,
"fingerprint": false, "fingerprint": false,
"fingerprint_offset": 0, "fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true, "file_identity_native": true,
"exclude_lines": [], "exclude_lines": [],
"include_lines": [], "include_lines": []
"delete_enabled": false
} }
} }
} }
@@ -9,22 +9,16 @@
"namespace": "so", "namespace": "so",
"description": "Zeek logs", "description": "Zeek logs",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"policy_ids": [
"so-grid-nodes_general"
],
"vars": {},
"inputs": { "inputs": {
"filestream-filestream": { "filestream-filestream": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.filestream": { "filestream.generic": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/nsm/zeek/logs/current/*.log" "/nsm/zeek/logs/current/*.log"
], ],
"compression_gzip": false,
"use_logs_stream": false,
"data_stream.dataset": "zeek", "data_stream.dataset": "zeek",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n", "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": ["({%- endraw -%}{{ ELASTICFLEETMERGED.logging.zeek.excluded | join('|') }}{%- raw -%})(\\..+)?\\.log$"], "exclude_files": ["({%- endraw -%}{{ ELASTICFLEETMERGED.logging.zeek.excluded | join('|') }}{%- raw -%})(\\..+)?\\.log$"],
@@ -36,10 +30,10 @@
"harvester_limit": 0, "harvester_limit": 0,
"fingerprint": false, "fingerprint": false,
"fingerprint_offset": 0, "fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true, "file_identity_native": true,
"exclude_lines": [], "exclude_lines": [],
"include_lines": [], "include_lines": []
"delete_enabled": false
} }
} }
} }
@@ -5,7 +5,7 @@
   "package": {
     "name": "endpoint",
     "title": "Elastic Defend",
-    "version": "9.3.0",
+    "version": "9.0.2",
     "requires_root": true
   },
   "enabled": true,
@@ -6,23 +6,21 @@
"name": "agent-monitor", "name": "agent-monitor",
"namespace": "", "namespace": "",
"description": "", "description": "",
"policy_id": "so-grid-nodes_general",
"policy_ids": [ "policy_ids": [
"so-grid-nodes_general" "so-grid-nodes_general"
], ],
"output_id": null,
"vars": {}, "vars": {},
"inputs": { "inputs": {
"filestream-filestream": { "filestream-filestream": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.filestream": { "filestream.generic": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/opt/so/log/agents/agent-monitor.log" "/opt/so/log/agents/agent-monitor.log"
], ],
"compression_gzip": false,
"use_logs_stream": false,
"data_stream.dataset": "agentmonitor", "data_stream.dataset": "agentmonitor",
"pipeline": "elasticagent.monitor", "pipeline": "elasticagent.monitor",
"parsers": "", "parsers": "",
@@ -36,16 +34,15 @@
"ignore_older": "72h", "ignore_older": "72h",
"clean_inactive": -1, "clean_inactive": -1,
"harvester_limit": 0, "harvester_limit": 0,
"fingerprint": false, "fingerprint": true,
"fingerprint_offset": 0, "fingerprint_offset": 0,
"file_identity_native": true, "fingerprint_length": 64,
"file_identity_native": false,
"exclude_lines": [], "exclude_lines": [],
"include_lines": [], "include_lines": []
"delete_enabled": false
} }
} }
} }
} }
}, }
"force": true
} }
@@ -4,25 +4,19 @@
"version": "" "version": ""
}, },
"name": "hydra-logs", "name": "hydra-logs",
"namespace": "so",
"description": "Hydra logs", "description": "Hydra logs",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"policy_ids": [ "namespace": "so",
"so-grid-nodes_general"
],
"vars": {},
"inputs": { "inputs": {
"filestream-filestream": { "filestream-filestream": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.filestream": { "filestream.generic": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/opt/so/log/hydra/hydra.log" "/opt/so/log/hydra/hydra.log"
], ],
"compression_gzip": false,
"use_logs_stream": false,
"data_stream.dataset": "hydra", "data_stream.dataset": "hydra",
"pipeline": "hydra", "pipeline": "hydra",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n", "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
@@ -40,10 +34,10 @@
"harvester_limit": 0, "harvester_limit": 0,
"fingerprint": false, "fingerprint": false,
"fingerprint_offset": 0, "fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true, "file_identity_native": true,
"exclude_lines": [], "exclude_lines": [],
"include_lines": [], "include_lines": []
"delete_enabled": false
} }
} }
} }
@@ -4,25 +4,19 @@
"version": "" "version": ""
}, },
"name": "idh-logs", "name": "idh-logs",
"namespace": "so",
"description": "IDH integration", "description": "IDH integration",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"policy_ids": [ "namespace": "so",
"so-grid-nodes_general"
],
"vars": {},
"inputs": { "inputs": {
"filestream-filestream": { "filestream-filestream": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.filestream": { "filestream.generic": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/nsm/idh/opencanary.log" "/nsm/idh/opencanary.log"
], ],
"compression_gzip": false,
"use_logs_stream": false,
"data_stream.dataset": "idh", "data_stream.dataset": "idh",
"pipeline": "common", "pipeline": "common",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n", "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
@@ -37,10 +31,10 @@
"harvester_limit": 0, "harvester_limit": 0,
"fingerprint": false, "fingerprint": false,
"fingerprint_offset": 0, "fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true, "file_identity_native": true,
"exclude_lines": [], "exclude_lines": [],
"include_lines": [], "include_lines": []
"delete_enabled": false
} }
} }
} }
@@ -4,32 +4,26 @@
"version": "" "version": ""
}, },
"name": "import-evtx-logs", "name": "import-evtx-logs",
"namespace": "so",
"description": "Import Windows EVTX logs", "description": "Import Windows EVTX logs",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"policy_ids": [ "namespace": "so",
"so-grid-nodes_general"
],
"vars": {},
"inputs": { "inputs": {
"filestream-filestream": { "filestream-filestream": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.filestream": { "filestream.generic": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/nsm/import/*/evtx/*.json" "/nsm/import/*/evtx/*.json"
], ],
"compression_gzip": false,
"use_logs_stream": false,
"data_stream.dataset": "import", "data_stream.dataset": "import",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n", "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [ "exclude_files": [
"\\.gz$" "\\.gz$"
], ],
"include_files": [], "include_files": [],
"processors": "- dissect:\n tokenizer: \"/nsm/import/%{import.id}/evtx/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n- drop_fields:\n fields: [\"host\"]\n ignore_missing: true\n- add_fields:\n target: data_stream\n fields:\n type: logs\n dataset: system.security\n- add_fields:\n target: event\n fields:\n dataset: system.security\n module: system\n imported: true\n- add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.security-2.15.0\n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-Sysmon/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.sysmon_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.sysmon_operational\n module: windows\n imported: true\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.sysmon_operational-3.8.0\n- if:\n equals:\n winlog.channel: 'Application'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.application\n - add_fields:\n target: event\n fields:\n dataset: system.application\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.application-2.15.0\n- if:\n equals:\n winlog.channel: 'System'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.system\n - add_fields:\n target: event\n fields:\n dataset: system.system\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.system-2.15.0\n \n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-PowerShell/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.powershell_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.powershell_operational\n module: windows\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.powershell_operational-3.8.0\n- add_fields:\n target: data_stream\n fields:\n dataset: import", "processors": "- dissect:\n tokenizer: \"/nsm/import/%{import.id}/evtx/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n- drop_fields:\n fields: [\"host\"]\n ignore_missing: true\n- add_fields:\n target: data_stream\n fields:\n type: logs\n dataset: system.security\n- add_fields:\n target: event\n fields:\n dataset: system.security\n module: system\n imported: true\n- add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.security-2.6.1\n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-Sysmon/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.sysmon_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.sysmon_operational\n module: windows\n imported: true\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.sysmon_operational-3.1.2\n- if:\n equals:\n winlog.channel: 'Application'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.application\n - add_fields:\n target: event\n fields:\n dataset: system.application\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.application-2.6.1\n- if:\n equals:\n winlog.channel: 'System'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.system\n - add_fields:\n target: event\n fields:\n dataset: system.system\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.system-2.6.1\n \n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-PowerShell/Operational'\n then: \n - 
add_fields:\n target: data_stream\n fields:\n dataset: windows.powershell_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.powershell_operational\n module: windows\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.powershell_operational-3.1.2\n- add_fields:\n target: data_stream\n fields:\n dataset: import",
"tags": [ "tags": [
"import" "import"
], ],
@@ -39,10 +33,10 @@
"harvester_limit": 0, "harvester_limit": 0,
"fingerprint": false, "fingerprint": false,
"fingerprint_offset": 0, "fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true, "file_identity_native": true,
"exclude_lines": [], "exclude_lines": [],
"include_lines": [], "include_lines": []
"delete_enabled": false
} }
} }
} }
@@ -4,25 +4,19 @@
"version": "" "version": ""
}, },
"name": "import-suricata-logs", "name": "import-suricata-logs",
"namespace": "so",
"description": "Import Suricata logs", "description": "Import Suricata logs",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"policy_ids": [ "namespace": "so",
"so-grid-nodes_general"
],
"vars": {},
"inputs": { "inputs": {
"filestream-filestream": { "filestream-filestream": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.filestream": { "filestream.generic": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/nsm/import/*/suricata/eve*.json" "/nsm/import/*/suricata/eve*.json"
], ],
"compression_gzip": false,
"use_logs_stream": false,
"data_stream.dataset": "import", "data_stream.dataset": "import",
"pipeline": "suricata.common", "pipeline": "suricata.common",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n", "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
@@ -38,10 +32,10 @@
"harvester_limit": 0, "harvester_limit": 0,
"fingerprint": false, "fingerprint": false,
"fingerprint_offset": 0, "fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true, "file_identity_native": true,
"exclude_lines": [], "exclude_lines": [],
"include_lines": [], "include_lines": []
"delete_enabled": false
} }
} }
} }
@@ -4,18 +4,14 @@
"version": "" "version": ""
}, },
"name": "rita-logs", "name": "rita-logs",
"namespace": "so",
"description": "RITA Logs", "description": "RITA Logs",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"policy_ids": [ "namespace": "so",
"so-grid-nodes_general"
],
"vars": {},
"inputs": { "inputs": {
"filestream-filestream": { "filestream-filestream": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.filestream": { "filestream.generic": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
@@ -23,8 +19,6 @@
"/nsm/rita/exploded-dns.csv", "/nsm/rita/exploded-dns.csv",
"/nsm/rita/long-connections.csv" "/nsm/rita/long-connections.csv"
], ],
"compression_gzip": false,
"use_logs_stream": false,
"data_stream.dataset": "rita", "data_stream.dataset": "rita",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n", "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [ "exclude_files": [
@@ -39,10 +33,10 @@
"harvester_limit": 0, "harvester_limit": 0,
"fingerprint": false, "fingerprint": false,
"fingerprint_offset": 0, "fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true, "file_identity_native": true,
"exclude_lines": [], "exclude_lines": [],
"include_lines": [], "include_lines": []
"delete_enabled": false
} }
} }
} }
@@ -4,25 +4,19 @@
"version": "" "version": ""
}, },
"name": "so-ip-mappings", "name": "so-ip-mappings",
"namespace": "so",
"description": "IP Description mappings", "description": "IP Description mappings",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"policy_ids": [ "namespace": "so",
"so-grid-nodes_general"
],
"vars": {},
"inputs": { "inputs": {
"filestream-filestream": { "filestream-filestream": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.filestream": { "filestream.generic": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/nsm/custom-mappings/ip-descriptions.csv" "/nsm/custom-mappings/ip-descriptions.csv"
], ],
"compression_gzip": false,
"use_logs_stream": false,
"data_stream.dataset": "hostnamemappings", "data_stream.dataset": "hostnamemappings",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n", "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [ "exclude_files": [
@@ -38,10 +32,10 @@
"harvester_limit": 0, "harvester_limit": 0,
"fingerprint": false, "fingerprint": false,
"fingerprint_offset": 0, "fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true, "file_identity_native": true,
"exclude_lines": [], "exclude_lines": [],
"include_lines": [], "include_lines": []
"delete_enabled": false
} }
} }
} }
@@ -4,25 +4,19 @@
"version": "" "version": ""
}, },
"name": "soc-auth-sync-logs", "name": "soc-auth-sync-logs",
"namespace": "so",
"description": "Security Onion - Elastic Auth Sync - Logs", "description": "Security Onion - Elastic Auth Sync - Logs",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"policy_ids": [ "namespace": "so",
"so-grid-nodes_general"
],
"vars": {},
"inputs": { "inputs": {
"filestream-filestream": { "filestream-filestream": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.filestream": { "filestream.generic": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/opt/so/log/soc/sync.log" "/opt/so/log/soc/sync.log"
], ],
"compression_gzip": false,
"use_logs_stream": false,
"data_stream.dataset": "soc", "data_stream.dataset": "soc",
"pipeline": "common", "pipeline": "common",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n", "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
@@ -37,10 +31,10 @@
"harvester_limit": 0, "harvester_limit": 0,
"fingerprint": false, "fingerprint": false,
"fingerprint_offset": 0, "fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true, "file_identity_native": true,
"exclude_lines": [], "exclude_lines": [],
"include_lines": [], "include_lines": []
"delete_enabled": false
} }
} }
} }
@@ -4,26 +4,20 @@
"version": "" "version": ""
}, },
"name": "soc-detections-logs", "name": "soc-detections-logs",
"namespace": "so",
"description": "Security Onion Console - Detections Logs", "description": "Security Onion Console - Detections Logs",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"policy_ids": [ "namespace": "so",
"so-grid-nodes_general"
],
"vars": {},
"inputs": { "inputs": {
"filestream-filestream": { "filestream-filestream": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.filestream": { "filestream.generic": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/opt/so/log/soc/detections_runtime-status_sigma.log", "/opt/so/log/soc/detections_runtime-status_sigma.log",
"/opt/so/log/soc/detections_runtime-status_yara.log" "/opt/so/log/soc/detections_runtime-status_yara.log"
], ],
"compression_gzip": false,
"use_logs_stream": false,
"data_stream.dataset": "soc", "data_stream.dataset": "soc",
"pipeline": "common", "pipeline": "common",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n", "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
@@ -41,10 +35,10 @@
"harvester_limit": 0, "harvester_limit": 0,
"fingerprint": false, "fingerprint": false,
"fingerprint_offset": 0, "fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true, "file_identity_native": true,
"exclude_lines": [], "exclude_lines": [],
"include_lines": [], "include_lines": []
"delete_enabled": false
} }
} }
} }
@@ -4,25 +4,19 @@
"version": "" "version": ""
}, },
"name": "soc-salt-relay-logs", "name": "soc-salt-relay-logs",
"namespace": "so",
"description": "Security Onion - Salt Relay - Logs", "description": "Security Onion - Salt Relay - Logs",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"policy_ids": [ "namespace": "so",
"so-grid-nodes_general"
],
"vars": {},
"inputs": { "inputs": {
"filestream-filestream": { "filestream-filestream": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.filestream": { "filestream.generic": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/opt/so/log/soc/salt-relay.log" "/opt/so/log/soc/salt-relay.log"
], ],
"compression_gzip": false,
"use_logs_stream": false,
"data_stream.dataset": "soc", "data_stream.dataset": "soc",
"pipeline": "common", "pipeline": "common",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n", "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
@@ -39,10 +33,10 @@
"harvester_limit": 0, "harvester_limit": 0,
"fingerprint": false, "fingerprint": false,
"fingerprint_offset": 0, "fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true, "file_identity_native": true,
"exclude_lines": [], "exclude_lines": [],
"include_lines": [], "include_lines": []
"delete_enabled": false
} }
} }
} }
@@ -4,25 +4,19 @@
"version": "" "version": ""
}, },
"name": "soc-sensoroni-logs", "name": "soc-sensoroni-logs",
"namespace": "so",
"description": "Security Onion - Sensoroni - Logs", "description": "Security Onion - Sensoroni - Logs",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"policy_ids": [ "namespace": "so",
"so-grid-nodes_general"
],
"vars": {},
"inputs": { "inputs": {
"filestream-filestream": { "filestream-filestream": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.filestream": { "filestream.generic": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/opt/so/log/sensoroni/sensoroni.log" "/opt/so/log/sensoroni/sensoroni.log"
], ],
"compression_gzip": false,
"use_logs_stream": false,
"data_stream.dataset": "soc", "data_stream.dataset": "soc",
"pipeline": "common", "pipeline": "common",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n", "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
@@ -37,10 +31,10 @@
"harvester_limit": 0, "harvester_limit": 0,
"fingerprint": false, "fingerprint": false,
"fingerprint_offset": 0, "fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true, "file_identity_native": true,
"exclude_lines": [], "exclude_lines": [],
"include_lines": [], "include_lines": []
"delete_enabled": false
} }
} }
} }
@@ -4,25 +4,19 @@
"version": "" "version": ""
}, },
"name": "soc-server-logs", "name": "soc-server-logs",
"namespace": "so",
"description": "Security Onion Console Logs", "description": "Security Onion Console Logs",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"policy_ids": [ "namespace": "so",
"so-grid-nodes_general"
],
"vars": {},
"inputs": { "inputs": {
"filestream-filestream": { "filestream-filestream": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.filestream": { "filestream.generic": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/opt/so/log/soc/sensoroni-server.log" "/opt/so/log/soc/sensoroni-server.log"
], ],
"compression_gzip": false,
"use_logs_stream": false,
"data_stream.dataset": "soc", "data_stream.dataset": "soc",
"pipeline": "common", "pipeline": "common",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n", "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
@@ -39,10 +33,10 @@
"harvester_limit": 0, "harvester_limit": 0,
"fingerprint": false, "fingerprint": false,
"fingerprint_offset": 0, "fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true, "file_identity_native": true,
"exclude_lines": [], "exclude_lines": [],
"include_lines": [], "include_lines": []
"delete_enabled": false
} }
} }
} }
@@ -4,25 +4,19 @@
"version": "" "version": ""
}, },
"name": "strelka-logs", "name": "strelka-logs",
"namespace": "so",
"description": "Strelka Logs", "description": "Strelka Logs",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"policy_ids": [ "namespace": "so",
"so-grid-nodes_general"
],
"vars": {},
"inputs": { "inputs": {
"filestream-filestream": { "filestream-filestream": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.filestream": { "filestream.generic": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/nsm/strelka/log/strelka.log" "/nsm/strelka/log/strelka.log"
], ],
"compression_gzip": false,
"use_logs_stream": false,
"data_stream.dataset": "strelka", "data_stream.dataset": "strelka",
"pipeline": "strelka.file", "pipeline": "strelka.file",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n", "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
@@ -37,10 +31,10 @@
"harvester_limit": 0, "harvester_limit": 0,
"fingerprint": false, "fingerprint": false,
"fingerprint_offset": 0, "fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true, "file_identity_native": true,
"exclude_lines": [], "exclude_lines": [],
"include_lines": [], "include_lines": []
"delete_enabled": false
} }
} }
} }
@@ -4,25 +4,19 @@
"version": "" "version": ""
}, },
"name": "suricata-logs", "name": "suricata-logs",
"namespace": "so",
"description": "Suricata integration", "description": "Suricata integration",
"policy_id": "so-grid-nodes_general", "policy_id": "so-grid-nodes_general",
"policy_ids": [ "namespace": "so",
"so-grid-nodes_general"
],
"vars": {},
"inputs": { "inputs": {
"filestream-filestream": { "filestream-filestream": {
"enabled": true, "enabled": true,
"streams": { "streams": {
"filestream.filestream": { "filestream.generic": {
"enabled": true, "enabled": true,
"vars": { "vars": {
"paths": [ "paths": [
"/nsm/suricata/eve*.json" "/nsm/suricata/eve*.json"
], ],
"compression_gzip": false,
"use_logs_stream": false,
"data_stream.dataset": "suricata", "data_stream.dataset": "suricata",
"pipeline": "suricata.common", "pipeline": "suricata.common",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n", "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
@@ -37,10 +31,10 @@
"harvester_limit": 0, "harvester_limit": 0,
"fingerprint": false, "fingerprint": false,
"fingerprint_offset": 0, "fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true, "file_identity_native": true,
"exclude_lines": [], "exclude_lines": [],
"include_lines": [], "include_lines": []
"delete_enabled": false
} }
} }
} }
Deleted file: -123
@@ -1,123 +0,0 @@
{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
this file except in compliance with the Elastic License 2.0. #}
{% import_json '/opt/so/state/esfleet_input_package_components.json' as ADDON_INPUT_PACKAGE_COMPONENTS %}
{% import_json '/opt/so/state/esfleet_component_templates.json' as INSTALLED_COMPONENT_TEMPLATES %}
{% import_yaml 'elasticfleet/defaults.yaml' as ELASTICFLEETDEFAULTS %}
{% set CORE_ESFLEET_PACKAGES = ELASTICFLEETDEFAULTS.get('elasticfleet', {}).get('packages', {}) %}
{% set ADDON_INPUT_INTEGRATION_DEFAULTS = {} %}
{% set DEBUG_STUFF = {} %}
{% for pkg in ADDON_INPUT_PACKAGE_COMPONENTS %}
{% if pkg.name in CORE_ESFLEET_PACKAGES %}
{# skip core input packages #}
{% elif pkg.name not in CORE_ESFLEET_PACKAGES %}
{# generate defaults for each input package #}
{% if pkg.dataStreams is defined and pkg.dataStreams is not none and pkg.dataStreams | length > 0 %}
{% for pattern in pkg.dataStreams %}
{# in ES 9.3.2 'input' type integrations no longer create default component templates and instead they wait for user input during 'integration' setup (fleet ui config)
title: generic is an artifact of that and is not in use #}
{% if pattern.title == "generic" %}
{% continue %}
{% endif %}
{% if "metrics-" in pattern.name %}
{% set integration_type = "metrics-" %}
{% elif "logs-" in pattern.name %}
{% set integration_type = "logs-" %}
{% else %}
{% set integration_type = "" %}
{% endif %}
{# on input integrations the component name is user defined at the time it is added to an agent policy #}
{% set component_name = pattern.title %}
{% set index_pattern = pattern.name %}
{# component_name_x maintains the functionality of merging local pillar changes with generated 'defaults' via SOC UI #}
{% set component_name_x = component_name.replace(".","_x_") %}
{# pillar overrides/merge expects the key names to follow the naming in elasticsearch/defaults.yaml eg. so-logs-1password_x_item_usages . The _x_ is replaced later on in elasticsearch/template.map.jinja #}
{% set integration_key = "so-" ~ integration_type ~ pkg.name + '_x_' ~ component_name_x %}
{# Default integration settings #}
{% set integration_defaults = {
"index_sorting": false,
"index_template": {
"composed_of": [integration_type ~ component_name ~ "@package", integration_type ~ component_name ~ "@custom", "so-fleet_integrations.ip_mappings-1", "so-fleet_globals-1", "so-fleet_agent_id_verification-1"],
"data_stream": {
"allow_custom_routing": false,
"hidden": false
},
"ignore_missing_component_templates": [integration_type ~ component_name ~ "@custom"],
"index_patterns": [index_pattern],
"priority": 501,
"template": {
"settings": {
"index": {
"lifecycle": {"name": "so-" ~ integration_type ~ component_name ~ "-logs"},
"number_of_replicas": 0
}
}
}
},
"policy": {
"phases": {
"cold": {
"actions": {
"allocate":{
"number_of_replicas": ""
},
"set_priority": {"priority": 0}
},
"min_age": "60d"
},
"delete": {
"actions": {
"delete": {}
},
"min_age": "365d"
},
"hot": {
"actions": {
"rollover": {
"max_age": "30d",
"max_primary_shard_size": "50gb"
},
"forcemerge":{
"max_num_segments": ""
},
"shrink":{
"max_primary_shard_size": "",
"method": "COUNT",
"number_of_shards": ""
},
"set_priority": {"priority": 100}
},
"min_age": "0ms"
},
"warm": {
"actions": {
"allocate": {
"number_of_replicas": ""
},
"forcemerge": {
"max_num_segments": ""
},
"shrink":{
"max_primary_shard_size": "",
"method": "COUNT",
"number_of_shards": ""
},
"set_priority": {"priority": 50}
},
"min_age": "30d"
}
}
}
} %}
{% do ADDON_INPUT_INTEGRATION_DEFAULTS.update({integration_key: integration_defaults}) %}
{% do DEBUG_STUFF.update({integration_key: "Generating defaults for "+ pkg.name })%}
{% endfor %}
{% endif %}
{% endif %}
{% endfor %}
@@ -59,8 +59,8 @@
{# skip core integrations #} {# skip core integrations #}
{% elif pkg.name not in CORE_ESFLEET_PACKAGES %} {% elif pkg.name not in CORE_ESFLEET_PACKAGES %}
{# generate defaults for each integration #} {# generate defaults for each integration #}
{% if pkg.dataStreams is defined and pkg.dataStreams is not none and pkg.dataStreams | length > 0 %} {% if pkg.es_index_patterns is defined and pkg.es_index_patterns is not none %}
{% for pattern in pkg.dataStreams %} {% for pattern in pkg.es_index_patterns %}
{% if "metrics-" in pattern.name %} {% if "metrics-" in pattern.name %}
{% set integration_type = "metrics-" %} {% set integration_type = "metrics-" %}
{% elif "logs-" in pattern.name %} {% elif "logs-" in pattern.name %}
@@ -75,27 +75,44 @@
{% if component_name in WEIRD_INTEGRATIONS %} {% if component_name in WEIRD_INTEGRATIONS %}
{% set component_name = WEIRD_INTEGRATIONS[component_name] %} {% set component_name = WEIRD_INTEGRATIONS[component_name] %}
{% endif %} {% endif %}
{# create duplicate of component_name, so we can split generics from @custom component templates in the index template below and overwrite the default @package when needed
eg. having to replace unifiedlogs.generic@package with filestream.generic@package, but keep the ability to customize unifiedlogs.generic@custom and its ILM policy #}
{% set custom_component_name = component_name %}
{# duplicate integration_type to assist with sometimes needing to overwrite component templates with 'logs-filestream.generic@package' (there is no metrics-filestream.generic@package) #}
{% set generic_integration_type = integration_type %}
{# component_name_x maintains the functionality of merging local pillar changes with generated 'defaults' via SOC UI #} {# component_name_x maintains the functionality of merging local pillar changes with generated 'defaults' via SOC UI #}
{% set component_name_x = component_name.replace(".","_x_") %} {% set component_name_x = component_name.replace(".","_x_") %}
{# pillar overrides/merge expects the key names to follow the naming in elasticsearch/defaults.yaml eg. so-logs-1password_x_item_usages . The _x_ is replaced later on in elasticsearch/template.map.jinja #} {# pillar overrides/merge expects the key names to follow the naming in elasticsearch/defaults.yaml eg. so-logs-1password_x_item_usages . The _x_ is replaced later on in elasticsearch/template.map.jinja #}
{% set integration_key = "so-" ~ integration_type ~ component_name_x %} {% set integration_key = "so-" ~ integration_type ~ component_name_x %}
{# if its a .generic template make sure that a .generic@package for the integration exists. Else default to logs-filestream.generic@package #}
{% if ".generic" in component_name and integration_type ~ component_name ~ "@package" not in INSTALLED_COMPONENT_TEMPLATES %}
{# these generic templates by default are directed to index_pattern of 'logs-generic-*', overwrite that here to point to eg gcp_pubsub.generic-* #}
{% set index_pattern = integration_type ~ component_name ~ "-*" %}
{# includes use of .generic component template, but it doesn't exist in installed component templates. Redirect it to filestream.generic@package #}
{% set component_name = "filestream.generic" %}
{% set generic_integration_type = "logs-" %}
{% endif %}
{# Default integration settings #} {# Default integration settings #}
{% set integration_defaults = { {% set integration_defaults = {
"index_sorting": false, "index_sorting": false,
"index_template": { "index_template": {
"composed_of": [integration_type ~ component_name ~ "@package", integration_type ~ component_name ~ "@custom", "so-fleet_integrations.ip_mappings-1", "so-fleet_globals-1", "so-fleet_agent_id_verification-1"], "composed_of": [generic_integration_type ~ component_name ~ "@package", integration_type ~ custom_component_name ~ "@custom", "so-fleet_integrations.ip_mappings-1", "so-fleet_globals-1", "so-fleet_agent_id_verification-1"],
"data_stream": { "data_stream": {
"allow_custom_routing": false, "allow_custom_routing": false,
"hidden": false "hidden": false
}, },
"ignore_missing_component_templates": [integration_type ~ component_name ~ "@custom"], "ignore_missing_component_templates": [integration_type ~ custom_component_name ~ "@custom"],
"index_patterns": [index_pattern], "index_patterns": [index_pattern],
"priority": 501, "priority": 501,
"template": { "template": {
"settings": { "settings": {
"index": { "index": {
"lifecycle": {"name": "so-" ~ integration_type ~ component_name ~ "-logs"}, "lifecycle": {"name": "so-" ~ integration_type ~ custom_component_name ~ "-logs"},
"number_of_replicas": 0 "number_of_replicas": 0
} }
} }
Deleted file: -112
@@ -1,112 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %}
{% from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED %}
include:
- elasticfleet.config
# If enabled, automatically update Fleet Logstash Outputs
{% if ELASTICFLEETMERGED.config.server.enable_auto_configuration and grains.role not in ['so-import', 'so-eval'] %}
so-elastic-fleet-auto-configure-logstash-outputs:
cmd.run:
- name: /usr/sbin/so-elastic-fleet-outputs-update
- retry:
attempts: 4
interval: 30
{# Separate from above in order to catch elasticfleet-logstash.crt changes and force update to fleet output policy #}
so-elastic-fleet-auto-configure-logstash-outputs-force:
cmd.run:
- name: /usr/sbin/so-elastic-fleet-outputs-update --certs
- retry:
attempts: 4
interval: 30
- onchanges:
- x509: etc_elasticfleet_logstash_crt
- x509: elasticfleet_kafka_crt
{% endif %}
# If enabled, automatically update Fleet Server URLs & ES Connection
so-elastic-fleet-auto-configure-server-urls:
cmd.run:
- name: /usr/sbin/so-elastic-fleet-urls-update
- retry:
attempts: 4
interval: 30
# Automatically update Fleet Server Elasticsearch URLs & Agent Artifact URLs
so-elastic-fleet-auto-configure-elasticsearch-urls:
cmd.run:
- name: /usr/sbin/so-elastic-fleet-es-url-update
- retry:
attempts: 4
interval: 30
so-elastic-fleet-auto-configure-artifact-urls:
cmd.run:
- name: /usr/sbin/so-elastic-fleet-artifacts-url-update
- retry:
attempts: 4
interval: 30
so-elastic-fleet-package-statefile:
file.managed:
- name: /opt/so/state/elastic_fleet_packages.txt
- contents: {{ELASTICFLEETMERGED.packages}}
so-elastic-fleet-package-upgrade:
cmd.run:
- name: /usr/sbin/so-elastic-fleet-package-upgrade
- retry:
attempts: 3
interval: 10
- onchanges:
- file: /opt/so/state/elastic_fleet_packages.txt
so-elastic-fleet-integrations:
cmd.run:
- name: /usr/sbin/so-elastic-fleet-integration-policy-load
- retry:
attempts: 3
interval: 10
so-elastic-agent-grid-upgrade:
cmd.run:
- name: /usr/sbin/so-elastic-agent-grid-upgrade
- retry:
attempts: 12
interval: 5
so-elastic-fleet-integration-upgrade:
cmd.run:
- name: /usr/sbin/so-elastic-fleet-integration-upgrade
- retry:
attempts: 3
interval: 10
{# Optional integrations script doesn't need the retries like so-elastic-fleet-integration-upgrade which loads the default integrations #}
so-elastic-fleet-addon-integrations:
cmd.run:
- name: /usr/sbin/so-elastic-fleet-optional-integrations-load
{% if ELASTICFLEETMERGED.config.defend_filters.enable_auto_configuration %}
so-elastic-defend-manage-filters-file-watch:
cmd.run:
- name: python3 /sbin/so-elastic-defend-manage-filters.py -c /opt/so/conf/elasticsearch/curl.config -d /opt/so/conf/elastic-fleet/defend-exclusions/disabled-filters.yaml -i /nsm/securityonion-resources/event_filters/ -i /opt/so/conf/elastic-fleet/defend-exclusions/rulesets/custom-filters/ &>> /opt/so/log/elasticfleet/elastic-defend-manage-filters.log
- onchanges:
- file: elasticdefendcustom
- file: elasticdefenddisabled
{% endif %}
{% else %}
{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}
@@ -135,33 +135,9 @@ elastic_fleet_bulk_package_install() {
     fi
 }
-elastic_fleet_get_package_list_by_type() {
-    if ! output=$(fleet_api "epm/packages"); then
+elastic_fleet_installed_packages() {
+    if ! fleet_api "epm/packages/installed?perPage=500"; then
         return 1
-    else
-        is_integration=$(jq '[.items[] | select(.type=="integration") | .name ]' <<< "$output")
-        is_input=$(jq '[.items[] | select(.type=="input") | .name ]' <<< "$output")
-        is_content=$(jq '[.items[] | select(.type=="content") | .name ]' <<< "$output")
-        jq -n --argjson is_integration "${is_integration:-[]}" \
-            --argjson is_input "${is_input:-[]}" \
-            --argjson is_content "${is_content:-[]}" \
-            '{"integration": $is_integration,"input": $is_input, "content": $is_content}'
-    fi
-}
-elastic_fleet_installed_packages_components() {
-    package_type=${1,,}
-    if [[ "$package_type" != "integration" && "$package_type" != "input" && "$package_type" != "content" ]]; then
-        echo "Error: Invalid package type ${package_type}. Valid types are 'integration', 'input', or 'content'."
-        return 1
-    fi
-    packages_by_type=$(elastic_fleet_get_package_list_by_type)
-    packages=$(jq --arg package_type "$package_type" '.[$package_type]' <<< "$packages_by_type")
-    if ! output=$(fleet_api "epm/packages/installed?perPage=500"); then
-        return 1
-    else
-        jq -c --argjson packages "$packages" '[.items[] | select(.name | IN($packages[])) | {name: .name, dataStreams: .dataStreams}]' <<< "$output"
-    fi
     fi
 }
@@ -5,13 +5,7 @@
 # this file except in compliance with the Elastic License 2.0.
 . /usr/sbin/so-common
-. /usr/sbin/so-elastic-fleet-common
 {%- import_yaml 'elasticsearch/defaults.yaml' as ELASTICSEARCHDEFAULTS %}
-{%- import_yaml 'elasticfleet/defaults.yaml' as ELASTICFLEETDEFAULTS %}
-{# Optionally override Elasticsearch version for Elastic Agent patch releases #}
-{%- if ELASTICFLEETDEFAULTS.elasticfleet.patch_version is defined %}
-{%- do ELASTICSEARCHDEFAULTS.elasticsearch.update({'version': ELASTICFLEETDEFAULTS.elasticfleet.patch_version}) %}
-{%- endif %}
 # Only run on Managers
 if ! is_manager_node; then
@@ -20,10 +14,13 @@ if ! is_manager_node; then
 fi
 # Get current list of Grid Node Agents that need to be upgraded
-if ! RAW_JSON=$(fleet_api "agents?perPage=20&page=1&kuery=NOT%20agent.version%3A%20{{ELASTICSEARCHDEFAULTS.elasticsearch.version | urlencode }}%20AND%20policy_id%3A%20so-grid-nodes_%2A&showInactive=false&getStatusSummary=true" -H 'kbn-xsrf: true' -H 'Content-Type: application/json'); then
-  printf "Failed to query for current Grid Agents...\n"
-  exit 1
+RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/agents?perPage=20&page=1&kuery=NOT%20agent.version%3A%20{{ELASTICSEARCHDEFAULTS.elasticsearch.version}}%20AND%20policy_id%3A%20so-grid-nodes_%2A&showInactive=false&getStatusSummary=true" --retry 3 --retry-delay 30 --fail 2>/dev/null)
+# Check to make sure that the server responded with good data - else, bail from script
+CHECKSUM=$(jq -r '.page' <<< "$RAW_JSON")
+if [ "$CHECKSUM" -ne 1 ]; then
+  printf "Failed to query for current Grid Agents...\n"
+  exit 1
 fi
 # Generate list of Node Agents that need updates
@@ -34,12 +31,10 @@ if [ "$OUTDATED_LIST" != '[]' ]; then
printf "Initiating upgrades for $AGENTNUMBERS Agents to Elastic {{ELASTICSEARCHDEFAULTS.elasticsearch.version}}...\n\n" printf "Initiating upgrades for $AGENTNUMBERS Agents to Elastic {{ELASTICSEARCHDEFAULTS.elasticsearch.version}}...\n\n"
# Generate updated JSON payload # Generate updated JSON payload
JSON_STRING=$(jq -n --arg ELASTICVERSION "{{ELASTICSEARCHDEFAULTS.elasticsearch.version}}" --argjson UPDATELIST "$OUTDATED_LIST" '{"version": $ELASTICVERSION,"agents": $UPDATELIST }') JSON_STRING=$(jq -n --arg ELASTICVERSION {{ELASTICSEARCHDEFAULTS.elasticsearch.version}} --arg UPDATELIST $OUTDATED_LIST '{"version": $ELASTICVERSION,"agents": $UPDATELIST }')
# Update Node Agents # Update Node Agents
if ! fleet_api "agents/bulk_upgrade" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"; then curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "http://localhost:5601/api/fleet/agents/bulk_upgrade" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
printf "Failed to initiate Agent upgrades...\n"
fi
else else
printf "No Agents need updates... Exiting\n\n" printf "No Agents need updates... Exiting\n\n"
exit 0 exit 0
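A minimal sketch, with placeholder agent IDs and version, of the payload construction used above; --argjson keeps the outdated-agent list as a JSON array, whereas --arg would embed it as a single string:

    OUTDATED_LIST='["agent-id-1","agent-id-2"]'
    jq -n --arg ELASTICVERSION "9.0.8" --argjson UPDATELIST "$OUTDATED_LIST" '{"version": $ELASTICVERSION,"agents": $UPDATELIST }'
    # => {"version":"9.0.8","agents":["agent-id-1","agent-id-2"]}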
@@ -18,9 +18,7 @@ INSTALLED_PACKAGE_LIST=/tmp/esfleet_installed_packages.json
BULK_INSTALL_PACKAGE_LIST=/tmp/esfleet_bulk_install.json BULK_INSTALL_PACKAGE_LIST=/tmp/esfleet_bulk_install.json
BULK_INSTALL_PACKAGE_TMP=/tmp/esfleet_bulk_install_tmp.json BULK_INSTALL_PACKAGE_TMP=/tmp/esfleet_bulk_install_tmp.json
BULK_INSTALL_OUTPUT=/opt/so/state/esfleet_bulk_install_results.json BULK_INSTALL_OUTPUT=/opt/so/state/esfleet_bulk_install_results.json
INTEGRATION_PACKAGE_COMPONENTS=/opt/so/state/esfleet_package_components.json PACKAGE_COMPONENTS=/opt/so/state/esfleet_package_components.json
INPUT_PACKAGE_COMPONENTS=/opt/so/state/esfleet_input_package_components.json
CONTENT_PACKAGE_COMPONENTS=/opt/so/state/esfleet_content_package_components.json
COMPONENT_TEMPLATES=/opt/so/state/esfleet_component_templates.json COMPONENT_TEMPLATES=/opt/so/state/esfleet_component_templates.json
PENDING_UPDATE=false PENDING_UPDATE=false
@@ -181,13 +179,10 @@ if [[ -f $STATE_FILE_SUCCESS ]]; then
else else
echo "Elastic integrations don't appear to need installation/updating..." echo "Elastic integrations don't appear to need installation/updating..."
fi fi
# Write out file for generating index/component/ilm templates, keeping each package type separate # Write out file for generating index/component/ilm templates
for package_type in "INTEGRATION" "INPUT" "CONTENT"; do if latest_installed_package_list=$(elastic_fleet_installed_packages); then
if latest_installed_package_list=$(elastic_fleet_installed_packages_components "$package_type"); then echo $latest_installed_package_list | jq '[.items[] | {name: .name, es_index_patterns: .dataStreams}]' > $PACKAGE_COMPONENTS
outfile="${package_type}_PACKAGE_COMPONENTS" fi
echo $latest_installed_package_list > "${!outfile}"
fi
done
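# A minimal sketch of the indirect expansion ("${!outfile}") used in the loop above; the loop
# variable selects one of the *_PACKAGE_COMPONENTS paths defined earlier (illustrative values):
#   package_type="INPUT"
#   outfile="${package_type}_PACKAGE_COMPONENTS"
#   echo "${!outfile}"   # expands to the value of INPUT_PACKAGE_COMPONENTS,
#                        # i.e. /opt/so/state/esfleet_input_package_components.json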
if retry 3 1 "so-elasticsearch-query / --fail --output /dev/null"; then if retry 3 1 "so-elasticsearch-query / --fail --output /dev/null"; then
# Refresh installed component template list # Refresh installed component template list
latest_component_templates_list=$(so-elasticsearch-query _component_template | jq '.component_templates[] | .name' | jq -s '.') latest_component_templates_list=$(so-elasticsearch-query _component_template | jq '.component_templates[] | .name' | jq -s '.')
-164
View File
@@ -1,164 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'elasticsearch/config.map.jinja' import ELASTICSEARCHMERGED %}
{% from 'elasticsearch/template.map.jinja' import ES_INDEX_SETTINGS, SO_MANAGED_INDICES %}
{% if GLOBALS.role != 'so-heavynode' %}
{% from 'elasticsearch/template.map.jinja' import ALL_ADDON_SETTINGS %}
{% endif %}
escomponenttemplates:
file.recurse:
- name: /opt/so/conf/elasticsearch/templates/component
- source: salt://elasticsearch/templates/component
- user: 930
- group: 939
- clean: True
- onchanges_in:
- file: so-elasticsearch-templates-reload
- show_changes: False
# Clean up legacy and non-SO managed templates from the elasticsearch/templates/index/ directory
so_index_template_dir:
file.directory:
- name: /opt/so/conf/elasticsearch/templates/index
- clean: True
{%- if SO_MANAGED_INDICES %}
- require:
{%- for index in SO_MANAGED_INDICES %}
- file: so_index_template_{{index}}
{%- endfor %}
{%- endif %}
# Auto-generate index templates for SO managed indices (directly defined in elasticsearch/defaults.yaml)
# These index templates are for the core SO datasets and are always required
{% for index, settings in ES_INDEX_SETTINGS.items() %}
{% if settings.index_template is defined %}
so_index_template_{{index}}:
file.managed:
- name: /opt/so/conf/elasticsearch/templates/index/{{ index }}-template.json
- source: salt://elasticsearch/base-template.json.jinja
- defaults:
TEMPLATE_CONFIG: {{ settings.index_template }}
- template: jinja
- onchanges_in:
- file: so-elasticsearch-templates-reload
{% endif %}
{% endfor %}
{% if GLOBALS.role != "so-heavynode" %}
# Auto-generate optional index templates for integration | input | content packages
# These index templates are not used by default (until a user adds the package to an agent policy).
# Pre-configured with standard defaults, and incorporated into SOC configuration for user customization.
{% for index,settings in ALL_ADDON_SETTINGS.items() %}
{% if settings.index_template is defined %}
addon_index_template_{{index}}:
file.managed:
- name: /opt/so/conf/elasticsearch/templates/addon-index/{{ index }}-template.json
- source: salt://elasticsearch/base-template.json.jinja
- defaults:
TEMPLATE_CONFIG: {{ settings.index_template }}
- template: jinja
- show_changes: False
- onchanges_in:
- file: addon-elasticsearch-templates-reload
{% endif %}
{% endfor %}
{% endif %}
{% if GLOBALS.role in GLOBALS.manager_roles %}
so-es-cluster-settings:
cmd.run:
- name: /usr/sbin/so-elasticsearch-cluster-settings
- cwd: /opt/so
- template: jinja
- require:
- docker_container: so-elasticsearch
- file: elasticsearch_sbin_jinja
- http: wait_for_so-elasticsearch
{% endif %}
# heavynodes will only load ILM policies for SO managed indices. (Indices defined in elasticsearch/defaults.yaml)
so-elasticsearch-ilm-policy-load:
cmd.run:
- name: /usr/sbin/so-elasticsearch-ilm-policy-load
- cwd: /opt/so
- require:
- docker_container: so-elasticsearch
- file: so-elasticsearch-ilm-policy-load-script
- onchanges:
- file: so-elasticsearch-ilm-policy-load-script
so-elasticsearch-templates-reload:
file.absent:
- name: /opt/so/state/estemplates.txt
addon-elasticsearch-templates-reload:
file.absent:
- name: /opt/so/state/addon_estemplates.txt
# so-elasticsearch-templates-load will have its first successful run during the 'so-elastic-fleet-setup' script
so-elasticsearch-templates:
cmd.run:
{%- if GLOBALS.role == "so-heavynode" %}
- name: /usr/sbin/so-elasticsearch-templates-load --heavynode
{%- else %}
- name: /usr/sbin/so-elasticsearch-templates-load
{%- endif %}
- cwd: /opt/so
- template: jinja
- require:
- docker_container: so-elasticsearch
- file: elasticsearch_sbin_jinja
so-elasticsearch-pipelines:
cmd.run:
- name: /usr/sbin/so-elasticsearch-pipelines {{ GLOBALS.hostname }}
- require:
- docker_container: so-elasticsearch
- file: so-elasticsearch-pipelines-script
so-elasticsearch-roles-load:
cmd.run:
- name: /usr/sbin/so-elasticsearch-roles-load
- cwd: /opt/so
- template: jinja
- require:
- docker_container: so-elasticsearch
- file: elasticsearch_sbin_jinja
{% if grains.role in ['so-managersearch', 'so-manager', 'so-managerhype'] %}
{% set ap = "absent" %}
{% endif %}
{% if grains.role in ['so-eval', 'so-standalone', 'so-heavynode'] %}
{% if ELASTICSEARCHMERGED.index_clean %}
{% set ap = "present" %}
{% else %}
{% set ap = "absent" %}
{% endif %}
{% endif %}
{% if grains.role in ['so-eval', 'so-standalone', 'so-managersearch', 'so-heavynode', 'so-manager'] %}
so-elasticsearch-indices-delete:
cron.{{ap}}:
- name: /usr/sbin/so-elasticsearch-indices-delete > /opt/so/log/elasticsearch/cron-elasticsearch-indices-delete.log 2>&1
- identifier: so-elasticsearch-indices-delete
- user: root
- minute: '*/5'
- hour: '*'
- daymonth: '*'
- month: '*'
- dayweek: '*'
{% endif %}
{% else %}
{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}
-9
View File
@@ -66,8 +66,6 @@ so-elasticsearch-ilm-policy-load-script:
- group: 939 - group: 939
- mode: 754 - mode: 754
- template: jinja - template: jinja
- defaults:
GLOBALS: {{ GLOBALS }}
- show_changes: False - show_changes: False
so-elasticsearch-pipelines-script: so-elasticsearch-pipelines-script:
@@ -93,13 +91,6 @@ estemplatedir:
- group: 939 - group: 939
- makedirs: True - makedirs: True
esaddontemplatedir:
file.directory:
- name: /opt/so/conf/elasticsearch/templates/addon-index
- user: 930
- group: 939
- makedirs: True
esrolesdir: esrolesdir:
file.directory: file.directory:
- name: /opt/so/conf/elasticsearch/roles - name: /opt/so/conf/elasticsearch/roles
+1 -1
View File
@@ -1,6 +1,6 @@
elasticsearch: elasticsearch:
enabled: false enabled: false
version: 9.3.3 version: 9.0.8
index_clean: true index_clean: true
vm: vm:
max_map_count: 1048576 max_map_count: 1048576
+125 -16
View File
@@ -10,6 +10,8 @@
{% from 'elasticsearch/config.map.jinja' import ELASTICSEARCH_NODES %} {% from 'elasticsearch/config.map.jinja' import ELASTICSEARCH_NODES %}
{% from 'elasticsearch/config.map.jinja' import ELASTICSEARCH_SEED_HOSTS %} {% from 'elasticsearch/config.map.jinja' import ELASTICSEARCH_SEED_HOSTS %}
{% from 'elasticsearch/config.map.jinja' import ELASTICSEARCHMERGED %} {% from 'elasticsearch/config.map.jinja' import ELASTICSEARCHMERGED %}
{% set TEMPLATES = salt['pillar.get']('elasticsearch:templates', {}) %}
{% from 'elasticsearch/template.map.jinja' import ES_INDEX_SETTINGS %}
include: include:
- ca - ca
@@ -17,9 +19,6 @@ include:
- elasticsearch.ssl - elasticsearch.ssl
- elasticsearch.config - elasticsearch.config
- elasticsearch.sostatus - elasticsearch.sostatus
{%- if GLOBALS.role != "so-searchode" %}
- elasticsearch.cluster
{%- endif%}
so-elasticsearch: so-elasticsearch:
docker_container.running: docker_container.running:
@@ -102,24 +101,134 @@ so-elasticsearch:
- cmd: auth_users_roles_inode - cmd: auth_users_roles_inode
- cmd: auth_users_inode - cmd: auth_users_inode
wait_for_so-elasticsearch:
http.wait_for_successful_query:
- name: "https://localhost:9200/"
- username: 'so_elastic'
- password: '{{ ELASTICSEARCHMERGED.auth.users.so_elastic_user.pass }}'
- ssl: True
- verify_ssl: False
- status: 200
- wait_for: 300
- request_interval: 15
- require:
- docker_container: so-elasticsearch
delete_so-elasticsearch_so-status.disabled: delete_so-elasticsearch_so-status.disabled:
file.uncomment: file.uncomment:
- name: /opt/so/conf/so-status/so-status.conf - name: /opt/so/conf/so-status/so-status.conf
- regex: ^so-elasticsearch$ - regex: ^so-elasticsearch$
{% if GLOBALS.role != "so-searchnode" %}
escomponenttemplates:
file.recurse:
- name: /opt/so/conf/elasticsearch/templates/component
- source: salt://elasticsearch/templates/component
- user: 930
- group: 939
- clean: True
- onchanges_in:
- file: so-elasticsearch-templates-reload
- show_changes: False
# Auto-generate templates from defaults file
{% for index, settings in ES_INDEX_SETTINGS.items() %}
{% if settings.index_template is defined %}
es_index_template_{{index}}:
file.managed:
- name: /opt/so/conf/elasticsearch/templates/index/{{ index }}-template.json
- source: salt://elasticsearch/base-template.json.jinja
- defaults:
TEMPLATE_CONFIG: {{ settings.index_template }}
- template: jinja
- show_changes: False
- onchanges_in:
- file: so-elasticsearch-templates-reload
{% endif %}
{% endfor %}
{% if TEMPLATES %}
# Sync custom templates to /opt/so/conf/elasticsearch/templates
{% for TEMPLATE in TEMPLATES %}
es_template_{{TEMPLATE.split('.')[0] | replace("/","_") }}:
file.managed:
- source: salt://elasticsearch/templates/index/{{TEMPLATE}}
{% if 'jinja' in TEMPLATE.split('.')[-1] %}
- name: /opt/so/conf/elasticsearch/templates/index/{{TEMPLATE.split('/')[1] | replace(".jinja", "")}}
- template: jinja
{% else %}
- name: /opt/so/conf/elasticsearch/templates/index/{{TEMPLATE.split('/')[1]}}
{% endif %}
- user: 930
- group: 939
- show_changes: False
- onchanges_in:
- file: so-elasticsearch-templates-reload
{% endfor %}
{% endif %}
{% if GLOBALS.role in GLOBALS.manager_roles %}
so-es-cluster-settings:
cmd.run:
- name: /usr/sbin/so-elasticsearch-cluster-settings
- cwd: /opt/so
- template: jinja
- require:
- docker_container: so-elasticsearch
- file: elasticsearch_sbin_jinja
{% endif %}
so-elasticsearch-ilm-policy-load:
cmd.run:
- name: /usr/sbin/so-elasticsearch-ilm-policy-load
- cwd: /opt/so
- require:
- docker_container: so-elasticsearch
- file: so-elasticsearch-ilm-policy-load-script
- onchanges:
- file: so-elasticsearch-ilm-policy-load-script
so-elasticsearch-templates-reload:
file.absent:
- name: /opt/so/state/estemplates.txt
so-elasticsearch-templates:
cmd.run:
- name: /usr/sbin/so-elasticsearch-templates-load
- cwd: /opt/so
- template: jinja
- require:
- docker_container: so-elasticsearch
- file: elasticsearch_sbin_jinja
so-elasticsearch-pipelines:
cmd.run:
- name: /usr/sbin/so-elasticsearch-pipelines {{ GLOBALS.hostname }}
- require:
- docker_container: so-elasticsearch
- file: so-elasticsearch-pipelines-script
so-elasticsearch-roles-load:
cmd.run:
- name: /usr/sbin/so-elasticsearch-roles-load
- cwd: /opt/so
- template: jinja
- require:
- docker_container: so-elasticsearch
- file: elasticsearch_sbin_jinja
{% if grains.role in ['so-managersearch', 'so-manager', 'so-managerhype'] %}
{% set ap = "absent" %}
{% endif %}
{% if grains.role in ['so-eval', 'so-standalone', 'so-heavynode'] %}
{% if ELASTICSEARCHMERGED.index_clean %}
{% set ap = "present" %}
{% else %}
{% set ap = "absent" %}
{% endif %}
{% endif %}
{% if grains.role in ['so-eval', 'so-standalone', 'so-managersearch', 'so-heavynode', 'so-manager'] %}
so-elasticsearch-indices-delete:
cron.{{ap}}:
- name: /usr/sbin/so-elasticsearch-indices-delete > /opt/so/log/elasticsearch/cron-elasticsearch-indices-delete.log 2>&1
- identifier: so-elasticsearch-indices-delete
- user: root
- minute: '*/5'
- hour: '*'
- daymonth: '*'
- month: '*'
- dayweek: '*'
{% endif %}
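# When the cron state above renders as cron.present, it manages a root crontab entry along
# these lines (illustrative rendering):
#   */5 * * * * /usr/sbin/so-elasticsearch-indices-delete > /opt/so/log/elasticsearch/cron-elasticsearch-indices-delete.log 2>&1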
{% endif %}
{% else %} {% else %}
{{sls}}_state_not_allowed: {{sls}}_state_not_allowed:
@@ -10,28 +10,24 @@
"processors": [ "processors": [
{ {
"set": { "set": {
"tag": "set_ecs_version_f5923549",
"field": "ecs.version", "field": "ecs.version",
"value": "8.17.0" "value": "8.17.0"
} }
}, },
{ {
"set": { "set": {
"tag": "set_observer_vendor_ad9d35cc",
"field": "observer.vendor", "field": "observer.vendor",
"value": "netgate" "value": "netgate"
} }
}, },
{ {
"set": { "set": {
"tag": "set_observer_type_5dddf3ba",
"field": "observer.type", "field": "observer.type",
"value": "firewall" "value": "firewall"
} }
}, },
{ {
"rename": { "rename": {
"tag": "rename_message_to_event_original_56a77271",
"field": "message", "field": "message",
"target_field": "event.original", "target_field": "event.original",
"ignore_missing": true, "ignore_missing": true,
@@ -40,14 +36,12 @@
}, },
{ {
"set": { "set": {
"tag": "set_event_kind_de80643c",
"field": "event.kind", "field": "event.kind",
"value": "event" "value": "event"
} }
}, },
{ {
"set": { "set": {
"tag": "set_event_timezone_4ca44cac",
"field": "event.timezone", "field": "event.timezone",
"value": "{{{_tmp.tz_offset}}}", "value": "{{{_tmp.tz_offset}}}",
"if": "ctx._tmp?.tz_offset != null && ctx._tmp?.tz_offset != 'local'" "if": "ctx._tmp?.tz_offset != null && ctx._tmp?.tz_offset != 'local'"
@@ -55,7 +49,6 @@
}, },
{ {
"grok": { "grok": {
"tag": "grok_event_original_27d9c8c7",
"description": "Parse syslog header", "description": "Parse syslog header",
"field": "event.original", "field": "event.original",
"patterns": [ "patterns": [
@@ -79,7 +72,6 @@
}, },
{ {
"date": { "date": {
"tag": "date__tmp_timestamp8601_to_timestamp_6ac9d3ce",
"if": "ctx._tmp.timestamp8601 != null", "if": "ctx._tmp.timestamp8601 != null",
"field": "_tmp.timestamp8601", "field": "_tmp.timestamp8601",
"target_field": "@timestamp", "target_field": "@timestamp",
@@ -90,7 +82,6 @@
}, },
{ {
"date": { "date": {
"tag": "date__tmp_timestamp_to_timestamp_f21e536e",
"if": "ctx.event?.timezone != null && ctx._tmp?.timestamp != null", "if": "ctx.event?.timezone != null && ctx._tmp?.timestamp != null",
"field": "_tmp.timestamp", "field": "_tmp.timestamp",
"target_field": "@timestamp", "target_field": "@timestamp",
@@ -104,7 +95,6 @@
}, },
{ {
"grok": { "grok": {
"tag": "grok_process_name_cef3d489",
"description": "Set Event Provider", "description": "Set Event Provider",
"field": "process.name", "field": "process.name",
"patterns": [ "patterns": [
@@ -117,83 +107,71 @@
}, },
{ {
"pipeline": { "pipeline": {
"tag": "pipeline_e16851a7", "name": "logs-pfsense.log-1.23.1-firewall",
"name": "logs-pfsense.log-1.25.2-firewall",
"if": "ctx.event.provider == 'filterlog'" "if": "ctx.event.provider == 'filterlog'"
} }
}, },
{ {
"pipeline": { "pipeline": {
"tag": "pipeline_828590b5", "name": "logs-pfsense.log-1.23.1-openvpn",
"name": "logs-pfsense.log-1.25.2-openvpn",
"if": "ctx.event.provider == 'openvpn'" "if": "ctx.event.provider == 'openvpn'"
} }
}, },
{ {
"pipeline": { "pipeline": {
"tag": "pipeline_9d37039c", "name": "logs-pfsense.log-1.23.1-ipsec",
"name": "logs-pfsense.log-1.25.2-ipsec",
"if": "ctx.event.provider == 'charon'" "if": "ctx.event.provider == 'charon'"
} }
}, },
{ {
"pipeline": { "pipeline": {
"tag": "pipeline_ad56bbca", "name": "logs-pfsense.log-1.23.1-dhcp",
"name": "logs-pfsense.log-1.25.2-dhcp", "if": "[\"dhcpd\", \"dhclient\", \"dhcp6c\"].contains(ctx.event.provider)"
"if": "[\"dhcpd\", \"dhclient\", \"dhcp6c\", \"dnsmasq-dhcp\"].contains(ctx.event.provider)"
} }
}, },
{ {
"pipeline": { "pipeline": {
"tag": "pipeline_dd85553d", "name": "logs-pfsense.log-1.23.1-unbound",
"name": "logs-pfsense.log-1.25.2-unbound",
"if": "ctx.event.provider == 'unbound'" "if": "ctx.event.provider == 'unbound'"
} }
}, },
{ {
"pipeline": { "pipeline": {
"tag": "pipeline_720ed255", "name": "logs-pfsense.log-1.23.1-haproxy",
"name": "logs-pfsense.log-1.25.2-haproxy",
"if": "ctx.event.provider == 'haproxy'" "if": "ctx.event.provider == 'haproxy'"
} }
}, },
{ {
"pipeline": { "pipeline": {
"tag": "pipeline_456beba5", "name": "logs-pfsense.log-1.23.1-php-fpm",
"name": "logs-pfsense.log-1.25.2-php-fpm",
"if": "ctx.event.provider == 'php-fpm'" "if": "ctx.event.provider == 'php-fpm'"
} }
}, },
{ {
"pipeline": { "pipeline": {
"tag": "pipeline_a0d89375", "name": "logs-pfsense.log-1.23.1-squid",
"name": "logs-pfsense.log-1.25.2-squid",
"if": "ctx.event.provider == 'squid'" "if": "ctx.event.provider == 'squid'"
} }
}, },
{ {
"pipeline": { "pipeline": {
"tag": "pipeline_c2f1ed55", "name": "logs-pfsense.log-1.23.1-snort",
"name": "logs-pfsense.log-1.25.2-snort",
"if": "ctx.event.provider == 'snort'" "if": "ctx.event.provider == 'snort'"
} }
}, },
{ {
"pipeline": { "pipeline": {
"tag":"pipeline_33db1c9e", "name": "logs-pfsense.log-1.23.1-suricata",
"name": "logs-pfsense.log-1.25.2-suricata",
"if": "ctx.event.provider == 'suricata'" "if": "ctx.event.provider == 'suricata'"
} }
}, },
{ {
"drop": { "drop": {
"tag": "drop_9d7c46f8", "if": "![\"filterlog\", \"openvpn\", \"charon\", \"dhcpd\", \"dhclient\", \"dhcp6c\", \"unbound\", \"haproxy\", \"php-fpm\", \"squid\", \"snort\", \"suricata\"].contains(ctx.event?.provider)"
"if": "![\"filterlog\", \"openvpn\", \"charon\", \"dhcpd\", \"dnsmasq-dhcp\", \"dhclient\", \"dhcp6c\", \"unbound\", \"haproxy\", \"php-fpm\", \"squid\", \"snort\", \"suricata\"].contains(ctx.event?.provider)"
} }
}, },
{ {
"append": { "append": {
"tag": "append_event_category_4780a983",
"field": "event.category", "field": "event.category",
"value": "network", "value": "network",
"if": "ctx.network != null" "if": "ctx.network != null"
@@ -201,7 +179,6 @@
}, },
{ {
"convert": { "convert": {
"tag": "convert_source_address_to_source_ip_f5632a20",
"field": "source.address", "field": "source.address",
"target_field": "source.ip", "target_field": "source.ip",
"type": "ip", "type": "ip",
@@ -211,7 +188,6 @@
}, },
{ {
"convert": { "convert": {
"tag": "convert_destination_address_to_destination_ip_f1388f0c",
"field": "destination.address", "field": "destination.address",
"target_field": "destination.ip", "target_field": "destination.ip",
"type": "ip", "type": "ip",
@@ -221,7 +197,6 @@
}, },
{ {
"set": { "set": {
"tag": "set_network_type_1f1d940a",
"field": "network.type", "field": "network.type",
"value": "ipv6", "value": "ipv6",
"if": "ctx.source?.ip != null && ctx.source.ip.contains(\":\")" "if": "ctx.source?.ip != null && ctx.source.ip.contains(\":\")"
@@ -229,7 +204,6 @@
}, },
{ {
"set": { "set": {
"tag": "set_network_type_69deca38",
"field": "network.type", "field": "network.type",
"value": "ipv4", "value": "ipv4",
"if": "ctx.source?.ip != null && ctx.source.ip.contains(\".\")" "if": "ctx.source?.ip != null && ctx.source.ip.contains(\".\")"
@@ -237,7 +211,6 @@
}, },
{ {
"geoip": { "geoip": {
"tag": "geoip_source_ip_to_source_geo_da2e41b2",
"field": "source.ip", "field": "source.ip",
"target_field": "source.geo", "target_field": "source.geo",
"ignore_missing": true "ignore_missing": true
@@ -245,7 +218,6 @@
}, },
{ {
"geoip": { "geoip": {
"tag": "geoip_destination_ip_to_destination_geo_ab5e2968",
"field": "destination.ip", "field": "destination.ip",
"target_field": "destination.geo", "target_field": "destination.geo",
"ignore_missing": true "ignore_missing": true
@@ -253,7 +225,6 @@
}, },
{ {
"geoip": { "geoip": {
"tag": "geoip_source_ip_to_source_as_28d69883",
"ignore_missing": true, "ignore_missing": true,
"database_file": "GeoLite2-ASN.mmdb", "database_file": "GeoLite2-ASN.mmdb",
"field": "source.ip", "field": "source.ip",
@@ -266,7 +237,6 @@
}, },
{ {
"geoip": { "geoip": {
"tag": "geoip_destination_ip_to_destination_as_8a007787",
"database_file": "GeoLite2-ASN.mmdb", "database_file": "GeoLite2-ASN.mmdb",
"field": "destination.ip", "field": "destination.ip",
"target_field": "destination.as", "target_field": "destination.as",
@@ -279,7 +249,6 @@
}, },
{ {
"rename": { "rename": {
"tag": "rename_source_as_asn_to_source_as_number_a917047d",
"field": "source.as.asn", "field": "source.as.asn",
"target_field": "source.as.number", "target_field": "source.as.number",
"ignore_missing": true "ignore_missing": true
@@ -287,7 +256,6 @@
}, },
{ {
"rename": { "rename": {
"tag": "rename_source_as_organization_name_to_source_as_organization_name_f1362d0b",
"field": "source.as.organization_name", "field": "source.as.organization_name",
"target_field": "source.as.organization.name", "target_field": "source.as.organization.name",
"ignore_missing": true "ignore_missing": true
@@ -295,7 +263,6 @@
}, },
{ {
"rename": { "rename": {
"tag": "rename_destination_as_asn_to_destination_as_number_3b459fcd",
"field": "destination.as.asn", "field": "destination.as.asn",
"target_field": "destination.as.number", "target_field": "destination.as.number",
"ignore_missing": true "ignore_missing": true
@@ -303,7 +270,6 @@
}, },
{ {
"rename": { "rename": {
"tag": "rename_destination_as_organization_name_to_destination_as_organization_name_814bd459",
"field": "destination.as.organization_name", "field": "destination.as.organization_name",
"target_field": "destination.as.organization.name", "target_field": "destination.as.organization.name",
"ignore_missing": true "ignore_missing": true
@@ -311,14 +277,12 @@
}, },
{ {
"community_id": { "community_id": {
"tag": "community_id_d2308e7a",
"target_field": "network.community_id", "target_field": "network.community_id",
"ignore_failure": true "ignore_failure": true
} }
}, },
{ {
"grok": { "grok": {
"tag": "grok_observer_ingress_interface_name_968018d3",
"field": "observer.ingress.interface.name", "field": "observer.ingress.interface.name",
"patterns": [ "patterns": [
"%{DATA}.%{NONNEGINT:observer.ingress.vlan.id}" "%{DATA}.%{NONNEGINT:observer.ingress.vlan.id}"
@@ -329,7 +293,6 @@
}, },
{ {
"set": { "set": {
"tag": "set_network_vlan_id_efd4d96a",
"field": "network.vlan.id", "field": "network.vlan.id",
"copy_from": "observer.ingress.vlan.id", "copy_from": "observer.ingress.vlan.id",
"ignore_empty_value": true "ignore_empty_value": true
@@ -337,7 +300,6 @@
}, },
{ {
"append": { "append": {
"tag": "append_related_ip_c1a6356b",
"field": "related.ip", "field": "related.ip",
"value": "{{{destination.ip}}}", "value": "{{{destination.ip}}}",
"allow_duplicates": false, "allow_duplicates": false,
@@ -346,7 +308,6 @@
}, },
{ {
"append": { "append": {
"tag": "append_related_ip_8121c591",
"field": "related.ip", "field": "related.ip",
"value": "{{{source.ip}}}", "value": "{{{source.ip}}}",
"allow_duplicates": false, "allow_duplicates": false,
@@ -355,7 +316,6 @@
}, },
{ {
"append": { "append": {
"tag": "append_related_ip_53b62ed8",
"field": "related.ip", "field": "related.ip",
"value": "{{{source.nat.ip}}}", "value": "{{{source.nat.ip}}}",
"allow_duplicates": false, "allow_duplicates": false,
@@ -364,7 +324,6 @@
}, },
{ {
"append": { "append": {
"tag": "append_related_hosts_6f162628",
"field": "related.hosts", "field": "related.hosts",
"value": "{{{destination.domain}}}", "value": "{{{destination.domain}}}",
"if": "ctx.destination?.domain != null" "if": "ctx.destination?.domain != null"
@@ -372,7 +331,6 @@
}, },
{ {
"append": { "append": {
"tag": "append_related_user_c036eec2",
"field": "related.user", "field": "related.user",
"value": "{{{user.name}}}", "value": "{{{user.name}}}",
"if": "ctx.user?.name != null" "if": "ctx.user?.name != null"
@@ -380,7 +338,6 @@
}, },
{ {
"set": { "set": {
"tag": "set_network_direction_cb1e3125",
"field": "network.direction", "field": "network.direction",
"value": "{{{network.direction}}}bound", "value": "{{{network.direction}}}bound",
"if": "ctx.network?.direction != null && ctx.network?.direction =~ /^(in|out)$/" "if": "ctx.network?.direction != null && ctx.network?.direction =~ /^(in|out)$/"
@@ -388,7 +345,6 @@
}, },
{ {
"remove": { "remove": {
"tag": "remove_a82e20f2",
"field": [ "field": [
"_tmp" "_tmp"
], ],
@@ -397,21 +353,11 @@
}, },
{ {
"script": { "script": {
"tag": "script_a7f2c062",
"lang": "painless", "lang": "painless",
"description": "This script processor iterates over the whole document to remove fields with null values.", "description": "This script processor iterates over the whole document to remove fields with null values.",
"source": "void handleMap(Map map) {\n for (def x : map.values()) {\n if (x instanceof Map) {\n handleMap(x);\n } else if (x instanceof List) {\n handleList(x);\n }\n }\n map.values().removeIf(v -> v == null || (v instanceof String && v == \"-\"));\n}\nvoid handleList(List list) {\n for (def x : list) {\n if (x instanceof Map) {\n handleMap(x);\n } else if (x instanceof List) {\n handleList(x);\n }\n }\n}\nhandleMap(ctx);\n" "source": "void handleMap(Map map) {\n for (def x : map.values()) {\n if (x instanceof Map) {\n handleMap(x);\n } else if (x instanceof List) {\n handleList(x);\n }\n }\n map.values().removeIf(v -> v == null || (v instanceof String && v == \"-\"));\n}\nvoid handleList(List list) {\n for (def x : list) {\n if (x instanceof Map) {\n handleMap(x);\n } else if (x instanceof List) {\n handleList(x);\n }\n }\n}\nhandleMap(ctx);\n"
} }
}, },
{
"append": {
"tag": "append_preserve_original_event_on_error",
"field": "tags",
"value": "preserve_original_event",
"allow_duplicates": false,
"if": "ctx.error?.message != null"
}
},
{ {
"pipeline": { "pipeline": {
"name": "global@custom", "name": "global@custom",
@@ -459,14 +405,7 @@
{ {
"append": { "append": {
"field": "error.message", "field": "error.message",
"value": "Processor '{{{ _ingest.on_failure_processor_type }}}' {{#_ingest.on_failure_processor_tag}}with tag '{{{ _ingest.on_failure_processor_tag }}}' {{/_ingest.on_failure_processor_tag}}in pipeline '{{{ _ingest.pipeline }}}' failed with message '{{{ _ingest.on_failure_message }}}'" "value": "{{{ _ingest.on_failure_message }}}"
}
},
{
"append": {
"field": "tags",
"value": "preserve_original_event",
"allow_duplicates": false
} }
} }
] ]
@@ -45,7 +45,3 @@ appender.rolling_json.strategy.action.condition.nested_condition.age = 1D
rootLogger.level = info rootLogger.level = info
rootLogger.appenderRef.rolling.ref = rolling rootLogger.appenderRef.rolling.ref = rolling
rootLogger.appenderRef.rolling_json.ref = rolling_json rootLogger.appenderRef.rolling_json.ref = rolling_json
# Suppress NotEntitledException WARNs (ES 9.3.3 bug)
logger.entitlement_security.name = org.elasticsearch.entitlement.runtime.policy.PolicyManager.x-pack-security.org.elasticsearch.security.org.elasticsearch.xpack.security
logger.entitlement_security.level = error
+18 -62
View File
@@ -14,42 +14,15 @@
{% set ES_INDEX_SETTINGS_ORIG = ELASTICSEARCHDEFAULTS.elasticsearch.index_settings %} {% set ES_INDEX_SETTINGS_ORIG = ELASTICSEARCHDEFAULTS.elasticsearch.index_settings %}
{% set ALL_ADDON_INTEGRATION_DEFAULTS = {} %}
{% set ALL_ADDON_SETTINGS_ORIG = {} %}
{% set ALL_ADDON_SETTINGS_GLOBAL_OVERRIDES = {} %}
{% set ALL_ADDON_SETTINGS = {} %}
{# start generation of integration default index_settings #} {# start generation of integration default index_settings #}
{% if salt['file.file_exists']('/opt/so/state/esfleet_component_templates.json') %} {% if salt['file.file_exists']('/opt/so/state/esfleet_package_components.json') and salt['file.file_exists']('/opt/so/state/esfleet_component_templates.json') %}
{# import integration type defaults #} {% set check_package_components = salt['file.stats']('/opt/so/state/esfleet_package_components.json') %}
{% if salt['file.file_exists']('/opt/so/state/esfleet_package_components.json') %} {% if check_package_components.size > 1 %}
{% set check_integration_package_components = salt['file.stats']('/opt/so/state/esfleet_package_components.json') %} {% from 'elasticfleet/integration-defaults.map.jinja' import ADDON_INTEGRATION_DEFAULTS %}
{% if check_integration_package_components.size > 1 %} {% for index, settings in ADDON_INTEGRATION_DEFAULTS.items() %}
{% from 'elasticfleet/integration-defaults.map.jinja' import ADDON_INTEGRATION_DEFAULTS %} {% do ES_INDEX_SETTINGS_ORIG.update({index: settings}) %}
{% do ALL_ADDON_INTEGRATION_DEFAULTS.update(ADDON_INTEGRATION_DEFAULTS) %} {% endfor %}
{% endif %} {% endif%}
{% endif %}
{# import input type defaults #}
{% if salt['file.file_exists']('/opt/so/state/esfleet_input_package_components.json') %}
{% set check_input_package_components = salt['file.stats']('/opt/so/state/esfleet_input_package_components.json') %}
{% if check_input_package_components.size > 1 %}
{% from 'elasticfleet/input-defaults.map.jinja' import ADDON_INPUT_INTEGRATION_DEFAULTS %}
{% do ALL_ADDON_INTEGRATION_DEFAULTS.update(ADDON_INPUT_INTEGRATION_DEFAULTS) %}
{% endif %}
{% endif %}
{# import content type defaults #}
{% if salt['file.file_exists']('/opt/so/state/esfleet_content_package_components.json') %}
{% set check_content_package_components = salt['file.stats']('/opt/so/state/esfleet_content_package_components.json') %}
{% if check_content_package_components.size > 1 %}
{% from 'elasticfleet/content-defaults.map.jinja' import ADDON_CONTENT_INTEGRATION_DEFAULTS %}
{% do ALL_ADDON_INTEGRATION_DEFAULTS.update(ADDON_CONTENT_INTEGRATION_DEFAULTS) %}
{% endif %}
{% endif %}
{% for index, settings in ALL_ADDON_INTEGRATION_DEFAULTS.items() %}
{% do ALL_ADDON_SETTINGS_ORIG.update({index: settings}) %}
{% endfor %}
{% endif %} {% endif %}
{# end generation of integration default index_settings #} {# end generation of integration default index_settings #}
@@ -58,33 +31,25 @@
{% do ES_INDEX_SETTINGS_GLOBAL_OVERRIDES.update({index: salt['defaults.merge'](ELASTICSEARCHDEFAULTS.elasticsearch.index_settings[index], PILLAR_GLOBAL_OVERRIDES, in_place=False)}) %} {% do ES_INDEX_SETTINGS_GLOBAL_OVERRIDES.update({index: salt['defaults.merge'](ELASTICSEARCHDEFAULTS.elasticsearch.index_settings[index], PILLAR_GLOBAL_OVERRIDES, in_place=False)}) %}
{% endfor %} {% endfor %}
{% if ALL_ADDON_SETTINGS_ORIG.keys() | length > 0 %}
{% for index in ALL_ADDON_SETTINGS_ORIG.keys() %}
{% do ALL_ADDON_SETTINGS_GLOBAL_OVERRIDES.update({index: salt['defaults.merge'](ALL_ADDON_SETTINGS_ORIG[index], PILLAR_GLOBAL_OVERRIDES, in_place=False)}) %}
{% endfor %}
{% endif %}
{% set ES_INDEX_SETTINGS = {} %} {% set ES_INDEX_SETTINGS = {} %}
{% macro create_final_index_template(DEFINED_SETTINGS, GLOBAL_OVERRIDES, FINAL_INDEX_SETTINGS) %} {% do ES_INDEX_SETTINGS_GLOBAL_OVERRIDES.update(salt['defaults.merge'](ES_INDEX_SETTINGS_GLOBAL_OVERRIDES, ES_INDEX_PILLAR, in_place=False)) %}
{% for index, settings in ES_INDEX_SETTINGS_GLOBAL_OVERRIDES.items() %}
{% do GLOBAL_OVERRIDES.update(salt['defaults.merge'](GLOBAL_OVERRIDES, ES_INDEX_PILLAR, in_place=False)) %}
{% for index, settings in GLOBAL_OVERRIDES.items() %}
{# prevent this action from being performed on custom defined indices. #} {# prevent this action from being performed on custom defined indices. #}
{# the custom defined index is not present in either of the dictionaries and fails to render. #} {# the custom defined index is not present in either of the dictionaries and fails to render. #}
{% if index in DEFINED_SETTINGS and index in GLOBAL_OVERRIDES %} {% if index in ES_INDEX_SETTINGS_ORIG and index in ES_INDEX_SETTINGS_GLOBAL_OVERRIDES %}
{# don't merge policy from the global_overrides if policy isn't defined in the original index settings #} {# don't merge policy from the global_overrides if policy isn't defined in the original index settings #}
{# this will prevent so-elasticsearch-ilm-policy-load from trying to load policy on non-ILM managed indices #} {# this will prevent so-elasticsearch-ilm-policy-load from trying to load policy on non-ILM managed indices #}
{% if not DEFINED_SETTINGS[index].policy is defined and GLOBAL_OVERRIDES[index].policy is defined %} {% if not ES_INDEX_SETTINGS_ORIG[index].policy is defined and ES_INDEX_SETTINGS_GLOBAL_OVERRIDES[index].policy is defined %}
{% do GLOBAL_OVERRIDES[index].pop('policy') %} {% do ES_INDEX_SETTINGS_GLOBAL_OVERRIDES[index].pop('policy') %}
{% endif %} {% endif %}
{# this prevents an index from inheriting a policy phase from global overrides if it wasn't defined in the defaults. #} {# this prevents an index from inheriting a policy phase from global overrides if it wasn't defined in the defaults. #}
{% if GLOBAL_OVERRIDES[index].policy is defined %} {% if ES_INDEX_SETTINGS_GLOBAL_OVERRIDES[index].policy is defined %}
{% for phase in GLOBAL_OVERRIDES[index].policy.phases.copy() %} {% for phase in ES_INDEX_SETTINGS_GLOBAL_OVERRIDES[index].policy.phases.copy() %}
{% if DEFINED_SETTINGS[index].policy.phases[phase] is not defined %} {% if ES_INDEX_SETTINGS_ORIG[index].policy.phases[phase] is not defined %}
{% do GLOBAL_OVERRIDES[index].policy.phases.pop(phase) %} {% do ES_INDEX_SETTINGS_GLOBAL_OVERRIDES[index].policy.phases.pop(phase) %}
{% endif %} {% endif %}
{% endfor %} {% endfor %}
{% endif %} {% endif %}
@@ -146,14 +111,5 @@
{% endfor %} {% endfor %}
{% endif %} {% endif %}
{% do FINAL_INDEX_SETTINGS.update({index | replace("_x_", "."): GLOBAL_OVERRIDES[index]}) %} {% do ES_INDEX_SETTINGS.update({index | replace("_x_", "."): ES_INDEX_SETTINGS_GLOBAL_OVERRIDES[index]}) %}
{% endfor %} {% endfor %}
{% endmacro %}
{{ create_final_index_template(ES_INDEX_SETTINGS_ORIG, ES_INDEX_SETTINGS_GLOBAL_OVERRIDES, ES_INDEX_SETTINGS) }}
{{ create_final_index_template(ALL_ADDON_SETTINGS_ORIG, ALL_ADDON_SETTINGS_GLOBAL_OVERRIDES, ALL_ADDON_SETTINGS) }}
{% set SO_MANAGED_INDICES = [] %}
{% for index, settings in ES_INDEX_SETTINGS.items() %}
{% do SO_MANAGED_INDICES.append(index) %}
{% endfor %}
@@ -6,19 +6,8 @@
# Elastic License 2.0. # Elastic License 2.0.
. /usr/sbin/so-common . /usr/sbin/so-common
if [ "$1" == "" ]; then
if [[ -z "$1" ]]; then curl -K /opt/so/conf/elasticsearch/curl.config -s -k -L https://localhost:9200/_component_template | jq '.component_templates[] |.name'| sort
if output=$(so-elasticsearch-query "_component_template" --retry 3 --retry-delay 1 --fail); then
jq '[.component_templates[] | .name] | sort' <<< "$output"
else
echo "Failed to retrieve component templates from Elasticsearch."
exit 1
fi
else else
if output=$(so-elasticsearch-query "_component_template/$1" --retry 3 --retry-delay 1 --fail); then curl -K /opt/so/conf/elasticsearch/curl.config -s -k -L https://localhost:9200/_component_template/$1 | jq
jq <<< "$output" fi
else
echo "Failed to retrieve component template '$1' from Elasticsearch."
exit 1
fi
fi
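A short usage sketch for the listing script above (the single template name shown is taken from elsewhere in this changeset; output formatting is illustrative):

    so-elasticsearch-component-templates-list
    # => a sorted JSON array of component template names, e.g. ["logs-endpoint.alerts@package", ...]
    so-elasticsearch-component-templates-list logs-endpoint.alerts@package
    # => the full JSON definition of that single component template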
@@ -1,276 +0,0 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
. /usr/sbin/so-common
SO_STATEFILE_SUCCESS=/opt/so/state/estemplates.txt
ADDON_STATEFILE_SUCCESS=/opt/so/state/addon_estemplates.txt
ELASTICSEARCH_TEMPLATES_DIR="/opt/so/conf/elasticsearch/templates"
SO_TEMPLATES_DIR="${ELASTICSEARCH_TEMPLATES_DIR}/index"
ADDON_TEMPLATES_DIR="${ELASTICSEARCH_TEMPLATES_DIR}/addon-index"
SO_LOAD_FAILURES=0
ADDON_LOAD_FAILURES=0
SO_LOAD_FAILURES_NAMES=()
ADDON_LOAD_FAILURES_NAMES=()
IS_HEAVYNODE="false"
FORCE="false"
VERBOSE="false"
SHOULD_EXIT_ON_FAILURE="true"
# If soup is running, ignore errors
pgrep soup >/dev/null && SHOULD_EXIT_ON_FAILURE="false"
while [[ $# -gt 0 ]]; do
case "$1" in
--heavynode)
IS_HEAVYNODE="true"
;;
--force)
FORCE="true"
;;
--verbose)
VERBOSE="true"
;;
*)
echo "Usage: $0 [options]"
echo "Options:"
echo " --heavynode Only loads index templates specific to heavynodes"
echo " --force Force reload all templates regardless of statefiles (default: false)"
echo " --verbose Enable verbose output"
exit 1
;;
esac
shift
done
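# Example invocations, matching the options parsed above (illustrative only):
#   so-elasticsearch-templates-load                      # honor state files, load only what is pending
#   so-elasticsearch-templates-load --force --verbose    # reload every template and print query output
#   so-elasticsearch-templates-load --heavynode          # load only heavynode-relevant index templates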
load_template() {
local uri="$1"
local file="$2"
echo "Loading template file $file"
if ! output=$(retry 3 3 "so-elasticsearch-query $uri -d@$file -XPUT" "{\"acknowledged\":true}"); then
echo "$output"
return 1
elif [[ "$VERBOSE" == "true" ]]; then
echo "$output"
fi
}
check_required_component_template_exists() {
local required
local missing
local file=$1
required=$(jq '[((.composed_of //[]) - (.ignore_missing_component_templates // []))[]]' "$file")
missing=$(jq -n --argjson required "$required" --argjson component_templates "$component_templates" '(($required) - ($component_templates))')
if [[ $(jq length <<<"$missing") -gt 0 ]]; then
return 1
fi
}
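# A minimal sketch, with made-up template names, of the set arithmetic above: required templates
# are the composed_of entries minus the ignorable ones, and anything not already installed is missing.
#   required='["ecs-base-mappings","so-custom-mappings"]'
#   component_templates='["ecs-base-mappings"]'
#   jq -n --argjson required "$required" --argjson component_templates "$component_templates" \
#     '(($required) - ($component_templates))'
#   # => ["so-custom-mappings"]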
check_heavynode_compatiable_index_template() {
# The only templates that are relevant to heavynodes are from datasets defined in elasticagent/files/elastic-agent.yml.jinja.
# Heavynodes do not have fleet server packages installed and do not support elastic agents reporting directly to them.
local -A heavynode_index_templates=(
["so-import"]=1
["so-syslog"]=1
["so-logs-soc"]=1
["so-suricata"]=1
["so-suricata.alerts"]=1
["so-zeek"]=1
["so-strelka"]=1
)
local template_name="$1"
if [[ ! -v heavynode_index_templates["$template_name"] ]]; then
return 1
fi
}
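# A minimal sketch of the membership test used above: [[ -v map[key] ]] succeeds only if the key
# exists in the associative array (illustrative template name):
#   declare -A heavynode_index_templates=( ["so-zeek"]=1 ["so-suricata"]=1 )
#   template_name="so-kratos"
#   [[ -v heavynode_index_templates["$template_name"] ]] && echo "heavynode template" || echo "skip"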
load_component_templates() {
local printed_name="$1"
local pattern="${ELASTICSEARCH_TEMPLATES_DIR}/component/$2"
local append_mappings="${3:-"false"}"
echo -e "\nLoading $printed_name component templates...\n"
if ! compgen -G "${pattern}/*.json" > /dev/null; then
echo "No $printed_name component templates found in ${pattern}, skipping."
return
fi
for component in "$pattern"/*.json; do
tmpl_name=$(basename "${component%.json}")
if [[ "$append_mappings" == "true" ]]; then
# avoid duplicating "-mappings" if it already exists in the component template filename
tmpl_name="${tmpl_name%-mappings}-mappings"
fi
if ! load_template "_component_template/${tmpl_name}" "$component"; then
SO_LOAD_FAILURES=$((SO_LOAD_FAILURES + 1))
SO_LOAD_FAILURES_NAMES+=("$component")
fi
done
}
check_elasticsearch_responsive() {
# Cannot load templates if Elasticsearch is not responding.
# NOTE: Slightly faster exit with failure than the previous "retry 240 1"; if there is a problem with Elasticsearch, the
# script should exit sooner rather than hang at the 'so-elasticsearch-templates' salt state.
retry 3 15 "so-elasticsearch-query / --output /dev/null --fail" ||
fail "Elasticsearch is not responding. Please review Elasticsearch logs /opt/so/log/elasticsearch/securityonion.log for more details. Additionally, consider running so-elasticsearch-troubleshoot."
}
index_templates_exist() {
local templates_dir="$1"
if [[ ! -d "$templates_dir" ]]; then
return 1
fi
compgen -G "${templates_dir}/*.json" > /dev/null
}
should_load_addon_templates() {
if [[ "$IS_HEAVYNODE" == "true" ]]; then
return 1
fi
# Skip statefile checks when forcing template load
if [[ "$FORCE" != "true" ]]; then
if [[ ! -f "$SO_STATEFILE_SUCCESS" || -f "$ADDON_STATEFILE_SUCCESS" ]]; then
return 1
fi
fi
index_templates_exist "$ADDON_TEMPLATES_DIR"
}
if [[ "$FORCE" == "true" || ! -f "$SO_STATEFILE_SUCCESS" ]] && index_templates_exist "$SO_TEMPLATES_DIR"; then
check_elasticsearch_responsive
if [[ "$IS_HEAVYNODE" == "false" ]]; then
# TODO: Better way to check if fleet server is installed vs checking for Elastic Defend component template.
fleet_check="logs-endpoint.alerts@package"
if ! so-elasticsearch-query "_component_template/$fleet_check" --output /dev/null --retry 5 --retry-delay 3 --fail; then
# This check prevents so-elasticsearch-templates-load from running before so-elastic-fleet-setup has run.
echo -e "\nPackage $fleet_check not yet installed. Fleet Server may not be fully configured yet."
# Fleet Server is required because some SO index templates depend on components installed via
# specific integrations eg Elastic Defend. These are components that we do not manually create / manage
# via /opt/so/saltstack/salt/elasticsearch/templates/component/
exit 0
fi
fi
# load_component_templates "Name" "directory" "append '-mappings'?"
load_component_templates "ECS" "ecs" "true"
load_component_templates "Elastic Agent" "elastic-agent"
load_component_templates "Security Onion" "so"
component_templates=$(so-elasticsearch-component-templates-list)
echo -e "Loading Security Onion index templates...\n"
for so_idx_tmpl in "${SO_TEMPLATES_DIR}"/*.json; do
tmpl_name=$(basename "${so_idx_tmpl%-template.json}")
if [[ "$IS_HEAVYNODE" == "true" ]]; then
# TODO: Better way to load only heavynode specific templates
if ! check_heavynode_compatiable_index_template "$tmpl_name"; then
if [[ "$VERBOSE" == "true" ]]; then
echo "Skipping over $so_idx_tmpl, template is not a heavynode specific index template."
fi
continue
fi
fi
if check_required_component_template_exists "$so_idx_tmpl"; then
if ! load_template "_index_template/$tmpl_name" "$so_idx_tmpl"; then
SO_LOAD_FAILURES=$((SO_LOAD_FAILURES + 1))
SO_LOAD_FAILURES_NAMES+=("$so_idx_tmpl")
fi
else
echo "Skipping over $so_idx_tmpl due to missing required component template(s)."
SO_LOAD_FAILURES=$((SO_LOAD_FAILURES + 1))
SO_LOAD_FAILURES_NAMES+=("$so_idx_tmpl")
continue
fi
done
if [[ $SO_LOAD_FAILURES -eq 0 ]]; then
echo "All Security Onion core templates loaded successfully."
touch "$SO_STATEFILE_SUCCESS"
else
echo "Encountered $SO_LOAD_FAILURES failure(s) loading templates:"
for failed_template in "${SO_LOAD_FAILURES_NAMES[@]}"; do
echo " - $failed_template"
done
if [[ "$SHOULD_EXIT_ON_FAILURE" == "true" ]]; then
fail "Failed to load all Security Onion core templates successfully."
fi
fi
elif ! index_templates_exist "$SO_TEMPLATES_DIR"; then
echo "No Security Onion core index templates found in ${SO_TEMPLATES_DIR}, skipping."
elif [[ -f "$SO_STATEFILE_SUCCESS" ]]; then
echo "Security Onion core templates already loaded"
fi
# Start loading addon templates
if should_load_addon_templates; then
check_elasticsearch_responsive
echo -e "\nLoading addon integration index templates...\n"
component_templates=$(so-elasticsearch-component-templates-list)
for addon_idx_tmpl in "${ADDON_TEMPLATES_DIR}"/*.json; do
tmpl_name=$(basename "${addon_idx_tmpl%-template.json}")
if check_required_component_template_exists "$addon_idx_tmpl"; then
if ! load_template "_index_template/${tmpl_name}" "$addon_idx_tmpl"; then
ADDON_LOAD_FAILURES=$((ADDON_LOAD_FAILURES + 1))
ADDON_LOAD_FAILURES_NAMES+=("$addon_idx_tmpl")
fi
else
echo "Skipping over $addon_idx_tmpl due to missing required component template(s)."
ADDON_LOAD_FAILURES=$((ADDON_LOAD_FAILURES + 1))
ADDON_LOAD_FAILURES_NAMES+=("$addon_idx_tmpl")
continue
fi
done
if [[ $ADDON_LOAD_FAILURES -eq 0 ]]; then
echo "All addon integration templates loaded successfully."
touch "$ADDON_STATEFILE_SUCCESS"
else
echo "Encountered $ADDON_LOAD_FAILURES failure(s) loading addon integration templates:"
for failed_template in "${ADDON_LOAD_FAILURES_NAMES[@]}"; do
echo " - $failed_template"
done
if [[ "$SHOULD_EXIT_ON_FAILURE" == "true" ]]; then
fail "Failed to load all addon integration templates successfully."
fi
fi
elif [[ ! -f "$SO_STATEFILE_SUCCESS" && "$IS_HEAVYNODE" == "false" ]]; then
echo "Skipping loading addon integration templates until Security Onion core templates have been loaded."
elif [[ -f "$ADDON_STATEFILE_SUCCESS" && "$IS_HEAVYNODE" == "false" && "$FORCE" == "false" ]]; then
echo "Addon integration templates already loaded"
fi
@@ -7,9 +7,6 @@
. /usr/sbin/so-common . /usr/sbin/so-common
{%- from 'elasticsearch/template.map.jinja' import ES_INDEX_SETTINGS %} {%- from 'elasticsearch/template.map.jinja' import ES_INDEX_SETTINGS %}
{%- if GLOBALS.role != "so-heavynode" %}
{%- from 'elasticsearch/template.map.jinja' import ALL_ADDON_SETTINGS %}
{%- endif %}
{%- for index, settings in ES_INDEX_SETTINGS.items() %} {%- for index, settings in ES_INDEX_SETTINGS.items() %}
{%- if settings.policy is defined %} {%- if settings.policy is defined %}
@@ -36,13 +33,3 @@
{%- endif %} {%- endif %}
{%- endfor %} {%- endfor %}
echo echo
{%- if GLOBALS.role != "so-heavynode" %}
{%- for index, settings in ALL_ADDON_SETTINGS.items() %}
{%- if settings.policy is defined %}
echo
echo "Setting up {{ index }}-logs policy..."
curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -s -k -L -X PUT "https://localhost:9200/_ilm/policy/{{ index }}-logs" -H 'Content-Type: application/json' -d'{ "policy": {{ settings.policy | tojson(true) }} }'
echo
{%- endif %}
{%- endfor %}
{%- endif %}
@@ -0,0 +1,165 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{%- import_yaml 'elasticfleet/defaults.yaml' as ELASTICFLEETDEFAULTS %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
STATE_FILE_INITIAL=/opt/so/state/estemplates_initial_load_attempt.txt
STATE_FILE_SUCCESS=/opt/so/state/estemplates.txt
if [[ -f $STATE_FILE_INITIAL ]]; then
# The initial template load has already run. As this is a subsequent load, all dependencies should
# already be satisfied. Therefore, immediately exit/abort this script upon any template load failure
# since this is an unrecoverable failure.
should_exit_on_failure=1
else
# This is the initial template load, and there are likely some components not yet set up in Elasticsearch.
# Therefore, load as many templates as possible at this time and, if an error occurs, proceed to the next
# template. But if at least one template fails to load, do not mark the templates as having been loaded.
# This will allow the next load to resume the load of the templates that failed to load initially.
should_exit_on_failure=0
echo "This is the initial template load"
fi
# If soup is running, ignore errors
pgrep soup > /dev/null && should_exit_on_failure=0
load_failures=0
load_template() {
uri=$1
file=$2
echo "Loading template file $i"
if ! retry 3 1 "so-elasticsearch-query $uri -d@$file -XPUT" "{\"acknowledged\":true}"; then
if [[ $should_exit_on_failure -eq 1 ]]; then
fail "Could not load template file: $file"
else
load_failures=$((load_failures+1))
echo "Incremented load failure counter: $load_failures"
fi
fi
}
if [ ! -f $STATE_FILE_SUCCESS ]; then
echo "State file $STATE_FILE_SUCCESS not found. Running so-elasticsearch-templates-load."
. /usr/sbin/so-common
{% if GLOBALS.role != 'so-heavynode' %}
if [ -f /usr/sbin/so-elastic-fleet-common ]; then
. /usr/sbin/so-elastic-fleet-common
fi
{% endif %}
default_conf_dir=/opt/so/conf
# Define a default directory to load templates from
ELASTICSEARCH_TEMPLATES="$default_conf_dir/elasticsearch/templates/"
{% if GLOBALS.role == 'so-heavynode' %}
file="/opt/so/conf/elasticsearch/templates/index/so-common-template.json"
{% else %}
file="/usr/sbin/so-elastic-fleet-common"
{% endif %}
if [ -f "$file" ]; then
# Wait for ElasticSearch to initialize
echo -n "Waiting for ElasticSearch..."
retry 240 1 "so-elasticsearch-query / -k --output /dev/null --silent --head --fail" || fail "Connection attempt timed out. Unable to connect to ElasticSearch. \nPlease try: \n -checking log(s) in /var/log/elasticsearch/\n -running 'sudo docker ps' \n -running 'sudo so-elastic-restart'"
{% if GLOBALS.role != 'so-heavynode' %}
TEMPLATE="logs-endpoint.alerts@package"
INSTALLED=$(so-elasticsearch-query _component_template/$TEMPLATE | jq -r .component_templates[0].name)
if [ "$INSTALLED" != "$TEMPLATE" ]; then
echo
echo "Packages not yet installed."
echo
exit 0
fi
{% endif %}
touch $STATE_FILE_INITIAL
cd ${ELASTICSEARCH_TEMPLATES}/component/ecs
echo "Loading ECS component templates..."
for i in *; do
TEMPLATE=$(echo $i | cut -d '.' -f1)
load_template "_component_template/${TEMPLATE}-mappings" "$i"
done
echo
cd ${ELASTICSEARCH_TEMPLATES}/component/elastic-agent
echo "Loading Elastic Agent component templates..."
{% if GLOBALS.role == 'so-heavynode' %}
component_pattern="so-*"
{% else %}
component_pattern="*"
{% endif %}
for i in $component_pattern; do
TEMPLATE=${i::-5}
load_template "_component_template/$TEMPLATE" "$i"
done
echo
# Load SO-specific component templates
cd ${ELASTICSEARCH_TEMPLATES}/component/so
echo "Loading Security Onion component templates..."
for i in *; do
TEMPLATE=$(echo $i | cut -d '.' -f1);
load_template "_component_template/$TEMPLATE" "$i"
done
echo
# Load SO index templates
cd ${ELASTICSEARCH_TEMPLATES}/index
echo "Loading Security Onion index templates..."
shopt -s extglob
{% if GLOBALS.role == 'so-heavynode' %}
pattern="!(*1password*|*aws*|*azure*|*cloudflare*|*elastic_agent*|*fim*|*github*|*google*|*osquery*|*system*|*windows*|*endpoint*|*elasticsearch*|*generic*|*fleet_server*|*soc*)"
{% else %}
pattern="*"
{% endif %}
# Index templates will be skipped if the following conditions are met:
# 1. The template is part of the "so-logs-" template group
# 2. The template name does not correlate to at least one existing component template
# In this situation, the script will treat the skipped template as a temporary failure
# and allow the templates to be loaded again on the next run or highstate, whichever
# comes first.
COMPONENT_LIST=$(so-elasticsearch-component-templates-list)
for i in $pattern; do
TEMPLATE=${i::-14}
COMPONENT_PATTERN=${TEMPLATE:3}
MATCH=$(echo "$TEMPLATE" | grep -E "^so-logs-|^so-metrics" | grep -vE "detections|osquery")
if [[ -n "$MATCH" && ! "$COMPONENT_LIST" =~ "$COMPONENT_PATTERN" && ! "$COMPONENT_PATTERN" =~ \.generic|logs-winlog\.winlog ]]; then
load_failures=$((load_failures+1))
echo "Component template does not exist for $COMPONENT_PATTERN. The index template will not be loaded. Load failures: $load_failures"
else
load_template "_index_template/$TEMPLATE" "$i"
fi
done
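# A minimal sketch (illustrative filename) of the substring handling in the loop above:
#   i="so-logs-system.syslog-template.json"
#   TEMPLATE=${i::-14}                # drop the 14-character "-template.json" suffix -> "so-logs-system.syslog"
#   COMPONENT_PATTERN=${TEMPLATE:3}   # drop the leading "so-" -> "logs-system.syslog"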
else
{% if GLOBALS.role == 'so-heavynode' %}
echo "Common template does not exist. Exiting..."
{% else %}
echo "Elastic Fleet not configured. Exiting..."
{% endif %}
exit 0
fi
cd - >/dev/null
if [[ $load_failures -eq 0 ]]; then
echo "All templates loaded successfully"
touch $STATE_FILE_SUCCESS
else
echo "Encountered $load_failures templates that were unable to load, likely due to missing dependencies that will be available later; will retry on next highstate"
fi
else
echo "Templates already loaded"
fi
+6
View File
@@ -11,14 +11,18 @@ global:
regexFailureMessage: You must enter a valid IP address or CIDR. regexFailureMessage: You must enter a valid IP address or CIDR.
mdengine: mdengine:
description: Which engine to use for meta data generation. Options are ZEEK and SURICATA. description: Which engine to use for meta data generation. Options are ZEEK and SURICATA.
regex: ^(ZEEK|SURICATA)$
options: options:
- ZEEK - ZEEK
- SURICATA - SURICATA
regexFailureMessage: You must enter either ZEEK or SURICATA.
global: True global: True
pcapengine: pcapengine:
description: Which engine to use for generating pcap. Currently only SURICATA is supported. description: Which engine to use for generating pcap. Currently only SURICATA is supported.
regex: ^(SURICATA)$
options: options:
- SURICATA - SURICATA
regexFailureMessage: You must enter SURICATA.
global: True global: True
ids: ids:
description: Which IDS engine to use. Currently only Suricata is supported. description: Which IDS engine to use. Currently only Suricata is supported.
@@ -38,9 +42,11 @@ global:
advanced: True advanced: True
pipeline: pipeline:
description: Sets which pipeline technology for events to use. The use of Kafka requires a Security Onion Pro license. description: Sets which pipeline technology for events to use. The use of Kafka requires a Security Onion Pro license.
regex: ^(REDIS|KAFKA)$
options: options:
- REDIS - REDIS
- KAFKA - KAFKA
regexFailureMessage: You must enter either REDIS or KAFKA.
global: True global: True
advanced: True advanced: True
repo_host: repo_host:
+3 -10
View File
@@ -85,10 +85,7 @@ influxdb:
description: The log level to use for outputting log statements. Allowed values are debug, info, or error. description: The log level to use for outputting log statements. Allowed values are debug, info, or error.
global: True global: True
advanced: false advanced: false
options: regex: ^(info|debug|error)$
- info
- debug
- error
helpLink: influxdb helpLink: influxdb
metrics-disabled: metrics-disabled:
description: If true, the HTTP endpoint that exposes internal InfluxDB metrics will be inaccessible. description: If true, the HTTP endpoint that exposes internal InfluxDB metrics will be inaccessible.
@@ -143,9 +140,7 @@ influxdb:
description: Determines the type of storage used for secrets. Allowed values are bolt or vault. description: Determines the type of storage used for secrets. Allowed values are bolt or vault.
global: True global: True
advanced: True advanced: True
options: regex: ^(bolt|vault)$
- bolt
- vault
helpLink: influxdb helpLink: influxdb
session-length: session-length:
description: Number of minutes that a user login session can remain authenticated. description: Number of minutes that a user login session can remain authenticated.
@@ -265,9 +260,7 @@ influxdb:
description: The type of data store to use for HTTP resources. Allowed values are disk or memory. Memory should not be used for production Security Onion installations. description: The type of data store to use for HTTP resources. Allowed values are disk or memory. Memory should not be used for production Security Onion installations.
global: True global: True
advanced: True advanced: True
options: regex: ^(disk|memory)$
- disk
- memory
helpLink: influxdb helpLink: influxdb
tls-cert: tls-cert:
description: The container path to the certificate to use for TLS encryption of the HTTP requests and responses. description: The container path to the certificate to use for TLS encryption of the HTTP requests and responses.
+4 -14
View File
@@ -128,13 +128,10 @@ kafka:
title: ssl.keystore.password title: ssl.keystore.password
sensitive: True sensitive: True
helpLink: kafka helpLink: kafka
ssl_x_keystore_x_type: ssl_x_keystore_x_type:
description: The key store file format. description: The key store file format.
title: ssl.keystore.type title: ssl.keystore.type
options: regex: ^(JKS|PKCS12|PEM)$
- JKS
- PKCS12
- PEM
helpLink: kafka helpLink: kafka
ssl_x_truststore_x_location: ssl_x_truststore_x_location:
description: The trust store file location within the Docker container. description: The trust store file location within the Docker container.
@@ -163,11 +160,7 @@ kafka:
security_x_protocol: security_x_protocol:
description: 'Broker communication protocol. Options are: SASL_SSL, PLAINTEXT, SSL, SASL_PLAINTEXT' description: 'Broker communication protocol. Options are: SASL_SSL, PLAINTEXT, SSL, SASL_PLAINTEXT'
title: security.protocol title: security.protocol
options: regex: ^(SASL_SSL|PLAINTEXT|SSL|SASL_PLAINTEXT)
- SASL_SSL
- PLAINTEXT
- SSL
- SASL_PLAINTEXT
helpLink: kafka helpLink: kafka
ssl_x_keystore_x_location: ssl_x_keystore_x_location:
description: The key store file location within the Docker container. description: The key store file location within the Docker container.
@@ -181,10 +174,7 @@ kafka:
ssl_x_keystore_x_type: ssl_x_keystore_x_type:
description: The key store file format. description: The key store file format.
title: ssl.keystore.type title: ssl.keystore.type
options: regex: ^(JKS|PKCS12|PEM)$
- JKS
- PKCS12
- PEM
helpLink: kafka helpLink: kafka
ssl_x_truststore_x_location: ssl_x_truststore_x_location:
description: The trust store file location within the Docker container. description: The trust store file location within the Docker container.
+1 -1
View File
@@ -22,7 +22,7 @@ kibana:
- default - default
- file - file
migrations: migrations:
discardCorruptObjects: "9.3.3" discardCorruptObjects: "8.18.8"
telemetry: telemetry:
enabled: False enabled: False
xpack: xpack:
@@ -9,5 +9,5 @@ SESSIONCOOKIE=$(curl -K /opt/so/conf/elasticsearch/curl.config -c - -X GET http:
# Disable certain Features from showing up in the Kibana UI # Disable certain Features from showing up in the Kibana UI
echo echo
echo "Setting up default Kibana Space:" echo "Setting up default Kibana Space:"
curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X PUT "localhost:5601/api/spaces/space/default" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d' {"id":"default","name":"Default","disabledFeatures":["ml","enterpriseSearch","logs","infrastructure","apm","uptime","monitoring","stackAlerts","actions","securitySolutionCasesV3","inventory","dataQuality","searchSynonyms","searchQueryRules","enterpriseSearchApplications","enterpriseSearchAnalytics","securitySolutionTimeline","securitySolutionNotes","securitySolutionRulesV1","entityManager","streams","cloudConnect","slo"]} ' >> /opt/so/log/kibana/misc.log curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X PUT "localhost:5601/api/spaces/space/default" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d' {"id":"default","name":"Default","disabledFeatures":["ml","enterpriseSearch","logs","infrastructure","apm","uptime","monitoring","stackAlerts","actions","securitySolutionCasesV3","inventory","dataQuality","searchSynonyms","enterpriseSearchApplications","enterpriseSearchAnalytics","securitySolutionTimeline","securitySolutionNotes","entityManager"]} ' >> /opt/so/log/kibana/misc.log
echo echo
+5 -10
View File
@@ -3,8 +3,8 @@ kratos:
description: Enables or disables the Kratos authentication system. WARNING - Disabling this process will cause the grid to malfunction. Re-enabling this setting will require manual effort via SSH. description: Enables or disables the Kratos authentication system. WARNING - Disabling this process will cause the grid to malfunction. Re-enabling this setting will require manual effort via SSH.
forcedType: bool forcedType: bool
advanced: True advanced: True
readonly: True
helpLink: kratos helpLink: kratos
oidc: oidc:
enabled: enabled:
description: Set to True to enable OIDC / Single Sign-On (SSO) to SOC. Requires a valid Security Onion license key. description: Set to True to enable OIDC / Single Sign-On (SSO) to SOC. Requires a valid Security Onion license key.
@@ -21,12 +21,8 @@ kratos:
description: "Specify the provider type. Required. Valid values are: auth0, generic, github, google, microsoft" description: "Specify the provider type. Required. Valid values are: auth0, generic, github, google, microsoft"
global: True global: True
forcedType: string forcedType: string
options: regex: "auth0|generic|github|google|microsoft"
- auth0 regexFailureMessage: "Valid values are: auth0, generic, github, google, microsoft"
- generic
- github
- google
- microsoft
helpLink: oidc helpLink: oidc
client_id: client_id:
description: Specify the client ID, also referenced as the application ID. Required. description: Specify the client ID, also referenced as the application ID. Required.
@@ -47,9 +43,8 @@ kratos:
description: The source of the subject identifier. Typically 'userinfo'. Only used when provider is 'microsoft'. description: The source of the subject identifier. Typically 'userinfo'. Only used when provider is 'microsoft'.
global: True global: True
forcedType: string forcedType: string
options: regex: me|userinfo
- me regexFailureMessage: "Valid values are: me, userinfo"
- userinfo
helpLink: oidc helpLink: oidc
auth_url: auth_url:
description: Provider's auth URL. Required when provider is 'generic'. description: Provider's auth URL. Required when provider is 'generic'.
+2 -2
View File
@@ -132,8 +132,8 @@ function getinstallinfo() {
log "ERROR" "Failed to get install info from $MINION_ID" log "ERROR" "Failed to get install info from $MINION_ID"
return 1 return 1
fi fi
while read -r var; do export "$var"; done <<< "$INSTALLVARS" export $(echo "$INSTALLVARS" | xargs)
if [ $? -ne 0 ]; then if [ $? -ne 0 ]; then
log "ERROR" "Failed to source install variables" log "ERROR" "Failed to source install variables"
return 1 return 1
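The two variants above differ in how they handle values containing spaces: export $(echo "$INSTALLVARS" | xargs) word-splits the entire string, while the read -r loop exports one NAME=VALUE line at a time and keeps each value intact. A minimal sketch with made-up values:

INSTALLVARS=$'MINION_ID=sensor01\nNODE_DESCRIPTION=lab sensor'
while read -r var; do export "$var"; done <<< "$INSTALLVARS"
echo "$NODE_DESCRIPTION"   # prints "lab sensor"; the xargs form would split this into two words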
+46 -69
View File
@@ -24,14 +24,6 @@ BACKUPTOPFILE=/opt/so/saltstack/default/salt/top.sls.backup
SALTUPGRADED=false SALTUPGRADED=false
SALT_CLOUD_INSTALLED=false SALT_CLOUD_INSTALLED=false
SALT_CLOUD_CONFIGURED=false SALT_CLOUD_CONFIGURED=false
# Check if salt-cloud is installed
if rpm -q salt-cloud &>/dev/null; then
SALT_CLOUD_INSTALLED=true
fi
# Check if salt-cloud is configured
if [[ -f /etc/salt/cloud.profiles.d/socloud.conf ]]; then
SALT_CLOUD_CONFIGURED=true
fi
# used to display messages to the user at the end of soup # used to display messages to the user at the end of soup
declare -a FINAL_MESSAGE_QUEUE=() declare -a FINAL_MESSAGE_QUEUE=()
@@ -371,7 +363,6 @@ preupgrade_changes() {
echo "Checking to see if changes are needed." echo "Checking to see if changes are needed."
[[ "$INSTALLEDVERSION" =~ ^2\.4\.21[0-9]+$ ]] && up_to_3.0.0 [[ "$INSTALLEDVERSION" =~ ^2\.4\.21[0-9]+$ ]] && up_to_3.0.0
[[ "$INSTALLEDVERSION" == "3.0.0" ]] && up_to_3.1.0
true true
} }
@@ -380,7 +371,6 @@ postupgrade_changes() {
echo "Running post upgrade processes." echo "Running post upgrade processes."
[[ "$POSTVERSION" =~ ^2\.4\.21[0-9]+$ ]] && post_to_3.0.0 [[ "$POSTVERSION" =~ ^2\.4\.21[0-9]+$ ]] && post_to_3.0.0
[[ "$POSTVERSION" == "3.0.0" ]] && post_to_3.1.0
true true
} }
@@ -455,6 +445,7 @@ migrate_pcap_to_suricata() {
} }
up_to_3.0.0() { up_to_3.0.0() {
determine_elastic_agent_upgrade
migrate_pcap_to_suricata migrate_pcap_to_suricata
INSTALLEDVERSION=3.0.0 INSTALLEDVERSION=3.0.0
@@ -478,36 +469,6 @@ post_to_3.0.0() {
### 3.0.0 End ### ### 3.0.0 End ###
### 3.1.0 Scripts ###
elasticsearch_backup_index_templates() {
echo "Backing up current elasticsearch index templates in /opt/so/conf/elasticsearch/templates/index/ to /nsm/backup/3.0.0_elasticsearch_index_templates.tar.gz"
tar -czf /nsm/backup/3.0.0_elasticsearch_index_templates.tar.gz -C /opt/so/conf/elasticsearch/templates/index/ .
}
up_to_3.1.0() {
determine_elastic_agent_upgrade
elasticsearch_backup_index_templates
# Clear existing component template state file.
rm -f /opt/so/state/esfleet_component_templates.json
INSTALLEDVERSION=3.1.0
}
post_to_3.1.0() {
/usr/sbin/so-kibana-space-defaults
# ensure manager has new version of socloud.conf
if [[ $SALT_CLOUD_CONFIGURED == true ]]; then
salt-call state.apply salt.cloud.config concurrent=True
fi
POSTVERSION=3.1.0
}
### 3.1.0 End ###
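The backup created above is an ordinary gzipped tarball rooted at the index template directory, so restoring it, should that ever be necessary, is just the inverse tar invocation; a sketch, assuming the backup file is still present at the path shown:

tar -xzf /nsm/backup/3.0.0_elasticsearch_index_templates.tar.gz -C /opt/so/conf/elasticsearch/templates/index/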
repo_sync() { repo_sync() {
echo "Sync the local repo." echo "Sync the local repo."
su socore -c '/usr/sbin/so-repo-sync' || fail "Unable to complete so-repo-sync." su socore -c '/usr/sbin/so-repo-sync' || fail "Unable to complete so-repo-sync."
@@ -675,6 +636,15 @@ upgrade_check_salt() {
upgrade_salt() { upgrade_salt() {
echo "Performing upgrade of Salt from $INSTALLEDSALTVERSION to $NEWSALTVERSION." echo "Performing upgrade of Salt from $INSTALLEDSALTVERSION to $NEWSALTVERSION."
echo "" echo ""
# Check if salt-cloud is installed
if rpm -q salt-cloud &>/dev/null; then
SALT_CLOUD_INSTALLED=true
fi
# Check if salt-cloud is configured
if [[ -f /etc/salt/cloud.profiles.d/socloud.conf ]]; then
SALT_CLOUD_CONFIGURED=true
fi
echo "Removing yum versionlock for Salt." echo "Removing yum versionlock for Salt."
echo "" echo ""
yum versionlock delete "salt" yum versionlock delete "salt"
@@ -758,12 +728,12 @@ verify_es_version_compatibility() {
local is_active_intermediate_upgrade=1 local is_active_intermediate_upgrade=1
# supported upgrade paths for SO-ES versions # supported upgrade paths for SO-ES versions
declare -A es_upgrade_map=( declare -A es_upgrade_map=(
["9.0.8"]="9.3.3" ["8.18.8"]="9.0.8"
) )
# Elasticsearch MUST upgrade through these versions # Elasticsearch MUST upgrade through these versions
declare -A es_to_so_version=( declare -A es_to_so_version=(
["9.0.8"]="3.0.0-20260331" ["8.18.8"]="2.4.190-20251024"
) )
# Get current Elasticsearch version # Get current Elasticsearch version
@@ -775,17 +745,26 @@ verify_es_version_compatibility() {
exit 160 exit 160
fi fi
if ! target_es_version=$(so-yaml.py get -r $UPDATE_DIR/salt/elasticsearch/defaults.yaml elasticsearch.version); then if ! target_es_version_raw=$(so-yaml.py get $UPDATE_DIR/salt/elasticsearch/defaults.yaml elasticsearch.version); then
echo "Couldn't determine the target Elasticsearch version (post soup version) to ensure compatibility with current Elasticsearch version. Exiting" # so-yaml.py failed to get the ES version from upgrade versions elasticsearch/defaults.yaml file. Likely they are upgrading to an SO version older than 2.4.110 prior to the ES version pinning and should be OKAY to continue with the upgrade.
exit 160 # if so-yaml.py failed to get the ES version AND the version we are upgrading to is newer than 2.4.110 then we should bail
if [[ $(cat $UPDATE_DIR/VERSION | cut -d'.' -f3) > 110 ]]; then
echo "Couldn't determine the target Elasticsearch version (post soup version) to ensure compatibility with current Elasticsearch version. Exiting"
exit 160
fi
# allow upgrade to version < 2.4.110 without checking ES version compatibility
return 0
else
target_es_version=$(sed -n '1p' <<< "$target_es_version_raw")
fi fi
for statefile in "${es_required_version_statefile_base}"-*; do for statefile in "${es_required_version_statefile_base}"-*; do
[[ -f $statefile ]] || continue [[ -f $statefile ]] || continue
local es_required_version_statefile_value local es_required_version_statefile_value=$(cat "$statefile")
es_required_version_statefile_value=$(cat "$statefile")
if [[ "$es_required_version_statefile_value" == "$target_es_version" ]]; then if [[ "$es_required_version_statefile_value" == "$target_es_version" ]]; then
echo "Intermediate upgrade to ES $target_es_version is in progress. Skipping Elasticsearch version compatibility check." echo "Intermediate upgrade to ES $target_es_version is in progress. Skipping Elasticsearch version compatibility check."
@@ -794,7 +773,7 @@ verify_es_version_compatibility() {
fi fi
# use sort to check if es_required_statefile_value is < the current es_version. # use sort to check if es_required_statefile_value is < the current es_version.
if [[ "$(printf '%s\n' "$es_required_version_statefile_value" "$es_version" | sort -V | head -n1)" == "$es_required_version_statefile_value" ]]; then if [[ "$(printf '%s\n' $es_required_version_statefile_value $es_version | sort -V | head -n1)" == "$es_required_version_statefile_value" ]]; then
rm -f "$statefile" rm -f "$statefile"
continue continue
fi fi
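The sort -V comparison above is a common shell idiom for dotted version strings: print both versions, sort them version-aware, and whichever sorts first is the lower (or equal) one. A minimal sketch:

is_not_newer() {
  # returns 0 if $1 <= $2 when compared as version strings
  [[ "$(printf '%s\n' "$1" "$2" | sort -V | head -n1)" == "$1" ]]
}
is_not_newer "8.18.8" "9.0.8" && echo "8.18.8 is not newer than 9.0.8"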
@@ -805,7 +784,8 @@ verify_es_version_compatibility() {
echo -e "\n##############################################################################################################################\n" echo -e "\n##############################################################################################################################\n"
echo "A previously required intermediate Elasticsearch upgrade was detected. Verifying that all Searchnodes/Heavynodes have successfully upgraded Elasticsearch to $es_required_version_statefile_value before proceeding with soup to avoid potential data loss! This command can take up to an hour to complete." echo "A previously required intermediate Elasticsearch upgrade was detected. Verifying that all Searchnodes/Heavynodes have successfully upgraded Elasticsearch to $es_required_version_statefile_value before proceeding with soup to avoid potential data loss! This command can take up to an hour to complete."
if ! timeout --foreground 4000 bash "$es_verification_script" "$es_required_version_statefile_value" "$statefile"; then timeout --foreground 4000 bash "$es_verification_script" "$es_required_version_statefile_value" "$statefile"
if [[ $? -ne 0 ]]; then
echo -e "\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n" echo -e "\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
echo "A previous required intermediate Elasticsearch upgrade to $es_required_version_statefile_value has yet to successfully complete across the grid. Please allow time for all Searchnodes/Heavynodes to have upgraded Elasticsearch to $es_required_version_statefile_value before running soup again to avoid potential data loss!" echo "A previous required intermediate Elasticsearch upgrade to $es_required_version_statefile_value has yet to successfully complete across the grid. Please allow time for all Searchnodes/Heavynodes to have upgraded Elasticsearch to $es_required_version_statefile_value before running soup again to avoid potential data loss!"
@@ -822,7 +802,6 @@ verify_es_version_compatibility() {
return 0 return 0
fi fi
# shellcheck disable=SC2076 # Do not want a regex here eg usage " 8.18.8 9.0.8 " =~ " 9.0.8 "
if [[ " ${es_upgrade_map[$es_version]} " =~ " $target_es_version " || "$es_version" == "$target_es_version" ]]; then if [[ " ${es_upgrade_map[$es_version]} " =~ " $target_es_version " || "$es_version" == "$target_es_version" ]]; then
# supported upgrade # supported upgrade
return 0 return 0
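The shellcheck SC2076 suppression above is intentional: quoting the right-hand side of =~ makes bash treat it as a literal substring rather than a regular expression, so padding both sides with spaces turns the test into an exact membership check against a space-separated list. The same pattern with hard-coded values:

allowed=" 8.18.8 9.0.8 "
[[ "$allowed" =~ " 9.0.8 " ]] && echo "9.0.8 is in the list"        # literal substring match
[[ "$allowed" =~ " 9.0 " ]] || echo "9.0 alone is not in the list"  # partial versions do not match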
@@ -831,7 +810,7 @@ verify_es_version_compatibility() {
if [[ -z "$compatible_versions" ]]; then if [[ -z "$compatible_versions" ]]; then
# If current ES version is not explicitly defined in the upgrade map, we know they have an intermediate upgrade to do. # If current ES version is not explicitly defined in the upgrade map, we know they have an intermediate upgrade to do.
# We default to the lowest ES version defined in es_to_so_version as $first_es_required_version # We default to the lowest ES version defined in es_to_so_version as $first_es_required_version
first_es_required_version=$(printf '%s\n' "${!es_to_so_version[@]}" | sort -V | head -n1) local first_es_required_version=$(printf '%s\n' "${!es_to_so_version[@]}" | sort -V | head -n1)
next_step_so_version=${es_to_so_version[$first_es_required_version]} next_step_so_version=${es_to_so_version[$first_es_required_version]}
required_es_upgrade_version="$first_es_required_version" required_es_upgrade_version="$first_es_required_version"
else else
@@ -850,7 +829,7 @@ verify_es_version_compatibility() {
if [[ $is_airgap -eq 0 ]]; then if [[ $is_airgap -eq 0 ]]; then
run_airgap_intermediate_upgrade run_airgap_intermediate_upgrade
else else
if [[ -n $ISOLOC ]]; then if [[ ! -z $ISOLOC ]]; then
originally_requested_iso_location="$ISOLOC" originally_requested_iso_location="$ISOLOC"
fi fi
# Make sure ISOLOC is not set. Network installs that used soup -f would have ISOLOC set. # Make sure ISOLOC is not set. Network installs that used soup -f would have ISOLOC set.
@@ -882,8 +861,7 @@ wait_for_salt_minion_with_restart() {
} }
run_airgap_intermediate_upgrade() { run_airgap_intermediate_upgrade() {
local originally_requested_so_version local originally_requested_so_version=$(cat $UPDATE_DIR/VERSION)
originally_requested_so_version=$(cat "$UPDATE_DIR/VERSION")
# preserve ISOLOC value, so we can try to use it post intermediate upgrade # preserve ISOLOC value, so we can try to use it post intermediate upgrade
local originally_requested_iso_location="$ISOLOC" local originally_requested_iso_location="$ISOLOC"
@@ -895,8 +873,7 @@ run_airgap_intermediate_upgrade() {
while [[ -z "$next_iso_location" ]] || [[ ! -f "$next_iso_location" && ! -b "$next_iso_location" ]]; do while [[ -z "$next_iso_location" ]] || [[ ! -f "$next_iso_location" && ! -b "$next_iso_location" ]]; do
# List removable devices if any are present # List removable devices if any are present
local removable_devices local removable_devices=$(lsblk -no PATH,SIZE,TYPE,MOUNTPOINTS,RM | awk '$NF==1')
removable_devices=$(lsblk -no PATH,SIZE,TYPE,MOUNTPOINTS,RM | awk '$NF==1')
if [[ -n "$removable_devices" ]]; then if [[ -n "$removable_devices" ]]; then
echo "PATH SIZE TYPE MOUNTPOINTS RM" echo "PATH SIZE TYPE MOUNTPOINTS RM"
echo "$removable_devices" echo "$removable_devices"
@@ -917,21 +894,21 @@ run_airgap_intermediate_upgrade() {
echo "Using $next_iso_location for required intermediary upgrade." echo "Using $next_iso_location for required intermediary upgrade."
exec bash <<EOF exec bash <<EOF
ISOLOC="$next_iso_location" soup -y && \ ISOLOC=$next_iso_location soup -y && \
ISOLOC="$next_iso_location" soup -y && \ ISOLOC=$next_iso_location soup -y && \
echo -e "\n##############################################################################################################################\n" && \ echo -e "\n##############################################################################################################################\n" && \
echo -e "Verifying Elasticsearch was successfully upgraded to $required_es_upgrade_version across the grid. This part can take a while as Searchnodes/Heavynodes sync up with the Manager! \n\nOnce verification completes the next soup will begin automatically. If verification takes longer than 1 hour it will stop waiting and your grid will remain at $next_step_so_version. Allowing for all Searchnodes/Heavynodes to upgrade Elasticsearch to the required version on their own time.\n" && \ echo -e "Verifying Elasticsearch was successfully upgraded to $required_es_upgrade_version across the grid. This part can take a while as Searchnodes/Heavynodes sync up with the Manager! \n\nOnce verification completes the next soup will begin automatically. If verification takes longer than 1 hour it will stop waiting and your grid will remain at $next_step_so_version. Allowing for all Searchnodes/Heavynodes to upgrade Elasticsearch to the required version on their own time.\n" && \
timeout --foreground 4000 bash /tmp/so_intermediate_upgrade_verification.sh "$required_es_upgrade_version" "$es_required_version_statefile" && \ timeout --foreground 4000 bash /tmp/so_intermediate_upgrade_verification.sh $required_es_upgrade_version $es_required_version_statefile && \
echo -e "\n##############################################################################################################################\n" && \ echo -e "\n##############################################################################################################################\n" && \
# automatically start the next soup if the original ISO isn't using the same block device we just used # automatically start the next soup if the original ISO isn't using the same block device we just used
if [[ -n "$originally_requested_iso_location" ]] && [[ "$originally_requested_iso_location" != "$next_iso_location" ]]; then if [[ -n "$originally_requested_iso_location" ]] && [[ "$originally_requested_iso_location" != "$next_iso_location" ]]; then
umount /tmp/soagupdate umount /tmp/soagupdate
ISOLOC="$originally_requested_iso_location" soup -y && \ ISOLOC=$originally_requested_iso_location soup -y && \
ISOLOC="$originally_requested_iso_location" soup -y ISOLOC=$originally_requested_iso_location soup -y
else else
echo "Could not automatically start next soup to $originally_requested_so_version. Soup will now exit here at $(cat /etc/soversion)" && \ echo "Could not automatically start next soup to $originally_requested_so_version. Soup will now exit here at $(cat /etc/soversion)" && \
@@ -947,29 +924,29 @@ run_network_intermediate_upgrade() {
if [[ -n "$BRANCH" ]]; then if [[ -n "$BRANCH" ]]; then
local originally_requested_so_branch="$BRANCH" local originally_requested_so_branch="$BRANCH"
else else
local originally_requested_so_branch="3/main" local originally_requested_so_branch="2.4/main"
fi fi
echo "Starting automated intermediate upgrade to $next_step_so_version." echo "Starting automated intermediate upgrade to $next_step_so_version."
echo "After completion, the system will automatically attempt to upgrade to the latest version." echo "After completion, the system will automatically attempt to upgrade to the latest version."
echo -e "\n##############################################################################################################################\n" echo -e "\n##############################################################################################################################\n"
exec bash << EOF exec bash << EOF
BRANCH="$next_step_so_version" soup -y && \ BRANCH=$next_step_so_version soup -y && \
BRANCH="$next_step_so_version" soup -y && \ BRANCH=$next_step_so_version soup -y && \
echo -e "\n##############################################################################################################################\n" && \ echo -e "\n##############################################################################################################################\n" && \
echo -e "Verifying Elasticsearch was successfully upgraded to $required_es_upgrade_version across the grid. This part can take a while as Searchnodes/Heavynodes sync up with the Manager! \n\nOnce verification completes the next soup will begin automatically. If verification takes longer than 1 hour it will stop waiting and your grid will remain at $next_step_so_version. Allowing for all Searchnodes/Heavynodes to upgrade Elasticsearch to the required version on their own time.\n" && \ echo -e "Verifying Elasticsearch was successfully upgraded to $required_es_upgrade_version across the grid. This part can take a while as Searchnodes/Heavynodes sync up with the Manager! \n\nOnce verification completes the next soup will begin automatically. If verification takes longer than 1 hour it will stop waiting and your grid will remain at $next_step_so_version. Allowing for all Searchnodes/Heavynodes to upgrade Elasticsearch to the required version on their own time.\n" && \
timeout --foreground 4000 bash /tmp/so_intermediate_upgrade_verification.sh "$required_es_upgrade_version" "$es_required_version_statefile" && \ timeout --foreground 4000 bash /tmp/so_intermediate_upgrade_verification.sh $required_es_upgrade_version $es_required_version_statefile && \
echo -e "\n##############################################################################################################################\n" && \ echo -e "\n##############################################################################################################################\n" && \
if [[ -n "$originally_requested_iso_location" ]]; then if [[ -n "$originally_requested_iso_location" ]]; then
# nonairgap soup that used -f originally, runs intermediate upgrade using network + BRANCH, later coming back to the original ISO for the last soup # nonairgap soup that used -f originally, runs intermediate upgrade using network + BRANCH, later coming back to the original ISO for the last soup
ISOLOC="$originally_requested_iso_location" soup -y && \ ISOLOC=$originally_requested_iso_location soup -y && \
ISOLOC="$originally_requested_iso_location" soup -y ISOLOC=$originally_requested_iso_location soup -y
else else
BRANCH="$originally_requested_so_branch" soup -y && \ BRANCH=$originally_requested_so_branch soup -y && \
BRANCH="$originally_requested_so_branch" soup -y BRANCH=$originally_requested_so_branch soup -y
fi fi
echo -e "\n##############################################################################################################################\n" echo -e "\n##############################################################################################################################\n"
EOF EOF
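Both intermediate-upgrade paths hand control off with exec bash <<EOF: the current soup process is replaced by a new bash reading the heredoc, so nothing after the exec in the original script runs, and because the heredoc delimiter is unquoted, variables such as $next_step_so_version and $required_es_upgrade_version are expanded before the new shell starts. A stripped-down sketch of the pattern (names and commands are illustrative):

next_version="3.0.0"
exec bash <<EOF
echo "first soup pass targeting $next_version" && \
echo "second soup pass targeting $next_version"
EOF
# never reached: the shell was replaced by the exec above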
@@ -27,7 +27,6 @@ sool9_{{host}}:
log_file: /opt/so/log/salt/minion log_file: /opt/so/log/salt/minion
grains: grains:
hypervisor_host: {{host ~ "_" ~ role}} hypervisor_host: {{host ~ "_" ~ role}}
sosmodel: HVGUEST
preflight_cmds: preflight_cmds:
- | - |
{%- set hostnames = [MANAGERHOSTNAME] %} {%- set hostnames = [MANAGERHOSTNAME] %}
-1
View File
@@ -2687,5 +2687,4 @@ soc:
lowBalanceColorAlert: 500000 lowBalanceColorAlert: 500000
enabled: true enabled: true
adapter: SOAI adapter: SOAI
charsPerTokenEstimate: 4
+1 -10
View File
@@ -3,7 +3,6 @@ soc:
description: Enables or disables SOC. WARNING - Disabling this setting is unsupported and will cause the grid to malfunction. Re-enabling this setting is a manual effort via SSH. description: Enables or disables SOC. WARNING - Disabling this setting is unsupported and will cause the grid to malfunction. Re-enabling this setting is a manual effort via SSH.
forcedType: bool forcedType: bool
advanced: True advanced: True
readonly: True
telemetryEnabled: telemetryEnabled:
title: SOC Telemetry title: SOC Telemetry
description: When this setting is enabled and the grid is not in airgap mode, SOC will provide feature usage data to the Security Onion development team via Google Analytics. This data helps Security Onion developers determine which product features are being used and can also provide insight into improving the user interface. When changing this setting, wait for the grid to fully synchronize and then perform a hard browser refresh on SOC, to force the browser cache to update and reflect the new setting. description: When this setting is enabled and the grid is not in airgap mode, SOC will provide feature usage data to the Security Onion development team via Google Analytics. This data helps Security Onion developers determine which product features are being used and can also provide insight into improving the user interface. When changing this setting, wait for the grid to fully synchronize and then perform a hard browser refresh on SOC, to force the browser cache to update and reflect the new setting.
@@ -762,7 +761,7 @@ soc:
required: True required: True
- field: origin - field: origin
label: Country of Origin for the Model Training label: Country of Origin for the Model Training
required: False required: false
- field: contextLimitSmall - field: contextLimitSmall
label: Context Limit (Small) label: Context Limit (Small)
forcedType: int forcedType: int
@@ -780,10 +779,6 @@ soc:
- field: enabled - field: enabled
label: Enabled label: Enabled
forcedType: bool forcedType: bool
- field: charsPerTokenEstimate
label: Characters per Token Estimate
forcedType: float
required: False
apiTimeoutMs: apiTimeoutMs:
description: Duration (in milliseconds) to wait for a response from the SOC server API before giving up and showing an error on the SOC UI. description: Duration (in milliseconds) to wait for a response from the SOC server API before giving up and showing an error on the SOC UI.
global: True global: True
@@ -891,16 +886,12 @@ soc:
suricata: suricata:
description: The template used when creating a new Suricata detection. [publicId] will be replaced with an unused Public Id. description: The template used when creating a new Suricata detection. [publicId] will be replaced with an unused Public Id.
multiline: True multiline: True
forcedType: string
strelka: strelka:
description: The template used when creating a new Strelka detection. description: The template used when creating a new Strelka detection.
multiline: True multiline: True
forcedType: string
elastalert: elastalert:
description: The template used when creating a new ElastAlert detection. [publicId] will be replaced with an unused Public Id. description: The template used when creating a new ElastAlert detection. [publicId] will be replaced with an unused Public Id.
multiline: True multiline: True
forcedType: string
grid: grid:
maxUploadSize: maxUploadSize:
description: The maximum number of bytes for an uploaded PCAP import file. description: The maximum number of bytes for an uploaded PCAP import file.
+1 -1
View File
@@ -33,7 +33,7 @@
{% do SURICATAMERGED.config.outputs['pcap-log'].update({'conditional': SURICATAMERGED.pcap.conditional}) %} {% do SURICATAMERGED.config.outputs['pcap-log'].update({'conditional': SURICATAMERGED.pcap.conditional}) %}
{% do SURICATAMERGED.config.outputs['pcap-log'].update({'dir': SURICATAMERGED.pcap.dir}) %} {% do SURICATAMERGED.config.outputs['pcap-log'].update({'dir': SURICATAMERGED.pcap.dir}) %}
{# multiply maxsize by 1000 since it is saved in GB, i.e. 52 = 52000MB. filesize is also saved in MB and we strip the MB and convert to int #} {# multiply maxsize by 1000 since it is saved in GB, i.e. 52 = 52000MB. filesize is also saved in MB and we strip the MB and convert to int #}
{% set maxfiles = ([1, (SURICATAMERGED.pcap.maxsize * 1000 / (SURICATAMERGED.pcap.filesize[:-2] | int) / SURICATAMERGED.config['af-packet'].threads | int) | round(0, 'ceil') | int] | max) %} {% set maxfiles = (SURICATAMERGED.pcap.maxsize * 1000 / (SURICATAMERGED.pcap.filesize[:-2] | int) / SURICATAMERGED.config['af-packet'].threads | int) | round | int %}
{% do SURICATAMERGED.config.outputs['pcap-log'].update({'max-files': maxfiles}) %} {% do SURICATAMERGED.config.outputs['pcap-log'].update({'max-files': maxfiles}) %}
{% endif %} {% endif %}
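Working the comment's arithmetic through with round numbers helps when reading the template: pcap.maxsize is stored in GB and multiplied by 1000 to get MB, then divided by the per-file size and the number of af-packet threads to arrive at max-files. A sketch with made-up values (the real numbers come from the merged Suricata pillar):

maxsize_gb=52      # pcap.maxsize, stored in GB (52 = 52000 MB)
filesize_mb=150    # pcap.filesize with the trailing "MB" stripped
threads=4          # config.af-packet.threads
echo $(( maxsize_gb * 1000 / filesize_mb / threads ))   # 86 with integer division; the Jinja expression rounds to 87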
+3 -7
View File
@@ -64,10 +64,8 @@ suricata:
helpLink: suricata helpLink: suricata
conditional: conditional:
description: Set to "all" to record PCAP for all flows. Set to "alerts" to only record PCAP for Suricata alerts. Set to "tag" to only record PCAP for tagged rules. description: Set to "all" to record PCAP for all flows. Set to "alerts" to only record PCAP for Suricata alerts. Set to "tag" to only record PCAP for tagged rules.
options: regex: ^(all|alerts|tag)$
- all regexFailureMessage: You must enter all, alerts, or tag.
- alerts
- tag
helpLink: suricata helpLink: suricata
dir: dir:
description: Parent directory to store PCAP. description: Parent directory to store PCAP.
@@ -85,9 +83,7 @@ suricata:
advanced: True advanced: True
cluster-type: cluster-type:
advanced: True advanced: True
options: regex: ^(cluster_flow|cluster_qm)$
- cluster_flow
- cluster_qm
defrag: defrag:
description: Enable defragmentation of IP packets before processing. description: Enable defragmentation of IP packets before processing.
forcedType: bool forcedType: bool
-1
View File
@@ -219,7 +219,6 @@ if [ -n "$test_profile" ]; then
WEBUSER=onionuser@somewhere.invalid WEBUSER=onionuser@somewhere.invalid
WEBPASSWD1=0n10nus3r WEBPASSWD1=0n10nus3r
WEBPASSWD2=0n10nus3r WEBPASSWD2=0n10nus3r
NODE_DESCRIPTION="${HOSTNAME} - ${install_type} - ${MSRVIP_OFFSET}"
update_sudoers_for_testing update_sudoers_for_testing
fi fi