Mirror of https://github.com/Security-Onion-Solutions/securityonion.git (synced 2026-05-09 12:52:38 +02:00)
Compare commits
2 Commits
| Author | SHA1 | Date |
|---|---|---|
|  | acc9b8062e |  |
|  | c6c538363d |  |
@@ -0,0 +1,2 @@
elasticsearch:
index_settings:

+3 -20
@@ -38,9 +38,6 @@ base:
{% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/elasticsearch/auth.sls') %}
- elasticsearch.auth
{% endif %}
{% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/postgres/auth.sls') %}
- postgres.auth
{% endif %}
{% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/kibana/secrets.sls') %}
- kibana.secrets
{% endif %}
@@ -63,8 +60,6 @@ base:
- redis.adv_redis
- influxdb.soc_influxdb
- influxdb.adv_influxdb
- postgres.soc_postgres
- postgres.adv_postgres
- elasticsearch.nodes
- elasticsearch.soc_elasticsearch
- elasticsearch.adv_elasticsearch
@@ -102,12 +97,10 @@ base:
- node_data.ips
- secrets
- healthcheck.eval
- elasticsearch.index_templates
{% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/elasticsearch/auth.sls') %}
- elasticsearch.auth
{% endif %}
{% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/postgres/auth.sls') %}
- postgres.auth
{% endif %}
{% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/kibana/secrets.sls') %}
- kibana.secrets
{% endif %}
@@ -133,8 +126,6 @@ base:
- redis.adv_redis
- influxdb.soc_influxdb
- influxdb.adv_influxdb
- postgres.soc_postgres
- postgres.adv_postgres
- backup.soc_backup
- backup.adv_backup
- zeek.soc_zeek
@@ -151,12 +142,10 @@ base:
- logstash.nodes
- logstash.soc_logstash
- logstash.adv_logstash
- elasticsearch.index_templates
{% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/elasticsearch/auth.sls') %}
- elasticsearch.auth
{% endif %}
{% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/postgres/auth.sls') %}
- postgres.auth
{% endif %}
{% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/kibana/secrets.sls') %}
- kibana.secrets
{% endif %}
@@ -171,8 +160,6 @@ base:
- redis.adv_redis
- influxdb.soc_influxdb
- influxdb.adv_influxdb
- postgres.soc_postgres
- postgres.adv_postgres
- elasticsearch.nodes
- elasticsearch.soc_elasticsearch
- elasticsearch.adv_elasticsearch
@@ -269,12 +256,10 @@ base:
'*_import':
- node_data.ips
- secrets
- elasticsearch.index_templates
{% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/elasticsearch/auth.sls') %}
- elasticsearch.auth
{% endif %}
{% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/postgres/auth.sls') %}
- postgres.auth
{% endif %}
{% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/kibana/secrets.sls') %}
- kibana.secrets
{% endif %}
@@ -300,8 +285,6 @@ base:
- redis.adv_redis
- influxdb.soc_influxdb
- influxdb.adv_influxdb
- postgres.soc_postgres
- postgres.adv_postgres
- zeek.soc_zeek
- zeek.adv_zeek
- bpf.soc_bpf

@@ -29,8 +29,6 @@
'manager',
'nginx',
'influxdb',
'postgres',
'postgres.auth',
'soc',
'kratos',
'hydra',
@@ -43,8 +41,7 @@
'suricata',
'healthcheck',
'tcpreplay',
'zeek',
'strelka'
'zeek'
] %}

{% set kafka_states = [
@@ -86,26 +83,26 @@
),
'so-import': (
manager_states +
sensor_states | reject('equalto', 'strelka') | reject('equalto', 'healthcheck') | list +
['elasticsearch', 'elasticsearch.auth', 'kibana', 'kibana.secrets', 'logstash.ssl', 'strelka.manager']
sensor_states | reject('equalto', 'healthcheck') | list +
['elasticsearch', 'elasticsearch.auth', 'kibana', 'kibana.secrets', 'logstash.ssl']
),
'so-manager': (
manager_states +
['salt.cloud', 'libvirt.packages', 'libvirt.ssh.users', 'strelka.manager'] +
['salt.cloud', 'libvirt.packages', 'libvirt.ssh.users'] +
stig_states +
kafka_states +
elastic_stack_states
),
'so-managerhype': (
manager_states +
['salt.cloud', 'strelka.manager', 'hypervisor', 'libvirt'] +
['salt.cloud', 'hypervisor', 'libvirt'] +
stig_states +
kafka_states +
elastic_stack_states
),
'so-managersearch': (
manager_states +
['salt.cloud', 'libvirt.packages', 'libvirt.ssh.users', 'strelka.manager'] +
['salt.cloud', 'libvirt.packages', 'libvirt.ssh.users'] +
stig_states +
kafka_states +
elastic_stack_states

@@ -32,4 +32,3 @@ so_config_backup:
- daymonth: '*'
- month: '*'
- dayweek: '*'

@@ -54,20 +54,6 @@ x509_signing_policies:
- extendedKeyUsage: serverAuth
- days_valid: 820
- copypath: /etc/pki/issued_certs/
postgres:
- minions: '*'
- signing_private_key: /etc/pki/ca.key
- signing_cert: /etc/pki/ca.crt
- C: US
- ST: Utah
- L: Salt Lake City
- basicConstraints: "critical CA:false"
- keyUsage: "critical keyEncipherment"
- subjectKeyIdentifier: hash
- authorityKeyIdentifier: keyid,issuer:always
- extendedKeyUsage: serverAuth
- days_valid: 820
- copypath: /etc/pki/issued_certs/
elasticfleet:
- minions: '*'
- signing_private_key: /etc/pki/ca.key

@@ -31,7 +31,6 @@ container_list() {
"so-hydra"
"so-nginx"
"so-pcaptools"
"so-postgres"
"so-soc"
"so-suricata"
"so-telegraf"
@@ -56,11 +55,8 @@ container_list() {
"so-logstash"
"so-nginx"
"so-pcaptools"
"so-postgres"
"so-redis"
"so-soc"
"so-strelka-backend"
"so-strelka-manager"
"so-suricata"
"so-telegraf"
"so-zeek"

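container_list() echoes the role-appropriate container names one per line, so callers can iterate over its output. A hypothetical status loop over that list (assumes /usr/sbin/so-common is sourced and Docker is running; not part of this diff):

# Hypothetical consumer of container_list().
. /usr/sbin/so-common
for c in $(container_list); do
  docker ps --filter "name=${c}" --format '{{.Names}}: {{.Status}}'
done
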
@@ -42,21 +42,6 @@ clean() {
# done
#fi

## Clean up Zeek extracted files processed by Strelka
STRELKA_FILES='/nsm/strelka/processed'
OLDEST_STRELKA=$(find $STRELKA_FILES -type f -printf '%T+ %p\n' | sort -n | head -n 1)
if [ -z "$OLDEST_STRELKA" -o "$OLDEST_STRELKA" == ".." -o "$OLDEST_STRELKA" == "." ]; then
echo "$(date) - No old files available to clean up in $STRELKA_FILES" >>$LOG
else
OLDEST_STRELKA_DATE=$(echo $OLDEST_STRELKA | awk '{print $1}' | cut -d+ -f1)
OLDEST_STRELKA_FILE=$(echo $OLDEST_STRELKA | awk '{print $2}')
echo "$(date) - Removing extracted files for $OLDEST_STRELKA_DATE" >>$LOG
find $STRELKA_FILES -type f -printf '%T+ %p\n' | grep $OLDEST_STRELKA_DATE | awk '{print $2}' | while read FILE; do
echo "$(date) - Removing file: $FILE" >>$LOG
rm -f "$FILE"
done
fi

## Clean up Suricata log files
SURICATA_LOGS='/nsm/suricata'
OLDEST_SURICATA=$(find $SURICATA_LOGS -type f -printf '%T+ %p\n' | sort -n | head -n 1)

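The removed Strelka block follows the same oldest-day purge pattern that so-clean still applies to Suricata below it: locate the single oldest file, extract its date, then delete every file from that day. A minimal standalone sketch of the technique; TARGET_DIR is a placeholder, not a real Security Onion path:

#!/bin/bash
# Oldest-day purge sketch; TARGET_DIR is a placeholder.
TARGET_DIR='/nsm/example'
# %T+ prints 'YYYY-MM-DD+HH:MM:SS path'; lexical sort puts the oldest first.
oldest=$(find "$TARGET_DIR" -type f -printf '%T+ %p\n' | sort -n | head -n 1)
if [ -z "$oldest" ]; then
  echo "nothing to clean in $TARGET_DIR"
else
  day=$(echo "$oldest" | awk '{print $1}' | cut -d+ -f1)  # keep only the date
  find "$TARGET_DIR" -type f -printf '%T+ %p\n' | grep "$day" | awk '{print $2}' |
  while read -r f; do
    rm -f "$f"  # delete everything last modified on the oldest day
  done
fi
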
@@ -134,48 +134,6 @@ docker:
extra_hosts: []
extra_env: []
ulimits: []
'so-strelka-backend':
final_octet: 36
custom_bind_mounts: []
extra_hosts: []
extra_env: []
ulimits: []
'so-strelka-filestream':
final_octet: 37
custom_bind_mounts: []
extra_hosts: []
extra_env: []
ulimits: []
'so-strelka-frontend':
final_octet: 38
port_bindings:
- 0.0.0.0:57314:57314
custom_bind_mounts: []
extra_hosts: []
extra_env: []
ulimits: []
'so-strelka-manager':
final_octet: 39
custom_bind_mounts: []
extra_hosts: []
extra_env: []
ulimits: []
'so-strelka-gatekeeper':
final_octet: 40
port_bindings:
- 0.0.0.0:6381:6379
custom_bind_mounts: []
extra_hosts: []
extra_env: []
ulimits: []
'so-strelka-coordinator':
final_octet: 41
port_bindings:
- 0.0.0.0:6380:6379
custom_bind_mounts: []
extra_hosts: []
extra_env: []
ulimits: []
'so-elastalert':
final_octet: 42
custom_bind_mounts: []
@@ -237,11 +195,3 @@ docker:
extra_hosts: []
extra_env: []
ulimits: []
'so-postgres':
final_octet: 47
port_bindings:
- 0.0.0.0:5432:5432
custom_bind_mounts: []
extra_hosts: []
extra_env: []
ulimits: []

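Each port_bindings entry is a Docker host:container mapping; the coordinator's 0.0.0.0:6380:6379, for instance, exposes the container's Redis port 6379 on host port 6380. The equivalent mapping expressed on the docker CLI would be (image name illustrative only):

# Same binding via docker run -p; the image tag is a placeholder.
docker run -d --name so-strelka-coordinator -p 0.0.0.0:6380:6379 example/redis:latest
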
@@ -89,12 +89,6 @@ docker:
so-redis: *dockerOptions
so-sensoroni: *dockerOptions
so-soc: *dockerOptions
so-strelka-backend: *dockerOptions
so-strelka-filestream: *dockerOptions
so-strelka-frontend: *dockerOptions
so-strelka-manager: *dockerOptions
so-strelka-gatekeeper: *dockerOptions
so-strelka-coordinator: *dockerOptions
so-elastalert: *dockerOptions
so-elastic-fleet-package-registry: *dockerOptions
so-idh: *dockerOptions

@@ -1,123 +0,0 @@
{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
this file except in compliance with the Elastic License 2.0. #}

{% import_json '/opt/so/state/esfleet_content_package_components.json' as ADDON_CONTENT_PACKAGE_COMPONENTS %}
{% import_json '/opt/so/state/esfleet_component_templates.json' as INSTALLED_COMPONENT_TEMPLATES %}
{% import_yaml 'elasticfleet/defaults.yaml' as ELASTICFLEETDEFAULTS %}

{% set CORE_ESFLEET_PACKAGES = ELASTICFLEETDEFAULTS.get('elasticfleet', {}).get('packages', {}) %}
{% set ADDON_CONTENT_INTEGRATION_DEFAULTS = {} %}
{% set DEBUG_STUFF = {} %}

{% for pkg in ADDON_CONTENT_PACKAGE_COMPONENTS %}
{% if pkg.name in CORE_ESFLEET_PACKAGES %}
{# skip core content packages #}
{% elif pkg.name not in CORE_ESFLEET_PACKAGES %}
{# generate defaults for each content package #}
{% if pkg.dataStreams is defined and pkg.dataStreams is not none and pkg.dataStreams | length > 0%}
{% for pattern in pkg.dataStreams %}
{# in ES 9.3.2 'input' type integrations no longer create default component templates and instead they wait for user input during 'integration' setup (fleet ui config)
title: generic is an artifact of that and is not in use #}
{% if pattern.title == "generic" %}
{% continue %}
{% endif %}
{% if "metrics-" in pattern.name %}
{% set integration_type = "metrics-" %}
{% elif "logs-" in pattern.name %}
{% set integration_type = "logs-" %}
{% else %}
{% set integration_type = "" %}
{% endif %}
{# on content integrations the component name is user defined at the time it is added to an agent policy #}
{% set component_name = pattern.title %}
{% set index_pattern = pattern.name %}
{# component_name_x maintains the functionality of merging local pillar changes with generated 'defaults' via SOC UI #}
{% set component_name_x = component_name.replace(".","_x_") %}
{# pillar overrides/merge expects the key names to follow the naming in elasticsearch/defaults.yaml eg. so-logs-1password_x_item_usages . The _x_ is replaced later on in elasticsearch/template.map.jinja #}
{% set integration_key = "so-" ~ integration_type ~ pkg.name + '_x_' ~ component_name_x %}
{# Default integration settings #}
{% set integration_defaults = {
"index_sorting": false,
"index_template": {
"composed_of": [integration_type ~ component_name ~ "@package", integration_type ~ component_name ~ "@custom", "so-fleet_integrations.ip_mappings-1", "so-fleet_globals-1", "so-fleet_agent_id_verification-1"],
"data_stream": {
"allow_custom_routing": false,
"hidden": false
},
"ignore_missing_component_templates": [integration_type ~ component_name ~ "@custom"],
"index_patterns": [index_pattern],
"priority": 501,
"template": {
"settings": {
"index": {
"lifecycle": {"name": "so-" ~ integration_type ~ component_name ~ "-logs"},
"number_of_replicas": 0
}
}
}
},
"policy": {
"phases": {
"cold": {
"actions": {
"allocate":{
"number_of_replicas": ""
},
"set_priority": {"priority": 0}
},
"min_age": "60d"
},
"delete": {
"actions": {
"delete": {}
},
"min_age": "365d"
},
"hot": {
"actions": {
"rollover": {
"max_age": "30d",
"max_primary_shard_size": "50gb"
},
"forcemerge":{
"max_num_segments": ""
},
"shrink":{
"max_primary_shard_size": "",
"method": "COUNT",
"number_of_shards": ""
},
"set_priority": {"priority": 100}
},
"min_age": "0ms"
},
"warm": {
"actions": {
"allocate": {
"number_of_replicas": ""
},
"forcemerge": {
"max_num_segments": ""
},
"shrink":{
"max_primary_shard_size": "",
"method": "COUNT",
"number_of_shards": ""
},
"set_priority": {"priority": 50}
},
"min_age": "30d"
}
}
}
} %}

{% do ADDON_CONTENT_INTEGRATION_DEFAULTS.update({integration_key: integration_defaults}) %}
{% endfor %}
{% else %}
{% endif %}
{% endif %}
{% endfor %}

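The _x_ substitution in the deleted template exists because the generated pillar keys cannot safely carry dots; dots are encoded on the way in and, per the comment above, restored later in elasticsearch/template.map.jinja. A quick shell illustration of the round trip, using the example names from the template's own comment:

# Encode: "." becomes "_x_" when the integration key is built.
pkg="1password"; component="item_usages"   # example names from the comment
key="so-logs-${pkg}_x_${component}"
echo "$key"                      # so-logs-1password_x_item_usages
# Decode, as template.map.jinja does later: "_x_" back to "."
echo "$key" | sed 's/_x_/./g'    # so-logs-1password.item_usages
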
@@ -1,6 +1,5 @@
elasticfleet:
enabled: False
patch_version: 9.3.3+build202604082258 # Elastic Agent specific patch release.
enable_manager_output: True
config:
server:

+2 -9
@@ -9,22 +9,16 @@
"namespace": "so",
"description": "Zeek Import logs",
"policy_id": "so-grid-nodes_general",
"policy_ids": [
"so-grid-nodes_general"
],
"vars": {},
"inputs": {
"filestream-filestream": {
"enabled": true,
"streams": {
"filestream.filestream": {
"filestream.generic": {
"enabled": true,
"vars": {
"paths": [
"/nsm/import/*/zeek/logs/*.log"
],
"compression_gzip": false,
"use_logs_stream": false,
"data_stream.dataset": "import",
"pipeline": "",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
@@ -40,8 +34,7 @@
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": [],
"delete_enabled": false
"include_lines": []
}
}
}

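This and the following integration files all swap the singular policy_id for a policy_ids array (the surrounding policy JSON is otherwise unchanged). If such a file ever needed migrating by hand, a jq one-liner along these lines would do it; the file name is a placeholder:

# Sketch: convert a legacy policy_id into a policy_ids array.
jq '.policy_ids = [.policy_id] | del(.policy_id)' integration.json
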
@@ -15,25 +15,19 @@
"version": ""
},
"name": "kratos-logs",
"namespace": "so",
"description": "Kratos logs",
"policy_id": "so-grid-nodes_general",
"policy_ids": [
"so-grid-nodes_general"
],
"vars": {},
"namespace": "so",
"inputs": {
"filestream-filestream": {
"enabled": true,
"streams": {
"filestream.filestream": {
"filestream.generic": {
"enabled": true,
"vars": {
"paths": [
"/opt/so/log/kratos/kratos.log"
],
"compression_gzip": false,
"use_logs_stream": false,
"data_stream.dataset": "kratos",
"pipeline": "kratos",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
@@ -54,10 +48,10 @@
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": [],
"delete_enabled": false
"include_lines": []
}
}
}

@@ -9,22 +9,16 @@
"namespace": "so",
"description": "Zeek logs",
"policy_id": "so-grid-nodes_general",
"policy_ids": [
"so-grid-nodes_general"
],
"vars": {},
"inputs": {
"filestream-filestream": {
"enabled": true,
"streams": {
"filestream.filestream": {
"filestream.generic": {
"enabled": true,
"vars": {
"paths": [
"/nsm/zeek/logs/current/*.log"
],
"compression_gzip": false,
"use_logs_stream": false,
"data_stream.dataset": "zeek",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": ["({%- endraw -%}{{ ELASTICFLEETMERGED.logging.zeek.excluded | join('|') }}{%- raw -%})(\\..+)?\\.log$"],
@@ -36,10 +30,10 @@
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": [],
"delete_enabled": false
"include_lines": []
}
}
}

@@ -5,7 +5,7 @@
"package": {
"name": "endpoint",
"title": "Elastic Defend",
"version": "9.3.0",
"version": "9.0.2",
"requires_root": true
},
"enabled": true,

@@ -6,23 +6,21 @@
"name": "agent-monitor",
"namespace": "",
"description": "",
"policy_id": "so-grid-nodes_general",
"policy_ids": [
"so-grid-nodes_general"
],
"output_id": null,
"vars": {},
"inputs": {
"filestream-filestream": {
"enabled": true,
"streams": {
"filestream.filestream": {
"filestream.generic": {
"enabled": true,
"vars": {
"paths": [
"/opt/so/log/agents/agent-monitor.log"
],
"compression_gzip": false,
"use_logs_stream": false,
"data_stream.dataset": "agentmonitor",
"pipeline": "elasticagent.monitor",
"parsers": "",
@@ -36,16 +34,15 @@
"ignore_older": "72h",
"clean_inactive": -1,
"harvester_limit": 0,
"fingerprint": false,
"fingerprint": true,
"fingerprint_offset": 0,
"file_identity_native": true,
"fingerprint_length": 64,
"file_identity_native": false,
"exclude_lines": [],
"include_lines": [],
"delete_enabled": false
"include_lines": []
}
}
}
}
},
"force": true
}
}

@@ -4,25 +4,19 @@
"version": ""
},
"name": "hydra-logs",
"namespace": "so",
"description": "Hydra logs",
"policy_id": "so-grid-nodes_general",
"policy_ids": [
"so-grid-nodes_general"
],
"vars": {},
"namespace": "so",
"inputs": {
"filestream-filestream": {
"enabled": true,
"streams": {
"filestream.filestream": {
"filestream.generic": {
"enabled": true,
"vars": {
"paths": [
"/opt/so/log/hydra/hydra.log"
],
"compression_gzip": false,
"use_logs_stream": false,
"data_stream.dataset": "hydra",
"pipeline": "hydra",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
@@ -40,10 +34,10 @@
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": [],
"delete_enabled": false
"include_lines": []
}
}
}

@@ -4,25 +4,19 @@
"version": ""
},
"name": "idh-logs",
"namespace": "so",
"description": "IDH integration",
"policy_id": "so-grid-nodes_general",
"policy_ids": [
"so-grid-nodes_general"
],
"vars": {},
"namespace": "so",
"inputs": {
"filestream-filestream": {
"enabled": true,
"streams": {
"filestream.filestream": {
"filestream.generic": {
"enabled": true,
"vars": {
"paths": [
"/nsm/idh/opencanary.log"
],
"compression_gzip": false,
"use_logs_stream": false,
"data_stream.dataset": "idh",
"pipeline": "common",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
@@ -37,10 +31,10 @@
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": [],
"delete_enabled": false
"include_lines": []
}
}
}

@@ -4,32 +4,26 @@
"version": ""
},
"name": "import-evtx-logs",
"namespace": "so",
"description": "Import Windows EVTX logs",
"policy_id": "so-grid-nodes_general",
"policy_ids": [
"so-grid-nodes_general"
],
"vars": {},
"namespace": "so",
"inputs": {
"filestream-filestream": {
"enabled": true,
"streams": {
"filestream.filestream": {
"filestream.generic": {
"enabled": true,
"vars": {
"paths": [
"/nsm/import/*/evtx/*.json"
],
"compression_gzip": false,
"use_logs_stream": false,
"data_stream.dataset": "import",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [
"\\.gz$"
],
"include_files": [],
"processors": "- dissect:\n tokenizer: \"/nsm/import/%{import.id}/evtx/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n- drop_fields:\n fields: [\"host\"]\n ignore_missing: true\n- add_fields:\n target: data_stream\n fields:\n type: logs\n dataset: system.security\n- add_fields:\n target: event\n fields:\n dataset: system.security\n module: system\n imported: true\n- add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.security-2.15.0\n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-Sysmon/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.sysmon_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.sysmon_operational\n module: windows\n imported: true\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.sysmon_operational-3.8.0\n- if:\n equals:\n winlog.channel: 'Application'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.application\n - add_fields:\n target: event\n fields:\n dataset: system.application\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.application-2.15.0\n- if:\n equals:\n winlog.channel: 'System'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.system\n - add_fields:\n target: event\n fields:\n dataset: system.system\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.system-2.15.0\n \n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-PowerShell/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.powershell_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.powershell_operational\n module: windows\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.powershell_operational-3.8.0\n- add_fields:\n target: data_stream\n fields:\n dataset: import",
"processors": "- dissect:\n tokenizer: \"/nsm/import/%{import.id}/evtx/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n- drop_fields:\n fields: [\"host\"]\n ignore_missing: true\n- add_fields:\n target: data_stream\n fields:\n type: logs\n dataset: system.security\n- add_fields:\n target: event\n fields:\n dataset: system.security\n module: system\n imported: true\n- add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.security-2.6.1\n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-Sysmon/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.sysmon_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.sysmon_operational\n module: windows\n imported: true\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.sysmon_operational-3.1.2\n- if:\n equals:\n winlog.channel: 'Application'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.application\n - add_fields:\n target: event\n fields:\n dataset: system.application\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.application-2.6.1\n- if:\n equals:\n winlog.channel: 'System'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.system\n - add_fields:\n target: event\n fields:\n dataset: system.system\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.system-2.6.1\n \n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-PowerShell/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.powershell_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.powershell_operational\n module: windows\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.powershell_operational-3.1.2\n- add_fields:\n target: data_stream\n fields:\n dataset: import",
"tags": [
"import"
],
@@ -39,10 +33,10 @@
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": [],
"delete_enabled": false
"include_lines": []
}
}
}

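The two long processors values above are Beats processor pipelines stored as escaped YAML inside a JSON string; the only difference between them is the ingest pipeline versions (logs-system.security-2.15.0 vs 2.6.1, sysmon 3.8.0 vs 3.1.2, and so on). To read one of these strings as plain YAML, jq's raw output is enough, assuming the file's top-level inputs object; the file name is a placeholder:

# Print the embedded processors YAML readably (placeholder file name).
jq -r '.inputs["filestream-filestream"].streams[].vars.processors' import-evtx-logs.json
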
@@ -4,25 +4,19 @@
"version": ""
},
"name": "import-suricata-logs",
"namespace": "so",
"description": "Import Suricata logs",
"policy_id": "so-grid-nodes_general",
"policy_ids": [
"so-grid-nodes_general"
],
"vars": {},
"namespace": "so",
"inputs": {
"filestream-filestream": {
"enabled": true,
"streams": {
"filestream.filestream": {
"filestream.generic": {
"enabled": true,
"vars": {
"paths": [
"/nsm/import/*/suricata/eve*.json"
],
"compression_gzip": false,
"use_logs_stream": false,
"data_stream.dataset": "import",
"pipeline": "suricata.common",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
@@ -38,10 +32,10 @@
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": [],
"delete_enabled": false
"include_lines": []
}
}
}

@@ -4,18 +4,14 @@
"version": ""
},
"name": "rita-logs",
"namespace": "so",
"description": "RITA Logs",
"policy_id": "so-grid-nodes_general",
"policy_ids": [
"so-grid-nodes_general"
],
"vars": {},
"namespace": "so",
"inputs": {
"filestream-filestream": {
"enabled": true,
"streams": {
"filestream.filestream": {
"filestream.generic": {
"enabled": true,
"vars": {
"paths": [
@@ -23,8 +19,6 @@
"/nsm/rita/exploded-dns.csv",
"/nsm/rita/long-connections.csv"
],
"compression_gzip": false,
"use_logs_stream": false,
"data_stream.dataset": "rita",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [
@@ -39,10 +33,10 @@
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": [],
"delete_enabled": false
"include_lines": []
}
}
}

@@ -4,25 +4,19 @@
"version": ""
},
"name": "so-ip-mappings",
"namespace": "so",
"description": "IP Description mappings",
"policy_id": "so-grid-nodes_general",
"policy_ids": [
"so-grid-nodes_general"
],
"vars": {},
"namespace": "so",
"inputs": {
"filestream-filestream": {
"enabled": true,
"streams": {
"filestream.filestream": {
"filestream.generic": {
"enabled": true,
"vars": {
"paths": [
"/nsm/custom-mappings/ip-descriptions.csv"
],
"compression_gzip": false,
"use_logs_stream": false,
"data_stream.dataset": "hostnamemappings",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
"exclude_files": [
@@ -38,10 +32,10 @@
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": [],
"delete_enabled": false
"include_lines": []
}
}
}

@@ -4,25 +4,19 @@
"version": ""
},
"name": "soc-auth-sync-logs",
"namespace": "so",
"description": "Security Onion - Elastic Auth Sync - Logs",
"policy_id": "so-grid-nodes_general",
"policy_ids": [
"so-grid-nodes_general"
],
"vars": {},
"namespace": "so",
"inputs": {
"filestream-filestream": {
"enabled": true,
"streams": {
"filestream.filestream": {
"filestream.generic": {
"enabled": true,
"vars": {
"paths": [
"/opt/so/log/soc/sync.log"
],
"compression_gzip": false,
"use_logs_stream": false,
"data_stream.dataset": "soc",
"pipeline": "common",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
@@ -37,10 +31,10 @@
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": [],
"delete_enabled": false
"include_lines": []
}
}
}

@@ -4,26 +4,20 @@
"version": ""
},
"name": "soc-detections-logs",
"namespace": "so",
"description": "Security Onion Console - Detections Logs",
"policy_id": "so-grid-nodes_general",
"policy_ids": [
"so-grid-nodes_general"
],
"vars": {},
"namespace": "so",
"inputs": {
"filestream-filestream": {
"enabled": true,
"streams": {
"filestream.filestream": {
"filestream.generic": {
"enabled": true,
"vars": {
"paths": [
"/opt/so/log/soc/detections_runtime-status_sigma.log",
"/opt/so/log/soc/detections_runtime-status_yara.log"
],
"compression_gzip": false,
"use_logs_stream": false,
"data_stream.dataset": "soc",
"pipeline": "common",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
@@ -41,10 +35,10 @@
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": [],
"delete_enabled": false
"include_lines": []
}
}
}

@@ -4,25 +4,19 @@
"version": ""
},
"name": "soc-salt-relay-logs",
"namespace": "so",
"description": "Security Onion - Salt Relay - Logs",
"policy_id": "so-grid-nodes_general",
"policy_ids": [
"so-grid-nodes_general"
],
"vars": {},
"namespace": "so",
"inputs": {
"filestream-filestream": {
"enabled": true,
"streams": {
"filestream.filestream": {
"filestream.generic": {
"enabled": true,
"vars": {
"paths": [
"/opt/so/log/soc/salt-relay.log"
],
"compression_gzip": false,
"use_logs_stream": false,
"data_stream.dataset": "soc",
"pipeline": "common",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
@@ -39,10 +33,10 @@
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": [],
"delete_enabled": false
"include_lines": []
}
}
}

@@ -4,25 +4,19 @@
"version": ""
},
"name": "soc-sensoroni-logs",
"namespace": "so",
"description": "Security Onion - Sensoroni - Logs",
"policy_id": "so-grid-nodes_general",
"policy_ids": [
"so-grid-nodes_general"
],
"vars": {},
"namespace": "so",
"inputs": {
"filestream-filestream": {
"enabled": true,
"streams": {
"filestream.filestream": {
"filestream.generic": {
"enabled": true,
"vars": {
"paths": [
"/opt/so/log/sensoroni/sensoroni.log"
],
"compression_gzip": false,
"use_logs_stream": false,
"data_stream.dataset": "soc",
"pipeline": "common",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
@@ -37,10 +31,10 @@
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": [],
"delete_enabled": false
"include_lines": []
}
}
}

@@ -4,25 +4,19 @@
"version": ""
},
"name": "soc-server-logs",
"namespace": "so",
"description": "Security Onion Console Logs",
"policy_id": "so-grid-nodes_general",
"policy_ids": [
"so-grid-nodes_general"
],
"vars": {},
"namespace": "so",
"inputs": {
"filestream-filestream": {
"enabled": true,
"streams": {
"filestream.filestream": {
"filestream.generic": {
"enabled": true,
"vars": {
"paths": [
"/opt/so/log/soc/sensoroni-server.log"
],
"compression_gzip": false,
"use_logs_stream": false,
"data_stream.dataset": "soc",
"pipeline": "common",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
@@ -39,10 +33,10 @@
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": [],
"delete_enabled": false
"include_lines": []
}
}
}

@@ -4,25 +4,19 @@
"version": ""
},
"name": "strelka-logs",
"namespace": "so",
"description": "Strelka Logs",
"policy_id": "so-grid-nodes_general",
"policy_ids": [
"so-grid-nodes_general"
],
"vars": {},
"namespace": "so",
"inputs": {
"filestream-filestream": {
"enabled": true,
"streams": {
"filestream.filestream": {
"filestream.generic": {
"enabled": true,
"vars": {
"paths": [
"/nsm/strelka/log/strelka.log"
],
"compression_gzip": false,
"use_logs_stream": false,
"data_stream.dataset": "strelka",
"pipeline": "strelka.file",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
@@ -37,10 +31,10 @@
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": [],
"delete_enabled": false
"include_lines": []
}
}
}

@@ -4,25 +4,19 @@
"version": ""
},
"name": "suricata-logs",
"namespace": "so",
"description": "Suricata integration",
"policy_id": "so-grid-nodes_general",
"policy_ids": [
"so-grid-nodes_general"
],
"vars": {},
"namespace": "so",
"inputs": {
"filestream-filestream": {
"enabled": true,
"streams": {
"filestream.filestream": {
"filestream.generic": {
"enabled": true,
"vars": {
"paths": [
"/nsm/suricata/eve*.json"
],
"compression_gzip": false,
"use_logs_stream": false,
"data_stream.dataset": "suricata",
"pipeline": "suricata.common",
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
@@ -37,10 +31,10 @@
"harvester_limit": 0,
"fingerprint": false,
"fingerprint_offset": 0,
"fingerprint_length": "64",
"file_identity_native": true,
"exclude_lines": [],
"include_lines": [],
"delete_enabled": false
"include_lines": []
}
}
}

@@ -1,123 +0,0 @@
{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
this file except in compliance with the Elastic License 2.0. #}

{% import_json '/opt/so/state/esfleet_input_package_components.json' as ADDON_INPUT_PACKAGE_COMPONENTS %}
{% import_json '/opt/so/state/esfleet_component_templates.json' as INSTALLED_COMPONENT_TEMPLATES %}
{% import_yaml 'elasticfleet/defaults.yaml' as ELASTICFLEETDEFAULTS %}

{% set CORE_ESFLEET_PACKAGES = ELASTICFLEETDEFAULTS.get('elasticfleet', {}).get('packages', {}) %}
{% set ADDON_INPUT_INTEGRATION_DEFAULTS = {} %}
{% set DEBUG_STUFF = {} %}

{% for pkg in ADDON_INPUT_PACKAGE_COMPONENTS %}
{% if pkg.name in CORE_ESFLEET_PACKAGES %}
{# skip core input packages #}
{% elif pkg.name not in CORE_ESFLEET_PACKAGES %}
{# generate defaults for each input package #}
{% if pkg.dataStreams is defined and pkg.dataStreams is not none and pkg.dataStreams | length > 0 %}
{% for pattern in pkg.dataStreams %}
{# in ES 9.3.2 'input' type integrations no longer create default component templates and instead they wait for user input during 'integration' setup (fleet ui config)
title: generic is an artifact of that and is not in use #}
{% if pattern.title == "generic" %}
{% continue %}
{% endif %}
{% if "metrics-" in pattern.name %}
{% set integration_type = "metrics-" %}
{% elif "logs-" in pattern.name %}
{% set integration_type = "logs-" %}
{% else %}
{% set integration_type = "" %}
{% endif %}
{# on input integrations the component name is user defined at the time it is added to an agent policy #}
{% set component_name = pattern.title %}
{% set index_pattern = pattern.name %}
{# component_name_x maintains the functionality of merging local pillar changes with generated 'defaults' via SOC UI #}
{% set component_name_x = component_name.replace(".","_x_") %}
{# pillar overrides/merge expects the key names to follow the naming in elasticsearch/defaults.yaml eg. so-logs-1password_x_item_usages . The _x_ is replaced later on in elasticsearch/template.map.jinja #}
{% set integration_key = "so-" ~ integration_type ~ pkg.name + '_x_' ~ component_name_x %}
{# Default integration settings #}
{% set integration_defaults = {
"index_sorting": false,
"index_template": {
"composed_of": [integration_type ~ component_name ~ "@package", integration_type ~ component_name ~ "@custom", "so-fleet_integrations.ip_mappings-1", "so-fleet_globals-1", "so-fleet_agent_id_verification-1"],
"data_stream": {
"allow_custom_routing": false,
"hidden": false
},
"ignore_missing_component_templates": [integration_type ~ component_name ~ "@custom"],
"index_patterns": [index_pattern],
"priority": 501,
"template": {
"settings": {
"index": {
"lifecycle": {"name": "so-" ~ integration_type ~ component_name ~ "-logs"},
"number_of_replicas": 0
}
}
}
},
"policy": {
"phases": {
"cold": {
"actions": {
"allocate":{
"number_of_replicas": ""
},
"set_priority": {"priority": 0}
},
"min_age": "60d"
},
"delete": {
"actions": {
"delete": {}
},
"min_age": "365d"
},
"hot": {
"actions": {
"rollover": {
"max_age": "30d",
"max_primary_shard_size": "50gb"
},
"forcemerge":{
"max_num_segments": ""
},
"shrink":{
"max_primary_shard_size": "",
"method": "COUNT",
"number_of_shards": ""
},
"set_priority": {"priority": 100}
},
"min_age": "0ms"
},
"warm": {
"actions": {
"allocate": {
"number_of_replicas": ""
},
"forcemerge": {
"max_num_segments": ""
},
"shrink":{
"max_primary_shard_size": "",
"method": "COUNT",
"number_of_shards": ""
},
"set_priority": {"priority": 50}
},
"min_age": "30d"
}
}
}
} %}

{% do ADDON_INPUT_INTEGRATION_DEFAULTS.update({integration_key: integration_defaults}) %}
{% do DEBUG_STUFF.update({integration_key: "Generating defaults for "+ pkg.name })%}
{% endfor %}
{% endif %}
{% endif %}
{% endfor %}

@@ -59,8 +59,8 @@
{# skip core integrations #}
{% elif pkg.name not in CORE_ESFLEET_PACKAGES %}
{# generate defaults for each integration #}
{% if pkg.dataStreams is defined and pkg.dataStreams is not none and pkg.dataStreams | length > 0 %}
{% for pattern in pkg.dataStreams %}
{% if pkg.es_index_patterns is defined and pkg.es_index_patterns is not none %}
{% for pattern in pkg.es_index_patterns %}
{% if "metrics-" in pattern.name %}
{% set integration_type = "metrics-" %}
{% elif "logs-" in pattern.name %}
@@ -75,27 +75,44 @@
{% if component_name in WEIRD_INTEGRATIONS %}
{% set component_name = WEIRD_INTEGRATIONS[component_name] %}
{% endif %}

{# create duplicate of component_name, so we can split generics from @custom component templates in the index template below and overwrite the default @package when needed
eg. having to replace unifiedlogs.generic@package with filestream.generic@package, but keep the ability to customize unifiedlogs.generic@custom and its ILM policy #}
{% set custom_component_name = component_name %}

{# duplicate integration_type to assist with sometimes needing to overwrite component templates with 'logs-filestream.generic@package' (there is no metrics-filestream.generic@package) #}
{% set generic_integration_type = integration_type %}

{# component_name_x maintains the functionality of merging local pillar changes with generated 'defaults' via SOC UI #}
{% set component_name_x = component_name.replace(".","_x_") %}
{# pillar overrides/merge expects the key names to follow the naming in elasticsearch/defaults.yaml eg. so-logs-1password_x_item_usages . The _x_ is replaced later on in elasticsearch/template.map.jinja #}
{% set integration_key = "so-" ~ integration_type ~ component_name_x %}

{# if its a .generic template make sure that a .generic@package for the integration exists. Else default to logs-filestream.generic@package #}
{% if ".generic" in component_name and integration_type ~ component_name ~ "@package" not in INSTALLED_COMPONENT_TEMPLATES %}
{# these generic templates by default are directed to index_pattern of 'logs-generic-*', overwrite that here to point to eg gcp_pubsub.generic-* #}
{% set index_pattern = integration_type ~ component_name ~ "-*" %}
{# includes use of .generic component template, but it doesn't exist in installed component templates. Redirect it to filestream.generic@package #}
{% set component_name = "filestream.generic" %}
{% set generic_integration_type = "logs-" %}
{% endif %}

{# Default integration settings #}
{% set integration_defaults = {
"index_sorting": false,
"index_template": {
"composed_of": [integration_type ~ component_name ~ "@package", integration_type ~ component_name ~ "@custom", "so-fleet_integrations.ip_mappings-1", "so-fleet_globals-1", "so-fleet_agent_id_verification-1"],
"composed_of": [generic_integration_type ~ component_name ~ "@package", integration_type ~ custom_component_name ~ "@custom", "so-fleet_integrations.ip_mappings-1", "so-fleet_globals-1", "so-fleet_agent_id_verification-1"],
"data_stream": {
"allow_custom_routing": false,
"hidden": false
},
"ignore_missing_component_templates": [integration_type ~ component_name ~ "@custom"],
"ignore_missing_component_templates": [integration_type ~ custom_component_name ~ "@custom"],
"index_patterns": [index_pattern],
"priority": 501,
"template": {
"settings": {
"index": {
"lifecycle": {"name": "so-" ~ integration_type ~ component_name ~ "-logs"},
"lifecycle": {"name": "so-" ~ integration_type ~ custom_component_name ~ "-logs"},
"number_of_replicas": 0
}
}

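The INSTALLED_COMPONENT_TEMPLATES list that the .generic fallback above consults is refreshed by querying Elasticsearch directly; the pipeline below mirrors the refresh step that appears later in this diff, writing to the state file this template imports:

# Refresh the component template list consumed by template.map.jinja.
so-elasticsearch-query _component_template | \
  jq '.component_templates[] | .name' | jq -s '.' \
  > /opt/so/state/esfleet_component_templates.json
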
@@ -135,33 +135,9 @@ elastic_fleet_bulk_package_install() {
fi
}

elastic_fleet_get_package_list_by_type() {
if ! output=$(fleet_api "epm/packages"); then
elastic_fleet_installed_packages() {
if ! fleet_api "epm/packages/installed?perPage=500"; then
return 1
else
is_integration=$(jq '[.items[] | select(.type=="integration") | .name ]' <<< "$output")
is_input=$(jq '[.items[] | select(.type=="input") | .name ]' <<< "$output")
is_content=$(jq '[.items[] | select(.type=="content") | .name ]' <<< "$output")
jq -n --argjson is_integration "${is_integration:-[]}" \
--argjson is_input "${is_input:-[]}" \
--argjson is_content "${is_content:-[]}" \
'{"integration": $is_integration,"input": $is_input, "content": $is_content}'
fi
}
elastic_fleet_installed_packages_components() {
package_type=${1,,}
if [[ "$package_type" != "integration" && "$package_type" != "input" && "$package_type" != "content" ]]; then
echo "Error: Invalid package type ${package_type}. Valid types are 'integration', 'input', or 'content'."
return 1
fi

packages_by_type=$(elastic_fleet_get_package_list_by_type)
packages=$(jq --arg package_type "$package_type" '.[$package_type]' <<< "$packages_by_type")

if ! output=$(fleet_api "epm/packages/installed?perPage=500"); then
return 1
else
jq -c --argjson packages "$packages" '[.items[] | select(.name | IN($packages[])) | {name: .name, dataStreams: .dataStreams}]' <<< "$output"
fi
}

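Assuming these Fleet helpers are sourced from /usr/sbin/so-common as elsewhere in the repo, the per-type variant can be exercised directly; a usage sketch (output is the name/dataStreams array built by the jq filter above, and a working Fleet API is required):

# Usage sketch; requires a reachable Fleet API and sourced so-common.
. /usr/sbin/so-common
elastic_fleet_installed_packages_components "input" | jq .
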
@@ -6,11 +6,6 @@

. /usr/sbin/so-common
{%- import_yaml 'elasticsearch/defaults.yaml' as ELASTICSEARCHDEFAULTS %}
{%- import_yaml 'elasticfleet/defaults.yaml' as ELASTICFLEETDEFAULTS %}
{# Optionally override Elasticsearch version for Elastic Agent patch releases #}
{%- if ELASTICFLEETDEFAULTS.elasticfleet.patch_version is defined %}
{%- do ELASTICSEARCHDEFAULTS.update({'elasticsearch': {'version': ELASTICFLEETDEFAULTS.elasticfleet.patch_version}}) %}
{%- endif %}

# Only run on Managers
if ! is_manager_node; then

@@ -18,9 +18,7 @@ INSTALLED_PACKAGE_LIST=/tmp/esfleet_installed_packages.json
BULK_INSTALL_PACKAGE_LIST=/tmp/esfleet_bulk_install.json
BULK_INSTALL_PACKAGE_TMP=/tmp/esfleet_bulk_install_tmp.json
BULK_INSTALL_OUTPUT=/opt/so/state/esfleet_bulk_install_results.json
INTEGRATION_PACKAGE_COMPONENTS=/opt/so/state/esfleet_package_components.json
INPUT_PACKAGE_COMPONENTS=/opt/so/state/esfleet_input_package_components.json
CONTENT_PACKAGE_COMPONENTS=/opt/so/state/esfleet_content_package_components.json
PACKAGE_COMPONENTS=/opt/so/state/esfleet_package_components.json
COMPONENT_TEMPLATES=/opt/so/state/esfleet_component_templates.json

PENDING_UPDATE=false
@@ -181,13 +179,10 @@ if [[ -f $STATE_FILE_SUCCESS ]]; then
else
echo "Elastic integrations don't appear to need installation/updating..."
fi
# Write out file for generating index/component/ilm templates, keeping each package type separate
for package_type in "INTEGRATION" "INPUT" "CONTENT"; do
if latest_installed_package_list=$(elastic_fleet_installed_packages_components "$package_type"); then
outfile="${package_type}_PACKAGE_COMPONENTS"
echo $latest_installed_package_list > "${!outfile}"
fi
done
# Write out file for generating index/component/ilm templates
if latest_installed_package_list=$(elastic_fleet_installed_packages); then
echo $latest_installed_package_list | jq '[.items[] | {name: .name, es_index_patterns: .dataStreams}]' > $PACKAGE_COMPONENTS
fi
if retry 3 1 "so-elasticsearch-query / --fail --output /dev/null"; then
# Refresh installed component template list
latest_component_templates_list=$(so-elasticsearch-query _component_template | jq '.component_templates[] | .name' | jq -s '.')

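The removed per-type loop relies on bash indirect expansion: ${!outfile} dereferences the variable whose name is stored in outfile, so INTEGRATION resolves to the path held in INTEGRATION_PACKAGE_COMPONENTS. A minimal standalone demonstration of the idiom:

#!/bin/bash
# Indirect expansion demo mirroring the removed write-out loop.
INTEGRATION_PACKAGE_COMPONENTS=/tmp/integration.json
INPUT_PACKAGE_COMPONENTS=/tmp/input.json
for package_type in INTEGRATION INPUT; do
  outfile="${package_type}_PACKAGE_COMPONENTS"
  echo "${package_type} components go to: ${!outfile}"
done
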
@@ -1,164 +0,0 @@
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
{% from 'allowed_states.map.jinja' import allowed_states %}
|
||||
{% if sls.split('.')[0] in allowed_states %}
|
||||
{% from 'vars/globals.map.jinja' import GLOBALS %}
|
||||
{% from 'elasticsearch/config.map.jinja' import ELASTICSEARCHMERGED %}
|
||||
{% from 'elasticsearch/template.map.jinja' import ES_INDEX_SETTINGS, SO_MANAGED_INDICES %}
|
||||
{% if GLOBALS.role != 'so-heavynode' %}
|
||||
{% from 'elasticsearch/template.map.jinja' import ALL_ADDON_SETTINGS %}
|
||||
{% endif %}
|
||||
|
||||
escomponenttemplates:
|
||||
file.recurse:
|
||||
- name: /opt/so/conf/elasticsearch/templates/component
|
||||
- source: salt://elasticsearch/templates/component
|
||||
- user: 930
|
||||
- group: 939
|
||||
- clean: True
|
||||
- onchanges_in:
|
||||
- file: so-elasticsearch-templates-reload
|
||||
- show_changes: False
|
||||
|
||||
# Clean up legacy and non-SO managed templates from the elasticsearch/templates/index/ directory
|
||||
so_index_template_dir:
|
||||
file.directory:
|
||||
- name: /opt/so/conf/elasticsearch/templates/index
|
||||
- clean: True
|
||||
{%- if SO_MANAGED_INDICES %}
|
||||
- require:
|
||||
{%- for index in SO_MANAGED_INDICES %}
|
||||
- file: so_index_template_{{index}}
|
||||
{%- endfor %}
|
||||
{%- endif %}
|
||||
|
||||
# Auto-generate index templates for SO managed indices (directly defined in elasticsearch/defaults.yaml)
|
||||
# These index templates are for the core SO datasets and are always required
|
||||
{% for index, settings in ES_INDEX_SETTINGS.items() %}
|
||||
{% if settings.index_template is defined %}
|
||||
so_index_template_{{index}}:
|
||||
file.managed:
|
||||
- name: /opt/so/conf/elasticsearch/templates/index/{{ index }}-template.json
|
||||
- source: salt://elasticsearch/base-template.json.jinja
|
||||
- defaults:
|
||||
TEMPLATE_CONFIG: {{ settings.index_template }}
|
||||
- template: jinja
|
||||
- onchanges_in:
|
||||
- file: so-elasticsearch-templates-reload
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
|
||||
{% if GLOBALS.role != "so-heavynode" %}
|
||||
# Auto-generate optional index templates for integration | input | content packages
|
||||
# These index templates are not used by default (until user adds package to an agent policy).
|
||||
# Pre-configured with standard defaults, and incorporated into SOC configuration for user customization.
|
||||
{% for index,settings in ALL_ADDON_SETTINGS.items() %}
|
||||
{% if settings.index_template is defined %}
|
||||
addon_index_template_{{index}}:
|
||||
file.managed:
|
||||
- name: /opt/so/conf/elasticsearch/templates/addon-index/{{ index }}-template.json
|
||||
- source: salt://elasticsearch/base-template.json.jinja
|
||||
- defaults:
|
||||
TEMPLATE_CONFIG: {{ settings.index_template }}
|
||||
- template: jinja
|
||||
- show_changes: False
|
||||
- onchanges_in:
|
||||
- file: addon-elasticsearch-templates-reload
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
|
||||
{% if GLOBALS.role in GLOBALS.manager_roles %}
|
||||
so-es-cluster-settings:
|
||||
cmd.run:
|
||||
- name: /usr/sbin/so-elasticsearch-cluster-settings
|
||||
- cwd: /opt/so
|
||||
- template: jinja
|
||||
- require:
|
||||
- docker_container: so-elasticsearch
|
||||
- file: elasticsearch_sbin_jinja
|
||||
- http: wait_for_so-elasticsearch
|
||||
{% endif %}
|
||||
|
||||
# heavynodes will only load ILM policies for SO managed indices. (Indicies defined in elasticsearch/defaults.yaml)
|
||||
so-elasticsearch-ilm-policy-load:
|
||||
cmd.run:
|
||||
- name: /usr/sbin/so-elasticsearch-ilm-policy-load
|
||||
- cwd: /opt/so
|
||||
- require:
|
||||
- docker_container: so-elasticsearch
|
||||
- file: so-elasticsearch-ilm-policy-load-script
|
||||
- onchanges:
|
||||
- file: so-elasticsearch-ilm-policy-load-script
|
||||
|
||||
so-elasticsearch-templates-reload:
|
||||
file.absent:
|
||||
- name: /opt/so/state/estemplates.txt
|
||||
|
||||
addon-elasticsearch-templates-reload:
|
||||
file.absent:
|
||||
- name: /opt/so/state/addon_estemplates.txt
|
||||
|
||||
# so-elasticsearch-templates-load will have its first successful run during the 'so-elastic-fleet-setup' script
so-elasticsearch-templates:
  cmd.run:
{%- if GLOBALS.role == "so-heavynode" %}
    - name: /usr/sbin/so-elasticsearch-templates-load --heavynode
{%- else %}
    - name: /usr/sbin/so-elasticsearch-templates-load
{%- endif %}
    - cwd: /opt/so
    - template: jinja
    - require:
      - docker_container: so-elasticsearch
      - file: elasticsearch_sbin_jinja

so-elasticsearch-pipelines:
  cmd.run:
    - name: /usr/sbin/so-elasticsearch-pipelines {{ GLOBALS.hostname }}
    - require:
      - docker_container: so-elasticsearch
      - file: so-elasticsearch-pipelines-script

so-elasticsearch-roles-load:
  cmd.run:
    - name: /usr/sbin/so-elasticsearch-roles-load
    - cwd: /opt/so
    - template: jinja
    - require:
      - docker_container: so-elasticsearch
      - file: elasticsearch_sbin_jinja

{% if grains.role in ['so-managersearch', 'so-manager', 'so-managerhype'] %}
{% set ap = "absent" %}
{% endif %}
{% if grains.role in ['so-eval', 'so-standalone', 'so-heavynode'] %}
{% if ELASTICSEARCHMERGED.index_clean %}
{% set ap = "present" %}
{% else %}
{% set ap = "absent" %}
{% endif %}
{% endif %}
{% if grains.role in ['so-eval', 'so-standalone', 'so-managersearch', 'so-heavynode', 'so-manager'] %}
so-elasticsearch-indices-delete:
  cron.{{ap}}:
    - name: /usr/sbin/so-elasticsearch-indices-delete > /opt/so/log/elasticsearch/cron-elasticsearch-indices-delete.log 2>&1
    - identifier: so-elasticsearch-indices-delete
    - user: root
    - minute: '*/5'
    - hour: '*'
    - daymonth: '*'
    - month: '*'
    - dayweek: '*'
{% endif %}

{% else %}

{{sls}}_state_not_allowed:
  test.fail_without_changes:
    - name: {{sls}}_state_not_allowed

{% endif %}
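The two file.absent reload states above force a template reload by deleting the loader's success marker; the loader scripts later in this diff recreate the marker only after a clean run. A minimal sketch of that statefile pattern (paths from this diff; the condensed logic is illustrative, not the full script):

    #!/bin/bash
    STATE_FILE_SUCCESS=/opt/so/state/estemplates.txt

    if [[ -f "$STATE_FILE_SUCCESS" ]]; then
        echo "Templates already loaded"    # subsequent highstates are a no-op
        exit 0
    fi

    load_failures=0
    # ... attempt each template load, incrementing load_failures on error ...

    if [[ $load_failures -eq 0 ]]; then
        touch "$STATE_FILE_SUCCESS"        # the next run short-circuits
    fi                                     # otherwise the next highstate retries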
@@ -66,8 +66,6 @@ so-elasticsearch-ilm-policy-load-script:
    - group: 939
    - mode: 754
    - template: jinja
    - defaults:
        GLOBALS: {{ GLOBALS }}
    - show_changes: False

so-elasticsearch-pipelines-script:
@@ -93,13 +91,6 @@ estemplatedir:
    - group: 939
    - makedirs: True

esaddontemplatedir:
  file.directory:
    - name: /opt/so/conf/elasticsearch/templates/addon-index
    - user: 930
    - group: 939
    - makedirs: True

esrolesdir:
  file.directory:
    - name: /opt/so/conf/elasticsearch/roles

@@ -1,6 +1,6 @@
elasticsearch:
  enabled: false
  version: 9.3.3
  version: 9.0.8
  index_clean: true
  vm:
    max_map_count: 1048576

+122
-14
@@ -10,6 +10,8 @@
{% from 'elasticsearch/config.map.jinja' import ELASTICSEARCH_NODES %}
{% from 'elasticsearch/config.map.jinja' import ELASTICSEARCH_SEED_HOSTS %}
{% from 'elasticsearch/config.map.jinja' import ELASTICSEARCHMERGED %}
{% set TEMPLATES = salt['pillar.get']('elasticsearch:templates', {}) %}
{% from 'elasticsearch/template.map.jinja' import ES_INDEX_SETTINGS %}

include:
  - ca
@@ -17,9 +19,6 @@ include:
  - elasticsearch.ssl
  - elasticsearch.config
  - elasticsearch.sostatus
{%- if GLOBALS.role != 'so-searchode' %}
  - elasticsearch.cluster
{%- endif %}

so-elasticsearch:
  docker_container.running:
@@ -107,19 +106,128 @@ delete_so-elasticsearch_so-status.disabled:
    - name: /opt/so/conf/so-status/so-status.conf
    - regex: ^so-elasticsearch$

wait_for_so-elasticsearch:
  http.wait_for_successful_query:
    - name: "https://localhost:9200/"
    - username: 'so_elastic'
    - password: '{{ ELASTICSEARCHMERGED.auth.users.so_elastic_user.pass }}'
    - ssl: True
    - verify_ssl: False
    - status: 200
    - wait_for: 300
    - request_interval: 15
    - backend: requests
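# The wait_for_so-elasticsearch state above polls the Elasticsearch API every
# 15 seconds for up to 300 seconds and succeeds on the first HTTP 200. As a
# hedged shell sketch (illustrative only, not part of this state file, with
# $PASS standing in for the so_elastic password), it is roughly:
#   curl -sk -u "so_elastic:$PASS" --retry 20 --retry-delay 15 --retry-all-errors https://localhost:9200/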
{% if GLOBALS.role != "so-searchnode" %}
|
||||
escomponenttemplates:
|
||||
file.recurse:
|
||||
- name: /opt/so/conf/elasticsearch/templates/component
|
||||
- source: salt://elasticsearch/templates/component
|
||||
- user: 930
|
||||
- group: 939
|
||||
- clean: True
|
||||
- onchanges_in:
|
||||
- file: so-elasticsearch-templates-reload
|
||||
- show_changes: False
|
||||
|
||||
# Auto-generate templates from defaults file
|
||||
{% for index, settings in ES_INDEX_SETTINGS.items() %}
|
||||
{% if settings.index_template is defined %}
|
||||
es_index_template_{{index}}:
|
||||
file.managed:
|
||||
- name: /opt/so/conf/elasticsearch/templates/index/{{ index }}-template.json
|
||||
- source: salt://elasticsearch/base-template.json.jinja
|
||||
- defaults:
|
||||
TEMPLATE_CONFIG: {{ settings.index_template }}
|
||||
- template: jinja
|
||||
- show_changes: False
|
||||
- onchanges_in:
|
||||
- file: so-elasticsearch-templates-reload
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
|
||||
{% if TEMPLATES %}
|
||||
# Sync custom templates to /opt/so/conf/elasticsearch/templates
|
||||
{% for TEMPLATE in TEMPLATES %}
|
||||
es_template_{{TEMPLATE.split('.')[0] | replace("/","_") }}:
|
||||
file.managed:
|
||||
- source: salt://elasticsearch/templates/index/{{TEMPLATE}}
|
||||
{% if 'jinja' in TEMPLATE.split('.')[-1] %}
|
||||
- name: /opt/so/conf/elasticsearch/templates/index/{{TEMPLATE.split('/')[1] | replace(".jinja", "")}}
|
||||
- template: jinja
|
||||
{% else %}
|
||||
- name: /opt/so/conf/elasticsearch/templates/index/{{TEMPLATE.split('/')[1]}}
|
||||
{% endif %}
|
||||
- user: 930
|
||||
- group: 939
|
||||
- show_changes: False
|
||||
- onchanges_in:
|
||||
- file: so-elasticsearch-templates-reload
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
|
||||
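# Worked example for the custom-template loop above, using an assumed pillar
# value (not from this diff): if elasticsearch:templates contains
# 'index/custom.json.jinja', the state id renders as es_template_index_custom,
# the managed file becomes
# /opt/so/conf/elasticsearch/templates/index/custom.json, and the source is
# rendered through jinja because the file name ends in .jinja.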
{% if GLOBALS.role in GLOBALS.manager_roles %}
so-es-cluster-settings:
  cmd.run:
    - name: /usr/sbin/so-elasticsearch-cluster-settings
    - cwd: /opt/so
    - template: jinja
    - require:
      - docker_container: so-elasticsearch
      - file: elasticsearch_sbin_jinja
{% endif %}

so-elasticsearch-ilm-policy-load:
  cmd.run:
    - name: /usr/sbin/so-elasticsearch-ilm-policy-load
    - cwd: /opt/so
    - require:
      - docker_container: so-elasticsearch
      - file: so-elasticsearch-ilm-policy-load-script
    - onchanges:
      - file: so-elasticsearch-ilm-policy-load-script

so-elasticsearch-templates-reload:
  file.absent:
    - name: /opt/so/state/estemplates.txt

so-elasticsearch-templates:
  cmd.run:
    - name: /usr/sbin/so-elasticsearch-templates-load
    - cwd: /opt/so
    - template: jinja
    - require:
      - docker_container: so-elasticsearch
      - file: elasticsearch_sbin_jinja

so-elasticsearch-pipelines:
  cmd.run:
    - name: /usr/sbin/so-elasticsearch-pipelines {{ GLOBALS.hostname }}
    - require:
      - docker_container: so-elasticsearch
      - file: so-elasticsearch-pipelines-script

so-elasticsearch-roles-load:
  cmd.run:
    - name: /usr/sbin/so-elasticsearch-roles-load
    - cwd: /opt/so
    - template: jinja
    - require:
      - docker_container: so-elasticsearch
      - file: elasticsearch_sbin_jinja

{% if grains.role in ['so-managersearch', 'so-manager', 'so-managerhype'] %}
{% set ap = "absent" %}
{% endif %}
{% if grains.role in ['so-eval', 'so-standalone', 'so-heavynode'] %}
{% if ELASTICSEARCHMERGED.index_clean %}
{% set ap = "present" %}
{% else %}
{% set ap = "absent" %}
{% endif %}
{% endif %}
{% if grains.role in ['so-eval', 'so-standalone', 'so-managersearch', 'so-heavynode', 'so-manager'] %}
so-elasticsearch-indices-delete:
  cron.{{ap}}:
    - name: /usr/sbin/so-elasticsearch-indices-delete > /opt/so/log/elasticsearch/cron-elasticsearch-indices-delete.log 2>&1
    - identifier: so-elasticsearch-indices-delete
    - user: root
    - minute: '*/5'
    - hour: '*'
    - daymonth: '*'
    - month: '*'
    - dayweek: '*'
{% endif %}

{% endif %}

{% else %}


+13
-74
@@ -10,28 +10,24 @@
  "processors": [
    {
      "set": {
        "tag": "set_ecs_version_f5923549",
        "field": "ecs.version",
        "value": "8.17.0"
      }
    },
    {
      "set": {
        "tag": "set_observer_vendor_ad9d35cc",
        "field": "observer.vendor",
        "value": "netgate"
      }
    },
    {
      "set": {
        "tag": "set_observer_type_5dddf3ba",
        "field": "observer.type",
        "value": "firewall"
      }
    },
    {
      "rename": {
        "tag": "rename_message_to_event_original_56a77271",
        "field": "message",
        "target_field": "event.original",
        "ignore_missing": true,
@@ -40,14 +36,12 @@
    },
    {
      "set": {
        "tag": "set_event_kind_de80643c",
        "field": "event.kind",
        "value": "event"
      }
    },
    {
      "set": {
        "tag": "set_event_timezone_4ca44cac",
        "field": "event.timezone",
        "value": "{{{_tmp.tz_offset}}}",
        "if": "ctx._tmp?.tz_offset != null && ctx._tmp?.tz_offset != 'local'"
@@ -55,7 +49,6 @@
    },
    {
      "grok": {
        "tag": "grok_event_original_27d9c8c7",
        "description": "Parse syslog header",
        "field": "event.original",
        "patterns": [
@@ -79,7 +72,6 @@
    },
    {
      "date": {
        "tag": "date__tmp_timestamp8601_to_timestamp_6ac9d3ce",
        "if": "ctx._tmp.timestamp8601 != null",
        "field": "_tmp.timestamp8601",
        "target_field": "@timestamp",
@@ -90,7 +82,6 @@
    },
    {
      "date": {
        "tag": "date__tmp_timestamp_to_timestamp_f21e536e",
        "if": "ctx.event?.timezone != null && ctx._tmp?.timestamp != null",
        "field": "_tmp.timestamp",
        "target_field": "@timestamp",
@@ -104,7 +95,6 @@
    },
    {
      "grok": {
        "tag": "grok_process_name_cef3d489",
        "description": "Set Event Provider",
        "field": "process.name",
        "patterns": [
@@ -117,83 +107,71 @@
    },
    {
      "pipeline": {
        "tag": "pipeline_e16851a7",
        "name": "logs-pfsense.log-1.25.2-firewall",
        "name": "logs-pfsense.log-1.23.1-firewall",
        "if": "ctx.event.provider == 'filterlog'"
      }
    },
    {
      "pipeline": {
        "tag": "pipeline_828590b5",
        "name": "logs-pfsense.log-1.25.2-openvpn",
        "name": "logs-pfsense.log-1.23.1-openvpn",
        "if": "ctx.event.provider == 'openvpn'"
      }
    },
    {
      "pipeline": {
        "tag": "pipeline_9d37039c",
        "name": "logs-pfsense.log-1.25.2-ipsec",
        "name": "logs-pfsense.log-1.23.1-ipsec",
        "if": "ctx.event.provider == 'charon'"
      }
    },
    {
      "pipeline": {
        "tag": "pipeline_ad56bbca",
        "name": "logs-pfsense.log-1.25.2-dhcp",
        "if": "[\"dhcpd\", \"dhclient\", \"dhcp6c\", \"dnsmasq-dhcp\"].contains(ctx.event.provider)"
        "name": "logs-pfsense.log-1.23.1-dhcp",
        "if": "[\"dhcpd\", \"dhclient\", \"dhcp6c\"].contains(ctx.event.provider)"
      }
    },
    {
      "pipeline": {
        "tag": "pipeline_dd85553d",
        "name": "logs-pfsense.log-1.25.2-unbound",
        "name": "logs-pfsense.log-1.23.1-unbound",
        "if": "ctx.event.provider == 'unbound'"
      }
    },
    {
      "pipeline": {
        "tag": "pipeline_720ed255",
        "name": "logs-pfsense.log-1.25.2-haproxy",
        "name": "logs-pfsense.log-1.23.1-haproxy",
        "if": "ctx.event.provider == 'haproxy'"
      }
    },
    {
      "pipeline": {
        "tag": "pipeline_456beba5",
        "name": "logs-pfsense.log-1.25.2-php-fpm",
        "name": "logs-pfsense.log-1.23.1-php-fpm",
        "if": "ctx.event.provider == 'php-fpm'"
      }
    },
    {
      "pipeline": {
        "tag": "pipeline_a0d89375",
        "name": "logs-pfsense.log-1.25.2-squid",
        "name": "logs-pfsense.log-1.23.1-squid",
        "if": "ctx.event.provider == 'squid'"
      }
    },
    {
      "pipeline": {
        "tag": "pipeline_c2f1ed55",
        "name": "logs-pfsense.log-1.25.2-snort",
        "name": "logs-pfsense.log-1.23.1-snort",
        "if": "ctx.event.provider == 'snort'"
      }
    },
    {
      "pipeline": {
        "tag": "pipeline_33db1c9e",
        "name": "logs-pfsense.log-1.25.2-suricata",
        "name": "logs-pfsense.log-1.23.1-suricata",
        "if": "ctx.event.provider == 'suricata'"
      }
    },
    {
      "drop": {
        "tag": "drop_9d7c46f8",
        "if": "![\"filterlog\", \"openvpn\", \"charon\", \"dhcpd\", \"dnsmasq-dhcp\", \"dhclient\", \"dhcp6c\", \"unbound\", \"haproxy\", \"php-fpm\", \"squid\", \"snort\", \"suricata\"].contains(ctx.event?.provider)"
        "if": "![\"filterlog\", \"openvpn\", \"charon\", \"dhcpd\", \"dhclient\", \"dhcp6c\", \"unbound\", \"haproxy\", \"php-fpm\", \"squid\", \"snort\", \"suricata\"].contains(ctx.event?.provider)"
      }
    },
    {
      "append": {
        "tag": "append_event_category_4780a983",
        "field": "event.category",
        "value": "network",
        "if": "ctx.network != null"
@@ -201,7 +179,6 @@
    },
    {
      "convert": {
        "tag": "convert_source_address_to_source_ip_f5632a20",
        "field": "source.address",
        "target_field": "source.ip",
        "type": "ip",
@@ -211,7 +188,6 @@
    },
    {
      "convert": {
        "tag": "convert_destination_address_to_destination_ip_f1388f0c",
        "field": "destination.address",
        "target_field": "destination.ip",
        "type": "ip",
@@ -221,7 +197,6 @@
    },
    {
      "set": {
        "tag": "set_network_type_1f1d940a",
        "field": "network.type",
        "value": "ipv6",
        "if": "ctx.source?.ip != null && ctx.source.ip.contains(\":\")"
@@ -229,7 +204,6 @@
    },
    {
      "set": {
        "tag": "set_network_type_69deca38",
        "field": "network.type",
        "value": "ipv4",
        "if": "ctx.source?.ip != null && ctx.source.ip.contains(\".\")"
@@ -237,7 +211,6 @@
    },
    {
      "geoip": {
        "tag": "geoip_source_ip_to_source_geo_da2e41b2",
        "field": "source.ip",
        "target_field": "source.geo",
        "ignore_missing": true
@@ -245,7 +218,6 @@
    },
    {
      "geoip": {
        "tag": "geoip_destination_ip_to_destination_geo_ab5e2968",
        "field": "destination.ip",
        "target_field": "destination.geo",
        "ignore_missing": true
@@ -253,7 +225,6 @@
    },
    {
      "geoip": {
        "tag": "geoip_source_ip_to_source_as_28d69883",
        "ignore_missing": true,
        "database_file": "GeoLite2-ASN.mmdb",
        "field": "source.ip",
@@ -266,7 +237,6 @@
    },
    {
      "geoip": {
        "tag": "geoip_destination_ip_to_destination_as_8a007787",
        "database_file": "GeoLite2-ASN.mmdb",
        "field": "destination.ip",
        "target_field": "destination.as",
@@ -279,7 +249,6 @@
    },
    {
      "rename": {
        "tag": "rename_source_as_asn_to_source_as_number_a917047d",
        "field": "source.as.asn",
        "target_field": "source.as.number",
        "ignore_missing": true
@@ -287,7 +256,6 @@
    },
    {
      "rename": {
        "tag": "rename_source_as_organization_name_to_source_as_organization_name_f1362d0b",
        "field": "source.as.organization_name",
        "target_field": "source.as.organization.name",
        "ignore_missing": true
@@ -295,7 +263,6 @@
    },
    {
      "rename": {
        "tag": "rename_destination_as_asn_to_destination_as_number_3b459fcd",
        "field": "destination.as.asn",
        "target_field": "destination.as.number",
        "ignore_missing": true
@@ -303,7 +270,6 @@
    },
    {
      "rename": {
        "tag": "rename_destination_as_organization_name_to_destination_as_organization_name_814bd459",
        "field": "destination.as.organization_name",
        "target_field": "destination.as.organization.name",
        "ignore_missing": true
@@ -311,14 +277,12 @@
    },
    {
      "community_id": {
        "tag": "community_id_d2308e7a",
        "target_field": "network.community_id",
        "ignore_failure": true
      }
    },
    {
      "grok": {
        "tag": "grok_observer_ingress_interface_name_968018d3",
        "field": "observer.ingress.interface.name",
        "patterns": [
          "%{DATA}.%{NONNEGINT:observer.ingress.vlan.id}"
@@ -329,7 +293,6 @@
    },
    {
      "set": {
        "tag": "set_network_vlan_id_efd4d96a",
        "field": "network.vlan.id",
        "copy_from": "observer.ingress.vlan.id",
        "ignore_empty_value": true
@@ -337,7 +300,6 @@
    },
    {
      "append": {
        "tag": "append_related_ip_c1a6356b",
        "field": "related.ip",
        "value": "{{{destination.ip}}}",
        "allow_duplicates": false,
@@ -346,7 +308,6 @@
    },
    {
      "append": {
        "tag": "append_related_ip_8121c591",
        "field": "related.ip",
        "value": "{{{source.ip}}}",
        "allow_duplicates": false,
@@ -355,7 +316,6 @@
    },
    {
      "append": {
        "tag": "append_related_ip_53b62ed8",
        "field": "related.ip",
        "value": "{{{source.nat.ip}}}",
        "allow_duplicates": false,
@@ -364,7 +324,6 @@
    },
    {
      "append": {
        "tag": "append_related_hosts_6f162628",
        "field": "related.hosts",
        "value": "{{{destination.domain}}}",
        "if": "ctx.destination?.domain != null"
@@ -372,7 +331,6 @@
    },
    {
      "append": {
        "tag": "append_related_user_c036eec2",
        "field": "related.user",
        "value": "{{{user.name}}}",
        "if": "ctx.user?.name != null"
@@ -380,7 +338,6 @@
    },
    {
      "set": {
        "tag": "set_network_direction_cb1e3125",
        "field": "network.direction",
        "value": "{{{network.direction}}}bound",
        "if": "ctx.network?.direction != null && ctx.network?.direction =~ /^(in|out)$/"
@@ -388,7 +345,6 @@
    },
    {
      "remove": {
        "tag": "remove_a82e20f2",
        "field": [
          "_tmp"
        ],
@@ -397,21 +353,11 @@
    },
    {
      "script": {
        "tag": "script_a7f2c062",
        "lang": "painless",
        "description": "This script processor iterates over the whole document to remove fields with null values.",
        "source": "void handleMap(Map map) {\n for (def x : map.values()) {\n if (x instanceof Map) {\n handleMap(x);\n } else if (x instanceof List) {\n handleList(x);\n }\n }\n map.values().removeIf(v -> v == null || (v instanceof String && v == \"-\"));\n}\nvoid handleList(List list) {\n for (def x : list) {\n if (x instanceof Map) {\n handleMap(x);\n } else if (x instanceof List) {\n handleList(x);\n }\n }\n}\nhandleMap(ctx);\n"
      }
    },
    {
      "append": {
        "tag": "append_preserve_original_event_on_error",
        "field": "tags",
        "value": "preserve_original_event",
        "allow_duplicates": false,
        "if": "ctx.error?.message != null"
      }
    },
    {
      "pipeline": {
        "name": "global@custom",
@@ -459,14 +405,7 @@
    {
      "append": {
        "field": "error.message",
        "value": "Processor '{{{ _ingest.on_failure_processor_type }}}' {{#_ingest.on_failure_processor_tag}}with tag '{{{ _ingest.on_failure_processor_tag }}}' {{/_ingest.on_failure_processor_tag}}in pipeline '{{{ _ingest.pipeline }}}' failed with message '{{{ _ingest.on_failure_message }}}'"
      }
    },
    {
      "append": {
        "field": "tags",
        "value": "preserve_original_event",
        "allow_duplicates": false
        "value": "{{{ _ingest.on_failure_message }}}"
      }
    }
  ]
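A note on the "{{{field}}}" values in the set and append processors above: Elasticsearch ingest pipelines use Mustache templating, and the triple braces insert the referenced field's value without HTML-escaping. Condensed from the processors in this pipeline (the "if" guard is an assumption matching the pipeline's style), the pattern is:

    {
      "append": {
        "field": "related.ip",
        "value": "{{{source.ip}}}",
        "allow_duplicates": false,
        "if": "ctx.source?.ip != null"
      }
    }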
@@ -45,7 +45,3 @@ appender.rolling_json.strategy.action.condition.nested_condition.age = 1D
rootLogger.level = info
rootLogger.appenderRef.rolling.ref = rolling
rootLogger.appenderRef.rolling_json.ref = rolling_json

# Suppress NotEntitledException WARNs (ES 9.3.3 bug)
logger.entitlement_security.name = org.elasticsearch.entitlement.runtime.policy.PolicyManager.x-pack-security.org.elasticsearch.security.org.elasticsearch.xpack.security
logger.entitlement_security.level = error
@@ -14,42 +14,15 @@

{% set ES_INDEX_SETTINGS_ORIG = ELASTICSEARCHDEFAULTS.elasticsearch.index_settings %}

{% set ALL_ADDON_INTEGRATION_DEFAULTS = {} %}
{% set ALL_ADDON_SETTINGS_ORIG = {} %}
{% set ALL_ADDON_SETTINGS_GLOBAL_OVERRIDES = {} %}
{% set ALL_ADDON_SETTINGS = {} %}
{# start generation of integration default index_settings #}
{% if salt['file.file_exists']('/opt/so/state/esfleet_component_templates.json') %}
{# import integration type defaults #}
{% if salt['file.file_exists']('/opt/so/state/esfleet_package_components.json') %}
{% set check_integration_package_components = salt['file.stats']('/opt/so/state/esfleet_package_components.json') %}
{% if check_integration_package_components.size > 1 %}
{% from 'elasticfleet/integration-defaults.map.jinja' import ADDON_INTEGRATION_DEFAULTS %}
{% do ALL_ADDON_INTEGRATION_DEFAULTS.update(ADDON_INTEGRATION_DEFAULTS) %}
{% endif %}
{% endif %}

{# import input type defaults #}
{% if salt['file.file_exists']('/opt/so/state/esfleet_input_package_components.json') %}
{% set check_input_package_components = salt['file.stats']('/opt/so/state/esfleet_input_package_components.json') %}
{% if check_input_package_components.size > 1 %}
{% from 'elasticfleet/input-defaults.map.jinja' import ADDON_INPUT_INTEGRATION_DEFAULTS %}
{% do ALL_ADDON_INTEGRATION_DEFAULTS.update(ADDON_INPUT_INTEGRATION_DEFAULTS) %}
{% endif %}
{% endif %}

{# import content type defaults #}
{% if salt['file.file_exists']('/opt/so/state/esfleet_content_package_components.json') %}
{% set check_content_package_components = salt['file.stats']('/opt/so/state/esfleet_content_package_components.json') %}
{% if check_content_package_components.size > 1 %}
{% from 'elasticfleet/content-defaults.map.jinja' import ADDON_CONTENT_INTEGRATION_DEFAULTS %}
{% do ALL_ADDON_INTEGRATION_DEFAULTS.update(ADDON_CONTENT_INTEGRATION_DEFAULTS) %}
{% endif %}
{% endif %}

{% for index, settings in ALL_ADDON_INTEGRATION_DEFAULTS.items() %}
{% do ALL_ADDON_SETTINGS_ORIG.update({index: settings}) %}
{% endfor %}
{% if salt['file.file_exists']('/opt/so/state/esfleet_package_components.json') and salt['file.file_exists']('/opt/so/state/esfleet_component_templates.json') %}
{% set check_package_components = salt['file.stats']('/opt/so/state/esfleet_package_components.json') %}
{% if check_package_components.size > 1 %}
{% from 'elasticfleet/integration-defaults.map.jinja' import ADDON_INTEGRATION_DEFAULTS %}
{% for index, settings in ADDON_INTEGRATION_DEFAULTS.items() %}
{% do ES_INDEX_SETTINGS_ORIG.update({index: settings}) %}
{% endfor %}
{% endif %}
{% endif %}
{# end generation of integration default index_settings #}

@@ -58,33 +31,25 @@
{% do ES_INDEX_SETTINGS_GLOBAL_OVERRIDES.update({index: salt['defaults.merge'](ELASTICSEARCHDEFAULTS.elasticsearch.index_settings[index], PILLAR_GLOBAL_OVERRIDES, in_place=False)}) %}
{% endfor %}

{% if ALL_ADDON_SETTINGS_ORIG.keys() | length > 0 %}
{% for index in ALL_ADDON_SETTINGS_ORIG.keys() %}
{% do ALL_ADDON_SETTINGS_GLOBAL_OVERRIDES.update({index: salt['defaults.merge'](ALL_ADDON_SETTINGS_ORIG[index], PILLAR_GLOBAL_OVERRIDES, in_place=False)}) %}
{% endfor %}
{% endif %}

{% set ES_INDEX_SETTINGS = {} %}
{% macro create_final_index_template(DEFINED_SETTINGS, GLOBAL_OVERRIDES, FINAL_INDEX_SETTINGS) %}

{% do GLOBAL_OVERRIDES.update(salt['defaults.merge'](GLOBAL_OVERRIDES, ES_INDEX_PILLAR, in_place=False)) %}
{% for index, settings in GLOBAL_OVERRIDES.items() %}
{% do ES_INDEX_SETTINGS_GLOBAL_OVERRIDES.update(salt['defaults.merge'](ES_INDEX_SETTINGS_GLOBAL_OVERRIDES, ES_INDEX_PILLAR, in_place=False)) %}
{% for index, settings in ES_INDEX_SETTINGS_GLOBAL_OVERRIDES.items() %}

{# prevent this action from being performed on custom defined indices. #}
{# the custom defined index is not present in either of the dictionaries and fails to render. #}
{% if index in DEFINED_SETTINGS and index in GLOBAL_OVERRIDES %}
{% if index in ES_INDEX_SETTINGS_ORIG and index in ES_INDEX_SETTINGS_GLOBAL_OVERRIDES %}

{# don't merge policy from the global_overrides if policy isn't defined in the original index settings #}
{# this will prevent so-elasticsearch-ilm-policy-load from trying to load policy on non-ILM managed indices #}
{% if not DEFINED_SETTINGS[index].policy is defined and GLOBAL_OVERRIDES[index].policy is defined %}
{% do GLOBAL_OVERRIDES[index].pop('policy') %}
{% if not ES_INDEX_SETTINGS_ORIG[index].policy is defined and ES_INDEX_SETTINGS_GLOBAL_OVERRIDES[index].policy is defined %}
{% do ES_INDEX_SETTINGS_GLOBAL_OVERRIDES[index].pop('policy') %}
{% endif %}

{# this prevents an index from inheriting a policy phase from global overrides if it wasn't defined in the defaults. #}
{% if GLOBAL_OVERRIDES[index].policy is defined %}
{% for phase in GLOBAL_OVERRIDES[index].policy.phases.copy() %}
{% if DEFINED_SETTINGS[index].policy.phases[phase] is not defined %}
{% do GLOBAL_OVERRIDES[index].policy.phases.pop(phase) %}
{% if ES_INDEX_SETTINGS_GLOBAL_OVERRIDES[index].policy is defined %}
{% for phase in ES_INDEX_SETTINGS_GLOBAL_OVERRIDES[index].policy.phases.copy() %}
{% if ES_INDEX_SETTINGS_ORIG[index].policy.phases[phase] is not defined %}
{% do ES_INDEX_SETTINGS_GLOBAL_OVERRIDES[index].policy.phases.pop(phase) %}
{% endif %}
{% endfor %}
{% endif %}
@@ -146,14 +111,5 @@
{% endfor %}
{% endif %}

{% do FINAL_INDEX_SETTINGS.update({index | replace("_x_", "."): GLOBAL_OVERRIDES[index]}) %}
{% endfor %}
{% endmacro %}

{{ create_final_index_template(ES_INDEX_SETTINGS_ORIG, ES_INDEX_SETTINGS_GLOBAL_OVERRIDES, ES_INDEX_SETTINGS) }}
{{ create_final_index_template(ALL_ADDON_SETTINGS_ORIG, ALL_ADDON_SETTINGS_GLOBAL_OVERRIDES, ALL_ADDON_SETTINGS) }}

{% set SO_MANAGED_INDICES = [] %}
{% for index, settings in ES_INDEX_SETTINGS.items() %}
{% do SO_MANAGED_INDICES.append(index) %}
{% do ES_INDEX_SETTINGS.update({index | replace("_x_", "."): ES_INDEX_SETTINGS_GLOBAL_OVERRIDES[index]}) %}
{% endfor %}
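The "_x_" replacement above appears to convert key-safe names back to real index names: dots are avoided in the defaults/pillar keys, so "_x_" stands in for "." until the final dictionary is built. The same convention shows up later in this diff (the Kafka annotation key ssl_x_keystore_x_type is titled ssl.keystore.type). With a hypothetical key, the expression used above behaves as:

    {{ "so-logs-system_x_security" | replace("_x_", ".") }}   {# -> so-logs-system.security #}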
@@ -6,19 +6,8 @@
# Elastic License 2.0.

. /usr/sbin/so-common

if [[ -z "$1" ]]; then
  if output=$(so-elasticsearch-query "_component_template" --retry 3 --retry-delay 1 --fail); then
    jq '[.component_templates[] | .name] | sort' <<< "$output"
  else
    echo "Failed to retrieve component templates from Elasticsearch."
    exit 1
  fi
if [ "$1" == "" ]; then
  curl -K /opt/so/conf/elasticsearch/curl.config -s -k -L https://localhost:9200/_component_template | jq '.component_templates[] |.name'| sort
else
  if output=$(so-elasticsearch-query "_component_template/$1" --retry 3 --retry-delay 1 --fail); then
    jq <<< "$output"
  else
    echo "Failed to retrieve component template '$1' from Elasticsearch."
    exit 1
  fi
  curl -K /opt/so/conf/elasticsearch/curl.config -s -k -L https://localhost:9200/_component_template/$1 | jq
fi
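Hedged usage sketch for the rewritten listing script above (the template names are hypothetical; the output shape follows the jq filters in this diff):

    $ so-elasticsearch-component-templates-list
    [
      "ecs-base-mappings",
      "so-common"
    ]

    $ so-elasticsearch-component-templates-list so-common
    {
      "component_templates": [ ... ]
    }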
@@ -1,253 +0,0 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

. /usr/sbin/so-common

SO_STATEFILE_SUCCESS=/opt/so/state/estemplates.txt
ADDON_STATEFILE_SUCCESS=/opt/so/state/addon_estemplates.txt
ELASTICSEARCH_TEMPLATES_DIR="/opt/so/conf/elasticsearch/templates"
SO_TEMPLATES_DIR="${ELASTICSEARCH_TEMPLATES_DIR}/index"
ADDON_TEMPLATES_DIR="${ELASTICSEARCH_TEMPLATES_DIR}/addon-index"
SO_LOAD_FAILURES=0
ADDON_LOAD_FAILURES=0
SO_LOAD_FAILURES_NAMES=()
ADDON_LOAD_FAILURES_NAMES=()
IS_HEAVYNODE="false"
FORCE="false"
VERBOSE="false"
SHOULD_EXIT_ON_FAILURE="true"

# If soup is running, ignore errors
pgrep soup >/dev/null && SHOULD_EXIT_ON_FAILURE="false"

while [[ $# -gt 0 ]]; do
    case "$1" in
        --heavynode)
            IS_HEAVYNODE="true"
            ;;
        --force)
            FORCE="true"
            ;;
        --verbose)
            VERBOSE="true"
            ;;
        *)
            echo "Usage: $0 [options]"
            echo "Options:"
            echo "  --heavynode  Only loads index templates specific to heavynodes"
            echo "  --force      Force reload all templates regardless of statefiles (default: false)"
            echo "  --verbose    Enable verbose output"
            exit 1
            ;;
    esac
    shift
done

load_template() {
    local uri="$1"
    local file="$2"

    echo "Loading template file $file"
    if ! output=$(retry 3 3 "so-elasticsearch-query $uri -d@$file -XPUT" "{\"acknowledged\":true}"); then
        echo "$output"

        return 1

    elif [[ "$VERBOSE" == "true" ]]; then
        echo "$output"
    fi

}

check_required_component_template_exists() {
    local required
    local missing
    local file=$1

    required=$(jq '[((.composed_of //[]) - (.ignore_missing_component_templates // []))[]]' "$file")
    missing=$(jq -n --argjson required "$required" --argjson component_templates "$component_templates" '(($required) - ($component_templates))')

    if [[ $(jq length <<<"$missing") -gt 0 ]]; then

        return 1
    fi
}

check_heavynode_compatiable_index_template() {
    # The only templates that are relevant to heavynodes are from datasets defined in elasticagent/files/elastic-agent.yml.jinja.
    # Heavynodes do not have fleet server packages installed and do not support elastic agents reporting directly to them.
    local -A heavynode_index_templates=(
        ["so-import"]=1
        ["so-syslog"]=1
        ["so-logs-soc"]=1
        ["so-suricata"]=1
        ["so-suricata.alerts"]=1
        ["so-zeek"]=1
        ["so-strelka"]=1
    )

    local template_name="$1"

    if [[ ! -v heavynode_index_templates["$template_name"] ]]; then

        return 1
    fi

}

load_component_templates() {
    local printed_name="$1"
    local pattern="${ELASTICSEARCH_TEMPLATES_DIR}/component/$2"
    local append_mappings="${3:-"false"}"

    # current state of nullglob shell option
    shopt -q nullglob && nullglob_set=1 || nullglob_set=0

    shopt -s nullglob
    echo -e "\nLoading $printed_name component templates...\n"
    for component in "$pattern"/*.json; do
        tmpl_name=$(basename "${component%.json}")

        if [[ "$append_mappings" == "true" ]]; then
            # avoid duplicating "-mappings" if it already exists in the component template filename
            tmpl_name="${tmpl_name%-mappings}-mappings"
        fi

        if ! load_template "_component_template/${tmpl_name}" "$component"; then
            SO_LOAD_FAILURES=$((SO_LOAD_FAILURES + 1))
            SO_LOAD_FAILURES_NAMES+=("$component")
        fi
    done

    # restore nullglob shell option if needed
    if [[ $nullglob_set -eq 1 ]]; then
        shopt -u nullglob
    fi
}

check_elasticsearch_responsive() {
    # Cannot load templates if Elasticsearch is not responding.
    # NOTE: Slightly faster exit w/ failure than the previous "retry 240 1"; if there is a problem with Elasticsearch,
    # the script should exit sooner rather than hang at the 'so-elasticsearch-templates' salt state.
    retry 3 15 "so-elasticsearch-query / --output /dev/null --fail" ||
        fail "Elasticsearch is not responding. Please review Elasticsearch logs /opt/so/log/elasticsearch/securityonion.log for more details. Additionally, consider running so-elasticsearch-troubleshoot."
}

if [[ "$FORCE" == "true" || ! -f "$SO_STATEFILE_SUCCESS" ]]; then
    check_elasticsearch_responsive

    if [[ "$IS_HEAVYNODE" == "false" ]]; then
        # TODO: Better way to check if fleet server is installed vs checking for Elastic Defend component template.
        fleet_check="logs-endpoint.alerts@package"
        if ! so-elasticsearch-query "_component_template/$fleet_check" --output /dev/null --retry 5 --retry-delay 3 --fail; then
            # This check prevents so-elasticsearch-templates-load from running before so-elastic-fleet-setup has run.
            echo -e "\nPackage $fleet_check not yet installed. Fleet Server may not be fully configured yet."
            # Fleet Server is required because some SO index templates depend on components installed via
            # specific integrations, e.g. Elastic Defend. These are components that we do not manually create / manage
            # via /opt/so/saltstack/salt/elasticsearch/templates/component/

            exit 0
        fi
    fi

    # load_component_templates "Name" "directory" "append '-mappings'?"
    load_component_templates "ECS" "ecs" "true"
    load_component_templates "Elastic Agent" "elastic-agent"
    load_component_templates "Security Onion" "so"

    component_templates=$(so-elasticsearch-component-templates-list)
    echo -e "Loading Security Onion index templates...\n"
    for so_idx_tmpl in "${SO_TEMPLATES_DIR}"/*.json; do
        tmpl_name=$(basename "${so_idx_tmpl%-template.json}")

        if [[ "$IS_HEAVYNODE" == "true" ]]; then
            # TODO: Better way to load only heavynode specific templates
            if ! check_heavynode_compatiable_index_template "$tmpl_name"; then
                if [[ "$VERBOSE" == "true" ]]; then
                    echo "Skipping over $so_idx_tmpl, template is not a heavynode specific index template."
                fi

                continue
            fi
        fi

        if check_required_component_template_exists "$so_idx_tmpl"; then
            if ! load_template "_index_template/$tmpl_name" "$so_idx_tmpl"; then
                SO_LOAD_FAILURES=$((SO_LOAD_FAILURES + 1))
                SO_LOAD_FAILURES_NAMES+=("$so_idx_tmpl")
            fi
        else
            echo "Skipping over $so_idx_tmpl due to missing required component template(s)."
            SO_LOAD_FAILURES=$((SO_LOAD_FAILURES + 1))
            SO_LOAD_FAILURES_NAMES+=("$so_idx_tmpl")

            continue
        fi
    done

    if [[ $SO_LOAD_FAILURES -eq 0 ]]; then
        echo "All Security Onion core templates loaded successfully."

        touch "$SO_STATEFILE_SUCCESS"
    else
        echo "Encountered $SO_LOAD_FAILURES failure(s) loading templates:"
        for failed_template in "${SO_LOAD_FAILURES_NAMES[@]}"; do
            echo "  - $failed_template"
        done
        if [[ "$SHOULD_EXIT_ON_FAILURE" == "true" ]]; then
            fail "Failed to load all Security Onion core templates successfully."
        fi
    fi
else

    echo "Security Onion core templates already loaded"
fi

# Start loading addon templates
if [[ (-d "$ADDON_TEMPLATES_DIR" && -f "$SO_STATEFILE_SUCCESS" && "$IS_HEAVYNODE" == "false" && ! -f "$ADDON_STATEFILE_SUCCESS") || (-d "$ADDON_TEMPLATES_DIR" && "$IS_HEAVYNODE" == "false" && "$FORCE" == "true") ]]; then

    check_elasticsearch_responsive

    echo -e "\nLoading addon integration index templates...\n"
    component_templates=$(so-elasticsearch-component-templates-list)

    for addon_idx_tmpl in "${ADDON_TEMPLATES_DIR}"/*.json; do
        tmpl_name=$(basename "${addon_idx_tmpl%-template.json}")

        if check_required_component_template_exists "$addon_idx_tmpl"; then
            if ! load_template "_index_template/${tmpl_name}" "$addon_idx_tmpl"; then
                ADDON_LOAD_FAILURES=$((ADDON_LOAD_FAILURES + 1))
                ADDON_LOAD_FAILURES_NAMES+=("$addon_idx_tmpl")
            fi
        else
            echo "Skipping over $addon_idx_tmpl due to missing required component template(s)."
            ADDON_LOAD_FAILURES=$((ADDON_LOAD_FAILURES + 1))
            ADDON_LOAD_FAILURES_NAMES+=("$addon_idx_tmpl")

            continue
        fi
    done

    if [[ $ADDON_LOAD_FAILURES -eq 0 ]]; then
        echo "All addon integration templates loaded successfully."

        touch "$ADDON_STATEFILE_SUCCESS"
    else
        echo "Encountered $ADDON_LOAD_FAILURES failure(s) loading addon integration templates:"
        for failed_template in "${ADDON_LOAD_FAILURES_NAMES[@]}"; do
            echo "  - $failed_template"
        done
        if [[ "$SHOULD_EXIT_ON_FAILURE" == "true" ]]; then
            fail "Failed to load all addon integration templates successfully."
        fi
    fi

elif [[ ! -f "$SO_STATEFILE_SUCCESS" && "$IS_HEAVYNODE" == "false" ]]; then
    echo "Skipping loading addon integration templates until Security Onion core templates have been loaded."

elif [[ -f "$ADDON_STATEFILE_SUCCESS" && "$IS_HEAVYNODE" == "false" && "$FORCE" == "false" ]]; then
    echo "Addon integration templates already loaded"
fi
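The dependency check in check_required_component_template_exists (removed above) is a jq set difference: required = composed_of minus ignore_missing_component_templates, then missing = required minus the component templates already installed in the cluster. A standalone sketch with hypothetical values:

    required='["so-common","logs-foo@package"]'
    component_templates='["so-common"]'
    jq -n --argjson required "$required" \
          --argjson component_templates "$component_templates" \
          '(($required) - ($component_templates))'
    # => ["logs-foo@package"]  (non-empty, so the index template would be skipped)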
@@ -7,9 +7,6 @@
. /usr/sbin/so-common

{%- from 'elasticsearch/template.map.jinja' import ES_INDEX_SETTINGS %}
{%- if GLOBALS.role != "so-heavynode" %}
{%- from 'elasticsearch/template.map.jinja' import ALL_ADDON_SETTINGS %}
{%- endif %}

{%- for index, settings in ES_INDEX_SETTINGS.items() %}
{%- if settings.policy is defined %}
@@ -36,13 +33,3 @@
{%- endif %}
{%- endfor %}
echo
{%- if GLOBALS.role != "so-heavynode" %}
{%- for index, settings in ALL_ADDON_SETTINGS.items() %}
{%- if settings.policy is defined %}
echo
echo "Setting up {{ index }}-logs policy..."
curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -s -k -L -X PUT "https://localhost:9200/_ilm/policy/{{ index }}-logs" -H 'Content-Type: application/json' -d'{ "policy": {{ settings.policy | tojson(true) }} }'
echo
{%- endif %}
{%- endfor %}
{%- endif %}

@@ -0,0 +1,165 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{%- import_yaml 'elasticfleet/defaults.yaml' as ELASTICFLEETDEFAULTS %}
{% from 'vars/globals.map.jinja' import GLOBALS %}

STATE_FILE_INITIAL=/opt/so/state/estemplates_initial_load_attempt.txt
STATE_FILE_SUCCESS=/opt/so/state/estemplates.txt

if [[ -f $STATE_FILE_INITIAL ]]; then
    # The initial template load has already run. As this is a subsequent load, all dependencies should
    # already be satisfied. Therefore, immediately exit/abort this script upon any template load failure
    # since this is an unrecoverable failure.
    should_exit_on_failure=1
else
    # This is the initial template load, and there likely are some components not yet set up in Elasticsearch.
    # Therefore load as many templates as possible at this time and if an error occurs proceed to the next
    # template. But if at least one template fails to load, do not mark the templates as having been loaded.
    # This will allow the next load to resume the load of the templates that failed to load initially.
    should_exit_on_failure=0
    echo "This is the initial template load"
fi

# If soup is running, ignore errors
pgrep soup > /dev/null && should_exit_on_failure=0

load_failures=0

load_template() {
    uri=$1
    file=$2

    echo "Loading template file $i"
    if ! retry 3 1 "so-elasticsearch-query $uri -d@$file -XPUT" "{\"acknowledged\":true}"; then
        if [[ $should_exit_on_failure -eq 1 ]]; then
            fail "Could not load template file: $file"
        else
            load_failures=$((load_failures+1))
            echo "Incremented load failure counter: $load_failures"
        fi
    fi
}

if [ ! -f $STATE_FILE_SUCCESS ]; then
    echo "State file $STATE_FILE_SUCCESS not found. Running so-elasticsearch-templates-load."

    . /usr/sbin/so-common

{% if GLOBALS.role != 'so-heavynode' %}
    if [ -f /usr/sbin/so-elastic-fleet-common ]; then
        . /usr/sbin/so-elastic-fleet-common
    fi
{% endif %}

    default_conf_dir=/opt/so/conf

    # Define a default directory to load pipelines from
    ELASTICSEARCH_TEMPLATES="$default_conf_dir/elasticsearch/templates/"

{% if GLOBALS.role == 'so-heavynode' %}
    file="/opt/so/conf/elasticsearch/templates/index/so-common-template.json"
{% else %}
    file="/usr/sbin/so-elastic-fleet-common"
{% endif %}

    if [ -f "$file" ]; then
        # Wait for ElasticSearch to initialize
        echo -n "Waiting for ElasticSearch..."
        retry 240 1 "so-elasticsearch-query / -k --output /dev/null --silent --head --fail" || fail "Connection attempt timed out. Unable to connect to ElasticSearch. \nPlease try: \n -checking log(s) in /var/log/elasticsearch/\n -running 'sudo docker ps' \n -running 'sudo so-elastic-restart'"
{% if GLOBALS.role != 'so-heavynode' %}
        TEMPLATE="logs-endpoint.alerts@package"
        INSTALLED=$(so-elasticsearch-query _component_template/$TEMPLATE | jq -r .component_templates[0].name)
        if [ "$INSTALLED" != "$TEMPLATE" ]; then
            echo
            echo "Packages not yet installed."
            echo
            exit 0
        fi
{% endif %}

        touch $STATE_FILE_INITIAL

        cd ${ELASTICSEARCH_TEMPLATES}/component/ecs

        echo "Loading ECS component templates..."
        for i in *; do
            TEMPLATE=$(echo $i | cut -d '.' -f1)
            load_template "_component_template/${TEMPLATE}-mappings" "$i"
        done
        echo

        cd ${ELASTICSEARCH_TEMPLATES}/component/elastic-agent

        echo "Loading Elastic Agent component templates..."
{% if GLOBALS.role == 'so-heavynode' %}
        component_pattern="so-*"
{% else %}
        component_pattern="*"
{% endif %}
        for i in $component_pattern; do
            TEMPLATE=${i::-5}
            load_template "_component_template/$TEMPLATE" "$i"
        done
        echo

        # Load SO-specific component templates
        cd ${ELASTICSEARCH_TEMPLATES}/component/so

        echo "Loading Security Onion component templates..."
        for i in *; do
            TEMPLATE=$(echo $i | cut -d '.' -f1);
            load_template "_component_template/$TEMPLATE" "$i"
        done
        echo

        # Load SO index templates
        cd ${ELASTICSEARCH_TEMPLATES}/index

        echo "Loading Security Onion index templates..."
        shopt -s extglob
{% if GLOBALS.role == 'so-heavynode' %}
        pattern="!(*1password*|*aws*|*azure*|*cloudflare*|*elastic_agent*|*fim*|*github*|*google*|*osquery*|*system*|*windows*|*endpoint*|*elasticsearch*|*generic*|*fleet_server*|*soc*)"
{% else %}
        pattern="*"
{% endif %}
        # Index templates will be skipped if the following conditions are met:
        #   1. The template is part of the "so-logs-" template group
        #   2. The template name does not correlate to at least one existing component template
        # In this situation, the script will treat the skipped template as a temporary failure
        # and allow the templates to be loaded again on the next run or highstate, whichever
        # comes first.
        COMPONENT_LIST=$(so-elasticsearch-component-templates-list)
        for i in $pattern; do
            TEMPLATE=${i::-14}
            COMPONENT_PATTERN=${TEMPLATE:3}
            MATCH=$(echo "$TEMPLATE" | grep -E "^so-logs-|^so-metrics" | grep -vE "detections|osquery")
            if [[ -n "$MATCH" && ! "$COMPONENT_LIST" =~ "$COMPONENT_PATTERN" && ! "$COMPONENT_PATTERN" =~ \.generic|logs-winlog\.winlog ]]; then
                load_failures=$((load_failures+1))
                echo "Component template does not exist for $COMPONENT_PATTERN. The index template will not be loaded. Load failures: $load_failures"
            else
                load_template "_index_template/$TEMPLATE" "$i"
            fi
        done
    else
{% if GLOBALS.role == 'so-heavynode' %}
        echo "Common template does not exist. Exiting..."
{% else %}
        echo "Elastic Fleet not configured. Exiting..."
{% endif %}
        exit 0
    fi

    cd - >/dev/null

    if [[ $load_failures -eq 0 ]]; then
        echo "All templates loaded successfully"
        touch $STATE_FILE_SUCCESS
    else
        echo "Encountered $load_failures templates that were unable to load, likely due to missing dependencies that will be available later; will retry on next highstate"
    fi
else
    echo "Templates already loaded"
fi
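The slicing in the index-template loop above relies on the "<name>-template.json" file naming convention; a sketch with a hypothetical file name:

    i="so-logs-system.security-template.json"
    TEMPLATE=${i::-14}                 # drop the 14-char suffix "-template.json" -> so-logs-system.security
    COMPONENT_PATTERN=${TEMPLATE:3}    # drop the leading "so-" -> logs-system.security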
@@ -11,15 +11,8 @@
    'so-kratos',
    'so-hydra',
    'so-nginx',
    'so-postgres',
    'so-redis',
    'so-soc',
    'so-strelka-coordinator',
    'so-strelka-gatekeeper',
    'so-strelka-frontend',
    'so-strelka-backend',
    'so-strelka-manager',
    'so-strelka-filestream'
  ] %}

{% elif GLOBALS.role in ['so-manager', 'so-standalone', 'so-managersearch', 'so-managerhype'] %}
@@ -35,15 +28,8 @@
    'so-hydra',
    'so-logstash',
    'so-nginx',
    'so-postgres',
    'so-redis',
    'so-soc',
    'so-strelka-coordinator',
    'so-strelka-gatekeeper',
    'so-strelka-frontend',
    'so-strelka-backend',
    'so-strelka-manager',
    'so-strelka-filestream'
  ] %}

{% elif GLOBALS.role == 'so-searchnode' %}
@@ -60,12 +46,6 @@
    'so-logstash',
    'so-nginx',
    'so-redis',
    'so-strelka-coordinator',
    'so-strelka-gatekeeper',
    'so-strelka-frontend',
    'so-strelka-backend',
    'so-strelka-manager',
    'so-strelka-filestream'
  ] %}

{% elif GLOBALS.role == 'so-import' %}
@@ -79,7 +59,6 @@
    'so-kratos',
    'so-hydra',
    'so-nginx',
    'so-postgres',
    'so-soc'
  ] %}


@@ -27,7 +27,6 @@ firewall:
    self: []
    sensor: []
    standalone: []
    strelka_frontend: []
    syslog: []
    desktop: []
    customhostgroup0: []
@@ -98,10 +97,6 @@ firewall:
      tcp:
        - 8086
      udp: []
    postgres:
      tcp:
        - 5432
      udp: []
    kafka_controller:
      tcp:
        - 9093
@@ -144,10 +139,6 @@ firewall:
      tcp:
        - 22
      udp: []
    strelka_frontend:
      tcp:
        - 57314
      udp: []
    syslog:
      tcp:
        - 514
@@ -197,7 +188,6 @@ firewall:
                - kibana
                - redis
                - influxdb
                - postgres
                - elasticsearch_rest
                - elasticsearch_node
                - localrules
@@ -227,9 +217,6 @@ firewall:
                - elastic_agent_control
                - elastic_agent_data
                - elastic_agent_update
            strelka_frontend:
              portgroups:
                - strelka_frontend
            analyst:
              portgroups:
                - nginx
@@ -384,7 +371,6 @@ firewall:
                - kibana
                - redis
                - influxdb
                - postgres
                - elasticsearch_rest
                - elasticsearch_node
                - docker_registry
@@ -596,7 +582,6 @@ firewall:
                - kibana
                - redis
                - influxdb
                - postgres
                - elasticsearch_rest
                - elasticsearch_node
                - docker_registry
@@ -806,7 +791,6 @@ firewall:
                - kibana
                - redis
                - influxdb
                - postgres
                - elasticsearch_rest
                - elasticsearch_node
                - docker_registry
@@ -1019,7 +1003,6 @@ firewall:
                - kibana
                - redis
                - influxdb
                - postgres
                - elasticsearch_rest
                - elasticsearch_node
                - docker_registry
@@ -1033,7 +1016,6 @@ firewall:
                - elastic_agent_data
                - elastic_agent_update
                - endgame
                - strelka_frontend
                - localrules
            fleet:
              portgroups:
@@ -1122,9 +1104,6 @@ firewall:
                - external_suricata
            external_kafka:
              portgroups: []
            strelka_frontend:
              portgroups:
                - strelka_frontend
            desktop:
              portgroups:
                - docker_registry
@@ -1318,9 +1297,6 @@ firewall:
      chain:
        DOCKER-USER:
          hostgroups:
            strelka_frontend:
              portgroups:
                - strelka_frontend
            customhostgroup0:
              portgroups: []
            customhostgroup1:
@@ -1410,9 +1386,6 @@ firewall:
                - syslog
                - elasticsearch_node
                - elasticsearch_rest
            strelka_frontend:
              portgroups:
                - strelka_frontend
            syslog:
              portgroups:
                - syslog

@@ -1,6 +1,5 @@
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'docker/docker.map.jinja' import DOCKERMERGED %}
{% from 'telegraf/map.jinja' import TELEGRAFMERGED %}
{% import_yaml 'firewall/defaults.yaml' as FIREWALL_DEFAULT %}

{# add our ip to self #}
@@ -56,16 +55,4 @@

{% endif %}

{# Open Postgres (5432) to minion hostgroups when Telegraf is configured to write to Postgres #}
{% set TG_OUT = TELEGRAFMERGED.output | upper %}
{% if TG_OUT in ['POSTGRES', 'BOTH'] %}
{% if role.startswith('manager') or role == 'standalone' or role == 'eval' %}
{% for r in ['sensor', 'searchnode', 'heavynode', 'receiver', 'fleet', 'idh', 'desktop', 'import'] %}
{% if FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups[r] is defined %}
{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups[r].portgroups.append('postgres') %}
{% endif %}
{% endfor %}
{% endif %}
{% endif %}

{% set FIREWALL_MERGED = salt['pillar.get']('firewall', FIREWALL_DEFAULT.firewall, merge=True) %}

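Sketch of what the postgres block above produces, on a hypothetical manager entry before the pillar merge (structure per firewall/defaults.yaml earlier in this diff; illustrative only):

    firewall:
      role:
        manager:
          chain:
            DOCKER-USER:
              hostgroups:
                sensor:
                  portgroups:
                    - postgres   # appended for each matching hostgroup when
                                 # TELEGRAFMERGED.output is POSTGRES or BOTH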
@@ -47,7 +47,6 @@ firewall:
    self: *ROhostgroupsettingsadv
    sensor: *hostgroupsettings
    standalone: *hostgroupsettings
    strelka_frontend: *hostgroupsettings
    syslog: *hostgroupsettings
    desktop: *hostgroupsettings
    customhostgroup0: &customhostgroupsettings
@@ -156,9 +155,6 @@ firewall:
    ssh:
      tcp: *tcpsettings
      udp: *udpsettings
    strelka_frontend:
      tcp: *tcpsettings
      udp: *udpsettings
    syslog:
      tcp: *tcpsettings
      udp: *udpsettings
@@ -226,8 +222,6 @@ firewall:
              portgroups: *portgroupsdocker
            external_suricata:
              portgroups: *portgroupsdocker
            strelka_frontend:
              portgroups: *portgroupsdocker
            syslog:
              portgroups: *portgroupsdocker
            analyst:
@@ -571,8 +565,6 @@ firewall:
              portgroups: *portgroupsdocker
            external_suricata:
              portgroups: *portgroupsdocker
            strelka_frontend:
              portgroups: *portgroupsdocker
            syslog:
              portgroups: *portgroupsdocker
            analyst:
@@ -711,8 +703,6 @@ firewall:
          hostgroups:
            self:
              portgroups: *portgroupsdocker
            strelka_frontend:
              portgroups: *portgroupsdocker
            customhostgroup0:
              portgroups: *portgroupsdocker
            customhostgroup1:
@@ -774,8 +764,6 @@ firewall:
              portgroups: *portgroupsdocker
            self:
              portgroups: *portgroupsdocker
            strelka_frontend:
              portgroups: *portgroupsdocker
            customhostgroup0:
              portgroups: *portgroupsdocker
            customhostgroup1:

@@ -11,14 +11,18 @@ global:
    regexFailureMessage: You must enter a valid IP address or CIDR.
  mdengine:
    description: Which engine to use for meta data generation. Options are ZEEK and SURICATA.
    regex: ^(ZEEK|SURICATA)$
    options:
      - ZEEK
      - SURICATA
    regexFailureMessage: You must enter either ZEEK or SURICATA.
    global: True
  pcapengine:
    description: Which engine to use for generating pcap. Currently only SURICATA is supported.
    regex: ^(SURICATA)$
    options:
      - SURICATA
    regexFailureMessage: You must enter SURICATA.
    global: True
  ids:
    description: Which IDS engine to use. Currently only Suricata is supported.
@@ -38,9 +42,11 @@ global:
    advanced: True
  pipeline:
    description: Sets which pipeline technology to use for events. The use of Kafka requires a Security Onion Pro license.
    regex: ^(REDIS|KAFKA)$
    options:
      - REDIS
      - KAFKA
    regexFailureMessage: You must enter either REDIS or KAFKA.
    global: True
    advanced: True
  repo_host:
@@ -59,5 +65,4 @@ global:
    description: Allows use of Endgame with Security Onion. This feature requires a license from Endgame.
    global: True
    advanced: True
    helpLink: influxdb


@@ -85,10 +85,7 @@ influxdb:
    description: The log level to use for outputting log statements. Allowed values are debug, info, or error.
    global: True
    advanced: false
    options:
      - info
      - debug
      - error
    regex: ^(info|debug|error)$
    helpLink: influxdb
  metrics-disabled:
    description: If true, the HTTP endpoint that exposes internal InfluxDB metrics will be inaccessible.
@@ -143,9 +140,7 @@ influxdb:
    description: Determines the type of storage used for secrets. Allowed values are bolt or vault.
    global: True
    advanced: True
    options:
      - bolt
      - vault
    regex: ^(bolt|vault)$
    helpLink: influxdb
  session-length:
    description: Number of minutes that a user login session can remain authenticated.
@@ -265,9 +260,7 @@ influxdb:
    description: The type of data store to use for HTTP resources. Allowed values are disk or memory. Memory should not be used for production Security Onion installations.
    global: True
    advanced: True
    options:
      - disk
      - memory
    regex: ^(disk|memory)$
    helpLink: influxdb
  tls-cert:
    description: The container path to the certificate to use for TLS encryption of the HTTP requests and responses.

@@ -131,10 +131,7 @@ kafka:
  ssl_x_keystore_x_type:
    description: The key store file format.
    title: ssl.keystore.type
    options:
      - JKS
      - PKCS12
      - PEM
    regex: ^(JKS|PKCS12|PEM)$
    helpLink: kafka
  ssl_x_truststore_x_location:
    description: The trust store file location within the Docker container.
@@ -163,11 +160,7 @@ kafka:
  security_x_protocol:
    description: 'Broker communication protocol. Options are: SASL_SSL, PLAINTEXT, SSL, SASL_PLAINTEXT'
    title: security.protocol
    options:
      - SASL_SSL
      - PLAINTEXT
      - SSL
      - SASL_PLAINTEXT
    regex: ^(SASL_SSL|PLAINTEXT|SSL|SASL_PLAINTEXT)
    helpLink: kafka
  ssl_x_keystore_x_location:
    description: The key store file location within the Docker container.
@@ -181,10 +174,7 @@ kafka:
  ssl_x_keystore_x_type:
    description: The key store file format.
    title: ssl.keystore.type
    options:
      - JKS
      - PKCS12
      - PEM
    regex: ^(JKS|PKCS12|PEM)$
    helpLink: kafka
  ssl_x_truststore_x_location:
    description: The trust store file location within the Docker container.

@@ -9,5 +9,5 @@ SESSIONCOOKIE=$(curl -K /opt/so/conf/elasticsearch/curl.config -c - -X GET http:
|
||||
# Disable certain Features from showing up in the Kibana UI
|
||||
echo
|
||||
echo "Setting up default Kibana Space:"
|
||||
curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X PUT "localhost:5601/api/spaces/space/default" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d' {"id":"default","name":"Default","disabledFeatures":["ml","enterpriseSearch","logs","infrastructure","apm","uptime","monitoring","stackAlerts","actions","securitySolutionCasesV3","inventory","dataQuality","searchSynonyms","searchQueryRules","enterpriseSearchApplications","enterpriseSearchAnalytics","securitySolutionTimeline","securitySolutionNotes","securitySolutionRulesV1","entityManager","streams","cloudConnect","slo"]} ' >> /opt/so/log/kibana/misc.log
|
||||
curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X PUT "localhost:5601/api/spaces/space/default" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d' {"id":"default","name":"Default","disabledFeatures":["ml","enterpriseSearch","logs","infrastructure","apm","uptime","monitoring","stackAlerts","actions","securitySolutionCasesV3","inventory","dataQuality","searchSynonyms","enterpriseSearchApplications","enterpriseSearchAnalytics","securitySolutionTimeline","securitySolutionNotes","entityManager"]} ' >> /opt/so/log/kibana/misc.log
|
||||
echo
|
||||
|
||||
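A quick way to confirm the space settings took effect is to read the space back with the same session cookie. A minimal sketch, assuming the Kibana spaces API is reachable on localhost:5601 exactly as in the script above:

# Hedged sanity check: fetch the default space and show its disabled features.
curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/spaces/space/default" | grep -o '"disabledFeatures":\[[^]]*\]'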
@@ -21,12 +21,8 @@ kratos:
description: "Specify the provider type. Required. Valid values are: auth0, generic, github, google, microsoft"
global: True
forcedType: string
options:
- auth0
- generic
- github
- google
- microsoft
regex: "auth0|generic|github|google|microsoft"
regexFailureMessage: "Valid values are: auth0, generic, github, google, microsoft"
helpLink: oidc
client_id:
description: Specify the client ID, also referenced as the application ID. Required.
@@ -47,9 +43,8 @@ kratos:
description: The source of the subject identifier. Typically 'userinfo'. Only used when provider is 'microsoft'.
global: True
forcedType: string
options:
- me
- userinfo
regex: me|userinfo
regexFailureMessage: "Valid values are: me, userinfo"
helpLink: oidc
auth_url:
description: Provider's auth URL. Required when provider is 'generic'.

@@ -231,16 +231,6 @@ logrotate:
- dateext
- dateyesterday
- su root socore
/nsm/strelka/log/strelka_x_log:
- daily
- rotate 14
- missingok
- copytruncate
- compress
- create
- extension .log
- dateext
- dateyesterday
/opt/so/log/sensor_clean_x_log:
- daily
- rotate 2

@@ -147,13 +147,6 @@ logrotate:
multiline: True
global: True
forcedType: "[]string"
"/nsm/strelka/log/strelka_x_log":
description: List of logrotate options for this file.
title: /nsm/strelka/log/strelka.log
advanced: True
multiline: True
global: True
forcedType: "[]string"
"/opt/so/log/sensor_clean_x_log":
description: List of logrotate options for this file.
title: /opt/so/log/sensor_clean.log

@@ -89,7 +89,6 @@ so-logstash:
- /nsm/zeek:/nsm/zeek:ro
- /nsm/suricata:/suricata:ro
- /opt/so/log/fleet/:/osquery/logs:ro
- /opt/so/log/strelka:/strelka:ro
{% endif %}
{% if DOCKERMERGED.containers['so-logstash'].custom_bind_mounts %}
{% for BIND in DOCKERMERGED.containers['so-logstash'].custom_bind_mounts %}

@@ -23,7 +23,6 @@ VALID_ROLES = {
'a': { 'role': 'analyst','desc': 'Analyst - 80/tcp, 443/tcp' },
'b': { 'role': 'beats_endpoint', 'desc': 'Logstash Beat - 5044/tcp' },
'e': { 'role': 'elasticsearch_rest', 'desc': 'Elasticsearch REST API - 9200/tcp' },
'f': { 'role': 'strelka_frontend', 'desc': 'Strelka frontend - 57314/tcp' },
's': { 'role': 'syslog', 'desc': 'Syslog device - 514/tcp/udp' },
}

@@ -91,7 +90,6 @@ def main():
group.add_argument('-a', dest='roles', action='append_const', const=VALID_ROLES['a']['role'], help="Analyst - 80/tcp, 443/tcp")
group.add_argument('-b', dest='roles', action='append_const', const=VALID_ROLES['b']['role'], help="Logstash Beat - 5044/tcp")
group.add_argument('-e', dest='roles', action='append_const', const=VALID_ROLES['e']['role'], help="Elasticsearch REST API - 9200/tcp")
group.add_argument('-f', dest='roles', action='append_const', const=VALID_ROLES['f']['role'], help="Strelka frontend - 57314/tcp")
group.add_argument('-s', dest='roles', action='append_const', const=VALID_ROLES['s']['role'], help="Syslog device - 514/tcp/udp")

ip_g = main_parser.add_argument_group(title='allow')

@@ -133,7 +133,7 @@ function getinstallinfo() {
return 1
fi

while read -r var; do export "$var"; done <<< "$INSTALLVARS"
export $(echo "$INSTALLVARS" | xargs)
if [ $? -ne 0 ]; then
log "ERROR" "Failed to source install variables"
return 1
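The replacement line above sidesteps the word-splitting problem in the old export $(... | xargs) form: xargs re-splits on whitespace, so any value containing a space gets exported as garbage. A small self-contained demo of the difference, with a hypothetical variable that is not a real install variable:

# Hypothetical demo; TITLE stands in for any install variable whose value has a space.
INSTALLVARS='TITLE=Security Onion'
export $(echo "$INSTALLVARS" | xargs)                        # exports TITLE=Security, then errors on 'Onion'
while read -r var; do export "$var"; done <<< "$INSTALLVARS" # exports TITLE='Security Onion' intact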
@@ -511,28 +511,6 @@ function add_redis_to_minion() {
fi
}

function add_strelka_to_minion() {
printf '%s\n'\
"strelka:"\
"  backend:"\
"    enabled: True"\
"  filestream:"\
"    enabled: True"\
"  frontend:"\
"    enabled: True"\
"  manager:"\
"    enabled: True"\
"  coordinator:"\
"    enabled: True"\
"  gatekeeper:"\
"    enabled: True"\
" " >> $PILLARFILE
if [ $? -ne 0 ]; then
log "ERROR" "Failed to add strelka configuration to $PILLARFILE"
return 1
fi
}

function add_telegraf_to_minion() {
printf '%s\n'\
"telegraf:"\
@@ -729,7 +707,6 @@ function createEVAL() {
pcapspace || return 1
add_elasticsearch_to_minion || return 1
add_sensor_to_minion || return 1
add_strelka_to_minion || return 1
add_elastalert_to_minion || return 1
add_kibana_to_minion || return 1
add_telegraf_to_minion || return 1
@@ -748,7 +725,6 @@ function createSTANDALONE() {
add_elasticsearch_to_minion || return 1
add_logstash_to_minion || return 1
add_sensor_to_minion || return 1
add_strelka_to_minion || return 1
add_elastalert_to_minion || return 1
add_kibana_to_minion || return 1
add_redis_to_minion || return 1
@@ -833,7 +809,6 @@ function createHEAVYNODE() {
add_elasticsearch_to_minion || return 1
add_elastic_agent_to_minion || return 1
add_sensor_to_minion || return 1
add_strelka_to_minion || return 1
add_telegraf_to_minion || return 1
}

@@ -844,7 +819,6 @@ function createSENSOR() {
PCAP_PERCENTAGE=3
pcapspace || return 1
add_sensor_to_minion || return 1
add_strelka_to_minion || return 1
add_telegraf_to_minion || return 1
}


@@ -363,7 +363,6 @@ preupgrade_changes() {
echo "Checking to see if changes are needed."

[[ "$INSTALLEDVERSION" =~ ^2\.4\.21[0-9]+$ ]] && up_to_3.0.0
[[ "$INSTALLEDVERSION" == "3.0.0" ]] && up_to_3.1.0
true
}

@@ -372,7 +371,6 @@ postupgrade_changes() {
echo "Running post upgrade processes."

[[ "$POSTVERSION" =~ ^2\.4\.21[0-9]+$ ]] && post_to_3.0.0
[[ "$POSTVERSION" == "3.0.0" ]] && post_to_3.1.0
true
}

@@ -447,6 +445,7 @@ migrate_pcap_to_suricata() {
}

up_to_3.0.0() {
determine_elastic_agent_upgrade
migrate_pcap_to_suricata

INSTALLEDVERSION=3.0.0
@@ -470,58 +469,6 @@ post_to_3.0.0() {

### 3.0.0 End ###

### 3.1.0 Scripts ###

elasticsearch_backup_index_templates() {
echo "Backing up current elasticsearch index templates in /opt/so/conf/elasticsearch/templates/index/ to /nsm/backup/3.0.0_elasticsearch_index_templates.tar.gz"
tar -czf /nsm/backup/3.0.0_elasticsearch_index_templates.tar.gz -C /opt/so/conf/elasticsearch/templates/index/ .
}

up_to_3.1.0() {
determine_elastic_agent_upgrade
elasticsearch_backup_index_templates
# Clear existing component template state file.
rm -f /opt/so/state/esfleet_component_templates.json


INSTALLEDVERSION=3.1.0
}

post_to_3.1.0() {
/usr/sbin/so-kibana-space-defaults

# One-time backfill for minions that existed before the postgres Telegraf
# feature shipped. Generate the aggregate pillar on the manager and create
# the per-minion DB roles, then fan each minion's cred into its own pillar
# file. Going forward the reactor handles each new salt-key accept with a
# targeted fan-out, so a manager highstate no longer needs to iterate.
echo "Provisioning Telegraf Postgres users for existing minions."
salt-call --local state.apply postgres.auth,postgres.telegraf_users queue=True || true

AGGREGATE_PILLAR=/opt/so/saltstack/local/pillar/postgres/auth.sls
MINIONS_DIR=/opt/so/saltstack/local/pillar/minions
if [[ -f "$AGGREGATE_PILLAR" && -d "$MINIONS_DIR" ]]; then
for pillar_file in "$MINIONS_DIR"/*.sls; do
[[ -f "$pillar_file" ]] || continue
mid=$(basename "$pillar_file" .sls)
[[ "$mid" == adv_* ]] && continue
safe=$(echo "$mid" | tr '.-' '__' | tr '[:upper:]' '[:lower:]')
existing_user=$(so-yaml.py get -r "$pillar_file" postgres.telegraf.user 2>/dev/null || true)
[[ "$existing_user" == "so_telegraf_${safe}" ]] && continue
user=$(so-yaml.py get -r "$AGGREGATE_PILLAR" "postgres.auth.users.telegraf_${safe}.user" 2>/dev/null || true)
pass=$(so-yaml.py get -r "$AGGREGATE_PILLAR" "postgres.auth.users.telegraf_${safe}.pass" 2>/dev/null || true)
[[ -z "$user" || -z "$pass" ]] && continue
so-yaml.py replace "$pillar_file" postgres.telegraf.user "$user" >/dev/null
so-yaml.py replace "$pillar_file" postgres.telegraf.pass "$pass" >/dev/null
done
fi

POSTVERSION=3.1.0
}

### 3.1.0 End ###


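After this backfill runs, each regular minion pillar should carry its own Telegraf credential. A hedged spot check, where sensor1_sensor stands in for a real minion id:

# Hypothetical spot check; replace sensor1_sensor with an actual minion id.
so-yaml.py get -r /opt/so/saltstack/local/pillar/minions/sensor1_sensor.sls postgres.telegraf.user
# expected: so_telegraf_sensor1_sensor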
repo_sync() {
echo "Sync the local repo."
su socore -c '/usr/sbin/so-repo-sync' || fail "Unable to complete so-repo-sync."
@@ -781,12 +728,12 @@ verify_es_version_compatibility() {
local is_active_intermediate_upgrade=1
# supported upgrade paths for SO-ES versions
declare -A es_upgrade_map=(
["9.0.8"]="9.3.3"
["8.18.8"]="9.0.8"
)

# Elasticsearch MUST upgrade through these versions
declare -A es_to_so_version=(
["9.0.8"]="3.0.0-20260331"
["8.18.8"]="2.4.190-20251024"
)

# Get current Elasticsearch version
@@ -798,17 +745,26 @@ verify_es_version_compatibility() {
exit 160
fi

if ! target_es_version=$(so-yaml.py get -r $UPDATE_DIR/salt/elasticsearch/defaults.yaml elasticsearch.version); then
echo "Couldn't determine the target Elasticsearch version (post soup version) to ensure compatibility with current Elasticsearch version. Exiting"
if ! target_es_version_raw=$(so-yaml.py get $UPDATE_DIR/salt/elasticsearch/defaults.yaml elasticsearch.version); then
# so-yaml.py failed to get the ES version from the upgrade version's elasticsearch/defaults.yaml file. The target is likely an SO version older than 2.4.110, which predates ES version pinning, so it should be okay to continue with the upgrade.

exit 160
# if so-yaml.py failed to get the ES version AND the version we are upgrading to is newer than 2.4.110 then we should bail
if [[ $(cat $UPDATE_DIR/VERSION | cut -d'.' -f3) > 110 ]]; then
echo "Couldn't determine the target Elasticsearch version (post soup version) to ensure compatibility with current Elasticsearch version. Exiting"

exit 160
fi

# allow upgrade to version < 2.4.110 without checking ES version compatibility
return 0
else
target_es_version=$(sed -n '1p' <<< "$target_es_version_raw")
fi

for statefile in "${es_required_version_statefile_base}"-*; do
[[ -f $statefile ]] || continue

local es_required_version_statefile_value
es_required_version_statefile_value=$(cat "$statefile")
local es_required_version_statefile_value=$(cat "$statefile")

if [[ "$es_required_version_statefile_value" == "$target_es_version" ]]; then
echo "Intermediate upgrade to ES $target_es_version is in progress. Skipping Elasticsearch version compatibility check."
@@ -817,7 +773,7 @@
fi

# use sort to check if es_required_statefile_value is < the current es_version.
if [[ "$(printf '%s\n' "$es_required_version_statefile_value" "$es_version" | sort -V | head -n1)" == "$es_required_version_statefile_value" ]]; then
if [[ "$(printf '%s\n' $es_required_version_statefile_value $es_version | sort -V | head -n1)" == "$es_required_version_statefile_value" ]]; then
rm -f "$statefile"
continue
fi
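The comparison above leans on the sort -V idiom: print both versions, sort them version-aware, and check which one sorts first. The same trick in isolation, safe to try in any shell:

# Standalone sketch of the sort -V comparison used above.
version_lte() { [ "$(printf '%s\n' "$1" "$2" | sort -V | head -n1)" = "$1" ]; }
version_lte 8.18.8 9.0.8 && echo "8.18.8 <= 9.0.8"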
@@ -828,7 +784,8 @@

echo -e "\n##############################################################################################################################\n"
echo "A previously required intermediate Elasticsearch upgrade was detected. Verifying that all Searchnodes/Heavynodes have successfully upgraded Elasticsearch to $es_required_version_statefile_value before proceeding with soup to avoid potential data loss! This command can take up to an hour to complete."
if ! timeout --foreground 4000 bash "$es_verification_script" "$es_required_version_statefile_value" "$statefile"; then
timeout --foreground 4000 bash "$es_verification_script" "$es_required_version_statefile_value" "$statefile"
if [[ $? -ne 0 ]]; then
echo -e "\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"

echo "A previous required intermediate Elasticsearch upgrade to $es_required_version_statefile_value has yet to successfully complete across the grid. Please allow time for all Searchnodes/Heavynodes to have upgraded Elasticsearch to $es_required_version_statefile_value before running soup again to avoid potential data loss!"
@@ -845,7 +802,6 @@
return 0
fi

# shellcheck disable=SC2076 # Do not want a regex here eg usage " 8.18.8 9.0.8 " =~ " 9.0.8 "
if [[ " ${es_upgrade_map[$es_version]} " =~ " $target_es_version " || "$es_version" == "$target_es_version" ]]; then
# supported upgrade
return 0
@@ -854,7 +810,7 @@
if [[ -z "$compatible_versions" ]]; then
# If current ES version is not explicitly defined in the upgrade map, we know they have an intermediate upgrade to do.
# We default to the lowest ES version defined in es_to_so_version as $first_es_required_version
first_es_required_version=$(printf '%s\n' "${!es_to_so_version[@]}" | sort -V | head -n1)
local first_es_required_version=$(printf '%s\n' "${!es_to_so_version[@]}" | sort -V | head -n1)
next_step_so_version=${es_to_so_version[$first_es_required_version]}
required_es_upgrade_version="$first_es_required_version"
else
@@ -873,7 +829,7 @@
if [[ $is_airgap -eq 0 ]]; then
run_airgap_intermediate_upgrade
else
if [[ -n $ISOLOC ]]; then
if [[ ! -z $ISOLOC ]]; then
originally_requested_iso_location="$ISOLOC"
fi
# Make sure ISOLOC is not set. Network installs that used soup -f would have ISOLOC set.
@@ -905,8 +861,7 @@ wait_for_salt_minion_with_restart() {
}

run_airgap_intermediate_upgrade() {
local originally_requested_so_version
originally_requested_so_version=$(cat "$UPDATE_DIR/VERSION")
local originally_requested_so_version=$(cat $UPDATE_DIR/VERSION)
# preserve ISOLOC value, so we can try to use it post intermediate upgrade
local originally_requested_iso_location="$ISOLOC"

@@ -918,8 +873,7 @@ run_airgap_intermediate_upgrade() {

while [[ -z "$next_iso_location" ]] || [[ ! -f "$next_iso_location" && ! -b "$next_iso_location" ]]; do
# List removable devices if any are present
local removable_devices
removable_devices=$(lsblk -no PATH,SIZE,TYPE,MOUNTPOINTS,RM | awk '$NF==1')
local removable_devices=$(lsblk -no PATH,SIZE,TYPE,MOUNTPOINTS,RM | awk '$NF==1')
if [[ -n "$removable_devices" ]]; then
echo "PATH SIZE TYPE MOUNTPOINTS RM"
echo "$removable_devices"
@@ -940,21 +894,21 @@

echo "Using $next_iso_location for required intermediate upgrade."
exec bash <<EOF
ISOLOC="$next_iso_location" soup -y && \
ISOLOC="$next_iso_location" soup -y && \
ISOLOC=$next_iso_location soup -y && \
ISOLOC=$next_iso_location soup -y && \

echo -e "\n##############################################################################################################################\n" && \
echo -e "Verifying Elasticsearch was successfully upgraded to $required_es_upgrade_version across the grid. This part can take a while as Searchnodes/Heavynodes sync up with the Manager! \n\nOnce verification completes the next soup will begin automatically. If verification takes longer than 1 hour it will stop waiting and your grid will remain at $next_step_so_version. Allowing for all Searchnodes/Heavynodes to upgrade Elasticsearch to the required version on their own time.\n" && \

timeout --foreground 4000 bash /tmp/so_intermediate_upgrade_verification.sh "$required_es_upgrade_version" "$es_required_version_statefile" && \
timeout --foreground 4000 bash /tmp/so_intermediate_upgrade_verification.sh $required_es_upgrade_version $es_required_version_statefile && \

echo -e "\n##############################################################################################################################\n" && \

# automatically start the next soup if the original ISO isn't using the same block device we just used
if [[ -n "$originally_requested_iso_location" ]] && [[ "$originally_requested_iso_location" != "$next_iso_location" ]]; then
umount /tmp/soagupdate
ISOLOC="$originally_requested_iso_location" soup -y && \
ISOLOC="$originally_requested_iso_location" soup -y
ISOLOC=$originally_requested_iso_location soup -y && \
ISOLOC=$originally_requested_iso_location soup -y
else
echo "Could not automatically start next soup to $originally_requested_so_version. Soup will now exit here at $(cat /etc/soversion)" && \

@@ -970,29 +924,29 @@ run_network_intermediate_upgrade() {
if [[ -n "$BRANCH" ]]; then
local originally_requested_so_branch="$BRANCH"
else
local originally_requested_so_branch="3/main"
local originally_requested_so_branch="2.4/main"
fi

echo "Starting automated intermediate upgrade to $next_step_so_version."
echo "After completion, the system will automatically attempt to upgrade to the latest version."
echo -e "\n##############################################################################################################################\n"
exec bash << EOF
BRANCH="$next_step_so_version" soup -y && \
BRANCH="$next_step_so_version" soup -y && \
BRANCH=$next_step_so_version soup -y && \
BRANCH=$next_step_so_version soup -y && \

echo -e "\n##############################################################################################################################\n" && \
echo -e "Verifying Elasticsearch was successfully upgraded to $required_es_upgrade_version across the grid. This part can take a while as Searchnodes/Heavynodes sync up with the Manager! \n\nOnce verification completes the next soup will begin automatically. If verification takes longer than 1 hour it will stop waiting and your grid will remain at $next_step_so_version. Allowing for all Searchnodes/Heavynodes to upgrade Elasticsearch to the required version on their own time.\n" && \

timeout --foreground 4000 bash /tmp/so_intermediate_upgrade_verification.sh "$required_es_upgrade_version" "$es_required_version_statefile" && \
timeout --foreground 4000 bash /tmp/so_intermediate_upgrade_verification.sh $required_es_upgrade_version $es_required_version_statefile && \

echo -e "\n##############################################################################################################################\n" && \
if [[ -n "$originally_requested_iso_location" ]]; then
# nonairgap soup that used -f originally, runs intermediate upgrade using network + BRANCH, later coming back to the original ISO for the last soup
ISOLOC="$originally_requested_iso_location" soup -y && \
ISOLOC="$originally_requested_iso_location" soup -y
ISOLOC=$originally_requested_iso_location soup -y && \
ISOLOC=$originally_requested_iso_location soup -y
else
BRANCH="$originally_requested_so_branch" soup -y && \
BRANCH="$originally_requested_so_branch" soup -y
BRANCH=$originally_requested_so_branch soup -y && \
BRANCH=$originally_requested_so_branch soup -y
fi
echo -e "\n##############################################################################################################################\n"
EOF

@@ -1,28 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

# Fired by salt/reactor/telegraf_user_sync.sls when salt-key accepts a new
# minion. Only provisions the per-minion pillar entry and DB role on the
# manager; the minion itself will pick up its telegraf config on its first
# highstate during onboarding, so there's no need to push the telegraf state
# from here.
#
# Target the manager via role grains — same pattern as orch/delete_hypervisor.sls.
# The reactor doesn't know the manager's minion id, and grains.master on the
# runner is a hostname, not a targetable id.
{% set FANOUT_MINION = salt['pillar.get']('postgres_fanout_minion', '') %}

manager_sync_telegraf_pg_users:
salt.state:
- tgt: 'G@role:so-manager or G@role:so-managerhype or G@role:so-managersearch or G@role:so-standalone or G@role:so-eval'
- tgt_type: compound
- sls:
- postgres.auth
- postgres.telegraf_users
- queue: True
{% if FANOUT_MINION %}
- pillar:
postgres_fanout_minion: {{ FANOUT_MINION }}
{% endif %}
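For testing, the same fan-out can in principle be replayed by hand through the orchestrate runner. A hedged sketch; the sls path and minion id below are assumptions, since this deleted file's name is not shown in the diff:

# Hypothetical manual replay of the reactor-driven orchestration.
salt-run state.orchestrate orch.telegraf_user_sync pillar='{"postgres_fanout_minion": "newnode_sensor"}'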
@@ -1,90 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %}

{% set DIGITS = "1234567890" %}
{% set LOWERCASE = "qwertyuiopasdfghjklzxcvbnm" %}
{% set UPPERCASE = "QWERTYUIOPASDFGHJKLZXCVBNM" %}
{% set SYMBOLS = "~!@#^&*()-_=+[]|;:,.<>?" %}
{% set CHARS = DIGITS~LOWERCASE~UPPERCASE~SYMBOLS %}
{% set so_postgres_user_pass = salt['pillar.get']('postgres:auth:users:so_postgres_user:pass', salt['random.get_str'](72, chars=CHARS)) %}

{# Per-minion Telegraf Postgres credentials. Merge currently-up minions with any #}
{# previously-known entries in pillar so existing passwords persist across runs. #}
{% set existing = salt['pillar.get']('postgres:auth:users', {}) %}
{% set up_minions = salt['saltutil.runner']('manage.up') or [] %}
{% set telegraf_users = {} %}
{% for key, entry in existing.items() %}
{%- if key.startswith('telegraf_') and entry.get('user') and entry.get('pass') %}
{%- do telegraf_users.update({key: entry}) %}
{%- endif %}
{% endfor %}
{% for mid in up_minions %}
{%- set safe = mid | replace('.','_') | replace('-','_') | lower %}
{%- set key = 'telegraf_' ~ safe %}
{%- if key not in telegraf_users %}
{%- do telegraf_users.update({key: {'user': 'so_telegraf_' ~ safe, 'pass': salt['random.get_str'](72, chars=CHARS)}}) %}
{%- endif %}
{% endfor %}

postgres_auth_pillar:
file.managed:
- name: /opt/so/saltstack/local/pillar/postgres/auth.sls
- mode: 640
- reload_pillar: True
- contents: |
postgres:
auth:
users:
so_postgres_user:
user: so_postgres
pass: "{{ so_postgres_user_pass }}"
{% for key, entry in telegraf_users.items() %}
{{ key }}:
user: {{ entry.user }}
pass: "{{ entry.pass }}"
{% endfor %}
- show_changes: False

{# Fan a specific minion's telegraf cred out to its own pillar file. Only
runs when postgres_fanout_minion pillar is provided — otherwise this state
is a no-op. That keeps manager highstates from doing N so-yaml.py forks
when nothing changed. The reactor passes postgres_fanout_minion through
the orch on salt-key accept; soup handles bulk backfill separately. #}
{% set fanout_mid = salt['pillar.get']('postgres_fanout_minion') %}
{% if fanout_mid %}
{%- set safe = fanout_mid | replace('.','_') | replace('-','_') | lower %}
{%- set key = 'telegraf_' ~ safe %}
{%- set entry = telegraf_users.get(key) %}
{%- if entry %}

postgres_telegraf_minion_pillar_{{ safe }}:
cmd.run:
- name: |
set -e
PILLAR_FILE=/opt/so/saltstack/local/pillar/minions/{{ fanout_mid }}.sls
if [ ! -f "$PILLAR_FILE" ]; then
echo '{}' > "$PILLAR_FILE"
chown socore:socore "$PILLAR_FILE" 2>/dev/null || true
chmod 640 "$PILLAR_FILE"
fi
/usr/sbin/so-yaml.py replace "$PILLAR_FILE" postgres.telegraf.user '{{ entry.user }}'
/usr/sbin/so-yaml.py replace "$PILLAR_FILE" postgres.telegraf.pass '{{ entry.pass }}'
- unless: |
[ "$(/usr/sbin/so-yaml.py get -r /opt/so/saltstack/local/pillar/minions/{{ fanout_mid }}.sls postgres.telegraf.user 2>/dev/null)" = '{{ entry.user }}' ]
- require:
- file: postgres_auth_pillar

{%- endif %}
{% endif %}
{% else %}

{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed

{% endif %}
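Since the rendered pillar is written with show_changes: False, state output never reveals the credentials. A hedged way to audit which per-minion roles the template generated without printing any passwords:

# Lists generated role keys only; the pattern matches the 'telegraf_' ~ safe naming above.
grep -oE '^[[:space:]]*telegraf_[a-z0-9_]+:' /opt/so/saltstack/local/pillar/postgres/auth.sls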
@@ -1,111 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'postgres/map.jinja' import PGMERGED %}

# Postgres Setup
postgresconfdir:
file.directory:
- name: /opt/so/conf/postgres
- user: 939
- group: 939
- makedirs: True

postgressecretsdir:
file.directory:
- name: /opt/so/conf/postgres/secrets
- user: 939
- group: 939
- mode: 700
- require:
- file: postgresconfdir

postgresdatadir:
file.directory:
- name: /nsm/postgres
- user: 939
- group: 939
- makedirs: True

postgreslogdir:
file.directory:
- name: /opt/so/log/postgres
- user: 939
- group: 939
- makedirs: True

postgresinitdir:
file.directory:
- name: /opt/so/conf/postgres/init
- user: 939
- group: 939
- require:
- file: postgresconfdir

postgresinitusers:
file.managed:
- name: /opt/so/conf/postgres/init/init-users.sh
- source: salt://postgres/files/init-users.sh
- user: 939
- group: 939
- mode: 755

postgresconf:
file.managed:
- name: /opt/so/conf/postgres/postgresql.conf
- source: salt://postgres/files/postgresql.conf.jinja
- user: 939
- group: 939
- template: jinja
- defaults:
PGMERGED: {{ PGMERGED }}

postgreshba:
file.managed:
- name: /opt/so/conf/postgres/pg_hba.conf
- source: salt://postgres/files/pg_hba.conf
- user: 939
- group: 939
- mode: 640

postgres_super_secret:
file.managed:
- name: /opt/so/conf/postgres/secrets/postgres_password
- user: 939
- group: 939
- mode: 600
- contents_pillar: 'secrets:postgres_pass'
- show_changes: False
- require:
- file: postgressecretsdir

postgres_app_secret:
file.managed:
- name: /opt/so/conf/postgres/secrets/so_postgres_pass
- user: 939
- group: 939
- mode: 600
- contents_pillar: 'postgres:auth:users:so_postgres_user:pass'
- show_changes: False
- require:
- file: postgressecretsdir

postgres_sbin:
file.recurse:
- name: /usr/sbin
- source: salt://postgres/tools/sbin
- user: root
- group: root
- file_mode: 755

{% else %}

{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed

{% endif %}
@@ -1,19 +0,0 @@
postgres:
enabled: True
telegraf:
retention_days: 14
config:
listen_addresses: '*'
port: 5432
max_connections: 100
shared_buffers: 256MB
ssl: 'on'
ssl_cert_file: '/conf/postgres.crt'
ssl_key_file: '/conf/postgres.key'
ssl_ca_file: '/conf/ca.crt'
hba_file: '/conf/pg_hba.conf'
log_destination: 'stderr'
logging_collector: 'off'
log_min_messages: 'warning'
shared_preload_libraries: pg_cron
cron.database_name: so_telegraf
@@ -1,33 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}

include:
- postgres.sostatus

so-postgres:
docker_container.absent:
- force: True

so-postgres_so-status.disabled:
file.comment:
- name: /opt/so/conf/so-status/so-status.conf
- regex: ^so-postgres$

so_postgres_backup:
cron.absent:
- name: /usr/sbin/so-postgres-backup > /dev/null 2>&1
- identifier: so_postgres_backup
- user: root

{% else %}

{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed

{% endif %}
@@ -1,109 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'docker/docker.map.jinja' import DOCKERMERGED %}
{% set SO_POSTGRES_USER = salt['pillar.get']('postgres:auth:users:so_postgres_user:user', 'so_postgres') %}

include:
- postgres.auth
- postgres.ssl
- postgres.config
- postgres.sostatus
- postgres.telegraf_users

so-postgres:
docker_container.running:
- image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-postgres:{{ GLOBALS.so_version }}
- hostname: so-postgres
- networks:
- sobridge:
- ipv4_address: {{ DOCKERMERGED.containers['so-postgres'].ip }}
- port_bindings:
{% for BINDING in DOCKERMERGED.containers['so-postgres'].port_bindings %}
- {{ BINDING }}
{% endfor %}
- environment:
- POSTGRES_DB=securityonion
# Passwords are delivered via mounted 0600 secret files, not plaintext env vars.
# The upstream postgres image resolves POSTGRES_PASSWORD_FILE; entrypoint.sh and
# init-users.sh resolve SO_POSTGRES_PASS_FILE the same way.
- POSTGRES_PASSWORD_FILE=/run/secrets/postgres_password
- SO_POSTGRES_USER={{ SO_POSTGRES_USER }}
- SO_POSTGRES_PASS_FILE=/run/secrets/so_postgres_pass
{% if DOCKERMERGED.containers['so-postgres'].extra_env %}
{% for XTRAENV in DOCKERMERGED.containers['so-postgres'].extra_env %}
- {{ XTRAENV }}
{% endfor %}
{% endif %}
- binds:
- /opt/so/log/postgres/:/log:rw
- /nsm/postgres:/var/lib/postgresql/data:rw
- /opt/so/conf/postgres/postgresql.conf:/conf/postgresql.conf:ro
- /opt/so/conf/postgres/pg_hba.conf:/conf/pg_hba.conf:ro
- /opt/so/conf/postgres/secrets:/run/secrets:ro
- /opt/so/conf/postgres/init/init-users.sh:/docker-entrypoint-initdb.d/init-users.sh:ro
- /etc/pki/postgres.crt:/conf/postgres.crt:ro
- /etc/pki/postgres.key:/conf/postgres.key:ro
- /etc/pki/tls/certs/intca.crt:/conf/ca.crt:ro
{% if DOCKERMERGED.containers['so-postgres'].custom_bind_mounts %}
{% for BIND in DOCKERMERGED.containers['so-postgres'].custom_bind_mounts %}
- {{ BIND }}
{% endfor %}
{% endif %}
{% if DOCKERMERGED.containers['so-postgres'].extra_hosts %}
- extra_hosts:
{% for XTRAHOST in DOCKERMERGED.containers['so-postgres'].extra_hosts %}
- {{ XTRAHOST }}
{% endfor %}
{% endif %}
{% if DOCKERMERGED.containers['so-postgres'].ulimits %}
- ulimits:
{% for ULIMIT in DOCKERMERGED.containers['so-postgres'].ulimits %}
- {{ ULIMIT.name }}={{ ULIMIT.soft }}:{{ ULIMIT.hard }}
{% endfor %}
{% endif %}
- watch:
- file: postgresconf
- file: postgreshba
- file: postgresinitusers
- file: postgres_super_secret
- file: postgres_app_secret
- x509: postgres_crt
- x509: postgres_key
- require:
- file: postgresconf
- file: postgreshba
- file: postgresinitusers
- file: postgres_super_secret
- file: postgres_app_secret
- x509: postgres_crt
- x509: postgres_key

delete_so-postgres_so-status.disabled:
file.uncomment:
- name: /opt/so/conf/so-status/so-status.conf
- regex: ^so-postgres$

so_postgres_backup:
cron.present:
- name: /usr/sbin/so-postgres-backup > /dev/null 2>&1
- identifier: so_postgres_backup
- user: root
- minute: '5'
- hour: '0'
- daymonth: '*'
- month: '*'
- dayweek: '*'

{% else %}

{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed

{% endif %}
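Because the passwords ride in as mounted files rather than environment variables, a container inspect will not leak them, and their presence can be checked directly. A hedged sanity check:

# Verifies the secret files are mounted and readable inside the container.
docker exec so-postgres sh -c 'test -r /run/secrets/postgres_password && test -r /run/secrets/so_postgres_pass && echo "secrets mounted"'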
@@ -1,34 +0,0 @@
#!/bin/bash
set -e

# Create or update application user for SOC platform access
# This script runs on first database initialization via docker-entrypoint-initdb.d
# The password is properly escaped to handle special characters
if [ -z "${SO_POSTGRES_PASS:-}" ] && [ -n "${SO_POSTGRES_PASS_FILE:-}" ] && [ -r "$SO_POSTGRES_PASS_FILE" ]; then
SO_POSTGRES_PASS="$(< "$SO_POSTGRES_PASS_FILE")"
fi
psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" <<-EOSQL
DO \$\$
BEGIN
IF NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = '${SO_POSTGRES_USER}') THEN
EXECUTE format('CREATE ROLE %I WITH LOGIN PASSWORD %L', '${SO_POSTGRES_USER}', '${SO_POSTGRES_PASS}');
ELSE
EXECUTE format('ALTER ROLE %I WITH PASSWORD %L', '${SO_POSTGRES_USER}', '${SO_POSTGRES_PASS}');
END IF;
END
\$\$;
GRANT ALL PRIVILEGES ON DATABASE "$POSTGRES_DB" TO "$SO_POSTGRES_USER";
-- Lock the SOC database down at the connect layer; PUBLIC gets CONNECT
-- by default, which would let per-minion telegraf roles open sessions
-- here. They have no schema/table grants inside so reads fail, but
-- revoking CONNECT closes the soft edge entirely.
REVOKE CONNECT ON DATABASE "$POSTGRES_DB" FROM PUBLIC;
GRANT CONNECT ON DATABASE "$POSTGRES_DB" TO "$SO_POSTGRES_USER";
EOSQL

# Bootstrap the Telegraf metrics database. Per-minion roles + schemas are
# reconciled on every state.apply by postgres/telegraf_users.sls; this block
# only ensures the shared database exists on first initialization.
if ! psql -U "$POSTGRES_USER" -tAc "SELECT 1 FROM pg_database WHERE datname='so_telegraf'" | grep -q 1; then
psql -v ON_ERROR_STOP=1 -U "$POSTGRES_USER" -c "CREATE DATABASE so_telegraf"
fi
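The DO block relies on PostgreSQL's format() to keep the role name and password safe: %I quotes identifiers and %L quotes literals, doubling any embedded quote characters. A small ad hoc demo of that quoting behavior, with an illustrative name and password:

# Demo of %I/%L quoting; the sample values are hypothetical.
docker exec so-postgres psql -U postgres -tAc \
  "SELECT format('CREATE ROLE %I WITH LOGIN PASSWORD %L', 'so user', 'p''wd')"
# -> CREATE ROLE "so user" WITH LOGIN PASSWORD 'p''wd'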
@@ -1,16 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
#
# Managed by Salt — do not edit by hand.
# Client authentication config: only local (Unix socket) connections and TLS-wrapped TCP
# connections are accepted. Plain-text `host ...` lines are intentionally omitted so a
# misconfigured client with sslmode=disable cannot negotiate a cleartext session.

# Local connections (Unix socket, container-internal) use peer/trust.
local all all trust

# TCP connections MUST use TLS (hostssl) and authenticate with SCRAM.
hostssl all all 0.0.0.0/0 scram-sha-256
hostssl all all ::/0 scram-sha-256
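With these rules, a client that refuses TLS should be turned away before authentication, while an encrypted session proceeds to SCRAM. A hedged sketch from inside the container (a password prompt is expected on the allowed path):

# sslmode=disable should fail with a "no pg_hba.conf entry" error; sslmode=require proceeds.
docker exec -it so-postgres psql "host=127.0.0.1 user=postgres sslmode=disable" -c 'SELECT 1'
docker exec -it so-postgres psql "host=127.0.0.1 user=postgres sslmode=require" -c 'SELECT 1'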
@@ -1,8 +0,0 @@
{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
https://securityonion.net/license; you may not use this file except in compliance with the
Elastic License 2.0. #}

{% for key, value in PGMERGED.config.items() %}
{{ key }} = '{{ value | string | replace("'", "''") }}'
{% endfor %}
@@ -1,13 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% from 'postgres/map.jinja' import PGMERGED %}

include:
{% if PGMERGED.enabled %}
- postgres.enabled
{% else %}
- postgres.disabled
{% endif %}
@@ -1,7 +0,0 @@
{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
https://securityonion.net/license; you may not use this file except in compliance with the
Elastic License 2.0. #}

{% import_yaml 'postgres/defaults.yaml' as PGDEFAULTS %}
{% set PGMERGED = salt['pillar.get']('postgres', PGDEFAULTS.postgres, merge=True) %}
@@ -1,89 +0,0 @@
postgres:
enabled:
description: Whether the PostgreSQL database container is enabled on this grid. Backs the assistant store and the Telegraf metrics database.
forcedType: bool
readonly: True
helpLink: influxdb
telegraf:
retention_days:
description: Number of days of Telegraf metrics to keep in the so_telegraf database. Older partitions are dropped hourly by pg_partman.
forcedType: int
helpLink: postgres
config:
max_connections:
description: Maximum number of concurrent PostgreSQL connections.
forcedType: int
global: True
helpLink: postgres
shared_buffers:
description: Amount of memory PostgreSQL uses for shared buffers (e.g. 256MB, 1GB). Raising this improves read cache hit rate at the cost of system RAM.
global: True
helpLink: postgres
log_min_messages:
description: Minimum severity of server messages written to the PostgreSQL log.
options:
- debug1
- info
- notice
- warning
- error
- log
- fatal
global: True
helpLink: postgres
listen_addresses:
description: Interfaces PostgreSQL listens on. Must remain '*' so clients on the docker bridge network can connect.
global: True
advanced: True
helpLink: postgres
port:
description: TCP port PostgreSQL listens on inside the container. Firewall rules and container port mapping assume 5432.
forcedType: int
global: True
advanced: True
helpLink: postgres
ssl:
description: Whether PostgreSQL accepts TLS connections. Must remain 'on' — pg_hba.conf requires hostssl for TCP.
global: True
advanced: True
helpLink: postgres
ssl_cert_file:
description: Path (inside the container) to the TLS server certificate. Salt-managed.
global: True
advanced: True
helpLink: postgres
ssl_key_file:
description: Path (inside the container) to the TLS server private key. Salt-managed.
global: True
advanced: True
helpLink: postgres
ssl_ca_file:
description: Path (inside the container) to the CA bundle PostgreSQL uses to verify client certificates. Salt-managed.
global: True
advanced: True
helpLink: postgres
hba_file:
description: Path (inside the container) to the pg_hba.conf authentication file. Salt-managed — edit salt/postgres/files/pg_hba.conf.
global: True
advanced: True
helpLink: postgres
log_destination:
description: Where PostgreSQL writes its server log. 'stderr' routes to the container log stream.
global: True
advanced: True
helpLink: postgres
logging_collector:
description: Whether to run a separate logging collector process. Disabled because the docker log stream already captures stderr.
global: True
advanced: True
helpLink: postgres
shared_preload_libraries:
description: Comma-separated list of extensions loaded at server start. Required for pg_cron which drives pg_partman maintenance — do not remove.
global: True
advanced: True
helpLink: postgres
cron.database_name:
description: Database pg_cron schedules jobs in. Must be so_telegraf so partman maintenance runs in the right database context.
global: True
advanced: True
helpLink: postgres
@@ -1,21 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}

append_so-postgres_so-status.conf:
file.append:
- name: /opt/so/conf/so-status/so-status.conf
- text: so-postgres
- unless: grep -q so-postgres /opt/so/conf/so-status/so-status.conf

{% else %}

{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed

{% endif %}
@@ -1,55 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'ca/map.jinja' import CA %}

postgres_key:
x509.private_key_managed:
- name: /etc/pki/postgres.key
- keysize: 4096
- backup: True
- new: True
{% if salt['file.file_exists']('/etc/pki/postgres.key') -%}
- prereq:
- x509: /etc/pki/postgres.crt
{%- endif %}
- retry:
attempts: 5
interval: 30

postgres_crt:
x509.certificate_managed:
- name: /etc/pki/postgres.crt
- ca_server: {{ CA.server }}
- subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
- signing_policy: postgres
- private_key: /etc/pki/postgres.key
- CN: {{ GLOBALS.hostname }}
- days_remaining: 7
- days_valid: 820
- backup: True
- timeout: 30
- retry:
attempts: 5
interval: 30

postgresKeyperms:
file.managed:
- replace: False
- name: /etc/pki/postgres.key
- mode: 400
- user: 939
- group: 939

{% else %}

{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed

{% endif %}
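The issued certificate's subject and SANs can be confirmed on disk once the state has run. A hedged one-liner, assuming OpenSSL 1.1.1 or newer for the -ext flag:

# Shows the CN plus the DNS/IP subjectAltName entries requested above.
openssl x509 -in /etc/pki/postgres.crt -noout -subject -ext subjectAltName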
@@ -1,157 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'telegraf/map.jinja' import TELEGRAFMERGED %}

{# postgres_wait_ready below requires `docker_container: so-postgres`, which is
declared in postgres.enabled. Include it here so state.apply postgres.telegraf_users
on its own (from the reactor orch or from soup) still has that ID in scope. Salt
de-duplicates the circular include. #}
include:
- postgres.enabled

{% set TG_OUT = TELEGRAFMERGED.output | upper %}
{% if TG_OUT in ['POSTGRES', 'BOTH'] %}

# docker_container.running returns as soon as the container starts, but on
# first-init docker-entrypoint.sh starts a temporary postgres with
# `listen_addresses=''` to run /docker-entrypoint-initdb.d scripts, then
# shuts it down before exec'ing the real CMD. A default pg_isready check
# (Unix socket) passes during that ephemeral phase and races the shutdown
# with "the database system is shutting down". Checking TCP readiness on
# 127.0.0.1 only succeeds after the final postgres binds the port.
postgres_wait_ready:
cmd.run:
- name: |
for i in $(seq 1 60); do
if docker exec so-postgres pg_isready -h 127.0.0.1 -U postgres -q 2>/dev/null; then
exit 0
fi
sleep 2
done
echo "so-postgres did not accept TCP connections within 120s" >&2
exit 1
- require:
- docker_container: so-postgres

# Ensure the shared Telegraf database exists. init-users.sh only runs on a
# fresh data dir, so hosts upgraded onto an existing /nsm/postgres volume
# would otherwise never get so_telegraf.
postgres_create_telegraf_db:
cmd.run:
- name: |
if ! docker exec so-postgres psql -U postgres -tAc "SELECT 1 FROM pg_database WHERE datname='so_telegraf'" | grep -q 1; then
docker exec so-postgres psql -v ON_ERROR_STOP=1 -U postgres -c "CREATE DATABASE so_telegraf"
fi
- require:
- cmd: postgres_wait_ready

# Provision the shared group role and schema once. Every per-minion role is a
# member of so_telegraf, and each Telegraf connection does SET ROLE so_telegraf
# (via options='-c role=so_telegraf' in the connection string) so tables created
# on first write are owned by the group role and every member can INSERT/SELECT.
postgres_telegraf_group_role:
cmd.run:
- name: |
docker exec -i so-postgres psql -v ON_ERROR_STOP=1 -U postgres -d so_telegraf <<'EOSQL'
DO $$
BEGIN
IF NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = 'so_telegraf') THEN
CREATE ROLE so_telegraf NOLOGIN;
END IF;
END
$$;
GRANT CONNECT ON DATABASE so_telegraf TO so_telegraf;
CREATE SCHEMA IF NOT EXISTS telegraf AUTHORIZATION so_telegraf;
GRANT USAGE, CREATE ON SCHEMA telegraf TO so_telegraf;
CREATE SCHEMA IF NOT EXISTS partman;
CREATE EXTENSION IF NOT EXISTS pg_partman SCHEMA partman;
CREATE EXTENSION IF NOT EXISTS pg_cron;
-- Telegraf (running as so_telegraf) calls partman.create_parent()
-- on first write of each metric, which needs USAGE on the partman
-- schema, EXECUTE on its functions/procedures, and write access to
-- partman.part_config so it can register new partitioned parents.
GRANT USAGE, CREATE ON SCHEMA partman TO so_telegraf;
GRANT SELECT, INSERT, UPDATE, DELETE ON ALL TABLES IN SCHEMA partman TO so_telegraf;
GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA partman TO so_telegraf;
GRANT EXECUTE ON ALL PROCEDURES IN SCHEMA partman TO so_telegraf;
-- partman creates per-parent template tables (partman.template_*) at
-- runtime; default privileges extend DML/sequence access to them.
ALTER DEFAULT PRIVILEGES IN SCHEMA partman
GRANT SELECT, INSERT, UPDATE, DELETE ON TABLES TO so_telegraf;
ALTER DEFAULT PRIVILEGES IN SCHEMA partman
GRANT USAGE, SELECT, UPDATE ON SEQUENCES TO so_telegraf;
-- Hourly partman maintenance. cron.schedule is idempotent by jobname.
SELECT cron.schedule(
'telegraf-partman-maintenance',
'17 * * * *',
'CALL partman.run_maintenance_proc()'
);
EOSQL
- require:
- cmd: postgres_create_telegraf_db

{% set users = salt['pillar.get']('postgres:auth:users', {}) %}
{% for key, entry in users.items() %}
{% if key.startswith('telegraf_') and entry.get('user') and entry.get('pass') %}
{% set u = entry.user %}
{% set p = entry.pass | replace("'", "''") %}

postgres_telegraf_role_{{ u }}:
cmd.run:
- name: |
docker exec -i so-postgres psql -v ON_ERROR_STOP=1 -U postgres -d so_telegraf <<'EOSQL'
DO $$
BEGIN
IF NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = '{{ u }}') THEN
EXECUTE format('CREATE ROLE %I WITH LOGIN PASSWORD %L', '{{ u }}', '{{ p }}');
ELSE
EXECUTE format('ALTER ROLE %I WITH PASSWORD %L', '{{ u }}', '{{ p }}');
END IF;
END
$$;
GRANT CONNECT ON DATABASE so_telegraf TO "{{ u }}";
GRANT so_telegraf TO "{{ u }}";
EOSQL
- require:
- cmd: postgres_telegraf_group_role

{% endif %}
{% endfor %}

# Reconcile partman retention from pillar. Runs after role/schema setup so
# any partitioned parents Telegraf has already created get their retention
# refreshed whenever postgres.telegraf.retention_days changes.
{% set retention = salt['pillar.get']('postgres:telegraf:retention_days', 14) | int %}
postgres_telegraf_retention_reconcile:
cmd.run:
- name: |
docker exec -i so-postgres psql -v ON_ERROR_STOP=1 -U postgres -d so_telegraf <<'EOSQL'
DO $$
BEGIN
IF EXISTS (SELECT 1 FROM pg_catalog.pg_extension WHERE extname = 'pg_partman') THEN
UPDATE partman.part_config
SET retention = '{{ retention }} days',
retention_keep_table = false
WHERE parent_table LIKE 'telegraf.%';
END IF;
END
$$;
EOSQL
- require:
- cmd: postgres_telegraf_group_role

{% endif %}

{% else %}

{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed

{% endif %}
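After changing postgres.telegraf.retention_days and re-applying this state, the new retention should be visible in partman's config table. A hedged verification query; the output depends on which metric parents Telegraf has created so far:

# Lists each telegraf parent table and its current retention window.
docker exec so-postgres psql -U postgres -d so_telegraf -tAc \
  "SELECT parent_table, retention FROM partman.part_config WHERE parent_table LIKE 'telegraf.%'"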
@@ -1,39 +0,0 @@
#!/bin/bash
#
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

. /usr/sbin/so-common

# Backups contain role password hashes and full chat data; keep them 0600.
umask 0077

TODAY=$(date '+%Y_%m_%d')
BACKUPDIR=/nsm/backup
BACKUPFILE="$BACKUPDIR/so-postgres-backup-$TODAY.sql.gz"
MAXBACKUPS=7

mkdir -p $BACKUPDIR

# Skip if already backed up today
if [ -f "$BACKUPFILE" ]; then
exit 0
fi

# Skip if container isn't running
if ! docker ps --format '{{.Names}}' | grep -q '^so-postgres$'; then
exit 0
fi

# Dump all databases and roles, compress
docker exec so-postgres pg_dumpall -U postgres | gzip > "$BACKUPFILE"

# Retention cleanup
NUMBACKUPS=$(find $BACKUPDIR -type f -name "so-postgres-backup*" | wc -l)
while [ "$NUMBACKUPS" -gt "$MAXBACKUPS" ]; do
OLDEST=$(find $BACKUPDIR -type f -name "so-postgres-backup*" -printf '%T+ %p\n' | sort | head -n 1 | awk -F" " '{print $2}')
rm -f "$OLDEST"
NUMBACKUPS=$(find $BACKUPDIR -type f -name "so-postgres-backup*" | wc -l)
done
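Restoring is the reverse of the pg_dumpall | gzip pipeline: pg_dumpall emits plain SQL, so it replays through psql. A hedged sketch, with the date in the filename illustrative:

# Stop writers first if possible; this replays roles and all databases.
gunzip -c /nsm/backup/so-postgres-backup-2026_01_01.sql.gz | docker exec -i so-postgres psql -U postgres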
@@ -1,80 +0,0 @@
#!/bin/bash

# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

. /usr/sbin/so-common

usage() {
  echo "Usage: $0 <operation> [args]"
  echo ""
  echo "Supported Operations:"
  echo "  sql       Execute a SQL command, requires: <sql>"
  echo "  sqlfile   Execute a SQL file, requires: <path>"
  echo "  shell     Open an interactive psql shell"
  echo "  dblist    List databases"
  echo "  userlist  List database roles"
  echo ""
  exit 1
}

if [ $# -lt 1 ]; then
  usage
fi

# Check for prerequisites
if [ "$(id -u)" -ne 0 ]; then
  echo "This script must be run using sudo!"
  exit 1
fi

COMMAND=$(basename $0)
OP=$1
shift

set -eo pipefail

log() {
  echo -e "$(date) | $COMMAND | $@" >&2
}

so_psql() {
  docker exec so-postgres psql -U postgres -d securityonion "$@"
}

case "$OP" in

  sql)
    [ $# -lt 1 ] && usage
    so_psql -c "$1"
    ;;

  sqlfile)
    [ $# -ne 1 ] && usage
    if [ ! -f "$1" ]; then
      log "File not found: $1"
      exit 1
    fi
    docker cp "$1" so-postgres:/tmp/sqlfile.sql
    docker exec so-postgres psql -U postgres -d securityonion -f /tmp/sqlfile.sql
    docker exec so-postgres rm -f /tmp/sqlfile.sql
    ;;

  shell)
    docker exec -it so-postgres psql -U postgres -d securityonion
    ;;

  dblist)
    so_psql -c "\l"
    ;;

  userlist)
    so_psql -c "\du"
    ;;

  *)
    usage
    ;;
esac

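Example invocations, assuming the script above is installed on the PATH as so-postgres-query (the SQL file path below is a placeholder):

sudo so-postgres-query dblist
sudo so-postgres-query sql "SELECT now();"
sudo so-postgres-query sqlfile /tmp/changes.sql
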
@@ -1,10 +0,0 @@
#!/bin/bash

# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

. /usr/sbin/so-common

/usr/sbin/so-restart postgres $1

@@ -1,10 +0,0 @@
#!/bin/bash

# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

. /usr/sbin/so-common

/usr/sbin/so-start postgres $1

@@ -1,10 +0,0 @@
#!/bin/bash

# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

. /usr/sbin/so-common

/usr/sbin/so-stop postgres $1

@@ -1,157 +0,0 @@
#!/bin/bash

# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

# Point-in-time host metrics from the Telegraf Postgres backend.
# Sanity-check tool for verifying metrics are landing before the grid
# dashboards consume them.
#
# Assumes Telegraf's postgresql output is configured with
# tags_as_foreign_keys = true, tags_as_jsonb = true, fields_as_jsonb = true,
# so metric tables are (time, tag_id, fields jsonb) and tag tables are
# (tag_id, tags jsonb).

. /usr/sbin/so-common

usage() {
  cat <<EOF
Usage: $0 [host]

Shows the most recent CPU, memory, disk, and load metrics for each host
from the so_telegraf Postgres database. Without an argument, reports on
every host that has data. With a host, limits output to that one.

Requires: sudo, so-postgres running, telegraf.output set to POSTGRES or BOTH.
EOF
  exit 1
}

if [ "$(id -u)" -ne 0 ]; then
  echo "This script must be run using sudo!"
  exit 1
fi

case "${1:-}" in
  -h|--help) usage ;;
esac

FILTER_HOST="${1:-}"
SCHEMA="telegraf"

# Host values are interpolated into SQL below. Hostnames are [A-Za-z0-9._-];
# any other character in a tag value or CLI arg is rejected to prevent a
# stored-tag (or CLI) → SQL injection via a compromised Telegraf writer.
HOST_RE='^[A-Za-z0-9._-]+$'
if [ -n "$FILTER_HOST" ] && ! [[ "$FILTER_HOST" =~ $HOST_RE ]]; then
  echo "Invalid host filter: $FILTER_HOST" >&2
  exit 1
fi

so_psql() {
  docker exec so-postgres psql -U postgres -d so_telegraf -At -F $'\t' "$@"
}

if ! docker exec so-postgres psql -U postgres -lqt 2>/dev/null | cut -d\| -f1 | grep -qw so_telegraf; then
  echo "Database so_telegraf not found. Is telegraf.output set to POSTGRES or BOTH?"
  exit 2
fi

table_exists() {
  local table="$1"
  [ -n "$(so_psql -c "SELECT 1 FROM information_schema.tables WHERE table_schema='${SCHEMA}' AND table_name='${table}' LIMIT 1;")" ]
}

# Discover hosts from cpu_tag (every minion reports cpu).
if ! table_exists "cpu_tag"; then
  echo "${SCHEMA}.cpu_tag not found. Has Telegraf written any rows yet?"
  exit 0
fi

HOSTS=$(so_psql -c "
  SELECT DISTINCT tags->>'host'
  FROM \"${SCHEMA}\".cpu_tag
  WHERE tags ? 'host'
  ORDER BY 1;")

if [ -z "$HOSTS" ]; then
  echo "No hosts found in ${SCHEMA}. Is Telegraf configured to write to Postgres?"
  exit 0
fi

print_metric() {
  so_psql -c "$1"
}

for host in $HOSTS; do
  if ! [[ "$host" =~ $HOST_RE ]]; then
    echo "Skipping host with invalid characters in tag value: $host" >&2
    continue
  fi
  if [ -n "$FILTER_HOST" ] && [ "$host" != "$FILTER_HOST" ]; then
    continue
  fi

  echo "===================================================================="
  echo " Host: $host"
  echo "===================================================================="

  if table_exists "cpu"; then
    print_metric "
      SELECT 'cpu ' AS metric,
             to_char(c.time, 'YYYY-MM-DD HH24:MI:SS') AS ts,
             round((100 - (c.fields->>'usage_idle')::numeric), 1) || '% used'
      FROM \"${SCHEMA}\".cpu c
      JOIN \"${SCHEMA}\".cpu_tag t USING (tag_id)
      WHERE t.tags->>'host' = '${host}' AND t.tags->>'cpu' = 'cpu-total'
      ORDER BY c.time DESC LIMIT 1;"
  fi

  if table_exists "mem"; then
    print_metric "
      SELECT 'memory ' AS metric,
             to_char(m.time, 'YYYY-MM-DD HH24:MI:SS') AS ts,
             round((m.fields->>'used_percent')::numeric, 1) || '% used (' ||
             pg_size_pretty((m.fields->>'used')::bigint) || ' of ' ||
             pg_size_pretty((m.fields->>'total')::bigint) || ')'
      FROM \"${SCHEMA}\".mem m
      JOIN \"${SCHEMA}\".mem_tag t USING (tag_id)
      WHERE t.tags->>'host' = '${host}'
      ORDER BY m.time DESC LIMIT 1;"
  fi

  if table_exists "disk"; then
    print_metric "
      SELECT 'disk ' || rpad(t.tags->>'path', 12) AS metric,
             to_char(d.time, 'YYYY-MM-DD HH24:MI:SS') AS ts,
             round((d.fields->>'used_percent')::numeric, 1) || '% used (' ||
             pg_size_pretty((d.fields->>'used')::bigint) || ' of ' ||
             pg_size_pretty((d.fields->>'total')::bigint) || ')'
      FROM \"${SCHEMA}\".disk d
      JOIN \"${SCHEMA}\".disk_tag t USING (tag_id)
      WHERE t.tags->>'host' = '${host}'
        AND d.time = (SELECT max(d2.time)
                      FROM \"${SCHEMA}\".disk d2
                      JOIN \"${SCHEMA}\".disk_tag t2 USING (tag_id)
                      WHERE t2.tags->>'host' = '${host}')
      ORDER BY t.tags->>'path';"
  fi

  if table_exists "system"; then
    print_metric "
      SELECT 'load ' AS metric,
             to_char(s.time, 'YYYY-MM-DD HH24:MI:SS') AS ts,
             (s.fields->>'load1') || ' / ' ||
             (s.fields->>'load5') || ' / ' ||
             (s.fields->>'load15') || ' (1/5/15m)'
      FROM \"${SCHEMA}\".system s
      JOIN \"${SCHEMA}\".system_tag t USING (tag_id)
      WHERE t.tags->>'host' = '${host}'
      ORDER BY s.time DESC LIMIT 1;"
  fi

  echo ""
done

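The (time, tag_id, fields jsonb) plus (tag_id, tags jsonb) layout described in the header comment means every ad-hoc query joins a metric table to its *_tag twin. A minimal hand-run example against the same schema (the hostname is a placeholder):

# Latest total-CPU sample for one host.
docker exec so-postgres psql -U postgres -d so_telegraf -c \
  "SELECT c.time, c.fields->>'usage_idle' AS idle_pct
   FROM telegraf.cpu c JOIN telegraf.cpu_tag t USING (tag_id)
   WHERE t.tags->>'host' = 'manager01' AND t.tags->>'cpu' = 'cpu-total'
   ORDER BY c.time DESC LIMIT 1;"
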
@@ -1,18 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{# Fires on salt/key. Only act on successful key acceptance — not reauth. #}
{% if data.get('act') == 'accept' and data.get('result') == True and data.get('id') %}

{{ data['id'] }}_telegraf_pg_sync:
  runner.state.orchestrate:
    - args:
      - mods: orch.telegraf_postgres_sync
      - pillar:
          postgres_fanout_minion: {{ data['id'] }}

{% do salt.log.info('telegraf_user_sync reactor: syncing telegraf PG user for minion %s' % data['id']) %}

{% endif %}

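For context, the guards above match the shape of a typical salt/key event. A hedged way to observe one from the master follows; the payload sketch in the comment is illustrative, not taken from this changeset.

# Stream salt/key events while accepting a minion key; acceptance events carry
# data roughly like {"act": "accept", "id": "<minion>", "result": true}, and
# 'act' can also be 'pend', 'reject', or 'delete', hence the 'accept' guard.
sudo salt-run state.event tagmatch='salt/key' pretty=True
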
@@ -62,19 +62,6 @@ engines_config:
    - name: /etc/salt/master.d/engines.conf
    - source: salt://salt/files/engines.conf

reactor_config_telegraf:
  file.managed:
    - name: /etc/salt/master.d/reactor_telegraf.conf
    - contents: |
        reactor:
          - 'salt/key':
            - /opt/so/saltstack/default/salt/reactor/telegraf_user_sync.sls
    - user: root
    - group: root
    - mode: 644
    - watch_in:
      - service: salt_master_service

# update the bootstrap script when used for salt-cloud
salt_bootstrap_cloud:
  file.managed:

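To sanity-check the wiring after the master restarts, the rendered mapping can be read back and the registered reactors listed. Treat this as a sketch: reactor.list is a stock salt-run runner, but availability can vary by Salt version.

sudo cat /etc/salt/master.d/reactor_telegraf.conf
sudo salt-run reactor.list
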
@@ -16,6 +16,21 @@ sensoroni:
  soc_host:
  suripcap:
    pcapMaxCount: 100000
  fileanalyze:
    enabled: False
    watchDirs:
      - /nsm/zeek/extracted/complete
    processedDir: /nsm/strelka/processed
    historyDir: /nsm/strelka/history
    logFile: /var/log/strelka/strelka.log
    concurrency: 8
    maxDepth: 15
    recycleSeconds: 300
    dedupMaxEntries: 100000
    dedupTTLSeconds: 3600
    yaraRulesPath: /opt/so/conf/strelka/rules/compiled/rules.compiled
    passwordsPath: /etc/strelka/passwords.dat
    scannerTimeout: 150
  analyzers:
    echotrail:
      base_url: https://api.echotrail.io/insights/

@@ -36,6 +36,22 @@
    "pcapInputPath": "/nsm/suripcap",
    "pcapOutputPath": "/nsm/pcapout",
    "pcapMaxCount": {{ SENSORONIMERGED.config.suripcap.pcapMaxCount }}
{%- endif %}
{%- if SENSORONIMERGED.config.fileanalyze.enabled %}
  },
  "fileanalyze": {
    "watchDirs": {{ SENSORONIMERGED.config.fileanalyze.watchDirs | tojson }},
    "processedDir": "{{ SENSORONIMERGED.config.fileanalyze.processedDir }}",
    "historyDir": "{{ SENSORONIMERGED.config.fileanalyze.historyDir }}",
    "logFile": "{{ SENSORONIMERGED.config.fileanalyze.logFile }}",
    "concurrency": {{ SENSORONIMERGED.config.fileanalyze.concurrency }},
    "maxDepth": {{ SENSORONIMERGED.config.fileanalyze.maxDepth }},
    "recycleSeconds": {{ SENSORONIMERGED.config.fileanalyze.recycleSeconds }},
    "dedupMaxEntries": {{ SENSORONIMERGED.config.fileanalyze.dedupMaxEntries }},
    "dedupTTLSeconds": {{ SENSORONIMERGED.config.fileanalyze.dedupTTLSeconds }},
    "yaraRulesPath": "{{ SENSORONIMERGED.config.fileanalyze.yaraRulesPath }}",
    "passwordsPath": "{{ SENSORONIMERGED.config.fileanalyze.passwordsPath }}",
    "scannerTimeout": {{ SENSORONIMERGED.config.fileanalyze.scannerTimeout }}
{%- endif %}
  }
}

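Because the fileanalyze block is only emitted when enabled, a render check on a sensor is worthwhile. The config path below is assumed from Security Onion's usual sensoroni layout and should be verified locally:

# Parse the rendered config and print the fileanalyze block (null if disabled).
sudo jq '.fileanalyze' /opt/so/conf/sensoroni/sensoroni.json
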
@@ -65,6 +65,60 @@ sensoroni:
        description: The maximum number of PCAP packets to extract from eligible PCAP files, for PCAP jobs. If there are issues fetching excessively large packet streams consider lowering this value to reduce the number of collected packets returned to the user interface.
        helpLink: pcap
        advanced: True
      fileanalyze:
        enabled:
          description: Enable or disable the file analysis module. When enabled, this replaces Strelka for file scanning on sensor nodes.
          forcedType: bool
          advanced: False
          helpLink: strelka
        watchDirs:
          description: Directories to watch for new files to analyze. Typically the Zeek or Suricata extracted file directories.
          advanced: True
          helpLink: strelka
        processedDir:
          description: Directory to move files to after scanning is complete.
          advanced: True
          helpLink: strelka
        historyDir:
          description: Directory for on-disk deduplication history. Each scanned file hash is recorded here to survive restarts.
          advanced: True
          helpLink: strelka
        logFile:
          description: Path to the JSON log file where scan results are written. This file is picked up by the existing filebeat pipeline.
          advanced: True
          helpLink: strelka
        concurrency:
          description: Maximum number of files to scan concurrently.
          advanced: True
          helpLink: strelka
        maxDepth:
          description: Maximum recursive extraction depth for nested archives.
          advanced: True
          helpLink: strelka
        recycleSeconds:
          description: Interval in seconds to recycle the file watcher to pick up new subdirectories.
          advanced: True
          helpLink: strelka
        dedupMaxEntries:
          description: Maximum number of entries in the in-memory deduplication cache.
          advanced: True
          helpLink: strelka
        dedupTTLSeconds:
          description: Time-to-live in seconds for deduplication cache entries.
          advanced: True
          helpLink: strelka
        yaraRulesPath:
          description: Path to compiled YARA rules file.
          advanced: True
          helpLink: strelka
        passwordsPath:
          description: Path to password dictionary for encrypted file cracking attempts.
          advanced: True
          helpLink: strelka
        scannerTimeout:
          description: Timeout in seconds for individual scanner execution.
          advanced: True
          helpLink: strelka
      analyzers:
        echotrail:
          api_key:

@@ -24,11 +24,6 @@

{% do SOCDEFAULTS.soc.config.server.modules.elastic.update({'username': GLOBALS.elasticsearch.auth.users.so_elastic_user.user, 'password': GLOBALS.elasticsearch.auth.users.so_elastic_user.pass}) %}

{% if GLOBALS.postgres is defined and GLOBALS.postgres.auth is defined %}
{% set PG_ADMIN_PASS = salt['pillar.get']('secrets:postgres_pass', '') %}
{% do SOCDEFAULTS.soc.config.server.modules.update({'postgres': {'hostUrl': GLOBALS.manager_ip, 'port': 5432, 'username': GLOBALS.postgres.auth.users.so_postgres_user.user, 'password': GLOBALS.postgres.auth.users.so_postgres_user.pass, 'adminUser': 'postgres', 'adminPassword': PG_ADMIN_PASS, 'dbname': 'securityonion', 'sslMode': 'require', 'assistantEnabled': true, 'esHostUrl': 'https://' ~ GLOBALS.manager_ip ~ ':9200', 'esUsername': GLOBALS.elasticsearch.auth.users.so_elastic_user.user, 'esPassword': GLOBALS.elasticsearch.auth.users.so_elastic_user.pass, 'esVerifyCert': false}}) %}
{% endif %}

{% do SOCDEFAULTS.soc.config.server.modules.influxdb.update({'hostUrl': 'https://' ~ GLOBALS.influxdb_host ~ ':8086'}) %}
{% do SOCDEFAULTS.soc.config.server.modules.influxdb.update({'token': INFLUXDB_TOKEN}) %}
{% for tool in SOCDEFAULTS.soc.config.server.client.tools %}

@@ -2687,5 +2687,4 @@ soc:
      lowBalanceColorAlert: 500000
      enabled: true
      adapter: SOAI
      charsPerTokenEstimate: 4

@@ -761,7 +761,7 @@ soc:
          required: True
        - field: origin
          label: Country of Origin for the Model Training
          required: False
          required: false
        - field: contextLimitSmall
          label: Context Limit (Small)
          forcedType: int
@@ -779,10 +779,6 @@ soc:
        - field: enabled
          label: Enabled
          forcedType: bool
        - field: charsPerTokenEstimate
          label: Characters per Token Estimate
          forcedType: float
          required: False
      apiTimeoutMs:
        description: Duration (in milliseconds) to wait for a response from the SOC server API before giving up and showing an error on the SOC UI.
        global: True

@@ -1,59 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'strelka/map.jinja' import STRELKAMERGED %}

include:
  - strelka.config
  - strelka.backend.sostatus

backend_backend_config:
  file.managed:
    - name: /opt/so/conf/strelka/backend/backend.yaml
    - source: salt://strelka/backend/files/backend.yaml.jinja
    - template: jinja
    - user: 939
    - group: 939
    - makedirs: True
    - defaults:
        BACKENDCONFIG: {{ STRELKAMERGED.backend.config.backend }}

backend_logging_config:
  file.managed:
    - name: /opt/so/conf/strelka/backend/logging.yaml
    - source: salt://strelka/backend/files/logging.yaml.jinja
    - template: jinja
    - user: 939
    - group: 939
    - defaults:
        LOGGINGCONFIG: {{ STRELKAMERGED.backend.config.logging }}

backend_passwords:
  file.managed:
    - name: /opt/so/conf/strelka/backend/passwords.dat
    - source: salt://strelka/backend/files/passwords.dat.jinja
    - template: jinja
    - user: 939
    - group: 939
    - defaults:
        PASSWORDS: {{ STRELKAMERGED.backend.config.passwords }}

backend_taste:
  file.managed:
    - name: /opt/so/conf/strelka/backend/taste/taste.yara
    - source: salt://strelka/backend/files/taste/taste.yara
    - makedirs: True
    - user: 939
    - group: 939

{% else %}

{{sls}}_state_not_allowed:
  test.fail_without_changes:
    - name: {{sls}}_state_not_allowed

{% endif %}

@@ -1,27 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}

include:
  - strelka.backend.sostatus

so-strelka-backend:
  docker_container.absent:
    - force: True

so-strelka-backend_so-status.disabled:
  file.comment:
    - name: /opt/so/conf/so-status/so-status.conf
    - regex: ^so-strelka-backend$

{% else %}

{{sls}}_state_not_allowed:
  test.fail_without_changes:
    - name: {{sls}}_state_not_allowed

{% endif %}

@@ -1,69 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'docker/docker.map.jinja' import DOCKERMERGED %}
{% from 'vars/globals.map.jinja' import GLOBALS %}

include:
  - strelka.backend.config
  - strelka.backend.sostatus

strelka_backend:
  docker_container.running:
    - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-strelka-backend:{{ GLOBALS.so_version }}
    - binds:
      - /opt/so/conf/strelka/backend/:/etc/strelka/:ro
      - /opt/so/conf/strelka/rules/compiled/:/etc/yara/:ro
{% if DOCKERMERGED.containers['so-strelka-backend'].custom_bind_mounts %}
{% for BIND in DOCKERMERGED.containers['so-strelka-backend'].custom_bind_mounts %}
      - {{ BIND }}
{% endfor %}
{% endif %}
    - name: so-strelka-backend
    - networks:
      - sobridge:
        - ipv4_address: {{ DOCKERMERGED.containers['so-strelka-backend'].ip }}
    - command: strelka-backend
    - extra_hosts:
      - {{ GLOBALS.hostname }}:{{ GLOBALS.node_ip }}
{% if DOCKERMERGED.containers['so-strelka-backend'].extra_hosts %}
{% for XTRAHOST in DOCKERMERGED.containers['so-strelka-backend'].extra_hosts %}
      - {{ XTRAHOST }}
{% endfor %}
{% endif %}
{% if DOCKERMERGED.containers['so-strelka-backend'].extra_env %}
    - environment:
{% for XTRAENV in DOCKERMERGED.containers['so-strelka-backend'].extra_env %}
      - {{ XTRAENV }}
{% endfor %}
{% endif %}
{% if DOCKERMERGED.containers['so-strelka-backend'].ulimits %}
    - ulimits:
{% for ULIMIT in DOCKERMERGED.containers['so-strelka-backend'].ulimits %}
      - {{ ULIMIT.name }}={{ ULIMIT.soft }}:{{ ULIMIT.hard }}
{% endfor %}
{% endif %}
    - restart_policy: on-failure
    - watch:
      - file: strelkasensorcompiledrules
      - file: backend_backend_config
      - file: backend_logging_config
      - file: backend_passwords
      - file: backend_taste

delete_so-strelka-backend_so-status.disabled:
  file.uncomment:
    - name: /opt/so/conf/so-status/so-status.conf
    - regex: ^so-strelka-backend$

{% else %}

{{sls}}_state_not_allowed:
  test.fail_without_changes:
    - name: {{sls}}_state_not_allowed

{% endif %}

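Once a highstate applies the enabled state, the container and its so-status registration can be confirmed directly (plain docker and grep, nothing specific to this changeset):

sudo docker ps --filter name=so-strelka-backend --format '{{.Names}}\t{{.Status}}'
grep so-strelka-backend /opt/so/conf/so-status/so-status.conf
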
@@ -1 +0,0 @@
{{ BACKENDCONFIG | yaml(false) }}
@@ -1 +0,0 @@
{{ LOGGINGCONFIG | yaml(false) }}
@@ -1 +0,0 @@
{{ PASSWORDS | join('\n') }}

@@ -1,748 +0,0 @@
// Archive Files

rule _7zip_file
{
    meta:
        type = "archive"
    strings:
        $a = { 37 7A BC AF 27 1C }
    condition:
        $a at 0
}

rule arj_file
{
    meta:
        type = "archive"
    condition:
        uint16(0) == 0xEA60
}

rule cab_file
{
    meta:
        type = "archive"
    strings:
        $a = { 4D 53 43 46 00 00 00 00 }
    condition:
        $a at 0 or
        ( uint16(0) == 0x5A4D and $a )
}

rule cpio_file
{
    meta:
        type = "archive"
    strings:
        $a = { 30 37 30 37 30 31 }
    condition:
        $a at 0
}

rule iso_file
{
    meta:
        type = "archive"
    strings:
        $a = { 43 44 30 30 31 }
    condition:
        $a at 0x8001 and $a at 0x8801 and $a at 0x9001
}

rule mhtml_file
{
    meta:
        type = "archive"
    strings:
        $a = "MIME-Version: 1.0"
        $b = "This document is a Single File Web Page, also known as a Web Archive file"
    condition:
        $a at 0 and $b
}

rule rar_file
{
    meta:
        type = "archive"
    condition:
        uint16(0) == 0x6152 and uint8(2) == 0x72 and uint16(3) == 0x1A21 and uint8(5) == 0x07
}

rule tar_file
{
    meta:
        type = "archive"
    strings:
        $a = { 75 73 74 61 72 }
    condition:
        uint16(0) == 0x9D1F or
        uint16(0) == 0xA01F or
        $a at 257
}

rule xar_file
{
    meta:
        type = "archive"
    condition:
        uint32(0) == 0x21726178
}

rule zip_file
{
    meta:
        type = "archive"
    condition:
        ( uint32(0) == 0x04034B50 and not uint32(4) == 0x00060014 )
}

// Audio Files

rule mp3_file
{
    meta:
        type = "audio"
    condition:
        uint16(0) == 0x4449 and uint8(2) == 0x33
}

// Certificate Files

rule pkcs7_file
{
    meta:
        type = "certificate"
    strings:
        $a = "-----BEGIN PKCS7-----"
    condition:
        (uint16(0) == 0x8230 and uint16(4) == 0x0906) or
        uint32(0) == 0x09068030 or
        $a at 0
}

rule x509_der_file
{
    meta:
        type = "certificate"
    condition:
        uint16(0) == 0x8230 and ( uint16(4) == 0x8230 or uint16(4) == 0x8130 )
}

rule x509_pem_file
{
    meta:
        type = "certificate"
    strings:
        $a = "-----BEGIN CERTI"
    condition:
        $a at 0
}

// Compressed Files

rule bzip2_file
{
    meta:
        type = "compressed"
    condition:
        uint16(0) == 0x5A42 and uint8(2) == 0x68
}

rule gzip_file
{
    meta:
        type = "compressed"
    condition:
        uint16(0) == 0x8B1F and uint8(2) == 0x08
}

rule lzma_file
{
    meta:
        type = "compressed"
    condition:
        uint16(0) == 0x005D and uint8(2) == 0x00
}

rule xz_file
{
    meta:
        type = "compressed"
    condition:
        uint32(0) == 0x587A37FD and uint16(4) == 0x005A
}

// Document Files

rule doc_subheader_file
{
    meta:
        type = "document"
    condition:
        uint32(0) == 0x00C1A5EC
}

rule mso_file
{
    meta:
        type = "document"
    strings:
        $a = { 3C 3F 6D 73 6F 2D 61 70 70 6C 69 63 61 74 69 6F 6E 20 } // <?mso-application
        $b = { 3C 3F 6D 73 6F 2D 63 6F 6E 74 65 6E 74 54 79 70 65 } // <?mso-contentType
    condition:
        $a at 0 or
        $b at 0
}

rule olecf_file
{
    meta:
        description = "Object Linking and Embedding (OLE) Compound File (CF)"
        type = "document"
    condition:
        uint32(0) == 0xE011CFD0 and uint32(4) == 0xE11AB1A1
}

rule ooxml_file
{
    meta:
        description = "Microsoft Office Open XML Format"
        type = "document"
    condition:
        uint32(0) == 0x04034B50 and uint32(4) == 0x00060014
}

rule pdf_file
{
    meta:
        description = "Portable Document Format"
        type = "document"
    condition:
        uint32(0) == 0x46445025
}

rule poi_hpbf_file
{
    meta:
        description = "https://poi.apache.org/components/hpbf/file-format.html"
        type = "document"
    strings:
        $a = { 43 48 4E 4B 49 4E 4B } // CHNKINK
    condition:
        $a at 0
}

rule rtf_file
{
    meta:
        type = "document"
    condition:
        uint32(0) == 0x74725C7B
}

rule vbframe_file
{
    meta:
        type = "document"
    strings:
        $a = { 56 45 52 53 49 4F 4E 20 35 2E 30 30 0D 0A 42 65 67 69 6E } // VERSION 5.00\r\nBegin
    condition:
        $a at 0
}

rule wordml_file
{
    meta:
        description = "Microsoft Office Word 2003 XML format"
        type = "document"
    strings:
        $a = { 3C 3F 78 6D 6C 20 76 65 72 73 69 6F 6E 3D } // <?xml version=
        $b = "http://schemas.microsoft.com/office/word/2003/wordml"
    condition:
        $a at 0 and $b
}

rule xfdf_file
{
    meta:
        description = "XML Forms Data Format"
        type = "document"
    strings:
        $a = { 3C 78 66 64 66 20 78 6D 6C 6E 73 3D } // <xfdf xmlns=
    condition:
        $a at 0
}

// Email Files

rule email_file
{
    meta:
        type = "email"
    strings:
        $a = "\x0aReceived:" nocase fullword
        $b = "\x0AReturn-Path:" nocase fullword
        $c = "\x0aMessage-ID:" nocase fullword
        $d = "\x0aReply-To:" nocase fullword
        $e = "\x0aX-Mailer:" nocase fullword
    condition:
        $a in (0..2048) or
        $b in (0..2048) or
        $c in (0..2048) or
        $d in (0..2048) or
        $e in (0..2048)
}

rule tnef_file
{
    meta:
        description = "Transport Neutral Encapsulation Format"
        type = "email"
    condition:
        uint32(0) == 0x223E9F78
}

// Encryption Files

rule pgp_file
{
    meta:
        type = "encryption"
    strings:
        $a = { ?? ?? 2D 2D 2D 42 45 47 49 4E 20 50 47 50 20 50 55 42 4C 49 43 20 4B 45 59 20 42 4C 4F 43 4B 2D } // (.{2})(\x2D\x2D\x2DBEGIN PGP PUBLIC KEY BLOCK\x2D)
        $b = { ?? ?? 2D 2D 2D 42 45 47 49 4E 20 50 47 50 20 53 49 47 4E 41 54 55 52 45 2D } // (\x2D\x2D\x2D\x2D\x2DBEGIN PGP SIGNATURE\x2D)
        $c = { ?? ?? 2D 2D 2D 42 45 47 49 4E 20 50 47 50 20 4D 45 53 53 41 47 45 2D } // (\x2D\x2D\x2D\x2D\x2DBEGIN PGP MESSAGE\x2D)
    condition:
        $a at 0 or
        $b at 0 or
        $c at 0
}

// Executable Files

rule elf_file
{
    meta:
        description = "Executable and Linkable Format"
        type = "executable"
    condition:
        uint32(0) == 0x464C457F
}

rule lnk_file
{
    meta:
        description = "Windows Shortcut file"
        type = "executable"
    condition:
        uint32(0) == 0x0000004C
}

rule macho_file
{
    meta:
        description = "Mach object"
        type = "executable"
    condition:
        uint32(0) == 0xCEFAEDFE or
        uint32(0) == 0xCFFAEDFE or
        uint32(0) == 0xFEEDFACE or
        uint32(0) == 0xFEEDFACF
}

rule mz_file
{
    meta:
        description = "DOS MZ executable"
        type = "executable"
    condition:
        uint16(0) == 0x5A4D
}

// Image Files

rule bmp_file
{
    meta:
        type = "image"
    strings:
        $a = { 42 4D ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ?? ( 0C | 28 | 40 | 6C | 7C | 80 ) 00 } // BM
    condition:
        $a at 0
}

rule cmap_file
{
    meta:
        type = "image"
    strings:
        $a = { 62 65 67 69 6E 63 6D 61 70 } // begincmap
    condition:
        $a at 0
}

rule gif_file
{
    meta:
        description = "Graphics Interchange Format"
        type = "image"
    condition:
        uint32(0) == 0x38464947 and ( uint16(4) == 0x6137 or uint16(4) == 0x6139 )
}

rule jpeg_file
{
    meta:
        type = "image"
    condition:
        uint32(0) == 0xE0FFD8FF or
        uint32(0) == 0xE1FFD8FF or
        uint32(0) == 0xE2FFD8FF or
        uint32(0) == 0xE8FFD8FF
}

rule postscript_file
{
    meta:
        type = "image"
    strings:
        $a = { 25 21 50 53 2D 41 64 6F 62 65 2D 33 2E 30 } // %!PS-Adobe-3.0
    condition:
        $a at 0
}

rule png_file
{
    meta:
        type = "image"
    condition:
        uint32(0) == 0x474E5089
}

rule psd_file
{
    meta:
        description = "Photoshop Document"
        type = "image"
    condition:
        uint32(0) == 0x53504238
}

rule psd_image_file
{
    meta:
        description = "Photoshop Document image resource block"
        type = "image"
    condition:
        uint32(0) == 0x4D494238
}

rule svg_file
{
    meta:
        type = "image"
    strings:
        $a = { 3C 73 76 67 20 } // <svg
    condition:
        $a at 0
}

rule xicc_file
{
    meta:
        type = "image"
    strings:
        $a = { 58 49 43 43 5F 50 52 4F 46 49 4C 45 } // XICC_PROFILE
    condition:
        $a at 0
}

rule xmp_file
{
    meta:
        type = "image"
    strings:
        $a = { 3C 3F 78 70 61 63 6B 65 74 20 62 65 67 69 6E 3D } // <?xpacket begin=
        $b = { 3C 78 3A 78 6D 70 6D 65 74 61 20 78 6D 6C 6E 73 3A 78 3D } // <x:xmpmeta xmlns:x=
    condition:
        $a at 0 or $b at 0
}

// Metadata Files

rule jar_manifest_file
{
    meta:
        type = "metadata"
    condition:
        uint32(0) == 0x696E614D and uint32(4) == 0x74736566
}

rule bplist_file
{
    meta:
        description = "Binary Property List"
        type = "metadata"
    condition:
        uint32(0) == 0x696C7062 and uint32(4) == 0x30307473
}

// Multimedia Files

rule fws_file
{
    meta:
        type = "multimedia"
    condition:
        uint16(0) == 0x5746 and uint8(2) == 0x53
}

rule cws_file
{
    meta:
        description = "zlib compressed Flash file"
        type = "multimedia"
    condition:
        uint16(0) == 0x5743 and uint8(2) == 0x53
}

rule zws_file
{
    meta:
        description = "LZMA compressed Flash file"
        type = "multimedia"
    condition:
        uint16(0) == 0x575A and uint8(2) == 0x53
}

// Package Files

rule debian_package_file
{
    meta:
        type = "package"
    strings:
        $a = { 21 3C 61 72 63 68 3E 0A 64 65 62 69 61 6E } // \x21\x3Carch\x3E\x0Adebian
    condition:
        $a at 0
}

rule rpm_file
{
    meta:
        type = "package"
    condition:
        uint32(0) == 0x6D707264 or uint32(0) == 0xDBEEABED
}

// Packer Files

rule upx_file
{
    meta:
        description = "Ultimate Packer for Executables"
        type = "packer"
    strings:
        $a = {55505830000000}
        $b = {55505831000000}
        $c = "UPX!"
    condition:
        uint16(0) == 0x5A4D and
        $a in (0..1024) and
        $b in (0..1024) and
        $c in (0..1024)
}

// Script Files

rule batch_file
{
    meta:
        type = "script"
    strings:
        $a = { ( 45 | 65 ) ( 43 | 63 ) ( 48 | 68 ) ( 4F | 6F ) 20 ( 4F | 6F ) ( 46 | 66 ) ( 46 | 66 ) } // [Ee][Cc][Hh][Oo] [Oo][Ff][Ff]
    condition:
        $a at 0
}

rule javascript_file
{
    meta:
        type = "script"
    strings:
        $var = { 76 61 72 20 } // var
        $function1 = { 66 75 6E 63 74 69 6F 6E } // function
        $function2 = { 28 66 75 6E 63 74 69 6F 6E } // (function
        $function3 = { 66 75 6E 63 74 69 6F 6E [0-1] 28 } // function[0-1](
        $if = { 69 66 [0-1] 28 } // if[0-1](
        $misc1 = { 24 28 } // $(
        $misc2 = { 2F ( 2A | 2F ) } // \/(\/|\*)
        $jquery = { 6A 51 75 65 72 79 } // jQuery
        $try = { 74 72 79 [0-1] 7B } // try[0-1]{
        $catch = { 63 61 74 63 68 28 } // catch(
        $push = { 2E 70 75 73 68 28 } // .push(
        $array = { 6E 65 77 20 41 72 72 61 79 28 } // new Array(
        $document1 = { 64 6F 63 75 6D 65 6E 74 2E 63 72 65 61 74 65 } // document.create
        $document2 = { 64 6F 63 75 6D 65 6E 74 2E 77 72 69 74 65 } // document.write
        $window = { 77 69 6E 64 6F 77 ( 2E | 5B ) } // window[.\[]
        $define = { 64 65 66 69 6E 65 28 } // define(
        $eval = { 65 76 61 6C 28 } // eval(
        $unescape = { 75 6E 65 73 63 61 70 65 28 } // unescape(
    condition:
        $var at 0 or
        $function1 at 0 or
        $function2 at 0 or
        $if at 0 or
        $jquery at 0 or
        $function3 in (0..30) or
        $push in (0..30) or
        $array in (0..30) or
        ( $try at 0 and $catch in (5..5000) ) or
        $document1 in (0..100) or
        $document2 in (0..100) or
        $window in (0..100) or
        $define in (0..100) or
        $eval in (0..100) or
        $unescape in (0..100) or
        ( ( $misc1 at 0 or $misc2 at 0 ) and $var and $function1 and $if )
}

rule vb_file
{
    meta:
        type = "script"
    strings:
        $a = { 41 74 74 72 69 62 75 74 65 20 56 42 5F 4E 61 6D 65 20 3D } // Attribute VB_Name =
        $b = { 4F 70 74 69 6F 6E 20 45 78 70 6C 69 63 69 74 } // Option Explicit
        $c = { 44 69 6D 20 } // Dim
        $d = { 50 75 62 6C 69 63 20 53 75 62 20 } // Public Sub
        $e = { 50 72 69 76 61 74 65 20 53 75 62 20 } // Private Sub
    condition:
        $a at 0 or
        $b at 0 or
        $c at 0 or
        $d at 0 or
        $e at 0
}

// Text Files

rule hta_file
{
    meta:
        type = "text"
    strings:
        $a = { 3C 48 54 41 3A 41 50 50 4C 49 43 41 54 49 4F 4E 20 } // <HTA:APPLICATION
    condition:
        $a in (0..2000)
}

rule html_file
{
    meta:
        type = "text"
    strings:
        $a = { 3C 21 ( 64 | 44 ) ( 6F | 4F ) ( 63 | 43 ) ( 74 | 54 ) ( 79 | 59 ) ( 70 | 50 ) ( 65 | 45 ) 20 ( 68 | 48 ) ( 74 | 54 ) ( 6D | 4D ) ( 6C | 4C ) } // <![Dd][Oo][Cc][Tt][Yy][Pp][Ee] [Hh][Tt][Mm][Ll]
        $b = { 3C ( 68 | 48 ) ( 74 | 54 ) ( 6D | 4D ) ( 6C | 4C ) } // <[Hh][Tt][Mm][Ll]
        $c = { 3C ( 62 | 42 ) ( 72 | 52 ) } // <br
        $d = { 3C ( 44 | 64 ) ( 49 | 69 ) ( 56 | 76 ) } // <[Dd][Ii][Vv]
        $e = { 3C ( 41 | 61 ) 20 ( 48 | 68 ) ( 52 | 72 ) ( 45 | 65 ) ( 46 | 66 ) 3D } // <[Aa] [Hh][Rr][Ee][Ff]=
        $f = { 3C ( 48 | 68 ) ( 45 | 65 ) ( 41 | 61 ) ( 44 | 64 ) } // <[Hh][Ee][Aa][Dd]
        $g = { 3C ( 53 | 73 ) ( 43 | 63 ) ( 52 | 72 ) ( 49 | 69 ) ( 50 | 70 ) ( 54 | 74 ) } // <[Ss][Cc][Rr][Ii][Pp][Tt]
        $h = { 3C ( 53 | 73 ) ( 54 | 74 ) ( 59 | 79 ) ( 4C | 6C ) ( 45 | 65 ) } // <[Ss][Tt][Yy][Ll][Ee]
        $i = { 3C ( 54 | 74 ) ( 41 | 61 ) ( 42 | 62 ) ( 4C | 6C ) ( 45 | 65 ) } // <[Tt][Aa][Bb][Ll][Ee]
        $j = { 3C ( 50 | 70 ) } // <[Pp]
        $k = { 3C ( 49 | 69 ) ( 4D | 6D ) ( 47 | 67 ) } // <[Ii][Mm][Gg]
        $l = { 3C ( 53 | 73 ) ( 50 | 70 ) ( 41 | 61 ) ( 4E | 6E ) } // <[Ss][Pp][Aa][Nn]
        $m = { 3C ( 48 | 68 ) ( 52 | 72 | 31 | 32 | 33 | 34 | 35 | 36 ) } // <[Hh][Rr] <[Hh][1-6]
        $n = { 3C ( 54 | 74 ) ( 49 | 69 ) ( 54 | 74 ) ( 4C | 6C ) ( 45 | 65 ) 3E } // <[Tt][Ii][Tt][Ll][Ee]>
    condition:
        $a at 0 or
        $b at 0 or
        $c at 0 or
        $d at 0 or
        $e at 0 or
        $f at 0 or
        $g at 0 or
        $h at 0 or
        $i at 0 or
        $j at 0 or
        $k at 0 or
        $l at 0 or
        $m at 0 or
        $n at 0
}

rule json_file
{
    meta:
        type = "text"
    strings:
        $a = { 7B [0-5] 22 }
    condition:
        $a at 0
}

rule php_file
{
    meta:
        type = "text"
    strings:
        $a = { 3C 3F 70 68 70 } // <?php
    condition:
        $a at 0
}

rule soap_file
{
    meta:
        description = "Simple Object Access Protocol"
        type = "text"
    strings:
        $a = { 3C 73 6F 61 70 65 6E 76 3A 45 6E 76 65 6C 6F 70 65 } // <soapenv:Envelope
        $b = { 3C 73 3A 45 6E 76 65 6C 6F 70 65 } // <s:Envelope
    condition:
        $a at 0 or
        $b at 0
}

rule xml_file
{
    meta:
        type = "text"
    strings:
        $a = { 3C 3F ( 58 | 78 ) ( 4D | 6D ) ( 4C | 6C ) 20 76 65 72 73 69 6F 6E 3D } // <?[Xx][Mm][Ll] version=
        $b = { 3C 3F 78 6D 6C 3F 3E } // <?xml?>
        $c = { 3C 73 74 79 6C 65 53 68 65 65 74 20 78 6D 6C 6E 73 3D } // <styleSheet xmlns=
        $d = { 3C 77 6F 72 6B 62 6F 6F 6B 20 78 6D 6C 6E 73 } // <workbook xmlns
        $e = { 3C 78 6D 6C 20 78 6D 6C 6E 73 } // <xml xmlns
        $f = { 3C 69 6E 74 20 78 6D 6C 6E 73 } // <int xmlns
    condition:
        $a at 0 or
        $b at 0 or
        $c at 0 or
        $d at 0 or
        $e at 0 or
        $f at 0
}

// Video Files

rule avi_file
{
    meta:
        type = "video"
    strings:
        $a = { 52 49 46 46 ?? ?? ?? ?? 41 56 49 20 4C 49 53 54 }
    condition:
        $a at 0
}

rule wmv_file
{
    meta:
        type = "video"
    condition:
        uint32(0) == 0x75B22630 and uint32(4) == 0x11CF668E and uint32(8) == 0xAA00D9A6 and uint32(12) == 0x6CCE6200
}

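Any taste rule above can be exercised with the stock yara CLI before the compiled bundle ships to the backend; a sketch with a placeholder sample path:

# Print which taste rules fire on a file; -s also shows the matched strings.
yara -s taste.yara /tmp/sample.bin
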
@@ -1,13 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% from 'strelka/map.jinja' import STRELKAMERGED %}

include:
{% if STRELKAMERGED.backend.enabled %}
  - strelka.backend.enabled
{% else %}
  - strelka.backend.disabled
{% endif %}

@@ -1,21 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}

append_so-strelka-backend_so-status.conf:
  file.append:
    - name: /opt/so/conf/so-status/so-status.conf
    - text: so-strelka-backend
    - unless: grep -q so-strelka-backend /opt/so/conf/so-status/so-status.conf

{% else %}

{{sls}}_state_not_allowed:
  test.fail_without_changes:
    - name: {{sls}}_state_not_allowed

{% endif %}

@@ -1,99 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

import argparse
import glob
import hashlib
import json
import os
import yara
from datetime import datetime
from concurrent.futures import ThreadPoolExecutor

def check_syntax(rule_file):
    try:
        # Testing if compilation throws a syntax error, don't save the result
        yara.compile(filepath=rule_file)
        return (True, rule_file, None)
    except yara.SyntaxError as e:
        # Return the error message for logging purposes
        return (False, rule_file, str(e))

def compile_yara_rules(rules_dir):
    compiled_dir = os.path.join(rules_dir, "compiled")
    compiled_rules_path = "/opt/so/saltstack/local/salt/strelka/rules/compiled/rules.compiled"
    rule_files = glob.glob(os.path.join(rules_dir, '**/*.yar'), recursive=True)
    files_to_compile = {}
    removed_count = 0
    success_count = 0

    # Use ThreadPoolExecutor to parallelize syntax checks
    with ThreadPoolExecutor() as executor:
        results = executor.map(check_syntax, rule_files)

    # Collect yara files and prepare for batch compilation
    ts = str(datetime.utcnow().isoformat())
    failure_ids = []
    success_ids = []
    for success, rule_file, error_message in results:
        rule_id = os.path.splitext(os.path.basename(rule_file))[0]
        if success:
            files_to_compile[os.path.basename(rule_file)] = rule_file
            success_count += 1
            success_ids.append(rule_id)
        else:
            failure_ids.append(rule_id)
            # Extract just the UUID from the rule file name
            log_entry = {
                "event_module": "soc",
                "event_dataset": "soc.detections",
                "log.level": "error",
                "error_message": error_message,
                "error_analysis": "Syntax Error",
                "detection_type": "YARA",
                "rule_uuid": rule_id,
                "error_type": "runtime_status"
            }
            with open('/opt/sensoroni/logs/detections_runtime-status_yara.log', 'a') as log_file:
                json.dump(log_entry, log_file)
                log_file.write('\n')  # Ensure new entries start on new lines
            os.remove(rule_file)
            removed_count += 1

    # Compile all remaining valid rules into a single file
    compiled_sha256 = ""
    if files_to_compile:
        compiled_rules = yara.compile(filepaths=files_to_compile)
        compiled_rules.save(compiled_rules_path)
        print(f"All remaining rules compiled and saved into {compiled_rules_path}")
        # Hash file
        with open(compiled_rules_path, 'rb') as hash_file:
            compiled_sha256 = hashlib.sha256(hash_file.read()).hexdigest()
    # Remove the rules.compiled if there aren't any files to be compiled
    else:
        if os.path.exists(compiled_rules_path):
            os.remove(compiled_rules_path)

    # Create compilation report
    compilation_report = {
        "timestamp": ts,
        "compiled_sha256": compiled_sha256,
        "failure": failure_ids,
        "success": success_ids
    }

    # Write total
    with open('/opt/so/state/detections_yara_compilation-total.log', 'w+') as report_file:
        json.dump(compilation_report, report_file)

    # Print summary of compilation results
    print(f"Summary: {success_count} rules compiled successfully, {removed_count} rules removed due to errors.")

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Compile YARA rules from the specified directory")
    parser.add_argument("rules_dir", help="Directory containing YARA rules to compile")
    args = parser.parse_args()

    compile_yara_rules(args.rules_dir)

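Typical invocation of the compiler above; the script filename and rules directory are the conventional locations implied by the paths in the code, shown here as assumptions:

# Syntax-check all .yar files, prune failures, and batch-compile the rest.
sudo python3 compile_yara.py /opt/so/saltstack/local/salt/strelka/rules
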
Some files were not shown because too many files have changed in this diff.