From e9341ee8d3ed902e4a58f51f502042a7d0a769a7 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Wed, 24 Dec 2025 10:40:23 -0600 Subject: [PATCH 01/17] remove usage of deprecated 'logs' integration in favor of 'filestream' --- .../grid-nodes_general/import-zeek-logs.json | 22 ++++++++++--- .../grid-nodes_general/kratos-logs.json | 31 ++++++++++++----- .../grid-nodes_general/zeek-logs.json | 24 ++++++++++---- .../grid-nodes_general/hydra-logs.json | 33 +++++++++++++++---- .../grid-nodes_general/idh-logs.json | 28 ++++++++++++---- .../grid-nodes_general/import-evtx-logs.json | 29 +++++++++++----- .../import-suricata-logs.json | 29 ++++++++++++---- .../grid-nodes_general/rita-logs.json | 32 ++++++++++++------ .../grid-nodes_general/so-ip-mappings.json | 28 +++++++++++----- .../soc-auth-sync-logs.json | 28 ++++++++++++---- .../soc-detections-logs.json | 31 ++++++++++++----- .../soc-salt-relay-logs.json | 30 +++++++++++++---- .../soc-sensoroni-logs.json | 30 ++++++++++++----- .../grid-nodes_general/soc-server-logs.json | 30 +++++++++++++---- .../grid-nodes_general/strelka-logs.json | 30 ++++++++++++----- .../grid-nodes_general/suricata-logs.json | 30 ++++++++++++----- 16 files changed, 343 insertions(+), 122 deletions(-) diff --git a/salt/elasticfleet/files/integrations-dynamic/grid-nodes_general/import-zeek-logs.json b/salt/elasticfleet/files/integrations-dynamic/grid-nodes_general/import-zeek-logs.json index 492db03dc..8f5f01a21 100644 --- a/salt/elasticfleet/files/integrations-dynamic/grid-nodes_general/import-zeek-logs.json +++ b/salt/elasticfleet/files/integrations-dynamic/grid-nodes_general/import-zeek-logs.json @@ -2,7 +2,7 @@ {%- raw -%} { "package": { - "name": "log", + "name": "filestream", "version": "" }, "name": "import-zeek-logs", @@ -10,19 +10,31 @@ "description": "Zeek Import logs", "policy_id": "so-grid-nodes_general", "inputs": { - "logs-logfile": { + "filestream-filestream": { "enabled": true, "streams": { - "log.logs": { + "filestream.generic": { "enabled": true, "vars": { "paths": [ "/nsm/import/*/zeek/logs/*.log" ], "data_stream.dataset": "import", - "tags": [], + "pipeline": "", + "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n", + "exclude_files": ["({%- endraw -%}{{ ELASTICFLEETMERGED.logging.zeek.excluded | join('|') }}{%- raw -%}).log$"], + "include_files": [], "processors": "- dissect:\n tokenizer: \"/nsm/import/%{import.id}/zeek/logs/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n- script:\n lang: javascript\n source: >\n function process(event) {\n var pl = event.Get(\"import.file\").slice(0,-4);\n event.Put(\"@metadata.pipeline\", \"zeek.\" + pl);\n }\n- add_fields:\n target: event\n fields:\n category: network\n module: zeek\n imported: true\n- add_tags:\n tags: \"ics\"\n when:\n regexp:\n import.file: \"^bacnet*|^bsap*|^cip*|^cotp*|^dnp3*|^ecat*|^enip*|^modbus*|^opcua*|^profinet*|^s7comm*\"", - "custom": "exclude_files: [\"{%- endraw -%}{{ ELASTICFLEETMERGED.logging.zeek.excluded | join('|') }}{%- raw -%}.log$\"]\n" + "tags": [], + "recursive_glob": true, + "clean_inactive": -1, + "harvester_limit": 0, + "fingerprint": false, + "fingerprint_offset": 0, + "fingerprint_length": "64", + "file_identity_native": true, + "exclude_lines": [], + "include_lines": [] } } } diff --git a/salt/elasticfleet/files/integrations-dynamic/grid-nodes_general/kratos-logs.json b/salt/elasticfleet/files/integrations-dynamic/grid-nodes_general/kratos-logs.json index 
f6b01cdff..545588521 100644 --- a/salt/elasticfleet/files/integrations-dynamic/grid-nodes_general/kratos-logs.json +++ b/salt/elasticfleet/files/integrations-dynamic/grid-nodes_general/kratos-logs.json @@ -11,36 +11,51 @@ {%- endif -%} { "package": { - "name": "log", + "name": "filestream", "version": "" }, "name": "kratos-logs", - "namespace": "so", "description": "Kratos logs", "policy_id": "so-grid-nodes_general", + "namespace": "so", "inputs": { - "logs-logfile": { + "filestream-filestream": { "enabled": true, "streams": { - "log.logs": { + "filestream.generic": { "enabled": true, "vars": { "paths": [ "/opt/so/log/kratos/kratos.log" ], "data_stream.dataset": "kratos", - "tags": ["so-kratos"], + "pipeline": "kratos", + "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n", + "exclude_files": [ + "\\.gz$" + ], + "include_files": [], {%- if valid_identities -%} "processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true\n- add_fields:\n target: event\n fields:\n category: iam\n module: kratos\n- if:\n has_fields:\n - identity_id\n then:{% for id, email in identities %}\n - if:\n equals:\n identity_id: \"{{ id }}\"\n then:\n - add_fields:\n target: ''\n fields:\n user.name: \"{{ email }}\"{% endfor %}", {%- else -%} "processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true\n- add_fields:\n target: event\n fields:\n category: iam\n module: kratos", {%- endif -%} - "custom": "pipeline: kratos" + "tags": [ + "so-kratos" + ], + "recursive_glob": true, + "clean_inactive": -1, + "harvester_limit": 0, + "fingerprint": false, + "fingerprint_offset": 0, + "fingerprint_length": "64", + "file_identity_native": true, + "exclude_lines": [], + "include_lines": [] } } } } }, "force": true -} - +} \ No newline at end of file diff --git a/salt/elasticfleet/files/integrations-dynamic/grid-nodes_general/zeek-logs.json b/salt/elasticfleet/files/integrations-dynamic/grid-nodes_general/zeek-logs.json index 5462dc861..8e4a5e23b 100644 --- a/salt/elasticfleet/files/integrations-dynamic/grid-nodes_general/zeek-logs.json +++ b/salt/elasticfleet/files/integrations-dynamic/grid-nodes_general/zeek-logs.json @@ -2,28 +2,38 @@ {%- raw -%} { "package": { - "name": "log", + "name": "filestream", "version": "" }, - "id": "zeek-logs", "name": "zeek-logs", "namespace": "so", "description": "Zeek logs", "policy_id": "so-grid-nodes_general", "inputs": { - "logs-logfile": { + "filestream-filestream": { "enabled": true, "streams": { - "log.logs": { + "filestream.generic": { "enabled": true, "vars": { "paths": [ "/nsm/zeek/logs/current/*.log" ], "data_stream.dataset": "zeek", - "tags": [], + "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n", + "exclude_files": ["({%- endraw -%}{{ ELASTICFLEETMERGED.logging.zeek.excluded | join('|') }}{%- raw -%}).log$"], + "include_files": [], "processors": "- dissect:\n tokenizer: \"/nsm/zeek/logs/current/%{pipeline}.log\"\n field: \"log.file.path\"\n trim_chars: \".log\"\n target_prefix: \"\"\n- script:\n lang: javascript\n source: >\n function process(event) {\n var pl = event.Get(\"pipeline\");\n event.Put(\"@metadata.pipeline\", \"zeek.\" + pl);\n }\n- add_fields:\n target: event\n fields:\n category: network\n module: zeek\n- add_tags:\n tags: \"ics\"\n when:\n regexp:\n pipeline: \"^bacnet*|^bsap*|^cip*|^cotp*|^dnp3*|^ecat*|^enip*|^modbus*|^opcua*|^profinet*|^s7comm*\"", - "custom": "exclude_files: 
[\"{%- endraw -%}{{ ELASTICFLEETMERGED.logging.zeek.excluded | join('|') }}{%- raw -%}.log$\"]\n" + "tags": [], + "recursive_glob": true, + "clean_inactive": -1, + "harvester_limit": 0, + "fingerprint": false, + "fingerprint_offset": 0, + "fingerprint_length": "64", + "file_identity_native": true, + "exclude_lines": [], + "include_lines": [] } } } @@ -31,4 +41,4 @@ }, "force": true } -{%- endraw -%} +{%- endraw -%} \ No newline at end of file diff --git a/salt/elasticfleet/files/integrations/grid-nodes_general/hydra-logs.json b/salt/elasticfleet/files/integrations/grid-nodes_general/hydra-logs.json index f1b1dace9..a4f944ba5 100644 --- a/salt/elasticfleet/files/integrations/grid-nodes_general/hydra-logs.json +++ b/salt/elasticfleet/files/integrations/grid-nodes_general/hydra-logs.json @@ -1,26 +1,43 @@ { "package": { - "name": "log", + "name": "filestream", "version": "" }, "name": "hydra-logs", - "namespace": "so", "description": "Hydra logs", "policy_id": "so-grid-nodes_general", + "namespace": "so", "inputs": { - "logs-logfile": { + "filestream-filestream": { "enabled": true, "streams": { - "log.logs": { + "filestream.generic": { "enabled": true, "vars": { "paths": [ "/opt/so/log/hydra/hydra.log" ], "data_stream.dataset": "hydra", - "tags": ["so-hydra"], - "processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: iam\n module: hydra", - "custom": "pipeline: hydra" + "pipeline": "hydra", + "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n", + "exclude_files": [ + "\\.gz$" + ], + "include_files": [], + "processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true\n- add_fields:\n target: event\n fields:\n category: iam\n module: hydra", + "tags": [ + "so-hydra" + ], + "recursive_glob": true, + "ignore_older": "72h", + "clean_inactive": -1, + "harvester_limit": 0, + "fingerprint": false, + "fingerprint_offset": 0, + "fingerprint_length": "64", + "file_identity_native": true, + "exclude_lines": [], + "include_lines": [] } } } @@ -28,3 +45,5 @@ }, "force": true } + + diff --git a/salt/elasticfleet/files/integrations/grid-nodes_general/idh-logs.json b/salt/elasticfleet/files/integrations/grid-nodes_general/idh-logs.json index 9f66c1937..fef9c57fb 100644 --- a/salt/elasticfleet/files/integrations/grid-nodes_general/idh-logs.json +++ b/salt/elasticfleet/files/integrations/grid-nodes_general/idh-logs.json @@ -1,30 +1,44 @@ { "package": { - "name": "log", + "name": "filestream", "version": "" }, "name": "idh-logs", - "namespace": "so", "description": "IDH integration", "policy_id": "so-grid-nodes_general", + "namespace": "so", "inputs": { - "logs-logfile": { + "filestream-filestream": { "enabled": true, "streams": { - "log.logs": { + "filestream.generic": { "enabled": true, "vars": { "paths": [ "/nsm/idh/opencanary.log" ], "data_stream.dataset": "idh", - "tags": [], + "pipeline": "common", + "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n", + "exclude_files": [ + "\\.gz$" + ], + "include_files": [], "processors": "\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true\n- convert:\n fields:\n - {from: \"logtype\", to: \"event.code\", type: \"string\"}\n- drop_fields:\n when:\n equals:\n event.code: \"1001\"\n fields: [\"src_host\", \"src_port\", \"dst_host\", \"dst_port\" ]\n ignore_missing: true\n- rename:\n fields:\n - 
from: \"src_host\"\n to: \"source.ip\"\n - from: \"src_port\"\n to: \"source.port\"\n - from: \"dst_host\"\n to: \"destination.host\"\n - from: \"dst_port\"\n to: \"destination.port\"\n ignore_missing: true\n- drop_fields:\n fields: '[\"prospector\", \"input\", \"offset\", \"beat\"]'\n- add_fields:\n target: event\n fields:\n category: host\n module: opencanary", - "custom": "pipeline: common" + "tags": [], + "recursive_glob": true, + "clean_inactive": -1, + "harvester_limit": 0, + "fingerprint": false, + "fingerprint_offset": 0, + "fingerprint_length": "64", + "file_identity_native": true, + "exclude_lines": [], + "include_lines": [] } } } } }, "force": true -} +} \ No newline at end of file diff --git a/salt/elasticfleet/files/integrations/grid-nodes_general/import-evtx-logs.json b/salt/elasticfleet/files/integrations/grid-nodes_general/import-evtx-logs.json index dd95e6337..50ffd5dc7 100644 --- a/salt/elasticfleet/files/integrations/grid-nodes_general/import-evtx-logs.json +++ b/salt/elasticfleet/files/integrations/grid-nodes_general/import-evtx-logs.json @@ -1,33 +1,46 @@ { "package": { - "name": "log", + "name": "filestream", "version": "" }, "name": "import-evtx-logs", - "namespace": "so", "description": "Import Windows EVTX logs", "policy_id": "so-grid-nodes_general", - "vars": {}, + "namespace": "so", "inputs": { - "logs-logfile": { + "filestream-filestream": { "enabled": true, "streams": { - "log.logs": { + "filestream.generic": { "enabled": true, "vars": { "paths": [ "/nsm/import/*/evtx/*.json" ], "data_stream.dataset": "import", - "custom": "", + "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n", + "exclude_files": [ + "\\.gz$" + ], + "include_files": [], "processors": "- dissect:\n tokenizer: \"/nsm/import/%{import.id}/evtx/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n- drop_fields:\n fields: [\"host\"]\n ignore_missing: true\n- add_fields:\n target: data_stream\n fields:\n type: logs\n dataset: system.security\n- add_fields:\n target: event\n fields:\n dataset: system.security\n module: system\n imported: true\n- add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.security-2.6.1\n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-Sysmon/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.sysmon_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.sysmon_operational\n module: windows\n imported: true\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.sysmon_operational-3.1.2\n- if:\n equals:\n winlog.channel: 'Application'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.application\n - add_fields:\n target: event\n fields:\n dataset: system.application\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.application-2.6.1\n- if:\n equals:\n winlog.channel: 'System'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.system\n - add_fields:\n target: event\n fields:\n dataset: system.system\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.system-2.6.1\n \n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-PowerShell/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.powershell_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.powershell_operational\n module: windows\n - add_fields:\n 
target: \"@metadata\"\n fields:\n pipeline: logs-windows.powershell_operational-3.1.2\n- add_fields:\n target: data_stream\n fields:\n dataset: import", "tags": [ "import" - ] + ], + "recursive_glob": true, + "ignore_older": "72h", + "clean_inactive": -1, + "harvester_limit": 0, + "fingerprint": false, + "fingerprint_offset": 0, + "fingerprint_length": "64", + "file_identity_native": true, + "exclude_lines": [], + "include_lines": [] } } } } }, "force": true -} +} \ No newline at end of file diff --git a/salt/elasticfleet/files/integrations/grid-nodes_general/import-suricata-logs.json b/salt/elasticfleet/files/integrations/grid-nodes_general/import-suricata-logs.json index c9b036e36..b8f3b0b29 100644 --- a/salt/elasticfleet/files/integrations/grid-nodes_general/import-suricata-logs.json +++ b/salt/elasticfleet/files/integrations/grid-nodes_general/import-suricata-logs.json @@ -1,30 +1,45 @@ { "package": { - "name": "log", + "name": "filestream", "version": "" }, "name": "import-suricata-logs", - "namespace": "so", "description": "Import Suricata logs", "policy_id": "so-grid-nodes_general", + "namespace": "so", "inputs": { - "logs-logfile": { + "filestream-filestream": { "enabled": true, "streams": { - "log.logs": { + "filestream.generic": { "enabled": true, "vars": { "paths": [ "/nsm/import/*/suricata/eve*.json" ], "data_stream.dataset": "import", + "pipeline": "suricata.common", + "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n", + "exclude_files": [ + "\\.gz$" + ], + "include_files": [], + "processors": "- add_fields:\n target: event\n fields:\n category: network\n module: suricata\n imported: true\n- dissect:\n tokenizer: \"/nsm/import/%{import.id}/suricata/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n", "tags": [], - "processors": "- add_fields:\n target: event\n fields:\n category: network\n module: suricata\n imported: true\n- dissect:\n tokenizer: \"/nsm/import/%{import.id}/suricata/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"", - "custom": "pipeline: suricata.common" + "recursive_glob": true, + "ignore_older": "72h", + "clean_inactive": -1, + "harvester_limit": 0, + "fingerprint": false, + "fingerprint_offset": 0, + "fingerprint_length": "64", + "file_identity_native": true, + "exclude_lines": [], + "include_lines": [] } } } } }, "force": true -} +} \ No newline at end of file diff --git a/salt/elasticfleet/files/integrations/grid-nodes_general/rita-logs.json b/salt/elasticfleet/files/integrations/grid-nodes_general/rita-logs.json index a97faaa5f..70259c3cf 100644 --- a/salt/elasticfleet/files/integrations/grid-nodes_general/rita-logs.json +++ b/salt/elasticfleet/files/integrations/grid-nodes_general/rita-logs.json @@ -1,18 +1,17 @@ { "package": { - "name": "log", + "name": "filestream", "version": "" }, "name": "rita-logs", - "namespace": "so", "description": "RITA Logs", "policy_id": "so-grid-nodes_general", - "vars": {}, + "namespace": "so", "inputs": { - "logs-logfile": { + "filestream-filestream": { "enabled": true, "streams": { - "log.logs": { + "filestream.generic": { "enabled": true, "vars": { "paths": [ @@ -20,15 +19,28 @@ "/nsm/rita/exploded-dns.csv", "/nsm/rita/long-connections.csv" ], - "exclude_files": [], - "ignore_older": "72h", "data_stream.dataset": "rita", - "tags": [], + "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n", + "exclude_files": [ + "\\.gz$" + ], + "include_files": [], "processors": "- 
dissect:\n tokenizer: \"/nsm/rita/%{pipeline}.csv\"\n field: \"log.file.path\"\n trim_chars: \".csv\"\n target_prefix: \"\"\n- script:\n lang: javascript\n source: >\n function process(event) {\n var pl = event.Get(\"pipeline\").split(\"-\");\n if (pl.length > 1) {\n pl = pl[1];\n }\n else {\n pl = pl[0];\n }\n event.Put(\"@metadata.pipeline\", \"rita.\" + pl);\n }\n- add_fields:\n target: event\n fields:\n category: network\n module: rita", - "custom": "exclude_lines: ['^Score', '^Source', '^Domain', '^No results']" + "tags": [], + "recursive_glob": true, + "ignore_older": "72h", + "clean_inactive": -1, + "harvester_limit": 0, + "fingerprint": false, + "fingerprint_offset": 0, + "fingerprint_length": "64", + "file_identity_native": true, + "exclude_lines": [], + "include_lines": [] } } } } - } + }, + "force": true } diff --git a/salt/elasticfleet/files/integrations/grid-nodes_general/so-ip-mappings.json b/salt/elasticfleet/files/integrations/grid-nodes_general/so-ip-mappings.json index fdcd36815..a14e63559 100644 --- a/salt/elasticfleet/files/integrations/grid-nodes_general/so-ip-mappings.json +++ b/salt/elasticfleet/files/integrations/grid-nodes_general/so-ip-mappings.json @@ -1,29 +1,41 @@ { "package": { - "name": "log", + "name": "filestream", "version": "" }, "name": "so-ip-mappings", - "namespace": "so", "description": "IP Description mappings", "policy_id": "so-grid-nodes_general", - "vars": {}, + "namespace": "so", "inputs": { - "logs-logfile": { + "filestream-filestream": { "enabled": true, "streams": { - "log.logs": { + "filestream.generic": { "enabled": true, "vars": { "paths": [ "/nsm/custom-mappings/ip-descriptions.csv" ], "data_stream.dataset": "hostnamemappings", + "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n", + "exclude_files": [ + "\\.gz$" + ], + "include_files": [], + "processors": "- decode_csv_fields:\n fields:\n message: decoded.csv\n separator: \",\"\n ignore_missing: false\n overwrite_keys: true\n trim_leading_space: true\n fail_on_error: true\n\n- extract_array:\n field: decoded.csv\n mappings:\n so.ip_address: '0'\n so.description: '1'\n\n- script:\n lang: javascript\n source: >\n function process(event) {\n var ip = event.Get('so.ip_address');\n var validIpRegex = /^((25[0-5]|2[0-4]\\d|1\\d{2}|[1-9]?\\d)\\.){3}(25[0-5]|2[0-4]\\d|1\\d{2}|[1-9]?\\d)$/\n if (!validIpRegex.test(ip)) {\n event.Cancel();\n }\n }\n- fingerprint:\n fields: [\"so.ip_address\"]\n target_field: \"@metadata._id\"\n", "tags": [ "so-ip-mappings" ], - "processors": "- decode_csv_fields:\n fields:\n message: decoded.csv\n separator: \",\"\n ignore_missing: false\n overwrite_keys: true\n trim_leading_space: true\n fail_on_error: true\n\n- extract_array:\n field: decoded.csv\n mappings:\n so.ip_address: '0'\n so.description: '1'\n\n- script:\n lang: javascript\n source: >\n function process(event) {\n var ip = event.Get('so.ip_address');\n var validIpRegex = /^((25[0-5]|2[0-4]\\d|1\\d{2}|[1-9]?\\d)\\.){3}(25[0-5]|2[0-4]\\d|1\\d{2}|[1-9]?\\d)$/\n if (!validIpRegex.test(ip)) {\n event.Cancel();\n }\n }\n- fingerprint:\n fields: [\"so.ip_address\"]\n target_field: \"@metadata._id\"\n", - "custom": "" + "recursive_glob": true, + "clean_inactive": -1, + "harvester_limit": 0, + "fingerprint": false, + "fingerprint_offset": 0, + "fingerprint_length": "64", + "file_identity_native": true, + "exclude_lines": [], + "include_lines": [] } } } @@ -31,5 +43,3 @@ }, "force": true } - - diff --git 
a/salt/elasticfleet/files/integrations/grid-nodes_general/soc-auth-sync-logs.json b/salt/elasticfleet/files/integrations/grid-nodes_general/soc-auth-sync-logs.json index aa39c177b..f4fd38e9d 100644 --- a/salt/elasticfleet/files/integrations/grid-nodes_general/soc-auth-sync-logs.json +++ b/salt/elasticfleet/files/integrations/grid-nodes_general/soc-auth-sync-logs.json @@ -1,30 +1,44 @@ { "package": { - "name": "log", + "name": "filestream", "version": "" }, "name": "soc-auth-sync-logs", - "namespace": "so", "description": "Security Onion - Elastic Auth Sync - Logs", "policy_id": "so-grid-nodes_general", + "namespace": "so", "inputs": { - "logs-logfile": { + "filestream-filestream": { "enabled": true, "streams": { - "log.logs": { + "filestream.generic": { "enabled": true, "vars": { "paths": [ "/opt/so/log/soc/sync.log" ], "data_stream.dataset": "soc", - "tags": ["so-soc"], + "pipeline": "common", + "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n", + "exclude_files": [ + "\\.gz$" + ], + "include_files": [], "processors": "- dissect:\n tokenizer: \"%{event.action}\"\n field: \"message\"\n target_prefix: \"\"\n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: auth_sync", - "custom": "pipeline: common" + "tags": [], + "recursive_glob": true, + "clean_inactive": -1, + "harvester_limit": 0, + "fingerprint": false, + "fingerprint_offset": 0, + "fingerprint_length": "64", + "file_identity_native": true, + "exclude_lines": [], + "include_lines": [] } } } } }, "force": true -} +} \ No newline at end of file diff --git a/salt/elasticfleet/files/integrations/grid-nodes_general/soc-detections-logs.json b/salt/elasticfleet/files/integrations/grid-nodes_general/soc-detections-logs.json index 5649b481d..f1bdbc922 100644 --- a/salt/elasticfleet/files/integrations/grid-nodes_general/soc-detections-logs.json +++ b/salt/elasticfleet/files/integrations/grid-nodes_general/soc-detections-logs.json @@ -1,35 +1,48 @@ { - "policy_id": "so-grid-nodes_general", "package": { - "name": "log", + "name": "filestream", "version": "" }, "name": "soc-detections-logs", "description": "Security Onion Console - Detections Logs", + "policy_id": "so-grid-nodes_general", "namespace": "so", "inputs": { - "logs-logfile": { + "filestream-filestream": { "enabled": true, "streams": { - "log.logs": { + "filestream.generic": { "enabled": true, "vars": { "paths": [ "/opt/so/log/soc/detections_runtime-status_sigma.log", "/opt/so/log/soc/detections_runtime-status_yara.log" ], - "exclude_files": [], - "ignore_older": "72h", "data_stream.dataset": "soc", + "pipeline": "common", + "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n", + "exclude_files": [ + "\\.gz$" + ], + "include_files": [], + "processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"soc\"\n process_array: true\n max_depth: 2\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: detections\n- rename:\n fields:\n - from: \"soc.fields.sourceIp\"\n to: \"source.ip\"\n - from: \"soc.fields.status\"\n to: \"http.response.status_code\"\n - from: \"soc.fields.method\"\n to: \"http.request.method\"\n - from: \"soc.fields.path\"\n to: \"url.path\"\n - from: \"soc.message\"\n to: \"event.action\"\n - from: \"soc.level\"\n to: \"log.level\"\n ignore_missing: true", "tags": [ "so-soc" ], - "processors": "- decode_json_fields:\n fields: [\"message\"]\n target: 
\"soc\"\n process_array: true\n max_depth: 2\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: detections\n- rename:\n fields:\n - from: \"soc.fields.sourceIp\"\n to: \"source.ip\"\n - from: \"soc.fields.status\"\n to: \"http.response.status_code\"\n - from: \"soc.fields.method\"\n to: \"http.request.method\"\n - from: \"soc.fields.path\"\n to: \"url.path\"\n - from: \"soc.message\"\n to: \"event.action\"\n - from: \"soc.level\"\n to: \"log.level\"\n ignore_missing: true", - "custom": "pipeline: common" + "recursive_glob": true, + "ignore_older": "72h", + "clean_inactive": -1, + "harvester_limit": 0, + "fingerprint": false, + "fingerprint_offset": 0, + "fingerprint_length": "64", + "file_identity_native": true, + "exclude_lines": [], + "include_lines": [] } } } } }, "force": true -} +} \ No newline at end of file diff --git a/salt/elasticfleet/files/integrations/grid-nodes_general/soc-salt-relay-logs.json b/salt/elasticfleet/files/integrations/grid-nodes_general/soc-salt-relay-logs.json index cc92092e9..cb08d5b12 100644 --- a/salt/elasticfleet/files/integrations/grid-nodes_general/soc-salt-relay-logs.json +++ b/salt/elasticfleet/files/integrations/grid-nodes_general/soc-salt-relay-logs.json @@ -1,30 +1,46 @@ { "package": { - "name": "log", + "name": "filestream", "version": "" }, "name": "soc-salt-relay-logs", - "namespace": "so", "description": "Security Onion - Salt Relay - Logs", "policy_id": "so-grid-nodes_general", + "namespace": "so", "inputs": { - "logs-logfile": { + "filestream-filestream": { "enabled": true, "streams": { - "log.logs": { + "filestream.generic": { "enabled": true, "vars": { "paths": [ "/opt/so/log/soc/salt-relay.log" ], "data_stream.dataset": "soc", - "tags": ["so-soc"], + "pipeline": "common", + "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n", + "exclude_files": [ + "\\.gz$" + ], + "include_files": [], "processors": "- dissect:\n tokenizer: \"%{soc.ts} | %{event.action}\"\n field: \"message\"\n target_prefix: \"\"\n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: salt_relay", - "custom": "pipeline: common" + "tags": [ + "so-soc" + ], + "recursive_glob": true, + "clean_inactive": -1, + "harvester_limit": 0, + "fingerprint": false, + "fingerprint_offset": 0, + "fingerprint_length": "64", + "file_identity_native": true, + "exclude_lines": [], + "include_lines": [] } } } } }, "force": true -} +} \ No newline at end of file diff --git a/salt/elasticfleet/files/integrations/grid-nodes_general/soc-sensoroni-logs.json b/salt/elasticfleet/files/integrations/grid-nodes_general/soc-sensoroni-logs.json index 61ad057f4..11e686c3d 100644 --- a/salt/elasticfleet/files/integrations/grid-nodes_general/soc-sensoroni-logs.json +++ b/salt/elasticfleet/files/integrations/grid-nodes_general/soc-sensoroni-logs.json @@ -1,30 +1,44 @@ { "package": { - "name": "log", + "name": "filestream", "version": "" }, "name": "soc-sensoroni-logs", - "namespace": "so", "description": "Security Onion - Sensoroni - Logs", "policy_id": "so-grid-nodes_general", + "namespace": "so", "inputs": { - "logs-logfile": { + "filestream-filestream": { "enabled": true, "streams": { - "log.logs": { + "filestream.generic": { "enabled": true, "vars": { "paths": [ "/opt/so/log/sensoroni/sensoroni.log" ], "data_stream.dataset": "soc", - "tags": [], + "pipeline": "common", + "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# 
count_lines: 3\n", + "exclude_files": [ + "\\.gz$" + ], + "include_files": [], "processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"sensoroni\"\n process_array: true\n max_depth: 2\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: sensoroni\n- rename:\n fields:\n - from: \"sensoroni.fields.sourceIp\"\n to: \"source.ip\"\n - from: \"sensoroni.fields.status\"\n to: \"http.response.status_code\"\n - from: \"sensoroni.fields.method\"\n to: \"http.request.method\"\n - from: \"sensoroni.fields.path\"\n to: \"url.path\"\n - from: \"sensoroni.message\"\n to: \"event.action\"\n - from: \"sensoroni.level\"\n to: \"log.level\"\n ignore_missing: true", - "custom": "pipeline: common" + "tags": [], + "recursive_glob": true, + "clean_inactive": -1, + "harvester_limit": 0, + "fingerprint": false, + "fingerprint_offset": 0, + "fingerprint_length": "64", + "file_identity_native": true, + "exclude_lines": [], + "include_lines": [] } } } } }, - "force": true -} +"force": true +} \ No newline at end of file diff --git a/salt/elasticfleet/files/integrations/grid-nodes_general/soc-server-logs.json b/salt/elasticfleet/files/integrations/grid-nodes_general/soc-server-logs.json index a875e4bfc..decb6b22a 100644 --- a/salt/elasticfleet/files/integrations/grid-nodes_general/soc-server-logs.json +++ b/salt/elasticfleet/files/integrations/grid-nodes_general/soc-server-logs.json @@ -1,30 +1,46 @@ { "package": { - "name": "log", + "name": "filestream", "version": "" }, "name": "soc-server-logs", - "namespace": "so", "description": "Security Onion Console Logs", "policy_id": "so-grid-nodes_general", + "namespace": "so", "inputs": { - "logs-logfile": { + "filestream-filestream": { "enabled": true, "streams": { - "log.logs": { + "filestream.generic": { "enabled": true, "vars": { "paths": [ "/opt/so/log/soc/sensoroni-server.log" ], "data_stream.dataset": "soc", - "tags": ["so-soc"], + "pipeline": "common", + "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n", + "exclude_files": [ + "\\.gz$" + ], + "include_files": [], "processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"soc\"\n process_array: true\n max_depth: 2\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: server\n- rename:\n fields:\n - from: \"soc.fields.sourceIp\"\n to: \"source.ip\"\n - from: \"soc.fields.status\"\n to: \"http.response.status_code\"\n - from: \"soc.fields.method\"\n to: \"http.request.method\"\n - from: \"soc.fields.path\"\n to: \"url.path\"\n - from: \"soc.message\"\n to: \"event.action\"\n - from: \"soc.level\"\n to: \"log.level\"\n ignore_missing: true", - "custom": "pipeline: common" + "tags": [ + "so-soc" + ], + "recursive_glob": true, + "clean_inactive": -1, + "harvester_limit": 0, + "fingerprint": false, + "fingerprint_offset": 0, + "fingerprint_length": "64", + "file_identity_native": true, + "exclude_lines": [], + "include_lines": [] } } } } }, "force": true -} +} \ No newline at end of file diff --git a/salt/elasticfleet/files/integrations/grid-nodes_general/strelka-logs.json b/salt/elasticfleet/files/integrations/grid-nodes_general/strelka-logs.json index 89e9bbe8e..1f0203a91 100644 --- a/salt/elasticfleet/files/integrations/grid-nodes_general/strelka-logs.json +++ b/salt/elasticfleet/files/integrations/grid-nodes_general/strelka-logs.json @@ -1,30 +1,44 @@ { "package": { - "name": "log", + "name": "filestream", 
"version": "" }, "name": "strelka-logs", - "namespace": "so", - "description": "Strelka logs", + "description": "Strelka Logs", "policy_id": "so-grid-nodes_general", + "namespace": "so", "inputs": { - "logs-logfile": { + "filestream-filestream": { "enabled": true, "streams": { - "log.logs": { + "filestream.generic": { "enabled": true, "vars": { "paths": [ "/nsm/strelka/log/strelka.log" ], "data_stream.dataset": "strelka", - "tags": [], + "pipeline": "strelka.file", + "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n", + "exclude_files": [ + "\\.gz$" + ], + "include_files": [], "processors": "- add_fields:\n target: event\n fields:\n category: file\n module: strelka", - "custom": "pipeline: strelka.file" + "tags": [], + "recursive_glob": true, + "clean_inactive": -1, + "harvester_limit": 0, + "fingerprint": false, + "fingerprint_offset": 0, + "fingerprint_length": "64", + "file_identity_native": true, + "exclude_lines": [], + "include_lines": [] } } } } }, "force": true -} +} \ No newline at end of file diff --git a/salt/elasticfleet/files/integrations/grid-nodes_general/suricata-logs.json b/salt/elasticfleet/files/integrations/grid-nodes_general/suricata-logs.json index c3b04fd86..22fbf8726 100644 --- a/salt/elasticfleet/files/integrations/grid-nodes_general/suricata-logs.json +++ b/salt/elasticfleet/files/integrations/grid-nodes_general/suricata-logs.json @@ -1,30 +1,44 @@ { "package": { - "name": "log", + "name": "filestream", "version": "" }, "name": "suricata-logs", - "namespace": "so", "description": "Suricata integration", "policy_id": "so-grid-nodes_general", + "namespace": "so", "inputs": { - "logs-logfile": { + "filestream-filestream": { "enabled": true, "streams": { - "log.logs": { + "filestream.generic": { "enabled": true, "vars": { "paths": [ "/nsm/suricata/eve*.json" ], - "data_stream.dataset": "suricata", - "tags": [], + "data_stream.dataset": "filestream.generic", + "pipeline": "suricata.common", + "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n", + "exclude_files": [ + "\\.gz$" + ], + "include_files": [], "processors": "- add_fields:\n target: event\n fields:\n category: network\n module: suricata", - "custom": "pipeline: suricata.common" + "tags": [], + "recursive_glob": true, + "clean_inactive": -1, + "harvester_limit": 0, + "fingerprint": false, + "fingerprint_offset": 0, + "fingerprint_length": "64", + "file_identity_native": true, + "exclude_lines": [], + "include_lines": [] } } } } }, "force": true -} +} \ No newline at end of file From 6de20c63d4c3835122b65aeadacefabd3a7634b1 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 13 Jan 2026 16:20:57 -0500 Subject: [PATCH 02/17] Update VERSION --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index 86df31761..4f09e82ad 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.4.200 +2.4.201 From 3fb153c43e75bcf804458ab9ed458f4e97b061ac Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 13 Jan 2026 16:41:39 -0500 Subject: [PATCH 03/17] Add support for version 2.4.201 upgrades --- salt/manager/tools/sbin/soup | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 06fdbd70f..87de5baf0 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -427,6 +427,7 @@ preupgrade_changes() { [[ "$INSTALLEDVERSION" == 2.4.170 ]] && up_to_2.4.180 [[ "$INSTALLEDVERSION" == 2.4.180 
]] && up_to_2.4.190 [[ "$INSTALLEDVERSION" == 2.4.190 ]] && up_to_2.4.200 + [[ "$INSTALLEDVERSION" == 2.4.200 ]] && up_to_2.4.201 true } @@ -459,6 +460,7 @@ postupgrade_changes() { [[ "$POSTVERSION" == 2.4.170 ]] && post_to_2.4.180 [[ "$POSTVERSION" == 2.4.180 ]] && post_to_2.4.190 [[ "$POSTVERSION" == 2.4.190 ]] && post_to_2.4.200 + [[ "$POSTVERSION" == 2.4.200 ]] && post_to_2.4.201 true } @@ -645,6 +647,11 @@ post_to_2.4.200() { POSTVERSION=2.4.200 } +post_to_2.4.201() { + echo "Nothing to apply" + POSTVERSION=2.4.201 +} + repo_sync() { echo "Sync the local repo." su socore -c '/usr/sbin/so-repo-sync' || fail "Unable to complete so-repo-sync." @@ -1317,6 +1324,12 @@ so-yaml.py removelistitem /etc/salt/master file_roots.base /opt/so/rules/nids } +up_to_2.4.201() { + echo "Nothing to do for 2.4.201" + + INSTALLEDVERSION=2.4.201 +} + determine_elastic_agent_upgrade() { if [[ $is_airgap -eq 0 ]]; then update_elastic_agent_airgap From e4225d6e9b0929d7f1eadde1d2da1f975b467252 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Thu, 15 Jan 2026 10:40:21 -0500 Subject: [PATCH 04/17] 2.4.201 --- DOWNLOAD_AND_VERIFY_ISO.md | 22 ++++++++++---------- sigs/securityonion-2.4.201-20260114.iso.sig | Bin 0 -> 566 bytes 2 files changed, 11 insertions(+), 11 deletions(-) create mode 100644 sigs/securityonion-2.4.201-20260114.iso.sig diff --git a/DOWNLOAD_AND_VERIFY_ISO.md b/DOWNLOAD_AND_VERIFY_ISO.md index a8d270efc..30da22f2f 100644 --- a/DOWNLOAD_AND_VERIFY_ISO.md +++ b/DOWNLOAD_AND_VERIFY_ISO.md @@ -1,17 +1,17 @@ -### 2.4.200-20251216 ISO image released on 2025/12/16 +### 2.4.201-20260114 ISO image released on 2026/1/15 ### Download and Verify -2.4.200-20251216 ISO image: -https://download.securityonion.net/file/securityonion/securityonion-2.4.200-20251216.iso +2.4.201-20260114 ISO image: +https://download.securityonion.net/file/securityonion/securityonion-2.4.201-20260114.iso -MD5: 07B38499952D1F2FD7B5AF10096D0043 -SHA1: 7F3A26839CA3CAEC2D90BB73D229D55E04C7D370 -SHA256: 8D3AC735873A2EA8527E16A6A08C34BD5018CBC0925AC4096E15A0C99F591D5F +MD5: 20E926E433203798512EF46E590C89B9 +SHA1: 779E4084A3E1A209B494493B8F5658508B6014FA +SHA256: 3D10E7C885AEC5C5D4F4E50F9644FF9728E8C0A2E36EBB8C96B32569685A7C40 Signature for ISO image: -https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.200-20251216.iso.sig +https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.201-20260114.iso.sig Signing key: https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.4/main/KEYS @@ -25,22 +25,22 @@ wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2. 
Download the signature file for the ISO: ``` -wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.200-20251216.iso.sig +wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.201-20260114.iso.sig ``` Download the ISO image: ``` -wget https://download.securityonion.net/file/securityonion/securityonion-2.4.200-20251216.iso +wget https://download.securityonion.net/file/securityonion/securityonion-2.4.201-20260114.iso ``` Verify the downloaded ISO image using the signature file: ``` -gpg --verify securityonion-2.4.200-20251216.iso.sig securityonion-2.4.200-20251216.iso +gpg --verify securityonion-2.4.201-20260114.iso.sig securityonion-2.4.201-20260114.iso ``` The output should show "Good signature" and the Primary key fingerprint should match what's shown below: ``` -gpg: Signature made Mon 15 Dec 2025 05:24:11 PM EST using RSA key ID FE507013 +gpg: Signature made Wed 14 Jan 2026 05:23:39 PM EST using RSA key ID FE507013 gpg: Good signature from "Security Onion Solutions, LLC " gpg: WARNING: This key is not certified with a trusted signature! gpg: There is no indication that the signature belongs to the owner. diff --git a/sigs/securityonion-2.4.201-20260114.iso.sig b/sigs/securityonion-2.4.201-20260114.iso.sig new file mode 100644 index 0000000000000000000000000000000000000000..6a24a3e25055a7f06111a049e52bdf319a337774 GIT binary patch literal 566 zcmV-60?GY}0y6{v0SEvc79j-41gSkXz6^6dp_W8^5Ma0dP;e6k0%>R$YXAxf5PT3| zxBgIY6Oqgh|8r4Nn&}D$Wwsi^mnzhuFBRZ7fQPT9375+PBk_k?eJ)j_j_E2Eu-ZNsb?G{BUXR*Niea0r` zLogeB+K=(w&ZFFamtOE`9ncxqdsk_?rsMc=T${hDb~-_$JLK%mebq)5JJV6CKGfN! zawd}y0~^sfM?~Fgd1u(p^WWH&$~r-=BFSe~MjFj1{C4$eDGsNmglO`N`L+ZoGRH2+ zKBy^_*t5@)Zzt?d8T-zAK!@R6@#e}xcOT%?*3D2ypNJJOCr=icHD*FOfixqf0fxcP z#uPluPUwJ{QlrGp@2))2MIDP77VI^Jp*s~H9)J|uDPYwC&d z@H^qsS9fZ$)^qSTy}ZcB?Y|G?P~LgVGsWsWUjXrRov2Kn=>~LJsdF*u86k1Q;w>G% EFy~zqZ~y=R literal 0 HcmV?d00001 From 4f59e462357d4dea6daede72941deadf5807117d Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Thu, 15 Jan 2026 14:38:40 -0500 Subject: [PATCH 05/17] Add version 2.4.201 to discussion template --- .github/DISCUSSION_TEMPLATE/2-4.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/DISCUSSION_TEMPLATE/2-4.yml b/.github/DISCUSSION_TEMPLATE/2-4.yml index 563b71c90..b72464213 100644 --- a/.github/DISCUSSION_TEMPLATE/2-4.yml +++ b/.github/DISCUSSION_TEMPLATE/2-4.yml @@ -33,6 +33,7 @@ body: - 2.4.180 - 2.4.190 - 2.4.200 + - 2.4.201 - 2.4.210 - Other (please provide detail below) validations: From 0da0788e6bca21f176d223c5cc5034beb787ea26 Mon Sep 17 00:00:00 2001 From: Josh Patterson Date: Thu, 15 Jan 2026 14:56:36 -0500 Subject: [PATCH 06/17] move function to be with the rest of its friends --- salt/manager/tools/sbin/soup | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 95af55903..1a6223558 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -942,6 +942,12 @@ up_to_2.4.200() { INSTALLEDVERSION=2.4.200 } +up_to_2.4.201() { + echo "Nothing to do for 2.4.201" + + INSTALLEDVERSION=2.4.201 +} + up_to_2.4.210() { # Elastic Update for this release, so download Elastic Agent files determine_elastic_agent_upgrade @@ -1345,12 +1351,6 @@ so-yaml.py removelistitem /etc/salt/master file_roots.base /opt/so/rules/nids } -up_to_2.4.201() { - echo "Nothing to do for 2.4.201" - - INSTALLEDVERSION=2.4.201 -} - determine_elastic_agent_upgrade() { if [[ $is_airgap 
-eq 0 ]]; then update_elastic_agent_airgap From 349d77ffdfa1bd8c05d0263b1168ed408acf7f84 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Thu, 15 Jan 2026 14:43:57 -0600 Subject: [PATCH 07/17] exclude kafka restart error --- salt/common/tools/sbin/so-log-check | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/common/tools/sbin/so-log-check b/salt/common/tools/sbin/so-log-check index decb52abb..69a830c6b 100755 --- a/salt/common/tools/sbin/so-log-check +++ b/salt/common/tools/sbin/so-log-check @@ -130,6 +130,7 @@ if [[ $EXCLUDE_STARTUP_ERRORS == 'Y' ]]; then EXCLUDED_ERRORS="$EXCLUDED_ERRORS|process_cluster_event_timeout_exception" # logstash waiting for elasticsearch to start EXCLUDED_ERRORS="$EXCLUDED_ERRORS|not configured for GeoIP" # SO does not bundle the maxminddb with Zeek EXCLUDED_ERRORS="$EXCLUDED_ERRORS|HTTP 404: Not Found" # Salt loops until Kratos returns 200, during startup Kratos may not be ready + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Cancelling deferred write event maybeFenceReplicas because the event queue is now closed" # Kafka controller log during shutdown/restart fi if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then From 0cd3d7b5a8f9b124d45a478372c773fc64716aac Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Thu, 15 Jan 2026 15:17:22 -0600 Subject: [PATCH 08/17] deprecated kibana config --- salt/kibana/defaults.yaml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/salt/kibana/defaults.yaml b/salt/kibana/defaults.yaml index 078f826a0..580891973 100644 --- a/salt/kibana/defaults.yaml +++ b/salt/kibana/defaults.yaml @@ -25,11 +25,10 @@ kibana: discardCorruptObjects: "8.18.8" telemetry: enabled: False - security: - showInsecureClusterWarning: False xpack: security: secureCookies: true + showInsecureClusterWarning: false reporting: kibanaServer: hostname: localhost From 596bc178df16388e6da48ad5cfb29861dabba2bf Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Thu, 15 Jan 2026 15:18:18 -0600 Subject: [PATCH 09/17] ensure docker cp command follows container symlinks --- salt/elasticsearch/tools/sbin_jinja/so-catrust | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/salt/elasticsearch/tools/sbin_jinja/so-catrust b/salt/elasticsearch/tools/sbin_jinja/so-catrust index 89cd9147d..14f9e5ca1 100644 --- a/salt/elasticsearch/tools/sbin_jinja/so-catrust +++ b/salt/elasticsearch/tools/sbin_jinja/so-catrust @@ -14,8 +14,9 @@ set -e # Check to see if we have extracted the ca cert. if [ ! 
-f /opt/so/saltstack/local/salt/elasticsearch/cacerts ]; then docker run -v /etc/pki/ca.crt:/etc/ssl/ca.crt --name so-elasticsearchca --user root --entrypoint jdk/bin/keytool {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-elasticsearch:$ELASTIC_AGENT_TARBALL_VERSION -keystore /usr/share/elasticsearch/jdk/lib/security/cacerts -alias SOSCA -import -file /etc/ssl/ca.crt -storepass changeit -noprompt - docker cp so-elasticsearchca:/usr/share/elasticsearch/jdk/lib/security/cacerts /opt/so/saltstack/local/salt/elasticsearch/cacerts - docker cp so-elasticsearchca:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem /opt/so/saltstack/local/salt/elasticsearch/tls-ca-bundle.pem + # Make sure symbolic links are followed when copying from container + docker cp -L so-elasticsearchca:/usr/share/elasticsearch/jdk/lib/security/cacerts /opt/so/saltstack/local/salt/elasticsearch/cacerts + docker cp -L so-elasticsearchca:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem /opt/so/saltstack/local/salt/elasticsearch/tls-ca-bundle.pem docker rm so-elasticsearchca echo "" >> /opt/so/saltstack/local/salt/elasticsearch/tls-ca-bundle.pem echo "sosca" >> /opt/so/saltstack/local/salt/elasticsearch/tls-ca-bundle.pem From d430ed6727b302f340ac2295e2f2dd8663e26217 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Thu, 15 Jan 2026 15:25:28 -0600 Subject: [PATCH 10/17] false positive --- salt/common/tools/sbin/so-log-check | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/common/tools/sbin/so-log-check b/salt/common/tools/sbin/so-log-check index 69a830c6b..41ad2aa93 100755 --- a/salt/common/tools/sbin/so-log-check +++ b/salt/common/tools/sbin/so-log-check @@ -161,6 +161,7 @@ if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then EXCLUDED_ERRORS="$EXCLUDED_ERRORS|adding ingest pipeline" # false positive (elasticsearch ingest pipeline names contain 'error') EXCLUDED_ERRORS="$EXCLUDED_ERRORS|updating index template" # false positive (elasticsearch index or template names contain 'error') EXCLUDED_ERRORS="$EXCLUDED_ERRORS|updating component template" # false positive (elasticsearch index or template names contain 'error') + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|upgrading component template" # false positive (elasticsearch index or template names contain 'error') EXCLUDED_ERRORS="$EXCLUDED_ERRORS|upgrading composable template" # false positive (elasticsearch composable template names contain 'error') fi From 2133ada3a1ce138f8e0715956cb3fba269a76aac Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Fri, 16 Jan 2026 13:09:08 -0600 Subject: [PATCH 11/17] add additional retries within scripts before salt re-runs the entire script --- .../tools/sbin_jinja/so-elastic-agent-grid-upgrade | 2 +- .../tools/sbin_jinja/so-elastic-fleet-es-url-update | 2 +- .../tools/sbin_jinja/so-elastic-fleet-outputs-update | 4 ++-- .../tools/sbin_jinja/so-elastic-fleet-urls-update | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/salt/elasticfleet/tools/sbin_jinja/so-elastic-agent-grid-upgrade b/salt/elasticfleet/tools/sbin_jinja/so-elastic-agent-grid-upgrade index 449d26c99..e756f5df8 100644 --- a/salt/elasticfleet/tools/sbin_jinja/so-elastic-agent-grid-upgrade +++ b/salt/elasticfleet/tools/sbin_jinja/so-elastic-agent-grid-upgrade @@ -14,7 +14,7 @@ if ! 
is_manager_node; then fi # Get current list of Grid Node Agents that need to be upgraded -RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/agents?perPage=20&page=1&kuery=NOT%20agent.version%20:%20%22{{ELASTICSEARCHDEFAULTS.elasticsearch.version}}%22%20and%20policy_id%20:%20%22so-grid-nodes_general%22&showInactive=false&getStatusSummary=true") +RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/agents?perPage=20&page=1&kuery=NOT%20agent.version%20:%20%22{{ELASTICSEARCHDEFAULTS.elasticsearch.version}}%22%20and%20policy_id%20:%20%22so-grid-nodes_general%22&showInactive=false&getStatusSummary=true" --retry 3 --retry-delay 30 --fail 2>/dev/null) # Check to make sure that the server responded with good data - else, bail from script CHECKSUM=$(jq -r '.page' <<< "$RAW_JSON") diff --git a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-es-url-update b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-es-url-update index 3da6b3e78..17086bf1a 100644 --- a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-es-url-update +++ b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-es-url-update @@ -26,7 +26,7 @@ function update_es_urls() { } # Get current list of Fleet Elasticsearch URLs -RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_elasticsearch') +RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_elasticsearch' --retry 3 --retry-delay 30 --fail 2>/dev/null) # Check to make sure that the server responded with good data - else, bail from script CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON") diff --git a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-outputs-update b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-outputs-update index 58baadca5..f045bf753 100644 --- a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-outputs-update +++ b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-outputs-update @@ -142,7 +142,7 @@ function update_kafka_outputs() { {% if GLOBALS.pipeline == "KAFKA" %} # Get current list of Kafka Outputs - RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_kafka') + RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_kafka' --retry 3 --retry-delay 30 --fail 2>/dev/null) # Check to make sure that the server responded with good data - else, bail from script CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON") @@ -168,7 +168,7 @@ function update_kafka_outputs() { {# If global pipeline isn't set to KAFKA then assume default of REDIS / logstash #} {% else %} # Get current list of Logstash Outputs - RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_logstash') + RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_logstash' --retry 3 --retry-delay 30 --fail 2>/dev/null) # Check to make sure that the server responded with good data - else, bail from script CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON") diff --git a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-urls-update b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-urls-update index 5f7637cd3..d841b39e4 100644 --- a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-urls-update +++ b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-urls-update @@ -23,7 +23,7 @@ function 
update_fleet_urls() { } # Get current list of Fleet Server URLs -RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/fleet_server_hosts/grid-default') +RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/fleet_server_hosts/grid-default' --retry 3 --retry-delay 30 --fail 2>/dev/null) # Check to make sure that the server responded with good data - else, bail from script CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON") From 5c63111002862a581e429efd3035ac55b37c5673 Mon Sep 17 00:00:00 2001 From: Josh Patterson Date: Fri, 16 Jan 2026 16:42:24 -0500 Subject: [PATCH 12/17] add timing to scripts to allow for debugging delays --- .../so-elasticsearch-ilm-policy-load | 116 +++++++++++++++ .../so-elasticsearch-templates-load | 138 ++++++++++++++++++ 2 files changed, 254 insertions(+) diff --git a/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-ilm-policy-load b/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-ilm-policy-load index 04a7a8ab0..13123020c 100755 --- a/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-ilm-policy-load +++ b/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-ilm-policy-load @@ -4,32 +4,148 @@ # https://securityonion.net/license; you may not use this file except in compliance with the # Elastic License 2.0. +# Logging configuration +LOG_FILE="/opt/so/log/elasticsearch/so-elasticsearch-ilm-policy-load.log" +LOG_DIR=$(dirname "$LOG_FILE") + +# Ensure log directory exists +if [[ ! -d "$LOG_DIR" ]]; then + mkdir -p "$LOG_DIR" +fi + +# Timing variables +SCRIPT_START_TIME=$(date +%s%3N) +LAST_CHECKPOINT_TIME=$SCRIPT_START_TIME + +# Get elapsed time in milliseconds since script start +get_elapsed_ms() { + local now=$(date +%s%3N) + echo $((now - SCRIPT_START_TIME)) +} + +# Get time since last checkpoint and update checkpoint +get_checkpoint_ms() { + local now=$(date +%s%3N) + local elapsed=$((now - LAST_CHECKPOINT_TIME)) + LAST_CHECKPOINT_TIME=$now + echo $elapsed +} + +# Logging function with timing +log() { + local level=$1 + shift + local message="$*" + local timestamp=$(date '+%Y-%m-%d %H:%M:%S') + local elapsed_ms=$(get_elapsed_ms) + local elapsed_sec=$(echo "scale=3; $elapsed_ms / 1000" | bc) + echo "[$timestamp] [+${elapsed_sec}s] [$level] $message" | tee -a "$LOG_FILE" +} + +log_info() { + log "INFO" "$@" +} + +log_warn() { + log "WARN" "$@" +} + +log_error() { + log "ERROR" "$@" +} + +# Log with checkpoint timing (shows time since last checkpoint) +log_timing() { + local checkpoint_ms=$(get_checkpoint_ms) + local checkpoint_sec=$(echo "scale=3; $checkpoint_ms / 1000" | bc) + local elapsed_ms=$(get_elapsed_ms) + local elapsed_sec=$(echo "scale=3; $elapsed_ms / 1000" | bc) + local timestamp=$(date '+%Y-%m-%d %H:%M:%S') + echo "[$timestamp] [+${elapsed_sec}s] [TIMING] $* (took ${checkpoint_sec}s)" | tee -a "$LOG_FILE" +} + +log_info "========== Starting Elasticsearch ILM policy load ==========" + +LAST_CHECKPOINT_TIME=$(date +%s%3N) . /usr/sbin/so-common +log_timing "Sourced so-common" {%- from 'elasticsearch/template.map.jinja' import ES_INDEX_SETTINGS %} +policy_count=0 +total_policy_time_ms=0 + {%- for index, settings in ES_INDEX_SETTINGS.items() %} {%- if settings.policy is defined %} {%- if index == 'so-logs-detections.alerts' %} echo echo "Setting up so-logs-detections.alerts-so policy..." + log_info "Setting up so-logs-detections.alerts-so policy..." 
+ LAST_CHECKPOINT_TIME=$(date +%s%3N) + policy_start=$(date +%s%3N) curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -s -k -L -X PUT "https://localhost:9200/_ilm/policy/{{ index }}-so" -H 'Content-Type: application/json' -d'{ "policy": {{ settings.policy | tojson(true) }} }' + policy_end=$(date +%s%3N) + policy_duration=$((policy_end - policy_start)) + total_policy_time_ms=$((total_policy_time_ms + policy_duration)) + policy_count=$((policy_count + 1)) + log_timing "Completed so-logs-detections.alerts-so policy (${policy_duration}ms)" echo {%- elif index == 'so-logs-soc' %} echo echo "Setting up so-soc-logs policy..." + log_info "Setting up so-soc-logs policy..." + LAST_CHECKPOINT_TIME=$(date +%s%3N) + policy_start=$(date +%s%3N) curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -s -k -L -X PUT "https://localhost:9200/_ilm/policy/so-soc-logs" -H 'Content-Type: application/json' -d'{ "policy": {{ settings.policy | tojson(true) }} }' + policy_end=$(date +%s%3N) + policy_duration=$((policy_end - policy_start)) + total_policy_time_ms=$((total_policy_time_ms + policy_duration)) + policy_count=$((policy_count + 1)) + log_timing "Completed so-soc-logs policy (${policy_duration}ms)" echo echo echo "Setting up {{ index }}-logs policy..." + log_info "Setting up {{ index }}-logs policy..." + LAST_CHECKPOINT_TIME=$(date +%s%3N) + policy_start=$(date +%s%3N) curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -s -k -L -X PUT "https://localhost:9200/_ilm/policy/{{ index }}-logs" -H 'Content-Type: application/json' -d'{ "policy": {{ settings.policy | tojson(true) }} }' + policy_end=$(date +%s%3N) + policy_duration=$((policy_end - policy_start)) + total_policy_time_ms=$((total_policy_time_ms + policy_duration)) + policy_count=$((policy_count + 1)) + log_timing "Completed {{ index }}-logs policy (${policy_duration}ms)" echo {%- else %} echo echo "Setting up {{ index }}-logs policy..." + log_info "Setting up {{ index }}-logs policy..." 
+ LAST_CHECKPOINT_TIME=$(date +%s%3N) + policy_start=$(date +%s%3N) curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -s -k -L -X PUT "https://localhost:9200/_ilm/policy/{{ index }}-logs" -H 'Content-Type: application/json' -d'{ "policy": {{ settings.policy | tojson(true) }} }' + policy_end=$(date +%s%3N) + policy_duration=$((policy_end - policy_start)) + total_policy_time_ms=$((total_policy_time_ms + policy_duration)) + policy_count=$((policy_count + 1)) + log_timing "Completed {{ index }}-logs policy (${policy_duration}ms)" echo {%- endif %} {%- endif %} {%- endfor %} echo + +# Final timing summary +total_elapsed_ms=$(get_elapsed_ms) +total_elapsed_sec=$(echo "scale=3; $total_elapsed_ms / 1000" | bc) +avg_policy_ms=0 +if [[ $policy_count -gt 0 ]]; then + avg_policy_ms=$((total_policy_time_ms / policy_count)) +fi + +log_info "========== TIMING SUMMARY ==========" +log_info "Total policies processed: $policy_count" +log_info "Total policy load time: ${total_policy_time_ms}ms" +log_info "Average time per policy: ${avg_policy_ms}ms" +log_info "Total script time: ${total_elapsed_sec}s" +log_info "====================================" + +log_info "========== Elasticsearch ILM policy load complete (${total_elapsed_sec}s) ==========" diff --git a/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-templates-load b/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-templates-load index 4ac1b4d5f..e274dbc90 100755 --- a/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-templates-load +++ b/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-templates-load @@ -6,20 +6,84 @@ {%- import_yaml 'elasticfleet/defaults.yaml' as ELASTICFLEETDEFAULTS %} {% from 'vars/globals.map.jinja' import GLOBALS %} +# Logging configuration +LOG_FILE="/opt/so/log/elasticsearch/so-elasticsearch-templates-load.log" +LOG_DIR=$(dirname "$LOG_FILE") + +# Ensure log directory exists +if [[ ! 
-d "$LOG_DIR" ]]; then + mkdir -p "$LOG_DIR" +fi + +# Timing variables +SCRIPT_START_TIME=$(date +%s%3N) +LAST_CHECKPOINT_TIME=$SCRIPT_START_TIME + +# Get elapsed time in milliseconds since script start +get_elapsed_ms() { + local now=$(date +%s%3N) + echo $((now - SCRIPT_START_TIME)) +} + +# Get time since last checkpoint and update checkpoint +get_checkpoint_ms() { + local now=$(date +%s%3N) + local elapsed=$((now - LAST_CHECKPOINT_TIME)) + LAST_CHECKPOINT_TIME=$now + echo $elapsed +} + +# Logging function with timing +log() { + local level=$1 + shift + local message="$*" + local timestamp=$(date '+%Y-%m-%d %H:%M:%S') + local elapsed_ms=$(get_elapsed_ms) + local elapsed_sec=$(echo "scale=3; $elapsed_ms / 1000" | bc) + echo "[$timestamp] [+${elapsed_sec}s] [$level] $message" | tee -a "$LOG_FILE" +} + +log_info() { + log "INFO" "$@" +} + +log_warn() { + log "WARN" "$@" +} + +log_error() { + log "ERROR" "$@" +} + +# Log with checkpoint timing (shows time since last checkpoint) +log_timing() { + local checkpoint_ms=$(get_checkpoint_ms) + local checkpoint_sec=$(echo "scale=3; $checkpoint_ms / 1000" | bc) + local elapsed_ms=$(get_elapsed_ms) + local elapsed_sec=$(echo "scale=3; $elapsed_ms / 1000" | bc) + local timestamp=$(date '+%Y-%m-%d %H:%M:%S') + echo "[$timestamp] [+${elapsed_sec}s] [TIMING] $* (took ${checkpoint_sec}s)" | tee -a "$LOG_FILE" +} + STATE_FILE_INITIAL=/opt/so/state/estemplates_initial_load_attempt.txt STATE_FILE_SUCCESS=/opt/so/state/estemplates.txt +log_info "========== Starting Elasticsearch templates load ==========" + if [[ -f $STATE_FILE_INITIAL ]]; then # The initial template load has already run. As this is a subsequent load, all dependencies should # already be satisified. Therefore, immediately exit/abort this script upon any template load failure # since this is an unrecoverable failure. should_exit_on_failure=1 + log_info "State file $STATE_FILE_INITIAL exists - this is a subsequent template load" else # This is the initial template load, and there likely are some components not yet setup in Elasticsearch. # Therefore load as many templates as possible at this time and if an error occurs proceed to the next # template. But if at least one template fails to load do not mark the templates as having been loaded. # This will allow the next load to resume the load of the templates that failed to load initially. should_exit_on_failure=0 + log_info "This is the initial template load" echo "This is the initial template load" fi @@ -27,30 +91,51 @@ fi pgrep soup > /dev/null && should_exit_on_failure=0 load_failures=0 +template_count=0 +total_template_time_ms=0 load_template() { uri=$1 file=$2 + + local start_time=$(date +%s%3N) echo "Loading template file $i" if ! 
retry 3 1 "so-elasticsearch-query $uri -d@$file -XPUT" "{\"acknowledged\":true}"; then + local end_time=$(date +%s%3N) + local duration=$((end_time - start_time)) + total_template_time_ms=$((total_template_time_ms + duration)) + template_count=$((template_count + 1)) if [[ $should_exit_on_failure -eq 1 ]]; then + log_error "FAILED template $file (${duration}ms)" fail "Could not load template file: $file" else load_failures=$((load_failures+1)) + log_warn "FAILED template $file (${duration}ms) - failure count: $load_failures" echo "Incremented load failure counter: $load_failures" fi + else + local end_time=$(date +%s%3N) + local duration=$((end_time - start_time)) + total_template_time_ms=$((total_template_time_ms + duration)) + template_count=$((template_count + 1)) + log_info "OK template $file (${duration}ms)" fi } if [ ! -f $STATE_FILE_SUCCESS ]; then echo "State file $STATE_FILE_SUCCESS not found. Running so-elasticsearch-templates-load." + log_info "State file $STATE_FILE_SUCCESS not found. Running so-elasticsearch-templates-load." + LAST_CHECKPOINT_TIME=$(date +%s%3N) . /usr/sbin/so-common + log_timing "Sourced so-common" {% if GLOBALS.role != 'so-heavynode' %} if [ -f /usr/sbin/so-elastic-fleet-common ]; then + LAST_CHECKPOINT_TIME=$(date +%s%3N) . /usr/sbin/so-elastic-fleet-common + log_timing "Sourced so-elastic-fleet-common" fi {% endif %} @@ -68,14 +153,20 @@ if [ ! -f $STATE_FILE_SUCCESS ]; then if [ -f "$file" ]; then # Wait for ElasticSearch to initialize echo -n "Waiting for ElasticSearch..." + log_info "Waiting for ElasticSearch to initialize..." + LAST_CHECKPOINT_TIME=$(date +%s%3N) retry 240 1 "so-elasticsearch-query / -k --output /dev/null --silent --head --fail" || fail "Connection attempt timed out. Unable to connect to ElasticSearch. \nPlease try: \n -checking log(s) in /var/log/elasticsearch/\n -running 'sudo docker ps' \n -running 'sudo so-elastic-restart'" + log_timing "ElasticSearch connection established" {% if GLOBALS.role != 'so-heavynode' %} TEMPLATE="logs-endpoint.alerts@package" + LAST_CHECKPOINT_TIME=$(date +%s%3N) INSTALLED=$(so-elasticsearch-query _component_template/$TEMPLATE | jq -r .component_templates[0].name) + log_timing "Checked for installed package template $TEMPLATE" if [ "$INSTALLED" != "$TEMPLATE" ]; then echo echo "Packages not yet installed." echo + log_warn "Packages not yet installed - exiting" exit 0 fi {% endif %} @@ -84,40 +175,56 @@ if [ ! -f $STATE_FILE_SUCCESS ]; then cd ${ELASTICSEARCH_TEMPLATES}/component/ecs + log_info "===== Loading ECS component templates from $(pwd) =====" echo "Loading ECS component templates..." + LAST_CHECKPOINT_TIME=$(date +%s%3N) + ecs_count=0 for i in *; do TEMPLATE=$(echo $i | cut -d '.' -f1) load_template "_component_template/${TEMPLATE}-mappings" "$i" + ecs_count=$((ecs_count + 1)) done + log_timing "Completed $ecs_count ECS component templates" echo cd ${ELASTICSEARCH_TEMPLATES}/component/elastic-agent + log_info "===== Loading Elastic Agent component templates from $(pwd) =====" echo "Loading Elastic Agent component templates..." 
+ LAST_CHECKPOINT_TIME=$(date +%s%3N) {% if GLOBALS.role == 'so-heavynode' %} component_pattern="so-*" {% else %} component_pattern="*" {% endif %} + agent_count=0 for i in $component_pattern; do TEMPLATE=${i::-5} load_template "_component_template/$TEMPLATE" "$i" + agent_count=$((agent_count + 1)) done + log_timing "Completed $agent_count Elastic Agent component templates" echo # Load SO-specific component templates cd ${ELASTICSEARCH_TEMPLATES}/component/so + log_info "===== Loading Security Onion component templates from $(pwd) =====" echo "Loading Security Onion component templates..." + LAST_CHECKPOINT_TIME=$(date +%s%3N) + so_count=0 for i in *; do TEMPLATE=$(echo $i | cut -d '.' -f1); load_template "_component_template/$TEMPLATE" "$i" + so_count=$((so_count + 1)) done + log_timing "Completed $so_count Security Onion component templates" echo # Load SO index templates cd ${ELASTICSEARCH_TEMPLATES}/index + log_info "===== Loading Security Onion index templates from $(pwd) =====" echo "Loading Security Onion index templates..." shopt -s extglob {% if GLOBALS.role == 'so-heavynode' %} @@ -131,35 +238,66 @@ if [ ! -f $STATE_FILE_SUCCESS ]; then # In this situation, the script will treat the skipped template as a temporary failure # and allow the templates to be loaded again on the next run or highstate, whichever # comes first. + LAST_CHECKPOINT_TIME=$(date +%s%3N) COMPONENT_LIST=$(so-elasticsearch-component-templates-list) + log_timing "Retrieved component templates list" + LAST_CHECKPOINT_TIME=$(date +%s%3N) + index_count=0 + skipped_count=0 for i in $pattern; do TEMPLATE=${i::-14} COMPONENT_PATTERN=${TEMPLATE:3} MATCH=$(echo "$TEMPLATE" | grep -E "^so-logs-|^so-metrics" | grep -vE "detections|osquery") if [[ -n "$MATCH" && ! "$COMPONENT_LIST" =~ "$COMPONENT_PATTERN" && ! "$COMPONENT_PATTERN" =~ \.generic|logs-winlog\.winlog ]]; then load_failures=$((load_failures+1)) + skipped_count=$((skipped_count + 1)) + log_warn "SKIPPED - Component template does not exist for $COMPONENT_PATTERN" echo "Component template does not exist for $COMPONENT_PATTERN. The index template will not be loaded. Load failures: $load_failures" else load_template "_index_template/$TEMPLATE" "$i" + index_count=$((index_count + 1)) fi done + log_timing "Completed $index_count index templates ($skipped_count skipped)" else {% if GLOBALS.role == 'so-heavynode' %} echo "Common template does not exist. Exiting..." + log_warn "Common template does not exist. Exiting..." {% else %} echo "Elastic Fleet not configured. Exiting..." + log_warn "Elastic Fleet not configured. Exiting..." 
{% endif %} exit 0 fi cd - >/dev/null + # Final timing summary + total_elapsed_ms=$(get_elapsed_ms) + total_elapsed_sec=$(echo "scale=3; $total_elapsed_ms / 1000" | bc) + avg_template_ms=0 + if [[ $template_count -gt 0 ]]; then + avg_template_ms=$((total_template_time_ms / template_count)) + fi + + log_info "========== TIMING SUMMARY ==========" + log_info "Total templates processed: $template_count" + log_info "Total template load time: ${total_template_time_ms}ms" + log_info "Average time per template: ${avg_template_ms}ms" + log_info "Total script time: ${total_elapsed_sec}s" + log_info "====================================" + if [[ $load_failures -eq 0 ]]; then echo "All templates loaded successfully" + log_info "All templates loaded successfully" touch $STATE_FILE_SUCCESS else echo "Encountered $load_failures templates that were unable to load, likely due to missing dependencies that will be available later; will retry on next highstate" + log_warn "Encountered $load_failures templates that were unable to load" fi else echo "Templates already loaded" + log_info "Templates already loaded (state file $STATE_FILE_SUCCESS exists)" fi + +log_info "========== Elasticsearch templates load complete ($(echo "scale=3; $(get_elapsed_ms) / 1000" | bc)s) ==========" From 82d5115b3fc7b9ed66710c3e11d4505d19b194c0 Mon Sep 17 00:00:00 2001 From: Josh Patterson Date: Fri, 16 Jan 2026 16:43:10 -0500 Subject: [PATCH 13/17] rerun so-elasticsearch-templates-load during setup --- salt/elasticfleet/enabled.sls | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/salt/elasticfleet/enabled.sls b/salt/elasticfleet/enabled.sls index db10a7182..fc9c38aaf 100644 --- a/salt/elasticfleet/enabled.sls +++ b/salt/elasticfleet/enabled.sls @@ -188,6 +188,19 @@ so-elastic-defend-manage-filters-file-watch: {% endif %} {% endif %} +{# this block exists to rerun so-elasticsearch-templates-load during setup #} +{# currently the script has a check if /usr/sbin/so-elastic-fleet-common exists before allowing templates to load #} +{# that prevents the templates from being loaded during the initial highstate #} +{% if GLOBALS.is_manager %} +so-elasticsearch-templates-elasticfleet-ready: + cmd.run: + - name: /usr/sbin/so-elasticsearch-templates-load + - cwd: /opt/so + - require: + - docker_container: so-elastic-fleet + - onlyif: pgrep so-setup +{% endif %} + delete_so-elastic-fleet_so-status.disabled: file.uncomment: - name: /opt/so/conf/so-status/so-status.conf From 074158b4950cf7801269e9870c0c149399145c4e Mon Sep 17 00:00:00 2001 From: Josh Patterson Date: Fri, 16 Jan 2026 17:42:00 -0500 Subject: [PATCH 14/17] discard so-elasticsearch-templates-load running again during setup --- salt/elasticfleet/enabled.sls | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/salt/elasticfleet/enabled.sls b/salt/elasticfleet/enabled.sls index fc9c38aaf..db10a7182 100644 --- a/salt/elasticfleet/enabled.sls +++ b/salt/elasticfleet/enabled.sls @@ -188,19 +188,6 @@ so-elastic-defend-manage-filters-file-watch: {% endif %} {% endif %} -{# this block exists to rerun so-elasticsearch-templates-load during setup #} -{# currently the script has a check if /usr/sbin/so-elastic-fleet-common exists before allowing templates to load #} -{# that prevents the templates from being loaded during the initial highstate #} -{% if GLOBALS.is_manager %} -so-elasticsearch-templates-elasticfleet-ready: - cmd.run: - - name: /usr/sbin/so-elasticsearch-templates-load - - cwd: /opt/so - - require: - - docker_container: so-elastic-fleet - - onlyif: 
pgrep so-setup -{% endif %} - delete_so-elastic-fleet_so-status.disabled: file.uncomment: - name: /opt/so/conf/so-status/so-status.conf From 17532fe49d19a5ab84b81b60e81fd801fb64f305 Mon Sep 17 00:00:00 2001 From: Josh Patterson Date: Fri, 16 Jan 2026 17:42:58 -0500 Subject: [PATCH 15/17] run a final highstate on managers prior to verify --- setup/so-setup | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/setup/so-setup b/setup/so-setup index d09e8fc35..43619fb68 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -831,6 +831,10 @@ if ! [[ -f $install_opt_file ]]; then fi checkin_at_boot set_initial_firewall_access + # run a final highstate before enabling scheduled highstates. + # this will ensure so-elasticsearch-ilm-policy-load and so-elasticsearch-templates-load have a chance to run after elasticfleet is setup + info "Running final highstate for setup" + logCmd "salt-call state.highstate -l info" logCmd "salt-call schedule.enable -linfo --local" verify_setup else From 9905d23976487972283bc47e5fb2ef94371e27d8 Mon Sep 17 00:00:00 2001 From: Josh Patterson Date: Fri, 16 Jan 2026 18:27:24 -0500 Subject: [PATCH 16/17] inform which state is being applied --- salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-setup | 2 ++ 1 file changed, 2 insertions(+) diff --git a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-setup b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-setup index 446fc6c9a..5e0dc0c69 100755 --- a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-setup +++ b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-setup @@ -241,9 +241,11 @@ printf '%s\n'\ "" >> "$global_pillar_file" # Call Elastic-Fleet Salt State +printf "\nApplying elasticfleet state" salt-call state.apply elasticfleet queue=True # Generate installers & install Elastic Agent on the node so-elastic-agent-gen-installers +printf "\nApplying elasticfleet.install_agent_grid state" salt-call state.apply elasticfleet.install_agent_grid queue=True exit 0 From 6f4b96b61b353d3bde392fd18f1ffd5993b66ea9 Mon Sep 17 00:00:00 2001 From: Josh Patterson Date: Fri, 16 Jan 2026 18:31:45 -0500 Subject: [PATCH 17/17] removing time logging changes --- .../so-elasticsearch-ilm-policy-load | 116 --------------- .../so-elasticsearch-templates-load | 138 ------------------ 2 files changed, 254 deletions(-) diff --git a/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-ilm-policy-load b/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-ilm-policy-load index 13123020c..04a7a8ab0 100755 --- a/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-ilm-policy-load +++ b/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-ilm-policy-load @@ -4,148 +4,32 @@ # https://securityonion.net/license; you may not use this file except in compliance with the # Elastic License 2.0. -# Logging configuration -LOG_FILE="/opt/so/log/elasticsearch/so-elasticsearch-ilm-policy-load.log" -LOG_DIR=$(dirname "$LOG_FILE") - -# Ensure log directory exists -if [[ ! 
-d "$LOG_DIR" ]]; then - mkdir -p "$LOG_DIR" -fi - -# Timing variables -SCRIPT_START_TIME=$(date +%s%3N) -LAST_CHECKPOINT_TIME=$SCRIPT_START_TIME - -# Get elapsed time in milliseconds since script start -get_elapsed_ms() { - local now=$(date +%s%3N) - echo $((now - SCRIPT_START_TIME)) -} - -# Get time since last checkpoint and update checkpoint -get_checkpoint_ms() { - local now=$(date +%s%3N) - local elapsed=$((now - LAST_CHECKPOINT_TIME)) - LAST_CHECKPOINT_TIME=$now - echo $elapsed -} - -# Logging function with timing -log() { - local level=$1 - shift - local message="$*" - local timestamp=$(date '+%Y-%m-%d %H:%M:%S') - local elapsed_ms=$(get_elapsed_ms) - local elapsed_sec=$(echo "scale=3; $elapsed_ms / 1000" | bc) - echo "[$timestamp] [+${elapsed_sec}s] [$level] $message" | tee -a "$LOG_FILE" -} - -log_info() { - log "INFO" "$@" -} - -log_warn() { - log "WARN" "$@" -} - -log_error() { - log "ERROR" "$@" -} - -# Log with checkpoint timing (shows time since last checkpoint) -log_timing() { - local checkpoint_ms=$(get_checkpoint_ms) - local checkpoint_sec=$(echo "scale=3; $checkpoint_ms / 1000" | bc) - local elapsed_ms=$(get_elapsed_ms) - local elapsed_sec=$(echo "scale=3; $elapsed_ms / 1000" | bc) - local timestamp=$(date '+%Y-%m-%d %H:%M:%S') - echo "[$timestamp] [+${elapsed_sec}s] [TIMING] $* (took ${checkpoint_sec}s)" | tee -a "$LOG_FILE" -} - -log_info "========== Starting Elasticsearch ILM policy load ==========" - -LAST_CHECKPOINT_TIME=$(date +%s%3N) . /usr/sbin/so-common -log_timing "Sourced so-common" {%- from 'elasticsearch/template.map.jinja' import ES_INDEX_SETTINGS %} -policy_count=0 -total_policy_time_ms=0 - {%- for index, settings in ES_INDEX_SETTINGS.items() %} {%- if settings.policy is defined %} {%- if index == 'so-logs-detections.alerts' %} echo echo "Setting up so-logs-detections.alerts-so policy..." - log_info "Setting up so-logs-detections.alerts-so policy..." - LAST_CHECKPOINT_TIME=$(date +%s%3N) - policy_start=$(date +%s%3N) curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -s -k -L -X PUT "https://localhost:9200/_ilm/policy/{{ index }}-so" -H 'Content-Type: application/json' -d'{ "policy": {{ settings.policy | tojson(true) }} }' - policy_end=$(date +%s%3N) - policy_duration=$((policy_end - policy_start)) - total_policy_time_ms=$((total_policy_time_ms + policy_duration)) - policy_count=$((policy_count + 1)) - log_timing "Completed so-logs-detections.alerts-so policy (${policy_duration}ms)" echo {%- elif index == 'so-logs-soc' %} echo echo "Setting up so-soc-logs policy..." - log_info "Setting up so-soc-logs policy..." - LAST_CHECKPOINT_TIME=$(date +%s%3N) - policy_start=$(date +%s%3N) curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -s -k -L -X PUT "https://localhost:9200/_ilm/policy/so-soc-logs" -H 'Content-Type: application/json' -d'{ "policy": {{ settings.policy | tojson(true) }} }' - policy_end=$(date +%s%3N) - policy_duration=$((policy_end - policy_start)) - total_policy_time_ms=$((total_policy_time_ms + policy_duration)) - policy_count=$((policy_count + 1)) - log_timing "Completed so-soc-logs policy (${policy_duration}ms)" echo echo echo "Setting up {{ index }}-logs policy..." - log_info "Setting up {{ index }}-logs policy..." 
- LAST_CHECKPOINT_TIME=$(date +%s%3N) - policy_start=$(date +%s%3N) curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -s -k -L -X PUT "https://localhost:9200/_ilm/policy/{{ index }}-logs" -H 'Content-Type: application/json' -d'{ "policy": {{ settings.policy | tojson(true) }} }' - policy_end=$(date +%s%3N) - policy_duration=$((policy_end - policy_start)) - total_policy_time_ms=$((total_policy_time_ms + policy_duration)) - policy_count=$((policy_count + 1)) - log_timing "Completed {{ index }}-logs policy (${policy_duration}ms)" echo {%- else %} echo echo "Setting up {{ index }}-logs policy..." - log_info "Setting up {{ index }}-logs policy..." - LAST_CHECKPOINT_TIME=$(date +%s%3N) - policy_start=$(date +%s%3N) curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -s -k -L -X PUT "https://localhost:9200/_ilm/policy/{{ index }}-logs" -H 'Content-Type: application/json' -d'{ "policy": {{ settings.policy | tojson(true) }} }' - policy_end=$(date +%s%3N) - policy_duration=$((policy_end - policy_start)) - total_policy_time_ms=$((total_policy_time_ms + policy_duration)) - policy_count=$((policy_count + 1)) - log_timing "Completed {{ index }}-logs policy (${policy_duration}ms)" echo {%- endif %} {%- endif %} {%- endfor %} echo - -# Final timing summary -total_elapsed_ms=$(get_elapsed_ms) -total_elapsed_sec=$(echo "scale=3; $total_elapsed_ms / 1000" | bc) -avg_policy_ms=0 -if [[ $policy_count -gt 0 ]]; then - avg_policy_ms=$((total_policy_time_ms / policy_count)) -fi - -log_info "========== TIMING SUMMARY ==========" -log_info "Total policies processed: $policy_count" -log_info "Total policy load time: ${total_policy_time_ms}ms" -log_info "Average time per policy: ${avg_policy_ms}ms" -log_info "Total script time: ${total_elapsed_sec}s" -log_info "====================================" - -log_info "========== Elasticsearch ILM policy load complete (${total_elapsed_sec}s) ==========" diff --git a/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-templates-load b/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-templates-load index e274dbc90..4ac1b4d5f 100755 --- a/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-templates-load +++ b/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-templates-load @@ -6,84 +6,20 @@ {%- import_yaml 'elasticfleet/defaults.yaml' as ELASTICFLEETDEFAULTS %} {% from 'vars/globals.map.jinja' import GLOBALS %} -# Logging configuration -LOG_FILE="/opt/so/log/elasticsearch/so-elasticsearch-templates-load.log" -LOG_DIR=$(dirname "$LOG_FILE") - -# Ensure log directory exists -if [[ ! 
-d "$LOG_DIR" ]]; then - mkdir -p "$LOG_DIR" -fi - -# Timing variables -SCRIPT_START_TIME=$(date +%s%3N) -LAST_CHECKPOINT_TIME=$SCRIPT_START_TIME - -# Get elapsed time in milliseconds since script start -get_elapsed_ms() { - local now=$(date +%s%3N) - echo $((now - SCRIPT_START_TIME)) -} - -# Get time since last checkpoint and update checkpoint -get_checkpoint_ms() { - local now=$(date +%s%3N) - local elapsed=$((now - LAST_CHECKPOINT_TIME)) - LAST_CHECKPOINT_TIME=$now - echo $elapsed -} - -# Logging function with timing -log() { - local level=$1 - shift - local message="$*" - local timestamp=$(date '+%Y-%m-%d %H:%M:%S') - local elapsed_ms=$(get_elapsed_ms) - local elapsed_sec=$(echo "scale=3; $elapsed_ms / 1000" | bc) - echo "[$timestamp] [+${elapsed_sec}s] [$level] $message" | tee -a "$LOG_FILE" -} - -log_info() { - log "INFO" "$@" -} - -log_warn() { - log "WARN" "$@" -} - -log_error() { - log "ERROR" "$@" -} - -# Log with checkpoint timing (shows time since last checkpoint) -log_timing() { - local checkpoint_ms=$(get_checkpoint_ms) - local checkpoint_sec=$(echo "scale=3; $checkpoint_ms / 1000" | bc) - local elapsed_ms=$(get_elapsed_ms) - local elapsed_sec=$(echo "scale=3; $elapsed_ms / 1000" | bc) - local timestamp=$(date '+%Y-%m-%d %H:%M:%S') - echo "[$timestamp] [+${elapsed_sec}s] [TIMING] $* (took ${checkpoint_sec}s)" | tee -a "$LOG_FILE" -} - STATE_FILE_INITIAL=/opt/so/state/estemplates_initial_load_attempt.txt STATE_FILE_SUCCESS=/opt/so/state/estemplates.txt -log_info "========== Starting Elasticsearch templates load ==========" - if [[ -f $STATE_FILE_INITIAL ]]; then # The initial template load has already run. As this is a subsequent load, all dependencies should # already be satisified. Therefore, immediately exit/abort this script upon any template load failure # since this is an unrecoverable failure. should_exit_on_failure=1 - log_info "State file $STATE_FILE_INITIAL exists - this is a subsequent template load" else # This is the initial template load, and there likely are some components not yet setup in Elasticsearch. # Therefore load as many templates as possible at this time and if an error occurs proceed to the next # template. But if at least one template fails to load do not mark the templates as having been loaded. # This will allow the next load to resume the load of the templates that failed to load initially. should_exit_on_failure=0 - log_info "This is the initial template load" echo "This is the initial template load" fi @@ -91,51 +27,30 @@ fi pgrep soup > /dev/null && should_exit_on_failure=0 load_failures=0 -template_count=0 -total_template_time_ms=0 load_template() { uri=$1 file=$2 - - local start_time=$(date +%s%3N) echo "Loading template file $i" if ! 
retry 3 1 "so-elasticsearch-query $uri -d@$file -XPUT" "{\"acknowledged\":true}"; then - local end_time=$(date +%s%3N) - local duration=$((end_time - start_time)) - total_template_time_ms=$((total_template_time_ms + duration)) - template_count=$((template_count + 1)) if [[ $should_exit_on_failure -eq 1 ]]; then - log_error "FAILED template $file (${duration}ms)" fail "Could not load template file: $file" else load_failures=$((load_failures+1)) - log_warn "FAILED template $file (${duration}ms) - failure count: $load_failures" echo "Incremented load failure counter: $load_failures" fi - else - local end_time=$(date +%s%3N) - local duration=$((end_time - start_time)) - total_template_time_ms=$((total_template_time_ms + duration)) - template_count=$((template_count + 1)) - log_info "OK template $file (${duration}ms)" fi } if [ ! -f $STATE_FILE_SUCCESS ]; then echo "State file $STATE_FILE_SUCCESS not found. Running so-elasticsearch-templates-load." - log_info "State file $STATE_FILE_SUCCESS not found. Running so-elasticsearch-templates-load." - LAST_CHECKPOINT_TIME=$(date +%s%3N) . /usr/sbin/so-common - log_timing "Sourced so-common" {% if GLOBALS.role != 'so-heavynode' %} if [ -f /usr/sbin/so-elastic-fleet-common ]; then - LAST_CHECKPOINT_TIME=$(date +%s%3N) . /usr/sbin/so-elastic-fleet-common - log_timing "Sourced so-elastic-fleet-common" fi {% endif %} @@ -153,20 +68,14 @@ if [ ! -f $STATE_FILE_SUCCESS ]; then if [ -f "$file" ]; then # Wait for ElasticSearch to initialize echo -n "Waiting for ElasticSearch..." - log_info "Waiting for ElasticSearch to initialize..." - LAST_CHECKPOINT_TIME=$(date +%s%3N) retry 240 1 "so-elasticsearch-query / -k --output /dev/null --silent --head --fail" || fail "Connection attempt timed out. Unable to connect to ElasticSearch. \nPlease try: \n -checking log(s) in /var/log/elasticsearch/\n -running 'sudo docker ps' \n -running 'sudo so-elastic-restart'" - log_timing "ElasticSearch connection established" {% if GLOBALS.role != 'so-heavynode' %} TEMPLATE="logs-endpoint.alerts@package" - LAST_CHECKPOINT_TIME=$(date +%s%3N) INSTALLED=$(so-elasticsearch-query _component_template/$TEMPLATE | jq -r .component_templates[0].name) - log_timing "Checked for installed package template $TEMPLATE" if [ "$INSTALLED" != "$TEMPLATE" ]; then echo echo "Packages not yet installed." echo - log_warn "Packages not yet installed - exiting" exit 0 fi {% endif %} @@ -175,56 +84,40 @@ if [ ! -f $STATE_FILE_SUCCESS ]; then cd ${ELASTICSEARCH_TEMPLATES}/component/ecs - log_info "===== Loading ECS component templates from $(pwd) =====" echo "Loading ECS component templates..." - LAST_CHECKPOINT_TIME=$(date +%s%3N) - ecs_count=0 for i in *; do TEMPLATE=$(echo $i | cut -d '.' -f1) load_template "_component_template/${TEMPLATE}-mappings" "$i" - ecs_count=$((ecs_count + 1)) done - log_timing "Completed $ecs_count ECS component templates" echo cd ${ELASTICSEARCH_TEMPLATES}/component/elastic-agent - log_info "===== Loading Elastic Agent component templates from $(pwd) =====" echo "Loading Elastic Agent component templates..." 
- LAST_CHECKPOINT_TIME=$(date +%s%3N) {% if GLOBALS.role == 'so-heavynode' %} component_pattern="so-*" {% else %} component_pattern="*" {% endif %} - agent_count=0 for i in $component_pattern; do TEMPLATE=${i::-5} load_template "_component_template/$TEMPLATE" "$i" - agent_count=$((agent_count + 1)) done - log_timing "Completed $agent_count Elastic Agent component templates" echo # Load SO-specific component templates cd ${ELASTICSEARCH_TEMPLATES}/component/so - log_info "===== Loading Security Onion component templates from $(pwd) =====" echo "Loading Security Onion component templates..." - LAST_CHECKPOINT_TIME=$(date +%s%3N) - so_count=0 for i in *; do TEMPLATE=$(echo $i | cut -d '.' -f1); load_template "_component_template/$TEMPLATE" "$i" - so_count=$((so_count + 1)) done - log_timing "Completed $so_count Security Onion component templates" echo # Load SO index templates cd ${ELASTICSEARCH_TEMPLATES}/index - log_info "===== Loading Security Onion index templates from $(pwd) =====" echo "Loading Security Onion index templates..." shopt -s extglob {% if GLOBALS.role == 'so-heavynode' %} @@ -238,66 +131,35 @@ if [ ! -f $STATE_FILE_SUCCESS ]; then # In this situation, the script will treat the skipped template as a temporary failure # and allow the templates to be loaded again on the next run or highstate, whichever # comes first. - LAST_CHECKPOINT_TIME=$(date +%s%3N) COMPONENT_LIST=$(so-elasticsearch-component-templates-list) - log_timing "Retrieved component templates list" - LAST_CHECKPOINT_TIME=$(date +%s%3N) - index_count=0 - skipped_count=0 for i in $pattern; do TEMPLATE=${i::-14} COMPONENT_PATTERN=${TEMPLATE:3} MATCH=$(echo "$TEMPLATE" | grep -E "^so-logs-|^so-metrics" | grep -vE "detections|osquery") if [[ -n "$MATCH" && ! "$COMPONENT_LIST" =~ "$COMPONENT_PATTERN" && ! "$COMPONENT_PATTERN" =~ \.generic|logs-winlog\.winlog ]]; then load_failures=$((load_failures+1)) - skipped_count=$((skipped_count + 1)) - log_warn "SKIPPED - Component template does not exist for $COMPONENT_PATTERN" echo "Component template does not exist for $COMPONENT_PATTERN. The index template will not be loaded. Load failures: $load_failures" else load_template "_index_template/$TEMPLATE" "$i" - index_count=$((index_count + 1)) fi done - log_timing "Completed $index_count index templates ($skipped_count skipped)" else {% if GLOBALS.role == 'so-heavynode' %} echo "Common template does not exist. Exiting..." - log_warn "Common template does not exist. Exiting..." {% else %} echo "Elastic Fleet not configured. Exiting..." - log_warn "Elastic Fleet not configured. Exiting..." 
{% endif %} exit 0 fi cd - >/dev/null - # Final timing summary - total_elapsed_ms=$(get_elapsed_ms) - total_elapsed_sec=$(echo "scale=3; $total_elapsed_ms / 1000" | bc) - avg_template_ms=0 - if [[ $template_count -gt 0 ]]; then - avg_template_ms=$((total_template_time_ms / template_count)) - fi - - log_info "========== TIMING SUMMARY ==========" - log_info "Total templates processed: $template_count" - log_info "Total template load time: ${total_template_time_ms}ms" - log_info "Average time per template: ${avg_template_ms}ms" - log_info "Total script time: ${total_elapsed_sec}s" - log_info "====================================" - if [[ $load_failures -eq 0 ]]; then echo "All templates loaded successfully" - log_info "All templates loaded successfully" touch $STATE_FILE_SUCCESS else echo "Encountered $load_failures templates that were unable to load, likely due to missing dependencies that will be available later; will retry on next highstate" - log_warn "Encountered $load_failures templates that were unable to load" fi else echo "Templates already loaded" - log_info "Templates already loaded (state file $STATE_FILE_SUCCESS exists)" fi - -log_info "========== Elasticsearch templates load complete ($(echo "scale=3; $(get_elapsed_ms) / 1000" | bc)s) =========="
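The timing/logging helpers that the earlier timing patch introduced and that this final patch removes were duplicated verbatim in both so-elasticsearch-ilm-policy-load and so-elasticsearch-templates-load. If that instrumentation is ever wanted again, one option is to keep it in a single sourceable helper rather than inlining it in each script. The following is a minimal sketch only, assuming a hypothetical /usr/sbin/so-timing-common that does not exist in this patch series; it reuses the function names from the removed code, but lets log_timing manage the checkpoint itself so callers no longer need to reset LAST_CHECKPOINT_TIME before every timed step.

#!/bin/bash
# Hypothetical helper: /usr/sbin/so-timing-common (sketch only, not part of this patch series).
# Consumers may override LOG_FILE before sourcing this file.
LOG_FILE="${LOG_FILE:-/opt/so/log/elasticsearch/so-script-timing.log}"
mkdir -p "$(dirname "$LOG_FILE")"

# Millisecond timestamps require GNU date (+%s%3N), as in the removed code.
SCRIPT_START_TIME=$(date +%s%3N)
LAST_CHECKPOINT_TIME=$SCRIPT_START_TIME

# Elapsed milliseconds since this helper was sourced.
get_elapsed_ms() {
    local now
    now=$(date +%s%3N)
    echo $((now - SCRIPT_START_TIME))
}

# Core logger: timestamp, total elapsed seconds, level, message; mirrored to LOG_FILE.
log() {
    local level=$1
    shift
    local elapsed_sec
    elapsed_sec=$(echo "scale=3; $(get_elapsed_ms) / 1000" | bc)
    echo "[$(date '+%Y-%m-%d %H:%M:%S')] [+${elapsed_sec}s] [$level] $*" | tee -a "$LOG_FILE"
}

log_info()  { log "INFO" "$@"; }
log_warn()  { log "WARN" "$@"; }
log_error() { log "ERROR" "$@"; }

# Log how long the step since the last checkpoint took, then advance the checkpoint.
# Runs in the current shell rather than a command substitution, so the checkpoint update persists.
log_timing() {
    local now checkpoint_ms checkpoint_sec
    now=$(date +%s%3N)
    checkpoint_ms=$((now - LAST_CHECKPOINT_TIME))
    LAST_CHECKPOINT_TIME=$now
    checkpoint_sec=$(echo "scale=3; $checkpoint_ms / 1000" | bc)
    log "TIMING" "$* (took ${checkpoint_sec}s)"
}

A consuming script would then source the helper once and wrap each step, for example:

    . /usr/sbin/so-timing-common
    . /usr/sbin/so-common
    log_timing "Sourced so-common"

which keeps per-step timing available without repeating roughly sixty lines of boilerplate in every sbin_jinja script.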