From a77157391cc23e2318cb41edaac6fe81fe62f518 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Wed, 17 Sep 2025 10:42:05 -0400 Subject: [PATCH 01/67] remove idstools --- pillar/top.sls | 6 -- salt/allowed_states.map.jinja | 1 - salt/common/tools/sbin/so-image-common | 3 - salt/docker/defaults.yaml | 5 - salt/docker/soc_docker.yaml | 3 +- salt/idstools/config.sls | 65 ------------- salt/idstools/defaults.yaml | 10 -- salt/idstools/disabled.sls | 31 ------- salt/idstools/enabled.sls | 91 ------------------- salt/idstools/etc/disable.conf | 16 ---- salt/idstools/etc/enable.conf | 16 ---- salt/idstools/etc/modify.conf | 12 --- salt/idstools/etc/rulecat.conf | 23 ----- salt/idstools/init.sls | 13 --- salt/idstools/map.jinja | 7 -- salt/idstools/rules/extraction.rules | 26 ------ salt/idstools/rules/filters.rules | 11 --- salt/idstools/rules/local.rules | 1 - salt/idstools/soc_idstools.yaml | 72 --------------- salt/idstools/sostatus.sls | 21 ----- salt/idstools/sync_files.sls | 37 -------- salt/idstools/tools/sbin/so-idstools-restart | 12 --- salt/idstools/tools/sbin/so-idstools-start | 12 --- salt/idstools/tools/sbin/so-idstools-stop | 12 --- salt/idstools/tools/sbin_jinja/so-rule-update | 40 -------- salt/logrotate/defaults.yaml | 10 -- salt/logrotate/soc_logrotate.yaml | 7 -- salt/manager/tools/sbin/so-minion | 16 ---- salt/salt/files/engines.conf | 24 ----- salt/soc/defaults.yaml | 62 ++++++++++++- salt/soc/enabled.sls | 2 +- salt/soc/merged.map.jinja | 59 +++++++++++- salt/soc/soc_soc.yaml | 46 ++++++++++ salt/suricata/defaults.yaml | 2 +- salt/top.sls | 5 - setup/so-functions | 8 +- setup/so-variables | 6 -- 37 files changed, 169 insertions(+), 624 deletions(-) delete mode 100644 salt/idstools/config.sls delete mode 100644 salt/idstools/defaults.yaml delete mode 100644 salt/idstools/disabled.sls delete mode 100644 salt/idstools/enabled.sls delete mode 100644 salt/idstools/etc/disable.conf delete mode 100644 salt/idstools/etc/enable.conf delete mode 100644 salt/idstools/etc/modify.conf delete mode 100644 salt/idstools/etc/rulecat.conf delete mode 100644 salt/idstools/init.sls delete mode 100644 salt/idstools/map.jinja delete mode 100644 salt/idstools/rules/extraction.rules delete mode 100644 salt/idstools/rules/filters.rules delete mode 100644 salt/idstools/rules/local.rules delete mode 100644 salt/idstools/soc_idstools.yaml delete mode 100644 salt/idstools/sostatus.sls delete mode 100644 salt/idstools/sync_files.sls delete mode 100755 salt/idstools/tools/sbin/so-idstools-restart delete mode 100755 salt/idstools/tools/sbin/so-idstools-start delete mode 100755 salt/idstools/tools/sbin/so-idstools-stop delete mode 100755 salt/idstools/tools/sbin_jinja/so-rule-update diff --git a/pillar/top.sls b/pillar/top.sls index 1fdb59deb..a1114b80c 100644 --- a/pillar/top.sls +++ b/pillar/top.sls @@ -43,8 +43,6 @@ base: - secrets - manager.soc_manager - manager.adv_manager - - idstools.soc_idstools - - idstools.adv_idstools - logstash.nodes - logstash.soc_logstash - logstash.adv_logstash @@ -117,8 +115,6 @@ base: - elastalert.adv_elastalert - manager.soc_manager - manager.adv_manager - - idstools.soc_idstools - - idstools.adv_idstools - soc.soc_soc - soc.adv_soc - kibana.soc_kibana @@ -158,8 +154,6 @@ base: {% endif %} - secrets - healthcheck.standalone - - idstools.soc_idstools - - idstools.adv_idstools - kratos.soc_kratos - kratos.adv_kratos - hydra.soc_hydra diff --git a/salt/allowed_states.map.jinja b/salt/allowed_states.map.jinja index 068722ca2..346ed7f12 100644 --- 
a/salt/allowed_states.map.jinja +++ b/salt/allowed_states.map.jinja @@ -38,7 +38,6 @@ 'hydra', 'elasticfleet', 'elastic-fleet-package-registry', - 'idstools', 'suricata.manager', 'utility' ] %} diff --git a/salt/common/tools/sbin/so-image-common b/salt/common/tools/sbin/so-image-common index 7fd35d5ac..e2fe4f715 100755 --- a/salt/common/tools/sbin/so-image-common +++ b/salt/common/tools/sbin/so-image-common @@ -25,7 +25,6 @@ container_list() { if [ $MANAGERCHECK == 'so-import' ]; then TRUSTED_CONTAINERS=( "so-elasticsearch" - "so-idstools" "so-influxdb" "so-kibana" "so-kratos" @@ -49,7 +48,6 @@ container_list() { "so-elastic-fleet-package-registry" "so-elasticsearch" "so-idh" - "so-idstools" "so-influxdb" "so-kafka" "so-kibana" @@ -71,7 +69,6 @@ container_list() { ) else TRUSTED_CONTAINERS=( - "so-idstools" "so-elasticsearch" "so-logstash" "so-nginx" diff --git a/salt/docker/defaults.yaml b/salt/docker/defaults.yaml index 2d7ad4e1c..456a187d6 100644 --- a/salt/docker/defaults.yaml +++ b/salt/docker/defaults.yaml @@ -24,11 +24,6 @@ docker: custom_bind_mounts: [] extra_hosts: [] extra_env: [] - 'so-idstools': - final_octet: 25 - custom_bind_mounts: [] - extra_hosts: [] - extra_env: [] 'so-influxdb': final_octet: 26 port_bindings: diff --git a/salt/docker/soc_docker.yaml b/salt/docker/soc_docker.yaml index dacbf2302..3c4475236 100644 --- a/salt/docker/soc_docker.yaml +++ b/salt/docker/soc_docker.yaml @@ -41,7 +41,6 @@ docker: forcedType: "[]string" so-elastic-fleet: *dockerOptions so-elasticsearch: *dockerOptions - so-idstools: *dockerOptions so-influxdb: *dockerOptions so-kibana: *dockerOptions so-kratos: *dockerOptions @@ -102,4 +101,4 @@ docker: multiline: True forcedType: "[]string" so-zeek: *dockerOptions - so-kafka: *dockerOptions \ No newline at end of file + so-kafka: *dockerOptions diff --git a/salt/idstools/config.sls b/salt/idstools/config.sls deleted file mode 100644 index cea75ab9a..000000000 --- a/salt/idstools/config.sls +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. 
- -{% from 'allowed_states.map.jinja' import allowed_states %} -{% if sls.split('.')[0] in allowed_states %} - -include: - - idstools.sync_files - -idstoolslogdir: - file.directory: - - name: /opt/so/log/idstools - - user: 939 - - group: 939 - - makedirs: True - -idstools_sbin: - file.recurse: - - name: /usr/sbin - - source: salt://idstools/tools/sbin - - user: 939 - - group: 939 - - file_mode: 755 - -# If this is used, exclude so-rule-update -#idstools_sbin_jinja: -# file.recurse: -# - name: /usr/sbin -# - source: salt://idstools/tools/sbin_jinja -# - user: 939 -# - group: 939 -# - file_mode: 755 -# - template: jinja - -idstools_so-rule-update: - file.managed: - - name: /usr/sbin/so-rule-update - - source: salt://idstools/tools/sbin_jinja/so-rule-update - - user: 939 - - group: 939 - - mode: 755 - - template: jinja - -suricatacustomdirsfile: - file.directory: - - name: /nsm/rules/detect-suricata/custom_file - - user: 939 - - group: 939 - - makedirs: True - -suricatacustomdirsurl: - file.directory: - - name: /nsm/rules/detect-suricata/custom_temp - - user: 939 - - group: 939 - -{% else %} - -{{sls}}_state_not_allowed: - test.fail_without_changes: - - name: {{sls}}_state_not_allowed - -{% endif %} diff --git a/salt/idstools/defaults.yaml b/salt/idstools/defaults.yaml deleted file mode 100644 index 1be100cec..000000000 --- a/salt/idstools/defaults.yaml +++ /dev/null @@ -1,10 +0,0 @@ -idstools: - enabled: False - config: - urls: [] - ruleset: ETOPEN - oinkcode: "" - sids: - enabled: [] - disabled: [] - modify: [] diff --git a/salt/idstools/disabled.sls b/salt/idstools/disabled.sls deleted file mode 100644 index ab0e10d7a..000000000 --- a/salt/idstools/disabled.sls +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - -{% from 'allowed_states.map.jinja' import allowed_states %} -{% if sls.split('.')[0] in allowed_states %} - -include: - - idstools.sostatus - -so-idstools: - docker_container.absent: - - force: True - -so-idstools_so-status.disabled: - file.comment: - - name: /opt/so/conf/so-status/so-status.conf - - regex: ^so-idstools$ - -so-rule-update: - cron.absent: - - identifier: so-rule-update - -{% else %} - -{{sls}}_state_not_allowed: - test.fail_without_changes: - - name: {{sls}}_state_not_allowed - -{% endif %} diff --git a/salt/idstools/enabled.sls b/salt/idstools/enabled.sls deleted file mode 100644 index 365b38772..000000000 --- a/salt/idstools/enabled.sls +++ /dev/null @@ -1,91 +0,0 @@ -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. 
- -{% from 'allowed_states.map.jinja' import allowed_states %} -{% if sls.split('.')[0] in allowed_states %} -{% from 'docker/docker.map.jinja' import DOCKER %} -{% from 'vars/globals.map.jinja' import GLOBALS %} -{% set proxy = salt['pillar.get']('manager:proxy') %} - -include: - - idstools.config - - idstools.sostatus - -so-idstools: - docker_container.running: - - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-idstools:{{ GLOBALS.so_version }} - - hostname: so-idstools - - user: socore - - networks: - - sobridge: - - ipv4_address: {{ DOCKER.containers['so-idstools'].ip }} - {% if proxy %} - - environment: - - http_proxy={{ proxy }} - - https_proxy={{ proxy }} - - no_proxy={{ salt['pillar.get']('manager:no_proxy') }} - {% if DOCKER.containers['so-idstools'].extra_env %} - {% for XTRAENV in DOCKER.containers['so-idstools'].extra_env %} - - {{ XTRAENV }} - {% endfor %} - {% endif %} - {% elif DOCKER.containers['so-idstools'].extra_env %} - - environment: - {% for XTRAENV in DOCKER.containers['so-idstools'].extra_env %} - - {{ XTRAENV }} - {% endfor %} - {% endif %} - - binds: - - /opt/so/conf/idstools/etc:/opt/so/idstools/etc:ro - - /opt/so/rules/nids/suri:/opt/so/rules/nids/suri:rw - - /nsm/rules/:/nsm/rules/:rw - {% if DOCKER.containers['so-idstools'].custom_bind_mounts %} - {% for BIND in DOCKER.containers['so-idstools'].custom_bind_mounts %} - - {{ BIND }} - {% endfor %} - {% endif %} - - extra_hosts: - - {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }} - {% if DOCKER.containers['so-idstools'].extra_hosts %} - {% for XTRAHOST in DOCKER.containers['so-idstools'].extra_hosts %} - - {{ XTRAHOST }} - {% endfor %} - {% endif %} - - watch: - - file: idstoolsetcsync - - file: idstools_so-rule-update - -delete_so-idstools_so-status.disabled: - file.uncomment: - - name: /opt/so/conf/so-status/so-status.conf - - regex: ^so-idstools$ - -so-rule-update: - cron.present: - - name: /usr/sbin/so-rule-update > /opt/so/log/idstools/download_cron.log 2>&1 - - identifier: so-rule-update - - user: root - - minute: '1' - - hour: '7' - -# order this last to give so-idstools container time to be ready -run_so-rule-update: - cmd.run: - - name: '/usr/sbin/so-rule-update > /opt/so/log/idstools/download_idstools_state.log 2>&1' - - require: - - docker_container: so-idstools - - onchanges: - - file: idstools_so-rule-update - - file: idstoolsetcsync - - file: synclocalnidsrules - - order: last - -{% else %} - -{{sls}}_state_not_allowed: - test.fail_without_changes: - - name: {{sls}}_state_not_allowed - -{% endif %} diff --git a/salt/idstools/etc/disable.conf b/salt/idstools/etc/disable.conf deleted file mode 100644 index 84144a495..000000000 --- a/salt/idstools/etc/disable.conf +++ /dev/null @@ -1,16 +0,0 @@ -{%- set disabled_sids = salt['pillar.get']('idstools:sids:disabled', {}) -%} -# idstools - disable.conf - -# Example of disabling a rule by signature ID (gid is optional). -# 1:2019401 -# 2019401 - -# Example of disabling a rule by regular expression. -# - All regular expression matches are case insensitive. 
-# re:hearbleed -# re:MS(0[7-9]|10)-\d+ -{%- if disabled_sids != None %} -{%- for sid in disabled_sids %} -{{ sid }} -{%- endfor %} -{%- endif %} \ No newline at end of file diff --git a/salt/idstools/etc/enable.conf b/salt/idstools/etc/enable.conf deleted file mode 100644 index 5da0bfc61..000000000 --- a/salt/idstools/etc/enable.conf +++ /dev/null @@ -1,16 +0,0 @@ -{%- set enabled_sids = salt['pillar.get']('idstools:sids:enabled', {}) -%} -# idstools-rulecat - enable.conf - -# Example of enabling a rule by signature ID (gid is optional). -# 1:2019401 -# 2019401 - -# Example of enabling a rule by regular expression. -# - All regular expression matches are case insensitive. -# re:hearbleed -# re:MS(0[7-9]|10)-\d+ -{%- if enabled_sids != None %} -{%- for sid in enabled_sids %} -{{ sid }} -{%- endfor %} -{%- endif %} \ No newline at end of file diff --git a/salt/idstools/etc/modify.conf b/salt/idstools/etc/modify.conf deleted file mode 100644 index 4ea75ada2..000000000 --- a/salt/idstools/etc/modify.conf +++ /dev/null @@ -1,12 +0,0 @@ -{%- set modify_sids = salt['pillar.get']('idstools:sids:modify', {}) -%} -# idstools-rulecat - modify.conf - -# Format: "" "" - -# Example changing the seconds for rule 2019401 to 3600. -#2019401 "seconds \d+" "seconds 3600" -{%- if modify_sids != None %} -{%- for sid in modify_sids %} -{{ sid }} -{%- endfor %} -{%- endif %} \ No newline at end of file diff --git a/salt/idstools/etc/rulecat.conf b/salt/idstools/etc/rulecat.conf deleted file mode 100644 index e4ec611db..000000000 --- a/salt/idstools/etc/rulecat.conf +++ /dev/null @@ -1,23 +0,0 @@ -{%- from 'vars/globals.map.jinja' import GLOBALS -%} -{%- from 'soc/merged.map.jinja' import SOCMERGED -%} ---suricata-version=7.0.3 ---merged=/opt/so/rules/nids/suri/all.rules ---output=/nsm/rules/detect-suricata/custom_temp ---local=/opt/so/rules/nids/suri/local.rules -{%- if GLOBALS.md_engine == "SURICATA" %} ---local=/opt/so/rules/nids/suri/extraction.rules ---local=/opt/so/rules/nids/suri/filters.rules -{%- endif %} ---url=http://{{ GLOBALS.manager }}:7788/suricata/emerging-all.rules ---disable=/opt/so/idstools/etc/disable.conf ---enable=/opt/so/idstools/etc/enable.conf ---modify=/opt/so/idstools/etc/modify.conf -{%- if SOCMERGED.config.server.modules.suricataengine.customRulesets %} - {%- for ruleset in SOCMERGED.config.server.modules.suricataengine.customRulesets %} - {%- if 'url' in ruleset %} ---url={{ ruleset.url }} - {%- elif 'file' in ruleset %} ---local={{ ruleset.file }} - {%- endif %} - {%- endfor %} -{%- endif %} diff --git a/salt/idstools/init.sls b/salt/idstools/init.sls deleted file mode 100644 index ac1d51717..000000000 --- a/salt/idstools/init.sls +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - -{% from 'idstools/map.jinja' import IDSTOOLSMERGED %} - -include: -{% if IDSTOOLSMERGED.enabled %} - - idstools.enabled -{% else %} - - idstools.disabled -{% endif %} diff --git a/salt/idstools/map.jinja b/salt/idstools/map.jinja deleted file mode 100644 index 97d12279b..000000000 --- a/salt/idstools/map.jinja +++ /dev/null @@ -1,7 +0,0 @@ -{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one - or more contributor license agreements. 
Licensed under the Elastic License 2.0 as shown at - https://securityonion.net/license; you may not use this file except in compliance with the - Elastic License 2.0. #} - -{% import_yaml 'idstools/defaults.yaml' as IDSTOOLSDEFAULTS with context %} -{% set IDSTOOLSMERGED = salt['pillar.get']('idstools', IDSTOOLSDEFAULTS.idstools, merge=True) %} diff --git a/salt/idstools/rules/extraction.rules b/salt/idstools/rules/extraction.rules deleted file mode 100644 index 3ebbd41b1..000000000 --- a/salt/idstools/rules/extraction.rules +++ /dev/null @@ -1,26 +0,0 @@ -# Extract all PDF mime type -alert http any any -> any any (msg:"FILE pdf detected"; filemagic:"PDF document"; filestore; noalert; sid:1100000; rev:1;) -alert smtp any any -> any any (msg:"FILE pdf detected"; filemagic:"PDF document"; filestore; noalert; sid:1100001; rev:1;) -alert nfs any any -> any any (msg:"FILE pdf detected"; filemagic:"PDF document"; filestore; noalert; sid:1100002; rev:1;) -alert smb any any -> any any (msg:"FILE pdf detected"; filemagic:"PDF document"; filestore; noalert; sid:1100003; rev:1;) -# Extract EXE/DLL file types -alert http any any -> any any (msg:"FILE EXE detected"; filemagic:"PE32 executable"; filestore; noalert; sid:1100004; rev:1;) -alert smtp any any -> any any (msg:"FILE EXE detected"; filemagic:"PE32 executable"; filestore; noalert; sid:1100005; rev:1;) -alert nfs any any -> any any (msg:"FILE EXE detected"; filemagic:"PE32 executable"; filestore; noalert; sid:1100006; rev:1;) -alert smb any any -> any any (msg:"FILE EXE detected"; filemagic:"PE32 executable"; filestore; noalert; sid:1100007; rev:1;) -alert http any any -> any any (msg:"FILE EXE detected"; filemagic:"MS-DOS executable"; filestore; noalert; sid:1100008; rev:1;) -alert smtp any any -> any any (msg:"FILE EXE detected"; filemagic:"MS-DOS executable"; filestore; noalert; sid:1100009; rev:1;) -alert nfs any any -> any any (msg:"FILE EXE detected"; filemagic:"MS-DOS executable"; filestore; noalert; sid:1100010; rev:1;) -alert smb any any -> any any (msg:"FILE EXE detected"; filemagic:"MS-DOS executable"; filestore; noalert; sid:1100011; rev:1;) - -# Extract all Zip files -alert http any any -> any any (msg:"FILE ZIP detected"; filemagic:"Zip"; filestore; noalert; sid:1100012; rev:1;) -alert smtp any any -> any any (msg:"FILE ZIP detected"; filemagic:"Zip"; filestore; noalert; sid:1100013; rev:1;) -alert nfs any any -> any any (msg:"FILE ZIP detected"; filemagic:"Zip"; filestore; noalert; sid:1100014; rev:1;) -alert smb any any -> any any (msg:"FILE ZIP detected"; filemagic:"Zip"; filestore; noalert; sid:1100015; rev:1;) - -# Extract Word Docs -alert http any any -> any any (msg:"FILE WORDDOC detected"; filemagic:"Composite Document File V2 Document"; filestore; noalert; sid:1100016; rev:1;) -alert smtp any any -> any any (msg:"FILE WORDDOC detected"; filemagic:"Composite Document File V2 Document"; filestore; noalert; sid:1100017; rev:1;) -alert nfs any any -> any any (msg:"FILE WORDDOC detected"; filemagic:"Composite Document File V2 Document"; filestore; noalert; sid:1100018; rev:1;) -alert smb any any -> any any (msg:"FILE WORDDOC detected"; filemagic:"Composite Document File V2 Document"; filestore; noalert; sid:1100019; rev:1;) \ No newline at end of file diff --git a/salt/idstools/rules/filters.rules b/salt/idstools/rules/filters.rules deleted file mode 100644 index 051d1913f..000000000 --- a/salt/idstools/rules/filters.rules +++ /dev/null @@ -1,11 +0,0 @@ -# Start the filters at sid 1200000 -# Example of filtering out *google.com 
from being in the dns log. -#config dns any any -> any any (dns.query; content:"google.com"; config: logging disable, type tx, scope tx; sid:1200000;) -# Example of filtering out *google.com from being in the http log. -#config http any any -> any any (http.host; content:"google.com"; config: logging disable, type tx, scope tx; sid:1200001;) -# Example of filtering out someuseragent from being in the http log. -#config http any any -> any any (http.user_agent; content:"someuseragent"; config: logging disable, type tx, scope tx; sid:1200002;) -# Example of filtering out Google's certificate from being in the ssl log. -#config tls any any -> any any (tls.fingerprint; content:"4f:a4:5e:58:7e:d9:db:20:09:d7:b6:c7:ff:58:c4:7b:dc:3f:55:b4"; config: logging disable, type tx, scope tx; sid:1200003;) -# Example of filtering out a md5 of a file from being in the files log. -#config fileinfo any any -> any any (fileinfo.filemd5; content:"7a125dc69c82d5caf94d3913eecde4b5"; config: logging disable, type tx, scope tx; sid:1200004;) diff --git a/salt/idstools/rules/local.rules b/salt/idstools/rules/local.rules deleted file mode 100644 index ac11dfa58..000000000 --- a/salt/idstools/rules/local.rules +++ /dev/null @@ -1 +0,0 @@ -# Add your custom Suricata rules in this file. \ No newline at end of file diff --git a/salt/idstools/soc_idstools.yaml b/salt/idstools/soc_idstools.yaml deleted file mode 100644 index 4f7a53e91..000000000 --- a/salt/idstools/soc_idstools.yaml +++ /dev/null @@ -1,72 +0,0 @@ -idstools: - enabled: - description: Enables or disables the IDStools process which is used by the Detection system. - config: - oinkcode: - description: Enter your registration code or oinkcode for paid NIDS rulesets. - title: Registration Code - global: True - forcedType: string - helpLink: rules.html - ruleset: - description: 'Defines the ruleset you want to run. Options are ETOPEN or ETPRO. Once you have changed the ruleset here, you will need to wait for the rule update to take place (every 24 hours), or you can force the update by nagivating to Detections --> Options dropdown menu --> Suricata --> Full Update. WARNING! Changing the ruleset will remove all existing non-overlapping Suricata rules of the previous ruleset and their associated overrides. This removal cannot be undone.' - global: True - regex: ETPRO\b|ETOPEN\b - helpLink: rules.html - urls: - description: This is a list of additional rule download locations. This feature is currently disabled. - global: True - multiline: True - forcedType: "[]string" - readonly: True - helpLink: rules.html - sids: - disabled: - description: Contains the list of NIDS rules (or regex patterns) disabled across the grid. This setting is readonly; Use the Detections screen to disable rules. - global: True - multiline: True - forcedType: "[]string" - regex: \d*|re:.* - helpLink: managing-alerts.html - readonlyUi: True - advanced: true - enabled: - description: Contains the list of NIDS rules (or regex patterns) enabled across the grid. This setting is readonly; Use the Detections screen to enable rules. - global: True - multiline: True - forcedType: "[]string" - regex: \d*|re:.* - helpLink: managing-alerts.html - readonlyUi: True - advanced: true - modify: - description: Contains the list of NIDS rules (SID "REGEX_SEARCH_TERM" "REGEX_REPLACE_TERM"). This setting is readonly; Use the Detections screen to modify rules. 
- global: True - multiline: True - forcedType: "[]string" - helpLink: managing-alerts.html - readonlyUi: True - advanced: true - rules: - local__rules: - description: Contains the list of custom NIDS rules applied to the grid. This setting is readonly; Use the Detections screen to adjust rules. - file: True - global: True - advanced: True - title: Local Rules - helpLink: local-rules.html - readonlyUi: True - filters__rules: - description: If you are using Suricata for metadata, then you can set custom filters for that metadata here. - file: True - global: True - advanced: True - title: Filter Rules - helpLink: suricata.html - extraction__rules: - description: If you are using Suricata for metadata, then you can set a list of MIME types for file extraction here. - file: True - global: True - advanced: True - title: Extraction Rules - helpLink: suricata.html diff --git a/salt/idstools/sostatus.sls b/salt/idstools/sostatus.sls deleted file mode 100644 index 408b10742..000000000 --- a/salt/idstools/sostatus.sls +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - -{% from 'allowed_states.map.jinja' import allowed_states %} -{% if sls.split('.')[0] in allowed_states %} - -append_so-idstools_so-status.conf: - file.append: - - name: /opt/so/conf/so-status/so-status.conf - - text: so-idstools - - unless: grep -q so-idstools /opt/so/conf/so-status/so-status.conf - -{% else %} - -{{sls}}_state_not_allowed: - test.fail_without_changes: - - name: {{sls}}_state_not_allowed - -{% endif %} diff --git a/salt/idstools/sync_files.sls b/salt/idstools/sync_files.sls deleted file mode 100644 index cdacfaa74..000000000 --- a/salt/idstools/sync_files.sls +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - - -idstoolsdir: - file.directory: - - name: /opt/so/conf/idstools/etc - - user: 939 - - group: 939 - - makedirs: True - -idstoolsetcsync: - file.recurse: - - name: /opt/so/conf/idstools/etc - - source: salt://idstools/etc - - user: 939 - - group: 939 - - template: jinja - -rulesdir: - file.directory: - - name: /opt/so/rules/nids/suri - - user: 939 - - group: 939 - - makedirs: True - -# Don't show changes because all.rules can be large -synclocalnidsrules: - file.recurse: - - name: /opt/so/rules/nids/suri/ - - source: salt://idstools/rules/ - - user: 939 - - group: 939 - - show_changes: False - - include_pat: 'E@.rules' diff --git a/salt/idstools/tools/sbin/so-idstools-restart b/salt/idstools/tools/sbin/so-idstools-restart deleted file mode 100755 index f2abbd0a5..000000000 --- a/salt/idstools/tools/sbin/so-idstools-restart +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash - -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - - - -. 
/usr/sbin/so-common - -/usr/sbin/so-restart idstools $1 diff --git a/salt/idstools/tools/sbin/so-idstools-start b/salt/idstools/tools/sbin/so-idstools-start deleted file mode 100755 index e17b5e521..000000000 --- a/salt/idstools/tools/sbin/so-idstools-start +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash - -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - - - -. /usr/sbin/so-common - -/usr/sbin/so-start idstools $1 diff --git a/salt/idstools/tools/sbin/so-idstools-stop b/salt/idstools/tools/sbin/so-idstools-stop deleted file mode 100755 index f2d188d06..000000000 --- a/salt/idstools/tools/sbin/so-idstools-stop +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash - -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - - - -. /usr/sbin/so-common - -/usr/sbin/so-stop idstools $1 diff --git a/salt/idstools/tools/sbin_jinja/so-rule-update b/salt/idstools/tools/sbin_jinja/so-rule-update deleted file mode 100755 index 9ac09ed15..000000000 --- a/salt/idstools/tools/sbin_jinja/so-rule-update +++ /dev/null @@ -1,40 +0,0 @@ -#!/bin/bash - -# if this script isn't already running -if [[ ! "`pidof -x $(basename $0) -o %PPID`" ]]; then - - . /usr/sbin/so-common - -{%- from 'vars/globals.map.jinja' import GLOBALS %} -{%- from 'idstools/map.jinja' import IDSTOOLSMERGED %} - -{%- set proxy = salt['pillar.get']('manager:proxy') %} -{%- set noproxy = salt['pillar.get']('manager:no_proxy', '') %} - -{%- if proxy %} -# Download the rules from the internet - export http_proxy={{ proxy }} - export https_proxy={{ proxy }} - export no_proxy="{{ noproxy }}" -{%- endif %} - - mkdir -p /nsm/rules/suricata - chown -R socore:socore /nsm/rules/suricata -{%- if not GLOBALS.airgap %} -# Download the rules from the internet -{%- if IDSTOOLSMERGED.config.ruleset == 'ETOPEN' %} - docker exec so-idstools idstools-rulecat -v --suricata-version 7.0.3 -o /nsm/rules/suricata/ --merged=/nsm/rules/suricata/emerging-all.rules --force -{%- elif IDSTOOLSMERGED.config.ruleset == 'ETPRO' %} - docker exec so-idstools idstools-rulecat -v --suricata-version 7.0.3 -o /nsm/rules/suricata/ --merged=/nsm/rules/suricata/emerging-all.rules --force --etpro={{ IDSTOOLSMERGED.config.oinkcode }} -{%- endif %} -{%- endif %} - - - argstr="" - for arg in "$@"; do - argstr="${argstr} \"${arg}\"" - done - - docker exec so-idstools /bin/bash -c "cd /opt/so/idstools/etc && idstools-rulecat --force ${argstr}" - -fi diff --git a/salt/logrotate/defaults.yaml b/salt/logrotate/defaults.yaml index 2f7247ff2..4df6713ef 100644 --- a/salt/logrotate/defaults.yaml +++ b/salt/logrotate/defaults.yaml @@ -1,15 +1,5 @@ logrotate: config: - /opt/so/log/idstools/*_x_log: - - daily - - rotate 14 - - missingok - - copytruncate - - compress - - create - - extension .log - - dateext - - dateyesterday /opt/so/log/nginx/*_x_log: - daily - rotate 14 diff --git a/salt/logrotate/soc_logrotate.yaml b/salt/logrotate/soc_logrotate.yaml index 56f879e4f..21b54755e 100644 --- a/salt/logrotate/soc_logrotate.yaml +++ b/salt/logrotate/soc_logrotate.yaml @@ -1,12 +1,5 @@ logrotate: config: - 
"/opt/so/log/idstools/*_x_log": - description: List of logrotate options for this file. - title: /opt/so/log/idstools/*.log - advanced: True - multiline: True - global: True - forcedType: "[]string" "/opt/so/log/nginx/*_x_log": description: List of logrotate options for this file. title: /opt/so/log/nginx/*.log diff --git a/salt/manager/tools/sbin/so-minion b/salt/manager/tools/sbin/so-minion index 34ebdaeec..c91a7a793 100755 --- a/salt/manager/tools/sbin/so-minion +++ b/salt/manager/tools/sbin/so-minion @@ -603,16 +603,6 @@ function add_kratos_to_minion() { fi } -function add_idstools_to_minion() { - printf '%s\n'\ - "idstools:"\ - " enabled: True"\ - " " >> $PILLARFILE - if [ $? -ne 0 ]; then - log "ERROR" "Failed to add idstools configuration to $PILLARFILE" - return 1 - fi -} function add_elastic_fleet_package_registry_to_minion() { printf '%s\n'\ @@ -740,7 +730,6 @@ function createEVAL() { add_soc_to_minion || return 1 add_registry_to_minion || return 1 add_kratos_to_minion || return 1 - add_idstools_to_minion || return 1 add_elastic_fleet_package_registry_to_minion || return 1 } @@ -761,7 +750,6 @@ function createSTANDALONE() { add_soc_to_minion || return 1 add_registry_to_minion || return 1 add_kratos_to_minion || return 1 - add_idstools_to_minion || return 1 add_elastic_fleet_package_registry_to_minion || return 1 } @@ -778,7 +766,6 @@ function createMANAGER() { add_soc_to_minion || return 1 add_registry_to_minion || return 1 add_kratos_to_minion || return 1 - add_idstools_to_minion || return 1 add_elastic_fleet_package_registry_to_minion || return 1 } @@ -795,7 +782,6 @@ function createMANAGERSEARCH() { add_soc_to_minion || return 1 add_registry_to_minion || return 1 add_kratos_to_minion || return 1 - add_idstools_to_minion || return 1 add_elastic_fleet_package_registry_to_minion || return 1 } @@ -810,7 +796,6 @@ function createIMPORT() { add_soc_to_minion || return 1 add_registry_to_minion || return 1 add_kratos_to_minion || return 1 - add_idstools_to_minion || return 1 add_elastic_fleet_package_registry_to_minion || return 1 } @@ -895,7 +880,6 @@ function createMANAGERHYPE() { add_soc_to_minion || return 1 add_registry_to_minion || return 1 add_kratos_to_minion || return 1 - add_idstools_to_minion || return 1 add_elastic_fleet_package_registry_to_minion || return 1 } diff --git a/salt/salt/files/engines.conf b/salt/salt/files/engines.conf index 15d55e18f..8192ee201 100644 --- a/salt/salt/files/engines.conf +++ b/salt/salt/files/engines.conf @@ -6,30 +6,6 @@ engines: interval: 60 - pillarWatch: fpa: - - files: - - /opt/so/saltstack/local/pillar/idstools/soc_idstools.sls - - /opt/so/saltstack/local/pillar/idstools/adv_idstools.sls - pillar: idstools.config.ruleset - default: ETOPEN - actions: - from: - '*': - to: - '*': - - cmd.run: - cmd: /usr/sbin/so-rule-update - - files: - - /opt/so/saltstack/local/pillar/idstools/soc_idstools.sls - - /opt/so/saltstack/local/pillar/idstools/adv_idstools.sls - pillar: idstools.config.oinkcode - default: '' - actions: - from: - '*': - to: - '*': - - cmd.run: - cmd: /usr/sbin/so-rule-update - files: - /opt/so/saltstack/local/pillar/global/soc_global.sls - /opt/so/saltstack/local/pillar/global/adv_global.sls diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 0e3e50240..8b4708d38 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1554,12 +1554,72 @@ soc: disableRegex: [] enableRegex: [] failAfterConsecutiveErrorCount: 10 - communityRulesFile: /nsm/rules/suricata/emerging-all.rules rulesFingerprintFile: 
/opt/sensoroni/fingerprints/emerging-all.fingerprint stateFilePath: /opt/sensoroni/fingerprints/suricataengine.state integrityCheckFrequencySeconds: 1200 ignoredSidRanges: - '1100000-1101000' + rulesetSources: + default: + - name: Emerging-Threats + description: "Emerging Threats ruleset - To enable ET Pro, enter your license key below. Leave empty for ET Open (free) rules." + licenseKey: "" + enabled: true + sourceType: url + sourcePath: 'https://rules.emergingthreats.net/open/suricata/emerging.rules.tar.gz' + urlHash: "https://rules.emergingthreats.net/open/suricata/emerging.rules.tar.gz.md5" + license: "BSD" + excludeFiles: + - "*deleted*" + - "*retired*" + proxyURL: "" + proxyUsername: "" + proxyPassword: "" + proxyCACert: "" + insecureSkipVerify: false + readOnly: true + deleteUnreferenced: true + - name: local-rules + id: local-rules + description: "Local custom rules from files (*.rules) in a directory on the filesystem" + license: "custom" + sourceType: directory + sourcePath: /nsm/rules/local/ + readOnly: false + deleteUnreferenced: false + enabled: false + excludeFiles: + - "*backup*" + airgap: + - name: Emerging-Threats + description: "Emerging Threats ruleset - To enable ET Pro, enter your license key below. Leave empty for ET Open (free) rules." + licenseKey: "" + enabled: true + sourceType: url + sourcePath: 'https://rules.emergingthreats.net/open/suricata/emerging.rules.tar.gz' + urlHash: "https://rules.emergingthreats.net/open/suricata/emerging.rules.tar.gz.md5" + license: "BSD" + excludeFiles: + - "*deleted*" + - "*retired*" + proxyURL: "" + proxyUsername: "" + proxyPassword: "" + proxyCACert: "" + insecureSkipVerify: false + readOnly: true + deleteUnreferenced: true + - name: local-rules + id: local-rules + description: "Local custom rules from files (*.rules) in a directory on the filesystem" + license: "custom" + sourceType: directory + sourcePath: /nsm/rules/local/ + readOnly: false + deleteUnreferenced: false + enabled: false + excludeFiles: + - "*backup*" navigator: intervalMinutes: 30 outputPath: /opt/sensoroni/navigator diff --git a/salt/soc/enabled.sls b/salt/soc/enabled.sls index 09e2c16a8..6dd7b71ae 100644 --- a/salt/soc/enabled.sls +++ b/salt/soc/enabled.sls @@ -27,7 +27,7 @@ so-soc: - /opt/so/conf/strelka:/opt/sensoroni/yara:rw - /opt/so/conf/sigma:/opt/sensoroni/sigma:rw - /opt/so/rules/elastalert/rules:/opt/sensoroni/elastalert:rw - - /opt/so/rules/nids/suri:/opt/sensoroni/nids:ro + - /opt/so/rules/nids/suri:/opt/sensoroni/nids:rw - /opt/so/conf/soc/fingerprints:/opt/sensoroni/fingerprints:rw - /nsm/soc/jobs:/opt/sensoroni/jobs:rw - /nsm/soc/uploads:/nsm/soc/uploads:rw diff --git a/salt/soc/merged.map.jinja b/salt/soc/merged.map.jinja index e053ce63f..b43ccaf1b 100644 --- a/salt/soc/merged.map.jinja +++ b/salt/soc/merged.map.jinja @@ -50,17 +50,74 @@ {% do SOCMERGED.config.server.modules.elastalertengine.update({'enabledSigmaRules': SOCMERGED.config.server.modules.elastalertengine.enabledSigmaRules.default}) %} {% endif %} -{# set elastalertengine.rulesRepos and strelkaengine.rulesRepos based on airgap or not #} +{# set elastalertengine.rulesRepos, strelkaengine.rulesRepos, and suricataengine.rulesetSources based on airgap or not #} {% if GLOBALS.airgap %} {% do SOCMERGED.config.server.modules.elastalertengine.update({'rulesRepos': SOCMERGED.config.server.modules.elastalertengine.rulesRepos.airgap}) %} {% do SOCMERGED.config.server.modules.strelkaengine.update({'rulesRepos': SOCMERGED.config.server.modules.strelkaengine.rulesRepos.airgap}) %} +{#% if 
SOCMERGED.config.server.modules.suricataengine.rulesetSources is mapping %#} +{% do SOCMERGED.config.server.modules.suricataengine.update({'rulesetSources': SOCMERGED.config.server.modules.suricataengine.rulesetSources.airgap}) %} +{#% endif %#} {% do SOCMERGED.config.server.update({'airgapEnabled': true}) %} {% else %} {% do SOCMERGED.config.server.modules.elastalertengine.update({'rulesRepos': SOCMERGED.config.server.modules.elastalertengine.rulesRepos.default}) %} {% do SOCMERGED.config.server.modules.strelkaengine.update({'rulesRepos': SOCMERGED.config.server.modules.strelkaengine.rulesRepos.default}) %} +{#% if SOCMERGED.config.server.modules.suricataengine.rulesetSources is mapping %#} +{% do SOCMERGED.config.server.modules.suricataengine.update({'rulesetSources': SOCMERGED.config.server.modules.suricataengine.rulesetSources.default}) %} +{#% endif %#} {% do SOCMERGED.config.server.update({'airgapEnabled': false}) %} {% endif %} + +{# Define the Detections custom ruleset that should always be present #} +{% set CUSTOM_RULESET = { + 'name': 'custom', + 'description': 'User-created custom rules created via the Detections module in the SOC UI', + 'sourceType': 'elasticsearch', + 'sourcePath': 'so_detection.ruleset:__custom__', + 'readOnly': false, + 'deleteUnreferenced': false, + 'license': 'Custom', + 'enabled': true +} %} + +{# Always append the custom ruleset to suricataengine.rulesetSources if not already present #} +{% if SOCMERGED.config.server.modules.suricataengine is defined and SOCMERGED.config.server.modules.suricataengine.rulesetSources is defined %} +{% if SOCMERGED.config.server.modules.suricataengine.rulesetSources is not mapping %} +{% set custom_names = SOCMERGED.config.server.modules.suricataengine.rulesetSources | selectattr('name', 'equalto', 'custom') | list %} +{% if custom_names | length == 0 %} +{% do SOCMERGED.config.server.modules.suricataengine.rulesetSources.append(CUSTOM_RULESET) %} +{% endif %} +{% endif %} +{% endif %} + +{# Transform Emerging-Threats ruleset based on license key #} +{% if SOCMERGED.config.server.modules.suricataengine is defined and SOCMERGED.config.server.modules.suricataengine.rulesetSources is defined %} +{% if SOCMERGED.config.server.modules.suricataengine.rulesetSources is not mapping %} +{% for ruleset in SOCMERGED.config.server.modules.suricataengine.rulesetSources %} +{% if ruleset.name == 'Emerging-Threats' %} +{% if ruleset.licenseKey and ruleset.licenseKey != '' %} +{# License key is defined - transform to ETPRO #} +{% do ruleset.update({ + 'name': 'ETPRO', + 'sourcePath': 'https://rules.emergingthreatspro.com/' ~ ruleset.licenseKey ~ '/suricata-7.0.3/etpro.rules.tar.gz', + 'urlHash': 'https://rules.emergingthreatspro.com/' ~ ruleset.licenseKey ~ '/suricata-7.0.3/etpro.rules.tar.gz.md5', + 'license': 'Commercial' + }) %} +{% else %} +{# No license key - explicitly set to ETOPEN #} +{% do ruleset.update({ + 'name': 'ETOPEN', + 'sourcePath': 'https://rules.emergingthreats.net/open/suricata-7.0.3/emerging.rules.tar.gz', + 'urlHash': 'https://rules.emergingthreats.net/open/suricata-7.0.3/emerging.rules.tar.gz.md5', + 'license': 'BSD' + }) %} +{% endif %} +{% endif %} +{% endfor %} +{% endif %} +{% endif %} + + {# set playbookRepos based on airgap or not #} {% if GLOBALS.airgap %} {% do SOCMERGED.config.server.modules.playbook.update({'playbookRepos': SOCMERGED.config.server.modules.playbook.playbookRepos.airgap}) %} diff --git a/salt/soc/soc_soc.yaml b/salt/soc/soc_soc.yaml index b292d1460..623df4ea3 100644 --- 
a/salt/soc/soc_soc.yaml +++ b/salt/soc/soc_soc.yaml @@ -543,6 +543,52 @@ soc: advanced: True forcedType: "[]string" helpLink: detections.html#rule-engine-status + rulesetSources: + default: &serulesetSources + description: "Ruleset sources for Suricata rules. Supports URL downloads and local directories. Refer to the linked documentation for details on how to configure this setting." + global: True + advanced: False + forcedType: "[]{}" + helpLink: suricata.html + syntax: json + uiElements: + - field: name + label: Ruleset Name (This will be the name of the ruleset in the UI) + required: True + readonly: True + - field: description + label: Description + - field: enabled + label: Enabled (If false, existing rules & overrides will be removed) + forcedType: bool + required: True + - field: licenseKey + label: License Key + required: False + - field: sourceType + label: Source Type + required: True + options: + - url + - directory + - field: sourcePath + label: Source Path (full url or directory path) + required: True + - field: excludeFiles + label: Exclude Files (list of file names to exclude, separated by commas) + required: False + - field: license + label: Ruleset License + required: True + - field: readOnly + label: Read Only + forcedType: bool + required: False + - field: deleteUnreferenced + label: Delete Unreferenced + forcedType: bool + required: False + airgap: *serulesetSources navigator: intervalMinutes: description: How often to generate the Navigator Layers. (minutes) diff --git a/salt/suricata/defaults.yaml b/salt/suricata/defaults.yaml index d819d1cf9..b4c615157 100644 --- a/salt/suricata/defaults.yaml +++ b/salt/suricata/defaults.yaml @@ -459,7 +459,7 @@ suricata: append: "yes" default-rule-path: /etc/suricata/rules rule-files: - - all.rules + - all-rulesets.rules classification-file: /etc/suricata/classification.config reference-config-file: /etc/suricata/reference.config threshold-file: /etc/suricata/threshold.conf diff --git a/salt/top.sls b/salt/top.sls index a75346462..c465307c0 100644 --- a/salt/top.sls +++ b/salt/top.sls @@ -74,7 +74,6 @@ base: - sensoroni - telegraf - firewall - - idstools - suricata.manager - healthcheck - elasticsearch @@ -106,7 +105,6 @@ base: - firewall - sensoroni - telegraf - - idstools - suricata.manager - healthcheck - elasticsearch @@ -142,7 +140,6 @@ base: - sensoroni - telegraf - backup.config_backup - - idstools - suricata.manager - elasticsearch - logstash @@ -177,7 +174,6 @@ base: - sensoroni - telegraf - backup.config_backup - - idstools - suricata.manager - elasticsearch - logstash @@ -208,7 +204,6 @@ base: - sensoroni - telegraf - firewall - - idstools - suricata.manager - pcap - elasticsearch diff --git a/setup/so-functions b/setup/so-functions index 522446be4..334dc4a0d 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -829,7 +829,6 @@ create_manager_pillars() { backup_pillar docker_pillar redis_pillar - idstools_pillar kratos_pillar hydra_pillar soc_pillar @@ -1295,11 +1294,6 @@ ls_heapsize() { } -idstools_pillar() { - title "Ading IDSTOOLS pillar options" - touch $adv_idstools_pillar_file -} - nginx_pillar() { title "Creating the NGINX pillar" [[ -z "$TESTING" ]] && return @@ -1475,7 +1469,7 @@ make_some_dirs() { mkdir -p $local_salt_dir/salt/firewall/portgroups mkdir -p $local_salt_dir/salt/firewall/ports - for THEDIR in bpf pcap elasticsearch ntp firewall redis backup influxdb strelka sensoroni soc docker zeek suricata nginx telegraf logstash soc manager kratos hydra idstools idh elastalert stig global kafka 
versionlock hypervisor vm; do + for THEDIR in bpf pcap elasticsearch ntp firewall redis backup influxdb strelka sensoroni soc docker zeek suricata nginx telegraf logstash soc manager kratos hydra idh elastalert stig global kafka versionlock hypervisor vm; do mkdir -p $local_salt_dir/pillar/$THEDIR touch $local_salt_dir/pillar/$THEDIR/adv_$THEDIR.sls touch $local_salt_dir/pillar/$THEDIR/soc_$THEDIR.sls diff --git a/setup/so-variables b/setup/so-variables index fc253df0a..a0d7aadc1 100644 --- a/setup/so-variables +++ b/setup/so-variables @@ -166,12 +166,6 @@ export hydra_pillar_file adv_hydra_pillar_file="$local_salt_dir/pillar/hydra/adv_hydra.sls" export adv_hydra_pillar_file -idstools_pillar_file="$local_salt_dir/pillar/idstools/soc_idstools.sls" -export idstools_pillar_file - -adv_idstools_pillar_file="$local_salt_dir/pillar/idstools/adv_idstools.sls" -export adv_idstools_pillar_file - nginx_pillar_file="$local_salt_dir/pillar/nginx/soc_nginx.sls" export nginx_pillar_file From 11518f6eea8b78753da9f0cc93c48ac9f70f94b7 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Tue, 11 Nov 2025 13:41:32 -0500 Subject: [PATCH 02/67] idstools removal refactor --- salt/manager/tools/sbin/soup | 123 +++++++++++++++++++++++ salt/soc/enabled.sls | 1 + salt/soc/soc_soc.yaml | 4 +- salt/suricata/config.sls | 11 +- salt/suricata/files/threshold.conf.jinja | 35 ------- 5 files changed, 131 insertions(+), 43 deletions(-) delete mode 100644 salt/suricata/files/threshold.conf.jinja diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index f32b6edf8..d6ad73d63 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -426,6 +426,7 @@ preupgrade_changes() { [[ "$INSTALLEDVERSION" == 2.4.160 ]] && up_to_2.4.170 [[ "$INSTALLEDVERSION" == 2.4.170 ]] && up_to_2.4.180 [[ "$INSTALLEDVERSION" == 2.4.180 ]] && up_to_2.4.190 + [[ "$INSTALLEDVERSION" == 2.4.190 ]] && up_to_2.4.200 true } @@ -457,6 +458,7 @@ postupgrade_changes() { [[ "$POSTVERSION" == 2.4.160 ]] && post_to_2.4.170 [[ "$POSTVERSION" == 2.4.170 ]] && post_to_2.4.180 [[ "$POSTVERSION" == 2.4.180 ]] && post_to_2.4.190 + [[ "$POSTVERSION" == 2.4.190 ]] && post_to_2.4.200 true } @@ -636,6 +638,11 @@ post_to_2.4.190() { POSTVERSION=2.4.190 } +post_to_2.4.200() { + echo "Nothing to apply" + POSTVERSION=2.4.200 +} + repo_sync() { echo "Sync the local repo." su socore -c '/usr/sbin/so-repo-sync' || fail "Unable to complete so-repo-sync." @@ -903,6 +910,13 @@ up_to_2.4.190() { INSTALLEDVERSION=2.4.190 } +up_to_2.4.200() { + echo "Migrating idstools config" + suricata_idstools_removal + + INSTALLEDVERSION=2.4.200 +} + add_hydra_pillars() { mkdir -p /opt/so/saltstack/local/pillar/hydra touch /opt/so/saltstack/local/pillar/hydra/soc_hydra.sls @@ -986,6 +1000,8 @@ rollover_index() { } suricata_idstools_migration() { + # For 2.4.70 + #Backup the pillars for idstools mkdir -p /nsm/backup/detections-migration/idstools rsync -av /opt/so/saltstack/local/pillar/idstools/* /nsm/backup/detections-migration/idstools @@ -1086,6 +1102,113 @@ playbook_migration() { echo "Playbook Migration is complete...." } +suricata_idstools_removal() { +# For SOUPs beginning with 2.4.200 + +# Create syncBlock file +cat > /opt/so/conf/soc/fingerprints/suricataengine.syncBlock << EOF +Suricata rulset sync is blocked until this file is removed. 
Make sure that you have manually added any custom Suricata rulesets via SOC config - review the documentation for more details: securityonion.net/docs +EOF + +# Track if we have custom configs +CUSTOM_CONFIGS_FOUND=0 + +# ETPRO Check +ETPRO=$(grep "--etpro" /usr/sbin/so-rule-update) +if [[ -n "$ETPRO" ]]; then + echo "Grid is using ETPRO." + # Add ETPRO yaml to SOC pillar file + if [[ $is_airgap -eq 0 ]]; then + #TODO /opt/so/saltstack/local/pillar/soc/soc_soc.sls + else + #TODO /opt/so/saltstack/local/pillar/soc/soc_soc.sls + fi +fi + +#idstools conf parse +RULECAT_CONF="/opt/so/conf/idstools/etc/rulecat.conf" + +# Parse RULECAT_CONF and check for custom configs +if [[ ! -f "$RULECAT_CONF" ]]; then + echo "Warning: $RULECAT_CONF not found - leaving syncBlock." + return 0 +fi + +echo "Parsing $RULECAT_CONF for custom configurations..." + +# Default values to check against +DEFAULT_URL="--url=http://MANAGER:7788/suricata/emerging-all.rules" +DEFAULT_DISABLE="--disable=/opt/so/idstools/etc/disable.conf" +DEFAULT_ENABLE="--enable=/opt/so/idstools/etc/enable.conf" +DEFAULT_MODIFY="--modify=/opt/so/idstools/etc/modify.conf" + +# Valid --local patterns +VALID_LOCAL_PATTERNS=( + "/opt/so/rules/nids/suri/local.rules" # 2/24 + "/opt/so/rules/nids/suri/extraction.rules" # 2/24 + "/opt/so/rules/nids/suri/filters.rules" # 2/24 + "/opt/so/rules/nids/extraction.rules" # 9/23 + "/opt/so/rules/nids/filters.rules" # 9/23 + "/opt/so/rules/nids/local.rules" # 8/23 + "/opt/so/rules/nids/sorules/extraction.rules" # 8/23 + "/opt/so/rules/nids/sorules/filters.rules" # 8/23 + ) + +# Parse each line in the config file +while IFS= read -r line; do + # Skip empty lines and comments + [[ -z "$line" || "$line" =~ ^[[:space:]]*# ]] && continue + + # Check for non-default --local parameter + if [[ "$line" =~ ^--local= ]]; then + local_path="${line#--local=}" + is_valid=0 + for pattern in "${VALID_LOCAL_PATTERNS[@]}"; do + if [[ "$local_path" == "$pattern" ]]; then + is_valid=1 + break + fi + done + if [[ $is_valid -eq 0 ]]; then + echo "Custom --local parameter detected: $line" + echo "Custom configuration found: $line" >> /opt/so/conf/soc/fingerprints/suricataengine.syncBlock + return 0 + fi + fi + + # Check for non-default --url parameter (default contains 7788) + if [[ "$line" =~ ^--url= ]] && [[ ! 
"$line" =~ 7788 ]]; then + echo "Custom --url parameter detected: $line" + echo "Custom configuration found: $line" >> /opt/so/conf/soc/fingerprints/suricataengine.syncBlock + return 0 + fi + + # Sanity checks for other parameters + if [[ "$line" =~ ^--disable= ]] && [[ "$line" != "$DEFAULT_DISABLE" ]]; then + echo "Custom --disable parameter detected: $line" + echo "Custom configuration found: $line" >> /opt/so/conf/soc/fingerprints/suricataengine.syncBlock + return 0 + fi + + if [[ "$line" =~ ^--enable= ]] && [[ "$line" != "$DEFAULT_ENABLE" ]]; then + echo "Custom --enable parameter detected: $line" + echo "Custom configuration found: $line" >> /opt/so/conf/soc/fingerprints/suricataengine.syncBlock + return 0 + fi + + if [[ "$line" =~ ^--modify= ]] && [[ "$line" != "$DEFAULT_MODIFY" ]]; then + echo "Custom --modify parameter detected: $line" + echo "Custom configuration found: $line" >> /opt/so/conf/soc/fingerprints/suricataengine.syncBlock + return 0 + fi + +done < "$RULECAT_CONF" + +# If we reach here, no custom configs were found +echo "idstools migration completed successfully - removing Suricata engine syncBlock" +rm -f /opt/so/conf/soc/fingerprints/suricataengine.syncBlock +} + determine_elastic_agent_upgrade() { if [[ $is_airgap -eq 0 ]]; then update_elastic_agent_airgap diff --git a/salt/soc/enabled.sls b/salt/soc/enabled.sls index 62f3f85ba..62873ebdd 100644 --- a/salt/soc/enabled.sls +++ b/salt/soc/enabled.sls @@ -26,6 +26,7 @@ so-soc: - /nsm/rules:/nsm/rules:rw - /opt/so/conf/strelka:/opt/sensoroni/yara:rw - /opt/so/conf/sigma:/opt/sensoroni/sigma:rw + - /opt/so/conf/suricata:/opt/sensoroni/suricata:rw - /opt/so/rules/elastalert/rules:/opt/sensoroni/elastalert:rw - /opt/so/rules/nids/suri:/opt/sensoroni/nids:rw - /opt/so/conf/soc/fingerprints:/opt/sensoroni/fingerprints:rw diff --git a/salt/soc/soc_soc.yaml b/salt/soc/soc_soc.yaml index bc67a5295..85e49def6 100644 --- a/salt/soc/soc_soc.yaml +++ b/salt/soc/soc_soc.yaml @@ -590,11 +590,11 @@ soc: label: Ruleset License required: True - field: readOnly - label: Read Only + label: Read Only (Prevents changes to the rule itself - can still be enabled/disabled/tuned) forcedType: bool required: False - field: deleteUnreferenced - label: Delete Unreferenced + label: Delete Unreferenced (Deletes rules that are no longer referenced by ruleset source) forcedType: bool required: False airgap: *serulesetSources diff --git a/salt/suricata/config.sls b/salt/suricata/config.sls index 00364f384..a43dd95a3 100644 --- a/salt/suricata/config.sls +++ b/salt/suricata/config.sls @@ -53,13 +53,15 @@ suridir: file.directory: - name: /opt/so/conf/suricata - user: 940 - - group: 940 + - group: 939 + - mode: 775 suriruledir: file.directory: - name: /opt/so/conf/suricata/rules - user: 940 - - group: 940 + - group: 939 + - mode: 775 - makedirs: True surilogdir: @@ -124,10 +126,7 @@ suriconfig: surithresholding: file.managed: - name: /opt/so/conf/suricata/threshold.conf - - source: salt://suricata/files/threshold.conf.jinja - - user: 940 - - group: 940 - - template: jinja + - replace: False suriclassifications: file.managed: diff --git a/salt/suricata/files/threshold.conf.jinja b/salt/suricata/files/threshold.conf.jinja deleted file mode 100644 index a439dad96..000000000 --- a/salt/suricata/files/threshold.conf.jinja +++ /dev/null @@ -1,35 +0,0 @@ -{% import_yaml 'suricata/thresholding/sids.yaml' as THRESHOLDING %} -{% if THRESHOLDING -%} - - {% for EACH_SID in THRESHOLDING -%} - {% for ACTIONS_LIST in THRESHOLDING[EACH_SID] -%} - {% for EACH_ACTION in 
ACTIONS_LIST -%} - - {%- if EACH_ACTION == 'threshold' %} -{{ EACH_ACTION }} gen_id {{ ACTIONS_LIST[EACH_ACTION].gen_id }}, sig_id {{ EACH_SID }}, type {{ ACTIONS_LIST[EACH_ACTION].type }}, track {{ ACTIONS_LIST[EACH_ACTION].track }}, count {{ ACTIONS_LIST[EACH_ACTION].count }}, seconds {{ ACTIONS_LIST[EACH_ACTION].seconds }} - - {%- elif EACH_ACTION == 'rate_filter' %} - {%- if ACTIONS_LIST[EACH_ACTION].new_action not in ['drop','reject'] %} -{{ EACH_ACTION }} gen_id {{ ACTIONS_LIST[EACH_ACTION].gen_id }}, sig_id {{ EACH_SID }}, track {{ ACTIONS_LIST[EACH_ACTION].track }}, count {{ ACTIONS_LIST[EACH_ACTION].count }}, seconds {{ ACTIONS_LIST[EACH_ACTION].seconds }}, new_action {{ ACTIONS_LIST[EACH_ACTION].new_action }}, timeout {{ ACTIONS_LIST[EACH_ACTION].timeout }} - {%- else %} -##### Security Onion does not support drop or reject actions for rate_filter -##### {{ EACH_ACTION }} gen_id {{ ACTIONS_LIST[EACH_ACTION].gen_id }}, sig_id {{ EACH_SID }}, track {{ ACTIONS_LIST[EACH_ACTION].track }}, count {{ ACTIONS_LIST[EACH_ACTION].count }}, seconds {{ ACTIONS_LIST[EACH_ACTION].seconds }}, new_action {{ ACTIONS_LIST[EACH_ACTION].new_action }}, timeout {{ ACTIONS_LIST[EACH_ACTION].timeout }} - {%- endif %} - - {%- elif EACH_ACTION == 'suppress' %} - {%- if ACTIONS_LIST[EACH_ACTION].track is defined %} -{{ EACH_ACTION }} gen_id {{ ACTIONS_LIST[EACH_ACTION].gen_id }}, sig_id {{ EACH_SID }}, track {{ ACTIONS_LIST[EACH_ACTION].track }}, ip {{ ACTIONS_LIST[EACH_ACTION].ip }} - {%- else %} -{{ EACH_ACTION }} gen_id {{ ACTIONS_LIST[EACH_ACTION].gen_id }}, sig_id {{ EACH_SID }} - {%- endif %} - - {%- endif %} - - {%- endfor %} - {%- endfor %} - {%- endfor %} - -{%- else %} -##### Navigate to suricata > thresholding > SIDS in SOC to define thresholding - -{%- endif %} From 13789bc56f7660763190f8f94be65253b205dcdd Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Tue, 11 Nov 2025 13:45:37 -0500 Subject: [PATCH 03/67] idstools removal refactor --- salt/manager/tools/sbin/soup | 2 ++ 1 file changed, 2 insertions(+) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index d6ad73d63..ed33481ee 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -1120,8 +1120,10 @@ if [[ -n "$ETPRO" ]]; then # Add ETPRO yaml to SOC pillar file if [[ $is_airgap -eq 0 ]]; then #TODO /opt/so/saltstack/local/pillar/soc/soc_soc.sls + echo "TODO" else #TODO /opt/so/saltstack/local/pillar/soc/soc_soc.sls + echo "TODO" fi fi From 3a8a6bf5ff9eaa9c3d075539e14f0182c146e6b0 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Tue, 11 Nov 2025 14:12:51 -0500 Subject: [PATCH 04/67] idstools removal refactor --- salt/manager/tools/sbin/soup | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index ed33481ee..f063314f9 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -1114,8 +1114,9 @@ EOF CUSTOM_CONFIGS_FOUND=0 # ETPRO Check -ETPRO=$(grep "--etpro" /usr/sbin/so-rule-update) +ETPRO=$(grep "\--etpro" /usr/sbin/so-rule-update) if [[ -n "$ETPRO" ]]; then + ETPRO_KEY=$(echo "$ETPRO" | awk -F'--etpro=' '{print $2}' | awk '{print $1}') echo "Grid is using ETPRO." 
# Add ETPRO yaml to SOC pillar file if [[ $is_airgap -eq 0 ]]; then From 55bbbdb58d27cfbe15c233c3c71daa925d4bc723 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Tue, 11 Nov 2025 14:34:28 -0500 Subject: [PATCH 05/67] idstools removal refactor --- salt/manager/tools/sbin/soup | 2 ++ 1 file changed, 2 insertions(+) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index f063314f9..774ca0430 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -1131,6 +1131,8 @@ fi #idstools conf parse RULECAT_CONF="/opt/so/conf/idstools/etc/rulecat.conf" +echo "Checking $RULECAT_CONF for custom configurations..." + # Parse RULECAT_CONF and check for custom configs if [[ ! -f "$RULECAT_CONF" ]]; then echo "Warning: $RULECAT_CONF not found - leaving syncBlock." From 1f24796eba263c238875d71e4e037aa980f02f5a Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Wed, 12 Nov 2025 08:48:47 -0500 Subject: [PATCH 06/67] Fix ETPRO check --- salt/manager/tools/sbin/soup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 774ca0430..3fa8b8c56 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -1114,7 +1114,7 @@ EOF CUSTOM_CONFIGS_FOUND=0 # ETPRO Check -ETPRO=$(grep "\--etpro" /usr/sbin/so-rule-update) +ETPRO=$(grep "\--etpro" /usr/sbin/so-rule-update || true) if [[ -n "$ETPRO" ]]; then ETPRO_KEY=$(echo "$ETPRO" | awk -F'--etpro=' '{print $2}' | awk '{print $1}') echo "Grid is using ETPRO." From 81d7c313af01ee750e1e25d51778e9ac4c65f08b Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Wed, 12 Nov 2025 11:11:01 -0500 Subject: [PATCH 07/67] remove dupe --- salt/suricata/config.sls | 6 ------ 1 file changed, 6 deletions(-) diff --git a/salt/suricata/config.sls b/salt/suricata/config.sls index beda362d3..3379697bc 100644 --- a/salt/suricata/config.sls +++ b/salt/suricata/config.sls @@ -10,12 +10,6 @@ {% from 'suricata/map.jinja' import SURICATAMERGED %} {% from 'bpf/suricata.map.jinja' import SURICATABPF, SURICATA_BPF_STATUS, SURICATA_BPF_CALC %} -suridir: - file.directory: - - name: /opt/so/conf/suricata - - user: 940 - - group: 940 - {% if GLOBALS.pcap_engine in ["SURICATA", "TRANSITION"] %} {% from 'bpf/pcap.map.jinja' import PCAPBPF, PCAP_BPF_STATUS, PCAP_BPF_CALC %} # BPF compilation and configuration From 573dded921696cf1d765e98d2a9332c303ab2828 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Thu, 13 Nov 2025 09:25:20 -0500 Subject: [PATCH 08/67] refactor to hash --- salt/manager/tools/sbin/soup | 207 +++++++++++++++++++---------------- 1 file changed, 110 insertions(+), 97 deletions(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 5e7d9fbc7..0fb83d73f 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -639,7 +639,9 @@ post_to_2.4.190() { } post_to_2.4.200() { - echo "Nothing to apply" + echo "Initiating Suricata idstools migration..." + suricata_idstools_removal_post + POSTVERSION=2.4.200 } @@ -911,8 +913,8 @@ up_to_2.4.190() { } up_to_2.4.200() { - echo "Migrating idstools config" - suricata_idstools_removal + echo "Backing up idstools config..." + suricata_idstools_removal_pre INSTALLEDVERSION=2.4.200 } @@ -1102,116 +1104,127 @@ playbook_migration() { echo "Playbook Migration is complete...." 
} -suricata_idstools_removal() { -# For SOUPs beginning with 2.4.200 +suricata_idstools_removal_pre() { +# For SOUPs beginning with 2.4.200 - pre SOUP checks # Create syncBlock file cat > /opt/so/conf/soc/fingerprints/suricataengine.syncBlock << EOF -Suricata rulset sync is blocked until this file is removed. Make sure that you have manually added any custom Suricata rulesets via SOC config - review the documentation for more details: securityonion.net/docs +Suricata ruleset sync is blocked until this file is removed. Make sure that you have manually added any custom Suricata rulesets via SOC config - review the documentation for more details: securityonion.net/docs EOF -# Track if we have custom configs -CUSTOM_CONFIGS_FOUND=0 +# TODO - backup custom rules & overrides +mkdir -p /nsm/backup/detections-migration/2-4-200 +cp /usr/sbin/so-rule-update /nsm/backup/detections-migration/2-4-200 +cp /opt/so/conf/idstools/etc/rulecat.conf /nsm/backup/detections-migration/2-4-200 -# ETPRO Check -ETPRO=$(grep "\--etpro" /usr/sbin/so-rule-update || true) -if [[ -n "$ETPRO" ]]; then - ETPRO_KEY=$(echo "$ETPRO" | awk -F'--etpro=' '{print $2}' | awk '{print $1}') - echo "Grid is using ETPRO." - # Add ETPRO yaml to SOC pillar file - if [[ $is_airgap -eq 0 ]]; then - #TODO /opt/so/saltstack/local/pillar/soc/soc_soc.sls - echo "TODO" - else - #TODO /opt/so/saltstack/local/pillar/soc/soc_soc.sls - echo "TODO" +} + +suricata_idstools_removal_post() { +# For SOUPs beginning with 2.4.200 - post SOUP checks + +echo "Checking idstools configuration for custom modifications..." + +# Normalize file content for consistent hashing +# Args: $1 - file path +normalize_file() { + local file="$1" + + if [[ ! -f "$file" ]]; then + echo "FILE_NOT_FOUND" + return 1 fi -fi -#idstools conf parse -RULECAT_CONF="/opt/so/conf/idstools/etc/rulecat.conf" + # Strip whitespace, normalize hostname, remove blank lines + sed -E \ + -e 's/^[[:space:]]+//; s/[[:space:]]+$//' \ + -e '/^$/d' \ + -e 's|--url=http://[^:]+:7788|--url=http://MANAGER:7788|' \ + "$file" +} -echo "Checking $RULECAT_CONF for custom configurations..." +# Hash normalized content +hash_file() { + local file="$1" -# Parse RULECAT_CONF and check for custom configs -if [[ ! -f "$RULECAT_CONF" ]]; then - echo "Warning: $RULECAT_CONF not found - leaving syncBlock." - return 0 -fi + local normalized=$(normalize_file "$file") -echo "Parsing $RULECAT_CONF for custom configurations..." 
+ if [[ "$normalized" == "FILE_NOT_FOUND" ]]; then + echo "FILE_NOT_FOUND" + return 1 + fi -# Default values to check against -DEFAULT_URL="--url=http://MANAGER:7788/suricata/emerging-all.rules" -DEFAULT_DISABLE="--disable=/opt/so/idstools/etc/disable.conf" -DEFAULT_ENABLE="--enable=/opt/so/idstools/etc/enable.conf" -DEFAULT_MODIFY="--modify=/opt/so/idstools/etc/modify.conf" + echo -n "$normalized" | sha256sum | awk '{print $1}' +} -# Valid --local patterns -VALID_LOCAL_PATTERNS=( - "/opt/so/rules/nids/suri/local.rules" # 2/24 - "/opt/so/rules/nids/suri/extraction.rules" # 2/24 - "/opt/so/rules/nids/suri/filters.rules" # 2/24 - "/opt/so/rules/nids/extraction.rules" # 9/23 - "/opt/so/rules/nids/filters.rules" # 9/23 - "/opt/so/rules/nids/local.rules" # 8/23 - "/opt/so/rules/nids/sorules/extraction.rules" # 8/23 - "/opt/so/rules/nids/sorules/filters.rules" # 8/23 - ) +# Known-default hashes +KNOWN_SO_RULE_UPDATE_HASHES=( + "8f1fe1cb65c08aab78830315b952785c7ccdcc108c5c0474f427e29d4e39ee5f" # non-Airgap + "d23ac5a962c709dcb888103effb71444df72b46009b6c426e280dbfbc7d74d40" # Airgap +) -# Parse each line in the config file -while IFS= read -r line; do - # Skip empty lines and comments - [[ -z "$line" || "$line" =~ ^[[:space:]]*# ]] && continue - - # Check for non-default --local parameter - if [[ "$line" =~ ^--local= ]]; then - local_path="${line#--local=}" - is_valid=0 - for pattern in "${VALID_LOCAL_PATTERNS[@]}"; do - if [[ "$local_path" == "$pattern" ]]; then - is_valid=1 - break - fi - done - if [[ $is_valid -eq 0 ]]; then - echo "Custom --local parameter detected: $line" - echo "Custom configuration found: $line" >> /opt/so/conf/soc/fingerprints/suricataengine.syncBlock - return 0 +KNOWN_RULECAT_CONF_HASHES=( + "17fc663a83b30d4ba43ac6643666b0c96343c5ea6ea833fe6a8362fe415b666b" # default +) + +# Check a config file against known hashes +# Args: $1 - file path, $2 - array name of known hashes +check_config_file() { + local file="$1" + local known_hashes_array="$2" + local file_display_name=$(basename "$file") + + if [[ ! -f "$file" ]]; then + echo "Warning: $file not found" + return 1 + fi + + echo "Hashing $file..." + local file_hash=$(hash_file "$file") + + if [[ "$file_hash" == "FILE_NOT_FOUND" ]]; then + echo "Warning: Could not read $file" + return 1 + fi + + echo " Hash: $file_hash" + + # Check if hash matches any known default + local match_found=0 + local -n known_hashes=$known_hashes_array + for known_hash in "${known_hashes[@]}"; do + if [[ "$file_hash" == "$known_hash" ]]; then + match_found=1 + echo " Matches known default configuration" + break fi - fi - - # Check for non-default --url parameter (default contains 7788) - if [[ "$line" =~ ^--url= ]] && [[ ! 
"$line" =~ 7788 ]]; then - echo "Custom --url parameter detected: $line" - echo "Custom configuration found: $line" >> /opt/so/conf/soc/fingerprints/suricataengine.syncBlock - return 0 - fi - - # Sanity checks for other parameters - if [[ "$line" =~ ^--disable= ]] && [[ "$line" != "$DEFAULT_DISABLE" ]]; then - echo "Custom --disable parameter detected: $line" - echo "Custom configuration found: $line" >> /opt/so/conf/soc/fingerprints/suricataengine.syncBlock - return 0 - fi - - if [[ "$line" =~ ^--enable= ]] && [[ "$line" != "$DEFAULT_ENABLE" ]]; then - echo "Custom --enable parameter detected: $line" - echo "Custom configuration found: $line" >> /opt/so/conf/soc/fingerprints/suricataengine.syncBlock - return 0 - fi - - if [[ "$line" =~ ^--modify= ]] && [[ "$line" != "$DEFAULT_MODIFY" ]]; then - echo "Custom --modify parameter detected: $line" - echo "Custom configuration found: $line" >> /opt/so/conf/soc/fingerprints/suricataengine.syncBlock - return 0 - fi - -done < "$RULECAT_CONF" + done -# If we reach here, no custom configs were found -echo "idstools migration completed successfully - removing Suricata engine syncBlock" -rm -f /opt/so/conf/soc/fingerprints/suricataengine.syncBlock + if [[ $match_found -eq 0 ]]; then + echo "Does not match known default - custom configuration detected" + echo "Custom $file_display_name detected (hash: $file_hash)" >> /opt/so/conf/soc/fingerprints/suricataengine.syncBlock + return 1 + fi + + return 0 +} + +# Check so-rule-update and rulecat.conf +SO_RULE_UPDATE="/nsm/backup/detections-migration/2-4-200/so-rule-update" +RULECAT_CONF="/nsm/backup/detections-migration/2-4-200/rulecat.conf" + +custom_found=0 + +check_config_file "$SO_RULE_UPDATE" "KNOWN_SO_RULE_UPDATE_HASHES" || custom_found=1 +check_config_file "$RULECAT_CONF" "KNOWN_RULECAT_CONF_HASHES" || custom_found=1 + +# If no custom configs found, remove syncBlock +if [[ $custom_found -eq 0 ]]; then + echo "idstools migration completed successfully - removing Suricata engine syncBlock" + rm -f /opt/so/conf/soc/fingerprints/suricataengine.syncBlock +else + echo "Custom idstools configuration detected - syncBlock remains in place" + echo "Review /opt/so/conf/soc/fingerprints/suricataengine.syncBlock for details" +fi } determine_elastic_agent_upgrade() { From 37b3fd9b7b26a4e4267a0b2cbe07d8a7fd8bc8ed Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Thu, 13 Nov 2025 10:41:12 -0500 Subject: [PATCH 09/67] add detections backup --- salt/manager/tools/sbin/soup | 26 +++++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 0fb83d73f..cb5ec65d4 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -1112,11 +1112,35 @@ cat > /opt/so/conf/soc/fingerprints/suricataengine.syncBlock << EOF Suricata ruleset sync is blocked until this file is removed. Make sure that you have manually added any custom Suricata rulesets via SOC config - review the documentation for more details: securityonion.net/docs EOF -# TODO - backup custom rules & overrides +# Backup custom rules & overrides mkdir -p /nsm/backup/detections-migration/2-4-200 cp /usr/sbin/so-rule-update /nsm/backup/detections-migration/2-4-200 cp /opt/so/conf/idstools/etc/rulecat.conf /nsm/backup/detections-migration/2-4-200 +if [[ -f /opt/so/conf/soc/so-detections-backup.py ]]; then + python3 /opt/so/conf/soc/so-detections-backup.py + + # Verify backup by comparing counts + echo "Verifying detection overrides backup..." 
+ es_override_count=$(curl -s -K /opt/so/conf/elasticsearch/curl.config -k -L \ + "https://localhost:9200/so-detection/_count" \ + -H "Content-Type: application/json" \ + -d '{"query": {"bool": {"must": [{"exists": {"field": "so_detection.overrides"}}]}}}' | jq -r '.count') + + backup_override_count=$(find /nsm/backup/detections/repo/*/overrides -type f 2>/dev/null | wc -l) + + echo " Elasticsearch overrides: $es_override_count" + echo " Backed up overrides: $backup_override_count" + + if [[ "$es_override_count" -eq "$backup_override_count" ]]; then + echo " Override backup verified successfully" + else + echo " Warning: Override counts do not match" + fi +else + echo "SOC Detections backup script not found, skipping detection backup" +fi + } suricata_idstools_removal_post() { From b2606b6094c2c387db32502b6d55931b65035468 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Thu, 13 Nov 2025 14:10:51 -0500 Subject: [PATCH 10/67] fix perms --- salt/manager/tools/sbin/soup | 2 ++ 1 file changed, 2 insertions(+) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index cb5ec65d4..fae574cae 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -1108,6 +1108,8 @@ suricata_idstools_removal_pre() { # For SOUPs beginning with 2.4.200 - pre SOUP checks # Create syncBlock file +install -d -o 939 -g 939 -m 755 /opt/so/conf/soc/fingerprints +install -o 939 -g 939 -m 644 /dev/null /opt/so/conf/soc/fingerprints/suricataengine.syncBlock cat > /opt/so/conf/soc/fingerprints/suricataengine.syncBlock << EOF Suricata ruleset sync is blocked until this file is removed. Make sure that you have manually added any custom Suricata rulesets via SOC config - review the documentation for more details: securityonion.net/docs EOF From f047677d8a2c4df46fc67ea87f93e766d34a332c Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Fri, 14 Nov 2025 09:03:08 -0500 Subject: [PATCH 11/67] Check correct files --- salt/manager/tools/sbin/soup | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index fae574cae..64f1880d7 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -1235,8 +1235,8 @@ check_config_file() { } # Check so-rule-update and rulecat.conf -SO_RULE_UPDATE="/nsm/backup/detections-migration/2-4-200/so-rule-update" -RULECAT_CONF="/nsm/backup/detections-migration/2-4-200/rulecat.conf" +SO_RULE_UPDATE="/usr/sbin/so-rule-update" +RULECAT_CONF="/opt/so/conf/idstools/etc/rulecat.conf" custom_found=0 From 431e5abf89a609247815d2384f1cdacf1601b5cf Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Fri, 14 Nov 2025 09:39:33 -0500 Subject: [PATCH 12/67] Extract ETPRO key if found --- salt/manager/tools/sbin/soup | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 64f1880d7..84b4c7903 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -1134,10 +1134,15 @@ if [[ -f /opt/so/conf/soc/so-detections-backup.py ]]; then echo " Elasticsearch overrides: $es_override_count" echo " Backed up overrides: $backup_override_count" - if [[ "$es_override_count" -eq "$backup_override_count" ]]; then - echo " Override backup verified successfully" + if [[ "$es_override_count" -gt 0 ]]; then + if [[ "$backup_override_count" -gt 0 ]]; then + echo " Override backup verified successfully" + else + echo " Error: Elasticsearch has $es_override_count overrides but backup has 0 
files" + exit 1 + fi else - echo " Warning: Override counts do not match" + echo " No overrides to backup" fi else echo "SOC Detections backup script not found, skipping detection backup" @@ -1228,6 +1233,15 @@ check_config_file() { if [[ $match_found -eq 0 ]]; then echo "Does not match known default - custom configuration detected" echo "Custom $file_display_name detected (hash: $file_hash)" >> /opt/so/conf/soc/fingerprints/suricataengine.syncBlock + + # If this is so-rule-update, check for ETPRO key + if [[ "$file_display_name" == "so-rule-update" ]]; then + etpro_key=$(grep -oP '\-\-etpro=\K[0-9a-fA-F]+' "$file" 2>/dev/null || true) + if [[ -n "$etpro_key" ]]; then + echo "ETPRO key found: $etpro_key" >> /opt/so/conf/soc/fingerprints/suricataengine.syncBlock + fi + fi + return 1 fi From de4424fab0b666ab4299fa3ba259a1b84ba60a78 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Fri, 14 Nov 2025 19:15:51 -0600 Subject: [PATCH 13/67] remove typos --- salt/elasticsearch/defaults.yaml | 5 ----- 1 file changed, 5 deletions(-) diff --git a/salt/elasticsearch/defaults.yaml b/salt/elasticsearch/defaults.yaml index 592f47a2b..bbfaf3244 100644 --- a/salt/elasticsearch/defaults.yaml +++ b/salt/elasticsearch/defaults.yaml @@ -245,7 +245,6 @@ elasticsearch: set_priority: priority: 50 min_age: 30d - warm: 7 so-detection: index_sorting: false index_template: @@ -584,7 +583,6 @@ elasticsearch: set_priority: priority: 50 min_age: 30d - warm: 7 so-import: index_sorting: false index_template: @@ -932,7 +930,6 @@ elasticsearch: set_priority: priority: 50 min_age: 30d - warm: 7 so-hydra: close: 30 delete: 365 @@ -1043,7 +1040,6 @@ elasticsearch: set_priority: priority: 50 min_age: 30d - warm: 7 so-lists: index_sorting: false index_template: @@ -3123,7 +3119,6 @@ elasticsearch: set_priority: priority: 50 min_age: 30d - warm: 7 so-logs-system_x_application: index_sorting: false index_template: From af7f7d0728eb7e537c08f45836d79682e69d8f39 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Mon, 17 Nov 2025 12:00:08 -0500 Subject: [PATCH 14/67] Fix file paths --- salt/soc/enabled.sls | 3 ++- salt/suricata/config.sls | 5 ++++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/salt/soc/enabled.sls b/salt/soc/enabled.sls index 62873ebdd..0319c6c81 100644 --- a/salt/soc/enabled.sls +++ b/salt/soc/enabled.sls @@ -28,7 +28,8 @@ so-soc: - /opt/so/conf/sigma:/opt/sensoroni/sigma:rw - /opt/so/conf/suricata:/opt/sensoroni/suricata:rw - /opt/so/rules/elastalert/rules:/opt/sensoroni/elastalert:rw - - /opt/so/rules/nids/suri:/opt/sensoroni/nids:rw + - /opt/so/saltstack/local/salt/suricata/rules:/opt/sensoroni/suricata/rules:rw + - /opt/so/saltstack/local/salt/suricata/files:/opt/sensoroni/suricata/threshold:rw - /opt/so/conf/soc/fingerprints:/opt/sensoroni/fingerprints:rw - /nsm/soc/jobs:/opt/sensoroni/jobs:rw - /nsm/soc/uploads:/nsm/soc/uploads:rw diff --git a/salt/suricata/config.sls b/salt/suricata/config.sls index 3379697bc..685aa66e7 100644 --- a/salt/suricata/config.sls +++ b/salt/suricata/config.sls @@ -158,7 +158,10 @@ suriconfig: surithresholding: file.managed: - name: /opt/so/conf/suricata/threshold.conf - - replace: False + - source: salt://suricata/files/threshold.conf + - user: 940 + - group: 940 + - contents: 'This file is managed by Security Onion. Do not modify by hand.' 
suriclassifications: file.managed: From 1b55642c868287bb2bb49d9adbb022bcb9c586a5 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Tue, 18 Nov 2025 09:58:14 -0500 Subject: [PATCH 15/67] Refactor rules location --- salt/allowed_states.map.jinja | 1 - salt/suricata/config.sls | 8 +++----- salt/suricata/enabled.sls | 2 +- salt/suricata/manager.sls | 30 ------------------------------ salt/suricata/rules/PLACEHOLDER | 0 salt/top.sls | 5 ----- 6 files changed, 4 insertions(+), 42 deletions(-) delete mode 100644 salt/suricata/manager.sls create mode 100644 salt/suricata/rules/PLACEHOLDER diff --git a/salt/allowed_states.map.jinja b/salt/allowed_states.map.jinja index c41573522..2393f92d7 100644 --- a/salt/allowed_states.map.jinja +++ b/salt/allowed_states.map.jinja @@ -38,7 +38,6 @@ 'hydra', 'elasticfleet', 'elastic-fleet-package-registry', - 'suricata.manager', 'utility' ] %} diff --git a/salt/suricata/config.sls b/salt/suricata/config.sls index 685aa66e7..c7c687bae 100644 --- a/salt/suricata/config.sls +++ b/salt/suricata/config.sls @@ -90,7 +90,7 @@ suridir: suriruledir: file.directory: - - name: /opt/so/conf/suricata/rules + - name: /opt/so/rules/suricata - user: 940 - group: 939 - mode: 775 @@ -118,12 +118,10 @@ suridatadir: - mode: 770 - makedirs: True -# salt:// would resolve to /opt/so/rules/nids because of the defined file_roots and -# not existing under /opt/so/saltstack/local/salt or /opt/so/saltstack/default/salt surirulesync: file.recurse: - - name: /opt/so/conf/suricata/rules/ - - source: salt://suri/ + - name: /opt/so/rules/suricata/ + - source: salt://suricata/rules/ - user: 940 - group: 940 - show_changes: False diff --git a/salt/suricata/enabled.sls b/salt/suricata/enabled.sls index 34e9f2e4c..1576a0629 100644 --- a/salt/suricata/enabled.sls +++ b/salt/suricata/enabled.sls @@ -36,7 +36,7 @@ so-suricata: - /opt/so/conf/suricata/suricata.yaml:/etc/suricata/suricata.yaml:ro - /opt/so/conf/suricata/threshold.conf:/etc/suricata/threshold.conf:ro - /opt/so/conf/suricata/classification.config:/etc/suricata/classification.config:ro - - /opt/so/conf/suricata/rules:/etc/suricata/rules:ro + - /opt/so/rules/suricata:/etc/suricata/rules:ro - /opt/so/log/suricata/:/var/log/suricata/:rw - /nsm/suricata/:/nsm/:rw - /nsm/suricata/extracted:/var/log/suricata//filestore:rw diff --git a/salt/suricata/manager.sls b/salt/suricata/manager.sls deleted file mode 100644 index 3d5183556..000000000 --- a/salt/suricata/manager.sls +++ /dev/null @@ -1,30 +0,0 @@ -{% from 'allowed_states.map.jinja' import allowed_states %} -{% if sls in allowed_states %} - -surilocaldir: - file.directory: - - name: /opt/so/saltstack/local/salt/suricata - - user: socore - - group: socore - - makedirs: True - -ruleslink: - file.symlink: - - name: /opt/so/saltstack/local/salt/suricata/rules - - user: socore - - group: socore - - target: /opt/so/rules/nids/suri - -refresh_salt_master_fileserver_suricata_ruleslink: - salt.runner: - - name: fileserver.update - - onchanges: - - file: ruleslink - -{% else %} - -{{sls}}_state_not_allowed: - test.fail_without_changes: - - name: {{sls}}_state_not_allowed - -{% endif %} diff --git a/salt/suricata/rules/PLACEHOLDER b/salt/suricata/rules/PLACEHOLDER new file mode 100644 index 000000000..e69de29bb diff --git a/salt/top.sls b/salt/top.sls index 613878860..d80806564 100644 --- a/salt/top.sls +++ b/salt/top.sls @@ -74,7 +74,6 @@ base: - sensoroni - telegraf - firewall - - suricata.manager - healthcheck - elasticsearch - elastic-fleet-package-registry @@ -105,7 +104,6 @@ base: - firewall - 
sensoroni - telegraf - - suricata.manager - healthcheck - elasticsearch - logstash @@ -140,7 +138,6 @@ base: - sensoroni - telegraf - backup.config_backup - - suricata.manager - elasticsearch - logstash - redis @@ -174,7 +171,6 @@ base: - sensoroni - telegraf - backup.config_backup - - suricata.manager - elasticsearch - logstash - redis @@ -204,7 +200,6 @@ base: - sensoroni - telegraf - firewall - - suricata.manager - pcap - elasticsearch - elastic-fleet-package-registry From 148ef7ef21dd40a6008d9a5e59afd4fa4f318f2b Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Tue, 18 Nov 2025 11:57:30 -0500 Subject: [PATCH 16/67] add default ruleset --- salt/soc/defaults.yaml | 8 ++++++++ salt/suricata/config.sls | 2 +- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 65cdd385d..53cbb10e1 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1586,6 +1586,14 @@ soc: insecureSkipVerify: false readOnly: true deleteUnreferenced: true + - name: ABUSECH-SSLBL + deleteUnreferenced: true + description: 'Abuse.ch SSL Blacklist' + enabled: false + license: CC0-1.0 + readOnly: true + sourcePath: https://sslbl.abuse.ch/blacklist/sslblacklist_tls_cert.tar.gz + sourceType: url - name: local-rules id: local-rules description: "Local custom rules from files (*.rules) in a directory on the filesystem" diff --git a/salt/suricata/config.sls b/salt/suricata/config.sls index c7c687bae..7ce605e0b 100644 --- a/salt/suricata/config.sls +++ b/salt/suricata/config.sls @@ -159,7 +159,7 @@ surithresholding: - source: salt://suricata/files/threshold.conf - user: 940 - group: 940 - - contents: 'This file is managed by Security Onion. Do not modify by hand.' + - onlyif: salt://suricata/files/threshold.conf suriclassifications: file.managed: From a155f450362854ca65f037ce713386aea931103d Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Wed, 19 Nov 2025 13:24:29 -0600 Subject: [PATCH 17/67] always update annotation / defaults for managed integrations --- salt/manager/managed_soc_annotations.sls | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/salt/manager/managed_soc_annotations.sls b/salt/manager/managed_soc_annotations.sls index d8f175df6..64f45d40d 100644 --- a/salt/manager/managed_soc_annotations.sls +++ b/salt/manager/managed_soc_annotations.sls @@ -25,9 +25,7 @@ {% set index_settings = es.get('index_settings', {}) %} {% set input = index_settings.get('so-logs', {}) %} {% for k in matched_integration_names %} - {% if k not in index_settings %} - {% set _ = index_settings.update({k: input}) %} - {% endif %} + {% set _ = index_settings.update({k: input}) %} {% endfor %} {% for k in addon_integration_keys %} {% if k not in matched_integration_names and k in index_settings %} @@ -45,10 +43,8 @@ {% set es = data.get('elasticsearch', {}) %} {% set index_settings = es.get('index_settings', {}) %} {% for k in matched_integration_names %} - {% if k not in index_settings %} - {% set input = ADDON_INTEGRATION_DEFAULTS[k] %} - {% set _ = index_settings.update({k: input})%} - {% endif %} + {% set input = ADDON_INTEGRATION_DEFAULTS[k] %} + {% set _ = index_settings.update({k: input})%} {% endfor %} {% for k in addon_integration_keys %} {% if k not in matched_integration_names and k in index_settings %} From b52dd53e2906e92af253c09cb5b2475caa47c77e Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Wed, 19 Nov 2025 13:24:55 -0600 Subject: [PATCH 18/67] 
advanced ilm actions --- .../integration-defaults.map.jinja | 22 +++ salt/elasticsearch/defaults.yaml | 32 +++ salt/elasticsearch/soc_elasticsearch.yaml | 186 ++++++++++++++++++ salt/elasticsearch/template.map.jinja | 87 ++++++++ 4 files changed, 327 insertions(+) diff --git a/salt/elasticfleet/integration-defaults.map.jinja b/salt/elasticfleet/integration-defaults.map.jinja index 500a9e63c..69ce7f3af 100644 --- a/salt/elasticfleet/integration-defaults.map.jinja +++ b/salt/elasticfleet/integration-defaults.map.jinja @@ -121,6 +121,9 @@ "phases": { "cold": { "actions": { + "allocate":{ + "number_of_replicas": "" + }, "set_priority": {"priority": 0} }, "min_age": "60d" @@ -137,12 +140,31 @@ "max_age": "30d", "max_primary_shard_size": "50gb" }, + "forcemerge":{ + "max_num_segments": "" + }, + "shrink":{ + "max_primary_shard_size": "", + "method": "COUNT", + "number_of_shards": "" + }, "set_priority": {"priority": 100} }, "min_age": "0ms" }, "warm": { "actions": { + "allocate": { + "number_of_replicas": "" + }, + "forcemerge": { + "max_num_segments": "" + }, + "shrink":{ + "max_primary_shard_size": "", + "method": "COUNT", + "number_of_shards": "" + }, "set_priority": {"priority": 50} }, "min_age": "30d" diff --git a/salt/elasticsearch/defaults.yaml b/salt/elasticsearch/defaults.yaml index bbfaf3244..5cfb9a0e0 100644 --- a/salt/elasticsearch/defaults.yaml +++ b/salt/elasticsearch/defaults.yaml @@ -72,6 +72,8 @@ elasticsearch: actions: set_priority: priority: 0 + allocate: + number_of_replicas: "" min_age: 60d delete: actions: @@ -84,11 +86,25 @@ elasticsearch: max_primary_shard_size: 50gb set_priority: priority: 100 + forcemerge: + max_num_segments: "" + shrink: + max_primary_shard_size: "" + method: COUNT + number_of_shards: "" min_age: 0ms warm: actions: set_priority: priority: 50 + forcemerge: + max_num_segments: "" + shrink: + max_primary_shard_size: "" + method: COUNT + number_of_shards: "" + allocate: + number_of_replicas: "" min_age: 30d so-case: index_sorting: false @@ -1123,6 +1139,8 @@ elasticsearch: actions: set_priority: priority: 0 + allocate: + number_of_replicas: "" min_age: 60d delete: actions: @@ -1135,11 +1153,25 @@ elasticsearch: max_primary_shard_size: 50gb set_priority: priority: 100 + forcemerge: + max_num_segments: "" + shrink: + max_primary_shard_size: "" + method: COUNT + number_of_shards: "" min_age: 0ms warm: actions: set_priority: priority: 50 + allocate: + number_of_replicas: "" + forcemerge: + max_num_segments: "" + shrink: + max_primary_shard_size: "" + method: COUNT + number_of_shards: "" min_age: 30d so-logs-detections_x_alerts: index_sorting: false diff --git a/salt/elasticsearch/soc_elasticsearch.yaml b/salt/elasticsearch/soc_elasticsearch.yaml index 097a53296..27d5654b5 100644 --- a/salt/elasticsearch/soc_elasticsearch.yaml +++ b/salt/elasticsearch/soc_elasticsearch.yaml @@ -131,6 +131,47 @@ elasticsearch: description: Maximum primary shard size. Once an index reaches this limit, it will be rolled over into a new index. global: True helpLink: elasticsearch.html + shrink: + method: + description: Shrink the index to a new index with fewer primary shards. Shrink operation is by count or size. + options: + - COUNT + - SIZE + global: True + advanced: True + forcedType: string + number_of_shards: + title: shard count + description: Desired shard count. Note that this value is only used when the shrink method selected is 'COUNT'. 
+ global: True + forcedType: int + advanced: True + max_primary_shard_size: + title: max shard size + description: Desired shard size in gb/tb/pb eg. 100gb. Note that this value is only used when the shrink method selected is 'SIZE'. + regex: ^(?:[0-9]+(?:gb|tb|pb)|)$ + global: True + forcedType: string + advanced: True + allow_write_after_shrink: + description: Allow writes after shrink. + global: True + forcedType: bool + default: False + advanced: True + forcemerge: + max_num_segments: + description: Reduce the number of segments in each index shard and clean up deleted documents. + global: True + forcedType: int + advanced: True + index_codec: + title: compression + description: Use higher compression for stored fields at the cost of slower performance. + forcedType: bool + global: True + default: False + advanced: True cold: min_age: description: Minimum age of index. ex. 60d - This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed. It’s important to note that this is calculated relative to the rollover date (NOT the original creation date of the index). For example, if you have an index that is set to rollover after 30 days and cold min_age set to 60 then there will be 30 days from index creation to rollover and then an additional 60 days before moving to cold tier. @@ -144,6 +185,12 @@ elasticsearch: description: Used for index recovery after a node restart. Indices with higher priorities are recovered before indices with lower priorities. global: True helpLink: elasticsearch.html + allocate: + number_of_replicas: + description: Set the number of replicas. Remains the same as the previous phase by default. + forcedType: int + global: True + advanced: True warm: min_age: description: Minimum age of index. ex. 30d - This determines when the index should be moved to the warm tier. Nodes in the warm tier generally don’t need to be as fast as those in the hot tier. It’s important to note that this is calculated relative to the rollover date (NOT the original creation date of the index). For example, if you have an index that is set to rollover after 30 days and warm min_age set to 30 then there will be 30 days from index creation to rollover and then an additional 30 days before moving to warm tier. @@ -158,6 +205,52 @@ elasticsearch: forcedType: int global: True helpLink: elasticsearch.html + shrink: + method: + description: Shrink the index to a new index with fewer primary shards. Shrink operation is by count or size. + options: + - COUNT + - SIZE + global: True + advanced: True + number_of_shards: + title: shard count + description: Desired shard count. Note that this value is only used when the shrink method selected is 'COUNT'. + global: True + forcedType: int + advanced: True + max_primary_shard_size: + title: max shard size + description: Desired shard size in gb/tb/pb eg. 100gb. Note that this value is only used when the shrink method selected is 'SIZE'. + regex: ^(?:[0-9]+(?:gb|tb|pb)|)$ + global: True + forcedType: string + advanced: True + allow_write_after_shrink: + description: Allow writes after shrink. + global: True + forcedType: bool + default: False + advanced: True + forcemerge: + max_num_segments: + description: Reduce the number of segments in each index shard and clean up deleted documents. 
+ global: True + forcedType: int + advanced: True + index_codec: + title: compression + description: Use higher compression for stored fields at the cost of slower performance. + forcedType: bool + global: True + default: False + advanced: True + allocate: + number_of_replicas: + description: Set the number of replicas. Remains the same as the previous phase by default. + forcedType: int + global: True + advanced: True delete: min_age: description: Minimum age of index. ex. 90d - This determines when the index should be deleted. It’s important to note that this is calculated relative to the rollover date (NOT the original creation date of the index). For example, if you have an index that is set to rollover after 30 days and delete min_age set to 90 then there will be 30 days from index creation to rollover and then an additional 90 days before deletion. @@ -287,6 +380,47 @@ elasticsearch: global: True advanced: True helpLink: elasticsearch.html + shrink: + method: + description: Shrink the index to a new index with fewer primary shards. Shrink operation is by count or size. + options: + - COUNT + - SIZE + global: True + advanced: True + forcedType: string + number_of_shards: + title: shard count + description: Desired shard count. Note that this value is only used when the shrink method selected is 'COUNT'. + global: True + forcedType: int + advanced: True + max_primary_shard_size: + title: max shard size + description: Desired shard size in gb/tb/pb eg. 100gb. Note that this value is only used when the shrink method selected is 'SIZE'. + regex: ^(?:[0-9]+(?:gb|tb|pb)|)$ + global: True + forcedType: string + advanced: True + allow_write_after_shrink: + description: Allow writes after shrink. + global: True + forcedType: bool + default: False + advanced: True + forcemerge: + max_num_segments: + description: Reduce the number of segments in each index shard and clean up deleted documents. + global: True + forcedType: int + advanced: True + index_codec: + title: compression + description: Use higher compression for stored fields at the cost of slower performance. + forcedType: bool + global: True + default: False + advanced: True warm: min_age: description: Minimum age of index. ex. 30d - This determines when the index should be moved to the warm tier. Nodes in the warm tier generally don’t need to be as fast as those in the hot tier. It’s important to note that this is calculated relative to the rollover date (NOT the original creation date of the index). For example, if you have an index that is set to rollover after 30 days and warm min_age set to 30 then there will be 30 days from index creation to rollover and then an additional 30 days before moving to warm tier. @@ -314,6 +448,52 @@ elasticsearch: global: True advanced: True helpLink: elasticsearch.html + shrink: + method: + description: Shrink the index to a new index with fewer primary shards. Shrink operation is by count or size. + options: + - COUNT + - SIZE + global: True + advanced: True + number_of_shards: + title: shard count + description: Desired shard count. Note that this value is only used when the shrink method selected is 'COUNT'. + global: True + forcedType: int + advanced: True + max_primary_shard_size: + title: max shard size + description: Desired shard size in gb/tb/pb eg. 100gb. Note that this value is only used when the shrink method selected is 'SIZE'. + regex: ^(?:[0-9]+(?:gb|tb|pb)|)$ + global: True + forcedType: string + advanced: True + allow_write_after_shrink: + description: Allow writes after shrink. 
+ global: True + forcedType: bool + default: False + advanced: True + forcemerge: + max_num_segments: + description: Reduce the number of segments in each index shard and clean up deleted documents. + global: True + forcedType: int + advanced: True + index_codec: + title: compression + description: Use higher compression for stored fields at the cost of slower performance. + forcedType: bool + global: True + default: False + advanced: True + allocate: + number_of_replicas: + description: Set the number of replicas. Remains the same as the previous phase by default. + forcedType: int + global: True + advanced: True cold: min_age: description: Minimum age of index. ex. 60d - This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed. It’s important to note that this is calculated relative to the rollover date (NOT the original creation date of the index). For example, if you have an index that is set to rollover after 30 days and cold min_age set to 60 then there will be 30 days from index creation to rollover and then an additional 60 days before moving to cold tier. @@ -330,6 +510,12 @@ elasticsearch: global: True advanced: True helpLink: elasticsearch.html + allocate: + number_of_replicas: + description: Set the number of replicas. Remains the same as the previous phase by default. + forcedType: int + global: True + advanced: True delete: min_age: description: Minimum age of index. ex. 90d - This determines when the index should be deleted. It’s important to note that this is calculated relative to the rollover date (NOT the original creation date of the index). For example, if you have an index that is set to rollover after 30 days and delete min_age set to 90 then there will be 30 days from index creation to rollover and then an additional 90 days before deletion. 
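A worked example of how the new advanced actions are expected to collapse (hypothetical values, not the shipped defaults): if an index's warm phase is configured with shrink method COUNT and shard count 1, while max shard size, forcemerge max_num_segments and allocate number_of_replicas are left empty, the pruning added to template.map.jinja below should reduce the warm phase to roughly:

    "warm": {
      "min_age": "30d",
      "actions": {
        "set_priority": { "priority": 50 },
        "shrink": { "number_of_shards": 1 }
      }
    }

That is, the SOC-only "method" selector and the unused max_primary_shard_size are stripped from the shrink block, and the empty allocate and forcemerge blocks are dropped entirely before the ILM policy is rendered.
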
diff --git a/salt/elasticsearch/template.map.jinja b/salt/elasticsearch/template.map.jinja index 414d8a6b4..904ab862c 100644 --- a/salt/elasticsearch/template.map.jinja +++ b/salt/elasticsearch/template.map.jinja @@ -61,5 +61,92 @@ {% do settings.index_template.template.settings.index.pop('sort') %} {% endif %} {% endif %} + +{# advanced ilm actions #} +{% if settings.policy is defined and settings.policy.phases is defined %} +{# start HOT actions #} +{# only run if hot action is defined for this index #} +{% if settings.policy.phases.hot is defined and settings.policy.phases.hot.actions is defined %} +{% set HA = settings.policy.phases.hot.actions %} +{% if HA.shrink is defined %} +{% if HA.shrink.method is defined %} +{% if HA.shrink.method == 'COUNT' and HA.shrink.number_of_shards is defined and HA.shrink.number_of_shards %} +{# remove max_primary_shard_size value when doing shrink operation by count vs size #} +{% do HA.shrink.pop('max_primary_shard_size', none) %} +{% elif HA.shrink.method == 'SIZE' and HA.shrink.max_primary_shard_size is defined and HA.shrink.max_primary_shard_size %} +{# remove number_of_shards value when doing shrink operation by size vs count #} +{% do HA.shrink.pop('number_of_shards', none) %} +{% else %} +{# method isn't defined or missing a required config number_of_shards/max_primary_shard_size #} +{% do HA.pop('shrink', none) %} +{% endif %} +{% endif %} +{% endif %} +{# always remove method since its only used for SOC config, not in the actual ilm policy #} +{% if HA.shrink is defined %} +{% do HA.shrink.pop('method', none) %} +{% endif %} +{# end shrink action #} +{# start force merge #} +{% if HA.forcemerge is defined %} +{% if HA.forcemerge.index_codec is defined and HA.forcemerge.index_codec %} +{% do HA.forcemerge.update({'index_codec': 'best_compression'}) %} +{% else %} +{% do HA.forcemerge.pop('index_codec', none) %} +{% endif %} +{% if HA.forcemerge.max_num_segments is defined and not HA.forcemerge.max_num_segments %} +{# max_num_segments is empty, drop it #} +{% do HA.pop('forcemerge', none) %} +{% endif %} +{% endif %} +{# end force merge #} +{% endif %} +{# end HOT actions #} +{# Start WARM actions #} +{# only run if warm action is defined for this index #} +{% if settings.policy.phases.warm is defined and settings.policy.phases.warm.actions is defined %} +{% set WA = settings.policy.phases.warm.actions %} +{# start warm shrink action #} +{% if WA.shrink is defined %} +{% if WA.shrink.method is defined %} +{% if WA.shrink.method == 'COUNT' and WA.shrink.number_of_shards is defined and WA.shrink.number_of_shards %} +{# remove max_primary_shard_size value when doing shrink operation by count vs size #} +{% do WA.shrink.pop('max_primary_shard_size', none) %} +{% elif WA.shrink.method == 'SIZE' and WA.shrink.max_primary_shard_size is defined and WA.shrink.max_primary_shard_size %} +{# remove number_of_shards value when doing shrink operation by size vs count #} +{% do WA.shrink.pop('number_of_shards', none) %} +{% else %} +{# method isn't defined or missing a required config number_of_shards/max_primary_shard_size #} +{% do WA.pop('shrink', none) %} +{% endif %} +{% endif %} +{% endif %} +{# always remove method since its only used for SOC config, not in the actual ilm policy #} +{% if WA.shrink is defined %} +{% do WA.shrink.pop('method', none) %} +{% endif %} +{# end shrink action #} +{# start force merge #} +{% if WA.forcemerge is defined %} +{% if WA.forcemerge.index_codec is defined and WA.forcemerge.index_codec %} +{% do 
WA.forcemerge.update({'index_codec': 'best_compression'}) %} +{% else %} +{% do WA.forcemerge.pop('index_codec', none) %} +{% endif %} +{% if WA.forcemerge.max_num_segments is defined and not WA.forcemerge.max_num_segments %} +{# max_num_segments is empty, drop it #} +{% do WA.pop('forcemerge', none) %} +{% endif %} +{% endif %} +{# end force merge #} +{% if WA.allocate is defined %} +{% if WA.allocate.number_of_replicas is defined and not WA.allocate.number_of_replicas %} +{% do WA.pop('allocate', none) %} +{% endif %} +{% endif %} +{% endif %} +{# end WARM actions #} +{% endif %} + {% do ES_INDEX_SETTINGS.update({index | replace("_x_", "."): ES_INDEX_SETTINGS_GLOBAL_OVERRIDES[index]}) %} {% endfor %} From bce7a20d8b61146d8c600253adcb8febec66dc55 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Fri, 21 Nov 2025 14:19:51 -0600 Subject: [PATCH 19/67] soc configurable EA logstash output adv settings --- salt/elasticfleet/defaults.yaml | 8 +++++ salt/elasticfleet/soc_elasticfleet.yaml | 40 +++++++++++++++++++++++++ 2 files changed, 48 insertions(+) diff --git a/salt/elasticfleet/defaults.yaml b/salt/elasticfleet/defaults.yaml index 0f013e320..a3132d3f4 100644 --- a/salt/elasticfleet/defaults.yaml +++ b/salt/elasticfleet/defaults.yaml @@ -10,6 +10,14 @@ elasticfleet: grid_enrollment: '' defend_filters: enable_auto_configuration: False + outputs: + logstash: + bulk_max_size: '' + worker: '' + queue_mem_events: '' + timeout: '' + loadbalance: False + compression_level: '' subscription_integrations: False auto_upgrade_integrations: False logging: diff --git a/salt/elasticfleet/soc_elasticfleet.yaml b/salt/elasticfleet/soc_elasticfleet.yaml index 450e044e6..d7c324855 100644 --- a/salt/elasticfleet/soc_elasticfleet.yaml +++ b/salt/elasticfleet/soc_elasticfleet.yaml @@ -50,6 +50,46 @@ elasticfleet: global: True forcedType: bool helpLink: elastic-fleet.html + outputs: + logstash: + bulk_max_size: + description: The maximum number of events to bulk in a single Logstash request. + global: True + forcedType: int + advanced: True + helpLink: elastic-fleet.html + worker: + description: The number of workers per configured host publishing events. + global: True + forcedType: int + advanced: true + helpLink: elastic-fleet.html + queue_mem_events: + title: queued events + description: The number of events the queue can store. This value should be evenly divisible by the smaller of 'bulk_max_size' to avoid sending partial batches to the output. + global: True + forcedType: int + advanced: True + helpLink: elastic-fleet.html + timeout: + description: The number of seconds to wait for responses from the Logstash server before timing out. Eg 30s + regex: ^[0-9]+s$ + advanced: True + global: True + helpLink: elastic-fleet.html + loadbalance: + description: If true and multiple Logstash hosts are configured, the output plugin load balances published events onto all Logstash hosts. If false, the output plugin sends all events to one host (determined at random) and switches to another host if the selected one becomes unresponsive. + forcedType: bool + advanced: True + global: True + helpLink: elastic-fleet.html + compression: + description: The gzip compression level. The compression level must be in the range of 1 (best speed) to 9 (best compression). + regex: ^[1-9]$ + forcedType: int + advanced: True + global: True + helpLink: elastic-fleet.html server: custom_fqdn: description: Custom FQDN for Agents to connect to. One per line. 
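A sketch of what the following patch renders (hypothetical operator values; the shipped defaults above are all empty): if the pillar is set to, for example, bulk_max_size: 4096, worker: 2, queue_mem_events: 8192, timeout: 60s, loadbalance: True and compression_level: 3, the new config.map.jinja should produce LOGSTASH_CONFIG_YAML as a single string along the lines of:

    bulk_max_size: 4096\nworker: 2\nqueue.mem.events: 8192\ntimeout: 60s\nloadbalance: True\ncompression_level: 3

Here queue_mem_events is renamed to queue.mem.events, empty values and a false loadbalance are omitted, and the entries are joined with a literal \n so the string can be embedded in the jq-built config_yaml field by so-elastic-fleet-outputs-update.
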
From 4490ea763594b320315740d1435120cf993693f0 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Fri, 21 Nov 2025 14:21:17 -0600 Subject: [PATCH 20/67] format EA logstash output adv config items --- salt/elasticfleet/config.map.jinja | 34 ++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) create mode 100644 salt/elasticfleet/config.map.jinja diff --git a/salt/elasticfleet/config.map.jinja b/salt/elasticfleet/config.map.jinja new file mode 100644 index 000000000..b95a3e895 --- /dev/null +++ b/salt/elasticfleet/config.map.jinja @@ -0,0 +1,34 @@ +{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one + or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at + https://securityonion.net/license; you may not use this file except in compliance with the + Elastic License 2.0. #} + +{% from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED %} + +{# advanced config_yaml options for elasticfleet logstash output #} +{% set ADV_OUTPUT_LOGSTASH_RAW = ELASTICFLEETMERGED.config.outputs.logstash %} +{% set ADV_OUTPUT_LOGSTASH = {} %} +{% for k, v in ADV_OUTPUT_LOGSTASH_RAW.items() %} +{% if v != "" and v is not none %} +{% if k == 'queue_mem_events' %} +{# rename queue_mem_events queue.mem.events #} +{% do ADV_OUTPUT_LOGSTASH.update({'queue.mem.events':v}) %} +{% elif k == 'loadbalance' %} +{% if v %} +{# only include loadbalance config when its True #} +{% do ADV_OUTPUT_LOGSTASH.update({k:v}) %} +{% endif %} +{% else %} +{% do ADV_OUTPUT_LOGSTASH.update({k:v}) %} +{% endif %} +{% endif %} +{% endfor %} + +{% set LOGSTASH_CONFIG_YAML_RAW = [] %} +{% if ADV_OUTPUT_LOGSTASH %} +{% for k, v in ADV_OUTPUT_LOGSTASH.items() %} +{% do LOGSTASH_CONFIG_YAML_RAW.append(k ~ ': ' ~ v) %} +{% endfor %} +{% endif %} + +{% set LOGSTASH_CONFIG_YAML = LOGSTASH_CONFIG_YAML_RAW | join('\\n') if LOGSTASH_CONFIG_YAML_RAW else '' %} From 1fb00c8eb696d91eb8c0e16ce96976c3ecdb298d Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Fri, 21 Nov 2025 14:22:42 -0600 Subject: [PATCH 21/67] update so-elastic-fleet-outputs-update to use advanced output options when set, else empty "". Also trigger update_logstash_outputs() when hash of config_yaml has changed --- .../so-elastic-fleet-outputs-update | 23 ++++++++++++++----- 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-outputs-update b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-outputs-update index 9efe8a19d..de9b5f93f 100644 --- a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-outputs-update +++ b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-outputs-update @@ -3,11 +3,13 @@ # Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one # or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use # this file except in compliance with the Elastic License 2.0. -{% from 'vars/globals.map.jinja' import GLOBALS %} -{% from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED %} +{%- from 'vars/globals.map.jinja' import GLOBALS %} +{%- from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED %} +{%- from 'elasticfleet/config.map.jinja' import LOGSTASH_CONFIG_YAML %} . /usr/sbin/so-common +FORCE_UPDATE=false # Only run on Managers if ! is_manager_node; then printf "Not a Manager Node... 
Exiting" @@ -22,7 +24,7 @@ function update_logstash_outputs() { --arg UPDATEDLIST "$NEW_LIST_JSON" \ --argjson SECRETS "$SECRETS" \ --argjson SSL_CONFIG "$SSL_CONFIG" \ - '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":"","ssl": $SSL_CONFIG,"secrets": $SECRETS}') + '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":"{{ LOGSTASH_CONFIG_YAML }}","ssl": $SSL_CONFIG,"secrets": $SECRETS}') else JSON_STRING=$(jq -n \ --arg UPDATEDLIST "$NEW_LIST_JSON" \ @@ -97,9 +99,18 @@ function update_kafka_outputs() { exit 1 fi + CURRENT_LOGSTASH_ADV_CONFIG=$(jq -r '.item.config_yaml // ""' <<< "$RAW_JSON") + CURRENT_LOGSTASH_ADV_CONFIG_HASH=$(sha256sum <<< "$CURRENT_LOGSTASH_ADV_CONFIG" | awk '{print $1}') + NEW_LOGSTASH_ADV_CONFIG=$'{{ LOGSTASH_CONFIG_YAML }}' + NEW_LOGSTASH_ADV_CONFIG_HASH=$(sha256sum <<< "$NEW_LOGSTASH_ADV_CONFIG" | awk '{print $1}') + + if [ "$CURRENT_LOGSTASH_ADV_CONFIG_HASH" != "$NEW_LOGSTASH_ADV_CONFIG_HASH" ]; then + FORCE_UPDATE=true + fi + # Get the current list of Logstash outputs & hash them CURRENT_LIST=$(jq -c -r '.item.hosts' <<< "$RAW_JSON") - CURRENT_HASH=$(sha1sum <<< "$CURRENT_LIST" | awk '{print $1}') + CURRENT_HASH=$(sha256sum <<< "$CURRENT_LIST" | awk '{print $1}') declare -a NEW_LIST=() @@ -148,10 +159,10 @@ function update_kafka_outputs() { # Sort & hash the new list of Logstash Outputs NEW_LIST_JSON=$(jq --compact-output --null-input '$ARGS.positional' --args -- "${NEW_LIST[@]}") -NEW_HASH=$(sha1sum <<< "$NEW_LIST_JSON" | awk '{print $1}') +NEW_HASH=$(sha256sum <<< "$NEW_LIST_JSON" | awk '{print $1}') # Compare the current & new list of outputs - if different, update the Logstash outputs -if [ "$NEW_HASH" = "$CURRENT_HASH" ]; then +if [[ "$NEW_HASH" = "$CURRENT_HASH" ]] && [[ "$FORCE_UPDATE" != "true" ]]; then printf "\nHashes match - no update needed.\n" printf "Current List: $CURRENT_LIST\nNew List: $NEW_LIST_JSON\n" From 99cb51482fb1b03c32af7375ab6c1b57517b2687 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Fri, 21 Nov 2025 14:32:58 -0600 Subject: [PATCH 22/67] unneeded 'set' --- salt/manager/managed_soc_annotations.sls | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/salt/manager/managed_soc_annotations.sls b/salt/manager/managed_soc_annotations.sls index 64f45d40d..4357b53a2 100644 --- a/salt/manager/managed_soc_annotations.sls +++ b/salt/manager/managed_soc_annotations.sls @@ -25,11 +25,11 @@ {% set index_settings = es.get('index_settings', {}) %} {% set input = index_settings.get('so-logs', {}) %} {% for k in matched_integration_names %} - {% set _ = index_settings.update({k: input}) %} + {% do index_settings.update({k: input}) %} {% endfor %} {% for k in addon_integration_keys %} {% if k not in matched_integration_names and k in index_settings %} - {% set _ = index_settings.pop(k) %} + {% do index_settings.pop(k) %} {% endif %} {% endfor %} {{ data }} @@ -44,11 +44,11 @@ {% set index_settings = es.get('index_settings', {}) %} {% for k in matched_integration_names %} {% set input = ADDON_INTEGRATION_DEFAULTS[k] %} - {% set _ = index_settings.update({k: input})%} + {% do index_settings.update({k: input})%} {% endfor %} {% for k in addon_integration_keys %} {% if k not in matched_integration_names and k in index_settings %} - {% set _ = index_settings.pop(k) %} + {% do index_settings.pop(k) %} {% endif %} {% endfor %} {{ data }} From 
b80ec95fa8e16345b356d40726bd1ce41d1aeef9 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Fri, 21 Nov 2025 14:41:03 -0600 Subject: [PATCH 23/67] update regex, revert to default will allow setting value back to '' | None --- salt/elasticsearch/soc_elasticsearch.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/salt/elasticsearch/soc_elasticsearch.yaml b/salt/elasticsearch/soc_elasticsearch.yaml index 27d5654b5..7fd4f8329 100644 --- a/salt/elasticsearch/soc_elasticsearch.yaml +++ b/salt/elasticsearch/soc_elasticsearch.yaml @@ -149,7 +149,7 @@ elasticsearch: max_primary_shard_size: title: max shard size description: Desired shard size in gb/tb/pb eg. 100gb. Note that this value is only used when the shrink method selected is 'SIZE'. - regex: ^(?:[0-9]+(?:gb|tb|pb)|)$ + regex: ^[0-9]+(?:gb|tb|pb)$ global: True forcedType: string advanced: True @@ -222,7 +222,7 @@ elasticsearch: max_primary_shard_size: title: max shard size description: Desired shard size in gb/tb/pb eg. 100gb. Note that this value is only used when the shrink method selected is 'SIZE'. - regex: ^(?:[0-9]+(?:gb|tb|pb)|)$ + regex: ^[0-9]+(?:gb|tb|pb)$ global: True forcedType: string advanced: True @@ -398,7 +398,7 @@ elasticsearch: max_primary_shard_size: title: max shard size description: Desired shard size in gb/tb/pb eg. 100gb. Note that this value is only used when the shrink method selected is 'SIZE'. - regex: ^(?:[0-9]+(?:gb|tb|pb)|)$ + regex: ^[0-9]+(?:gb|tb|pb)$ global: True forcedType: string advanced: True @@ -465,7 +465,7 @@ elasticsearch: max_primary_shard_size: title: max shard size description: Desired shard size in gb/tb/pb eg. 100gb. Note that this value is only used when the shrink method selected is 'SIZE'. 
- regex: ^(?:[0-9]+(?:gb|tb|pb)|)$ + regex: ^[0-9]+(?:gb|tb|pb)$ global: True forcedType: string advanced: True From 415ea07a4ff43e73242c5f121797758921fb976b Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Fri, 21 Nov 2025 16:04:26 -0600 Subject: [PATCH 24/67] clean up --- salt/elasticsearch/template.map.jinja | 113 +++++++++----------------- 1 file changed, 38 insertions(+), 75 deletions(-) diff --git a/salt/elasticsearch/template.map.jinja b/salt/elasticsearch/template.map.jinja index 904ab862c..659823df8 100644 --- a/salt/elasticsearch/template.map.jinja +++ b/salt/elasticsearch/template.map.jinja @@ -64,88 +64,51 @@ {# advanced ilm actions #} {% if settings.policy is defined and settings.policy.phases is defined %} -{# start HOT actions #} -{# only run if hot action is defined for this index #} -{% if settings.policy.phases.hot is defined and settings.policy.phases.hot.actions is defined %} -{% set HA = settings.policy.phases.hot.actions %} -{% if HA.shrink is defined %} -{% if HA.shrink.method is defined %} -{% if HA.shrink.method == 'COUNT' and HA.shrink.number_of_shards is defined and HA.shrink.number_of_shards %} -{# remove max_primary_shard_size value when doing shrink operation by count vs size #} -{% do HA.shrink.pop('max_primary_shard_size', none) %} -{% elif HA.shrink.method == 'SIZE' and HA.shrink.max_primary_shard_size is defined and HA.shrink.max_primary_shard_size %} -{# remove number_of_shards value when doing shrink operation by size vs count #} -{% do HA.shrink.pop('number_of_shards', none) %} -{% else %} -{# method isn't defined or missing a required config number_of_shards/max_primary_shard_size #} -{% do HA.pop('shrink', none) %} +{% set PHASE_NAMES = ["hot", "warm", "cold"] %} +{% for P in PHASE_NAMES %} +{% if settings.policy.phases[P] is defined and settings.policy.phases[P].actions is defined %} +{% set PHASE = settings.policy.phases[P].actions %} +{# remove allocate action if number_of_replicas isn't configured #} +{% if PHASE.allocate is defined %} +{% if PHASE.allocate.number_of_replicas is defined and not PHASE.allocate.number_of_replicas %} +{% do PHASE.pop('allocate', none) %} {% endif %} {% endif %} -{% endif %} -{# always remove method since its only used for SOC config, not in the actual ilm policy #} -{% if HA.shrink is defined %} -{% do HA.shrink.pop('method', none) %} -{% endif %} -{# end shrink action #} -{# start force merge #} -{% if HA.forcemerge is defined %} -{% if HA.forcemerge.index_codec is defined and HA.forcemerge.index_codec %} -{% do HA.forcemerge.update({'index_codec': 'best_compression'}) %} -{% else %} -{% do HA.forcemerge.pop('index_codec', none) %} -{% endif %} -{% if HA.forcemerge.max_num_segments is defined and not HA.forcemerge.max_num_segments %} -{# max_num_segments is empty, drop it #} -{% do HA.pop('forcemerge', none) %} -{% endif %} -{% endif %} -{# end force merge #} -{% endif %} -{# end HOT actions #} -{# Start WARM actions #} -{# only run if warm action is defined for this index #} -{% if settings.policy.phases.warm is defined and settings.policy.phases.warm.actions is defined %} -{% set WA = settings.policy.phases.warm.actions %} -{# start warm shrink action #} -{% if WA.shrink is defined %} -{% if WA.shrink.method is defined %} -{% if WA.shrink.method == 'COUNT' and WA.shrink.number_of_shards is defined and WA.shrink.number_of_shards %} -{# remove max_primary_shard_size value when doing shrink operation by count vs size #} -{% do WA.shrink.pop('max_primary_shard_size', none) %} 
-{% elif WA.shrink.method == 'SIZE' and WA.shrink.max_primary_shard_size is defined and WA.shrink.max_primary_shard_size %} -{# remove number_of_shards value when doing shrink operation by size vs count #} -{% do WA.shrink.pop('number_of_shards', none) %} -{% else %} -{# method isn't defined or missing a required config number_of_shards/max_primary_shard_size #} -{% do WA.pop('shrink', none) %} +{# start shrink action #} +{% if PHASE.shrink is defined %} +{% if PHASE.shrink.method is defined %} +{% if PHASE.shrink.method == 'COUNT' and PHASE.shrink.number_of_shards is defined and PHASE.shrink.number_of_shards %} +{# remove max_primary_shard_size value when doing shrink operation by count vs size #} +{% do PHASE.shrink.pop('max_primary_shard_size', none) %} +{% elif PHASE.shrink.method == 'SIZE' and PHASE.shrink.max_primary_shard_size is defined and PHASE.shrink.max_primary_shard_size %} +{# remove number_of_shards value when doing shrink operation by size vs count #} +{% do PHASE.shrink.pop('number_of_shards', none) %} +{% else %} +{# method isn't defined or missing a required config number_of_shards/max_primary_shard_size #} +{% do PHASE.pop('shrink', none) %} +{% endif %} {% endif %} {% endif %} -{% endif %} -{# always remove method since its only used for SOC config, not in the actual ilm policy #} -{% if WA.shrink is defined %} -{% do WA.shrink.pop('method', none) %} -{% endif %} -{# end shrink action #} -{# start force merge #} -{% if WA.forcemerge is defined %} -{% if WA.forcemerge.index_codec is defined and WA.forcemerge.index_codec %} -{% do WA.forcemerge.update({'index_codec': 'best_compression'}) %} -{% else %} -{% do WA.forcemerge.pop('index_codec', none) %} +{# always remove shrink method since its only used for SOC config, not in the actual ilm policy #} +{% if PHASE.shrink is defined %} +{% do PHASE.shrink.pop('method', none) %} {% endif %} -{% if WA.forcemerge.max_num_segments is defined and not WA.forcemerge.max_num_segments %} -{# max_num_segments is empty, drop it #} -{% do WA.pop('forcemerge', none) %} +{# end shrink action #} +{# start force merge #} +{% if PHASE.forcemerge is defined %} +{% if PHASE.forcemerge.index_codec is defined and PHASE.forcemerge.index_codec %} +{% do PHASE.forcemerge.update({'index_codec': 'best_compression'}) %} +{% else %} +{% do PHASE.forcemerge.pop('index_codec', none) %} +{% endif %} +{% if PHASE.forcemerge.max_num_segments is defined and not PHASE.forcemerge.max_num_segments %} +{# max_num_segments is empty, drop it #} +{% do PHASE.pop('forcemerge', none) %} +{% endif %} {% endif %} +{# end force merge #} {% endif %} -{# end force merge #} -{% if WA.allocate is defined %} -{% if WA.allocate.number_of_replicas is defined and not WA.allocate.number_of_replicas %} -{% do WA.pop('allocate', none) %} -{% endif %} -{% endif %} -{% endif %} -{# end WARM actions #} +{% endfor %} {% endif %} {% do ES_INDEX_SETTINGS.update({index | replace("_x_", "."): ES_INDEX_SETTINGS_GLOBAL_OVERRIDES[index]}) %} From 3339b50dafbb08ba503bd842f41339c99ce484f8 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Fri, 21 Nov 2025 16:39:45 -0600 Subject: [PATCH 25/67] drop forcemerge when max_num_segements doesn't exist or empty --- salt/elasticsearch/template.map.jinja | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/elasticsearch/template.map.jinja b/salt/elasticsearch/template.map.jinja index 659823df8..b726445ed 100644 --- a/salt/elasticsearch/template.map.jinja +++ b/salt/elasticsearch/template.map.jinja 
@@ -101,7 +101,7 @@ {% else %} {% do PHASE.forcemerge.pop('index_codec', none) %} {% endif %} -{% if PHASE.forcemerge.max_num_segments is defined and not PHASE.forcemerge.max_num_segments %} +{% if PHASE.forcemerge.max_num_segments is not defined or not PHASE.forcemerge.max_num_segments %} {# max_num_segments is empty, drop it #} {% do PHASE.pop('forcemerge', none) %} {% endif %} From cc8fb96047544a62f73b357fd86b70fb0e8fe5d2 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Mon, 24 Nov 2025 11:12:09 -0600 Subject: [PATCH 26/67] valid config for number_of_replicas in allocate action includes 0 --- salt/elasticsearch/template.map.jinja | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/elasticsearch/template.map.jinja b/salt/elasticsearch/template.map.jinja index b726445ed..2563f8e23 100644 --- a/salt/elasticsearch/template.map.jinja +++ b/salt/elasticsearch/template.map.jinja @@ -70,7 +70,7 @@ {% set PHASE = settings.policy.phases[P].actions %} {# remove allocate action if number_of_replicas isn't configured #} {% if PHASE.allocate is defined %} -{% if PHASE.allocate.number_of_replicas is defined and not PHASE.allocate.number_of_replicas %} +{% if PHASE.allocate.number_of_replicas is not defined or PHASE.allocate.number_of_replicas == "" %} {% do PHASE.pop('allocate', none) %} {% endif %} {% endif %} From ced3af818c8b18c1a6961b4b1c4b57680c2e26a4 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Tue, 25 Nov 2025 13:51:50 -0500 Subject: [PATCH 27/67] Refactor for Airgap --- salt/manager/init.sls | 1 + salt/soc/config.sls | 34 +++++++++++- salt/soc/defaults.yaml | 46 ++++++++++++---- ...tections_custom_repo_template_readme.jinja | 55 +++++++++++++++++++ salt/soc/merged.map.jinja | 11 ++++ salt/suricata/config.sls | 14 +++++ salt/suricata/files/so_extraction.rules | 26 +++++++++ salt/suricata/files/so_filters.rules | 12 ++++ 8 files changed, 188 insertions(+), 11 deletions(-) create mode 100644 salt/suricata/files/so_extraction.rules create mode 100644 salt/suricata/files/so_filters.rules diff --git a/salt/manager/init.sls b/salt/manager/init.sls index f59c33652..da829c1ce 100644 --- a/salt/manager/init.sls +++ b/salt/manager/init.sls @@ -206,6 +206,7 @@ git_config_set_safe_dirs: - multivar: - /nsm/rules/custom-local-repos/local-sigma - /nsm/rules/custom-local-repos/local-yara + - /nsm/rules/custom-local-repos/local-suricata - /nsm/securityonion-resources - /opt/so/conf/soc/ai_summary_repos/securityonion-resources - /nsm/airgap-resources/playbooks diff --git a/salt/soc/config.sls b/salt/soc/config.sls index 78a495e0a..7e2beefa0 100644 --- a/salt/soc/config.sls +++ b/salt/soc/config.sls @@ -215,7 +215,6 @@ socsensoronirepos: - mode: 775 - makedirs: True - create_custom_local_yara_repo_template: git.present: - name: /nsm/rules/custom-local-repos/local-yara @@ -249,6 +248,39 @@ add_readme_custom_local_sigma_repo_template: - context: repo_type: "sigma" +create_custom_local_suricata_repo_template: + git.present: + - name: /nsm/rules/custom-local-repos/local-suricata + - bare: False + - force: True + +add_readme_custom_local_suricata_repo_template: + file.managed: + - name: /nsm/rules/custom-local-repos/local-suricata/README + - source: salt://soc/files/soc/detections_custom_repo_template_readme.jinja + - user: 939 + - group: 939 + - template: jinja + - context: + repo_type: "suricata" + +etpro_airgap_folder: + file.directory: + - name: /nsm/rules/custom-local-repos/local-etpro-suricata + - user: 939 + - group: 939 + - makedirs: True + 
+add_readme_etpro_airgap_template: + file.managed: + - name: /nsm/rules/custom-local-repos/local-etpro-suricata/README + - source: salt://soc/files/soc/detections_custom_repo_template_readme.jinja + - user: 939 + - group: 939 + - template: jinja + - context: + repo_type: "suricata-etpro" + socore_own_custom_repos: file.directory: - name: /nsm/rules/custom-local-repos/ diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 53cbb10e1..685d0744c 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1595,16 +1595,29 @@ soc: sourcePath: https://sslbl.abuse.ch/blacklist/sslblacklist_tls_cert.tar.gz sourceType: url - name: local-rules - id: local-rules - description: "Local custom rules from files (*.rules) in a directory on the filesystem" + description: "Local rules from files (*.rules) in a directory on the filesystem" license: "custom" sourceType: directory - sourcePath: /nsm/rules/local/ + sourcePath: /nsm/rules/custom-local-repos/local-suricata readOnly: false deleteUnreferenced: false + enabled: true + - name: SO_FILTERS + deleteUnreferenced: true + description: Filter rules for when Suricata is set as the metadata engine enabled: false - excludeFiles: - - "*backup*" + license: Elastic-2.0 + readOnly: true + sourcePath: /nsm/rules/suricata/so_filters.rules + sourceType: directory + - name: SO_EXTRACTIONS + description: Extraction rules for when Suricata is set as the metadata engine + deleteUnreferenced: true + enabled: false + license: Elastic-2.0 + readOnly: true + sourcePath: /nsm/rules/suricata/so_extraction.rules + sourceType: directory airgap: - name: Emerging-Threats description: "Emerging Threats ruleset - To enable ET Pro, enter your license key below. Leave empty for ET Open (free) rules." @@ -1625,16 +1638,29 @@ soc: readOnly: true deleteUnreferenced: true - name: local-rules - id: local-rules - description: "Local custom rules from files (*.rules) in a directory on the filesystem" + description: "Local rules from files (*.rules) in a directory on the filesystem" license: "custom" sourceType: directory - sourcePath: /nsm/rules/local/ + sourcePath: /nsm/rules/custom-local-repos/local-suricata readOnly: false deleteUnreferenced: false + enabled: true + - name: SO_FILTERS + deleteUnreferenced: true + description: Filter rules for when Suricata is set as the metadata engine enabled: false - excludeFiles: - - "*backup*" + license: Elastic-2.0 + readOnly: true + sourcePath: /nsm/rules/suricata/so_filters.rules + sourceType: directory + - name: SO_EXTRACTIONS + description: Extraction rules for when Suricata is set as the metadata engine + deleteUnreferenced: true + enabled: false + license: Elastic-2.0 + readOnly: true + sourcePath: /nsm/rules/suricata/so_extraction.rules + sourceType: directory navigator: intervalMinutes: 30 outputPath: /opt/sensoroni/navigator diff --git a/salt/soc/files/soc/detections_custom_repo_template_readme.jinja b/salt/soc/files/soc/detections_custom_repo_template_readme.jinja index 228a467bf..060b8ec6e 100644 --- a/salt/soc/files/soc/detections_custom_repo_template_readme.jinja +++ b/salt/soc/files/soc/detections_custom_repo_template_readme.jinja @@ -45,6 +45,61 @@ Finally, commit it: The next time the Strelka / YARA engine syncs, the new rule should be imported If there are errors, review the sync log to troubleshoot further. 
+{% elif repo_type == 'suricata' %}
+# Suricata Local Custom Rules Repository
+
+This folder has already been initialized as a git repo
+and your Security Onion grid is configured to import any Suricata rule files found here.
+
+Just add your rule file and commit it.
+
+For example:
+
+** Note: If this is your first time making changes to this repo, you may run into the following error:
+
+fatal: detected dubious ownership in repository at '/nsm/rules/custom-local-repos/local-suricata'
+To add an exception for this directory, call:
+ git config --global --add safe.directory /nsm/rules/custom-local-repos/local-suricata
+
+This means that the user you are running commands as does not match the user that is used for this git repo (socore).
+You will need to make sure your rule files are accessible to the socore user, so either su to socore
+or add the exception and then chown the rule files later.
+
+Also, you will be asked to set some configuration:
+```
+Author identity unknown
+*** Please tell me who you are.
+Run
+ git config --global user.email "you@example.com"
+ git config --global user.name "Your Name"
+to set your account's default identity.
+Omit --global to set the identity only in this repository.
+```
+
+Run these commands, omitting the `--global`.
+
+With that out of the way:
+
+First, create the rule file with a .rules extension:
+`vi my_custom_rules.rules`
+
+Next, use git to stage the new rule to be committed:
+`git add my_custom_rules.rules`
+
+Finally, commit it:
+`git commit -m "Initial commit of my_custom_rule.rules"`
+
+The next time the Suricata engine syncs, the new rule(s) should be imported.
+If there are errors, review the sync log to troubleshoot further.
+
+{% elif repo_type == 'suricata-etpro' %}
+# Suricata ETPRO - Airgap
+
+This folder has been initialized for use with ETPRO during Airgap deployment.
+
+Just add your ETPRO rule file(s) to this folder and the Suricata engine will import them.
+
+If there are errors, review the sync log to troubleshoot further.
{% elif repo_type == 'sigma' %} # Sigma Local Custom Rules Repository diff --git a/salt/soc/merged.map.jinja b/salt/soc/merged.map.jinja index b43ccaf1b..cd23a17f3 100644 --- a/salt/soc/merged.map.jinja +++ b/salt/soc/merged.map.jinja @@ -90,6 +90,17 @@ {% endif %} {% endif %} +{# Enable SO_FILTERS and SO_EXTRACTIONS when Suricata is the metadata engine #} +{% if SOCMERGED.config.server.modules.suricataengine is defined and SOCMERGED.config.server.modules.suricataengine.rulesetSources is defined %} +{% if SOCMERGED.config.server.modules.suricataengine.rulesetSources is not mapping %} +{% for ruleset in SOCMERGED.config.server.modules.suricataengine.rulesetSources %} +{% if ruleset.name in ['SO_FILTERS', 'SO_EXTRACTIONS'] and GLOBALS.md_engine == 'SURICATA' %} +{% do ruleset.update({'enabled': true}) %} +{% endif %} +{% endfor %} +{% endif %} +{% endif %} + {# Transform Emerging-Threats ruleset based on license key #} {% if SOCMERGED.config.server.modules.suricataengine is defined and SOCMERGED.config.server.modules.suricataengine.rulesetSources is defined %} {% if SOCMERGED.config.server.modules.suricataengine.rulesetSources is not mapping %} diff --git a/salt/suricata/config.sls b/salt/suricata/config.sls index 7ce605e0b..46f5b1d7e 100644 --- a/salt/suricata/config.sls +++ b/salt/suricata/config.sls @@ -126,6 +126,20 @@ surirulesync: - group: 940 - show_changes: False +suriextractionrules: + file.managed: + - name: /nsm/rules/suricata/so_extraction.rules + - source: salt://suricata/files/so_extraction.rules + - user: 939 + - group: 939 + +surifiltersrules: + file.managed: + - name: /nsm/rules/suricata/so_filters.rules + - source: salt://suricata/files/so_filters.rules + - user: 939 + - group: 939 + surilogscript: file.managed: - name: /usr/local/bin/surilogcompress diff --git a/salt/suricata/files/so_extraction.rules b/salt/suricata/files/so_extraction.rules new file mode 100644 index 000000000..d43812144 --- /dev/null +++ b/salt/suricata/files/so_extraction.rules @@ -0,0 +1,26 @@ +# Extract all PDF mime type +alert http any any -> any any (msg:"FILE pdf detected"; filemagic:"PDF document"; filestore; noalert; sid:1100000; rev:1;) +alert smtp any any -> any any (msg:"FILE pdf detected"; filemagic:"PDF document"; filestore; noalert; sid:1100001; rev:1;) +alert nfs any any -> any any (msg:"FILE pdf detected"; filemagic:"PDF document"; filestore; noalert; sid:1100002; rev:1;) +alert smb any any -> any any (msg:"FILE pdf detected"; filemagic:"PDF document"; filestore; noalert; sid:1100003; rev:1;) +# Extract EXE/DLL file types +alert http any any -> any any (msg:"FILE EXE detected"; filemagic:"PE32 executable"; filestore; noalert; sid:1100004; rev:1;) +alert smtp any any -> any any (msg:"FILE EXE detected"; filemagic:"PE32 executable"; filestore; noalert; sid:1100005; rev:1;) +alert nfs any any -> any any (msg:"FILE EXE detected"; filemagic:"PE32 executable"; filestore; noalert; sid:1100006; rev:1;) +alert smb any any -> any any (msg:"FILE EXE detected"; filemagic:"PE32 executable"; filestore; noalert; sid:1100007; rev:1;) +alert http any any -> any any (msg:"FILE EXE detected"; filemagic:"MS-DOS executable"; filestore; noalert; sid:1100008; rev:1;) +alert smtp any any -> any any (msg:"FILE EXE detected"; filemagic:"MS-DOS executable"; filestore; noalert; sid:1100009; rev:1;) +alert nfs any any -> any any (msg:"FILE EXE detected"; filemagic:"MS-DOS executable"; filestore; noalert; sid:1100010; rev:1;) +alert smb any any -> any any (msg:"FILE EXE detected"; filemagic:"MS-DOS executable"; 
filestore; noalert; sid:1100011; rev:1;) + +# Extract all Zip files +alert http any any -> any any (msg:"FILE ZIP detected"; filemagic:"Zip"; filestore; noalert; sid:1100012; rev:1;) +alert smtp any any -> any any (msg:"FILE ZIP detected"; filemagic:"Zip"; filestore; noalert; sid:1100013; rev:1;) +alert nfs any any -> any any (msg:"FILE ZIP detected"; filemagic:"Zip"; filestore; noalert; sid:1100014; rev:1;) +alert smb any any -> any any (msg:"FILE ZIP detected"; filemagic:"Zip"; filestore; noalert; sid:1100015; rev:1;) + +# Extract Word Docs +alert http any any -> any any (msg:"FILE WORDDOC detected"; filemagic:"Composite Document File V2 Document"; filestore; noalert; sid:1100016; rev:1;) +alert smtp any any -> any any (msg:"FILE WORDDOC detected"; filemagic:"Composite Document File V2 Document"; filestore; noalert; sid:1100017; rev:1;) +alert nfs any any -> any any (msg:"FILE WORDDOC detected"; filemagic:"Composite Document File V2 Document"; filestore; noalert; sid:1100018; rev:1;) +alert smb any any -> any any (msg:"FILE WORDDOC detected"; filemagic:"Composite Document File V2 Document"; filestore; noalert; sid:1100019; rev:1;) diff --git a/salt/suricata/files/so_filters.rules b/salt/suricata/files/so_filters.rules new file mode 100644 index 000000000..c49eaec26 --- /dev/null +++ b/salt/suricata/files/so_filters.rules @@ -0,0 +1,12 @@ +# Start the filters at sid 1200000 +# Example of filtering out *google.com from being in the dns log. +#config dns any any -> any any (dns.query; content:"google.com"; config: logging disable, type tx, scope tx; sid:1200000;) +# Example of filtering out *google.com from being in the http log. +#config http any any -> any any (http.host; content:"google.com"; config: logging disable, type tx, scope tx; sid:1200001;) +# Example of filtering out someuseragent from being in the http log. +#config http any any -> any any (http.user_agent; content:"someuseragent"; config: logging disable, type tx, scope tx; sid:1200002;) +# Example of filtering out Google's certificate from being in the ssl log. +#config tls any any -> any any (tls.fingerprint; content:"4f:a4:5e:58:7e:d9:db:20:09:d7:b6:c7:ff:58:c4:7b:dc:3f:55:b4"; config: logging disable, type tx, scope tx; sid:1200003;) +# Example of filtering out a md5 of a file from being in the files log. 
+#config fileinfo any any -> any any (fileinfo.filemd5; content:"7a125dc69c82d5caf94d3913eecde4b5"; config: logging disable, type tx, scope tx; sid:1200004;) + From 1284150382791b919c0ebd415a6da4f55d546563 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Thu, 27 Nov 2025 08:39:19 -0500 Subject: [PATCH 28/67] Move to manager init --- salt/manager/init.sls | 22 ++++++++++++++++++++++ salt/suricata/config.sls | 14 -------------- 2 files changed, 22 insertions(+), 14 deletions(-) diff --git a/salt/manager/init.sls b/salt/manager/init.sls index da829c1ce..cf97a6f0b 100644 --- a/salt/manager/init.sls +++ b/salt/manager/init.sls @@ -211,6 +211,28 @@ git_config_set_safe_dirs: - /opt/so/conf/soc/ai_summary_repos/securityonion-resources - /nsm/airgap-resources/playbooks - /opt/so/conf/soc/playbooks + +surinsmrulesdir: + file.directory: + - name: /nsm/rules/suricata + - user: 939 + - group: 939 + - makedirs: True + +suriextractionrules: + file.managed: + - name: /nsm/rules/suricata/so_extraction.rules + - source: salt://suricata/files/so_extraction.rules + - user: 939 + - group: 939 + +surifiltersrules: + file.managed: + - name: /nsm/rules/suricata/so_filters.rules + - source: salt://suricata/files/so_filters.rules + - user: 939 + - group: 939 + {% else %} {{sls}}_state_not_allowed: diff --git a/salt/suricata/config.sls b/salt/suricata/config.sls index 46f5b1d7e..7ce605e0b 100644 --- a/salt/suricata/config.sls +++ b/salt/suricata/config.sls @@ -126,20 +126,6 @@ surirulesync: - group: 940 - show_changes: False -suriextractionrules: - file.managed: - - name: /nsm/rules/suricata/so_extraction.rules - - source: salt://suricata/files/so_extraction.rules - - user: 939 - - group: 939 - -surifiltersrules: - file.managed: - - name: /nsm/rules/suricata/so_filters.rules - - source: salt://suricata/files/so_filters.rules - - user: 939 - - group: 939 - surilogscript: file.managed: - name: /usr/local/bin/surilogcompress From 89a9106d79e002747e9e526d9046f61ea5d7107b Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Sat, 29 Nov 2025 15:17:28 -0500 Subject: [PATCH 29/67] Add context --- salt/manager/tools/sbin/soup | 9 +++++---- salt/soc/merged.map.jinja | 1 + 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index f55395691..ba954ca6f 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -1234,11 +1234,12 @@ check_config_file() { echo "Does not match known default - custom configuration detected" echo "Custom $file_display_name detected (hash: $file_hash)" >> /opt/so/conf/soc/fingerprints/suricataengine.syncBlock - # If this is so-rule-update, check for ETPRO key + # If this is so-rule-update, check for ETPRO license code and write out to the syncBlock file + # If ETPRO is enabled, the license code already exists in the so-rule-update script, this is just making it easier to migrate if [[ "$file_display_name" == "so-rule-update" ]]; then - etpro_key=$(grep -oP '\-\-etpro=\K[0-9a-fA-F]+' "$file" 2>/dev/null || true) - if [[ -n "$etpro_key" ]]; then - echo "ETPRO key found: $etpro_key" >> /opt/so/conf/soc/fingerprints/suricataengine.syncBlock + etpro_code=$(grep -oP '\-\-etpro=\K[0-9a-fA-F]+' "$file" 2>/dev/null || true) + if [[ -n "$etpro_code" ]]; then + echo "ETPRO code found: $etpro_code" >> /opt/so/conf/soc/fingerprints/suricataengine.syncBlock fi fi diff --git a/salt/soc/merged.map.jinja b/salt/soc/merged.map.jinja index cd23a17f3..e1532462c 100644 --- a/salt/soc/merged.map.jinja +++ b/salt/soc/merged.map.jinja 
@@ -108,6 +108,7 @@ {% if ruleset.name == 'Emerging-Threats' %} {% if ruleset.licenseKey and ruleset.licenseKey != '' %} {# License key is defined - transform to ETPRO #} +{# Engine Version is hardcoded in the URL - this does not change often: https://community.emergingthreats.net/t/supported-engines/71 #} {% do ruleset.update({ 'name': 'ETPRO', 'sourcePath': 'https://rules.emergingthreatspro.com/' ~ ruleset.licenseKey ~ '/suricata-7.0.3/etpro.rules.tar.gz', From 87477ae4f66903d0fcf08a744bd74365b0b2af47 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Sat, 29 Nov 2025 15:40:10 -0500 Subject: [PATCH 30/67] Removed uneeded bind --- salt/soc/enabled.sls | 1 - 1 file changed, 1 deletion(-) diff --git a/salt/soc/enabled.sls b/salt/soc/enabled.sls index 0319c6c81..62e673ffc 100644 --- a/salt/soc/enabled.sls +++ b/salt/soc/enabled.sls @@ -26,7 +26,6 @@ so-soc: - /nsm/rules:/nsm/rules:rw - /opt/so/conf/strelka:/opt/sensoroni/yara:rw - /opt/so/conf/sigma:/opt/sensoroni/sigma:rw - - /opt/so/conf/suricata:/opt/sensoroni/suricata:rw - /opt/so/rules/elastalert/rules:/opt/sensoroni/elastalert:rw - /opt/so/saltstack/local/salt/suricata/rules:/opt/sensoroni/suricata/rules:rw - /opt/so/saltstack/local/salt/suricata/files:/opt/sensoroni/suricata/threshold:rw From 65c96b2edf07d05a43317786e5afd6e45b2c1c26 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Sat, 29 Nov 2025 16:27:22 -0500 Subject: [PATCH 31/67] Add error handling --- salt/manager/tools/sbin/soup | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index ba954ca6f..af0222414 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -1124,10 +1124,17 @@ if [[ -f /opt/so/conf/soc/so-detections-backup.py ]]; then # Verify backup by comparing counts echo "Verifying detection overrides backup..." - es_override_count=$(curl -s -K /opt/so/conf/elasticsearch/curl.config -k -L \ - "https://localhost:9200/so-detection/_count" \ - -H "Content-Type: application/json" \ - -d '{"query": {"bool": {"must": [{"exists": {"field": "so_detection.overrides"}}]}}}' | jq -r '.count') + es_override_count=$(/sbin/so-elasticsearch-query 'so-detection/_count' \ + -H 'Content-Type: application/json' \ + -d '{"query": {"bool": {"must": [{"exists": {"field": "so_detection.overrides"}}]}}}' | jq -r '.count') || { + echo " Error: Failed to query Elasticsearch for override count" + exit 1 + } + + if [[ ! "$es_override_count" =~ ^[0-9]+$ ]]; then + echo " Error: Invalid override count from Elasticsearch: '$es_override_count'" + exit 1 + fi backup_override_count=$(find /nsm/backup/detections/repo/*/overrides -type f 2>/dev/null | wc -l) From e96cfd35f70b115435f494933f45d1f4fbe25408 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Sat, 29 Nov 2025 17:00:51 -0500 Subject: [PATCH 32/67] Refactor for simplicity --- salt/manager/tools/sbin/soup | 61 +++++++++++++----------------------- 1 file changed, 22 insertions(+), 39 deletions(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index af0222414..f2e584bf6 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -1162,36 +1162,22 @@ suricata_idstools_removal_post() { echo "Checking idstools configuration for custom modifications..." 
-# Normalize file content for consistent hashing +# Normalize and hash file content for consistent comparison # Args: $1 - file path -normalize_file() { +# Outputs: SHA256 hash to stdout +# Returns: 0 on success, 1 on failure +hash_normalized_file() { local file="$1" - if [[ ! -f "$file" ]]; then - echo "FILE_NOT_FOUND" + if [[ ! -r "$file" ]]; then return 1 fi - # Strip whitespace, normalize hostname, remove blank lines sed -E \ -e 's/^[[:space:]]+//; s/[[:space:]]+$//' \ -e '/^$/d' \ -e 's|--url=http://[^:]+:7788|--url=http://MANAGER:7788|' \ - "$file" -} - -# Hash normalized content -hash_file() { - local file="$1" - - local normalized=$(normalize_file "$file") - - if [[ "$normalized" == "FILE_NOT_FOUND" ]]; then - echo "FILE_NOT_FOUND" - return 1 - fi - - echo -n "$normalized" | sha256sum | awk '{print $1}' + "$file" | sha256sum | awk '{print $1}' } # Known-default hashes @@ -1213,47 +1199,44 @@ check_config_file() { if [[ ! -f "$file" ]]; then echo "Warning: $file not found" + echo "$file_display_name not found - manual verification required" >> /opt/so/conf/soc/fingerprints/suricataengine.syncBlock return 1 fi echo "Hashing $file..." - local file_hash=$(hash_file "$file") - - if [[ "$file_hash" == "FILE_NOT_FOUND" ]]; then + local file_hash + if ! file_hash=$(hash_normalized_file "$file"); then echo "Warning: Could not read $file" + echo "$file_display_name not readable - manual verification required" >> /opt/so/conf/soc/fingerprints/suricataengine.syncBlock return 1 fi echo " Hash: $file_hash" # Check if hash matches any known default - local match_found=0 local -n known_hashes=$known_hashes_array for known_hash in "${known_hashes[@]}"; do if [[ "$file_hash" == "$known_hash" ]]; then - match_found=1 echo " Matches known default configuration" - break + return 0 fi done - if [[ $match_found -eq 0 ]]; then - echo "Does not match known default - custom configuration detected" - echo "Custom $file_display_name detected (hash: $file_hash)" >> /opt/so/conf/soc/fingerprints/suricataengine.syncBlock + # No match - custom configuration detected + echo "Does not match known default - custom configuration detected" + echo "Custom $file_display_name detected (hash: $file_hash)" >> /opt/so/conf/soc/fingerprints/suricataengine.syncBlock - # If this is so-rule-update, check for ETPRO license code and write out to the syncBlock file - # If ETPRO is enabled, the license code already exists in the so-rule-update script, this is just making it easier to migrate - if [[ "$file_display_name" == "so-rule-update" ]]; then - etpro_code=$(grep -oP '\-\-etpro=\K[0-9a-fA-F]+' "$file" 2>/dev/null || true) - if [[ -n "$etpro_code" ]]; then - echo "ETPRO code found: $etpro_code" >> /opt/so/conf/soc/fingerprints/suricataengine.syncBlock - fi + # If this is so-rule-update, check for ETPRO license code and write out to the syncBlock file + # If ETPRO is enabled, the license code already exists in the so-rule-update script, this is just making it easier to migrate + if [[ "$file_display_name" == "so-rule-update" ]]; then + local etpro_code + etpro_code=$(grep -oP '\-\-etpro=\K[0-9a-fA-F]+' "$file" 2>/dev/null) || true + if [[ -n "$etpro_code" ]]; then + echo "ETPRO code found: $etpro_code" >> /opt/so/conf/soc/fingerprints/suricataengine.syncBlock fi - - return 1 fi - return 0 + return 1 } # Check so-rule-update and rulecat.conf From bda83a47a2f2f45af528e5da823b76aef34603a4 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Sat, 29 Nov 2025 17:45:22 -0500 Subject: [PATCH 33/67] Remove header --- 
salt/manager/tools/sbin/soup | 1 - 1 file changed, 1 deletion(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index f2e584bf6..59690c0bf 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -1125,7 +1125,6 @@ if [[ -f /opt/so/conf/soc/so-detections-backup.py ]]; then # Verify backup by comparing counts echo "Verifying detection overrides backup..." es_override_count=$(/sbin/so-elasticsearch-query 'so-detection/_count' \ - -H 'Content-Type: application/json' \ -d '{"query": {"bool": {"must": [{"exists": {"field": "so_detection.overrides"}}]}}}' | jq -r '.count') || { echo " Error: Failed to query Elasticsearch for override count" exit 1 From 999f83ce57cac79bb8b4a0dbb4daed4cfd83c6c1 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Mon, 1 Dec 2025 14:21:58 -0500 Subject: [PATCH 34/67] Create dir earlier --- salt/suricata/config.sls | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/salt/suricata/config.sls b/salt/suricata/config.sls index 7ce605e0b..59ae376dc 100644 --- a/salt/suricata/config.sls +++ b/salt/suricata/config.sls @@ -22,6 +22,14 @@ suriPCAPbpfcompilationfailure: {% endif %} {% endif %} +suridir: + file.directory: + - name: /opt/so/conf/suricata + - user: 940 + - group: 939 + - mode: 775 + - makedirs: True + # BPF applied to all of Suricata - alerts/metadata/pcap suribpf: file.managed: @@ -81,13 +89,6 @@ suricata_sbin_jinja: - file_mode: 755 - template: jinja -suridir: - file.directory: - - name: /opt/so/conf/suricata - - user: 940 - - group: 939 - - mode: 775 - suriruledir: file.directory: - name: /opt/so/rules/suricata From 45a8c0acd1d0ac6d68aafc23d7298748746f6770 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Tue, 2 Dec 2025 11:16:08 -0600 Subject: [PATCH 35/67] merge 2.4/dev --- salt/_runners/setup_hypervisor.py | 175 ++++++++++++----- salt/elasticfleet/enabled.sls | 10 + .../so-elastic-fleet-outputs-update | 63 +++++- .../files/ingest/suricata.common | 183 +++++++++++++++--- .../templates/component/ecs/suricata.json | 4 + .../tools/sbin_jinja/so-kvm-create-volume | 15 +- .../engines/master/virtual_node_manager.py | 3 +- .../hypervisor/soc_hypervisor.yaml.jinja | 20 +- salt/zeek/files/config.zeek.ja4 | 2 + 9 files changed, 381 insertions(+), 94 deletions(-) diff --git a/salt/_runners/setup_hypervisor.py b/salt/_runners/setup_hypervisor.py index 929801783..182a9b2c8 100644 --- a/salt/_runners/setup_hypervisor.py +++ b/salt/_runners/setup_hypervisor.py @@ -172,7 +172,15 @@ MANAGER_HOSTNAME = socket.gethostname() def _download_image(): """ - Download and validate the Oracle Linux KVM image. + Download and validate the Oracle Linux KVM image with retry logic and progress monitoring. 
+ + Features: + - Detects stalled downloads (no progress for 30 seconds) + - Retries up to 3 times on failure + - Connection timeout of 30 seconds + - Read timeout of 60 seconds + - Cleans up partial downloads on failure + Returns: bool: True if successful or file exists with valid checksum, False on error """ @@ -185,45 +193,107 @@ def _download_image(): os.unlink(IMAGE_PATH) log.info("Starting image download process") + + # Retry configuration + max_attempts = 3 + retry_delay = 5 # seconds to wait between retry attempts + stall_timeout = 30 # seconds without progress before considering download stalled + connection_timeout = 30 # seconds to establish connection + read_timeout = 60 # seconds to wait for data chunks + + for attempt in range(1, max_attempts + 1): + log.info("Download attempt %d of %d", attempt, max_attempts) + + try: + # Download file with timeouts + log.info("Downloading Oracle Linux KVM image from %s to %s", IMAGE_URL, IMAGE_PATH) + response = requests.get( + IMAGE_URL, + stream=True, + timeout=(connection_timeout, read_timeout) + ) + response.raise_for_status() - try: - # Download file - log.info("Downloading Oracle Linux KVM image from %s to %s", IMAGE_URL, IMAGE_PATH) - response = requests.get(IMAGE_URL, stream=True) - response.raise_for_status() + # Get total file size for progress tracking + total_size = int(response.headers.get('content-length', 0)) + downloaded_size = 0 + last_log_time = 0 + last_progress_time = time.time() + last_downloaded_size = 0 - # Get total file size for progress tracking - total_size = int(response.headers.get('content-length', 0)) - downloaded_size = 0 - last_log_time = 0 + # Save file with progress logging and stall detection + with salt.utils.files.fopen(IMAGE_PATH, 'wb') as f: + for chunk in response.iter_content(chunk_size=8192): + if chunk: # filter out keep-alive new chunks + f.write(chunk) + downloaded_size += len(chunk) + current_time = time.time() + + # Check for stalled download + if downloaded_size > last_downloaded_size: + # Progress made, reset stall timer + last_progress_time = current_time + last_downloaded_size = downloaded_size + elif current_time - last_progress_time > stall_timeout: + # No progress for stall_timeout seconds + raise Exception( + f"Download stalled: no progress for {stall_timeout} seconds " + f"at {downloaded_size}/{total_size} bytes" + ) + + # Log progress every second + if current_time - last_log_time >= 1: + progress = (downloaded_size / total_size) * 100 if total_size > 0 else 0 + log.info("Progress - %.1f%% (%d/%d bytes)", + progress, downloaded_size, total_size) + last_log_time = current_time - # Save file with progress logging - with salt.utils.files.fopen(IMAGE_PATH, 'wb') as f: - for chunk in response.iter_content(chunk_size=8192): - f.write(chunk) - downloaded_size += len(chunk) + # Validate downloaded file + log.info("Download complete, validating checksum...") + if not _validate_image_checksum(IMAGE_PATH, IMAGE_SHA256): + log.error("Checksum validation failed on attempt %d", attempt) + os.unlink(IMAGE_PATH) + if attempt < max_attempts: + log.info("Will retry download...") + continue + else: + log.error("All download attempts failed due to checksum mismatch") + return False + + log.info("Successfully downloaded and validated Oracle Linux KVM image") + return True + + except requests.exceptions.Timeout as e: + log.error("Download attempt %d failed: Timeout - %s", attempt, str(e)) + if os.path.exists(IMAGE_PATH): + os.unlink(IMAGE_PATH) + if attempt < max_attempts: + log.info("Will retry download 
in %d seconds...", retry_delay) + time.sleep(retry_delay) + else: + log.error("All download attempts failed due to timeout") - # Log progress every second - current_time = time.time() - if current_time - last_log_time >= 1: - progress = (downloaded_size / total_size) * 100 if total_size > 0 else 0 - log.info("Progress - %.1f%% (%d/%d bytes)", - progress, downloaded_size, total_size) - last_log_time = current_time - - # Validate downloaded file - if not _validate_image_checksum(IMAGE_PATH, IMAGE_SHA256): - os.unlink(IMAGE_PATH) - return False - - log.info("Successfully downloaded and validated Oracle Linux KVM image") - return True - - except Exception as e: - log.error("Error downloading hypervisor image: %s", str(e)) - if os.path.exists(IMAGE_PATH): - os.unlink(IMAGE_PATH) - return False + except requests.exceptions.RequestException as e: + log.error("Download attempt %d failed: Network error - %s", attempt, str(e)) + if os.path.exists(IMAGE_PATH): + os.unlink(IMAGE_PATH) + if attempt < max_attempts: + log.info("Will retry download in %d seconds...", retry_delay) + time.sleep(retry_delay) + else: + log.error("All download attempts failed due to network errors") + + except Exception as e: + log.error("Download attempt %d failed: %s", attempt, str(e)) + if os.path.exists(IMAGE_PATH): + os.unlink(IMAGE_PATH) + if attempt < max_attempts: + log.info("Will retry download in %d seconds...", retry_delay) + time.sleep(retry_delay) + else: + log.error("All download attempts failed") + + return False def _check_ssh_keys_exist(): """ @@ -419,25 +489,28 @@ def _ensure_hypervisor_host_dir(minion_id: str = None): log.error(f"Error creating hypervisor host directory: {str(e)}") return False -def _apply_dyanno_hypervisor_state(): +def _apply_dyanno_hypervisor_state(status): """ Apply the soc.dyanno.hypervisor state on the salt master. This function applies the soc.dyanno.hypervisor state on the salt master to update the hypervisor annotation and ensure all hypervisor host directories exist. 
+ Args: + status: Status passed to the hypervisor annotation state + Returns: bool: True if state was applied successfully, False otherwise """ try: - log.info("Applying soc.dyanno.hypervisor state on salt master") + log.info(f"Applying soc.dyanno.hypervisor state on salt master with status: {status}") # Initialize the LocalClient local = salt.client.LocalClient() # Target the salt master to apply the soc.dyanno.hypervisor state target = MANAGER_HOSTNAME + '_*' - state_result = local.cmd(target, 'state.apply', ['soc.dyanno.hypervisor', "pillar={'baseDomain': {'status': 'PreInit'}}", 'concurrent=True'], tgt_type='glob') + state_result = local.cmd(target, 'state.apply', ['soc.dyanno.hypervisor', f"pillar={{'baseDomain': {{'status': '{status}'}}}}", 'concurrent=True'], tgt_type='glob') log.debug(f"state_result: {state_result}") # Check if state was applied successfully if state_result: @@ -454,17 +527,17 @@ def _apply_dyanno_hypervisor_state(): success = False if success: - log.info("Successfully applied soc.dyanno.hypervisor state") + log.info(f"Successfully applied soc.dyanno.hypervisor state with status: {status}") return True else: - log.error("Failed to apply soc.dyanno.hypervisor state") + log.error(f"Failed to apply soc.dyanno.hypervisor state with status: {status}") return False else: - log.error("No response from salt master when applying soc.dyanno.hypervisor state") + log.error(f"No response from salt master when applying soc.dyanno.hypervisor state with status: {status}") return False except Exception as e: - log.error(f"Error applying soc.dyanno.hypervisor state: {str(e)}") + log.error(f"Error applying soc.dyanno.hypervisor state with status: {status}: {str(e)}") return False def _apply_cloud_config_state(): @@ -598,11 +671,6 @@ def setup_environment(vm_name: str = 'sool9', disk_size: str = '220G', minion_id log.warning("Failed to apply salt.cloud.config state, continuing with setup") # We don't return an error here as we want to continue with the setup process - # Apply the soc.dyanno.hypervisor state on the salt master - if not _apply_dyanno_hypervisor_state(): - log.warning("Failed to apply soc.dyanno.hypervisor state, continuing with setup") - # We don't return an error here as we want to continue with the setup process - log.info("Starting setup_environment in setup_hypervisor runner") # Check if environment is already set up @@ -616,9 +684,12 @@ def setup_environment(vm_name: str = 'sool9', disk_size: str = '220G', minion_id # Handle image setup if needed if not image_valid: + _apply_dyanno_hypervisor_state('ImageDownloadStart') log.info("Starting image download/validation process") if not _download_image(): log.error("Image download failed") + # Update hypervisor annotation with failure status + _apply_dyanno_hypervisor_state('ImageDownloadFailed') return { 'success': False, 'error': 'Image download failed', @@ -631,6 +702,8 @@ def setup_environment(vm_name: str = 'sool9', disk_size: str = '220G', minion_id log.info("Setting up SSH keys") if not _setup_ssh_keys(): log.error("SSH key setup failed") + # Update hypervisor annotation with failure status + _apply_dyanno_hypervisor_state('SSHKeySetupFailed') return { 'success': False, 'error': 'SSH key setup failed', @@ -655,6 +728,12 @@ def setup_environment(vm_name: str = 'sool9', disk_size: str = '220G', minion_id success = vm_result.get('success', False) log.info("Setup environment completed with status: %s", "SUCCESS" if success else "FAILED") + # Update hypervisor annotation with success status + if success: + 
_apply_dyanno_hypervisor_state('PreInit') + else: + _apply_dyanno_hypervisor_state('SetupFailed') + # If setup was successful and we have a minion_id, run highstate if success and minion_id: log.info("Running highstate on hypervisor %s", minion_id) diff --git a/salt/elasticfleet/enabled.sls b/salt/elasticfleet/enabled.sls index cef47168f..ec8c8337e 100644 --- a/salt/elasticfleet/enabled.sls +++ b/salt/elasticfleet/enabled.sls @@ -32,6 +32,16 @@ so-elastic-fleet-auto-configure-logstash-outputs: - retry: attempts: 4 interval: 30 + +{# Separate from above in order to catch elasticfleet-logstash.crt changes and force update to fleet output policy #} +so-elastic-fleet-auto-configure-logstash-outputs-force: + cmd.run: + - name: /usr/sbin/so-elastic-fleet-outputs-update --force --certs + - retry: + attempts: 4 + interval: 30 + - onchanges: + - x509: etc_elasticfleet_logstash_crt {% endif %} # If enabled, automatically update Fleet Server URLs & ES Connection diff --git a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-outputs-update b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-outputs-update index de9b5f93f..c64d022a4 100644 --- a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-outputs-update +++ b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-outputs-update @@ -10,6 +10,26 @@ . /usr/sbin/so-common FORCE_UPDATE=false +UPDATE_CERTS=false + +while [[ $# -gt 0 ]]; do + case $1 in + -f|--force) + FORCE_UPDATE=true + shift + ;; + -c| --certs) + UPDATE_CERTS=true + shift + ;; + *) + echo "Unknown option $1" + echo "Usage: $0 [-f|--force] [-c|--certs]" + exit 1 + ;; + esac +done + # Only run on Managers if ! is_manager_node; then printf "Not a Manager Node... Exiting" @@ -19,17 +39,42 @@ fi function update_logstash_outputs() { if logstash_policy=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs/so-manager_logstash" --retry 3 --retry-delay 10 --fail 2>/dev/null); then SSL_CONFIG=$(echo "$logstash_policy" | jq -r '.item.ssl') + LOGSTASHKEY=$(openssl rsa -in /etc/pki/elasticfleet-logstash.key) + LOGSTASHCRT=$(openssl x509 -in /etc/pki/elasticfleet-logstash.crt) + LOGSTASHCA=$(openssl x509 -in /etc/pki/tls/certs/intca.crt) if SECRETS=$(echo "$logstash_policy" | jq -er '.item.secrets' 2>/dev/null); then - JSON_STRING=$(jq -n \ - --arg UPDATEDLIST "$NEW_LIST_JSON" \ - --argjson SECRETS "$SECRETS" \ - --argjson SSL_CONFIG "$SSL_CONFIG" \ - '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":"{{ LOGSTASH_CONFIG_YAML }}","ssl": $SSL_CONFIG,"secrets": $SECRETS}') + if [[ "$UPDATE_CERTS" != "true" ]]; then + # Reuse existing secret + JSON_STRING=$(jq -n \ + --arg UPDATEDLIST "$NEW_LIST_JSON" \ + --argjson SECRETS "$SECRETS" \ + --argjson SSL_CONFIG "$SSL_CONFIG" \ + '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":"{{ LOGSTASH_CONFIG_YAML }}","ssl": $SSL_CONFIG,"secrets": $SECRETS}') + else + # Update certs, creating new secret + JSON_STRING=$(jq -n \ + --arg UPDATEDLIST "$NEW_LIST_JSON" \ + --arg LOGSTASHKEY "$LOGSTASHKEY" \ + --arg LOGSTASHCRT "$LOGSTASHCRT" \ + --arg LOGSTASHCA "$LOGSTASHCA" \ + '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":"{{ LOGSTASH_CONFIG_YAML }}","ssl": {"certificate": $LOGSTASHCRT,"certificate_authorities":[ $LOGSTASHCA ]},"secrets": {"ssl":{"key": $LOGSTASHKEY }}}') + fi else - JSON_STRING=$(jq 
-n \ - --arg UPDATEDLIST "$NEW_LIST_JSON" \ - --argjson SSL_CONFIG "$SSL_CONFIG" \ - '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":"","ssl": $SSL_CONFIG}') + if [[ "$UPDATE_CERTS" != "true" ]]; then + # Reuse existing ssl config + JSON_STRING=$(jq -n \ + --arg UPDATEDLIST "$NEW_LIST_JSON" \ + --argjson SSL_CONFIG "$SSL_CONFIG" \ + '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":"{{ LOGSTASH_CONFIG_YAML }}","ssl": $SSL_CONFIG}') + else + # Update ssl config + JSON_STRING=$(jq -n \ + --arg UPDATEDLIST "$NEW_LIST_JSON" \ + --arg LOGSTASHKEY "$LOGSTASHKEY" \ + --arg LOGSTASHCRT "$LOGSTASHCRT" \ + --arg LOGSTASHCA "$LOGSTASHCA" \ + '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":"{{ LOGSTASH_CONFIG_YAML }}","ssl": {"certificate": $LOGSTASHCRT,"key": $LOGSTASHKEY,"certificate_authorities":[ $LOGSTASHCA ]}}') + fi fi fi diff --git a/salt/elasticsearch/files/ingest/suricata.common b/salt/elasticsearch/files/ingest/suricata.common index 102b5dac8..7b2dc7eeb 100644 --- a/salt/elasticsearch/files/ingest/suricata.common +++ b/salt/elasticsearch/files/ingest/suricata.common @@ -1,30 +1,155 @@ { - "description" : "suricata.common", - "processors" : [ - { "json": { "field": "message", "target_field": "message2", "ignore_failure": true } }, - { "rename": { "field": "message2.pkt_src", "target_field": "network.packet_source","ignore_failure": true } }, - { "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_failure": true } }, - { "rename": { "field": "message2.in_iface", "target_field": "observer.ingress.interface.name", "ignore_failure": true } }, - { "rename": { "field": "message2.flow_id", "target_field": "log.id.uid", "ignore_failure": true } }, - { "rename": { "field": "message2.src_ip", "target_field": "source.ip", "ignore_failure": true } }, - { "rename": { "field": "message2.src_port", "target_field": "source.port", "ignore_failure": true } }, - { "rename": { "field": "message2.dest_ip", "target_field": "destination.ip", "ignore_failure": true } }, - { "rename": { "field": "message2.dest_port", "target_field": "destination.port", "ignore_failure": true } }, - { "rename": { "field": "message2.vlan", "target_field": "network.vlan.id", "ignore_failure": true } }, - { "rename": { "field": "message2.community_id", "target_field": "network.community_id", "ignore_missing": true } }, - { "rename": { "field": "message2.xff", "target_field": "xff.ip", "ignore_missing": true } }, - { "set": { "field": "event.dataset", "value": "{{ message2.event_type }}" } }, - { "set": { "field": "observer.name", "value": "{{agent.name}}" } }, - { "set": { "field": "event.ingested", "value": "{{@timestamp}}" } }, - { "date": { "field": "message2.timestamp", "target_field": "@timestamp", "formats": ["ISO8601", "UNIX"], "timezone": "UTC", "ignore_failure": true } }, - { "remove":{ "field": "agent", "ignore_failure": true } }, - {"append":{"field":"related.ip","value":["{{source.ip}}","{{destination.ip}}"],"allow_duplicates":false,"ignore_failure":true}}, - { - "script": { - "source": "boolean isPrivate(def ip) { if (ip == null) return false; int dot1 = ip.indexOf('.'); if (dot1 == -1) return false; int dot2 = ip.indexOf('.', dot1 + 1); if (dot2 == -1) return false; int first = Integer.parseInt(ip.substring(0, dot1)); if (first == 10) return true; if (first == 192 
&& ip.startsWith('168.', dot1 + 1)) return true; if (first == 172) { int second = Integer.parseInt(ip.substring(dot1 + 1, dot2)); return second >= 16 && second <= 31; } return false; } String[] fields = new String[] {\"source\", \"destination\"}; for (int i = 0; i < fields.length; i++) { def field = fields[i]; def ip = ctx[field]?.ip; if (ip != null) { if (ctx.network == null) ctx.network = new HashMap(); if (isPrivate(ip)) { if (ctx.network.private_ip == null) ctx.network.private_ip = new ArrayList(); if (!ctx.network.private_ip.contains(ip)) ctx.network.private_ip.add(ip); } else { if (ctx.network.public_ip == null) ctx.network.public_ip = new ArrayList(); if (!ctx.network.public_ip.contains(ip)) ctx.network.public_ip.add(ip); } } }", - "ignore_failure": false - } - }, - { "pipeline": { "if": "ctx?.event?.dataset != null", "name": "suricata.{{event.dataset}}" } } - ] -} + "description": "suricata.common", + "processors": [ + { + "json": { + "field": "message", + "target_field": "message2", + "ignore_failure": true + } + }, + { + "rename": { + "field": "message2.pkt_src", + "target_field": "network.packet_source", + "ignore_failure": true + } + }, + { + "rename": { + "field": "message2.proto", + "target_field": "network.transport", + "ignore_failure": true + } + }, + { + "rename": { + "field": "message2.in_iface", + "target_field": "observer.ingress.interface.name", + "ignore_failure": true + } + }, + { + "rename": { + "field": "message2.flow_id", + "target_field": "log.id.uid", + "ignore_failure": true + } + }, + { + "rename": { + "field": "message2.src_ip", + "target_field": "source.ip", + "ignore_failure": true + } + }, + { + "rename": { + "field": "message2.src_port", + "target_field": "source.port", + "ignore_failure": true + } + }, + { + "rename": { + "field": "message2.dest_ip", + "target_field": "destination.ip", + "ignore_failure": true + } + }, + { + "rename": { + "field": "message2.dest_port", + "target_field": "destination.port", + "ignore_failure": true + } + }, + { + "rename": { + "field": "message2.vlan", + "target_field": "network.vlan.id", + "ignore_failure": true + } + }, + { + "rename": { + "field": "message2.community_id", + "target_field": "network.community_id", + "ignore_missing": true + } + }, + { + "rename": { + "field": "message2.xff", + "target_field": "xff.ip", + "ignore_missing": true + } + }, + { + "set": { + "field": "event.dataset", + "value": "{{ message2.event_type }}" + } + }, + { + "set": { + "field": "observer.name", + "value": "{{agent.name}}" + } + }, + { + "set": { + "field": "event.ingested", + "value": "{{@timestamp}}" + } + }, + { + "date": { + "field": "message2.timestamp", + "target_field": "@timestamp", + "formats": [ + "ISO8601", + "UNIX" + ], + "timezone": "UTC", + "ignore_failure": true + } + }, + { + "remove": { + "field": "agent", + "ignore_failure": true + } + }, + { + "append": { + "field": "related.ip", + "value": [ + "{{source.ip}}", + "{{destination.ip}}" + ], + "allow_duplicates": false, + "ignore_failure": true + } + }, + { + "script": { + "source": "boolean isPrivate(def ip) { if (ip == null) return false; int dot1 = ip.indexOf('.'); if (dot1 == -1) return false; int dot2 = ip.indexOf('.', dot1 + 1); if (dot2 == -1) return false; int first = Integer.parseInt(ip.substring(0, dot1)); if (first == 10) return true; if (first == 192 && ip.startsWith('168.', dot1 + 1)) return true; if (first == 172) { int second = Integer.parseInt(ip.substring(dot1 + 1, dot2)); return second >= 16 && second <= 31; } return false; } String[] fields = new 
String[] {\"source\", \"destination\"}; for (int i = 0; i < fields.length; i++) { def field = fields[i]; def ip = ctx[field]?.ip; if (ip != null) { if (ctx.network == null) ctx.network = new HashMap(); if (isPrivate(ip)) { if (ctx.network.private_ip == null) ctx.network.private_ip = new ArrayList(); if (!ctx.network.private_ip.contains(ip)) ctx.network.private_ip.add(ip); } else { if (ctx.network.public_ip == null) ctx.network.public_ip = new ArrayList(); if (!ctx.network.public_ip.contains(ip)) ctx.network.public_ip.add(ip); } } }", + "ignore_failure": false + } + }, + { + "rename": { + "field": "message2.capture_file", + "target_field": "suricata.capture_file", + "ignore_missing": true + } + }, + { + "pipeline": { + "if": "ctx?.event?.dataset != null", + "name": "suricata.{{event.dataset}}" + } + } + ] +} \ No newline at end of file diff --git a/salt/elasticsearch/templates/component/ecs/suricata.json b/salt/elasticsearch/templates/component/ecs/suricata.json index 1eb06d266..3f393ff6a 100644 --- a/salt/elasticsearch/templates/component/ecs/suricata.json +++ b/salt/elasticsearch/templates/component/ecs/suricata.json @@ -841,6 +841,10 @@ "type": "long" } } + }, + "capture_file": { + "type": "keyword", + "ignore_above": 1024 } } } diff --git a/salt/hypervisor/tools/sbin_jinja/so-kvm-create-volume b/salt/hypervisor/tools/sbin_jinja/so-kvm-create-volume index 2322c3a94..601de643f 100644 --- a/salt/hypervisor/tools/sbin_jinja/so-kvm-create-volume +++ b/salt/hypervisor/tools/sbin_jinja/so-kvm-create-volume @@ -45,7 +45,7 @@ used during VM provisioning to add dedicated NSM storage volumes. This command creates and attaches a volume with the following settings: - VM Name: `vm1_sensor` - Volume Size: `500` GB - - Volume Path: `/nsm/libvirt/volumes/vm1_sensor-nsm.img` + - Volume Path: `/nsm/libvirt/volumes/vm1_sensor-nsm-.img` - Device: `/dev/vdb` (virtio-blk) - VM remains stopped after attachment @@ -75,7 +75,8 @@ used during VM provisioning to add dedicated NSM storage volumes. - The script automatically stops the VM if it's running before creating and attaching the volume. - Volumes are created with full pre-allocation for optimal performance. -- Volume files are stored in `/nsm/libvirt/volumes/` with naming pattern `-nsm.img`. +- Volume files are stored in `/nsm/libvirt/volumes/` with naming pattern `-nsm-.img`. +- The epoch timestamp ensures unique volume names and prevents conflicts. - Volumes are attached as `/dev/vdb` using virtio-blk for high performance. - The script checks available disk space before creating the volume. - Ownership is set to `qemu:qemu` with permissions `640`. 
@@ -142,6 +143,7 @@ import socket import subprocess import pwd import grp +import time import xml.etree.ElementTree as ET from io import StringIO from so_vm_utils import start_vm, stop_vm @@ -242,10 +244,13 @@ def create_volume_file(vm_name, size_gb, logger): Raises: VolumeCreationError: If volume creation fails """ - # Define volume path (directory already created in main()) - volume_path = os.path.join(VOLUME_DIR, f"{vm_name}-nsm.img") + # Generate epoch timestamp for unique volume naming + epoch_timestamp = int(time.time()) - # Check if volume already exists + # Define volume path with epoch timestamp for uniqueness + volume_path = os.path.join(VOLUME_DIR, f"{vm_name}-nsm-{epoch_timestamp}.img") + + # Check if volume already exists (shouldn't be possible with timestamp) if os.path.exists(volume_path): logger.error(f"VOLUME: Volume already exists: {volume_path}") raise VolumeCreationError(f"Volume already exists: {volume_path}") diff --git a/salt/salt/engines/master/virtual_node_manager.py b/salt/salt/engines/master/virtual_node_manager.py index 6d88bd688..ccc063d64 100644 --- a/salt/salt/engines/master/virtual_node_manager.py +++ b/salt/salt/engines/master/virtual_node_manager.py @@ -727,7 +727,8 @@ def check_hypervisor_disk_space(hypervisor: str, size_gb: int) -> Tuple[bool, Op result = local.cmd( hypervisor_minion, 'cmd.run', - ["df -BG /nsm/libvirt/volumes | tail -1 | awk '{print $4}' | sed 's/G//'"] + ["df -BG /nsm/libvirt/volumes | tail -1 | awk '{print $4}' | sed 's/G//'"], + kwarg={'python_shell': True} ) if not result or hypervisor_minion not in result: diff --git a/salt/soc/dyanno/hypervisor/soc_hypervisor.yaml.jinja b/salt/soc/dyanno/hypervisor/soc_hypervisor.yaml.jinja index ac2fd6fea..f23fdb5d9 100644 --- a/salt/soc/dyanno/hypervisor/soc_hypervisor.yaml.jinja +++ b/salt/soc/dyanno/hypervisor/soc_hypervisor.yaml.jinja @@ -43,10 +43,26 @@ No Virtual Machines Found {%- endif %} -{%- else %} +{%- elif baseDomainStatus == 'ImageDownloadStart' %} +#### INFO + +Base domain image download started. +{%- elif baseDomainStatus == 'ImageDownloadFailed' %} +#### ERROR + +Base domain image download failed. Please check the salt-master log for details and verify network connectivity. +{%- elif baseDomainStatus == 'SSHKeySetupFailed' %} +#### ERROR + +SSH key setup failed. Please check the salt-master log for details. +{%- elif baseDomainStatus == 'SetupFailed' %} #### WARNING -Base domain has not been initialized. +Setup failed. Please check the salt-master log for details. +{%- elif baseDomainStatus == 'PreInit' %} +#### WARNING + +Base domain has not been initialized. Waiting for hypervisor to highstate. 
{%- endif %} {%- endmacro -%} diff --git a/salt/zeek/files/config.zeek.ja4 b/salt/zeek/files/config.zeek.ja4 index e3dd08a48..3d0035481 100644 --- a/salt/zeek/files/config.zeek.ja4 +++ b/salt/zeek/files/config.zeek.ja4 @@ -11,6 +11,8 @@ export { option JA4S_enabled: bool = F; option JA4S_raw: bool = F; + option JA4D_enabled: bool = F; + option JA4H_enabled: bool = F; option JA4H_raw: bool = F; From 8abd4c9c78a8caaa258c7837cfc89244e0f974f3 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Tue, 2 Dec 2025 12:42:15 -0500 Subject: [PATCH 36/67] Remove idstools files --- salt/manager/tools/sbin/soup | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 59690c0bf..eb424ba72 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -1255,6 +1255,24 @@ else echo "Custom idstools configuration detected - syncBlock remains in place" echo "Review /opt/so/conf/soc/fingerprints/suricataengine.syncBlock for details" fi + +echo "Cleaning up idstools" +echo "Stopping and removing the idstools container..." +if [ -n "$(docker ps -q -f name=^so-idstools$)" ]; then + image_name=$(docker ps -a --filter name=^so-idstools$ --format '{{.Image}}' 2>/dev/null || true) + docker stop so-idstools || echo "Warning: failed to stop so-idstools container" + docker rm so-idstools || echo "Warning: failed to remove so-idstools container" + + if [[ -n "$image_name" ]]; then + echo "Removing idstools image: $image_name" + docker rmi "$image_name" || echo "Warning: failed to remove image $image_name" + fi +fi + +echo "Removing idstools symlink and scripts..." +rm /opt/so/saltstack/local/salt/suricata/rules +rm -rf /usr/sbin/so-idstools* + } determine_elastic_agent_upgrade() { From 18accae47ea56c4d9e58fdf19cd5ae07f9e4ee4e Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Tue, 2 Dec 2025 15:10:29 -0600 Subject: [PATCH 37/67] annotation typo --- salt/elasticfleet/soc_elasticfleet.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/elasticfleet/soc_elasticfleet.yaml b/salt/elasticfleet/soc_elasticfleet.yaml index d7c324855..d78189f96 100644 --- a/salt/elasticfleet/soc_elasticfleet.yaml +++ b/salt/elasticfleet/soc_elasticfleet.yaml @@ -83,7 +83,7 @@ elasticfleet: advanced: True global: True helpLink: elastic-fleet.html - compression: + compression_level: description: The gzip compression level. The compression level must be in the range of 1 (best speed) to 9 (best compression). 
regex: ^[1-9]$ forcedType: int From b0d9426f1be143ef8b7768f2707746a563a99244 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Tue, 2 Dec 2025 15:11:00 -0600 Subject: [PATCH 38/67] automated cert update for kafka fleet output policy --- salt/elasticfleet/enabled.sls | 1 + .../so-elastic-fleet-outputs-update | 47 ++++++++++++++----- 2 files changed, 36 insertions(+), 12 deletions(-) diff --git a/salt/elasticfleet/enabled.sls b/salt/elasticfleet/enabled.sls index ec8c8337e..25fca759d 100644 --- a/salt/elasticfleet/enabled.sls +++ b/salt/elasticfleet/enabled.sls @@ -42,6 +42,7 @@ so-elastic-fleet-auto-configure-logstash-outputs-force: interval: 30 - onchanges: - x509: etc_elasticfleet_logstash_crt + - x509: elasticfleet_kafka_crt {% endif %} # If enabled, automatically update Fleet Server URLs & ES Connection diff --git a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-outputs-update b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-outputs-update index c64d022a4..715d53a3b 100644 --- a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-outputs-update +++ b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-outputs-update @@ -85,19 +85,42 @@ function update_kafka_outputs() { # Make sure SSL configuration is included in policy updates for Kafka output. SSL is configured in so-elastic-fleet-setup if kafka_policy=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs/so-manager_kafka" --fail 2>/dev/null); then SSL_CONFIG=$(echo "$kafka_policy" | jq -r '.item.ssl') + KAFKAKEY=$(openssl rsa -in /etc/pki/elasticfleet-kafka.key) + KAFKACRT=$(openssl x509 -in /etc/pki/elasticfleet-kafka.crt) + KAFKACA=$(openssl x509 -in /etc/pki/tls/certs/intca.crt) if SECRETS=$(echo "$kafka_policy" | jq -er '.item.secrets' 2>/dev/null); then - # Update policy when fleet has secrets enabled - JSON_STRING=$(jq -n \ - --arg UPDATEDLIST "$NEW_LIST_JSON" \ - --argjson SSL_CONFIG "$SSL_CONFIG" \ - --argjson SECRETS "$SECRETS" \ - '{"name": "grid-kafka","type": "kafka","hosts": $UPDATEDLIST,"is_default": true,"is_default_monitoring": true,"config_yaml": "","ssl": $SSL_CONFIG,"secrets": $SECRETS}') + if [[ "$UPDATE_CERTS" != "true" ]]; then + # Update policy when fleet has secrets enabled + JSON_STRING=$(jq -n \ + --arg UPDATEDLIST "$NEW_LIST_JSON" \ + --argjson SSL_CONFIG "$SSL_CONFIG" \ + --argjson SECRETS "$SECRETS" \ + '{"name": "grid-kafka","type": "kafka","hosts": $UPDATEDLIST,"is_default": true,"is_default_monitoring": true,"config_yaml": "","ssl": $SSL_CONFIG,"secrets": $SECRETS}') + else + # Update certs, creating new secret + JSON_STRING=$(jq -n \ + --arg UPDATEDLIST "$NEW_LIST_JSON" \ + --arg KAFKAKEY "$KAFKAKEY" \ + --arg KAFKACRT "$KAFKACRT" \ + --arg KAFKACA "$KAFKACA" \ + '{"name": "grid-kafka","type": "kafka","hosts": $UPDATEDLIST,"is_default": true,"is_default_monitoring": true,"config_yaml": "","ssl": {"certificate_authorities":[ $KAFKACA ],"certificate": $KAFKACRT ,"key":"","verification_mode":"full"},"secrets": {"ssl":{"key": $KAFKAKEY }}}') + fi else - # Update policy when fleet has secrets disabled or policy hasn't been force updated - JSON_STRING=$(jq -n \ - --arg UPDATEDLIST "$NEW_LIST_JSON" \ - --argjson SSL_CONFIG "$SSL_CONFIG" \ - '{"name": "grid-kafka","type": "kafka","hosts": $UPDATEDLIST,"is_default": true,"is_default_monitoring": true,"config_yaml": "","ssl": $SSL_CONFIG}') + if [[ "$UPDATE_CERTS" != "true" ]]; then + # Update policy when fleet has secrets disabled or policy hasn't been force updated + 
JSON_STRING=$(jq -n \ + --arg UPDATEDLIST "$NEW_LIST_JSON" \ + --argjson SSL_CONFIG "$SSL_CONFIG" \ + '{"name": "grid-kafka","type": "kafka","hosts": $UPDATEDLIST,"is_default": true,"is_default_monitoring": true,"config_yaml": "","ssl": $SSL_CONFIG}') + else + # Update ssl config + JSON_STRING=$(jq -n \ + --arg UPDATEDLIST "$NEW_LIST_JSON" \ + --arg KAFKAKEY "$KAFKAKEY" \ + --arg KAFKACRT "$KAFKACRT" \ + --arg KAFKACA "$KAFKACA" \ + '{"name": "grid-kafka","type": "kafka","hosts": $UPDATEDLIST,"is_default": true,"is_default_monitoring": true,"config_yaml": "","ssl": { "certificate_authorities": [ $KAFKACA ], "certificate": $KAFKACRT, "key": $KAFKAKEY, "verification_mode": "full" }}') + fi fi # Update Kafka outputs curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_kafka" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" | jq @@ -120,7 +143,7 @@ function update_kafka_outputs() { # Get the current list of kafka outputs & hash them CURRENT_LIST=$(jq -c -r '.item.hosts' <<< "$RAW_JSON") - CURRENT_HASH=$(sha1sum <<< "$CURRENT_LIST" | awk '{print $1}') + CURRENT_HASH=$(sha256sum <<< "$CURRENT_LIST" | awk '{print $1}') declare -a NEW_LIST=() From 877444ac29c50093634b323ff1fd3f1ac14f6b0a Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Tue, 2 Dec 2025 15:16:59 -0600 Subject: [PATCH 39/67] cert update is a forced update --- salt/elasticfleet/enabled.sls | 2 +- .../tools/sbin_jinja/so-elastic-fleet-outputs-update | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/salt/elasticfleet/enabled.sls b/salt/elasticfleet/enabled.sls index 25fca759d..db10a7182 100644 --- a/salt/elasticfleet/enabled.sls +++ b/salt/elasticfleet/enabled.sls @@ -36,7 +36,7 @@ so-elastic-fleet-auto-configure-logstash-outputs: {# Separate from above in order to catch elasticfleet-logstash.crt changes and force update to fleet output policy #} so-elastic-fleet-auto-configure-logstash-outputs-force: cmd.run: - - name: /usr/sbin/so-elastic-fleet-outputs-update --force --certs + - name: /usr/sbin/so-elastic-fleet-outputs-update --certs - retry: attempts: 4 interval: 30 diff --git a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-outputs-update b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-outputs-update index 715d53a3b..281e05c59 100644 --- a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-outputs-update +++ b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-outputs-update @@ -20,6 +20,7 @@ while [[ $# -gt 0 ]]; do ;; -c| --certs) UPDATE_CERTS=true + FORCE_UPDATE=true shift ;; *) From 52f70dc49ae87b3f1f064c1091fd132c4afbc1ba Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Tue, 2 Dec 2025 17:40:30 -0500 Subject: [PATCH 40/67] Cleanup idstools --- salt/manager/tools/sbin/soup | 2 ++ 1 file changed, 2 insertions(+) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index eb424ba72..2efc1603a 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -1272,6 +1272,8 @@ fi echo "Removing idstools symlink and scripts..." 
rm /opt/so/saltstack/local/salt/suricata/rules rm -rf /usr/sbin/so-idstools* +sed -i '/^so-idstools$/d' /opt/so/conf/so-status/so-status.conf +so-yaml.py removelistitem /etc/salt/master file_roots.base /opt/so/rules/nids } From 23575fdf6c4173ae29c00ee5480d5bb10583f0d2 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Tue, 2 Dec 2025 19:19:57 -0500 Subject: [PATCH 41/67] edit actual file --- salt/manager/tools/sbin/soup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 2efc1603a..67262dcb0 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -1272,7 +1272,7 @@ fi echo "Removing idstools symlink and scripts..." rm /opt/so/saltstack/local/salt/suricata/rules rm -rf /usr/sbin/so-idstools* -sed -i '/^so-idstools$/d' /opt/so/conf/so-status/so-status.conf +sed '/^so-idstools$/d' /opt/so/conf/so-status/so-status.conf so-yaml.py removelistitem /etc/salt/master file_roots.base /opt/so/rules/nids } From 41b3ac75540e59169effa32cc278e8504a19a2ba Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Tue, 2 Dec 2025 19:58:56 -0500 Subject: [PATCH 42/67] Backup salt master config --- salt/manager/tools/sbin/soup | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 67262dcb0..6cf5921b5 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -1272,7 +1272,10 @@ fi echo "Removing idstools symlink and scripts..." rm /opt/so/saltstack/local/salt/suricata/rules rm -rf /usr/sbin/so-idstools* -sed '/^so-idstools$/d' /opt/so/conf/so-status/so-status.conf +sed -i '/^#\?so-idstools$/d' /opt/so/conf/so-status/so-status.conf + +# Backup the salt master config before editing it +cp /etc/salt/master /nsm/backup/detections-migration/2-4-200 so-yaml.py removelistitem /etc/salt/master file_roots.base /opt/so/rules/nids } From 822c411e83730aec9939c4fd45ec6455bc15aff2 Mon Sep 17 00:00:00 2001 From: Josh Brower Date: Tue, 2 Dec 2025 21:24:24 -0500 Subject: [PATCH 43/67] Update version to 2.4.0-delta --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index 86df31761..09e15369f 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.4.200 +2.4.0-delta From f15a39c1535463c1ae2c5ca2ef45b1fbef9c7911 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Wed, 3 Dec 2025 11:24:04 -0500 Subject: [PATCH 44/67] Add historical hashes --- salt/common/tools/sbin_jinja/so-import-pcap | 2 +- salt/manager/tools/sbin/soup | 33 ++++++++++++++++++--- 2 files changed, 30 insertions(+), 5 deletions(-) diff --git a/salt/common/tools/sbin_jinja/so-import-pcap b/salt/common/tools/sbin_jinja/so-import-pcap index b630df015..9171c4bc6 100755 --- a/salt/common/tools/sbin_jinja/so-import-pcap +++ b/salt/common/tools/sbin_jinja/so-import-pcap @@ -85,7 +85,7 @@ function suricata() { docker run --rm \ -v /opt/so/conf/suricata/suricata.yaml:/etc/suricata/suricata.yaml:ro \ -v /opt/so/conf/suricata/threshold.conf:/etc/suricata/threshold.conf:ro \ - -v /opt/so/conf/suricata/rules:/etc/suricata/rules:ro \ + -v /opt/so/rules/suricata/:/etc/suricata/rules:ro \ -v ${LOG_PATH}:/var/log/suricata/:rw \ -v ${NSM_PATH}/:/nsm/:rw \ -v "$PCAP:/input.pcap:ro" \ diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 6cf5921b5..2ffed6af3 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -1179,14 +1179,39 @@ hash_normalized_file() { "$file" | sha256sum | awk '{print $1}' } 
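The hash arrays that follow are compared against the output of hash_normalized_file() shown in the hunk header above; the "Check a config file against known hashes" helper referenced at the end of this hunk falls outside the diff context, but the check amounts to something along these lines (illustrative sketch only; the function name and usage below are hypothetical):

# Return 0 if the file still matches one of the shipped defaults, 1 if it was customized
config_matches_known_default() {
    local file="$1"; shift
    local current_hash
    current_hash=$(hash_normalized_file "$file")
    for h in "$@"; do
        [[ "$current_hash" == "$h" ]] && return 0
    done
    return 1
}
# Hypothetical usage:
# config_matches_known_default /opt/so/conf/idstools/etc/rulecat.conf "${KNOWN_RULECAT_CONF_HASHES[@]}"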
-# Known-default hashes +# Known-default hashes for so-rule-update (ETOPEN ruleset) KNOWN_SO_RULE_UPDATE_HASHES=( - "8f1fe1cb65c08aab78830315b952785c7ccdcc108c5c0474f427e29d4e39ee5f" # non-Airgap - "d23ac5a962c709dcb888103effb71444df72b46009b6c426e280dbfbc7d74d40" # Airgap + # 2.4.100+ (suricata 7.0.3, non-airgap) + "5fbd067ced86c8ec72ffb7e1798aa624123b536fb9d78f4b3ad8d3b45db1eae7" # 2.4.100-2.4.190 non-Airgap + # 2.4.90+ airgap (same for 2.4.90 and 2.4.100+) + "61f632c55791338c438c071040f1490066769bcce808b595b5cc7974a90e653a" # 2.4.90+ Airgap + # 2.4.90 (suricata 6.0, non-airgap, comment inside proxy block) + "0380ec52a05933244ab0f0bc506576e1d838483647b40612d5fe4b378e47aedd" # 2.4.90 non-Airgap + # 2.4.10-2.4.80 (suricata 6.0, non-airgap, comment outside proxy block) + "b6e4d1b5a78d57880ad038a9cd2cc6978aeb2dd27d48ea1a44dd866a2aee7ff4" # 2.4.10-2.4.80 non-Airgap + # 2.4.10-2.4.80 airgap + "b20146526ace2b142fde4664f1386a9a1defa319b3a1d113600ad33a1b037dad" # 2.4.10-2.4.80 Airgap + # 2.4.5 and earlier (no pidof check, non-airgap) + "d04f5e4015c348133d28a7840839e82d60009781eaaa1c66f7f67747703590dc" # 2.4.5 non-Airgap ) +# Known-default hashes for rulecat.conf KNOWN_RULECAT_CONF_HASHES=( - "17fc663a83b30d4ba43ac6643666b0c96343c5ea6ea833fe6a8362fe415b666b" # default + # 2.4.100+ (suricata 7.0.3) + "302e75dca9110807f09ade2eec3be1fcfc8b2bf6cf2252b0269bb72efeefe67e" # 2.4.100-2.4.190 without SURICATA md_engine + "8029b7718c324a9afa06a5cf180afde703da1277af4bdd30310a6cfa3d6398cb" # 2.4.100-2.4.190 with SURICATA md_engine + # 2.4.80-2.4.90 (suricata 6.0, with --suricata-version and --output) + "4d8b318e6950a6f60b02f307cf27c929efd39652990c1bd0c8820aa8a307e1e7" # 2.4.80-2.4.90 without SURICATA md_engine + "a1ddf264c86c4e91c81c5a317f745a19466d4311e4533ec3a3c91fed04c11678" # 2.4.80-2.4.90 with SURICATA md_engine + # 2.4.50-2.4.70 (/suri/ path, no --suricata-version) + "86e3afb8d0f00c62337195602636864c98580a13ca9cc85029661a539deae6ae" # 2.4.50-2.4.70 without SURICATA md_engine + "5a97604ca5b820a10273a2d6546bb5e00c5122ca5a7dfe0ba0bfbce5fc026f4b" # 2.4.50-2.4.70 with SURICATA md_engine + # 2.4.20-2.4.40 (/nids/ path without /suri/) + "d098ea9ecd94b5cca35bf33543f8ea8f48066a0785221fabda7fef43d2462c29" # 2.4.20-2.4.40 without SURICATA md_engine + "9dbc60df22ae20d65738ba42e620392577857038ba92278e23ec182081d191cd" # 2.4.20-2.4.40 with SURICATA md_engine + # 2.4.5-2.4.10 (/sorules/ path for extraction/filters) + "490f6843d9fca759ee74db3ada9c702e2440b8393f2cfaf07bbe41aaa6d955c3" # 2.4.5-2.4.10 with SURICATA md_engine + # Note: 2.4.5-2.4.10 without SURICATA md_engine has same hash as 2.4.20-2.4.40 without SURICATA md_engine ) # Check a config file against known hashes From 30487a54c137ecf0aacc86e780ca1792208d905a Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Wed, 3 Dec 2025 11:52:10 -0500 Subject: [PATCH 45/67] skip continue prompt if user cannot actually contine --- setup/so-functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup/so-functions b/setup/so-functions index a8414d0e8..8d1aafa0c 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -656,11 +656,11 @@ check_requirements() { fi if [[ $total_mem_hr -lt $req_mem ]]; then - whiptail_requirements_error "memory" "${total_mem_hr} GB" "${req_mem} GB" if [[ $is_standalone || $is_heavynode ]]; then echo "This install type will fail with less than $req_mem GB of memory. Exiting setup." 
exit 0 fi + whiptail_requirements_error "memory" "${total_mem_hr} GB" "${req_mem} GB" fi if [[ $is_standalone || $is_heavynode ]]; then if [[ $total_mem_hr -gt 15 && $total_mem_hr -lt 24 ]]; then From 847742091144e2832e345c8073dee94f1741b276 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Wed, 3 Dec 2025 20:10:06 -0600 Subject: [PATCH 46/67] logstash adv config state file --- .../so-elastic-fleet-outputs-update | 33 ++++++++++++------- 1 file changed, 21 insertions(+), 12 deletions(-) diff --git a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-outputs-update b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-outputs-update index 281e05c59..58baadca5 100644 --- a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-outputs-update +++ b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-outputs-update @@ -11,6 +11,8 @@ FORCE_UPDATE=false UPDATE_CERTS=false +LOGSTASH_PILLAR_CONFIG_YAML="{{ LOGSTASH_CONFIG_YAML }}" +LOGSTASH_PILLAR_STATE_FILE="/opt/so/state/esfleet_logstash_config_pillar" while [[ $# -gt 0 ]]; do case $1 in @@ -43,38 +45,45 @@ function update_logstash_outputs() { LOGSTASHKEY=$(openssl rsa -in /etc/pki/elasticfleet-logstash.key) LOGSTASHCRT=$(openssl x509 -in /etc/pki/elasticfleet-logstash.crt) LOGSTASHCA=$(openssl x509 -in /etc/pki/tls/certs/intca.crt) + # Revert escaped \\n to \n for jq + LOGSTASH_PILLAR_CONFIG_YAML=$(printf '%b' "$LOGSTASH_PILLAR_CONFIG_YAML") + if SECRETS=$(echo "$logstash_policy" | jq -er '.item.secrets' 2>/dev/null); then if [[ "$UPDATE_CERTS" != "true" ]]; then # Reuse existing secret JSON_STRING=$(jq -n \ --arg UPDATEDLIST "$NEW_LIST_JSON" \ + --arg CONFIG_YAML "$LOGSTASH_PILLAR_CONFIG_YAML" \ --argjson SECRETS "$SECRETS" \ --argjson SSL_CONFIG "$SSL_CONFIG" \ - '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":"{{ LOGSTASH_CONFIG_YAML }}","ssl": $SSL_CONFIG,"secrets": $SECRETS}') + '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":$CONFIG_YAML,"ssl": $SSL_CONFIG,"secrets": $SECRETS}') else # Update certs, creating new secret JSON_STRING=$(jq -n \ --arg UPDATEDLIST "$NEW_LIST_JSON" \ + --arg CONFIG_YAML "$LOGSTASH_PILLAR_CONFIG_YAML" \ --arg LOGSTASHKEY "$LOGSTASHKEY" \ --arg LOGSTASHCRT "$LOGSTASHCRT" \ --arg LOGSTASHCA "$LOGSTASHCA" \ - '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":"{{ LOGSTASH_CONFIG_YAML }}","ssl": {"certificate": $LOGSTASHCRT,"certificate_authorities":[ $LOGSTASHCA ]},"secrets": {"ssl":{"key": $LOGSTASHKEY }}}') + '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":$CONFIG_YAML,"ssl": {"certificate": $LOGSTASHCRT,"certificate_authorities":[ $LOGSTASHCA ]},"secrets": {"ssl":{"key": $LOGSTASHKEY }}}') fi else if [[ "$UPDATE_CERTS" != "true" ]]; then # Reuse existing ssl config JSON_STRING=$(jq -n \ --arg UPDATEDLIST "$NEW_LIST_JSON" \ + --arg CONFIG_YAML "$LOGSTASH_PILLAR_CONFIG_YAML" \ --argjson SSL_CONFIG "$SSL_CONFIG" \ - '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":"{{ LOGSTASH_CONFIG_YAML }}","ssl": $SSL_CONFIG}') + '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":$CONFIG_YAML,"ssl": $SSL_CONFIG}') else # Update ssl config 
JSON_STRING=$(jq -n \ --arg UPDATEDLIST "$NEW_LIST_JSON" \ + --arg CONFIG_YAML "$LOGSTASH_PILLAR_CONFIG_YAML" \ --arg LOGSTASHKEY "$LOGSTASHKEY" \ --arg LOGSTASHCRT "$LOGSTASHCRT" \ --arg LOGSTASHCA "$LOGSTASHCA" \ - '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":"{{ LOGSTASH_CONFIG_YAML }}","ssl": {"certificate": $LOGSTASHCRT,"key": $LOGSTASHKEY,"certificate_authorities":[ $LOGSTASHCA ]}}') + '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":$CONFIG_YAML,"ssl": {"certificate": $LOGSTASHCRT,"key": $LOGSTASHKEY,"certificate_authorities":[ $LOGSTASHCA ]}}') fi fi fi @@ -167,14 +176,14 @@ function update_kafka_outputs() { printf "Failed to query for current Logstash Outputs..." exit 1 fi - - CURRENT_LOGSTASH_ADV_CONFIG=$(jq -r '.item.config_yaml // ""' <<< "$RAW_JSON") - CURRENT_LOGSTASH_ADV_CONFIG_HASH=$(sha256sum <<< "$CURRENT_LOGSTASH_ADV_CONFIG" | awk '{print $1}') - NEW_LOGSTASH_ADV_CONFIG=$'{{ LOGSTASH_CONFIG_YAML }}' - NEW_LOGSTASH_ADV_CONFIG_HASH=$(sha256sum <<< "$NEW_LOGSTASH_ADV_CONFIG" | awk '{print $1}') - - if [ "$CURRENT_LOGSTASH_ADV_CONFIG_HASH" != "$NEW_LOGSTASH_ADV_CONFIG_HASH" ]; then - FORCE_UPDATE=true + # logstash adv config - compare pillar to last state file value + if [[ -f "$LOGSTASH_PILLAR_STATE_FILE" ]]; then + PREVIOUS_LOGSTASH_PILLAR_CONFIG_YAML=$(cat "$LOGSTASH_PILLAR_STATE_FILE") + if [[ "$LOGSTASH_PILLAR_CONFIG_YAML" != "$PREVIOUS_LOGSTASH_PILLAR_CONFIG_YAML" ]]; then + echo "Logstash pillar config has changed - forcing update" + FORCE_UPDATE=true + fi + echo "$LOGSTASH_PILLAR_CONFIG_YAML" > "$LOGSTASH_PILLAR_STATE_FILE" fi # Get the current list of Logstash outputs & hash them From 0b127582cbea904b88f17bf9b4023aeb887dc560 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Wed, 3 Dec 2025 20:49:25 -0600 Subject: [PATCH 47/67] 2.4.200 soup changes --- salt/manager/tools/sbin/soup | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 32553b5c3..885f9b521 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -426,6 +426,7 @@ preupgrade_changes() { [[ "$INSTALLEDVERSION" == 2.4.160 ]] && up_to_2.4.170 [[ "$INSTALLEDVERSION" == 2.4.170 ]] && up_to_2.4.180 [[ "$INSTALLEDVERSION" == 2.4.180 ]] && up_to_2.4.190 + [[ "$INSTALLEDVERSION" == 2.4.190 ]] && up_to_2.4.200 true } @@ -457,6 +458,7 @@ postupgrade_changes() { [[ "$POSTVERSION" == 2.4.160 ]] && post_to_2.4.170 [[ "$POSTVERSION" == 2.4.170 ]] && post_to_2.4.180 [[ "$POSTVERSION" == 2.4.180 ]] && post_to_2.4.190 + [[ "$POSTVERSION" == 2.4.190 ]] && post_to_2.4.200 true } @@ -636,6 +638,11 @@ post_to_2.4.190() { POSTVERSION=2.4.190 } +post_to_2.4.200() { + echo "Nothing to apply" + POSTVERSION=2.4.200 +} + repo_sync() { echo "Sync the local repo." su socore -c '/usr/sbin/so-repo-sync' || fail "Unable to complete so-repo-sync." 
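The hunk that follows adds up_to_2.4.200(), which seeds /opt/so/state/esfleet_logstash_config_pillar; the outputs-update change earlier in this series then uses that file to detect when the pillar-rendered config_yaml has drifted. Condensed, the handshake looks roughly like this (sketch of logic already shown above, not new behavior):

STATE_FILE=/opt/so/state/esfleet_logstash_config_pillar
if [[ -f "$STATE_FILE" ]]; then
    # File exists only after soup has seeded it in up_to_2.4.200
    if [[ "$LOGSTASH_PILLAR_CONFIG_YAML" != "$(cat "$STATE_FILE")" ]]; then
        FORCE_UPDATE=true   # pillar config changed since the last run
    fi
    echo "$LOGSTASH_PILLAR_CONFIG_YAML" > "$STATE_FILE"
fi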
@@ -903,6 +910,12 @@ up_to_2.4.190() { INSTALLEDVERSION=2.4.190 } +up_to_2.4.200() { + touch /opt/so/state/esfleet_logstash_config_pillar + + INSTALLEDVERSION=2.4.200 +} + add_hydra_pillars() { mkdir -p /opt/so/saltstack/local/pillar/hydra touch /opt/so/saltstack/local/pillar/hydra/soc_hydra.sls From 9304513ce82d7afb85ca5b156ef35f3b338bbf47 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Thu, 4 Dec 2025 12:26:13 -0500 Subject: [PATCH 48/67] Add support for suricata rules load status --- salt/suricata/config.sls | 8 +++++++ salt/suricata/cron/so-suricata-rulestats | 30 ++++++++++++++++++++++++ salt/suricata/enabled.sls | 12 ++++++++++ salt/telegraf/defaults.yaml | 4 ++++ salt/telegraf/scripts/surirules.sh | 30 ++++++++++++++++++++++++ 5 files changed, 84 insertions(+) create mode 100644 salt/suricata/cron/so-suricata-rulestats create mode 100644 salt/telegraf/scripts/surirules.sh diff --git a/salt/suricata/config.sls b/salt/suricata/config.sls index 59ae376dc..2a4a051cf 100644 --- a/salt/suricata/config.sls +++ b/salt/suricata/config.sls @@ -178,6 +178,14 @@ so-suricata-eve-clean: - template: jinja - source: salt://suricata/cron/so-suricata-eve-clean +so-suricata-rulestats: + file.managed: + - name: /usr/sbin/so-suricata-rulestats + - user: root + - group: root + - mode: 755 + - source: salt://suricata/cron/so-suricata-rulestats + {% else %} {{sls}}_state_not_allowed: diff --git a/salt/suricata/cron/so-suricata-rulestats b/salt/suricata/cron/so-suricata-rulestats new file mode 100644 index 000000000..95b51c58a --- /dev/null +++ b/salt/suricata/cron/so-suricata-rulestats @@ -0,0 +1,30 @@ +#!/bin/bash +# +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. 
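The cron script introduced below writes rule-load stats to a small JSON file, and the surirules.sh Telegraf check added later in this patch reads it back. End to end, the data looks roughly like this (sample values taken from the comments in surirules.sh):

# /opt/so/log/suricata/rulestats.json on success:
#   {"rules_loaded":45879,"rules_failed":1,"last_reload":"2025-12-04T14:10:57+0000","return":"OK"}
# or, when suricatasc cannot be queried:
#   {"return":"FAIL"}
jq -r '.rules_loaded' /opt/so/log/suricata/rulestats.json   # -> 45879
# surirules.sh then emits InfluxDB line protocol for Telegraf, e.g.:
#   surirules loaded=45879i,failed=1i,reload_time="2025-12-04T14:10:57+0000",status="ok"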
+ +# Query Suricata for ruleset stats and reload time, write to JSON file for Telegraf to consume + +OUTFILE="/opt/so/log/suricata/rulestats.json" +SURICATASC="docker exec so-suricata /opt/suricata/bin/suricatasc" +SOCKET="/var/run/suricata/suricata-command.socket" + +query() { + timeout 10 $SURICATASC -c "$1" "$SOCKET" 2>/dev/null +} + +STATS=$(query "ruleset-stats") +RELOAD=$(query "ruleset-reload-time") + +if echo "$STATS" | jq -e '.return == "OK"' > /dev/null 2>&1; then + LOADED=$(echo "$STATS" | jq -r '.message[0].rules_loaded') + FAILED=$(echo "$STATS" | jq -r '.message[0].rules_failed') + LAST_RELOAD=$(echo "$RELOAD" | jq -r '.message[0].last_reload') + + jq -n --argjson loaded "$LOADED" --argjson failed "$FAILED" --arg reload "$LAST_RELOAD" \ + '{rules_loaded: $loaded, rules_failed: $failed, last_reload: $reload, return: "OK"}' > "$OUTFILE" +else + echo '{"return":"FAIL"}' > "$OUTFILE" +fi diff --git a/salt/suricata/enabled.sls b/salt/suricata/enabled.sls index 1576a0629..ec521abb3 100644 --- a/salt/suricata/enabled.sls +++ b/salt/suricata/enabled.sls @@ -90,6 +90,18 @@ clean_suricata_eve_files: - month: '*' - dayweek: '*' +# Add rulestats cron - runs every minute to query Suricata for rule load status +suricata_rulestats: + cron.present: + - name: /usr/sbin/so-suricata-rulestats > /dev/null 2>&1 + - identifier: suricata_rulestats + - user: root + - minute: '*' + - hour: '*' + - daymonth: '*' + - month: '*' + - dayweek: '*' + {% else %} {{sls}}_state_not_allowed: diff --git a/salt/telegraf/defaults.yaml b/salt/telegraf/defaults.yaml index 79ad9008d..c0a67b0ca 100644 --- a/salt/telegraf/defaults.yaml +++ b/salt/telegraf/defaults.yaml @@ -21,6 +21,7 @@ telegraf: - sostatus.sh - stenoloss.sh - suriloss.sh + - surirules.sh - zeekcaptureloss.sh - zeekloss.sh standalone: @@ -36,6 +37,7 @@ telegraf: - sostatus.sh - stenoloss.sh - suriloss.sh + - surirules.sh - zeekcaptureloss.sh - zeekloss.sh - features.sh @@ -81,6 +83,7 @@ telegraf: - sostatus.sh - stenoloss.sh - suriloss.sh + - surirules.sh - zeekcaptureloss.sh - zeekloss.sh - features.sh @@ -95,6 +98,7 @@ telegraf: - sostatus.sh - stenoloss.sh - suriloss.sh + - surirules.sh - zeekcaptureloss.sh - zeekloss.sh idh: diff --git a/salt/telegraf/scripts/surirules.sh b/salt/telegraf/scripts/surirules.sh new file mode 100644 index 000000000..b38d5df26 --- /dev/null +++ b/salt/telegraf/scripts/surirules.sh @@ -0,0 +1,30 @@ +#!/bin/bash +# +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. + +# Read Suricata ruleset stats from JSON file written by so-suricata-rulestats cron job +# JSON format: {"rules_loaded":45879,"rules_failed":1,"last_reload":"2025-12-04T14:10:57+0000","return":"OK"} +# or on failure: {"return":"FAIL"} + +# if this script isn't already running +if [[ ! 
"`pidof -x $(basename $0) -o %PPID`" ]]; then + + STATSFILE="/var/log/suricata/rulestats.json" + + # Check file exists, is less than 90 seconds old, and has valid data + if [ -f "$STATSFILE" ] && [ $(($(date +%s) - $(stat -c %Y "$STATSFILE"))) -lt 90 ] && jq -e '.return == "OK" and .rules_loaded != null and .rules_failed != null' "$STATSFILE" > /dev/null 2>&1; then + LOADED=$(jq -r '.rules_loaded' "$STATSFILE") + FAILED=$(jq -r '.rules_failed' "$STATSFILE") + RELOAD_TIME=$(jq -r '.last_reload // ""' "$STATSFILE") + + echo "surirules loaded=${LOADED}i,failed=${FAILED}i,reload_time=\"${RELOAD_TIME}\",status=\"ok\"" + else + echo "surirules loaded=0i,failed=0i,reload_time=\"\",status=\"unknown\"" + fi + +fi + +exit 0 From dba087ae256db4b3e58e164809d02faf31c553e9 Mon Sep 17 00:00:00 2001 From: Josh Brower Date: Fri, 5 Dec 2025 09:43:31 -0500 Subject: [PATCH 49/67] Update version from 2.4.0-delta to 2.4.200 --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index 09e15369f..86df31761 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.4.0-delta +2.4.200 From b7ad985c7a2bd7c671b9b9372f449ff5f9cb81b7 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Fri, 5 Dec 2025 09:48:46 -0500 Subject: [PATCH 50/67] Add cron.abset --- salt/suricata/config.sls | 2 +- salt/suricata/disabled.sls | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/salt/suricata/config.sls b/salt/suricata/config.sls index 2a4a051cf..b6796031f 100644 --- a/salt/suricata/config.sls +++ b/salt/suricata/config.sls @@ -124,7 +124,7 @@ surirulesync: - name: /opt/so/rules/suricata/ - source: salt://suricata/rules/ - user: 940 - - group: 940 + - group: 939 - show_changes: False surilogscript: diff --git a/salt/suricata/disabled.sls b/salt/suricata/disabled.sls index 49f8f93bf..e7a75867f 100644 --- a/salt/suricata/disabled.sls +++ b/salt/suricata/disabled.sls @@ -23,6 +23,11 @@ clean_suricata_eve_files: cron.absent: - identifier: clean_suricata_eve_files +# Remove rulestats cron +rulestats: + cron.absent: + - identifier: suricata_rulestats + {% else %} {{sls}}_state_not_allowed: From 3f9a9b7019bd1fb456d9c39efdbc16652214253c Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Fri, 5 Dec 2025 10:23:24 -0500 Subject: [PATCH 51/67] tweak threshold --- salt/suricata/config.sls | 1 - salt/suricata/files/threshold.conf | 2 ++ 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 salt/suricata/files/threshold.conf diff --git a/salt/suricata/config.sls b/salt/suricata/config.sls index b6796031f..e0b85b7e7 100644 --- a/salt/suricata/config.sls +++ b/salt/suricata/config.sls @@ -160,7 +160,6 @@ surithresholding: - source: salt://suricata/files/threshold.conf - user: 940 - group: 940 - - onlyif: salt://suricata/files/threshold.conf suriclassifications: file.managed: diff --git a/salt/suricata/files/threshold.conf b/salt/suricata/files/threshold.conf new file mode 100644 index 000000000..a03ac31a3 --- /dev/null +++ b/salt/suricata/files/threshold.conf @@ -0,0 +1,2 @@ +# Threshold configuration generated by Security Onion +# This file is automatically generated - do not edit manually \ No newline at end of file From a6b19c4a6c5110860144108a3482c624b359dcb2 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Fri, 5 Dec 2025 12:13:05 -0500 Subject: [PATCH 52/67] Remove idstools config from manager pillar file --- salt/manager/tools/sbin/soup | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 
2ffed6af3..c439d2b89 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -1299,8 +1299,10 @@ rm /opt/so/saltstack/local/salt/suricata/rules rm -rf /usr/sbin/so-idstools* sed -i '/^#\?so-idstools$/d' /opt/so/conf/so-status/so-status.conf -# Backup the salt master config before editing it -cp /etc/salt/master /nsm/backup/detections-migration/2-4-200 +# Backup the salt master config & manager pillar before editing it +cp /opt/so/saltstack/local/pillar/minions/$MINIONID.sls /nsm/backup/detections-migration/2-4-200/ +cp /etc/salt/master /nsm/backup/detections-migration/2-4-200/ +so-yaml.py remove /opt/so/saltstack/local/pillar/minions/$MINIONID.sls idstools so-yaml.py removelistitem /etc/salt/master file_roots.base /opt/so/rules/nids } From 271f545f4f8d2cbbf4fe2bb324126d35db2d455b Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Sat, 6 Dec 2025 15:26:44 -0500 Subject: [PATCH 53/67] Fixup Airgap --- salt/manager/init.sls | 2 +- salt/manager/tools/sbin/soup | 2 +- salt/soc/defaults.yaml | 7 +++--- salt/soc/merged.map.jinja | 44 +++++++++++++++++++++++++----------- 4 files changed, 36 insertions(+), 19 deletions(-) diff --git a/salt/manager/init.sls b/salt/manager/init.sls index cf97a6f0b..7148ea16e 100644 --- a/salt/manager/init.sls +++ b/salt/manager/init.sls @@ -214,7 +214,7 @@ git_config_set_safe_dirs: surinsmrulesdir: file.directory: - - name: /nsm/rules/suricata + - name: /nsm/rules/suricata/etopen - user: 939 - group: 939 - makedirs: True diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index c29ad6345..5635a41d9 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -1355,7 +1355,7 @@ unmount_update() { update_airgap_rules() { # Copy the rules over to update them for airgap. - rsync -a $UPDATE_DIR/agrules/suricata/* /nsm/rules/suricata/ + rsync -a --delete $UPDATE_DIR/agrules/suricata/ /nsm/rules/suricata/etopen/ rsync -a $UPDATE_DIR/agrules/detect-sigma/* /nsm/rules/detect-sigma/ rsync -a $UPDATE_DIR/agrules/detect-yara/* /nsm/rules/detect-yara/ # Copy the securityonion-resorces repo over for SOC Detection Summaries and checkout the published summaries branch diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index ec0937a4f..bd6538e2d 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1622,12 +1622,11 @@ soc: sourceType: directory airgap: - name: Emerging-Threats - description: "Emerging Threats ruleset - To enable ET Pro, enter your license key below. Leave empty for ET Open (free) rules." 
+ description: "Emerging Threats ruleset - To enable ET Pro on Airgap, review the documentation at https://docs.securityonion.net/suricata" licenseKey: "" enabled: true - sourceType: url - sourcePath: 'https://rules.emergingthreats.net/open/suricata/emerging.rules.tar.gz' - urlHash: "https://rules.emergingthreats.net/open/suricata/emerging.rules.tar.gz.md5" + sourceType: directory + sourcePath: /nsm/rules/suricata/etopen/ license: "BSD" excludeFiles: - "*deleted*" diff --git a/salt/soc/merged.map.jinja b/salt/soc/merged.map.jinja index e1532462c..4c301fa9d 100644 --- a/salt/soc/merged.map.jinja +++ b/salt/soc/merged.map.jinja @@ -108,21 +108,39 @@ {% if ruleset.name == 'Emerging-Threats' %} {% if ruleset.licenseKey and ruleset.licenseKey != '' %} {# License key is defined - transform to ETPRO #} -{# Engine Version is hardcoded in the URL - this does not change often: https://community.emergingthreats.net/t/supported-engines/71 #} -{% do ruleset.update({ - 'name': 'ETPRO', - 'sourcePath': 'https://rules.emergingthreatspro.com/' ~ ruleset.licenseKey ~ '/suricata-7.0.3/etpro.rules.tar.gz', - 'urlHash': 'https://rules.emergingthreatspro.com/' ~ ruleset.licenseKey ~ '/suricata-7.0.3/etpro.rules.tar.gz.md5', - 'license': 'Commercial' - }) %} +{% if ruleset.sourceType == 'directory' %} +{# Airgap mode - update directory path #} +{% do ruleset.update({ + 'name': 'ETPRO', + 'sourcePath': '/nsm/rules/custom-local-repos/local-etpro-suricata/etpro.rules.tar.gz', + 'license': 'Commercial' + }) %} +{% else %} +{# Engine Version is hardcoded in the URL - this does not change often: https://community.emergingthreats.net/t/supported-engines/71 #} +{% do ruleset.update({ + 'name': 'ETPRO', + 'sourcePath': 'https://rules.emergingthreatspro.com/' ~ ruleset.licenseKey ~ '/suricata-7.0.3/etpro.rules.tar.gz', + 'urlHash': 'https://rules.emergingthreatspro.com/' ~ ruleset.licenseKey ~ '/suricata-7.0.3/etpro.rules.tar.gz.md5', + 'license': 'Commercial' + }) %} +{% endif %} {% else %} {# No license key - explicitly set to ETOPEN #} -{% do ruleset.update({ - 'name': 'ETOPEN', - 'sourcePath': 'https://rules.emergingthreats.net/open/suricata-7.0.3/emerging.rules.tar.gz', - 'urlHash': 'https://rules.emergingthreats.net/open/suricata-7.0.3/emerging.rules.tar.gz.md5', - 'license': 'BSD' - }) %} +{% if ruleset.sourceType == 'directory' %} +{# Airgap mode - update directory path #} +{% do ruleset.update({ + 'name': 'ETOPEN', + 'sourcePath': '/nsm/rules/suricata/etopen/', + 'license': 'BSD' + }) %} +{% else %} +{% do ruleset.update({ + 'name': 'ETOPEN', + 'sourcePath': 'https://rules.emergingthreats.net/open/suricata-7.0.3/emerging.rules.tar.gz', + 'urlHash': 'https://rules.emergingthreats.net/open/suricata-7.0.3/emerging.rules.tar.gz.md5', + 'license': 'BSD' + }) %} +{% endif %} {% endif %} {% endif %} {% endfor %} From 0f42233092348b8277b1ce1f79c2c06fa24be070 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Sun, 7 Dec 2025 16:13:55 -0500 Subject: [PATCH 54/67] Make sure local salt dir is created --- salt/manager/tools/sbin/soup | 3 +++ 1 file changed, 3 insertions(+) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 5635a41d9..0d2f94b8d 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -1116,6 +1116,9 @@ cat > /opt/so/conf/soc/fingerprints/suricataengine.syncBlock << EOF Suricata ruleset sync is blocked until this file is removed. 
Make sure that you have manually added any custom Suricata rulesets via SOC config - review the documentation for more details: securityonion.net/docs EOF +# Create salt local rules dir +install -d -o 939 -g 939 /opt/so/saltstack/local/salt/suricata/rules/ + # Backup custom rules & overrides mkdir -p /nsm/backup/detections-migration/2-4-200 cp /usr/sbin/so-rule-update /nsm/backup/detections-migration/2-4-200 From 0ff8fa57e717aa8bc9df4496017f9930e8a6b2c5 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Mon, 8 Dec 2025 10:29:24 -0500 Subject: [PATCH 55/67] be more verbose --- salt/manager/tools/sbin/soup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 0d2f94b8d..44fd54ead 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -1117,7 +1117,7 @@ Suricata ruleset sync is blocked until this file is removed. Make sure that you EOF # Create salt local rules dir -install -d -o 939 -g 939 /opt/so/saltstack/local/salt/suricata/rules/ +install -d -o 939 -g 939 /opt/so/saltstack/local/salt/suricata/rules/ || echo "Failed to create Suricata local rules directory" # Backup custom rules & overrides mkdir -p /nsm/backup/detections-migration/2-4-200 From 72c8c2371ef70aa8c9918b84cb5f8cacfb27f404 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Mon, 8 Dec 2025 12:39:30 -0500 Subject: [PATCH 56/67] Rework ordering --- salt/manager/tools/sbin/soup | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 44fd54ead..e7784fe4f 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -1116,7 +1116,8 @@ cat > /opt/so/conf/soc/fingerprints/suricataengine.syncBlock << EOF Suricata ruleset sync is blocked until this file is removed. Make sure that you have manually added any custom Suricata rulesets via SOC config - review the documentation for more details: securityonion.net/docs EOF -# Create salt local rules dir +# Remove possible symlink & create salt local rules dir +[ -L /opt/so/saltstack/local/salt/suricata/rules ] && rm -f /opt/so/saltstack/local/salt/suricata/rules install -d -o 939 -g 939 /opt/so/saltstack/local/salt/suricata/rules/ || echo "Failed to create Suricata local rules directory" # Backup custom rules & overrides @@ -1300,7 +1301,6 @@ if [ -n "$(docker ps -q -f name=^so-idstools$)" ]; then fi echo "Removing idstools symlink and scripts..." -rm /opt/so/saltstack/local/salt/suricata/rules rm -rf /usr/sbin/so-idstools* sed -i '/^#\?so-idstools$/d' /opt/so/conf/so-status/so-status.conf From db45ce07ed1d54990ba48d6310eb44bc4afa0e7e Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Mon, 8 Dec 2025 16:26:45 -0500 Subject: [PATCH 57/67] Modify model display names and remove GPT-OSS 120B Updated display names for models and removed GPT-OSS 120B. 
--- salt/soc/defaults.yaml | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index bd6538e2d..c1ae2579c 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -2653,25 +2653,19 @@ soc: thresholdColorRatioMax: 1 availableModels: - id: sonnet-4 - displayName: Claude Sonnet 4 + displayName: Claude Sonnet 4 ($$) contextLimitSmall: 200000 contextLimitLarge: 1000000 lowBalanceColorAlert: 500000 enabled: true - id: sonnet-4.5 - displayName: Claude Sonnet 4.5 + displayName: Claude Sonnet 4.5 ($$$) contextLimitSmall: 200000 contextLimitLarge: 1000000 lowBalanceColorAlert: 500000 enabled: true - - id: gptoss-120b - displayName: GPT-OSS 120B - contextLimitSmall: 128000 - contextLimitLarge: 128000 - lowBalanceColorAlert: 500000 - enabled: true - id: qwen-235b - displayName: QWEN 235B + displayName: QWEN 235B ($) contextLimitSmall: 256000 contextLimitLarge: 256000 lowBalanceColorAlert: 500000 From eec3373ae73a5c057f520b70249fda6d8f8c5f20 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Mon, 8 Dec 2025 16:30:50 -0500 Subject: [PATCH 58/67] Update display name for Claude Sonnet 4 --- salt/soc/defaults.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index c1ae2579c..2c298eb83 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -2653,7 +2653,7 @@ soc: thresholdColorRatioMax: 1 availableModels: - id: sonnet-4 - displayName: Claude Sonnet 4 ($$) + displayName: Claude Sonnet 4 ($$$) contextLimitSmall: 200000 contextLimitLarge: 1000000 lowBalanceColorAlert: 500000 From 03dd746601ca80f23cb874e8908b066331738f2d Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Mon, 8 Dec 2025 16:34:19 -0500 Subject: [PATCH 59/67] Add origin field to model configurations --- salt/soc/defaults.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 2c298eb83..2de9aed2d 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -2654,18 +2654,21 @@ soc: availableModels: - id: sonnet-4 displayName: Claude Sonnet 4 ($$$) + origin: USA contextLimitSmall: 200000 contextLimitLarge: 1000000 lowBalanceColorAlert: 500000 enabled: true - id: sonnet-4.5 displayName: Claude Sonnet 4.5 ($$$) + origin: USA contextLimitSmall: 200000 contextLimitLarge: 1000000 lowBalanceColorAlert: 500000 enabled: true - id: qwen-235b displayName: QWEN 235B ($) + origin: China contextLimitSmall: 256000 contextLimitLarge: 256000 lowBalanceColorAlert: 500000 From 94694d394e0fe8b4932e236988e23fbaaf3111f8 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Mon, 8 Dec 2025 16:36:09 -0500 Subject: [PATCH 60/67] Add origin field to model training configuration --- salt/soc/soc_soc.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/salt/soc/soc_soc.yaml b/salt/soc/soc_soc.yaml index ff13922fb..cffcecaa1 100644 --- a/salt/soc/soc_soc.yaml +++ b/salt/soc/soc_soc.yaml @@ -696,6 +696,9 @@ soc: - field: displayName label: Display Name required: True + - field: origin + label: Country of Origin for the Model Training + required: false - field: contextLimitSmall label: Context Limit (Small) forcedType: int From 72a4ba405f37a655c78d7a0cef373d3e404c017f Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Mon, 8 Dec 2025 16:45:40 -0500 Subject: [PATCH 61/67] match correct custom ruleset name --- salt/soc/merged.map.jinja | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/soc/merged.map.jinja b/salt/soc/merged.map.jinja 
index 4c301fa9d..1d9946a05 100644 --- a/salt/soc/merged.map.jinja +++ b/salt/soc/merged.map.jinja @@ -70,7 +70,7 @@ {# Define the Detections custom ruleset that should always be present #} {% set CUSTOM_RULESET = { - 'name': 'custom', + 'name': '__custom__', 'description': 'User-created custom rules created via the Detections module in the SOC UI', 'sourceType': 'elasticsearch', 'sourcePath': 'so_detection.ruleset:__custom__', From e105bd12e6f95b60b7f07905013aa4dcc29d49eb Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Tue, 9 Dec 2025 09:49:27 -0500 Subject: [PATCH 62/67] Fix custom name --- salt/soc/merged.map.jinja | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/soc/merged.map.jinja b/salt/soc/merged.map.jinja index 1d9946a05..349937983 100644 --- a/salt/soc/merged.map.jinja +++ b/salt/soc/merged.map.jinja @@ -83,7 +83,7 @@ {# Always append the custom ruleset to suricataengine.rulesetSources if not already present #} {% if SOCMERGED.config.server.modules.suricataengine is defined and SOCMERGED.config.server.modules.suricataengine.rulesetSources is defined %} {% if SOCMERGED.config.server.modules.suricataengine.rulesetSources is not mapping %} -{% set custom_names = SOCMERGED.config.server.modules.suricataengine.rulesetSources | selectattr('name', 'equalto', 'custom') | list %} +{% set custom_names = SOCMERGED.config.server.modules.suricataengine.rulesetSources | selectattr('name', 'equalto', '__custom__') | list %} {% if custom_names | length == 0 %} {% do SOCMERGED.config.server.modules.suricataengine.rulesetSources.append(CUSTOM_RULESET) %} {% endif %} From dc945dad00ec592b19d05de295496d01c3fb35ad Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 9 Dec 2025 11:00:53 -0500 Subject: [PATCH 63/67] Remove Claude Sonnet 4 model configuration Removed configuration for Claude Sonnet 4 model. --- salt/soc/defaults.yaml | 6 ------ 1 file changed, 6 deletions(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index bd6538e2d..a25ff5d83 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -2652,12 +2652,6 @@ soc: thresholdColorRatioMed: 0.75 thresholdColorRatioMax: 1 availableModels: - - id: sonnet-4 - displayName: Claude Sonnet 4 - contextLimitSmall: 200000 - contextLimitLarge: 1000000 - lowBalanceColorAlert: 500000 - enabled: true - id: sonnet-4.5 displayName: Claude Sonnet 4.5 contextLimitSmall: 200000 From 8ef6c2f91df61bdfb7fc85fc9bcf68b441700302 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Wed, 10 Dec 2025 15:19:44 -0500 Subject: [PATCH 64/67] small fixes --- salt/manager/tools/sbin/soup | 3 ++- salt/soc/soc_soc.yaml | 12 ++++++++++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index e7784fe4f..9fd9542c0 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -1113,7 +1113,7 @@ suricata_idstools_removal_pre() { install -d -o 939 -g 939 -m 755 /opt/so/conf/soc/fingerprints install -o 939 -g 939 -m 644 /dev/null /opt/so/conf/soc/fingerprints/suricataengine.syncBlock cat > /opt/so/conf/soc/fingerprints/suricataengine.syncBlock << EOF -Suricata ruleset sync is blocked until this file is removed. Make sure that you have manually added any custom Suricata rulesets via SOC config - review the documentation for more details: securityonion.net/docs +Suricata ruleset sync is blocked until this file is removed. 
**CRITICAL** Make sure that you have manually added any custom Suricata rulesets via SOC config before removing this file - review the documentation for more details: https://docs.securityonion.net/en/2.4/nids.html#sync-block EOF # Remove possible symlink & create salt local rules dir @@ -1131,6 +1131,7 @@ if [[ -f /opt/so/conf/soc/so-detections-backup.py ]]; then # Verify backup by comparing counts echo "Verifying detection overrides backup..." es_override_count=$(/sbin/so-elasticsearch-query 'so-detection/_count' \ + --retry 5 --retry-delay 10 --retry-all-errors \ -d '{"query": {"bool": {"must": [{"exists": {"field": "so_detection.overrides"}}]}}}' | jq -r '.count') || { echo " Error: Failed to query Elasticsearch for override count" exit 1 diff --git a/salt/soc/soc_soc.yaml b/salt/soc/soc_soc.yaml index ff13922fb..e4d2dc225 100644 --- a/salt/soc/soc_soc.yaml +++ b/salt/soc/soc_soc.yaml @@ -608,6 +608,18 @@ soc: label: Delete Unreferenced (Deletes rules that are no longer referenced by ruleset source) forcedType: bool required: False + - field: proxyURL + label: HTTP/HTTPS proxy URL for downloading the ruleset. + required: False + - field: proxyUsername + label: Proxy authentication username. + required: False + - field: proxyPassword + label: Proxy authentication password. + required: False + - field: proxyCACert + label: Path to CA certificate file for MITM proxy verification. + required: False airgap: *serulesetSources navigator: intervalMinutes: From 5ab6bda639175ad2d0398c494dca1c5633419d79 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Wed, 10 Dec 2025 17:16:35 -0500 Subject: [PATCH 65/67] Fixup logic --- salt/suricata/cron/so-suricata-rulestats | 21 +++++++++++++++------ salt/telegraf/scripts/surirules.sh | 10 +++++++--- 2 files changed, 22 insertions(+), 9 deletions(-) diff --git a/salt/suricata/cron/so-suricata-rulestats b/salt/suricata/cron/so-suricata-rulestats index 95b51c58a..459ab894f 100644 --- a/salt/suricata/cron/so-suricata-rulestats +++ b/salt/suricata/cron/so-suricata-rulestats @@ -17,14 +17,23 @@ query() { STATS=$(query "ruleset-stats") RELOAD=$(query "ruleset-reload-time") +[ -z "$RELOAD" ] && RELOAD='{}' -if echo "$STATS" | jq -e '.return == "OK"' > /dev/null 2>&1; then - LOADED=$(echo "$STATS" | jq -r '.message[0].rules_loaded') - FAILED=$(echo "$STATS" | jq -r '.message[0].rules_failed') - LAST_RELOAD=$(echo "$RELOAD" | jq -r '.message[0].last_reload') +# Outputs valid JSON on success, empty on failure +OUTPUT=$(jq -n \ + --argjson stats "$STATS" \ + --argjson reload "$RELOAD" \ + 'if $stats.return == "OK" and ($stats.message[0].rules_loaded | type) == "number" and ($stats.message[0].rules_failed | type) == "number" then + { + rules_loaded: $stats.message[0].rules_loaded, + rules_failed: $stats.message[0].rules_failed, + last_reload: ($reload.message[0].last_reload // ""), + return: "OK" + } + else empty end' 2>/dev/null) - jq -n --argjson loaded "$LOADED" --argjson failed "$FAILED" --arg reload "$LAST_RELOAD" \ - '{rules_loaded: $loaded, rules_failed: $failed, last_reload: $reload, return: "OK"}' > "$OUTFILE" +if [ -n "$OUTPUT" ]; then + echo "$OUTPUT" > "$OUTFILE" else echo '{"return":"FAIL"}' > "$OUTFILE" fi diff --git a/salt/telegraf/scripts/surirules.sh b/salt/telegraf/scripts/surirules.sh index b38d5df26..f4c6885e1 100644 --- a/salt/telegraf/scripts/surirules.sh +++ b/salt/telegraf/scripts/surirules.sh @@ -18,11 +18,15 @@ if [[ ! 
"`pidof -x $(basename $0) -o %PPID`" ]]; then if [ -f "$STATSFILE" ] && [ $(($(date +%s) - $(stat -c %Y "$STATSFILE"))) -lt 90 ] && jq -e '.return == "OK" and .rules_loaded != null and .rules_failed != null' "$STATSFILE" > /dev/null 2>&1; then LOADED=$(jq -r '.rules_loaded' "$STATSFILE") FAILED=$(jq -r '.rules_failed' "$STATSFILE") - RELOAD_TIME=$(jq -r '.last_reload // ""' "$STATSFILE") + RELOAD_TIME=$(jq -r 'if .last_reload then .last_reload else "" end' "$STATSFILE") - echo "surirules loaded=${LOADED}i,failed=${FAILED}i,reload_time=\"${RELOAD_TIME}\",status=\"ok\"" + if [ -n "$RELOAD_TIME" ]; then + echo "surirules loaded=${LOADED}i,failed=${FAILED}i,reload_time=\"${RELOAD_TIME}\",status=\"ok\"" + else + echo "surirules loaded=${LOADED}i,failed=${FAILED}i,status=\"ok\"" + fi else - echo "surirules loaded=0i,failed=0i,reload_time=\"\",status=\"unknown\"" + echo "surirules loaded=0i,failed=0i,status=\"unknown\"" fi fi From a945768251b434a14e90dc8010d6f51eefad1e47 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Thu, 11 Dec 2025 11:15:30 -0500 Subject: [PATCH 66/67] Refactor backup --- salt/elasticsearch/defaults.yaml | 13 ++++ salt/manager/tools/sbin/soup | 56 ++++++++--------- salt/soc/files/soc/so-detections-backup.py | 72 +++++++++++++--------- 3 files changed, 82 insertions(+), 59 deletions(-) diff --git a/salt/elasticsearch/defaults.yaml b/salt/elasticsearch/defaults.yaml index 5cfb9a0e0..c9f77aa7d 100644 --- a/salt/elasticsearch/defaults.yaml +++ b/salt/elasticsearch/defaults.yaml @@ -299,6 +299,19 @@ elasticsearch: hot: actions: {} min_age: 0ms + sos-backup: + index_sorting: false + index_template: + composed_of: [] + ignore_missing_component_templates: [] + index_patterns: + - sos-backup-* + priority: 501 + template: + settings: + index: + number_of_replicas: 0 + number_of_shards: 1 so-assistant-chat: index_sorting: false index_template: diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 9fd9542c0..5029f28c3 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -1125,40 +1125,35 @@ mkdir -p /nsm/backup/detections-migration/2-4-200 cp /usr/sbin/so-rule-update /nsm/backup/detections-migration/2-4-200 cp /opt/so/conf/idstools/etc/rulecat.conf /nsm/backup/detections-migration/2-4-200 -if [[ -f /opt/so/conf/soc/so-detections-backup.py ]]; then - python3 /opt/so/conf/soc/so-detections-backup.py +# Backup so-detection index via reindex +echo "Creating sos-backup index template..." +template_result=$(/sbin/so-elasticsearch-query '_index_template/sos-backup' -X PUT \ + --retry 5 --retry-delay 15 --retry-all-errors \ + -d '{"index_patterns":["sos-backup-*"],"priority":501,"template":{"settings":{"index":{"number_of_replicas":0,"number_of_shards":1}}}}') - # Verify backup by comparing counts - echo "Verifying detection overrides backup..." - es_override_count=$(/sbin/so-elasticsearch-query 'so-detection/_count' \ - --retry 5 --retry-delay 10 --retry-all-errors \ - -d '{"query": {"bool": {"must": [{"exists": {"field": "so_detection.overrides"}}]}}}' | jq -r '.count') || { - echo " Error: Failed to query Elasticsearch for override count" - exit 1 - } +if [[ -z "$template_result" ]] || ! echo "$template_result" | jq -e '.acknowledged == true' > /dev/null 2>&1; then + echo "Error: Failed to create sos-backup index template" + echo "$template_result" + exit 1 +fi - if [[ ! 
"$es_override_count" =~ ^[0-9]+$ ]]; then - echo " Error: Invalid override count from Elasticsearch: '$es_override_count'" - exit 1 - fi +BACKUP_INDEX="sos-backup-detection-$(date +%Y%m%d-%H%M%S)" +echo "Backing up so-detection index to $BACKUP_INDEX..." +reindex_result=$(/sbin/so-elasticsearch-query '_reindex?wait_for_completion=true' \ + --retry 5 --retry-delay 15 --retry-all-errors \ + -X POST -d "{\"source\": {\"index\": \"so-detection\"}, \"dest\": {\"index\": \"$BACKUP_INDEX\"}}") - backup_override_count=$(find /nsm/backup/detections/repo/*/overrides -type f 2>/dev/null | wc -l) - - echo " Elasticsearch overrides: $es_override_count" - echo " Backed up overrides: $backup_override_count" - - if [[ "$es_override_count" -gt 0 ]]; then - if [[ "$backup_override_count" -gt 0 ]]; then - echo " Override backup verified successfully" - else - echo " Error: Elasticsearch has $es_override_count overrides but backup has 0 files" - exit 1 - fi - else - echo " No overrides to backup" - fi +if [[ -z "$reindex_result" ]]; then + echo "Error: Backup of detections failed - no response from Elasticsearch" + exit 1 +elif echo "$reindex_result" | jq -e '.created >= 0' > /dev/null 2>&1; then + echo "Backup complete: $(echo "$reindex_result" | jq -r '.created') documents copied" +elif echo "$reindex_result" | grep -q "index_not_found_exception"; then + echo "so-detection index does not exist, skipping backup" else - echo "SOC Detections backup script not found, skipping detection backup" + echo "Error: Backup of detections failed" + echo "$reindex_result" + exit 1 fi } @@ -1304,6 +1299,7 @@ fi echo "Removing idstools symlink and scripts..." rm -rf /usr/sbin/so-idstools* sed -i '/^#\?so-idstools$/d' /opt/so/conf/so-status/so-status.conf +crontab -l | grep -v 'so-rule-update' | crontab - # Backup the salt master config & manager pillar before editing it cp /opt/so/saltstack/local/pillar/minions/$MINIONID.sls /nsm/backup/detections-migration/2-4-200/ diff --git a/salt/soc/files/soc/so-detections-backup.py b/salt/soc/files/soc/so-detections-backup.py index 085b1e4c7..0300c15f2 100644 --- a/salt/soc/files/soc/so-detections-backup.py +++ b/salt/soc/files/soc/so-detections-backup.py @@ -6,6 +6,7 @@ # This script queries Elasticsearch for Custom Detections and all Overrides, # and git commits them to disk at $OUTPUT_DIR +import argparse import os import subprocess import json @@ -18,10 +19,10 @@ from datetime import datetime urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) # Constants -ES_URL = "https://localhost:9200/so-detection/_search" +DEFAULT_INDEX = "so-detection" +DEFAULT_OUTPUT_DIR = "/nsm/backup/detections/repo" QUERY_DETECTIONS = '{"query": {"bool": {"must": [{"match_all": {}}, {"term": {"so_detection.ruleset": "__custom__"}}]}},"size": 10000}' QUERY_OVERRIDES = '{"query": {"bool": {"must": [{"exists": {"field": "so_detection.overrides"}}]}},"size": 10000}' -OUTPUT_DIR = "/nsm/backup/detections/repo" AUTH_FILE = "/opt/so/conf/elasticsearch/curl.config" def get_auth_credentials(auth_file): @@ -30,9 +31,10 @@ def get_auth_credentials(auth_file): if line.startswith('user ='): return line.split('=', 1)[1].strip().replace('"', '') -def query_elasticsearch(query, auth): +def query_elasticsearch(query, auth, index): + url = f"https://localhost:9200/{index}/_search" headers = {"Content-Type": "application/json"} - response = requests.get(ES_URL, headers=headers, data=query, auth=auth, verify=False) + response = requests.get(url, headers=headers, data=query, auth=auth, verify=False) 
response.raise_for_status() return response.json() @@ -47,12 +49,12 @@ def save_content(hit, base_folder, subfolder="", extension="txt"): f.write(content) return file_path -def save_overrides(hit): +def save_overrides(hit, output_dir): so_detection = hit["_source"]["so_detection"] public_id = so_detection["publicId"] overrides = so_detection["overrides"] language = so_detection["language"] - folder = os.path.join(OUTPUT_DIR, language, "overrides") + folder = os.path.join(output_dir, language, "overrides") os.makedirs(folder, exist_ok=True) extension = "yaml" if language == "sigma" else "txt" file_path = os.path.join(folder, f"{public_id}.{extension}") @@ -60,20 +62,20 @@ def save_overrides(hit): f.write('\n'.join(json.dumps(override) for override in overrides) if isinstance(overrides, list) else overrides) return file_path -def ensure_git_repo(): - if not os.path.isdir(os.path.join(OUTPUT_DIR, '.git')): +def ensure_git_repo(output_dir): + if not os.path.isdir(os.path.join(output_dir, '.git')): subprocess.run(["git", "config", "--global", "init.defaultBranch", "main"], check=True) - subprocess.run(["git", "-C", OUTPUT_DIR, "init"], check=True) - subprocess.run(["git", "-C", OUTPUT_DIR, "remote", "add", "origin", "default"], check=True) + subprocess.run(["git", "-C", output_dir, "init"], check=True) + subprocess.run(["git", "-C", output_dir, "remote", "add", "origin", "default"], check=True) -def commit_changes(): - ensure_git_repo() - subprocess.run(["git", "-C", OUTPUT_DIR, "config", "user.email", "securityonion@local.invalid"], check=True) - subprocess.run(["git", "-C", OUTPUT_DIR, "config", "user.name", "securityonion"], check=True) - subprocess.run(["git", "-C", OUTPUT_DIR, "add", "."], check=True) - status_result = subprocess.run(["git", "-C", OUTPUT_DIR, "status"], capture_output=True, text=True) +def commit_changes(output_dir): + ensure_git_repo(output_dir) + subprocess.run(["git", "-C", output_dir, "config", "user.email", "securityonion@local.invalid"], check=True) + subprocess.run(["git", "-C", output_dir, "config", "user.name", "securityonion"], check=True) + subprocess.run(["git", "-C", output_dir, "add", "."], check=True) + status_result = subprocess.run(["git", "-C", output_dir, "status"], capture_output=True, text=True) print(status_result.stdout) - commit_result = subprocess.run(["git", "-C", OUTPUT_DIR, "commit", "-m", "Update detections and overrides"], check=False, capture_output=True) + commit_result = subprocess.run(["git", "-C", output_dir, "commit", "-m", "Update detections and overrides"], check=False, capture_output=True) if commit_result.returncode == 1: print("No changes to commit.") elif commit_result.returncode == 0: @@ -81,29 +83,41 @@ def commit_changes(): else: commit_result.check_returncode() +def parse_args(): + parser = argparse.ArgumentParser(description="Backup custom detections and overrides from Elasticsearch") + parser.add_argument("--output", "-o", default=DEFAULT_OUTPUT_DIR, + help=f"Output directory for backups (default: {DEFAULT_OUTPUT_DIR})") + parser.add_argument("--index", "-i", default=DEFAULT_INDEX, + help=f"Elasticsearch index to query (default: {DEFAULT_INDEX})") + return parser.parse_args() + def main(): + args = parse_args() + output_dir = args.output + index = args.index + try: timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S") - print(f"Backing up Custom Detections and all Overrides to {OUTPUT_DIR} - {timestamp}\n") - - os.makedirs(OUTPUT_DIR, exist_ok=True) + print(f"Backing up Custom Detections and all Overrides to {output_dir} - 
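With the argparse flags added in this patch, the backup script can be pointed at a different index or output directory instead of the hard-coded defaults. The second invocation below is hypothetical (it assumes a sos-backup-detection-<timestamp> index of the kind soup now creates, and a scratch output path):

# Default invocation, same behavior as before the refactor
python3 /opt/so/conf/soc/so-detections-backup.py
# Hypothetical: export custom detections/overrides from a backup copy into a scratch repo
python3 /opt/so/conf/soc/so-detections-backup.py --index sos-backup-detection-20251211-120000 --output /tmp/detections-check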
{timestamp}\n") + + os.makedirs(output_dir, exist_ok=True) auth_credentials = get_auth_credentials(AUTH_FILE) username, password = auth_credentials.split(':', 1) auth = HTTPBasicAuth(username, password) - + # Query and save custom detections - detections = query_elasticsearch(QUERY_DETECTIONS, auth)["hits"]["hits"] + detections = query_elasticsearch(QUERY_DETECTIONS, auth, index)["hits"]["hits"] for hit in detections: - save_content(hit, OUTPUT_DIR, hit["_source"]["so_detection"]["language"], "yaml" if hit["_source"]["so_detection"]["language"] == "sigma" else "txt") - + save_content(hit, output_dir, hit["_source"]["so_detection"]["language"], "yaml" if hit["_source"]["so_detection"]["language"] == "sigma" else "txt") + # Query and save overrides - overrides = query_elasticsearch(QUERY_OVERRIDES, auth)["hits"]["hits"] + overrides = query_elasticsearch(QUERY_OVERRIDES, auth, index)["hits"]["hits"] for hit in overrides: - save_overrides(hit) - - commit_changes() - + save_overrides(hit, output_dir) + + commit_changes(output_dir) + timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S") print(f"Backup Completed - {timestamp}") except Exception as e: From cb9a6fac258cbcc7428773554eb53c866b9514a1 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Thu, 11 Dec 2025 12:14:37 -0500 Subject: [PATCH 67/67] Update tests for rework --- .../files/soc/so-detections-backup_test.py | 56 ++++++++++--------- 1 file changed, 31 insertions(+), 25 deletions(-) diff --git a/salt/soc/files/soc/so-detections-backup_test.py b/salt/soc/files/soc/so-detections-backup_test.py index 3afa11886..4cdc9fa36 100644 --- a/salt/soc/files/soc/so-detections-backup_test.py +++ b/salt/soc/files/soc/so-detections-backup_test.py @@ -57,12 +57,12 @@ class TestBackupScript(unittest.TestCase): mock_response.json.return_value = {'hits': {'hits': []}} mock_response.raise_for_status = MagicMock() mock_get.return_value = mock_response - - response = ds.query_elasticsearch(ds.QUERY_DETECTIONS, self.auth) - + + response = ds.query_elasticsearch(ds.QUERY_DETECTIONS, self.auth, ds.DEFAULT_INDEX) + self.assertEqual(response, {'hits': {'hits': []}}) mock_get.assert_called_once_with( - ds.ES_URL, + f"https://localhost:9200/{ds.DEFAULT_INDEX}/_search", headers={"Content-Type": "application/json"}, data=ds.QUERY_DETECTIONS, auth=self.auth, @@ -81,7 +81,7 @@ class TestBackupScript(unittest.TestCase): @patch('os.makedirs') @patch('builtins.open', new_callable=mock_open) def test_save_overrides(self, mock_file, mock_makedirs): - file_path = ds.save_overrides(self.mock_override_hit) + file_path = ds.save_overrides(self.mock_override_hit, self.output_dir) expected_path = f'{self.output_dir}/sigma/overrides/test_id.yaml' self.assertEqual(file_path, expected_path) mock_makedirs.assert_called_once_with(f'{self.output_dir}/sigma/overrides', exist_ok=True) @@ -90,9 +90,9 @@ class TestBackupScript(unittest.TestCase): @patch('subprocess.run') def test_ensure_git_repo(self, mock_run): mock_run.return_value = MagicMock(returncode=0) - - ds.ensure_git_repo() - + + ds.ensure_git_repo(self.output_dir) + mock_run.assert_has_calls([ call(["git", "config", "--global", "init.defaultBranch", "main"], check=True), call(["git", "-C", self.output_dir, "init"], check=True), @@ -106,9 +106,9 @@ class TestBackupScript(unittest.TestCase): mock_commit_result = MagicMock(returncode=1) # Ensure sufficient number of MagicMock instances for each subprocess.run call mock_run.side_effect = [mock_status_result, mock_commit_result, MagicMock(returncode=0), MagicMock(returncode=0), 
MagicMock(returncode=0), MagicMock(returncode=0), MagicMock(returncode=0), MagicMock(returncode=0)] - + print("Running test_commit_changes...") - ds.commit_changes() + ds.commit_changes(self.output_dir) print("Finished test_commit_changes.") mock_run.assert_has_calls([ @@ -120,39 +120,45 @@ class TestBackupScript(unittest.TestCase): ]) @patch('builtins.print') - @patch('so-detections-backup.commit_changes') - @patch('so-detections-backup.save_overrides') - @patch('so-detections-backup.save_content') - @patch('so-detections-backup.query_elasticsearch') - @patch('so-detections-backup.get_auth_credentials') + @patch.object(ds, 'commit_changes') + @patch.object(ds, 'save_overrides') + @patch.object(ds, 'save_content') + @patch.object(ds, 'query_elasticsearch') + @patch.object(ds, 'get_auth_credentials') @patch('os.makedirs') - def test_main(self, mock_makedirs, mock_get_auth, mock_query, mock_save_content, mock_save_overrides, mock_commit, mock_print): + @patch.object(ds, 'parse_args') + def test_main(self, mock_parse_args, mock_makedirs, mock_get_auth, mock_query, mock_save_content, mock_save_overrides, mock_commit, mock_print): + mock_args = MagicMock() + mock_args.output = self.output_dir + mock_args.index = ds.DEFAULT_INDEX + mock_parse_args.return_value = mock_args mock_get_auth.return_value = self.auth_credentials mock_query.side_effect = [ {'hits': {'hits': [{"_source": {"so_detection": {"publicId": "1", "content": "content1", "language": "sigma"}}}]}}, {'hits': {'hits': [{"_source": {"so_detection": {"publicId": "2", "overrides": [{"key": "value"}], "language": "suricata"}}}]}} ] - + with patch('datetime.datetime') as mock_datetime: mock_datetime.now.return_value.strftime.return_value = "2024-05-23 20:49:44" ds.main() - + mock_makedirs.assert_called_once_with(self.output_dir, exist_ok=True) mock_get_auth.assert_called_once_with(ds.AUTH_FILE) mock_query.assert_has_calls([ - call(ds.QUERY_DETECTIONS, self.auth), - call(ds.QUERY_OVERRIDES, self.auth) + call(ds.QUERY_DETECTIONS, self.auth, ds.DEFAULT_INDEX), + call(ds.QUERY_OVERRIDES, self.auth, ds.DEFAULT_INDEX) ]) mock_save_content.assert_called_once_with( - {"_source": {"so_detection": {"publicId": "1", "content": "content1", "language": "sigma"}}}, - self.output_dir, - "sigma", + {"_source": {"so_detection": {"publicId": "1", "content": "content1", "language": "sigma"}}}, + self.output_dir, + "sigma", "yaml" ) mock_save_overrides.assert_called_once_with( - {"_source": {"so_detection": {"publicId": "2", "overrides": [{"key": "value"}], "language": "suricata"}}} + {"_source": {"so_detection": {"publicId": "2", "overrides": [{"key": "value"}], "language": "suricata"}}}, + self.output_dir ) - mock_commit.assert_called_once() + mock_commit.assert_called_once_with(self.output_dir) mock_print.assert_called() if __name__ == '__main__':
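Since the override-count verification was dropped in favor of the reindex-based backup, a quick manual sanity check after soup runs could look like the sketch below (illustrative; the backup index name carries a timestamp, and the wildcard sums counts across all matching backup indices if more than one exists):

# Compare document counts between the live index and the reindexed backup
/sbin/so-elasticsearch-query 'so-detection/_count' | jq -r '.count'
/sbin/so-elasticsearch-query 'sos-backup-detection-*/_count' | jq -r '.count'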