From 33d1170a914b5e787cb25436bc3b306af5cbda3c Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 2 May 2024 11:58:39 -0400 Subject: [PATCH 01/40] add default pillar value for pillarWatch --- salt/salt/engines/master/pillarWatch.py | 153 ++++++++++++++++++++++++ salt/salt/files/engines.conf | 27 +++++ salt/salt/master.sls | 6 + 3 files changed, 186 insertions(+) create mode 100644 salt/salt/engines/master/pillarWatch.py diff --git a/salt/salt/engines/master/pillarWatch.py b/salt/salt/engines/master/pillarWatch.py new file mode 100644 index 000000000..f75a6bb6b --- /dev/null +++ b/salt/salt/engines/master/pillarWatch.py @@ -0,0 +1,153 @@ +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. + +# -*- coding: utf-8 -*- + +import logging +import re +import os +import sys +log = logging.getLogger(__name__) + +# will need this in future versions of this engine +#import salt.client +#local = salt.client.LocalClient() + +def start(fpa, interval=10): + currentPillarValue = '' + previousPillarValue = '' + + ''' + def processJinjaFile(): + log.info("pillarWatch engine: processing jinja file") + log.info(pillarFile) + log.info(__salt__['jinja.load_map'](pillarFile, 'GLOBALMERGED')) + sys.exit(0) + ''' + + def checkChangesTakeAction(): + # if the pillar value changed, then we find what actions we should take + log.debug("pillarWatch engine: checking if currentPillarValue != previousPillarValue") + if currentPillarValue != previousPillarValue: + log.info("pillarWatch engine: currentPillarValue != previousPillarValue: %s != %s" % (currentPillarValue, previousPillarValue)) + # check if the previous pillar value is defined in the pillar from -> to actions + if previousPillarValue in actions['from']: + # check if the new / 
current pillar value is defined under to + if currentPillarValue in actions['from'][previousPillarValue]['to']: + ACTIONS=actions['from'][previousPillarValue]['to'][currentPillarValue] + # if the new / current pillar value isn't defined under to, is there a wildcard defined + elif '*' in actions['from'][previousPillarValue]['to']: + ACTIONS=actions['from'][previousPillarValue]['to']['*'] + # no action was defined for us to take when we see the pillar change + else: + ACTIONS=['NO DEFINED ACTION FOR US TO TAKE'] + # if the previous pillar wasn't defined in the actions from, is there a wildcard defined for the pillar that we are changing from + elif '*' in actions['from']: + # is the new pillar value defined for the wildcard match + if currentPillarValue in actions['from']['*']['to']: + ACTIONS=actions['from']['*']['to'][currentPillarValue] + # if the new pillar doesn't have an action, was a wildcard defined + elif '*' in actions['from']['*']['to']: + # need more logic here for to and from + ACTIONS=actions['from']['*']['to']['*'] + else: + ACTIONS=['NO DEFINED ACTION FOR US TO TAKE'] + # a match for the previous pillar wasn't defined in the action in either the form of a direct match or wildcard + else: + ACTIONS=['NO DEFINED ACTION FOR US TO TAKE'] + log.debug("pillarWatch engine: all defined actions: %s" % actions['from']) + log.debug("pillarWatch engine: ACTIONS: %s chosen based on previousPillarValue: %s switching to currentPillarValue: %s" % (ACTIONS, previousPillarValue, currentPillarValue)) + for action in ACTIONS: + log.info("pillarWatch engine: action: %s" % action) + if action != 'NO DEFINED ACTION FOR US TO TAKE': + for saltModule, args in action.items(): + log.debug("pillarWatch engine: saltModule: %s" % saltModule) + log.debug("pillarWatch engine: args: %s" % args) + #__salt__[saltModule](**args) + actionReturn = __salt__[saltModule](**args) + log.info("pillarWatch engine: actionReturn: %s" % actionReturn) + + + log.info("pillarWatch engine: ##### 
checking watched pillars for changes #####") + + # try to open the file that stores the previous runs data + # if the file doesn't exist, create a blank one + try: + # maybe change this location + dataFile = open("/opt/so/state/pillarWatch.txt", "r+") + except FileNotFoundError: + log.warn("pillarWatch engine: No previous pillarWatch data saved") + dataFile = open("/opt/so/state/pillarWatch.txt", "w+") + + df = dataFile.read() + for i in fpa: + log.trace("pillarWatch engine: files: %s" % i['files']) + log.trace("pillarWatch engine: pillar: %s" % i['pillar']) + log.trace("pillarWatch engine: actions: %s" % i['actions']) + pillarFiles = i['files'] + pillar = i['pillar'] + default = str(i['default']) + actions = i['actions'] + # these are the keys that we are going to look for as we traverse the pillarFiles + patterns = pillar.split(".") + # check the pillar files in reveresed order to replicate the same hierarchy as the pillar top file + for pillarFile in reversed(pillarFiles): + currentPillarValue = default + previousPillarValue = '' + ''' + if 'jinja' in os.path.splitext(pillarFile)[1]: + processJinjaFile() + ''' + # this var is used to track how many times the pattern has been found in the pillar file so that we can access the proper index later + patternFound = 0 + with open(pillarFile, "r") as file: + log.debug("pillarWatch engine: checking file: %s" % pillarFile) + for line in file: + log.trace("pillarWatch engine: inspecting line: %s in file: %s" % (line, file)) + log.trace("pillarWatch engine: looking for: %s" % patterns[patternFound]) + # since we are looping line by line through a pillar file, the next line will check if each line matches the progression of keys through the pillar + # ex. if we are looking for the value of global.pipeline, then this will loop through the pillar file until 'global' is found, then it will look + # for pipeline. 
once pipeline is found, it will record the value + if re.search('^' + patterns[patternFound] + ':', line.strip()): + # strip the newline because it makes the logs u-g-l-y + log.debug("pillarWatch engine: found: %s" % line.strip('\n')) + patternFound += 1 + # we have found the final key in the pillar that we are looking for, get the previous value and current value + if patternFound == len(patterns): + currentPillarValue = str(line.split(":")[1]).strip() + # we have found the pillar so we dont need to loop through the file anymore + break + + # if key and value was found in the first file, then we don't want to look in + # any more files since we use the first file as the source of truth. + if patternFound == len(patterns): + break + + # at this point, df is equal to the contents of the pillarWatch file that is used to tract the previous values of the pillars + previousPillarValue = 'PREVIOUSPILLARVALUENOTSAVEDINDATAFILE' + # check the contents of the dataFile that stores the previousPillarValue(s). + # find if the pillar we are checking for changes has previously been saved. if so, grab it's prior value + for l in df.splitlines(): + if pillar in l: + previousPillarValue = str(l.split(":")[1].strip()) + log.debug("pillarWatch engine: %s currentPillarValue: %s" % (pillar, currentPillarValue)) + log.debug("pillarWatch engine: %s previousPillarValue: %s" % (pillar, previousPillarValue)) + # if the pillar we are checking for changes has been defined in the dataFile, + # replace the previousPillarValue with the currentPillarValue. if it isn't in there, append it. + if pillar in df: + df = re.sub(r"\b{}\b.*".format(pillar), pillar + ': ' + currentPillarValue, df) + else: + df += pillar + ': ' + currentPillarValue + '\n' + log.trace("pillarWatch engine: df: %s" % df) + if previousPillarValue != "PREVIOUSPILLARVALUENOTSAVEDINDATAFILE": + checkChangesTakeAction() + else: + log.info("pillarWatch engine: %s was not previously tracked. not tacking action." 
% pillar) + + + dataFile.seek(0) + dataFile.write(df) + dataFile.truncate() + dataFile.close() diff --git a/salt/salt/files/engines.conf b/salt/salt/files/engines.conf index 7c43e99e1..4f3bc31a1 100644 --- a/salt/salt/files/engines.conf +++ b/salt/salt/files/engines.conf @@ -4,3 +4,30 @@ engines_dirs: engines: - checkmine: interval: 60 + - pillarWatch: + fpa: + - files: + - /opt/so/saltstack/local/pillar/idstools/soc_idstools.sls + - /opt/so/saltstack/local/pillar/idstools/adv_idstools.sls + pillar: idstools.config.ruleset + default: ETOPEN + actions: + from: + '*': + to: + '*': + - cmd.run: + cmd: /usr/sbin/so-rule-update + - files: + - /opt/so/saltstack/local/pillar/idstools/soc_idstools.sls + - /opt/so/saltstack/local/pillar/idstools/adv_idstools.sls + pillar: idstools.config.oinkcode + default: '' + actions: + from: + '*': + to: + '*': + - cmd.run: + cmd: /usr/sbin/so-rule-update + interval: 10 diff --git a/salt/salt/master.sls b/salt/salt/master.sls index 0a65f3e01..6e320e4a6 100644 --- a/salt/salt/master.sls +++ b/salt/salt/master.sls @@ -27,6 +27,11 @@ checkmine_engine: - source: salt://salt/engines/master/checkmine.py - makedirs: True +pillarWatch_engine: + file.managed: + - name: /etc/salt/engines/pillarWatch.py + - source: salt://salt/engines/master/pillarWatch.py + engines_config: file.managed: - name: /etc/salt/master.d/engines.conf @@ -38,6 +43,7 @@ salt_master_service: - enable: True - watch: - file: checkmine_engine + - file: pillarWatch_engine - file: engines_config - order: last From 3b2d3573d8e7e9b063770efaee2f119946074a1d Mon Sep 17 00:00:00 2001 From: Josh Patterson Date: Thu, 2 May 2024 16:06:04 -0400 Subject: [PATCH 02/40] Update pillarWatch.py --- salt/salt/engines/master/pillarWatch.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/salt/engines/master/pillarWatch.py b/salt/salt/engines/master/pillarWatch.py index f75a6bb6b..48d364374 100644 --- a/salt/salt/engines/master/pillarWatch.py +++ 
b/salt/salt/engines/master/pillarWatch.py @@ -70,7 +70,7 @@ def start(fpa, interval=10): log.info("pillarWatch engine: actionReturn: %s" % actionReturn) - log.info("pillarWatch engine: ##### checking watched pillars for changes #####") + log.debug("pillarWatch engine: ##### checking watched pillars for changes #####") # try to open the file that stores the previous runs data # if the file doesn't exist, create a blank one From e9b12632495db01340aff639fb8f1ff01526f8e1 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 2 May 2024 16:32:43 -0400 Subject: [PATCH 03/40] orchestrate searchnode deployment --- salt/manager/tools/sbin/so-minion | 11 +++++++---- salt/orch/deploy_searchnode.sls | 16 ++++++++++++++++ 2 files changed, 23 insertions(+), 4 deletions(-) create mode 100644 salt/orch/deploy_searchnode.sls diff --git a/salt/manager/tools/sbin/so-minion b/salt/manager/tools/sbin/so-minion index 34e069ece..8a34ddca0 100755 --- a/salt/manager/tools/sbin/so-minion +++ b/salt/manager/tools/sbin/so-minion @@ -613,11 +613,14 @@ function updateMineAndApplyStates() { if [[ "$NODETYPE" == "SEARCHNODE" || "$NODETYPE" == "HEAVYNODE" ]]; then # calls so-common and set_minionid sets MINIONID to local minion id set_minionid - salt $MINIONID state.apply elasticsearch queue=True --async - salt $MINIONID state.apply soc queue=True --async + #salt $MINIONID state.apply elasticsearch queue=True --async + # salt $MINIONID state.apply soc queue=True --async + # $MINIONID is the minion id of the manager and $MINION_ID is the target node or the node being configured + salt-run state.orch orch.deploy_searchnode pillar="{'setup': {'manager': $MINIONID, 'searchnode': $MINION_ID }}" + else + # run this async so the cli doesn't wait for a return + salt "$MINION_ID" state.highstate --async queue=True fi - # run this async so the cli doesn't wait for a return - salt "$MINION_ID" state.highstate --async queue=True } function setupMinionFiles() { diff --git a/salt/orch/deploy_searchnode.sls 
b/salt/orch/deploy_searchnode.sls new file mode 100644 index 000000000..a70e7bdbf --- /dev/null +++ b/salt/orch/deploy_searchnode.sls @@ -0,0 +1,16 @@ +{% set MANAGER = salt['pillar.get']('setup:manager') %} +{% set SEARCHNODE = salt['pillar.get']('setup:searchnode') %} + +manager_run_es_soc: + salt.state: + - tgt: {{ MANAGER }} + - sls: + - elasticsearch + - soc + +searchnode_run_highstate: + salt.state: + - tgt: {{ TARGET }} + - highstate: True + - require: + - salt: manager_run_es_soc From 29298770422c02d34ee30853d08649c0c334bf78 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 2 May 2024 16:37:54 -0400 Subject: [PATCH 04/40] fix var --- salt/orch/deploy_searchnode.sls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/orch/deploy_searchnode.sls b/salt/orch/deploy_searchnode.sls index a70e7bdbf..f36f02511 100644 --- a/salt/orch/deploy_searchnode.sls +++ b/salt/orch/deploy_searchnode.sls @@ -10,7 +10,7 @@ manager_run_es_soc: searchnode_run_highstate: salt.state: - - tgt: {{ TARGET }} + - tgt: {{ SEARCHNODE }} - highstate: True - require: - salt: manager_run_es_soc From 5fe8c6a95f587fa89e61d44a55fd1d844c0eb02d Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Fri, 3 May 2024 09:38:34 -0400 Subject: [PATCH 05/40] Update so-whiptail to make installation screen more consistent --- setup/so-whiptail | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/setup/so-whiptail b/setup/so-whiptail index 90bbaf397..4be002565 100755 --- a/setup/so-whiptail +++ b/setup/so-whiptail @@ -592,8 +592,8 @@ whiptail_install_type() { "IMPORT" "Import PCAP or log files " \ "EVAL" "Evaluation mode (not for production) " \ "STANDALONE" "Standalone production install " \ - "DISTRIBUTED" "Distributed install submenu " \ - "DESKTOP" "Install Security Onion Desktop" \ + "DISTRIBUTED" "Distributed deployment " \ + "DESKTOP" "Security Onion Desktop" \ 3>&1 1>&2 2>&3 ) elif [[ "$OSVER" == "focal" ]]; then From bbc374b56ea0a324d926184cfb8c666190005756 Mon Sep 
17 00:00:00 2001 From: m0duspwnens Date: Fri, 3 May 2024 09:56:52 -0400 Subject: [PATCH 06/40] add logic in orch --- salt/manager/tools/sbin/so-minion | 16 +++++++------- salt/orch/deploy_newnode.sls | 36 +++++++++++++++++++++++++++++++ salt/orch/deploy_searchnode.sls | 16 -------------- 3 files changed, 44 insertions(+), 24 deletions(-) create mode 100644 salt/orch/deploy_newnode.sls delete mode 100644 salt/orch/deploy_searchnode.sls diff --git a/salt/manager/tools/sbin/so-minion b/salt/manager/tools/sbin/so-minion index 8a34ddca0..3f8adfa31 100755 --- a/salt/manager/tools/sbin/so-minion +++ b/salt/manager/tools/sbin/so-minion @@ -607,20 +607,20 @@ function updateMineAndApplyStates() { # tell the minion to populate the mine with data from mine_functions which is populated during setup # this only needs to happen on non managers since they handle this during setup # and they need to wait for ca creation to update the mine - updateMine - checkMine "network.ip_addrs" + #updateMine + #checkMine "network.ip_addrs" # apply the elasticsearch state to the manager if a new searchnode was added - if [[ "$NODETYPE" == "SEARCHNODE" || "$NODETYPE" == "HEAVYNODE" ]]; then + #if [[ "$NODETYPE" == "SEARCHNODE" || "$NODETYPE" == "HEAVYNODE" ]]; then # calls so-common and set_minionid sets MINIONID to local minion id - set_minionid + set_minionid #salt $MINIONID state.apply elasticsearch queue=True --async # salt $MINIONID state.apply soc queue=True --async # $MINIONID is the minion id of the manager and $MINION_ID is the target node or the node being configured - salt-run state.orch orch.deploy_searchnode pillar="{'setup': {'manager': $MINIONID, 'searchnode': $MINION_ID }}" - else + salt-run state.orch orch.deploy_newnode pillar="{'setup': {'manager': $MINIONID, 'newnode': $MINION_ID }}" + #else # run this async so the cli doesn't wait for a return - salt "$MINION_ID" state.highstate --async queue=True - fi + # salt "$MINION_ID" state.highstate --async queue=True + #fi } function 
setupMinionFiles() { diff --git a/salt/orch/deploy_newnode.sls b/salt/orch/deploy_newnode.sls new file mode 100644 index 000000000..94ed86723 --- /dev/null +++ b/salt/orch/deploy_newnode.sls @@ -0,0 +1,36 @@ +{% set MANAGER = salt['pillar.get']('setup:manager') %} +{% set NEWNODE = salt['pillar.get']('setup:newnode') %} + +{{NEWNODE}}_update_mine: + salt.function: + - name: mine.update + - tgt: {{ NEWNODE }} + - retry: + attempts: 24 + interval: 5 + +{% if NEWNODE.split('_')|last in ['searchnode', 'heavynode'] %} +manager_run_es_soc: + salt.state: + - tgt: {{ MANAGER }} + - sls: + - elasticsearch + - soc + - kwarg: + queue: True + - retry: + attempts: 30 + interval: 10 + - require: + - salt: new_node_update_mine +{% endif %} + +{{NEWNODE}}_run_highstate: + salt.state: + - tgt: {{ NEWNODE }} + - highstate: True + - kwarg: + queue: True + - retry: + attempts: 30 + interval: 10 diff --git a/salt/orch/deploy_searchnode.sls b/salt/orch/deploy_searchnode.sls deleted file mode 100644 index f36f02511..000000000 --- a/salt/orch/deploy_searchnode.sls +++ /dev/null @@ -1,16 +0,0 @@ -{% set MANAGER = salt['pillar.get']('setup:manager') %} -{% set SEARCHNODE = salt['pillar.get']('setup:searchnode') %} - -manager_run_es_soc: - salt.state: - - tgt: {{ MANAGER }} - - sls: - - elasticsearch - - soc - -searchnode_run_highstate: - salt.state: - - tgt: {{ SEARCHNODE }} - - highstate: True - - require: - - salt: manager_run_es_soc From fa3522a2333a25eb8eb63fabbfd52c178c9b466b Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 3 May 2024 11:10:21 -0400 Subject: [PATCH 07/40] fix requirement --- salt/orch/deploy_newnode.sls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/orch/deploy_newnode.sls b/salt/orch/deploy_newnode.sls index 94ed86723..bf4a67574 100644 --- a/salt/orch/deploy_newnode.sls +++ b/salt/orch/deploy_newnode.sls @@ -22,7 +22,7 @@ manager_run_es_soc: attempts: 30 interval: 10 - require: - - salt: new_node_update_mine + - salt: 
{{NEWNODE}}_update_mine {% endif %} {{NEWNODE}}_run_highstate: From 442a717d75f071fff0c7e7aebc3e8bb50692209d Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 3 May 2024 12:08:57 -0400 Subject: [PATCH 08/40] orchit --- salt/orch/deploy_newnode.sls | 23 +++++++++++++++-------- setup/so-setup | 1 - 2 files changed, 15 insertions(+), 9 deletions(-) diff --git a/salt/orch/deploy_newnode.sls b/salt/orch/deploy_newnode.sls index bf4a67574..182fff06c 100644 --- a/salt/orch/deploy_newnode.sls +++ b/salt/orch/deploy_newnode.sls @@ -16,11 +16,10 @@ manager_run_es_soc: - sls: - elasticsearch - soc - - kwarg: - queue: True + - queue: True - retry: - attempts: 30 - interval: 10 + attempts: 3 + interval: 60 - require: - salt: {{NEWNODE}}_update_mine {% endif %} @@ -29,8 +28,16 @@ manager_run_es_soc: salt.state: - tgt: {{ NEWNODE }} - highstate: True - - kwarg: - queue: True + - queue: True - retry: - attempts: 30 - interval: 10 + attempts: 5 + interval: 60 + +{{NEWNODE}}_set_highstate_cron: + salt.state: + - tgt: {{ NEWNODE }} + - sls: + - setup.highstate_cron + - queue: True + - onfail: + - salt: {{NEWNODE}}_run_highstate diff --git a/setup/so-setup b/setup/so-setup index 1c3be22bf..8a1879c58 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -808,7 +808,6 @@ if ! 
[[ -f $install_opt_file ]]; then configure_minion "$minion_type" check_sos_appliance drop_install_options - logCmd "salt-call state.apply setup.highstate_cron --local --file-root=../salt/" verify_setup fi From 6cbbb81cadeb4712dcb74c0f03fae02d171fcdf7 Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Fri, 3 May 2024 12:59:41 -0400 Subject: [PATCH 09/40] FEATURE: Add hyperlink to airgap screen in setup #12925 --- setup/so-whiptail | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup/so-whiptail b/setup/so-whiptail index 4be002565..06d62a027 100755 --- a/setup/so-whiptail +++ b/setup/so-whiptail @@ -14,7 +14,7 @@ whiptail_airgap() { [[ $is_manager || $is_import ]] && node_str='manager' INTERWEBS=$(whiptail --title "$whiptail_title" --menu \ - "How should this $node_str be installed?" 10 70 2 \ + "How should this $node_str be installed?\n\nFor more information, please see:\n$DOC_BASE_URL/airgap.html" 13 70 2 \ "Standard " "This $node_str has access to the Internet" \ "Airgap " "This $node_str does not have access to the Internet" 3>&1 1>&2 2>&3 ) From 3d4fd59a159901d3deb34381fbff98e88d1d953d Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 3 May 2024 13:48:51 -0400 Subject: [PATCH 10/40] orchit --- salt/manager/tools/sbin/so-minion | 21 +++------------------ salt/orch/deploy_newnode.sls | 6 +++++- 2 files changed, 8 insertions(+), 19 deletions(-) diff --git a/salt/manager/tools/sbin/so-minion b/salt/manager/tools/sbin/so-minion index 3f8adfa31..e31ec87d3 100755 --- a/salt/manager/tools/sbin/so-minion +++ b/salt/manager/tools/sbin/so-minion @@ -426,10 +426,6 @@ function checkMine() { } -function updateMine() { - retry 20 1 "salt '$MINION_ID' mine.update" True -} - function createEVAL() { is_pcaplimit=true pcapspace @@ -604,23 +600,12 @@ function addMinion() { } function updateMineAndApplyStates() { - # tell the minion to populate the mine with data from mine_functions which is populated during setup - # this only needs to happen on non managers 
since they handle this during setup - # and they need to wait for ca creation to update the mine - #updateMine + #checkMine "network.ip_addrs" - # apply the elasticsearch state to the manager if a new searchnode was added - #if [[ "$NODETYPE" == "SEARCHNODE" || "$NODETYPE" == "HEAVYNODE" ]]; then - # calls so-common and set_minionid sets MINIONID to local minion id + # calls so-common and set_minionid sets MINIONID to local minion id set_minionid - #salt $MINIONID state.apply elasticsearch queue=True --async - # salt $MINIONID state.apply soc queue=True --async - # $MINIONID is the minion id of the manager and $MINION_ID is the target node or the node being configured + # $MINIONID is the minion id of the manager and $MINION_ID is the target node or the node being configured salt-run state.orch orch.deploy_newnode pillar="{'setup': {'manager': $MINIONID, 'newnode': $MINION_ID }}" - #else - # run this async so the cli doesn't wait for a return - # salt "$MINION_ID" state.highstate --async queue=True - #fi } function setupMinionFiles() { diff --git a/salt/orch/deploy_newnode.sls b/salt/orch/deploy_newnode.sls index 182fff06c..a2e6b147f 100644 --- a/salt/orch/deploy_newnode.sls +++ b/salt/orch/deploy_newnode.sls @@ -1,14 +1,18 @@ {% set MANAGER = salt['pillar.get']('setup:manager') %} {% set NEWNODE = salt['pillar.get']('setup:newnode') %} +# tell the minion to populate the mine with data from mine_functions which is populated during setup +# this only needs to happen on non managers since they handle this during setup +# and they need to wait for ca creation to update the mine {{NEWNODE}}_update_mine: salt.function: - name: mine.update - tgt: {{ NEWNODE }} - retry: - attempts: 24 + attempts: 36 interval: 5 +# we need to prepare the manager for a new searchnode or heavynode {% if NEWNODE.split('_')|last in ['searchnode', 'heavynode'] %} manager_run_es_soc: salt.state: From bdf1b45a07252a03b31287bb2a86114ed59514df Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 3 
May 2024 14:54:44 -0400 Subject: [PATCH 11/40] redirect and throw in bg --- salt/manager/tools/sbin/so-minion | 2 +- salt/orch/deploy_newnode.sls | 15 --------------- 2 files changed, 1 insertion(+), 16 deletions(-) diff --git a/salt/manager/tools/sbin/so-minion b/salt/manager/tools/sbin/so-minion index e31ec87d3..79eea59fe 100755 --- a/salt/manager/tools/sbin/so-minion +++ b/salt/manager/tools/sbin/so-minion @@ -605,7 +605,7 @@ function updateMineAndApplyStates() { # calls so-common and set_minionid sets MINIONID to local minion id set_minionid # $MINIONID is the minion id of the manager and $MINION_ID is the target node or the node being configured - salt-run state.orch orch.deploy_newnode pillar="{'setup': {'manager': $MINIONID, 'newnode': $MINION_ID }}" + salt-run state.orch orch.deploy_newnode pillar="{'setup': {'manager': $MINIONID, 'newnode': $MINION_ID }}" > /dev/null 2>&1 & } function setupMinionFiles() { diff --git a/salt/orch/deploy_newnode.sls b/salt/orch/deploy_newnode.sls index a2e6b147f..c05a812a3 100644 --- a/salt/orch/deploy_newnode.sls +++ b/salt/orch/deploy_newnode.sls @@ -21,9 +21,6 @@ manager_run_es_soc: - elasticsearch - soc - queue: True - - retry: - attempts: 3 - interval: 60 - require: - salt: {{NEWNODE}}_update_mine {% endif %} @@ -33,15 +30,3 @@ manager_run_es_soc: - tgt: {{ NEWNODE }} - highstate: True - queue: True - - retry: - attempts: 5 - interval: 60 - -{{NEWNODE}}_set_highstate_cron: - salt.state: - - tgt: {{ NEWNODE }} - - sls: - - setup.highstate_cron - - queue: True - - onfail: - - salt: {{NEWNODE}}_run_highstate From 7f12d4c81589792f138ca8c3d07e60b2b8ed6f54 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Fri, 3 May 2024 15:22:53 -0400 Subject: [PATCH 12/40] Exclude new sigma rules --- setup/so-verify | 1 + 1 file changed, 1 insertion(+) diff --git a/setup/so-verify b/setup/so-verify index b4c79a88c..d22b80fc2 100755 --- a/setup/so-verify +++ b/setup/so-verify @@ -67,6 +67,7 @@ log_has_errors() { grep -vE "Reading first line 
of patchfile" | \ grep -vE "Command failed with exit code" | \ grep -vE "Running scope as unit" | \ + grep -vE "securityonion-resources/sigma/stable" | \ grep -vE "log-.*-pipeline_failed_attempts" &> "$error_log" if [[ $? -eq 0 ]]; then From 7b905f5a946b3408775863bf5ad1e216ed3f9454 Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Mon, 6 May 2024 08:22:08 -0400 Subject: [PATCH 13/40] FEATURE: Add Events table columns for tunnel logs #12937 --- salt/soc/defaults.yaml | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index ad154e9d1..f2bf77805 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -502,16 +502,15 @@ soc: - syslog.severity - log.id.uid - event.dataset - '::tunnels': + '::tunnel': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip - destination.port - - tunnel_type - - action - - log.id.uid - - event.dataset + - event.action + - tunnel.type '::weird': - soc_timestamp - source.ip From 26c6a98b45369e522d3a396b7e92623a0a81eb6c Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Mon, 6 May 2024 08:43:01 -0400 Subject: [PATCH 14/40] Initial airgap support for detections --- salt/soc/soc_soc.yaml | 26 +++++++++----------------- 1 file changed, 9 insertions(+), 17 deletions(-) diff --git a/salt/soc/soc_soc.yaml b/salt/soc/soc_soc.yaml index 4b88a5f84..a9d6bac08 100644 --- a/salt/soc/soc_soc.yaml +++ b/salt/soc/soc_soc.yaml @@ -107,7 +107,7 @@ soc: advanced: True helpLink: sigma.html rulesRepos: - description: 'Custom Git repos to pull Sigma rules from. License field is required, folder is optional.' + description: 'Custom Git repos to pull Sigma rules from. "license" field is required, "folder" is optional. "community" disables some management options for the imported rules - they can''t be deleted or edited, just tuned, duplicated and Enabled | Disabled.'
global: True advanced: True forcedType: "[]{}" @@ -117,8 +117,8 @@ soc: global: True advanced: False helpLink: sigma.html - autoUpdateEnabled: - description: 'Set to true to enable automatic Internet-connected updates of the Sigma Community Ruleset. If this is an Airgap system, this setting will be overridden and set to false.' + airgapEnabled: + description: 'This setting dynamically changes to the current status of Airgap on this system and is used during the Sigma ruleset update process.' global: True advanced: True helpLink: sigma.html @@ -185,31 +185,27 @@ soc: advanced: True strelkaengine: allowRegex: - description: 'Regex used to filter imported Yara rules. Deny regex takes precedence over the Allow regex setting.' + description: 'Regex used to filter imported YARA rules. Deny regex takes precedence over the Allow regex setting.' global: True advanced: True helpLink: yara.html - autoEnabledYaraRules: - description: 'Yara rules to automatically enable on initial import. Format is $Ruleset - for example, for the default shipped ruleset: securityonion-yara' + autoEnabledYARARules: + description: 'YARA rules to automatically enable on initial import. Format is $Ruleset - for example, for the default shipped ruleset: securityonion-yara' global: True advanced: True helpLink: sigma.html - autoUpdateEnabled: - description: 'Set to true to enable automatic Internet-connected updates of the Yara rulesets. If this is an Airgap system, this setting will be overridden and set to false.' - global: True - advanced: True denyRegex: - description: 'Regex used to filter imported Yara rules. Deny regex takes precedence over the Allow regex setting.' + description: 'Regex used to filter imported YARA rules. Deny regex takes precedence over the Allow regex setting.' global: True advanced: True helpLink: yara.html communityRulesImportFrequencySeconds: - description: 'How often to check for new Yara rules (in seconds). 
This applies to both Community Rules and any configured Git repos.' + description: 'How often to check for new YARA rules (in seconds). This applies to both Community Rules and any configured Git repos.' global: True advanced: True helpLink: yara.html rulesRepos: - description: 'Custom Git repos to pull Yara rules from. License field is required' + description: 'Custom Git repos to pull YARA rules from. "license" field is required, "folder" is optional. "community" disables some management options for the imported rules - they can''t be deleted or edited, just tuned, duplicated and Enabled | Disabled.' global: True advanced: True forcedType: "[]{}" @@ -220,10 +216,6 @@ soc: global: True advanced: True helpLink: suricata.html - autoUpdateEnabled: - description: 'Set to true to enable automatic Internet-connected updates of the Suricata rulesets. If this is an Airgap system, this setting will be overridden and set to false.' - global: True - advanced: True denyRegex: description: 'Regex used to filter imported Suricata rules. Deny regex takes precedence over the Allow regex setting.' 
global: True From f689cfcd0ae8d44a335b9fe313aa26e49cf2a83f Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Mon, 6 May 2024 08:52:43 -0400 Subject: [PATCH 15/40] FEATURE: Add Events table columns for stun logs #12940 --- salt/soc/defaults.yaml | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index f2bf77805..593b55b07 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -491,6 +491,17 @@ soc: - ssl.version - log.id.uid - event.dataset + '::stun': + - soc_timestamp + - event.dataset + - source.ip + - source.port + - destination.ip + - destination.port + - stun.class + - stun.method + - stun.attribute.types + - log.id.uid ':zeek:syslog': - soc_timestamp - source.ip @@ -1841,7 +1852,7 @@ soc: query: 'event.dataset:zeek.ssl | groupby ssl.version | groupby ssl.validation_status | groupby -sankey ssl.validation_status ssl.server_name | groupby ssl.server_name | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name' - name: STUN description: STUN (Session Traversal Utilities for NAT) network metadata - query: 'tags:stun* | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby destination.geo.country_name | groupby event.dataset' + query: 'tags:stun* | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby destination.geo.country_name | groupby stun.class | groupby -sankey stun.class stun.method | groupby stun.method | groupby stun.attribute.types' - name: Syslog description: Syslog logs query: 'tags:syslog | groupby syslog.severity_label | groupby syslog.facility_label | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby network.protocol | groupby event.dataset' From 3f73b14a6a8ff3fa7682ef9e9c180d5ad21ca9fc Mon Sep 
17 00:00:00 2001 From: Doug Burks Date: Mon, 6 May 2024 09:20:47 -0400 Subject: [PATCH 16/40] FEATURE: Add event.dataset to all Events table layouts #12641 --- salt/soc/defaults.yaml | 237 +++++++++++++++++++++-------------------- 1 file changed, 121 insertions(+), 116 deletions(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 593b55b07..b6a52fd75 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -81,22 +81,23 @@ soc: eventFields: default: - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip - destination.port - log.id.uid - network.community_id - - event.dataset ':kratos:': - soc_timestamp + - event.dataset - http_request.headers.x-real-ip - identity_id - http_request.headers.user-agent - - event.dataset - msg '::conn': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -105,9 +106,9 @@ soc: - network.protocol - log.id.uid - network.community_id - - event.dataset '::dce_rpc': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -116,27 +117,27 @@ soc: - dce_rpc.named_pipe - dce_rpc.operation - log.id.uid - - event.dataset '::dhcp': - soc_timestamp + - event.dataset - client.address - server.address - host.domain - host.hostname - dhcp.message_types - log.id.uid - - event.dataset '::dnp3': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip - destination.port - dnp3.fc_reply - log.id.uid - - event.dataset '::dnp3_control': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -144,9 +145,9 @@ soc: - dnp3.function_code - dnp3.block_type - log.id.uid - - event.dataset '::dnp3_objects': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -154,9 +155,9 @@ soc: - dnp3.function_code - dnp3.object_type - log.id.uid - - event.dataset '::dns': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -167,9 +168,9 @@ soc: - dns.response.code_name - 
log.id.uid - network.community_id - - event.dataset '::dpd': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -178,9 +179,9 @@ soc: - observer.analyser - error.reason - log.id.uid - - event.dataset '::file': - soc_timestamp + - event.dataset - source.ip - destination.ip - file.name @@ -189,9 +190,9 @@ soc: - file.bytes.total - log.id.fuid - log.id.uid - - event.dataset '::ftp': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -202,9 +203,9 @@ soc: - ftp.reply_code - file.size - log.id.uid - - event.dataset '::http': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -217,9 +218,9 @@ soc: - http.response.body.length - log.id.uid - network.community_id - - event.dataset '::intel': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -228,9 +229,9 @@ soc: - intel.indicator_type - intel.seen_where - log.id.uid - - event.dataset '::irc': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -241,9 +242,9 @@ soc: - irc.command.value - irc.command.info - log.id.uid - - event.dataset '::kerberos': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -252,18 +253,18 @@ soc: - kerberos.service - kerberos.request_type - log.id.uid - - event.dataset '::modbus': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip - destination.port - modbus.function - log.id.uid - - event.dataset '::mysql': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -273,9 +274,9 @@ soc: - mysql.success - mysql.response - log.id.uid - - event.dataset '::notice': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -285,9 +286,9 @@ soc: - log.id.fuid - log.id.uid - network.community_id - - event.dataset '::ntlm': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -298,18 +299,18 @@ soc: - ntlm.server.nb.name - 
ntlm.server.tree.name - log.id.uid - - event.dataset '::pe': - soc_timestamp + - event.dataset - file.is_64bit - file.is_exe - file.machine - file.os - file.subsystem - log.id.fuid - - event.dataset '::radius': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -319,9 +320,9 @@ soc: - radius.framed_address - radius.reply_message - radius.result - - event.dataset '::rdp': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -335,9 +336,9 @@ soc: - rdp.result - rdp.security_protocol - log.id.uid - - event.dataset '::rfb': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -347,9 +348,9 @@ soc: - rfb.share_flag - rfb.desktop.name - log.id.uid - - event.dataset '::signatures': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -361,9 +362,9 @@ soc: - signature_count - host.count - log.id.uid - - event.dataset '::sip': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -379,9 +380,9 @@ soc: - sip.user_agent - sip.status_code - log.id.uid - - event.dataset '::smb_files': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -393,9 +394,9 @@ soc: - file.size - file.prev_name - log.id.uid - - event.dataset '::smb_mapping': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -404,9 +405,9 @@ soc: - smb.service - smb.share_type - log.id.uid - - event.dataset '::smtp': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -417,9 +418,9 @@ soc: - smtp.useragent - log.id.uid - network.community_id - - event.dataset '::snmp': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -427,9 +428,9 @@ soc: - snmp.community - snmp.version - log.id.uid - - event.dataset '::socks': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -439,15 +440,15 @@ soc: - socks.request.port - socks.status - log.id.uid - - 
event.dataset '::software': - soc_timestamp + - event.dataset - source.ip - software.name - software.type - - event.dataset '::ssh': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -458,9 +459,9 @@ soc: - ssh.client - ssh.server - log.id.uid - - event.dataset ':suricata:ssl': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -469,9 +470,9 @@ soc: - ssl.certificate.subject - ssl.version - log.id.uid - - event.dataset ':zeek:ssl': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -480,9 +481,9 @@ soc: - ssl.validation_status - ssl.version - log.id.uid - - event.dataset '::ssl': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -490,7 +491,6 @@ soc: - ssl.server_name - ssl.version - log.id.uid - - event.dataset '::stun': - soc_timestamp - event.dataset @@ -504,6 +504,7 @@ soc: - log.id.uid ':zeek:syslog': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -512,7 +513,6 @@ soc: - network.protocol - syslog.severity - log.id.uid - - event.dataset '::tunnel': - soc_timestamp - event.dataset @@ -524,23 +524,24 @@ soc: - tunnel.type '::weird': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip - destination.port - weird.name - log.id.uid - - event.dataset '::x509': - soc_timestamp + - event.dataset - x509.certificate.subject - x509.certificate.key.type - x509.certificate.key.length - x509.certificate.issuer - log.id.fuid - - event.dataset '::firewall': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -550,9 +551,9 @@ soc: - observer.ingress.interface.name - event.action - network.community_id - - event.dataset ':pfsense:': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -562,9 +563,9 @@ soc: - observer.ingress.interface.name - event.action - network.community_id - - event.dataset ':osquery:': - soc_timestamp + - event.dataset - source.ip 
- source.port - destination.ip @@ -572,27 +573,27 @@ soc: - source.hostname - process.executable - user.name - - event.dataset ':strelka:': - soc_timestamp + - event.dataset - file.name - file.size - hash.md5 - file.source - file.mime_type - log.id.fuid - - event.dataset ':strelka:file': - soc_timestamp + - event.dataset - file.name - file.size - hash.md5 - file.source - file.mime_type - log.id.fuid - - event.dataset ':suricata:': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -602,35 +603,35 @@ soc: - event.severity_label - log.id.uid - network.community_id - - event.dataset ':windows_eventlog:': - soc_timestamp - - user.name - event.dataset + - user.name ':elasticsearch:': - soc_timestamp + - event.dataset - agent.name - message - log.level - metadata.version - metadata.pipeline - - event.dataset ':kibana:': - soc_timestamp + - event.dataset - host.name - message - kibana.log.meta.req.headers.x-real-ip - - event.dataset ':syslog:syslog': - soc_timestamp + - event.dataset - host.name - metadata.ip_address - real_message - syslog.priority - syslog.application - - event.dataset ':aws:': - soc_timestamp + - event.dataset - aws.cloudtrail.event_category - aws.cloudtrail.event_type - event.provider @@ -640,25 +641,25 @@ soc: - user.name - source.ip - source.geo.region_iso_code - - event.dataset ':squid:': - soc_timestamp + - event.dataset - url.original - destination.ip - destination.geo.country_iso_code - user.name - source.ip - - event.dataset '::sysmon_operational': - soc_timestamp + - event.dataset - event.action - winlog.computer_name - user.name - process.executable - process.pid - - event.dataset '::network_connection': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -666,59 +667,59 @@ soc: - source.hostname - process.executable - user.name - - event.dataset '::process_terminated': - soc_timestamp + - event.dataset - process.executable - process.pid - winlog.computer_name - - event.dataset 
'::file_create': - soc_timestamp + - event.dataset - file.target - process.executable - process.pid - winlog.computer_name - - event.dataset '::registry_value_set': - soc_timestamp + - event.dataset - winlog.event_data.TargetObject - process.executable - process.pid - winlog.computer_name - - event.dataset '::process_creation': - soc_timestamp + - event.dataset - process.command_line - process.pid - process.parent.executable - process.working_directory - - event.dataset '::registry_create_delete': - soc_timestamp + - event.dataset - winlog.event_data.TargetObject - process.executable - process.pid - winlog.computer_name - - event.dataset '::dns_query': - soc_timestamp + - event.dataset - dns.query.name - dns.answers.name - process.executable - winlog.computer_name - - event.dataset '::file_create_stream_hash': - soc_timestamp + - event.dataset - file.target - hash.md5 - hash.sha256 - process.executable - process.pid - winlog.computer_name - - event.dataset '::bacnet': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -726,9 +727,9 @@ soc: - bacnet.bclv.function - bacnet.result.code - log.id.uid - - event.dataset '::bacnet_discovery': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -736,9 +737,9 @@ soc: - bacnet.vendor - bacnet.pdu.service - log.id.uid - - event.dataset '::bacnet_property': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -746,9 +747,9 @@ soc: - bacnet.property - bacnet.pdu.service - log.id.uid - - event.dataset '::bsap_ip_header': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -756,16 +757,16 @@ soc: - bsap.message.type - bsap.number.messages - log.id.uid - - event.dataset '::bsap_ip_rdb': - soc_timestamp + - event.dataset - bsap.application.function - bsap.application.sub.function - bsap.vector.variables - log.id.uid - - event.dataset '::bsap_serial_header': - soc_timestamp + - event.dataset - source.ip - source.port - 
destination.ip @@ -774,15 +775,15 @@ soc: - bsap.destination.function - bsap.message.type - log.id.uid - - event.dataset '::bsap_serial_rdb': - soc_timestamp + - event.dataset - bsap.rdb.function - bsap.vector.variables - log.id.uid - - event.dataset '::cip': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -790,9 +791,9 @@ soc: - cip.service - cip.status_code - log.id.uid - - event.dataset '::cip_identity': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -800,9 +801,9 @@ soc: - cip.device.type.name - cip.vendor.name - log.id.uid - - event.dataset '::cip_io': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -810,63 +811,63 @@ soc: - cip.connection.id - cip.io.data - log.id.uid - - event.dataset '::cotp': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip - destination.port - cotp.pdu.name - log.id.uid - - event.dataset '::ecat_arp_info': - soc_timestamp + - event.dataset - source.ip - destination.ip - source.mac - destination.mac - ecat.arp.type - - event.dataset '::ecat_aoe_info': - soc_timestamp + - event.dataset - source.mac - source.port - destination.mac - destination.port - ecat.command - - event.dataset '::ecat_coe_info': - soc_timestamp + - event.dataset - ecat.message.number - ecat.message.type - ecat.request.response.type - ecat.index - ecat.sub.index - - event.dataset '::ecat_dev_info': - soc_timestamp + - event.dataset - ecat.device.type - ecat.features - ecat.ram.size - ecat.revision - ecat.slave.address - - event.dataset '::ecat_log_address': - soc_timestamp + - event.dataset - source.mac - destination.mac - ecat.command - - event.dataset '::ecat_registers': - soc_timestamp + - event.dataset - source.mac - destination.mac - ecat.command - ecat.register.type - - event.dataset '::enip': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -874,18 +875,18 @@ soc: - enip.command - enip.status_code - log.id.uid 
- - event.dataset '::modbus_detailed': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip - destination.port - modbus.function - log.id.uid - - event.dataset '::opcua_binary': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -893,9 +894,9 @@ soc: - opcua.identifier_string - opcua.message_type - log.id.uid - - event.dataset '::opcua_binary_activate_session': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -904,9 +905,9 @@ soc: - opcua.identifier_string - opcua.user_name - log.id.uid - - event.dataset '::opcua_binary_activate_session_diagnostic_info': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -914,9 +915,9 @@ soc: - opcua.activate_session_diag_info_link_id - opcua.diag_info_link_id - log.id.uid - - event.dataset '::opcua_binary_activate_session_locale_id': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -924,9 +925,9 @@ soc: - opcua.local_id - opcua.locale_link_id - log.id.uid - - event.dataset '::opcua_binary_browse': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -934,17 +935,17 @@ soc: - opcua.link_id - opcua.service_type - log.id.uid - - event.dataset '::opcua_binary_browse_description': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip - destination.port - log.id.uid - - event.dataset '::opcua_binary_browse_response_references': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -952,27 +953,27 @@ soc: - opcua.node_class - opcua.display_name_text - log.id.uid - - event.dataset '::opcua_binary_browse_result': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip - destination.port - opcua.response_link_id - log.id.uid - - event.dataset '::opcua_binary_create_session': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip - destination.port - opcua.link_id - log.id.uid - - 
event.dataset '::opcua_binary_create_session_endpoints': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -980,27 +981,27 @@ soc: - opcua.endpoint_link_id - opcua.endpoint_url - log.id.uid - - event.dataset '::opcua_binary_create_session_user_token': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip - destination.port - opcua.user_token_link_id - log.id.uid - - event.dataset '::opcua_binary_create_subscription': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip - destination.port - opcua.link_id - log.id.uid - - event.dataset '::opcua_binary_get_endpoints': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -1008,9 +1009,9 @@ soc: - opcua.endpoint_url - opcua.link_id - log.id.uid - - event.dataset '::opcua_binary_get_endpoints_description': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -1018,9 +1019,9 @@ soc: - opcua.endpoint_description_link_id - opcua.endpoint_uri - log.id.uid - - event.dataset '::opcua_binary_get_endpoints_user_token': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -1028,9 +1029,9 @@ soc: - opcua.user_token_link_id - opcua.user_token_type - log.id.uid - - event.dataset '::opcua_binary_read': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -1038,9 +1039,9 @@ soc: - opcua.link_id - opcua.read_results_link_id - log.id.uid - - event.dataset '::opcua_binary_status_code_detail': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -1048,9 +1049,9 @@ soc: - opcua.info_type_string - opcua.source_string - log.id.uid - - event.dataset '::profinet': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -1058,18 +1059,18 @@ soc: - profinet.index - profinet.operation_type - log.id.uid - - event.dataset '::profinet_dce_rpc': - soc_timestamp + - event.dataset - source.ip - source.port - 
destination.ip - destination.port - profinet.operation - log.id.uid - - event.dataset '::s7comm': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -1077,9 +1078,9 @@ soc: - s7.ros.control.name - s7.function.name - log.id.uid - - event.dataset '::s7comm_plus': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -1087,9 +1088,9 @@ soc: - s7.opcode.name - s7.version - log.id.uid - - event.dataset '::s7comm_read_szl': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -1097,9 +1098,9 @@ soc: - s7.szl_id_name - s7.return_code_name - log.id.uid - - event.dataset '::s7comm_upload_download': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip @@ -1107,52 +1108,52 @@ soc: - s7.ros.control.name - s7.function_code - log.id.uid - - event.dataset '::tds': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip - destination.port - tds.command - log.id.uid - - event.dataset '::tds_rpc': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip - destination.port - tds.procedure_name - log.id.uid - - event.dataset '::tds_sql_batch': - soc_timestamp + - event.dataset - source.ip - source.port - destination.ip - destination.port - tds.header_type - log.id.uid - - event.dataset ':endpoint:events_x_api': - soc_timestamp + - event.dataset - host.name - user.name - process.name - process.Ext.api.name - process.thread.Ext.call_stack_final_user_module.path - - event.dataset ':endpoint:events_x_file': - soc_timestamp + - event.dataset - host.name - user.name - process.name - event.action - file.path - - event.dataset ':endpoint:events_x_library': - soc_timestamp + - event.dataset - host.name - user.name - process.name @@ -1160,9 +1161,9 @@ soc: - dll.path - dll.code_signature.status - dll.code_signature.subject_name - - event.dataset ':endpoint:events_x_network': - soc_timestamp + - event.dataset - host.name - user.name - process.name @@ 
-1172,43 +1173,43 @@ soc: - destination.ip - destination.port - network.community_id - - event.dataset ':endpoint:events_x_process': - soc_timestamp + - event.dataset - host.name - user.name - process.parent.name - process.name - event.action - process.working_directory - - event.dataset ':endpoint:events_x_registry': - soc_timestamp + - event.dataset - host.name - user.name - process.name - event.action - registry.path - - event.dataset ':endpoint:events_x_security': - soc_timestamp + - event.dataset - host.name - user.name - process.executable - event.action - event.outcome - - event.dataset ':system:': - soc_timestamp + - event.dataset - process.name - process.pid - user.effective.name - user.name - system.auth.sudo.command - - event.dataset - message ':opencanary:': - soc_timestamp + - event.dataset - source.ip - source.port - logdata.HOSTNAME @@ -1216,20 +1217,20 @@ soc: - logdata.PATH - logdata.USERNAME - logdata.USERAGENT - - event.dataset ':elastic_agent:': - soc_timestamp - event.dataset - message ':kismet:': - soc_timestamp + - event.dataset - device.manufacturer - client.mac - network.wireless.ssid - network.wireless.bssid - - event.dataset ':playbook:': - soc_timestamp + - event.dataset - rule.name - event.severity_label - event_data.event.dataset @@ -1241,6 +1242,7 @@ soc: - event_data.process.pid ':sigma:': - soc_timestamp + - event.dataset - rule.name - event.severity_label - event_data.event.dataset @@ -1954,6 +1956,7 @@ soc: eventFields: default: - soc_timestamp + - event.dataset - rule.name - event.severity_label - source.ip @@ -1966,6 +1969,7 @@ soc: - rule.rev ':playbook:': - soc_timestamp + - event.dataset - rule.name - event.severity_label - event_data.event.dataset @@ -1977,6 +1981,7 @@ soc: - event_data.process.pid ':sigma:': - soc_timestamp + - event.dataset - rule.name - event.severity_label - event_data.event.dataset @@ -1989,13 +1994,13 @@ soc: - event_data.process.pid ':strelka:': - soc_timestamp + - event.dataset - file.name - 
file.size - hash.md5 - file.source - file.mime_type - log.id.fuid - - event.dataset queryBaseFilter: tags:alert queryToggleFilters: - name: acknowledged From 5b966b83a9881a35c1a040561255af3b000739d1 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Mon, 6 May 2024 09:26:52 -0400 Subject: [PATCH 17/40] change rulesRepos for airgap or not --- salt/soc/defaults.yaml | 24 +++++++++++++++++------- salt/soc/merged.map.jinja | 9 +++++++++ salt/soc/soc_soc.yaml | 24 ++++++++++++++---------- 3 files changed, 40 insertions(+), 17 deletions(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index ad154e9d1..1f96c63a8 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1274,10 +1274,15 @@ soc: rulesFingerprintFile: /opt/sensoroni/fingerprints/sigma.fingerprint stateFilePath: /opt/sensoroni/fingerprints/elastalertengine.state rulesRepos: - - repo: https://github.com/Security-Onion-Solutions/securityonion-resources - license: Elastic-2.0 - folder: sigma/stable - community: true + default: + - repo: https://github.com/Security-Onion-Solutions/securityonion-resources + license: Elastic-2.0 + folder: sigma/stable + community: true + airgap: + - repo: file:///nsm/rules/detect-sigma/repos/securityonion-resources + license: DRL + community: true sigmaRulePackages: - core - emerging_threats_addon @@ -1333,9 +1338,14 @@ soc: denyRegex: '' reposFolder: /opt/sensoroni/yara/repos rulesRepos: - - repo: https://github.com/Security-Onion-Solutions/securityonion-yara - license: DRL - community: true + default: + - repo: https://github.com/Security-Onion-Solutions/securityonion-yara + license: DRL + community: true + airgap: + - repo: file:///nsm/rules/detect-yara/repos/securityonion-yara + license: DRL + community: true yaraRulesFolder: /opt/sensoroni/yara/rules stateFilePath: /opt/sensoroni/fingerprints/strelkaengine.state suricataengine: diff --git a/salt/soc/merged.map.jinja b/salt/soc/merged.map.jinja index 222566dba..e31fabf2a 100644 --- 
a/salt/soc/merged.map.jinja +++ b/salt/soc/merged.map.jinja @@ -37,6 +37,15 @@ {% do SOCMERGED.config.server.modules.elastalertengine.update({'autoEnabledSigmaRules': SOCMERGED.config.server.modules.elastalertengine.autoEnabledSigmaRules.default}) %} {% endif %} +{# set elastalertengine.rulesRepos and strelkaengine.rulesRepos based on airgap or not #} +{% if GLOBALS.airgap %} +{% do SOCMERGED.config.server.modules.elastalertengine.update({'rulesRepos': SOCMERGED.config.server.modules.elastalertengine.rulesRepos.airgap}) %} +{% do SOCMERGED.config.server.modules.strelkaengine.update({'rulesRepos': SOCMERGED.config.server.modules.strelkaengine.rulesRepos.airgap}) %} +{% else %} +{% do SOCMERGED.config.server.modules.elastalertengine.update({'rulesRepos': SOCMERGED.config.server.modules.elastalertengine.rulesRepos.default}) %} +{% do SOCMERGED.config.server.modules.strelkaengine.update({'rulesRepos': SOCMERGED.config.server.modules.strelkaengine.rulesRepos.default}) %} +{% endif %} + {# remove these modules if detections is disabled #} {% if not SOCMERGED.config.server.client.detectionsEnabled %} {% do SOCMERGED.config.server.modules.pop('elastalertengine') %} diff --git a/salt/soc/soc_soc.yaml b/salt/soc/soc_soc.yaml index a9d6bac08..01308f73f 100644 --- a/salt/soc/soc_soc.yaml +++ b/salt/soc/soc_soc.yaml @@ -107,11 +107,13 @@ soc: advanced: True helpLink: sigma.html rulesRepos: - description: 'Custom Git repos to pull Sigma rules from. "license" field is required, "folder" is optional. "community" disables some management options for the imported rules - they can't be deleted or edited, just tuned, duplicated and Enabled | Disabled.' - global: True - advanced: True - forcedType: "[]{}" - helpLink: sigma.html + default: &eerulesRepos + description: 'Custom Git repos to pull Sigma rules from. "license" field is required, "folder" is optional. 
"community" disables some management options for the imported rules - they can't be deleted or edited, just tuned, duplicated and Enabled | Disabled.' + global: True + advanced: True + forcedType: "[]{}" + helpLink: sigma.html + airgap: *eerulesRepos sigmaRulePackages: description: 'Defines the Sigma Community Ruleset you want to run. One of these (core | core+ | core++ | all ) as well as an optional Add-on (emerging_threats_addon). Once you have changed the ruleset here, you will need to wait for the rule update to take place (every 8 hours), or you can force the update by nagivating to Detections --> Options dropdown menu --> Elastalert --> Full Update. WARNING! Changing the ruleset will remove all existing Sigma rules of the previous ruleset and their associated overrides. This removal cannot be undone.' global: True @@ -205,11 +207,13 @@ soc: advanced: True helpLink: yara.html rulesRepos: - description: 'Custom Git repos to pull YARA rules from. "license" field is required, "folder" is optional. "community" disables some management options for the imported rules - they can't be deleted or edited, just tuned, duplicated and Enabled | Disabled.'' - global: True - advanced: True - forcedType: "[]{}" - helpLink: yara.html + default: &serulesRepos + description: 'Custom Git repos to pull YARA rules from. "license" field is required, "folder" is optional. "community" disables some management options for the imported rules - they can't be deleted or edited, just tuned, duplicated and Enabled | Disabled.'' + global: True + advanced: True + forcedType: "[]{}" + helpLink: yara.html + airgap: *serulesRepos suricataengine: allowRegex: description: 'Regex used to filter imported Suricata rules. Deny regex takes precedence over the Allow regex setting.' 
From 38f74d2e9e8e17262a44d21fb515c9cc7ab73053 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Mon, 6 May 2024 11:38:30 -0400 Subject: [PATCH 18/40] change quotes --- salt/soc/soc_soc.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/soc/soc_soc.yaml b/salt/soc/soc_soc.yaml index 01308f73f..67305d4e9 100644 --- a/salt/soc/soc_soc.yaml +++ b/salt/soc/soc_soc.yaml @@ -108,7 +108,7 @@ soc: helpLink: sigma.html rulesRepos: default: &eerulesRepos - description: 'Custom Git repos to pull Sigma rules from. "license" field is required, "folder" is optional. "community" disables some management options for the imported rules - they can't be deleted or edited, just tuned, duplicated and Enabled | Disabled.' + description: "Custom Git repos to pull Sigma rules from. 'license' field is required, 'folder' is optional. 'community' disables some management options for the imported rules - they can't be deleted or edited, just tuned, duplicated and Enabled | Disabled." global: True advanced: True forcedType: "[]{}" @@ -208,7 +208,7 @@ soc: helpLink: yara.html rulesRepos: default: &serulesRepos - description: 'Custom Git repos to pull YARA rules from. "license" field is required, "folder" is optional. "community" disables some management options for the imported rules - they can't be deleted or edited, just tuned, duplicated and Enabled | Disabled.'' + description: "Custom Git repos to pull YARA rules from. 'license' field is required, 'folder' is optional. 'community' disables some management options for the imported rules - they can't be deleted or edited, just tuned, duplicated and Enabled | Disabled." 
global: True advanced: True forcedType: "[]{}" From be1758aea71f308a2aa2fd4204da80a9015b2a8e Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Mon, 6 May 2024 12:22:44 -0400 Subject: [PATCH 19/40] Fix license and folder --- salt/soc/defaults.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 1f96c63a8..5ae1497f0 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1281,7 +1281,8 @@ soc: community: true airgap: - repo: file:///nsm/rules/detect-sigma/repos/securityonion-resources - license: DRL + license: Elastic-2.0 + folder: sigma/stable community: true sigmaRulePackages: - core From 554a2035414f0ddea0e01b4f8acaac55233251d9 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Mon, 6 May 2024 12:59:45 -0400 Subject: [PATCH 20/40] update airgapEnabled in map file --- salt/soc/defaults.yaml | 1 - salt/soc/merged.map.jinja | 2 ++ salt/soc/soc_soc.yaml | 5 ----- 3 files changed, 2 insertions(+), 6 deletions(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 1f96c63a8..582f0af82 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1246,7 +1246,6 @@ soc: maxPacketCount: 5000 htmlDir: html importUploadDir: /nsm/soc/uploads - airgapEnabled: false modules: cases: soc filedatastore: diff --git a/salt/soc/merged.map.jinja b/salt/soc/merged.map.jinja index e31fabf2a..f23d9c115 100644 --- a/salt/soc/merged.map.jinja +++ b/salt/soc/merged.map.jinja @@ -41,9 +41,11 @@ {% if GLOBALS.airgap %} {% do SOCMERGED.config.server.modules.elastalertengine.update({'rulesRepos': SOCMERGED.config.server.modules.elastalertengine.rulesRepos.airgap}) %} {% do SOCMERGED.config.server.modules.strelkaengine.update({'rulesRepos': SOCMERGED.config.server.modules.strelkaengine.rulesRepos.airgap}) %} +{% do SOCMERGED.config.server.update({'airgapEnabled': true}) %} {% else %} {% do SOCMERGED.config.server.modules.elastalertengine.update({'rulesRepos': 
SOCMERGED.config.server.modules.elastalertengine.rulesRepos.default}) %} {% do SOCMERGED.config.server.modules.strelkaengine.update({'rulesRepos': SOCMERGED.config.server.modules.strelkaengine.rulesRepos.default}) %} +{% do SOCMERGED.config.server.update({'airgapEnabled': false}) %} {% endif %} {# remove these modules if detections is disabled #} diff --git a/salt/soc/soc_soc.yaml b/salt/soc/soc_soc.yaml index 67305d4e9..2b1e83ec4 100644 --- a/salt/soc/soc_soc.yaml +++ b/salt/soc/soc_soc.yaml @@ -119,11 +119,6 @@ soc: global: True advanced: False helpLink: sigma.html - airgapEnabled: - description: 'This setting dynamically changes to the current status of Airgap on this system and is used during the Sigma ruleset update process.' - global: True - advanced: True - helpLink: sigma.html elastic: index: description: Comma-separated list of indices or index patterns (wildcard "*" supported) that SOC will search for records. From 5aa611302a7cdf3a1f6159758710dd7ab20141f3 Mon Sep 17 00:00:00 2001 From: Wes Date: Mon, 6 May 2024 19:08:01 +0000 Subject: [PATCH 21/40] Handle YARA rules for distributed deployments --- salt/allowed_states.map.jinja | 3 +++ salt/strelka/config.sls | 9 +++++++++ salt/top.sls | 3 +++ 3 files changed, 15 insertions(+) diff --git a/salt/allowed_states.map.jinja b/salt/allowed_states.map.jinja index 7fbf4ff14..109e244d7 100644 --- a/salt/allowed_states.map.jinja +++ b/salt/allowed_states.map.jinja @@ -65,6 +65,7 @@ 'registry', 'manager', 'nginx', + 'strelka.manager', 'soc', 'kratos', 'influxdb', @@ -91,6 +92,7 @@ 'nginx', 'telegraf', 'influxdb', + 'strelka.manager', 'soc', 'kratos', 'elasticfleet', @@ -111,6 +113,7 @@ 'nginx', 'telegraf', 'influxdb', + 'strelka.manager', 'soc', 'kratos', 'elastic-fleet-package-registry', diff --git a/salt/strelka/config.sls b/salt/strelka/config.sls index 90bba58a7..c65f9c2cb 100644 --- a/salt/strelka/config.sls +++ b/salt/strelka/config.sls @@ -29,6 +29,15 @@ strelkarulesdir: - group: 939 - makedirs: True +{%- if 
grains.role in ['so-sensor', 'so-heavynode'] %} +strelkasensorrules: + file.managed: + - name: /opt/so/conf/strelka/rules/compiled/rules.compiled + - source: salt://strelka/rules/compiled/rules.compiled + - user: 939 + - group: 939 +{%- endif %} + strelkareposdir: file.directory: - name: /opt/so/conf/strelka/repos diff --git a/salt/top.sls b/salt/top.sls index d4852aa4d..e4eaab786 100644 --- a/salt/top.sls +++ b/salt/top.sls @@ -87,6 +87,7 @@ base: - registry - nginx - influxdb + - strelka.manager - soc - kratos - firewall @@ -161,6 +162,7 @@ base: - registry - nginx - influxdb + - strelka.manager - soc - kratos - firewall @@ -210,6 +212,7 @@ base: - manager - nginx - influxdb + - strelka.manager - soc - kratos - sensoroni From 445fb316342089293bc45efe3c6e24e006e9413a Mon Sep 17 00:00:00 2001 From: Wes Date: Mon, 6 May 2024 19:09:37 +0000 Subject: [PATCH 22/40] Add manager SLS --- salt/strelka/compile_yara.py | 67 ++++++++++++++++++++++++++++++++++++ salt/strelka/manager.sls | 45 ++++++++++++++++++++++++ 2 files changed, 112 insertions(+) create mode 100644 salt/strelka/compile_yara.py create mode 100644 salt/strelka/manager.sls diff --git a/salt/strelka/compile_yara.py b/salt/strelka/compile_yara.py new file mode 100644 index 000000000..dc77980d2 --- /dev/null +++ b/salt/strelka/compile_yara.py @@ -0,0 +1,67 @@ +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. 
+ +import os +import yara +import glob +import json +from concurrent.futures import ThreadPoolExecutor + +def check_syntax(rule_file): + try: + # Testing if compilation throws a syntax error, don't save the result + yara.compile(filepath=rule_file) + return (True, rule_file, None) + except yara.SyntaxError as e: + # Return the error message for logging purposes + return (False, rule_file, str(e)) + +def compile_yara_rules(rules_dir): + compiled_dir = os.path.join(rules_dir, "compiled") + compiled_rules_path = [ os.path.join(compiled_dir, "rules.compiled"), "/opt/so/saltstack/default/salt/strelka/rules/compiled/rules.compiled" ] + rule_files = glob.glob(os.path.join(rules_dir, '**/*.yar'), recursive=True) + files_to_compile = {} + removed_count = 0 + success_count = 0 + + # Use ThreadPoolExecutor to parallelize syntax checks + with ThreadPoolExecutor() as executor: + results = executor.map(check_syntax, rule_files) + + # Collect yara files and prepare for batch compilation + for success, rule_file, error_message in results: + if success: + files_to_compile[os.path.basename(rule_file)] = rule_file + success_count += 1 + else: + # Extract just the UUID from the rule file name + rule_id = os.path.splitext(os.path.basename(rule_file))[0] + log_entry = { + "event_module": "soc", + "event_dataset": "soc.detections", + "log.level": "error", + "error_message": error_message, + "error_analysis": "Syntax Error", + "detection_type": "YARA", + "rule_uuid": rule_id, + "error_type": "runtime_status" + } + with open('/opt/sensoroni/logs/detections_runtime-status_yara.log', 'a') as log_file: + json.dump(log_entry, log_file) + log_file.write('\n') # Ensure new entries start on new lines + os.remove(rule_file) + removed_count += 1 + + # Compile all remaining valid rules into a single file + if files_to_compile: + compiled_rules = yara.compile(filepaths=files_to_compile) + for path in compiled_rules_path: + compiled_rules.save(path) + print(f"All remaining rules compiled and saved 
into {path}") + + # Print summary of compilation results + print(f"Summary: {success_count} rules compiled successfully, {removed_count} rules removed due to errors.") + +compile_yara_rules("/opt/sensoroni/yara/rules/") diff --git a/salt/strelka/manager.sls b/salt/strelka/manager.sls new file mode 100644 index 000000000..1c56a18fd --- /dev/null +++ b/salt/strelka/manager.sls @@ -0,0 +1,45 @@ +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. + +{% from 'allowed_states.map.jinja' import allowed_states %} +{% if sls in allowed_states %} + +# Strelka config +strelkaconfdir: + file.directory: + - name: /opt/so/conf/strelka/rules/compiled/ + - user: 939 + - group: 939 + - makedirs: True + +strelkacompileyara: + file.managed: + - name: /opt/so/conf/strelka/compile_yara.py + - source: salt://strelka/compile_yara/compile_yara.py + - user: 939 + - group: 939 + - makedirs: True + +strelkarulesdir: + file.directory: + - name: /opt/so/conf/strelka/rules + - user: 939 + - group: 939 + - makedirs: True + +strelkareposdir: + file.directory: + - name: /opt/so/conf/strelka/repos + - user: 939 + - group: 939 + - makedirs: True + +{% else %} + +{{sls}}_state_not_allowed: + test.fail_without_changes: + - name: {{sls}}_state_not_allowed + +{% endif %} From d2fa77ae1074accd831129fc96c0a62d9a5d0cf1 Mon Sep 17 00:00:00 2001 From: Wes Date: Mon, 6 May 2024 19:10:41 +0000 Subject: [PATCH 23/40] Update compile script --- salt/strelka/compile_yara.py | 67 ----------------------- salt/strelka/compile_yara/compile_yara.py | 9 +-- 2 files changed, 5 insertions(+), 71 deletions(-) delete mode 100644 salt/strelka/compile_yara.py diff --git a/salt/strelka/compile_yara.py b/salt/strelka/compile_yara.py deleted file mode 100644 index 
dc77980d2..000000000 --- a/salt/strelka/compile_yara.py +++ /dev/null @@ -1,67 +0,0 @@ -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - -import os -import yara -import glob -import json -from concurrent.futures import ThreadPoolExecutor - -def check_syntax(rule_file): - try: - # Testing if compilation throws a syntax error, don't save the result - yara.compile(filepath=rule_file) - return (True, rule_file, None) - except yara.SyntaxError as e: - # Return the error message for logging purposes - return (False, rule_file, str(e)) - -def compile_yara_rules(rules_dir): - compiled_dir = os.path.join(rules_dir, "compiled") - compiled_rules_path = [ os.path.join(compiled_dir, "rules.compiled"), "/opt/so/saltstack/default/salt/strelka/rules/compiled/rules.compiled" ] - rule_files = glob.glob(os.path.join(rules_dir, '**/*.yar'), recursive=True) - files_to_compile = {} - removed_count = 0 - success_count = 0 - - # Use ThreadPoolExecutor to parallelize syntax checks - with ThreadPoolExecutor() as executor: - results = executor.map(check_syntax, rule_files) - - # Collect yara files and prepare for batch compilation - for success, rule_file, error_message in results: - if success: - files_to_compile[os.path.basename(rule_file)] = rule_file - success_count += 1 - else: - # Extract just the UUID from the rule file name - rule_id = os.path.splitext(os.path.basename(rule_file))[0] - log_entry = { - "event_module": "soc", - "event_dataset": "soc.detections", - "log.level": "error", - "error_message": error_message, - "error_analysis": "Syntax Error", - "detection_type": "YARA", - "rule_uuid": rule_id, - "error_type": "runtime_status" - } - with open('/opt/sensoroni/logs/detections_runtime-status_yara.log', 'a') as 
log_file: - json.dump(log_entry, log_file) - log_file.write('\n') # Ensure new entries start on new lines - os.remove(rule_file) - removed_count += 1 - - # Compile all remaining valid rules into a single file - if files_to_compile: - compiled_rules = yara.compile(filepaths=files_to_compile) - for path in compiled_rules_path: - compiled_rules.save(path) - print(f"All remaining rules compiled and saved into {path}") - - # Print summary of compilation results - print(f"Summary: {success_count} rules compiled successfully, {removed_count} rules removed due to errors.") - -compile_yara_rules("/opt/sensoroni/yara/rules/") diff --git a/salt/strelka/compile_yara/compile_yara.py b/salt/strelka/compile_yara/compile_yara.py index ece3c6a9e..dc77980d2 100644 --- a/salt/strelka/compile_yara/compile_yara.py +++ b/salt/strelka/compile_yara/compile_yara.py @@ -20,7 +20,7 @@ def check_syntax(rule_file): def compile_yara_rules(rules_dir): compiled_dir = os.path.join(rules_dir, "compiled") - compiled_rules_path = os.path.join(compiled_dir, "rules.compiled") + compiled_rules_path = [ os.path.join(compiled_dir, "rules.compiled"), "/opt/so/saltstack/default/salt/strelka/rules/compiled/rules.compiled" ] rule_files = glob.glob(os.path.join(rules_dir, '**/*.yar'), recursive=True) files_to_compile = {} removed_count = 0 @@ -57,10 +57,11 @@ def compile_yara_rules(rules_dir): # Compile all remaining valid rules into a single file if files_to_compile: compiled_rules = yara.compile(filepaths=files_to_compile) - compiled_rules.save(compiled_rules_path) - print(f"All remaining rules compiled and saved into {compiled_rules_path}") + for path in compiled_rules_path: + compiled_rules.save(path) + print(f"All remaining rules compiled and saved into {path}") # Print summary of compilation results print(f"Summary: {success_count} rules compiled successfully, {removed_count} rules removed due to errors.") -compile_yara_rules("/opt/sensoroni/yara/rules/") \ No newline at end of file 
+compile_yara_rules("/opt/sensoroni/yara/rules/") From 5056ec526bb4c032b82df102e5955a445d8e6cee Mon Sep 17 00:00:00 2001 From: Wes Date: Mon, 6 May 2024 19:27:38 +0000 Subject: [PATCH 24/40] Add compiled directory --- salt/strelka/rules/compiled/DO.NOT.TOUCH | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 salt/strelka/rules/compiled/DO.NOT.TOUCH diff --git a/salt/strelka/rules/compiled/DO.NOT.TOUCH b/salt/strelka/rules/compiled/DO.NOT.TOUCH new file mode 100644 index 000000000..e69de29bb From 1e48955376543806d610443f20ae1897d6e776df Mon Sep 17 00:00:00 2001 From: Wes Date: Mon, 6 May 2024 19:39:03 +0000 Subject: [PATCH 25/40] Restart when rules change --- salt/strelka/backend/enabled.sls | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/strelka/backend/enabled.sls b/salt/strelka/backend/enabled.sls index a626924b1..ffb1df257 100644 --- a/salt/strelka/backend/enabled.sls +++ b/salt/strelka/backend/enabled.sls @@ -42,8 +42,8 @@ strelka_backend: {% endfor %} {% endif %} - restart_policy: on-failure - #- watch: - #- file: strelkarules + - watch: + - file: /opt/so/conf/strelka/rules/compiled/rules.compiled delete_so-strelka-backend_so-status.disabled: file.uncomment: From 4ebe070cd8b9e916087b0177911f8699107a7b7b Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Mon, 6 May 2024 19:03:12 -0400 Subject: [PATCH 26/40] test regexes for detections --- salt/soc/soc_soc.yaml | 7 +++++++ setup/so-functions | 7 +++++++ setup/so-setup | 3 +++ 3 files changed, 17 insertions(+) diff --git a/salt/soc/soc_soc.yaml b/salt/soc/soc_soc.yaml index 4b88a5f84..c3bb525a3 100644 --- a/salt/soc/soc_soc.yaml +++ b/salt/soc/soc_soc.yaml @@ -135,6 +135,7 @@ soc: description: Duration (in milliseconds) to wait for a response from the Elasticsearch host before giving up and showing an error on the SOC UI. 
global: True advanced: True + forcedType: int casesEnabled: description: Set to true if the SOC case management module, natively integrated with Elasticsearch, should be enabled. global: True @@ -179,10 +180,12 @@ soc: description: Duration (in milliseconds) to wait for a response from the Salt API when executing tasks known for being long running before giving up and showing an error on the SOC UI. global: True advanced: True + forcedType: int relayTimeoutMs: description: Duration (in milliseconds) to wait for a response from the Salt API when executing common grid management tasks before giving up and showing an error on the SOC UI. global: True advanced: True + forcedType: int strelkaengine: allowRegex: description: 'Regex used to filter imported Yara rules. Deny regex takes precedence over the Allow regex setting.' @@ -242,17 +245,21 @@ soc: description: Duration (in milliseconds) to wait for a response from the SOC server API before giving up and showing an error on the SOC UI. global: True advanced: True + forcedType: int webSocketTimeoutMs: description: Duration (in milliseconds) to wait for a response from the SOC server websocket before giving up and reconnecting. global: True advanced: True + forcedType: int tipTimeoutMs: description: Duration (in milliseconds) to show the popup tips, which typically indicate a successful operation. global: True + forcedType: int cacheExpirationMs: description: Duration (in milliseconds) of cached data within the browser, including users and settings. global: True advanced: True + forcedType: int casesEnabled: description: Set to true to enable case management in SOC. 
global: True diff --git a/setup/so-functions b/setup/so-functions index 7afc0a883..80ad0be6a 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1257,6 +1257,13 @@ soc_pillar() { " config:"\ " server:"\ " srvKey: '$SOCSRVKEY'"\ + " modules:"\ + " elastalertengine:"\ + " allowRegex: '$ELASTALERT_ALLOW_REGEX'"\ + " strelkaengine:"\ + " allowRegex: '$STRELKA_ALLOW_REGEX'"\ + " suricataengine:"\ + " allowRegex: '$SURICATA_ALLOW_REGEX'"\ "" > "$soc_pillar_file" if [[ $telemetry -ne 0 ]]; then diff --git a/setup/so-setup b/setup/so-setup index 8a1879c58..9ce99d2d2 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -245,6 +245,9 @@ if [ -n "$test_profile" ]; then WEBUSER=onionuser@somewhere.invalid WEBPASSWD1=0n10nus3r WEBPASSWD2=0n10nus3r + STRELKA_ALLOW_REGEX="EquationGroup_Toolset_Apr17__ELV_.*" + ELASTALERT_ALLOW_REGEX="Security Onion" + SURICATA_ALLOW_REGEX="200033\\d" update_sudoers_for_testing fi From bee8c2c1ce15f7033a1dfcfd9127df73ecbdf87b Mon Sep 17 00:00:00 2001 From: Wes Date: Tue, 7 May 2024 13:21:59 +0000 Subject: [PATCH 27/40] Remove watch --- salt/strelka/backend/enabled.sls | 2 -- 1 file changed, 2 deletions(-) diff --git a/salt/strelka/backend/enabled.sls b/salt/strelka/backend/enabled.sls index ffb1df257..1de22f404 100644 --- a/salt/strelka/backend/enabled.sls +++ b/salt/strelka/backend/enabled.sls @@ -42,8 +42,6 @@ strelka_backend: {% endfor %} {% endif %} - restart_policy: on-failure - - watch: - - file: /opt/so/conf/strelka/rules/compiled/rules.compiled delete_so-strelka-backend_so-status.disabled: file.uncomment: From dcc1f656ee68cea737675c6fdb7a524c28bb5cf5 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 7 May 2024 10:13:51 -0400 Subject: [PATCH 28/40] predownload logstash and elastic for new searchnode and heavynode --- salt/elasticsearch/addsearchnode.sls | 29 ++++++++++++++++++++++++++++ salt/elasticsearch/download.sls | 20 +++++++++++++++++++ salt/logstash/download.sls | 20 +++++++++++++++++++ salt/manager/tools/sbin/so-minion | 
4 ++++ salt/orch/container_download.sls | 10 ++++++++++ salt/top.sls | 6 +++--- 6 files changed, 86 insertions(+), 3 deletions(-) create mode 100644 salt/elasticsearch/addsearchnode.sls create mode 100644 salt/elasticsearch/download.sls create mode 100644 salt/logstash/download.sls create mode 100644 salt/orch/container_download.sls diff --git a/salt/elasticsearch/addsearchnode.sls b/salt/elasticsearch/addsearchnode.sls new file mode 100644 index 000000000..c5b40df4a --- /dev/null +++ b/salt/elasticsearch/addsearchnode.sls @@ -0,0 +1,29 @@ +so-soc container extrahosts +seed_hosts elasticsearch.yaml +so-elasticsearch container extrahosts +so-logstash container extrahosts + + ID: elasticfleet_sbin_jinja + Function: file.recurse + Name: /usr/sbin + Result: True + Comment: Recursively updated /usr/sbin + Started: 19:56:53.468894 + Duration: 951.706 ms + Changes: + ---------- + /usr/sbin/so-elastic-fleet-artifacts-url-update: + ---------- + diff: + --- + +++ + @@ -26,7 +26,7 @@ + } + + # Query for the current Grid Nodes that are running Logstash (which includes Fleet Nodes) + -LOGSTASHNODES='{"manager": {"jpp70man1": {"ip": "10.66.166.231"}}, "searchnode": {"jpp70sea1": {"ip": "10.66.166.232"}, "jpp70sea2": {"ip": "10.66.166.142"}}}' + +LOGSTASHNODES='{"manager": {"jpp70man1": {"ip": "10.66.166.231"}}, "searchnode": {"jpp70sea1": {"ip": "10.66.166.232"}}}' + + # Initialize an array for new hosts from Fleet Nodes + declare -a NEW_LIST=() + diff --git a/salt/elasticsearch/download.sls b/salt/elasticsearch/download.sls new file mode 100644 index 000000000..f74c7059a --- /dev/null +++ b/salt/elasticsearch/download.sls @@ -0,0 +1,20 @@ +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. 
+ +{% from 'allowed_states.map.jinja' import allowed_states %} +{% if sls.split('.')[0] in allowed_states %} +{% from 'vars/globals.map.jinja' import GLOBALS %} + +so-elasticsearch_image: + docker_image.present: + - name: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-elasticsearch:{{ GLOBALS.so_version }} + +{% else %} + +{{sls}}_state_not_allowed: + test.fail_without_changes: + - name: {{sls}}_state_not_allowed + +{% endif %} diff --git a/salt/logstash/download.sls b/salt/logstash/download.sls new file mode 100644 index 000000000..cf1c6176c --- /dev/null +++ b/salt/logstash/download.sls @@ -0,0 +1,20 @@ +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. + +{% from 'allowed_states.map.jinja' import allowed_states %} +{% if sls.split('.')[0] in allowed_states %} +{% from 'vars/globals.map.jinja' import GLOBALS %} + +so-logstash_image: + docker_image.present: + - name: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-logstash:{{ GLOBALS.so_version }} + +{% else %} + +{{sls}}_state_not_allowed: + test.fail_without_changes: + - name: {{sls}}_state_not_allowed + +{% endif %} diff --git a/salt/manager/tools/sbin/so-minion b/salt/manager/tools/sbin/so-minion index 79eea59fe..e0e892c3d 100755 --- a/salt/manager/tools/sbin/so-minion +++ b/salt/manager/tools/sbin/so-minion @@ -604,6 +604,10 @@ function updateMineAndApplyStates() { #checkMine "network.ip_addrs" # calls so-common and set_minionid sets MINIONID to local minion id set_minionid + # if this is a searchnode or heavynode, start downloading logstash and elasticsearch containers while the manager prepares for the new node + if [[ "$NODETYPE" == "SEARCHNODE" || "$NODETYPE" == "HEAVYNODE" ]]; then + salt-run state.orch 
orch.container_download pillar="{'setup': {'newnode': $MINION_ID }}" > /dev/null 2>&1 & + fi # $MINIONID is the minion id of the manager and $MINION_ID is the target node or the node being configured salt-run state.orch orch.deploy_newnode pillar="{'setup': {'manager': $MINIONID, 'newnode': $MINION_ID }}" > /dev/null 2>&1 & } diff --git a/salt/orch/container_download.sls b/salt/orch/container_download.sls new file mode 100644 index 000000000..c4aedaaba --- /dev/null +++ b/salt/orch/container_download.sls @@ -0,0 +1,10 @@ +{% set NEWNODE = salt['pillar.get']('setup:newnode') %} + +{% if NEWNODE.split('_')|last in ['searchnode', 'heavynode'] %} +{{NEWNODE}}_download_logstash_elasticsearch: + salt.state: + - tgt: {{ NEWNODE }} + - sls: + - logstash.download + - elasticsearch.download +{% endif %} diff --git a/salt/top.sls b/salt/top.sls index d4852aa4d..2510356c4 100644 --- a/salt/top.sls +++ b/salt/top.sls @@ -143,13 +143,13 @@ base: '*_searchnode and G@saltversion:{{saltversion}}': - match: compound + - firewall - ssl + - elasticsearch + - logstash - sensoroni - telegraf - nginx - - firewall - - elasticsearch - - logstash - elasticfleet.install_agent_grid - stig From 2e70d157e27b7b2b8f0d5dadfadeb351f78cb43e Mon Sep 17 00:00:00 2001 From: weslambert Date: Tue, 7 May 2024 11:13:51 -0400 Subject: [PATCH 29/40] Add ref --- salt/elasticfleet/defaults.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/salt/elasticfleet/defaults.yaml b/salt/elasticfleet/defaults.yaml index 2af7e7532..2d9ab97a1 100644 --- a/salt/elasticfleet/defaults.yaml +++ b/salt/elasticfleet/defaults.yaml @@ -37,6 +37,7 @@ elasticfleet: - azure - barracuda - carbonblack_edr + - cef - checkpoint - cisco_asa - cisco_duo @@ -122,4 +123,4 @@ elasticfleet: base_url: http://localhost:2501 poll_interval: 1m api_key: - enabled_nodes: [] \ No newline at end of file + enabled_nodes: [] From 1da88b70ac3d7390630d7363b2a09213f5443e04 Mon Sep 17 00:00:00 2001 From: Corey Ogburn Date: Mon, 6 May 
2024 09:56:24 -0600 Subject: [PATCH 30/40] Specify Error Retry Wait and Error Limit for All Detection Engines If a sync errors out, the engine will wait `communityRulesImportErrorSeconds` seconds instead of the usual `communityRulesImportFrequencySeconds` seconds wait. If `failAfterConsecutiveErrorCount` errors happen in a row when syncing detections to ElasticSearch then the sync is considered a failure and will give up and try again later. This assumes ElasticSearch is the source of the errors and backs off in hopes it'll be able to fix itself. --- salt/soc/defaults.yaml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index d76a0a0e4..04a66dc94 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1279,6 +1279,8 @@ soc: - securityonion-resources+critical - securityonion-resources+high communityRulesImportFrequencySeconds: 28800 + communityRulesImportErrorSeconds: 300 + failAfterConsecutiveErrorCount: 10 denyRegex: '' elastAlertRulesFolder: /opt/sensoroni/elastalert reposFolder: /opt/sensoroni/sigma/repos @@ -1346,6 +1348,8 @@ soc: - securityonion-yara autoUpdateEnabled: true communityRulesImportFrequencySeconds: 28800 + communityRulesImportErrorSeconds: 300 + failAfterConsecutiveErrorCount: 10 compileYaraPythonScriptPath: /opt/sensoroni/yara/compile_yara.py denyRegex: '' reposFolder: /opt/sensoroni/yara/repos @@ -1364,6 +1368,8 @@ soc: allowRegex: '' autoUpdateEnabled: true communityRulesImportFrequencySeconds: 28800 + communityRulesImportErrorSeconds: 300 + failAfterConsecutiveErrorCount: 10 communityRulesFile: /nsm/rules/suricata/emerging-all.rules denyRegex: '' rulesFingerprintFile: /opt/sensoroni/fingerprints/emerging-all.fingerprint From 8364b2a7308931d3422792bb069e78be36db4765 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Tue, 7 May 2024 14:30:52 -0400 Subject: [PATCH 31/40] update for testing --- setup/so-setup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git
a/setup/so-setup b/setup/so-setup index 9ce99d2d2..b76f9bb98 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -247,7 +247,7 @@ if [ -n "$test_profile" ]; then WEBPASSWD2=0n10nus3r STRELKA_ALLOW_REGEX="EquationGroup_Toolset_Apr17__ELV_.*" ELASTALERT_ALLOW_REGEX="Security Onion" - SURICATA_ALLOW_REGEX="200033\\d" + SURICATA_ALLOW_REGEX="(200033\\d|2100538|2102466)" update_sudoers_for_testing fi From 2eee61778842c136466451ea3da7ea696764b109 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 7 May 2024 17:21:01 -0400 Subject: [PATCH 32/40] Update soc_idstools.yaml --- salt/idstools/soc_idstools.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/salt/idstools/soc_idstools.yaml b/salt/idstools/soc_idstools.yaml index ce8b56569..d1cca0028 100644 --- a/salt/idstools/soc_idstools.yaml +++ b/salt/idstools/soc_idstools.yaml @@ -16,6 +16,8 @@ idstools: urls: description: This is a list of additional rule download locations. global: True + multiline: True + forcedType: "[]string" helpLink: rules.html sids: disabled: From 326c59bb264cb045a88acd50560e007447769c5d Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 8 May 2024 08:42:38 -0400 Subject: [PATCH 33/40] Update soc_idstools.yaml --- salt/idstools/soc_idstools.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/salt/idstools/soc_idstools.yaml b/salt/idstools/soc_idstools.yaml index d1cca0028..698a7a1fc 100644 --- a/salt/idstools/soc_idstools.yaml +++ b/salt/idstools/soc_idstools.yaml @@ -14,10 +14,11 @@ idstools: regex: ETPRO\b|ETOPEN\b helpLink: rules.html urls: - description: This is a list of additional rule download locations. + description: This is a list of additional rule download locations. This feature is currently disabled. 
global: True multiline: True forcedType: "[]string" + readonly: True helpLink: rules.html sids: disabled: From 6d2ecce9b741316b56019524f4699638b22fc4a2 Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Wed, 8 May 2024 08:43:37 -0400 Subject: [PATCH 34/40] remove old yara airgap code --- salt/manager/tools/sbin/soup | 6 ------ 1 file changed, 6 deletions(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index fa3c3b5ee..285882748 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -734,12 +734,6 @@ unmount_update() { update_airgap_rules() { # Copy the rules over to update them for airgap. rsync -av $UPDATE_DIR/agrules/suricata/* /nsm/rules/suricata/ - rsync -av $UPDATE_DIR/agrules/yara/* /nsm/rules/yara/ - if [ -d /nsm/repo/rules/sigma ]; then - rsync -av $UPDATE_DIR/agrules/sigma/* /nsm/repo/rules/sigma/ - fi - - # SOC Detections Airgap rsync -av $UPDATE_DIR/agrules/detect-sigma/* /nsm/rules/detect-sigma/ rsync -av $UPDATE_DIR/agrules/detect-yara/* /nsm/rules/detect-yara/ } From 5dc098f0fc6e71a9e3b2f9f953fa48669a337eeb Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 8 May 2024 08:54:24 -0400 Subject: [PATCH 35/40] remove test file --- salt/elasticsearch/addsearchnode.sls | 29 ---------------------------- 1 file changed, 29 deletions(-) delete mode 100644 salt/elasticsearch/addsearchnode.sls diff --git a/salt/elasticsearch/addsearchnode.sls b/salt/elasticsearch/addsearchnode.sls deleted file mode 100644 index c5b40df4a..000000000 --- a/salt/elasticsearch/addsearchnode.sls +++ /dev/null @@ -1,29 +0,0 @@ -so-soc container extrahosts -seed_hosts elasticsearch.yaml -so-elasticsearch container extrahosts -so-logstash container extrahosts - - ID: elasticfleet_sbin_jinja - Function: file.recurse - Name: /usr/sbin - Result: True - Comment: Recursively updated /usr/sbin - Started: 19:56:53.468894 - Duration: 951.706 ms - Changes: - ---------- - /usr/sbin/so-elastic-fleet-artifacts-url-update: - ---------- - diff: 
- --- - +++ - @@ -26,7 +26,7 @@ - } - - # Query for the current Grid Nodes that are running Logstash (which includes Fleet Nodes) - -LOGSTASHNODES='{"manager": {"jpp70man1": {"ip": "10.66.166.231"}}, "searchnode": {"jpp70sea1": {"ip": "10.66.166.232"}, "jpp70sea2": {"ip": "10.66.166.142"}}}' - +LOGSTASHNODES='{"manager": {"jpp70man1": {"ip": "10.66.166.231"}}, "searchnode": {"jpp70sea1": {"ip": "10.66.166.232"}}}' - - # Initialize an array for new hosts from Fleet Nodes - declare -a NEW_LIST=() - From 0d2e5e0065435837c0572c1be57d22dbfa9771f0 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 8 May 2024 09:50:01 -0400 Subject: [PATCH 36/40] need repo and docker first --- salt/orch/container_download.sls | 2 ++ 1 file changed, 2 insertions(+) diff --git a/salt/orch/container_download.sls b/salt/orch/container_download.sls index c4aedaaba..90fb4f6aa 100644 --- a/salt/orch/container_download.sls +++ b/salt/orch/container_download.sls @@ -5,6 +5,8 @@ salt.state: - tgt: {{ NEWNODE }} - sls: + - repo.client + - docker - logstash.download - elasticsearch.download {% endif %} From 1862deaf5e6706193586770f157b34b9cdbf519b Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 8 May 2024 10:14:08 -0400 Subject: [PATCH 37/40] add copyright --- salt/orch/container_download.sls | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/salt/orch/container_download.sls b/salt/orch/container_download.sls index 90fb4f6aa..aa8e19587 100644 --- a/salt/orch/container_download.sls +++ b/salt/orch/container_download.sls @@ -1,3 +1,8 @@ +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. 
+ {% set NEWNODE = salt['pillar.get']('setup:newnode') %} {% if NEWNODE.split('_')|last in ['searchnode', 'heavynode'] %} From 5a5a1e86acf1a4aee29a6fd01ccc9c1651e2474c Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Wed, 8 May 2024 13:26:36 -0400 Subject: [PATCH 38/40] FIX: Adjust so-import-pcap so that suricata works when it is pcapengine #12969 --- salt/common/tools/sbin_jinja/so-import-pcap | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/common/tools/sbin_jinja/so-import-pcap b/salt/common/tools/sbin_jinja/so-import-pcap index b8a90421f..30d5d4fc4 100755 --- a/salt/common/tools/sbin_jinja/so-import-pcap +++ b/salt/common/tools/sbin_jinja/so-import-pcap @@ -89,6 +89,7 @@ function suricata() { -v ${LOG_PATH}:/var/log/suricata/:rw \ -v ${NSM_PATH}/:/nsm/:rw \ -v "$PCAP:/input.pcap:ro" \ + -v /dev/null:/nsm/suripcap:rw \ -v /opt/so/conf/suricata/bpf:/etc/suricata/bpf:ro \ {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-suricata:{{ VERSION }} \ --runmode single -k none -r /input.pcap > $LOG_PATH/console.log 2>&1 From 77e21170515b2f7e433ad7954414d9d579f64166 Mon Sep 17 00:00:00 2001 From: Wes Date: Wed, 8 May 2024 18:47:52 +0000 Subject: [PATCH 39/40] Account for 0 active rules and change watch --- salt/strelka/backend/enabled.sls | 2 ++ salt/strelka/config.sls | 8 +++++--- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/salt/strelka/backend/enabled.sls b/salt/strelka/backend/enabled.sls index 1de22f404..863115eda 100644 --- a/salt/strelka/backend/enabled.sls +++ b/salt/strelka/backend/enabled.sls @@ -42,6 +42,8 @@ strelka_backend: {% endfor %} {% endif %} - restart_policy: on-failure + - watch: + - file: /opt/so/conf/strelka/rules/compiled/* delete_so-strelka-backend_so-status.disabled: file.uncomment: diff --git a/salt/strelka/config.sls b/salt/strelka/config.sls index c65f9c2cb..4d3686c41 100644 --- a/salt/strelka/config.sls +++ b/salt/strelka/config.sls @@ -31,11 +31,13 @@ strelkarulesdir: {%- if grains.role in ['so-sensor', 'so-heavynode'] %} 
strelkasensorrules: - file.managed: - - name: /opt/so/conf/strelka/rules/compiled/rules.compiled - - source: salt://strelka/rules/compiled/rules.compiled + file.recurse: + - name: /opt/so/conf/strelka/rules/compiled/ + - source: salt://strelka/rules/compiled/ - user: 939 - group: 939 + - file_mode: 755 + - clean: True {%- endif %} strelkareposdir: From 0567b935340800e098dd088f609438e88a2a6d5d Mon Sep 17 00:00:00 2001 From: weslambert Date: Wed, 8 May 2024 15:39:59 -0400 Subject: [PATCH 40/40] Remove mode --- salt/strelka/config.sls | 1 - 1 file changed, 1 deletion(-) diff --git a/salt/strelka/config.sls b/salt/strelka/config.sls index 4d3686c41..cd8fb2667 100644 --- a/salt/strelka/config.sls +++ b/salt/strelka/config.sls @@ -36,7 +36,6 @@ strelkasensorrules: - source: salt://strelka/rules/compiled/ - user: 939 - group: 939 - - file_mode: 755 - clean: True {%- endif %}