Mirror of https://github.com/Security-Onion-Solutions/securityonion.git
Synced 2025-12-06 17:22:49 +01:00

Commit: merge 2.4/dev
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
@@ -65,6 +65,7 @@
     'registry',
     'manager',
     'nginx',
+    'strelka.manager',
     'soc',
     'kratos',
     'influxdb',
@@ -91,6 +92,7 @@
     'nginx',
     'telegraf',
     'influxdb',
+    'strelka.manager',
     'soc',
     'kratos',
     'elasticfleet',
@@ -112,6 +114,7 @@
     'nginx',
     'telegraf',
     'influxdb',
+    'strelka.manager',
     'soc',
     'kratos',
     'elastic-fleet-package-registry',
@@ -89,6 +89,7 @@ function suricata() {
         -v ${LOG_PATH}:/var/log/suricata/:rw \
         -v ${NSM_PATH}/:/nsm/:rw \
         -v "$PCAP:/input.pcap:ro" \
+        -v /dev/null:/nsm/suripcap:rw \
         -v /opt/so/conf/suricata/bpf:/etc/suricata/bpf:ro \
         {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-suricata:{{ VERSION }} \
         --runmode single -k none -r /input.pcap > $LOG_PATH/console.log 2>&1
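Note: the one functional addition here is the /dev/null bind mount, which masks /nsm/suripcap inside the one-shot import container so any pcap output Suricata would write there is discarded. A minimal sketch of the same masking trick, using a generic alpine image as a stand-in (not the Security Onion container):

    # Bind-mounting /dev/null over a file path inside a container discards writes to it.
    docker run --rm -v /dev/null:/masked/file:rw alpine \
        sh -c 'echo data > /masked/file; wc -c < /masked/file'   # prints 0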
@@ -37,6 +37,7 @@ elasticfleet:
       - azure
       - barracuda
       - carbonblack_edr
+      - cef
       - checkpoint
       - cisco_asa
       - cisco_duo
@@ -122,4 +123,4 @@ elasticfleet:
       base_url: http://localhost:2501
       poll_interval: 1m
       api_key:
     enabled_nodes: []
salt/elasticsearch/download.sls (new file, 20 lines)
@@ -0,0 +1,20 @@
+# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
+# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
+# https://securityonion.net/license; you may not use this file except in compliance with the
+# Elastic License 2.0.
+
+{% from 'allowed_states.map.jinja' import allowed_states %}
+{% if sls.split('.')[0] in allowed_states %}
+{% from 'vars/globals.map.jinja' import GLOBALS %}
+
+so-elasticsearch_image:
+  docker_image.present:
+    - name: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-elasticsearch:{{ GLOBALS.so_version }}
+
+{% else %}
+
+{{sls}}_state_not_allowed:
+  test.fail_without_changes:
+    - name: {{sls}}_state_not_allowed
+
+{% endif %}
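Note: this state only pre-pulls the so-elasticsearch image (docker_image.present); it configures nothing, which is what lets the orchestration added below warm a new node's image cache while the manager is still preparing. A sketch of running it ad hoc from the manager (minion ID is an example):

    salt 'node1_searchnode' state.apply elasticsearch.download queue=True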
@@ -14,8 +14,11 @@ idstools:
       regex: ETPRO\b|ETOPEN\b
       helpLink: rules.html
     urls:
-      description: This is a list of additional rule download locations.
+      description: This is a list of additional rule download locations. This feature is currently disabled.
       global: True
+      multiline: True
+      forcedType: "[]string"
+      readonly: True
       helpLink: rules.html
     sids:
       disabled:
salt/logstash/download.sls (new file, 20 lines)
@@ -0,0 +1,20 @@
+# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
+# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
+# https://securityonion.net/license; you may not use this file except in compliance with the
+# Elastic License 2.0.
+
+{% from 'allowed_states.map.jinja' import allowed_states %}
+{% if sls.split('.')[0] in allowed_states %}
+{% from 'vars/globals.map.jinja' import GLOBALS %}
+
+so-logstash_image:
+  docker_image.present:
+    - name: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-logstash:{{ GLOBALS.so_version }}
+
+{% else %}
+
+{{sls}}_state_not_allowed:
+  test.fail_without_changes:
+    - name: {{sls}}_state_not_allowed
+
+{% endif %}
@@ -426,10 +426,6 @@ function checkMine() {
 
 }
 
-function updateMine() {
-  retry 20 1 "salt '$MINION_ID' mine.update" True
-}
-
 function createEVAL() {
   is_pcaplimit=true
   pcapspace
@@ -604,24 +600,20 @@ function addMinion() {
 }
 
 function updateMineAndApplyStates() {
-  # tell the minion to populate the mine with data from mine_functions which is populated during setup
-  # this only needs to happen on non managers since they handle this during setup
-  # and they need to wait for ca creation to update the mine
-  updateMine
-  checkMine "network.ip_addrs"
-  # apply the elasticsearch state to the manager if a new searchnode was added
+  #checkMine "network.ip_addrs"
+  # calls so-common and set_minionid sets MINIONID to local minion id
+  set_minionid
+  # if this is a searchnode or heavynode, start downloading logstash and elasticsearch containers while the manager prepares for the new node
   if [[ "$NODETYPE" == "SEARCHNODE" || "$NODETYPE" == "HEAVYNODE" ]]; then
-    # calls so-common and set_minionid sets MINIONID to local minion id
-    set_minionid
-    salt $MINIONID state.apply elasticsearch queue=True --async
-    salt $MINIONID state.apply soc queue=True --async
+    salt-run state.orch orch.container_download pillar="{'setup': {'newnode': $MINION_ID }}" > /dev/null 2>&1 &
   fi
   if [[ "$NODETYPE" == "RECEIVER" ]]; then
    # Setup nodeid for Kafka
    salt-call state.apply kafka.nodes queue=True
   fi
-  # run this async so the cli doesn't wait for a return
-  salt "$MINION_ID" state.highstate --async queue=True
+  # $MINIONID is the minion id of the manager and $MINION_ID is the target node or the node being configured
+  salt-run state.orch orch.deploy_newnode pillar="{'setup': {'manager': $MINIONID, 'newnode': $MINION_ID }}" > /dev/null 2>&1 &
 }
 
 function setupMinionFiles() {
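Note: the net effect is that the former inline salt calls are replaced by two salt-run orchestrations, so manager-side prep and the new node's highstate are sequenced by the master instead of this script. A sketch of the same hand-off run manually from the manager (minion IDs are examples):

    # container_download warms the new searchnode/heavynode's image cache
    salt-run state.orch orch.container_download pillar="{'setup': {'newnode': 'node1_searchnode'}}"
    # deploy_newnode updates the mine, preps the manager, then runs the node's highstate
    salt-run state.orch orch.deploy_newnode pillar="{'setup': {'manager': 'manager', 'newnode': 'node1_searchnode'}}"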
@@ -750,12 +750,6 @@ unmount_update() {
 update_airgap_rules() {
   # Copy the rules over to update them for airgap.
   rsync -av $UPDATE_DIR/agrules/suricata/* /nsm/rules/suricata/
-  rsync -av $UPDATE_DIR/agrules/yara/* /nsm/rules/yara/
-  if [ -d /nsm/repo/rules/sigma ]; then
-    rsync -av $UPDATE_DIR/agrules/sigma/* /nsm/repo/rules/sigma/
-  fi
-
-  # SOC Detections Airgap
   rsync -av $UPDATE_DIR/agrules/detect-sigma/* /nsm/rules/detect-sigma/
   rsync -av $UPDATE_DIR/agrules/detect-yara/* /nsm/rules/detect-yara/
 }
salt/orch/container_download.sls (new file, 17 lines)
@@ -0,0 +1,17 @@
+# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
+# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
+# https://securityonion.net/license; you may not use this file except in compliance with the
+# Elastic License 2.0.
+
+{% set NEWNODE = salt['pillar.get']('setup:newnode') %}
+
+{% if NEWNODE.split('_')|last in ['searchnode', 'heavynode'] %}
+{{NEWNODE}}_download_logstash_elasticsearch:
+  salt.state:
+    - tgt: {{ NEWNODE }}
+    - sls:
+      - repo.client
+      - docker
+      - logstash.download
+      - elasticsearch.download
+{% endif %}
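Note: the guard keys off the Security Onion minion ID convention, where the ID ends in the node role (for example node1_searchnode), so this orchestration is a no-op for every other role. The same check, sketched in shell:

    NEWNODE="node1_searchnode"            # example minion ID
    case "${NEWNODE##*_}" in              # keep everything after the last underscore
      searchnode|heavynode) echo "pre-download logstash + elasticsearch images" ;;
      *)                    echo "nothing to pre-download for this role" ;;
    esac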
salt/orch/deploy_newnode.sls (new file, 32 lines)
@@ -0,0 +1,32 @@
+{% set MANAGER = salt['pillar.get']('setup:manager') %}
+{% set NEWNODE = salt['pillar.get']('setup:newnode') %}
+
+# tell the minion to populate the mine with data from mine_functions which is populated during setup
+# this only needs to happen on non managers since they handle this during setup
+# and they need to wait for ca creation to update the mine
+{{NEWNODE}}_update_mine:
+  salt.function:
+    - name: mine.update
+    - tgt: {{ NEWNODE }}
+    - retry:
+        attempts: 36
+        interval: 5
+
+# we need to prepare the manager for a new searchnode or heavynode
+{% if NEWNODE.split('_')|last in ['searchnode', 'heavynode'] %}
+manager_run_es_soc:
+  salt.state:
+    - tgt: {{ MANAGER }}
+    - sls:
+      - elasticsearch
+      - soc
+    - queue: True
+    - require:
+      - salt: {{NEWNODE}}_update_mine
+{% endif %}
+
+{{NEWNODE}}_run_highstate:
+  salt.state:
+    - tgt: {{ NEWNODE }}
+    - highstate: True
+    - queue: True
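Note: the retry block gives the new minion up to 36 attempts at 5-second intervals (roughly three minutes) for mine.update to succeed, covering the wait for CA creation mentioned in the comments. Roughly the same thing done by hand from the manager (example minion ID):

    for attempt in $(seq 1 36); do
      salt 'node1_searchnode' mine.update && break   # stop once the mine call succeeds
      sleep 5
    done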
@@ -7,6 +7,8 @@
 
 import logging
 import re
+import os
+import sys
 log = logging.getLogger(__name__)
 
 # will need this in future versions of this engine
@@ -14,70 +16,18 @@ log = logging.getLogger(__name__)
 #local = salt.client.LocalClient()
 
 def start(fpa, interval=10):
-    log.info("pillarWatch engine: ##### checking watched pillars for changes #####")
+    currentPillarValue = ''
+    previousPillarValue = ''
 
-    # try to open the file that stores the previous runs data
-    # if the file doesn't exist, create a blank one
-    try:
-        # maybe change this location
-        dataFile = open("/opt/so/state/pillarWatch.txt", "r+")
-    except FileNotFoundError:
-        log.warn("pillarWatch engine: No previous pillarWatch data saved")
-        dataFile = open("/opt/so/state/pillarWatch.txt", "w+")
+    '''
+    def processJinjaFile():
+        log.info("pillarWatch engine: processing jinja file")
+        log.info(pillarFile)
+        log.info(__salt__['jinja.load_map'](pillarFile, 'GLOBALMERGED'))
+        sys.exit(0)
+    '''
 
-    df = dataFile.read()
-    for i in fpa:
-        log.trace("pillarWatch engine: files: %s" % i['files'])
-        log.trace("pillarWatch engine: pillar: %s" % i['pillar'])
-        log.trace("pillarWatch engine: actions: %s" % i['actions'])
-        pillarFiles = i['files']
-        pillar = i['pillar']
-        actions = i['actions']
-        # these are the keys that we are going to look for as we traverse the pillarFiles
-        patterns = pillar.split(".")
-        # check the pillar files in reveresed order to replicate the same hierarchy as the pillar top file
-        for pillarFile in reversed(pillarFiles):
-            currentPillarValue = ''
-            previousPillarValue = ''
-            # this var is used to track how many times the pattern has been found in the pillar file so that we can access the proper index later
-            patternFound = 0
-            with open(pillarFile, "r") as file:
-                log.debug("pillarWatch engine: checking file: %s" % pillarFile)
-                for line in file:
-                    log.trace("pillarWatch engine: inspecting line: %s in file: %s" % (line, file))
-                    log.trace("pillarWatch engine: looking for: %s" % patterns[patternFound])
-                    # since we are looping line by line through a pillar file, the next line will check if each line matches the progression of keys through the pillar
-                    # ex. if we are looking for the value of global.pipeline, then this will loop through the pillar file until 'global' is found, then it will look
-                    # for pipeline. once pipeline is found, it will record the value
-                    if re.search('^' + patterns[patternFound] + ':', line.strip()):
-                        # strip the newline because it makes the logs u-g-l-y
-                        log.debug("pillarWatch engine: found: %s" % line.strip('\n'))
-                        patternFound += 1
-                        # we have found the final key in the pillar that we are looking for, get the previous value then the current value
-                        if patternFound == len(patterns):
-                            # at this point, df is equal to the contents of the pillarWatch file that is used to tract the previous values of the pillars
-                            previousPillarValue = 'PREVIOUSPILLARVALUENOTSAVEDINDATAFILE'
-                            # check the contents of the dataFile that stores the previousPillarValue(s).
-                            # find if the pillar we are checking for changes has previously been saved. if so, grab it's prior value
-                            for l in df.splitlines():
-                                if pillar in l:
-                                    previousPillarValue = str(l.split(":")[1].strip())
-                            currentPillarValue = str(line.split(":")[1]).strip()
-                            log.debug("pillarWatch engine: %s currentPillarValue: %s" % (pillar, currentPillarValue))
-                            log.debug("pillarWatch engine: %s previousPillarValue: %s" % (pillar, previousPillarValue))
-                            # if the pillar we are checking for changes has been defined in the dataFile,
-                            # replace the previousPillarValue with the currentPillarValue. if it isn't in there, append it.
-                            if pillar in df:
-                                df = re.sub(r"\b{}\b.*".format(pillar), pillar + ': ' + currentPillarValue, df)
-                            else:
-                                df += pillar + ': ' + currentPillarValue + '\n'
-                            log.trace("pillarWatch engine: df: %s" % df)
-                            # we have found the pillar so we dont need to loop through the file anymore
-                            break
-            # if key and value was found in the first file, then we don't want to look in
-            # any more files since we use the first file as the source of truth.
-            if patternFound == len(patterns):
-                break
+    def checkChangesTakeAction():
         # if the pillar value changed, then we find what actions we should take
         log.debug("pillarWatch engine: checking if currentPillarValue != previousPillarValue")
         if currentPillarValue != previousPillarValue:
@@ -119,6 +69,84 @@ def start(fpa, interval=10):
                     actionReturn = __salt__[saltModule](**args)
                     log.info("pillarWatch engine: actionReturn: %s" % actionReturn)
 
+
+    log.debug("pillarWatch engine: ##### checking watched pillars for changes #####")
+
+    # try to open the file that stores the previous runs data
+    # if the file doesn't exist, create a blank one
+    try:
+        # maybe change this location
+        dataFile = open("/opt/so/state/pillarWatch.txt", "r+")
+    except FileNotFoundError:
+        log.warn("pillarWatch engine: No previous pillarWatch data saved")
+        dataFile = open("/opt/so/state/pillarWatch.txt", "w+")
+
+    df = dataFile.read()
+    for i in fpa:
+        log.trace("pillarWatch engine: files: %s" % i['files'])
+        log.trace("pillarWatch engine: pillar: %s" % i['pillar'])
+        log.trace("pillarWatch engine: actions: %s" % i['actions'])
+        pillarFiles = i['files']
+        pillar = i['pillar']
+        default = str(i['default'])
+        actions = i['actions']
+        # these are the keys that we are going to look for as we traverse the pillarFiles
+        patterns = pillar.split(".")
+        # check the pillar files in reveresed order to replicate the same hierarchy as the pillar top file
+        for pillarFile in reversed(pillarFiles):
+            currentPillarValue = default
+            previousPillarValue = ''
+            '''
+            if 'jinja' in os.path.splitext(pillarFile)[1]:
+                processJinjaFile()
+            '''
+            # this var is used to track how many times the pattern has been found in the pillar file so that we can access the proper index later
+            patternFound = 0
+            with open(pillarFile, "r") as file:
+                log.debug("pillarWatch engine: checking file: %s" % pillarFile)
+                for line in file:
+                    log.trace("pillarWatch engine: inspecting line: %s in file: %s" % (line, file))
+                    log.trace("pillarWatch engine: looking for: %s" % patterns[patternFound])
+                    # since we are looping line by line through a pillar file, the next line will check if each line matches the progression of keys through the pillar
+                    # ex. if we are looking for the value of global.pipeline, then this will loop through the pillar file until 'global' is found, then it will look
+                    # for pipeline. once pipeline is found, it will record the value
+                    if re.search('^' + patterns[patternFound] + ':', line.strip()):
+                        # strip the newline because it makes the logs u-g-l-y
+                        log.debug("pillarWatch engine: found: %s" % line.strip('\n'))
+                        patternFound += 1
+                        # we have found the final key in the pillar that we are looking for, get the previous value and current value
+                        if patternFound == len(patterns):
+                            currentPillarValue = str(line.split(":")[1]).strip()
+                            # we have found the pillar so we dont need to loop through the file anymore
+                            break
+
+            # if key and value was found in the first file, then we don't want to look in
+            # any more files since we use the first file as the source of truth.
+            if patternFound == len(patterns):
+                break
+
+        # at this point, df is equal to the contents of the pillarWatch file that is used to tract the previous values of the pillars
+        previousPillarValue = 'PREVIOUSPILLARVALUENOTSAVEDINDATAFILE'
+        # check the contents of the dataFile that stores the previousPillarValue(s).
+        # find if the pillar we are checking for changes has previously been saved. if so, grab it's prior value
+        for l in df.splitlines():
+            if pillar in l:
+                previousPillarValue = str(l.split(":")[1].strip())
+        log.debug("pillarWatch engine: %s currentPillarValue: %s" % (pillar, currentPillarValue))
+        log.debug("pillarWatch engine: %s previousPillarValue: %s" % (pillar, previousPillarValue))
+        # if the pillar we are checking for changes has been defined in the dataFile,
+        # replace the previousPillarValue with the currentPillarValue. if it isn't in there, append it.
+        if pillar in df:
+            df = re.sub(r"\b{}\b.*".format(pillar), pillar + ': ' + currentPillarValue, df)
+        else:
+            df += pillar + ': ' + currentPillarValue + '\n'
+        log.trace("pillarWatch engine: df: %s" % df)
+        if previousPillarValue != "PREVIOUSPILLARVALUENOTSAVEDINDATAFILE":
+            checkChangesTakeAction()
+        else:
+            log.info("pillarWatch engine: %s was not previously tracked. not tacking action." % pillar)
+
+
     dataFile.seek(0)
     dataFile.write(df)
     dataFile.truncate()
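Note: before and after this refactor, the engine persists watched values as flat "pillar: value" lines in /opt/so/state/pillarWatch.txt and detects a change by comparing the live pillar against that file; the behavioral changes are the new per-pillar default handling and the guard that skips actions for pillars never seen before. A sketch of what the state file looks like after a few runs (values illustrative):

    cat /opt/so/state/pillarWatch.txt
    # idstools.config.ruleset: ETOPEN
    # global.pipeline: REDIS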
@@ -6,35 +6,11 @@ engines:
     interval: 60
   - pillarWatch:
       fpa:
-        # these files will be checked in reversed order to replicate the same hierarchy as the pillar top file
-        - files:
-            - /opt/so/saltstack/local/pillar/global/soc_global.sls
-            - /opt/so/saltstack/local/pillar/global/adv_global.sls
-          pillar: global.pipeline
-          actions:
-            from:
-              '*':
-                to:
-                  KAFKA:
-                    - cmd.run:
-                        cmd: /usr/sbin/so-yaml.py replace /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.enabled True
-#                    - cmd.run:
-#                        cmd: salt-call saltutil.kill_all_jobs
-#                    - cmd.run:
-#                        cmd: salt-call state.highstate &
-              KAFKA:
-                to:
-                  '*':
-                    - cmd.run:
-                        cmd: /usr/sbin/so-yaml.py replace /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.enabled False
-#                    - cmd.run:
-#                        cmd: salt-call saltutil.kill_all_jobs
-#                    - cmd.run:
-#                        cmd: salt-call state.highstate &
         - files:
             - /opt/so/saltstack/local/pillar/idstools/soc_idstools.sls
             - /opt/so/saltstack/local/pillar/idstools/adv_idstools.sls
           pillar: idstools.config.ruleset
+          default: ETOPEN
           actions:
             from:
               '*':
@@ -42,4 +18,30 @@ engines:
                 '*':
                   - cmd.run:
                       cmd: /usr/sbin/so-rule-update
+        - files:
+            - /opt/so/saltstack/local/pillar/idstools/soc_idstools.sls
+            - /opt/so/saltstack/local/pillar/idstools/adv_idstools.sls
+          pillar: idstools.config.oinkcode
+          default: ''
+          actions:
+            from:
+              '*':
+                to:
+                  '*':
+                    - cmd.run:
+                        cmd: /usr/sbin/so-rule-update
+        - files:
+            - /opt/so/saltstack/local/pillar/global/soc_global.sls
+            - /opt/so/saltstack/local/pillar/global/adv_global.sls
+          pillar: global.pipeline
+          default: REDIS
+          actions:
+            from:
+              '*':
+                to:
+                  '*':
+                    - cmd.run:
+                        cmd: salt-call saltutil.kill_all_jobs
+                    - cmd.run:
+                        cmd: salt-call state.highstate
     interval: 10
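Note: since the engine just polls these pillar files, a watched value can be flipped with the same so-yaml.py helper the removed KAFKA actions used; within an engine interval the change is noticed and the configured action (here so-rule-update) fires. Example only, as switching rulesets has the side effects described in the setting's help text:

    /usr/sbin/so-yaml.py replace /opt/so/saltstack/local/pillar/idstools/soc_idstools.sls idstools.config.ruleset ETPRO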
@@ -43,6 +43,7 @@ salt_master_service:
     - enable: True
     - watch:
       - file: checkmine_engine
+      - file: pillarWatch_engine
       - file: engines_config
     - order: last
 
[File diff suppressed because it is too large]
@@ -37,6 +37,17 @@
 {% do SOCMERGED.config.server.modules.elastalertengine.update({'autoEnabledSigmaRules': SOCMERGED.config.server.modules.elastalertengine.autoEnabledSigmaRules.default}) %}
 {% endif %}
 
+{# set elastalertengine.rulesRepos and strelkaengine.rulesRepos based on airgap or not #}
+{% if GLOBALS.airgap %}
+{% do SOCMERGED.config.server.modules.elastalertengine.update({'rulesRepos': SOCMERGED.config.server.modules.elastalertengine.rulesRepos.airgap}) %}
+{% do SOCMERGED.config.server.modules.strelkaengine.update({'rulesRepos': SOCMERGED.config.server.modules.strelkaengine.rulesRepos.airgap}) %}
+{% do SOCMERGED.config.server.update({'airgapEnabled': true}) %}
+{% else %}
+{% do SOCMERGED.config.server.modules.elastalertengine.update({'rulesRepos': SOCMERGED.config.server.modules.elastalertengine.rulesRepos.default}) %}
+{% do SOCMERGED.config.server.modules.strelkaengine.update({'rulesRepos': SOCMERGED.config.server.modules.strelkaengine.rulesRepos.default}) %}
+{% do SOCMERGED.config.server.update({'airgapEnabled': false}) %}
+{% endif %}
+
 {# remove these modules if detections is disabled #}
 {% if not SOCMERGED.config.server.client.detectionsEnabled %}
 {% do SOCMERGED.config.server.modules.pop('elastalertengine') %}
@@ -107,21 +107,18 @@ soc:
         advanced: True
         helpLink: sigma.html
       rulesRepos:
-        description: 'Custom Git repos to pull Sigma rules from. License field is required, folder is optional.'
-        global: True
-        advanced: True
-        forcedType: "[]{}"
-        helpLink: sigma.html
+        default: &eerulesRepos
+          description: "Custom Git repos to pull Sigma rules from. 'license' field is required, 'folder' is optional. 'community' disables some management options for the imported rules - they can't be deleted or edited, just tuned, duplicated and Enabled | Disabled."
+          global: True
+          advanced: True
+          forcedType: "[]{}"
+          helpLink: sigma.html
+        airgap: *eerulesRepos
       sigmaRulePackages:
         description: 'Defines the Sigma Community Ruleset you want to run. One of these (core | core+ | core++ | all ) as well as an optional Add-on (emerging_threats_addon). Once you have changed the ruleset here, you will need to wait for the rule update to take place (every 8 hours), or you can force the update by nagivating to Detections --> Options dropdown menu --> Elastalert --> Full Update. WARNING! Changing the ruleset will remove all existing Sigma rules of the previous ruleset and their associated overrides. This removal cannot be undone.'
         global: True
         advanced: False
         helpLink: sigma.html
-      autoUpdateEnabled:
-        description: 'Set to true to enable automatic Internet-connected updates of the Sigma Community Ruleset. If this is an Airgap system, this setting will be overridden and set to false.'
-        global: True
-        advanced: True
-        helpLink: sigma.html
     elastic:
       index:
         description: Comma-separated list of indices or index patterns (wildcard "*" supported) that SOC will search for records.
@@ -135,6 +132,7 @@ soc:
         description: Duration (in milliseconds) to wait for a response from the Elasticsearch host before giving up and showing an error on the SOC UI.
         global: True
         advanced: True
+        forcedType: int
       casesEnabled:
         description: Set to true if the SOC case management module, natively integrated with Elasticsearch, should be enabled.
         global: True
@@ -179,51 +177,47 @@ soc:
         description: Duration (in milliseconds) to wait for a response from the Salt API when executing tasks known for being long running before giving up and showing an error on the SOC UI.
         global: True
         advanced: True
+        forcedType: int
       relayTimeoutMs:
         description: Duration (in milliseconds) to wait for a response from the Salt API when executing common grid management tasks before giving up and showing an error on the SOC UI.
         global: True
         advanced: True
+        forcedType: int
     strelkaengine:
       allowRegex:
-        description: 'Regex used to filter imported Yara rules. Deny regex takes precedence over the Allow regex setting.'
+        description: 'Regex used to filter imported YARA rules. Deny regex takes precedence over the Allow regex setting.'
         global: True
         advanced: True
         helpLink: yara.html
-      autoEnabledYaraRules:
-        description: 'Yara rules to automatically enable on initial import. Format is $Ruleset - for example, for the default shipped ruleset: securityonion-yara'
+      autoEnabledYARARules:
+        description: 'YARA rules to automatically enable on initial import. Format is $Ruleset - for example, for the default shipped ruleset: securityonion-yara'
         global: True
         advanced: True
         helpLink: sigma.html
-      autoUpdateEnabled:
-        description: 'Set to true to enable automatic Internet-connected updates of the Yara rulesets. If this is an Airgap system, this setting will be overridden and set to false.'
-        global: True
-        advanced: True
       denyRegex:
-        description: 'Regex used to filter imported Yara rules. Deny regex takes precedence over the Allow regex setting.'
+        description: 'Regex used to filter imported YARA rules. Deny regex takes precedence over the Allow regex setting.'
         global: True
         advanced: True
         helpLink: yara.html
       communityRulesImportFrequencySeconds:
-        description: 'How often to check for new Yara rules (in seconds). This applies to both Community Rules and any configured Git repos.'
+        description: 'How often to check for new YARA rules (in seconds). This applies to both Community Rules and any configured Git repos.'
         global: True
         advanced: True
         helpLink: yara.html
       rulesRepos:
-        description: 'Custom Git repos to pull Yara rules from. License field is required'
-        global: True
-        advanced: True
-        forcedType: "[]{}"
-        helpLink: yara.html
+        default: &serulesRepos
+          description: "Custom Git repos to pull YARA rules from. 'license' field is required, 'folder' is optional. 'community' disables some management options for the imported rules - they can't be deleted or edited, just tuned, duplicated and Enabled | Disabled."
+          global: True
+          advanced: True
+          forcedType: "[]{}"
+          helpLink: yara.html
+        airgap: *serulesRepos
     suricataengine:
       allowRegex:
         description: 'Regex used to filter imported Suricata rules. Deny regex takes precedence over the Allow regex setting.'
         global: True
         advanced: True
         helpLink: suricata.html
-      autoUpdateEnabled:
-        description: 'Set to true to enable automatic Internet-connected updates of the Suricata rulesets. If this is an Airgap system, this setting will be overridden and set to false.'
-        global: True
-        advanced: True
      denyRegex:
        description: 'Regex used to filter imported Suricata rules. Deny regex takes precedence over the Allow regex setting.'
        global: True
@@ -242,17 +236,21 @@ soc:
         description: Duration (in milliseconds) to wait for a response from the SOC server API before giving up and showing an error on the SOC UI.
         global: True
         advanced: True
+        forcedType: int
       webSocketTimeoutMs:
         description: Duration (in milliseconds) to wait for a response from the SOC server websocket before giving up and reconnecting.
         global: True
         advanced: True
+        forcedType: int
       tipTimeoutMs:
         description: Duration (in milliseconds) to show the popup tips, which typically indicate a successful operation.
         global: True
+        forcedType: int
       cacheExpirationMs:
         description: Duration (in milliseconds) of cached data within the browser, including users and settings.
         global: True
         advanced: True
+        forcedType: int
       casesEnabled:
         description: Set to true to enable case management in SOC.
         global: True
@@ -42,8 +42,8 @@ strelka_backend:
     {% endfor %}
     {% endif %}
     - restart_policy: on-failure
-    #- watch:
-    #  - file: strelkarules
+    - watch:
+      - file: /opt/so/conf/strelka/rules/compiled/*
 
 delete_so-strelka-backend_so-status.disabled:
   file.uncomment:
@@ -20,7 +20,7 @@ def check_syntax(rule_file):
 
 def compile_yara_rules(rules_dir):
     compiled_dir = os.path.join(rules_dir, "compiled")
-    compiled_rules_path = os.path.join(compiled_dir, "rules.compiled")
+    compiled_rules_path = [ os.path.join(compiled_dir, "rules.compiled"), "/opt/so/saltstack/default/salt/strelka/rules/compiled/rules.compiled" ]
     rule_files = glob.glob(os.path.join(rules_dir, '**/*.yar'), recursive=True)
     files_to_compile = {}
    removed_count = 0
@@ -57,10 +57,11 @@ def compile_yara_rules(rules_dir):
     # Compile all remaining valid rules into a single file
     if files_to_compile:
         compiled_rules = yara.compile(filepaths=files_to_compile)
-        compiled_rules.save(compiled_rules_path)
-        print(f"All remaining rules compiled and saved into {compiled_rules_path}")
+        for path in compiled_rules_path:
+            compiled_rules.save(path)
+            print(f"All remaining rules compiled and saved into {path}")
 
     # Print summary of compilation results
     print(f"Summary: {success_count} rules compiled successfully, {removed_count} rules removed due to errors.")
 
 compile_yara_rules("/opt/sensoroni/yara/rules/")
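Note: with the second save target, one compile pass now leaves identical compiled rulesets next to the source rules and under the Salt fileserver, which is what the new strelkasensorrules state below syncs to sensors. A quick sanity check after a run (paths taken from the script; the first may live inside a container):

    ls -l /opt/sensoroni/yara/rules/compiled/rules.compiled \
          /opt/so/saltstack/default/salt/strelka/rules/compiled/rules.compiled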
@@ -29,6 +29,16 @@ strelkarulesdir:
     - group: 939
     - makedirs: True
 
+{%- if grains.role in ['so-sensor', 'so-heavynode'] %}
+strelkasensorrules:
+  file.recurse:
+    - name: /opt/so/conf/strelka/rules/compiled/
+    - source: salt://strelka/rules/compiled/
+    - user: 939
+    - group: 939
+    - clean: True
+{%- endif %}
+
 strelkareposdir:
   file.directory:
     - name: /opt/so/conf/strelka/repos
salt/strelka/manager.sls (new file, 45 lines)
@@ -0,0 +1,45 @@
+# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
+# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
+# https://securityonion.net/license; you may not use this file except in compliance with the
+# Elastic License 2.0.
+
+{% from 'allowed_states.map.jinja' import allowed_states %}
+{% if sls in allowed_states %}
+
+# Strelka config
+strelkaconfdir:
+  file.directory:
+    - name: /opt/so/conf/strelka/rules/compiled/
+    - user: 939
+    - group: 939
+    - makedirs: True
+
+strelkacompileyara:
+  file.managed:
+    - name: /opt/so/conf/strelka/compile_yara.py
+    - source: salt://strelka/compile_yara/compile_yara.py
+    - user: 939
+    - group: 939
+    - makedirs: True
+
+strelkarulesdir:
+  file.directory:
+    - name: /opt/so/conf/strelka/rules
+    - user: 939
+    - group: 939
+    - makedirs: True
+
+strelkareposdir:
+  file.directory:
+    - name: /opt/so/conf/strelka/repos
+    - user: 939
+    - group: 939
+    - makedirs: True
+
+{% else %}
+
+{{sls}}_state_not_allowed:
+  test.fail_without_changes:
+    - name: {{sls}}_state_not_allowed
+
+{% endif %}
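Note: managers pick this state up through the top-file additions below; it can also be applied ad hoc on a manager to (re)create the Strelka rule and repo directories and drop compile_yara.py into place. Sketch:

    salt-call state.apply strelka.manager queue=True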
salt/strelka/rules/compiled/DO.NOT.TOUCH (new file, empty)
@@ -87,6 +87,7 @@ base:
     - registry
     - nginx
     - influxdb
+    - strelka.manager
     - soc
     - kratos
     - firewall
@@ -145,13 +146,13 @@ base:
 
   '*_searchnode and G@saltversion:{{saltversion}}':
     - match: compound
+    - firewall
     - ssl
+    - elasticsearch
+    - logstash
     - sensoroni
     - telegraf
     - nginx
-    - firewall
-    - elasticsearch
-    - logstash
     - elasticfleet.install_agent_grid
     - stig
 
@@ -163,6 +164,7 @@ base:
     - registry
     - nginx
     - influxdb
+    - strelka.manager
     - soc
     - kratos
     - firewall
@@ -212,6 +214,7 @@ base:
     - manager
     - nginx
     - influxdb
+    - strelka.manager
     - soc
     - kratos
     - sensoroni
@@ -1270,6 +1270,13 @@ soc_pillar() {
   "  config:"\
   "    server:"\
   "      srvKey: '$SOCSRVKEY'"\
+  "      modules:"\
+  "        elastalertengine:"\
+  "          allowRegex: '$ELASTALERT_ALLOW_REGEX'"\
+  "        strelkaengine:"\
+  "          allowRegex: '$STRELKA_ALLOW_REGEX'"\
+  "        suricataengine:"\
+  "          allowRegex: '$SURICATA_ALLOW_REGEX'"\
   "" > "$soc_pillar_file"
 
   if [[ $telemetry -ne 0 ]]; then
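Note: with the three *_ALLOW_REGEX variables set (only the test profile in so-setup sets them; see the next hunk), the fragment above yields a soc pillar along these lines. A sketch, with the YAML nesting inferred from the quoted strings and the srvKey value redacted:

    # Print the inferred pillar fragment (illustration only).
    cat <<'EOF'
    soc:
      config:
        server:
          srvKey: '<generated>'
          modules:
            elastalertengine:
              allowRegex: 'Security Onion'
            strelkaengine:
              allowRegex: 'EquationGroup_Toolset_Apr17__ELV_.*'
            suricataengine:
              allowRegex: '(200033\d|2100538|2102466)'
    EOF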
@@ -245,6 +245,9 @@ if [ -n "$test_profile" ]; then
   WEBUSER=onionuser@somewhere.invalid
   WEBPASSWD1=0n10nus3r
   WEBPASSWD2=0n10nus3r
+  STRELKA_ALLOW_REGEX="EquationGroup_Toolset_Apr17__ELV_.*"
+  ELASTALERT_ALLOW_REGEX="Security Onion"
+  SURICATA_ALLOW_REGEX="(200033\\d|2100538|2102466)"
 
   update_sudoers_for_testing
 fi
@@ -818,7 +821,6 @@ if ! [[ -f $install_opt_file ]]; then
   configure_minion "$minion_type"
   check_sos_appliance
   drop_install_options
-  logCmd "salt-call state.apply setup.highstate_cron --local --file-root=../salt/"
   verify_setup
 fi
 
@@ -67,6 +67,7 @@ log_has_errors() {
     grep -vE "Reading first line of patchfile" | \
     grep -vE "Command failed with exit code" | \
     grep -vE "Running scope as unit" | \
+    grep -vE "securityonion-resources/sigma/stable" | \
     grep -vE "log-.*-pipeline_failed_attempts" &> "$error_log"
 
   if [[ $? -eq 0 ]]; then
@@ -14,7 +14,7 @@ whiptail_airgap() {
   [[ $is_manager || $is_import ]] && node_str='manager'
 
   INTERWEBS=$(whiptail --title "$whiptail_title" --menu \
-  "How should this $node_str be installed?" 10 70 2 \
+  "How should this $node_str be installed?\n\nFor more information, please see:\n$DOC_BASE_URL/airgap.html" 13 70 2 \
   "Standard " "This $node_str has access to the Internet" \
   "Airgap " "This $node_str does not have access to the Internet" 3>&1 1>&2 2>&3 )
 
@@ -592,8 +592,8 @@ whiptail_install_type() {
   "IMPORT" "Import PCAP or log files " \
   "EVAL" "Evaluation mode (not for production) " \
   "STANDALONE" "Standalone production install " \
-  "DISTRIBUTED" "Distributed install submenu " \
-  "DESKTOP" "Install Security Onion Desktop" \
+  "DISTRIBUTED" "Distributed deployment " \
+  "DESKTOP" "Security Onion Desktop" \
   3>&1 1>&2 2>&3
   )
 elif [[ "$OSVER" == "focal" ]]; then