Mirror of https://github.com/Security-Onion-Solutions/securityonion.git, synced 2025-12-06 09:12:45 +01:00
Merge remote-tracking branch 'origin/2.4/dev' into feature/fleet-artifacts
@@ -41,6 +41,7 @@ file_roots:
  base:
    - /opt/so/saltstack/local/salt
    - /opt/so/saltstack/default/salt
    - /opt/so/rules
  elasticartifacts:
    - /nsm/elastic-fleet/artifacts
@@ -118,6 +118,19 @@ esingestconf:
    - user: 930
    - group: 939

# Auto-generate Elasticsearch ingest node pipelines from pillar
{% for pipeline, config in ELASTICSEARCHMERGED.pipelines.items() %}
es_ingest_conf_{{pipeline}}:
  file.managed:
    - name: /opt/so/conf/elasticsearch/ingest/{{ pipeline }}
    - source: salt://elasticsearch/base-template.json.jinja
    - defaults:
        TEMPLATE_CONFIG: {{ config }}
    - template: jinja
    - onchanges_in:
      - file: so-pipelines-reload
{% endfor %}

eslog4jfile:
  file.managed:
    - name: /opt/so/conf/elasticsearch/log4j2.properties
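For context, a rough sketch (not part of this commit) of what the loop above does: each entry in the pillar's pipelines map becomes its own managed file. The jinja2 usage and the trimmed-down template below are illustrative assumptions, not the actual Salt renderer.

# Sketch only: approximates how the Jinja loop above expands pillar data
# into one managed file per pipeline.
from jinja2 import Template

STATE_TEMPLATE = Template("""\
{% for pipeline, config in pipelines.items() %}
es_ingest_conf_{{ pipeline }}:
  file.managed:
    - name: /opt/so/conf/elasticsearch/ingest/{{ pipeline }}
    - template: jinja
{% endfor %}
""")

# Pillar data shaped like the custom001..custom010 defaults shown further down.
pipelines = {
    "custom001": {
        "description": "Custom Pipeline",
        "processors": [
            {"set": {"field": "tags", "value": "custom001"}},
            {"pipeline": {"name": "common"}},
        ],
    },
}

print(STATE_TEMPLATE.render(pipelines=pipelines))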
@@ -55,6 +55,87 @@ elasticsearch:
        key: /usr/share/elasticsearch/config/elasticsearch.key
        verification_mode: none
        enabled: false
  pipelines:
    custom001:
      description: Custom Pipeline
      processors:
        - set:
            field: tags
            value: custom001
        - pipeline:
            name: common
    custom002:
      description: Custom Pipeline
      processors:
        - set:
            field: tags
            value: custom002
        - pipeline:
            name: common
    custom003:
      description: Custom Pipeline
      processors:
        - set:
            field: tags
            value: custom003
        - pipeline:
            name: common
    custom004:
      description: Custom Pipeline
      processors:
        - set:
            field: tags
            value: custom004
        - pipeline:
            name: common
    custom005:
      description: Custom Pipeline
      processors:
        - set:
            field: tags
            value: custom005
        - pipeline:
            name: common
    custom006:
      description: Custom Pipeline
      processors:
        - set:
            field: tags
            value: custom006
        - pipeline:
            name: common
    custom007:
      description: Custom Pipeline
      processors:
        - set:
            field: tags
            value: custom007
        - pipeline:
            name: common
    custom008:
      description: Custom Pipeline
      processors:
        - set:
            field: tags
            value: custom008
        - pipeline:
            name: common
    custom009:
      description: Custom Pipeline
      processors:
        - set:
            field: tags
            value: custom009
        - pipeline:
            name: common
    custom010:
      description: Custom Pipeline
      processors:
        - set:
            field: tags
            value: custom010
        - pipeline:
            name: common
  index_settings:
    global_overrides:
      index_template:
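To illustrate the data above (not part of this commit): each customNNN entry is shaped like an Elasticsearch ingest pipeline definition, so custom001 would conceptually end up as a pipeline body along these lines, assuming base-template.json.jinja serializes the config more or less directly.

# Sketch only: what the custom001 pillar entry looks like as an ingest
# pipeline body; the PUT endpoint shown in the comment is illustrative.
import json

custom001 = {
    "description": "Custom Pipeline",
    "processors": [
        {"set": {"field": "tags", "value": "custom001"}},
        {"pipeline": {"name": "common"}},
    ],
}

# Conceptually loaded via: PUT /_ingest/pipeline/custom001 (body below)
print(json.dumps(custom001, indent=2))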
@@ -45,6 +45,28 @@ elasticsearch:
        description: Max number of boolean clauses per query.
        global: True
        helpLink: elasticsearch.html
  pipelines:
    custom001: &pipelines
      description:
        description: Description of the ingest node pipeline
        global: True
        advanced: True
        helpLink: elasticsearch.html
      processors:
        description: Processors for the ingest node pipeline
        global: True
        advanced: True
        multiline: True
        helpLink: elasticsearch.html
    custom002: *pipelines
    custom003: *pipelines
    custom004: *pipelines
    custom005: *pipelines
    custom006: *pipelines
    custom007: *pipelines
    custom008: *pipelines
    custom009: *pipelines
    custom010: *pipelines
  index_settings:
    global_overrides:
      index_template:
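The &pipelines anchor with the *pipelines aliases reuses one annotation block for all ten entries. A minimal sketch of how a YAML loader resolves that (PyYAML assumed for illustration, not part of the commit):

# Sketch only: aliases resolve to the same mapping object as the anchor,
# so every customNNN entry shares one annotation block.
import yaml

doc = """
pipelines:
  custom001: &pipelines
    description: Description of the ingest node pipeline
    advanced: True
  custom002: *pipelines
"""

data = yaml.safe_load(doc)
assert data["pipelines"]["custom001"] is data["pipelines"]["custom002"]
print(data["pipelines"]["custom002"]["description"])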
@@ -63,6 +63,20 @@ lspipelinedir:
    - user: 931
    - group: 939

# Auto-generate Logstash pipeline config
{% for pipeline, config in LOGSTASH_MERGED.pipeline_config.items() %}
{% for assigned_pipeline in ASSIGNED_PIPELINES %}
{% set custom_pipeline = 'custom/' + pipeline + '.conf' %}
{% if custom_pipeline in LOGSTASH_MERGED.defined_pipelines[assigned_pipeline] %}
ls_custom_pipeline_conf_{{assigned_pipeline}}_{{pipeline}}:
  file.managed:
    - name: /opt/so/conf/logstash/pipelines/{{assigned_pipeline}}/{{ pipeline }}.conf
    - contents: LOGSTASH_MERGED.pipeline_config.{{pipeline}}
{% endif %}
{% endfor %}
{% endfor %}


{% for assigned_pipeline in ASSIGNED_PIPELINES %}
{% for CONFIGFILE in LOGSTASH_MERGED.defined_pipelines[assigned_pipeline] %}
ls_pipeline_{{assigned_pipeline}}_{{CONFIGFILE.split('.')[0] | replace("/","_") }}:
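A small sketch (sample data only, not from this commit) of the selection logic in the loop above: a custom pipeline config is written into an assigned pipeline's directory only when 'custom/<name>.conf' appears in that pipeline's defined_pipelines list.

# Sketch only: mirrors the Jinja membership check that decides which custom
# Logstash configs get written for each assigned pipeline.
pipeline_config = {
    "custom001": 'filter { mutate { add_tag => ["network_stuff"] } }',
}
defined_pipelines = {
    "manager": ["custom/custom001.conf"],  # illustrative entries
    "search": [],
}
assigned_pipelines = ["manager", "search"]

for name, config in pipeline_config.items():
    for assigned in assigned_pipelines:
        if "custom/" + name + ".conf" in defined_pipelines[assigned]:
            print("/opt/so/conf/logstash/pipelines/%s/%s.conf" % (assigned, name))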
@@ -42,6 +42,24 @@ logstash:
    custom2: []
    custom3: []
    custom4: []
  pipeline_config:
    custom001: |-
      filter {
        if [event][module] =~ "zeek" {
          mutate {
            add_tag => ["network_stuff"]
          }
        }
      }
    custom002: PLACEHOLDER
    custom003: PLACEHOLDER
    custom004: PLACEHOLDER
    custom005: PLACEHOLDER
    custom006: PLACEHOLDER
    custom007: PLACEHOLDER
    custom008: PLACEHOLDER
    custom009: PLACEHOLDER
    custom010: PLACEHOLDER
  settings:
    lsheap: 500m
    config:
@@ -31,6 +31,22 @@ logstash:
    custom2: *defined_pipelines
    custom3: *defined_pipelines
    custom4: *defined_pipelines
  pipeline_config:
    custom001: &pipeline_config
      description: Pipeline configuration for Logstash
      advanced: True
      multiline: True
      forcedType: string
      helpLink: logstash.html
    custom002: *pipeline_config
    custom003: *pipeline_config
    custom004: *pipeline_config
    custom005: *pipeline_config
    custom006: *pipeline_config
    custom007: *pipeline_config
    custom008: *pipeline_config
    custom009: *pipeline_config
    custom010: *pipeline_config
  settings:
    lsheap:
      description: Heap size to use for logstash
@@ -16,12 +16,14 @@ lockFile = "/tmp/so-yaml.lock"
def showUsage(args):
    print('Usage: {} <COMMAND> <YAML_FILE> [ARGS...]'.format(sys.argv[0]))
    print(' General commands:')
    print(' append - Append a list item to a yaml key, if it exists and is a list. Requires KEY and LISTITEM args.')
    print(' remove - Removes a yaml key, if it exists. Requires KEY arg.')
    print(' help - Prints this usage information.')
    print('')
    print(' Where:')
    print(' YAML_FILE - Path to the file that will be modified. Ex: /opt/so/conf/service/conf.yaml')
    print(' KEY - YAML key, does not support \' or " characters at this time. Ex: level1.level2')
    print(' LISTITEM - Item to add to the list.')
    sys.exit(1)
@@ -35,6 +37,35 @@ def writeYaml(filename, content):
    file = open(filename, "w")
    return yaml.dump(content, file)

def appendItem(content, key, listItem):
    pieces = key.split(".", 1)
    if len(pieces) > 1:
        appendItem(content[pieces[0]], pieces[1], listItem)
    else:
        try:
            content[key].append(listItem)
        except AttributeError:
            print("The existing value for the given key is not a list. No action was taken on the file.")
            return 1
        except KeyError:
            print("The key provided does not exist. No action was taken on the file.")
            return 1

def append(args):
    if len(args) != 3:
        print('Missing filename, key arg, or list item to append', file=sys.stderr)
        showUsage(None)
        return

    filename = args[0]
    key = args[1]
    listItem = args[2]

    content = loadYaml(filename)
    appendItem(content, key, listItem)
    writeYaml(filename, content)

    return 0

def removeKey(content, key):
    pieces = key.split(".", 1)
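A simplified re-implementation (error handling omitted, not part of the commit) of the dotted-key recursion above, mirroring the soup usage shown later: so-yaml.py append /etc/salt/master file_roots.base /opt/so/rules.

# Sketch only: walks a nested dict by dotted key, then appends to the list.
master = {
    "file_roots": {
        "base": [
            "/opt/so/saltstack/local/salt",
            "/opt/so/saltstack/default/salt",
        ]
    }
}

def append_item(content, key, item):
    head, _, rest = key.partition(".")
    if rest:
        append_item(content[head], rest, item)
    else:
        content[head].append(item)

append_item(master, "file_roots.base", "/opt/so/rules")
print(master["file_roots"]["base"][-1])  # /opt/so/rules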
@@ -69,6 +100,7 @@ def main():

    commands = {
        "help": showUsage,
        "append": append,
        "remove": remove,
    }
@@ -105,3 +105,99 @@ class TestRemove(unittest.TestCase):
                self.assertEqual(actual, expected)
                sysmock.assert_called_once_with(1)
                self.assertIn(mock_stdout.getvalue(), "Missing filename or key arg\n")

    def test_append(self):
        filename = "/tmp/so-yaml_test-remove.yaml"
        file = open(filename, "w")
        file.write("{key1: { child1: 123, child2: abc }, key2: false, key3: [a,b,c]}")
        file.close()

        soyaml.append([filename, "key3", "d"])

        file = open(filename, "r")
        actual = file.read()
        file.close()
        expected = "key1:\n  child1: 123\n  child2: abc\nkey2: false\nkey3:\n- a\n- b\n- c\n- d\n"
        self.assertEqual(actual, expected)

    def test_append_nested(self):
        filename = "/tmp/so-yaml_test-remove.yaml"
        file = open(filename, "w")
        file.write("{key1: { child1: 123, child2: [a,b,c] }, key2: false, key3: [e,f,g]}")
        file.close()

        soyaml.append([filename, "key1.child2", "d"])

        file = open(filename, "r")
        actual = file.read()
        file.close()

        expected = "key1:\n  child1: 123\n  child2:\n  - a\n  - b\n  - c\n  - d\nkey2: false\nkey3:\n- e\n- f\n- g\n"
        self.assertEqual(actual, expected)

    def test_append_nested_deep(self):
        filename = "/tmp/so-yaml_test-remove.yaml"
        file = open(filename, "w")
        file.write("{key1: { child1: 123, child2: { deep1: 45, deep2: [a,b,c] } }, key2: false, key3: [e,f,g]}")
        file.close()

        soyaml.append([filename, "key1.child2.deep2", "d"])

        file = open(filename, "r")
        actual = file.read()
        file.close()

        expected = "key1:\n  child1: 123\n  child2:\n    deep1: 45\n    deep2:\n    - a\n    - b\n    - c\n    - d\nkey2: false\nkey3:\n- e\n- f\n- g\n"
        self.assertEqual(actual, expected)

    def test_append_key_noexist(self):
        filename = "/tmp/so-yaml_test-append.yaml"
        file = open(filename, "w")
        file.write("{key1: { child1: 123, child2: { deep1: 45, deep2: [a,b,c] } }, key2: false, key3: [e,f,g]}")
        file.close()

        with patch('sys.exit', new=MagicMock()) as sysmock:
            with patch('sys.stdout', new=StringIO()) as mock_stdout:
                sys.argv = ["cmd", "append", filename, "key4", "h"]
                soyaml.main()
                sysmock.assert_called()
                self.assertEqual(mock_stdout.getvalue(), "The key provided does not exist. No action was taken on the file.\n")

    def test_append_key_noexist_deep(self):
        filename = "/tmp/so-yaml_test-append.yaml"
        file = open(filename, "w")
        file.write("{key1: { child1: 123, child2: { deep1: 45, deep2: [a,b,c] } }, key2: false, key3: [e,f,g]}")
        file.close()

        with patch('sys.exit', new=MagicMock()) as sysmock:
            with patch('sys.stdout', new=StringIO()) as mock_stdout:
                sys.argv = ["cmd", "append", filename, "key1.child2.deep3", "h"]
                soyaml.main()
                sysmock.assert_called()
                self.assertEqual(mock_stdout.getvalue(), "The key provided does not exist. No action was taken on the file.\n")

    def test_append_key_nonlist(self):
        filename = "/tmp/so-yaml_test-append.yaml"
        file = open(filename, "w")
        file.write("{key1: { child1: 123, child2: { deep1: 45, deep2: [a,b,c] } }, key2: false, key3: [e,f,g]}")
        file.close()

        with patch('sys.exit', new=MagicMock()) as sysmock:
            with patch('sys.stdout', new=StringIO()) as mock_stdout:
                sys.argv = ["cmd", "append", filename, "key1", "h"]
                soyaml.main()
                sysmock.assert_called()
                self.assertEqual(mock_stdout.getvalue(), "The existing value for the given key is not a list. No action was taken on the file.\n")

    def test_append_key_nonlist_deep(self):
        filename = "/tmp/so-yaml_test-append.yaml"
        file = open(filename, "w")
        file.write("{key1: { child1: 123, child2: { deep1: 45, deep2: [a,b,c] } }, key2: false, key3: [e,f,g]}")
        file.close()

        with patch('sys.exit', new=MagicMock()) as sysmock:
            with patch('sys.stdout', new=StringIO()) as mock_stdout:
                sys.argv = ["cmd", "append", filename, "key1.child2.deep1", "h"]
                soyaml.main()
                sysmock.assert_called()
                self.assertEqual(mock_stdout.getvalue(), "The existing value for the given key is not a list. No action was taken on the file.\n")
@@ -372,6 +372,17 @@ enable_highstate() {
  echo ""
}

get_soup_script_hashes() {
  CURRENTSOUP=$(md5sum /usr/sbin/soup | awk '{print $1}')
  GITSOUP=$(md5sum $UPDATE_DIR/salt/manager/tools/sbin/soup | awk '{print $1}')
  CURRENTCMN=$(md5sum /usr/sbin/so-common | awk '{print $1}')
  GITCMN=$(md5sum $UPDATE_DIR/salt/common/tools/sbin/so-common | awk '{print $1}')
  CURRENTIMGCMN=$(md5sum /usr/sbin/so-image-common | awk '{print $1}')
  GITIMGCMN=$(md5sum $UPDATE_DIR/salt/common/tools/sbin/so-image-common | awk '{print $1}')
  CURRENTSOFIREWALL=$(md5sum /usr/sbin/so-firewall | awk '{print $1}')
  GITSOFIREWALL=$(md5sum $UPDATE_DIR/salt/manager/tools/sbin/so-firewall | awk '{print $1}')
}

highstate() {
  # Run a highstate.
  salt-call state.highstate -l info queue=True
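get_soup_script_hashes collects md5 sums of the installed helper scripts and of their copies in the update checkout; verify_latest_update_script (further down) compares the two sets to decide whether soup must first update itself and exit. A rough Python equivalent of that comparison, with an assumed checkout path:

# Sketch only: the same "installed vs. checkout" hash comparison the soup
# script performs with md5sum; UPDATE_DIR below is an illustrative assumption.
import hashlib
from pathlib import Path

UPDATE_DIR = Path("/tmp/securityonion-update")  # assumed checkout location
PAIRS = {
    "/usr/sbin/soup": UPDATE_DIR / "salt/manager/tools/sbin/soup",
    "/usr/sbin/so-common": UPDATE_DIR / "salt/common/tools/sbin/so-common",
    "/usr/sbin/so-image-common": UPDATE_DIR / "salt/common/tools/sbin/so-image-common",
    "/usr/sbin/so-firewall": UPDATE_DIR / "salt/manager/tools/sbin/so-firewall",
}

def md5(path):
    return hashlib.md5(Path(path).read_bytes()).hexdigest()

def scripts_up_to_date():
    return all(md5(installed) == md5(staged) for installed, staged in PAIRS.items())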
@@ -583,6 +594,19 @@ up_to_2.4.50() {
  touch /opt/so/saltstack/local/pillar/stig/adv_stig.sls
  touch /opt/so/saltstack/local/pillar/stig/soc_stig.sls

  # the file_roots need to be updated because the Salt 3006.6 upgrade no longer allows symlinks outside the file_roots
  # put the new so-yaml in place
  echo "Updating so-yaml"
  \cp -v "$UPDATE_DIR/salt/manager/tools/sbin/so-yaml.py" "$DEFAULT_SALT_DIR/salt/manager/tools/sbin/"
  \cp -v "$UPDATE_DIR/salt/manager/tools/sbin/so-yaml.py" /usr/sbin/
  echo "Creating a backup of the salt-master config."
  # INSTALLEDVERSION is 2.4.40 at this point, but we want the backup to carry the version
  # SO was at prior to starting the upgrade. Use POSTVERSION here since it doesn't change until
  # the post-upgrade changes run. POSTVERSION is set to INSTALLEDVERSION at the start of soup.
  cp -v /etc/salt/master "/etc/salt/master.so-$POSTVERSION.bak"
  echo "Adding /opt/so/rules to file_roots in /etc/salt/master using so-yaml"
  so-yaml.py append /etc/salt/master file_roots.base /opt/so/rules

  INSTALLEDVERSION=2.4.50
}
@@ -758,31 +782,32 @@ upgrade_salt() {
}

verify_latest_update_script() {
  # Check to see if the update scripts match. If not run the new one.
  CURRENTSOUP=$(md5sum /usr/sbin/soup | awk '{print $1}')
  GITSOUP=$(md5sum $UPDATE_DIR/salt/manager/tools/sbin/soup | awk '{print $1}')
  CURRENTCMN=$(md5sum /usr/sbin/so-common | awk '{print $1}')
  GITCMN=$(md5sum $UPDATE_DIR/salt/common/tools/sbin/so-common | awk '{print $1}')
  CURRENTIMGCMN=$(md5sum /usr/sbin/so-image-common | awk '{print $1}')
  GITIMGCMN=$(md5sum $UPDATE_DIR/salt/common/tools/sbin/so-image-common | awk '{print $1}')
  CURRENTSOFIREWALL=$(md5sum /usr/sbin/so-firewall | awk '{print $1}')
  GITSOFIREWALL=$(md5sum $UPDATE_DIR/salt/manager/tools/sbin/so-firewall | awk '{print $1}')

  get_soup_script_hashes
  if [[ "$CURRENTSOUP" == "$GITSOUP" && "$CURRENTCMN" == "$GITCMN" && "$CURRENTIMGCMN" == "$GITIMGCMN" && "$CURRENTSOFIREWALL" == "$GITSOFIREWALL" ]]; then
    echo "This version of the soup script is up to date. Proceeding."
  else
    echo "You are not running the latest soup version. Updating soup and its components. This might take multiple runs to complete."
    cp $UPDATE_DIR/salt/manager/tools/sbin/soup $DEFAULT_SALT_DIR/salt/common/tools/sbin/
    cp $UPDATE_DIR/salt/manager/tools/sbin/soup $DEFAULT_SALT_DIR/salt/manager/tools/sbin/
    cp $UPDATE_DIR/salt/common/tools/sbin/so-common $DEFAULT_SALT_DIR/salt/common/tools/sbin/
    cp $UPDATE_DIR/salt/common/tools/sbin/so-image-common $DEFAULT_SALT_DIR/salt/common/tools/sbin/
    cp $UPDATE_DIR/salt/manager/tools/sbin/so-firewall $DEFAULT_SALT_DIR/salt/common/tools/sbin/
    cp $UPDATE_DIR/salt/manager/tools/sbin/so-firewall $DEFAULT_SALT_DIR/salt/manager/tools/sbin/
    salt-call state.apply common.soup_scripts queue=True -linfo --file-root=$UPDATE_DIR/salt --local
    # Verify that soup scripts updated as expected
    get_soup_script_hashes
    if [[ "$CURRENTSOUP" == "$GITSOUP" && "$CURRENTCMN" == "$GITCMN" && "$CURRENTIMGCMN" == "$GITIMGCMN" && "$CURRENTSOFIREWALL" == "$GITSOFIREWALL" ]]; then
      echo "Succesfully updated soup scripts."
    else
      # When STIGs are enabled soup scripts will fail to update using --file-root --local.
      # After checking that the expected hashes are not present, retry updating soup scripts using salt master.
      echo "There was a problem updating soup scripts.. Trying to rerun script update"
      salt-call state.apply common.soup_scripts queue=True -linfo
    fi
    echo ""
    echo "The soup script has been modified. Please run soup again to continue the upgrade."
    exit 0
  fi
}

}
# Keeping this block in case we need to do a hotfix that requires salt update
apply_hotfix() {
  if [[ "$INSTALLEDVERSION" == "2.4.20" ]] ; then
@@ -925,9 +950,6 @@ main() {

  systemctl_func "stop" "$cron_service_name"

  # update mine items prior to stopping salt-minion and salt-master
  update_salt_mine

  echo "Updating dockers to $NEWVERSION."
  if [[ $is_airgap -eq 0 ]]; then
    airgap_update_dockers
@@ -1003,6 +1025,9 @@ main() {
  salt-call state.apply salt.minion -l info queue=True
  echo ""

  # ensure the mine is updated and populated before highstates run, following the salt-master restart
  update_salt_mine

  enable_highstate

  echo ""
@@ -1,4 +1,4 @@
# version cannot be used elsewhere in this pillar as soup is grepping for it to determine if Salt needs to be patched
salt:
  master:
    version: 3006.5
    version: 3006.6
@@ -1,6 +1,6 @@
# version cannot be used elsewhere in this pillar as soup is grepping for it to determine if Salt needs to be patched
salt:
  minion:
    version: 3006.5
    version: 3006.6
    check_threshold: 3600 # in seconds, threshold used for so-salt-minion-check. any value less than 600 seconds may cause a lot of salt-minion restarts since the job to touch the file occurs every 5-8 minutes by default
    service_start_delay: 30 # in seconds.
@@ -9,7 +9,7 @@ soc:
          icon: fa-crosshairs
          target:
          links:
            - '/#/hunt?q="{value|escape}" | groupby event.module* event.dataset'
            - '/#/hunt?q="{value|escape}" | groupby event.module* | groupby -sankey event.module* event.dataset | groupby event.dataset | groupby source.ip source.port destination.ip destination.port | groupby network.protocol | groupby source_geo.organization_name source.geo.country_name | groupby destination_geo.organization_name destination.geo.country_name | groupby rule.name rule.category event.severity_label | groupby dns.query.name | groupby file.mime_type | groupby http.virtual_host http.uri | groupby notice.note notice.message notice.sub_message | groupby ssl.server_name | groupby source.ip host.hostname user.name event.action event.type process.executable process.pid'
        - name: actionAddToCase
          description: actionAddToCaseHelp
          icon: fa-briefcase
@@ -23,13 +23,13 @@ soc:
          icon: fab fa-searchengin
          target: ''
          links:
            - '/#/hunt?q=("{:log.id.fuid}" OR "{:log.id.uid}" OR "{:network.community_id}") | groupby event.module* event.dataset'
            - '/#/hunt?q=("{:log.id.fuid}" OR "{:log.id.uid}") | groupby event.module* event.dataset'
            - '/#/hunt?q=("{:log.id.fuid}" OR "{:network.community_id}") | groupby event.module* event.dataset'
            - '/#/hunt?q=("{:log.id.uid}" OR "{:network.community_id}") | groupby event.module* event.dataset'
            - '/#/hunt?q="{:log.id.fuid}" | groupby event.module* event.dataset'
            - '/#/hunt?q="{:log.id.uid}" | groupby event.module* event.dataset'
            - '/#/hunt?q="{:network.community_id}" | groupby event.module* event.dataset'
            - '/#/hunt?q=("{:log.id.fuid}" OR "{:log.id.uid}" OR "{:network.community_id}") | groupby event.module* | groupby -sankey event.module* event.dataset | groupby event.dataset | groupby source.ip source.port destination.ip destination.port | groupby network.protocol | groupby source_geo.organization_name source.geo.country_name | groupby destination_geo.organization_name destination.geo.country_name | groupby rule.name rule.category event.severity_label | groupby dns.query.name | groupby file.mime_type | groupby http.virtual_host http.uri | groupby notice.note notice.message notice.sub_message | groupby ssl.server_name | groupby source.ip host.hostname user.name event.action event.type process.executable process.pid'
            - '/#/hunt?q=("{:log.id.fuid}" OR "{:log.id.uid}") | groupby event.module* | groupby -sankey event.module* event.dataset | groupby event.dataset | groupby source.ip source.port destination.ip destination.port | groupby network.protocol | groupby source_geo.organization_name source.geo.country_name | groupby destination_geo.organization_name destination.geo.country_name | groupby rule.name rule.category event.severity_label | groupby dns.query.name | groupby file.mime_type | groupby http.virtual_host http.uri | groupby notice.note notice.message notice.sub_message | groupby ssl.server_name | groupby source.ip host.hostname user.name event.action event.type process.executable process.pid'
            - '/#/hunt?q=("{:log.id.fuid}" OR "{:network.community_id}") | groupby event.module* | groupby -sankey event.module* event.dataset | groupby event.dataset | groupby source.ip source.port destination.ip destination.port | groupby network.protocol | groupby source_geo.organization_name source.geo.country_name | groupby destination_geo.organization_name destination.geo.country_name | groupby rule.name rule.category event.severity_label | groupby dns.query.name | groupby file.mime_type | groupby http.virtual_host http.uri | groupby notice.note notice.message notice.sub_message | groupby ssl.server_name | groupby source.ip host.hostname user.name event.action event.type process.executable process.pid'
            - '/#/hunt?q=("{:log.id.uid}" OR "{:network.community_id}") | groupby event.module* | groupby -sankey event.module* event.dataset | groupby event.dataset | groupby source.ip source.port destination.ip destination.port | groupby network.protocol | groupby source_geo.organization_name source.geo.country_name | groupby destination_geo.organization_name destination.geo.country_name | groupby rule.name rule.category event.severity_label | groupby dns.query.name | groupby file.mime_type | groupby http.virtual_host http.uri | groupby notice.note notice.message notice.sub_message | groupby ssl.server_name | groupby source.ip host.hostname user.name event.action event.type process.executable process.pid'
            - '/#/hunt?q="{:log.id.fuid}" | groupby event.module* | groupby -sankey event.module* event.dataset | groupby event.dataset | groupby source.ip source.port destination.ip destination.port | groupby network.protocol | groupby source_geo.organization_name source.geo.country_name | groupby destination_geo.organization_name destination.geo.country_name | groupby rule.name rule.category event.severity_label | groupby dns.query.name | groupby file.mime_type | groupby http.virtual_host http.uri | groupby notice.note notice.message notice.sub_message | groupby ssl.server_name | groupby source.ip host.hostname user.name event.action event.type process.executable process.pid'
            - '/#/hunt?q="{:log.id.uid}" | groupby event.module* | groupby -sankey event.module* event.dataset | groupby event.dataset | groupby source.ip source.port destination.ip destination.port | groupby network.protocol | groupby source_geo.organization_name source.geo.country_name | groupby destination_geo.organization_name destination.geo.country_name | groupby rule.name rule.category event.severity_label | groupby dns.query.name | groupby file.mime_type | groupby http.virtual_host http.uri | groupby notice.note notice.message notice.sub_message | groupby ssl.server_name | groupby source.ip host.hostname user.name event.action event.type process.executable process.pid'
            - '/#/hunt?q="{:network.community_id}" | groupby event.module* | groupby -sankey event.module* event.dataset | groupby event.dataset | groupby source.ip source.port destination.ip destination.port | groupby network.protocol | groupby source_geo.organization_name source.geo.country_name | groupby destination_geo.organization_name destination.geo.country_name | groupby rule.name rule.category event.severity_label | groupby dns.query.name | groupby file.mime_type | groupby http.virtual_host http.uri | groupby notice.note notice.message notice.sub_message | groupby ssl.server_name | groupby source.ip host.hostname user.name event.action event.type process.executable process.pid'
        - name: actionPcap
          description: actionPcapHelp
          icon: fa-stream
@@ -1424,8 +1424,11 @@ soc:
        - name: Zeek Notice
          description: Zeek notice logs
          query: 'event.dataset:zeek.notice | groupby -sankey notice.note destination.ip | groupby notice.note | groupby notice.message | groupby notice.sub_message | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name'
        - name: Connections
          description: Network connection metadata
        - name: Connections and Metadata with community_id
          description: Network connections that include community_id
          query: '_exists_:network.community_id | groupby event.module* | groupby -sankey event.module* event.dataset | groupby event.dataset | groupby source.ip source.port destination.ip destination.port | groupby network.protocol | groupby source_geo.organization_name source.geo.country_name | groupby destination_geo.organization_name destination.geo.country_name | groupby rule.name rule.category event.severity_label | groupby dns.query.name | groupby http.virtual_host http.uri | groupby notice.note notice.message notice.sub_message | groupby source.ip host.hostname user.name event.action event.type process.executable process.pid'
        - name: Connections seen by Zeek or Suricata
          description: Network connections logged by Zeek or Suricata
          query: 'tags:conn | groupby source.ip | groupby destination.ip | groupby destination.port | groupby -sankey destination.port network.protocol | groupby network.protocol | groupby network.transport | groupby connection.history | groupby connection.state | groupby connection.state_description | groupby source.geo.country_name | groupby destination.geo.country_name | groupby client.ip_bytes | groupby server.ip_bytes | groupby client.oui'
        - name: DCE_RPC
          description: DCE_RPC (Distributed Computing Environment / Remote Procedure Calls) network metadata
@@ -1562,6 +1565,9 @@ soc:
        - name: Firewall
          description: Firewall logs
          query: 'observer.type:firewall | groupby -sankey event.action observer.ingress.interface.name | groupby event.action | groupby observer.ingress.interface.name | groupby network.type | groupby network.transport | groupby source.ip | groupby destination.ip | groupby destination.port'
        - name: Firewall Auth
          description: Firewall authentication logs
          query: 'observer.type:firewall AND event.category:authentication | groupby user.name | groupby -sankey user.name source.ip | groupby source.ip | table soc_timestamp user.name source.ip message'
        - name: VLAN
          description: VLAN (Virtual Local Area Network) tagged logs
          query: '* AND _exists_:network.vlan.id | groupby network.vlan.id | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby event.dataset | groupby event.module | groupby observer.name | groupby source.geo.country_name | groupby destination.geo.country_name'
@@ -50,7 +50,7 @@ update_stig_profile:
run_initial_scan:
  module.run:
    - name: openscap.xccdf
    - params: 'eval --remediate --profile {{ OSCAP_PROFILE_NAME }} --results {{ OSCAP_OUTPUT_DIR }}/pre-oscap-results.xml --report {{ OSCAP_OUTPUT_DIR }}/pre-oscap-report.html {{ OSCAP_PROFILE_LOCATION }}'
    - params: 'eval --profile {{ OSCAP_PROFILE_NAME }} --results {{ OSCAP_OUTPUT_DIR }}/pre-oscap-results.xml --report {{ OSCAP_OUTPUT_DIR }}/pre-oscap-report.html {{ OSCAP_PROFILE_LOCATION }}'
{% endif %}

run_remediate:
@@ -84,10 +84,12 @@ suridatadir:
    - mode: 770
    - makedirs: True

# salt:// would resolve to /opt/so/rules because of the defined file_roots and
# nids not existing under /opt/so/saltstack/local/salt or /opt/so/saltstack/default/salt
surirulesync:
  file.recurse:
    - name: /opt/so/conf/suricata/rules/
    - source: salt://suricata/rules/
    - source: salt://nids/
    - user: 940
    - group: 940
    - show_changes: False
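The comment above relies on how the Salt fileserver resolves salt:// URLs: each configured file_roots entry is checked in order and the first match wins, so salt://nids/ can only resolve under /opt/so/rules. A simplified sketch of that first-match lookup (not Salt's actual fileserver code):

# Sketch only: first-match lookup across file_roots, the behavior the
# comment above depends on.
from pathlib import Path

FILE_ROOTS = [
    "/opt/so/saltstack/local/salt",
    "/opt/so/saltstack/default/salt",
    "/opt/so/rules",
]

def resolve(url):
    rel = url[len("salt://"):]
    for root in FILE_ROOTS:
        candidate = Path(root) / rel
        if candidate.exists():
            return candidate  # first root containing the path wins
    return None

print(resolve("salt://nids/"))  # expected to land under /opt/so/rules/nids/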
@@ -1933,7 +1933,11 @@ saltify() {
    logCmd "dnf -y install salt-$SALTVERSION salt-master-$SALTVERSION salt-minion-$SALTVERSION"
  else
    # We just need the minion
    logCmd "dnf -y install salt-$SALTVERSION salt-minion-$SALTVERSION"
    if [[ $is_airgap ]]; then
      logCmd "dnf -y install salt salt-minion"
    else
      logCmd "dnf -y install salt-$SALTVERSION salt-minion-$SALTVERSION"
    fi
  fi
fi