Merge remote-tracking branch 'origin/2.4/dev' into 2.4/soup-playbook

DefensiveDepth
2024-04-04 08:51:09 -04:00
23 changed files with 578 additions and 23 deletions

View File

@@ -68,3 +68,10 @@ copy_so-firewall_sbin:
- source: {{UPDATE_DIR}}/salt/manager/tools/sbin/so-firewall
- force: True
- preserve: True
copy_so-yaml_sbin:
file.copy:
- name: /usr/sbin/so-yaml.py
- source: {{UPDATE_DIR}}/salt/manager/tools/sbin/so-yaml.py
- force: True
- preserve: True

View File

@@ -248,6 +248,14 @@ get_random_value() {
head -c 5000 /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w $length | head -n 1
}
get_agent_count() {
if [ -f /opt/so/log/agents/agentstatus.log ]; then
AGENTCOUNT=$(cat /opt/so/log/agents/agentstatus.log | grep -wF active | awk '{print $2}')
else
AGENTCOUNT=0
fi
}
gpg_rpm_import() {
if [[ $is_oracle ]]; then
if [[ "$WHATWOULDYOUSAYYAHDOHERE" == "setup" ]]; then
@@ -329,7 +337,7 @@ lookup_salt_value() {
local=""
fi
salt-call --no-color ${kind}.get ${group}${key} --out=${output} ${local}
salt-call -lerror --no-color ${kind}.get ${group}${key} --out=${output} ${local}
}
lookup_pillar() {
@@ -570,8 +578,9 @@ sync_options() {
set_version
set_os
salt_minion_count
get_agent_count
echo "$VERSION/$OS/$(uname -r)/$MINIONCOUNT/$(read_feat)"
echo "$VERSION/$OS/$(uname -r)/$MINIONCOUNT:$AGENTCOUNT/$(read_feat)"
}
systemctl_func() {
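For context on the new AGENTCOUNT handling: a minimal sketch of how the agent count flows into the string built by sync_options(). The log path and the "active" field come from this diff; the surrounding variables (VERSION, OS, MINIONCOUNT, read_feat) come from so-common, and the sample output values are hypothetical.

# agentstatus.log is the pretty-printed Fleet agent_status API response written
# by the so_fleetagent_status cron job (see the manager state later in this diff).
if [ -f /opt/so/log/agents/agentstatus.log ]; then
    AGENTCOUNT=$(grep -wF active /opt/so/log/agents/agentstatus.log | awk '{print $2}')
else
    AGENTCOUNT=0
fi
# sync_options() now reports minion and agent counts together, e.g. (hypothetical values):
#   2.4.70/oel/<kernel>/3:12/<features>
echo "$VERSION/$OS/$(uname -r)/$MINIONCOUNT:$AGENTCOUNT/$(read_feat)"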

View File

@@ -198,6 +198,8 @@ if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|req.LocalMeta.host.ip" # known issue in GH
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|sendmail" # zeek
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|stats.log"
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Unknown column" # Elastalert errors from running EQL queries
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|parsing_exception" # Elastalert EQL parsing issue. Temp.
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|context deadline exceeded"
fi
@@ -207,6 +209,9 @@ RESULT=0
CONTAINER_IDS=$(docker ps -q)
exclude_container so-kibana # kibana error logs are too verbose with large varieties of errors most of which are temporary
exclude_container so-idstools # ignore due to known issues and noisy logging
exclude_container so-playbook # Playbook is removed as of 2.4.70, disregard output in stopped containers
exclude_container so-mysql # MySQL is removed as of 2.4.70, disregard output in stopped containers
exclude_container so-soctopus # Soctopus is removed as of 2.4.70, disregard output in stopped containers
for container_id in $CONTAINER_IDS; do
container_name=$(docker ps --format json | jq ". | select(.ID==\"$container_id\")|.Names")
@@ -224,10 +229,12 @@ exclude_log "kibana.log" # kibana error logs are too verbose with large variet
exclude_log "spool" # disregard zeek analyze logs as this is data specific
exclude_log "import" # disregard imported test data the contains error strings
exclude_log "update.log" # ignore playbook updates due to several known issues
exclude_log "playbook.log" # ignore due to several playbook known issues
exclude_log "cron-cluster-delete.log" # ignore since Curator has been removed
exclude_log "cron-close.log" # ignore since Curator has been removed
exclude_log "curator.log" # ignore since Curator has been removed
exclude_log "playbook.log" # Playbook is removed as of 2.4.70, logs may still be on disk
exclude_log "mysqld.log" # MySQL is removed as of 2.4.70, logs may still be on disk
exclude_log "soctopus.log" # Soctopus is removed as of 2.4.70, logs may still be on disk
for log_file in $(cat /tmp/log_check_files); do
status "Checking log file $log_file"

View File

@@ -2402,6 +2402,50 @@ elasticsearch:
set_priority:
priority: 50
min_age: 30d
so-logs-cef_x_log:
index_sorting: False
index_template:
index_patterns:
- "logs-cef.log-*"
template:
settings:
index:
lifecycle:
name: so-logs-cef.log-logs
number_of_replicas: 0
composed_of:
- "logs-cef.log@package"
- "logs-cef.log@custom"
- "so-fleet_globals-1"
- "so-fleet_agent_id_verification-1"
priority: 501
data_stream:
hidden: false
allow_custom_routing: false
policy:
phases:
cold:
actions:
set_priority:
priority: 0
min_age: 30d
delete:
actions:
delete: {}
min_age: 365d
hot:
actions:
rollover:
max_age: 30d
max_primary_shard_size: 50gb
set_priority:
priority: 100
min_age: 0ms
warm:
actions:
set_priority:
priority: 50
min_age: 30d
so-logs-checkpoint_x_firewall:
index_sorting: False
index_template:
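Once the new so-logs-cef_x_log defaults are applied, the resulting ILM policy can be spot-checked on the manager. A hedged sketch: the policy name (so-logs-cef.log-logs) comes from this diff, while the host, port, and use of curl.config against local Elasticsearch are assumptions about the grid.

# Hypothetical spot-check; host, port, and auth via curl.config are assumptions.
curl -s -k -K /opt/so/conf/elasticsearch/curl.config \
  "https://localhost:9200/_ilm/policy/so-logs-cef.log-logs" | jq '.["so-logs-cef.log-logs"].policy.phases'

# List index templates and confirm a CEF template was created:
curl -s -k -K /opt/so/conf/elasticsearch/curl.config \
  "https://localhost:9200/_index_template" | jq -r '.index_templates[].name' | grep -i cef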

View File

@@ -366,6 +366,7 @@ elasticsearch:
so-logs-azure_x_signinlogs: *indexSettings
so-logs-azure_x_springcloudlogs: *indexSettings
so-logs-barracuda_x_waf: *indexSettings
so-logs-cef_x_log: *indexSettings
so-logs-cisco_asa_x_log: *indexSettings
so-logs-cisco_ftd_x_log: *indexSettings
so-logs-cisco_ios_x_log: *indexSettings

View File

@@ -28,7 +28,7 @@ global:
description: Used for handling of authentication cookies.
global: True
airgap:
description: Sets airgap mode.
description: Airgapped systems do not have network connectivity to the internet. This setting represents how this grid was configured during initial setup. While it is technically possible to manually switch systems between airgap and non-airgap, there are some nuances and additional steps involved. For that reason, this setting is marked read-only. Contact your support representative for guidance if there is a need to change this setting.
global: True
readonly: True
imagerepo:

View File

@@ -27,6 +27,15 @@ repo_log_dir:
- user
- group
agents_log_dir:
file.directory:
- name: /opt/so/log/agents
- user: root
- group: root
- recurse:
- user
- group
yara_log_dir:
file.directory:
- name: /opt/so/log/yarasync
@@ -101,6 +110,17 @@ so-repo-sync:
- hour: '{{ MANAGERMERGED.reposync.hour }}'
- minute: '{{ MANAGERMERGED.reposync.minute }}'
so_fleetagent_status:
cron.present:
- name: /usr/sbin/so-elasticagent-status > /opt/so/log/agents/agentstatus.log 2>&1
- identifier: so_fleetagent_status
- user: root
- minute: '*/5'
- hour: '*'
- daymonth: '*'
- month: '*'
- dayweek: '*'
socore_own_saltstack:
file.directory:
- name: /opt/so/saltstack

View File

@@ -0,0 +1,10 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
. /usr/sbin/so-common
curl -s -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/agent_status" | jq .

View File

@@ -17,13 +17,16 @@ def showUsage(args):
print('Usage: {} <COMMAND> <YAML_FILE> [ARGS...]'.format(sys.argv[0]))
print(' General commands:')
print(' append - Append a list item to a yaml key, if it exists and is a list. Requires KEY and LISTITEM args.')
print(' add - Add a new key and set its value. Fails if key already exists. Requires KEY and VALUE args.')
print(' remove - Removes a yaml key, if it exists. Requires KEY arg.')
print(' replace - Replaces (or adds) a key and sets its value. Requires KEY and VALUE args.')
print(' help - Prints this usage information.')
print('')
print(' Where:')
print(' YAML_FILE - Path to the file that will be modified. Ex: /opt/so/conf/service/conf.yaml')
print(' KEY - YAML key, does not support \' or " characters at this time. Ex: level1.level2')
print(' LISTITEM - Item to add to the list.')
print(' VALUE - Value to set for a given key')
print(' LISTITEM - Item to append to a given key\'s list value')
sys.exit(1)
@@ -37,6 +40,7 @@ def writeYaml(filename, content):
file = open(filename, "w")
return yaml.dump(content, file)
def appendItem(content, key, listItem):
pieces = key.split(".", 1)
if len(pieces) > 1:
@@ -51,6 +55,30 @@ def appendItem(content, key, listItem):
print("The key provided does not exist. No action was taken on the file.")
return 1
def convertType(value):
if len(value) > 0 and (not value.startswith("0") or len(value) == 1):
if "." in value:
try:
value = float(value)
return value
except ValueError:
pass
try:
value = int(value)
return value
except ValueError:
pass
lowered_value = value.lower()
if lowered_value == "false":
return False
elif lowered_value == "true":
return True
return value
def append(args):
if len(args) != 3:
print('Missing filename, key arg, or list item to append', file=sys.stderr)
@@ -62,11 +90,41 @@ def append(args):
listItem = args[2]
content = loadYaml(filename)
appendItem(content, key, listItem)
appendItem(content, key, convertType(listItem))
writeYaml(filename, content)
return 0
def addKey(content, key, value):
pieces = key.split(".", 1)
if len(pieces) > 1:
if not pieces[0] in content:
content[pieces[0]] = {}
addKey(content[pieces[0]], pieces[1], value)
elif key in content:
raise KeyError("key already exists")
else:
content[key] = value
def add(args):
if len(args) != 3:
print('Missing filename, key arg, and/or value', file=sys.stderr)
showUsage(None)
return
filename = args[0]
key = args[1]
value = args[2]
content = loadYaml(filename)
addKey(content, key, convertType(value))
writeYaml(filename, content)
return 0
def removeKey(content, key):
pieces = key.split(".", 1)
if len(pieces) > 1:
@@ -91,6 +149,24 @@ def remove(args):
return 0
def replace(args):
if len(args) != 3:
print('Missing filename, key arg, and/or value', file=sys.stderr)
showUsage(None)
return
filename = args[0]
key = args[1]
value = args[2]
content = loadYaml(filename)
removeKey(content, key)
addKey(content, key, convertType(value))
writeYaml(filename, content)
return 0
def main():
args = sys.argv[1:]
@@ -100,8 +176,10 @@ def main():
commands = {
"help": showUsage,
"add": add,
"append": append,
"remove": remove,
"replace": replace,
}
code = 1
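Taken together with convertType(), the new add and replace subcommands make so-yaml.py usable for scripted config edits (soup itself calls replace to toggle telemetry, as shown later in this diff). An illustrative, non-authoritative usage sketch; the file path and keys below are examples only:

so-yaml.py add     /opt/so/conf/service/conf.yaml level1.newkey 123    # fails if the key already exists
so-yaml.py replace /opt/so/conf/service/conf.yaml level1.newkey true   # replaces the value, or adds the key if missing
so-yaml.py append  /opt/so/conf/service/conf.yaml level1.somelist abc  # appends to an existing list value
so-yaml.py remove  /opt/so/conf/service/conf.yaml level1.newkey
# convertType() stores "123" as an integer, "true"/"false" as booleans, and
# zero-padded values such as "0123" as strings.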

View File

@@ -42,6 +42,14 @@ class TestRemove(unittest.TestCase):
sysmock.assert_called()
self.assertIn(mock_stdout.getvalue(), "Usage:")
def test_remove_missing_arg(self):
with patch('sys.exit', new=MagicMock()) as sysmock:
with patch('sys.stderr', new=StringIO()) as mock_stdout:
sys.argv = ["cmd", "help"]
soyaml.remove(["file"])
sysmock.assert_called()
self.assertIn(mock_stdout.getvalue(), "Missing filename or key arg\n")
def test_remove(self):
filename = "/tmp/so-yaml_test-remove.yaml"
file = open(filename, "w")
@@ -106,6 +114,14 @@ class TestRemove(unittest.TestCase):
sysmock.assert_called_once_with(1)
self.assertIn(mock_stdout.getvalue(), "Missing filename or key arg\n")
def test_append_missing_arg(self):
with patch('sys.exit', new=MagicMock()) as sysmock:
with patch('sys.stderr', new=StringIO()) as mock_stdout:
sys.argv = ["cmd", "help"]
soyaml.append(["file", "key"])
sysmock.assert_called()
self.assertIn(mock_stdout.getvalue(), "Missing filename, key arg, or list item to append\n")
def test_append(self):
filename = "/tmp/so-yaml_test-remove.yaml"
file = open(filename, "w")
@@ -201,3 +217,146 @@ class TestRemove(unittest.TestCase):
soyaml.main()
sysmock.assert_called()
self.assertEqual(mock_stdout.getvalue(), "The existing value for the given key is not a list. No action was taken on the file.\n")
def test_add_key(self):
content = {}
soyaml.addKey(content, "foo", 123)
self.assertEqual(content, {"foo": 123})
try:
soyaml.addKey(content, "foo", "bar")
self.assertFail("expected key error since key already exists")
except KeyError:
pass
try:
soyaml.addKey(content, "foo.bar", 123)
self.assertFail("expected type error since key parent value is not a map")
except TypeError:
pass
content = {}
soyaml.addKey(content, "foo", "bar")
self.assertEqual(content, {"foo": "bar"})
soyaml.addKey(content, "badda.badda", "boom")
self.assertEqual(content, {"foo": "bar", "badda": {"badda": "boom"}})
def test_add_missing_arg(self):
with patch('sys.exit', new=MagicMock()) as sysmock:
with patch('sys.stderr', new=StringIO()) as mock_stdout:
sys.argv = ["cmd", "help"]
soyaml.add(["file", "key"])
sysmock.assert_called()
self.assertIn(mock_stdout.getvalue(), "Missing filename, key arg, and/or value\n")
def test_add(self):
filename = "/tmp/so-yaml_test-add.yaml"
file = open(filename, "w")
file.write("{key1: { child1: 123, child2: abc }, key2: false, key3: [a,b,c]}")
file.close()
soyaml.add([filename, "key4", "d"])
file = open(filename, "r")
actual = file.read()
file.close()
expected = "key1:\n child1: 123\n child2: abc\nkey2: false\nkey3:\n- a\n- b\n- c\nkey4: d\n"
self.assertEqual(actual, expected)
def test_add_nested(self):
filename = "/tmp/so-yaml_test-add.yaml"
file = open(filename, "w")
file.write("{key1: { child1: 123, child2: [a,b,c] }, key2: false, key3: [e,f,g]}")
file.close()
soyaml.add([filename, "key1.child3", "d"])
file = open(filename, "r")
actual = file.read()
file.close()
expected = "key1:\n child1: 123\n child2:\n - a\n - b\n - c\n child3: d\nkey2: false\nkey3:\n- e\n- f\n- g\n"
self.assertEqual(actual, expected)
def test_add_nested_deep(self):
filename = "/tmp/so-yaml_test-add.yaml"
file = open(filename, "w")
file.write("{key1: { child1: 123, child2: { deep1: 45 } }, key2: false, key3: [e,f,g]}")
file.close()
soyaml.add([filename, "key1.child2.deep2", "d"])
file = open(filename, "r")
actual = file.read()
file.close()
expected = "key1:\n child1: 123\n child2:\n deep1: 45\n deep2: d\nkey2: false\nkey3:\n- e\n- f\n- g\n"
self.assertEqual(actual, expected)
def test_replace_missing_arg(self):
with patch('sys.exit', new=MagicMock()) as sysmock:
with patch('sys.stderr', new=StringIO()) as mock_stdout:
sys.argv = ["cmd", "help"]
soyaml.replace(["file", "key"])
sysmock.assert_called()
self.assertIn(mock_stdout.getvalue(), "Missing filename, key arg, and/or value\n")
def test_replace(self):
filename = "/tmp/so-yaml_test-add.yaml"
file = open(filename, "w")
file.write("{key1: { child1: 123, child2: abc }, key2: false, key3: [a,b,c]}")
file.close()
soyaml.replace([filename, "key2", True])
file = open(filename, "r")
actual = file.read()
file.close()
expected = "key1:\n child1: 123\n child2: abc\nkey2: true\nkey3:\n- a\n- b\n- c\n"
self.assertEqual(actual, expected)
def test_replace_nested(self):
filename = "/tmp/so-yaml_test-add.yaml"
file = open(filename, "w")
file.write("{key1: { child1: 123, child2: [a,b,c] }, key2: false, key3: [e,f,g]}")
file.close()
soyaml.replace([filename, "key1.child2", "d"])
file = open(filename, "r")
actual = file.read()
file.close()
expected = "key1:\n child1: 123\n child2: d\nkey2: false\nkey3:\n- e\n- f\n- g\n"
self.assertEqual(actual, expected)
def test_replace_nested_deep(self):
filename = "/tmp/so-yaml_test-add.yaml"
file = open(filename, "w")
file.write("{key1: { child1: 123, child2: { deep1: 45 } }, key2: false, key3: [e,f,g]}")
file.close()
soyaml.replace([filename, "key1.child2.deep1", 46])
file = open(filename, "r")
actual = file.read()
file.close()
expected = "key1:\n child1: 123\n child2:\n deep1: 46\nkey2: false\nkey3:\n- e\n- f\n- g\n"
self.assertEqual(actual, expected)
def test_convert(self):
self.assertEqual(soyaml.convertType("foo"), "foo")
self.assertEqual(soyaml.convertType("foo.bar"), "foo.bar")
self.assertEqual(soyaml.convertType("123"), 123)
self.assertEqual(soyaml.convertType("0"), 0)
self.assertEqual(soyaml.convertType("00"), "00")
self.assertEqual(soyaml.convertType("0123"), "0123")
self.assertEqual(soyaml.convertType("123.456"), 123.456)
self.assertEqual(soyaml.convertType("0123.456"), "0123.456")
self.assertEqual(soyaml.convertType("true"), True)
self.assertEqual(soyaml.convertType("TRUE"), True)
self.assertEqual(soyaml.convertType("false"), False)
self.assertEqual(soyaml.convertType("FALSE"), False)
self.assertEqual(soyaml.convertType(""), "")

View File

@@ -229,7 +229,7 @@ check_local_mods() {
# {% endraw %}
check_pillar_items() {
local pillar_output=$(salt-call pillar.items --out=json)
local pillar_output=$(salt-call pillar.items -lerror --out=json)
cond=$(jq '.local | has("_errors")' <<< "$pillar_output")
if [[ "$cond" == "true" ]]; then
@@ -375,6 +375,7 @@ postupgrade_changes() {
[[ "$POSTVERSION" == 2.4.40 ]] && post_to_2.4.50
[[ "$POSTVERSION" == 2.4.50 ]] && post_to_2.4.60
[[ "$POSTVERSION" == 2.4.60 ]] && post_to_2.4.70
[[ "$POSTVERSION" == 2.4.60 ]] && post_to_2.4.70
true
}
@@ -582,6 +583,46 @@ up_to_2.4.60() {
}
up_to_2.4.70() {
playbook_migration
toggle_telemetry
INSTALLEDVERSION=2.4.70
}
toggle_telemetry() {
if [[ -z $UNATTENDED && $is_airgap -ne 0 ]]; then
cat << ASSIST_EOF
--------------- SOC Telemetry ---------------
The Security Onion development team could use your help! Enabling SOC
Telemetry will help the team understand which UI features are being
used and enables informed prioritization of future development.
Adjust this setting at any time via the SOC Configuration screen.
Documentation: https://docs.securityonion.net/en/2.4/telemetry.html
ASSIST_EOF
echo -n "Continue the upgrade with SOC Telemetry enabled [Y/n]? "
read -r input
input=$(echo "${input,,}" | xargs echo -n)
echo ""
if [[ ${#input} -eq 0 || "$input" == "yes" || "$input" == "y" || "$input" == "yy" ]]; then
echo "Thank you for helping improve Security Onion!"
else
if so-yaml.py replace /opt/so/saltstack/local/pillar/soc/soc_soc.sls soc.telemetryEnabled false; then
echo "Disabled SOC Telemetry."
else
fail "Failed to disable SOC Telemetry; aborting."
fi
fi
echo ""
fi
}
playbook_migration() {
# Start SOC Detections migration
mkdir -p /nsm/backup/detections-migration/{suricata,sigma/rules,elastalert}
@@ -649,8 +690,6 @@ up_to_2.4.70() {
echo
echo "Playbook Migration is complete...."
INSTALLEDVERSION=2.4.70
}
determine_elastic_agent_upgrade() {
@@ -835,7 +874,7 @@ verify_latest_update_script() {
else
echo "You are not running the latest soup version. Updating soup and its components. This might take multiple runs to complete."
salt-call state.apply common.soup_scripts queue=True -linfo --file-root=$UPDATE_DIR/salt --local
salt-call state.apply common.soup_scripts queue=True -lerror --file-root=$UPDATE_DIR/salt --local --out-file=/dev/null
# Verify that soup scripts updated as expected
get_soup_script_hashes

View File

@@ -52,6 +52,15 @@ socsaltdir:
- mode: 770
- makedirs: True
socanalytics:
file.managed:
- name: /opt/so/conf/soc/analytics.js
- source: salt://soc/files/soc/analytics.js
- user: 939
- group: 939
- mode: 600
- show_changes: False
socconfig:
file.managed:
- name: /opt/so/conf/soc/soc.json

View File

@@ -1,5 +1,6 @@
soc:
enabled: False
telemetryEnabled: true
config:
logFilename: /opt/sensoroni/logs/sensoroni-server.log
logLevel: info
@@ -70,13 +71,13 @@ soc:
icon: fa-person-running
target: ''
links:
- '/#/hunt?q=(process.entity_id:"{:process.entity_id}") | groupby event.dataset | groupby -sankey event.dataset event.action | groupby event.action | groupby process.name | groupby host.name user.name | groupby source.ip source.port destination.ip destination.port | groupby dns.question.name | groupby dns.answers.data | groupby file.path | groupby registry.path | groupby dll.path'
- '/#/hunt?q=(process.entity_id:"{:process.entity_id}") | groupby event.dataset | groupby -sankey event.dataset event.action | groupby event.action | groupby process.name | groupby process.command_line | groupby host.name user.name | groupby source.ip source.port destination.ip destination.port | groupby dns.question.name | groupby dns.answers.data | groupby file.path | groupby registry.path | groupby dll.path'
- name: actionProcessAncestors
description: actionProcessAncestorsHelp
icon: fa-people-roof
target: ''
links:
- '/#/hunt?q=(process.entity_id:"{:process.entity_id}" OR process.entity_id:"{:process.Ext.ancestry|processAncestors}") | groupby event.dataset | groupby -sankey event.dataset event.action | groupby event.action | groupby process.parent.name | groupby -sankey process.parent.name process.name | groupby process.name | groupby host.name user.name | groupby source.ip source.port destination.ip destination.port | groupby dns.question.name | groupby dns.answers.data | groupby file.path | groupby registry.path | groupby dll.path'
- '/#/hunt?q=(process.entity_id:"{:process.entity_id}" OR process.entity_id:"{:process.Ext.ancestry|processAncestors}") | groupby event.dataset | groupby -sankey event.dataset event.action | groupby event.action | groupby process.parent.name | groupby -sankey process.parent.name process.name | groupby process.name | groupby process.command_line | groupby host.name user.name | groupby source.ip source.port destination.ip destination.port | groupby dns.question.name | groupby dns.answers.data | groupby file.path | groupby registry.path | groupby dll.path'
eventFields:
default:
- soc_timestamp
@@ -87,12 +88,13 @@ soc:
- log.id.uid
- network.community_id
- event.dataset
':kratos:audit':
':kratos:':
- soc_timestamp
- http_request.headers.x-real-ip
- identity_id
- http_request.headers.user-agent
- event.dataset
- msg
'::conn':
- soc_timestamp
- source.ip
@@ -457,7 +459,7 @@ soc:
- ssh.server
- log.id.uid
- event.dataset
'::ssl':
':suricata:ssl':
- soc_timestamp
- source.ip
- source.port
@@ -465,10 +467,30 @@ soc:
- destination.port
- ssl.server_name
- ssl.certificate.subject
- ssl.version
- log.id.uid
- event.dataset
':zeek:ssl':
- soc_timestamp
- source.ip
- source.port
- destination.ip
- destination.port
- ssl.server_name
- ssl.validation_status
- ssl.version
- log.id.uid
- event.dataset
'::ssl':
- soc_timestamp
- source.ip
- source.port
- destination.ip
- destination.port
- ssl.server_name
- ssl.version
- log.id.uid
- event.dataset
':zeek:syslog':
- soc_timestamp
- source.ip
@@ -541,6 +563,15 @@ soc:
- process.executable
- user.name
- event.dataset
':strelka:':
- soc_timestamp
- file.name
- file.size
- hash.md5
- file.source
- file.mime_type
- log.id.fuid
- event.dataset
':strelka:file':
- soc_timestamp
- file.name
@@ -1180,6 +1211,17 @@ soc:
- soc_timestamp
- event.dataset
- message
':playbook:':
- soc_timestamp
- rule.name
- event.severity_label
- event_data.event.dataset
- event_data.source.ip
- event_data.source.port
- event_data.destination.host
- event_data.destination.port
- event_data.process.executable
- event_data.process.pid
server:
bindAddress: 0.0.0.0:9822
baseUrl: /
@@ -1196,11 +1238,16 @@ soc:
elastalertengine:
allowRegex: ''
autoUpdateEnabled: true
autoEnabledSigmaRules:
- core+critical
- securityonion-resources+critical
- securityonion-resources+high
communityRulesImportFrequencySeconds: 86400
denyRegex: ''
elastAlertRulesFolder: /opt/sensoroni/elastalert
reposFolder: /opt/sensoroni/sigma/repos
rulesFingerprintFile: /opt/sensoroni/fingerprints/sigma.fingerprint
stateFilePath: /opt/so/conf/soc/fingerprints/elastalertengine.state
rulesRepos:
- repo: https://github.com/Security-Onion-Solutions/securityonion-resources
license: Elastic-2.0
@@ -1261,6 +1308,7 @@ soc:
- repo: https://github.com/Security-Onion-Solutions/securityonion-yara
license: DRL
yaraRulesFolder: /opt/sensoroni/yara/rules
stateFilePath: /opt/so/conf/soc/fingerprints/strelkaengine.state
suricataengine:
allowRegex: ''
autoUpdateEnabled: true
@@ -1268,6 +1316,7 @@ soc:
communityRulesFile: /nsm/rules/suricata/emerging-all.rules
denyRegex: ''
rulesFingerprintFile: /opt/sensoroni/fingerprints/emerging-all.fingerprint
stateFilePath: /opt/so/conf/soc/fingerprints/suricataengine.state
client:
enableReverseLookup: false
docsUrl: /docs/
@@ -1612,9 +1661,12 @@ soc:
- name: Overview
description: Overview of all events
query: '* | groupby event.category | groupby -sankey event.category event.module | groupby event.module | groupby -sankey event.module event.dataset | groupby event.dataset | groupby observer.name | groupby host.name | groupby source.ip | groupby destination.ip | groupby destination.port'
- name: SOC Auth
description: SOC (Security Onion Console) authentication logs
- name: SOC Logins
description: SOC (Security Onion Console) logins
query: 'event.dataset:kratos.audit AND msg:*authenticated* | groupby http_request.headers.x-real-ip | groupby -sankey http_request.headers.x-real-ip identity_id | groupby identity_id | groupby http_request.headers.user-agent'
- name: SOC Login Failures
description: SOC (Security Onion Console) login failures
query: 'event.dataset:kratos.audit AND msg:*Encountered*self-service*login*error* | groupby http_request.headers.x-real-ip | groupby -sankey http_request.headers.x-real-ip http_request.headers.user-agent | groupby http_request.headers.user-agent'
- name: Elastalerts
description: Elastalert logs
query: '_index: "*:elastalert*" | groupby rule_name | groupby alert_info.type'
@@ -1740,7 +1792,13 @@ soc:
query: 'tags:ssh | groupby ssh.client | groupby -sankey ssh.client source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby ssh.server | groupby ssh.version | groupby ssh.hassh_version | groupby ssh.direction | groupby source_geo.organization_name | groupby destination_geo.organization_name'
- name: SSL
description: SSL/TLS network metadata
query: 'tags:ssl | groupby ssl.version | groupby ssl.validation_status | groupby -sankey ssl.validation_status ssl.server_name | groupby ssl.server_name | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name | groupby ssl.certificate.issuer | groupby ssl.certificate.subject'
query: 'tags:ssl | groupby ssl.version | groupby -sankey ssl.version ssl.server_name | groupby ssl.server_name | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name'
- name: SSL - Suricata
description: SSL/TLS network metadata from Suricata
query: 'event.dataset:suricata.ssl | groupby ssl.version | groupby -sankey ssl.version ssl.server_name | groupby ssl.server_name | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name | groupby ssl.certificate.issuer | groupby ssl.certificate.subject'
- name: SSL - Zeek
description: SSL/TLS network metadata from Zeek
query: 'event.dataset:zeek.ssl | groupby ssl.version | groupby ssl.validation_status | groupby -sankey ssl.validation_status ssl.server_name | groupby ssl.server_name | groupby source.ip | groupby destination.ip | groupby destination.port | groupby destination_geo.organization_name'
- name: STUN
description: STUN (Session Traversal Utilities for NAT) network metadata
query: 'tags:stun* | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby destination.geo.country_name | groupby event.dataset'
@@ -1850,11 +1908,22 @@ soc:
- soc_timestamp
- rule.name
- event.severity_label
- event_data.event.module
- event_data.event.category
- event_data.event.dataset
- event_data.source.ip
- event_data.source.port
- event_data.destination.host
- event_data.destination.port
- event_data.process.executable
- event_data.process.pid
- event_data.winlog.computer_name
':strelka:':
- soc_timestamp
- file.name
- file.size
- hash.md5
- file.source
- file.mime_type
- log.id.fuid
- event.dataset
queryBaseFilter: tags:alert
queryToggleFilters:
- name: acknowledged
@@ -1993,6 +2062,13 @@ soc:
mostRecentlyUsedLimit: 5
safeStringMaxLength: 100
queryBaseFilter: '_index:"*:so-detection" AND so_kind:detection'
presets:
manualSync:
customEnabled: false
labels:
- Suricata
- Strelka
- ElastAlert
eventFields:
default:
- so_detection.title
@@ -2000,6 +2076,7 @@ soc:
- so_detection.severity
- so_detection.language
- so_detection.ruleset
- soc_timestamp
queries:
- name: "All Detections"
query: "_id:*"
@@ -2017,6 +2094,8 @@ soc:
query: 'so_detection.language:sigma AND so_detection.content: "*product: windows*"'
- name: "Detection Type - Yara (Strelka)"
query: "so_detection.language:yara"
- name: "Security Onion - Grid Detections"
query: "so_detection.ruleset:securityonion-resources"
detection:
presets:
severity:

View File

@@ -8,6 +8,7 @@
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'docker/docker.map.jinja' import DOCKER %}
{% from 'soc/merged.map.jinja' import DOCKER_EXTRA_HOSTS %}
{% from 'soc/merged.map.jinja' import SOCMERGED %}
include:
- soc.config
@@ -31,6 +32,9 @@ so-soc:
- /nsm/soc/uploads:/nsm/soc/uploads:rw
- /opt/so/log/soc/:/opt/sensoroni/logs/:rw
- /opt/so/conf/soc/soc.json:/opt/sensoroni/sensoroni.json:ro
{% if SOCMERGED.telemetryEnabled and not GLOBALS.airgap %}
- /opt/so/conf/soc/analytics.js:/opt/sensoroni/html/js/analytics.js:ro
{% endif %}
- /opt/so/conf/soc/motd.md:/opt/sensoroni/html/motd.md:ro
- /opt/so/conf/soc/banner.md:/opt/sensoroni/html/login/banner.md:ro
- /opt/so/conf/soc/sigma_so_pipeline.yaml:/opt/sensoroni/sigma_so_pipeline.yaml:ro
@@ -67,6 +71,7 @@ so-soc:
- file: socdatadir
- file: soclogdir
- file: socconfig
- file: socanalytics
- file: socmotd
- file: socbanner
- file: soccustom
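The analytics.js bind mount above is gated on both soc.telemetryEnabled and airgap status. A hypothetical way to confirm what a given manager will do is to look for the local pillar override that setup and soup write (the pillar path comes from the soup hunk earlier in this diff):

# No match means no local override, so the default soc.telemetryEnabled: true applies.
grep telemetryEnabled /opt/so/saltstack/local/pillar/soc/soc_soc.sls \
  || echo "no local override; telemetry default (enabled) applies unless the grid is airgapped"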

View File

@@ -0,0 +1,5 @@
(function(w,d,s,l,i){w[l]=w[l]||[];w[l].push({'gtm.start':
new Date().getTime(),event:'gtm.js'});var f=d.getElementsByTagName(s)[0],
j=d.createElement(s),dl=l!='dataLayer'?'&l='+l:'';j.async=true;j.src=
'https://www.googletagmanager.com/gtm.js?id='+i+dl;f.parentNode.insertBefore(j,f);
})(window,document,'script','dataLayer','GTM-TM46SL7T');

View File

@@ -12,6 +12,10 @@ To see all the latest features and fixes in this version of Security Onion, clic
Want the best hardware for your enterprise deployment? Check out our [enterprise appliances](https://securityonionsolutions.com/hardware/)!
## Premium Support
Experiencing difficulties and need priority support or remote assistance? We offer a [premium support plan](https://securityonionsolutions.com/support/) to assist corporate, educational, and government organizations.
## Customize This Space
Make this area your own by customizing the content in the [Config](/#/config?s=soc.files.soc.motd__md) interface.

View File

@@ -2,6 +2,11 @@ soc:
enabled:
description: You can enable or disable SOC.
advanced: True
telemetryEnabled:
title: SOC Telemetry
description: When this setting is enabled and the grid is not in airgap mode, SOC will provide feature usage data to the Security Onion development team via Google Analytics. This data helps Security Onion developers determine which product features are being used and can also provide insight into improving the user interface. When changing this setting, wait for the grid to fully synchronize and then perform a hard browser refresh on SOC, to force the browser cache to update and reflect the new setting.
global: True
helpLink: telemetry.html
files:
soc:
banner__md:

View File

@@ -11,6 +11,7 @@ telegraf:
quiet: 'false'
scripts:
eval:
- agentstatus.sh
- checkfiles.sh
- influxdbsize.sh
- lasthighstate.sh
@@ -23,6 +24,7 @@ telegraf:
- zeekcaptureloss.sh
- zeekloss.sh
standalone:
- agentstatus.sh
- checkfiles.sh
- eps.sh
- influxdbsize.sh
@@ -38,6 +40,7 @@ telegraf:
- zeekloss.sh
- features.sh
manager:
- agentstatus.sh
- influxdbsize.sh
- lasthighstate.sh
- os.sh
@@ -46,6 +49,7 @@ telegraf:
- sostatus.sh
- features.sh
managersearch:
- agentstatus.sh
- eps.sh
- influxdbsize.sh
- lasthighstate.sh

View File

@@ -56,6 +56,7 @@ so-telegraf:
- /opt/so/log/raid:/var/log/raid:ro
- /opt/so/log/sostatus:/var/log/sostatus:ro
- /opt/so/log/salt:/var/log/salt:ro
- /opt/so/log/agents:/var/log/agents:ro
{% if DOCKER.containers['so-telegraf'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-telegraf'].custom_bind_mounts %}
- {{ BIND }}

View File

@@ -0,0 +1,34 @@
#!/bin/bash
#
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
# if this script isn't already running
if [[ ! "`pidof -x $(basename $0) -o %PPID`" ]]; then
LOGFILE=/var/log/agents/agentstatus.log
# Check to see if the file is there yet so we don't break install verification, since there is a 5-minute delay before this file shows up
if [ -f $LOGFILE ]; then
ONLINE=$(cat $LOGFILE | grep -wF online | awk '{print $2}' | tr -d ',')
ERROR=$(cat $LOGFILE | grep -wF error | awk '{print $2}' | tr -d ',')
INACTIVE=$(cat $LOGFILE | grep -wF inactive | awk '{print $2}' | tr -d ',')
OFFLINE=$(cat $LOGFILE | grep -wF offline | awk '{print $2}' | tr -d ',')
UPDATING=$(cat $LOGFILE | grep -wF updating | awk '{print $2}' | tr -d ',')
UNENROLLED=$(cat $LOGFILE | grep -wF unenrolled | awk '{print $2}' | tr -d ',')
OTHER=$(cat $LOGFILE | grep -wF other | awk '{print $2}' | tr -d ',')
EVENTS=$(cat $LOGFILE | grep -wF events | awk '{print $2}' | tr -d ',')
TOTAL=$(cat $LOGFILE | grep -wF total | awk '{print $2}' | tr -d ',')
ALL=$(cat $LOGFILE | grep -wF all | awk '{print $2}' | tr -d ',')
ACTIVE=$(cat $LOGFILE | grep -wF active | awk '{print $2}')
echo "agentstatus online=$ONLINE,error=$ERROR,inactive=$INACTIVE,offline=$OFFLINE,updating=$UPDATING,unenrolled=$UNENROLLED,other=$OTHER,events=$EVENTS,total=$TOTAL,all=$ALL,active=$ACTIVE"
fi
fi
exit 0
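For reference, the echo above produces a single influx-style line for telegraf's exec input; a hypothetical sample (field names from the script, counts made up):

# agentstatus online=5,error=0,inactive=0,offline=0,updating=0,unenrolled=0,other=0,events=0,total=5,all=5,active=5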

View File

@@ -1258,6 +1258,10 @@ soc_pillar() {
" server:"\
" srvKey: '$SOCSRVKEY'"\
"" > "$soc_pillar_file"
if [[ $telemetry -ne 0 ]]; then
echo " telemetryEnabled: false" >> $soc_pillar_file
fi
}
telegraf_pillar() {

View File

@@ -447,6 +447,7 @@ if ! [[ -f $install_opt_file ]]; then
get_redirect
# Does the user want to allow access to the UI?
collect_so_allow
[[ ! $is_airgap ]] && whiptail_accept_telemetry
whiptail_end_settings
elif [[ $is_standalone ]]; then
waitforstate=true
@@ -468,6 +469,7 @@ if ! [[ -f $install_opt_file ]]; then
collect_webuser_inputs
get_redirect
collect_so_allow
[[ ! $is_airgap ]] && whiptail_accept_telemetry
whiptail_end_settings
elif [[ $is_manager ]]; then
info "Setting up as node type manager"
@@ -488,6 +490,7 @@ if ! [[ -f $install_opt_file ]]; then
collect_webuser_inputs
get_redirect
collect_so_allow
[[ ! $is_airgap ]] && whiptail_accept_telemetry
whiptail_end_settings
elif [[ $is_managersearch ]]; then
info "Setting up as node type managersearch"
@@ -508,6 +511,7 @@ if ! [[ -f $install_opt_file ]]; then
collect_webuser_inputs
get_redirect
collect_so_allow
[[ ! $is_airgap ]] && whiptail_accept_telemetry
whiptail_end_settings
elif [[ $is_sensor ]]; then
info "Setting up as node type sensor"
@@ -597,6 +601,7 @@ if ! [[ -f $install_opt_file ]]; then
collect_webuser_inputs
get_redirect
collect_so_allow
[[ ! $is_airgap ]] && whiptail_accept_telemetry
whiptail_end_settings
elif [[ $is_receiver ]]; then

View File

@@ -144,6 +144,26 @@ whiptail_cancel() {
exit 1
}
whiptail_accept_telemetry() {
[ -n "$TESTING" ] && return
read -r -d '' message <<- EOM
The Security Onion development team could use your help! Enabling SOC
Telemetry will help the team understand which UI features are being
used and enables informed prioritization of future development.
Adjust this setting at any time via the SOC Configuration screen.
Documentation: https://docs.securityonion.net/en/2.4/telemetry.html
Enable SOC Telemetry to help improve future releases?
EOM
whiptail --title "$whiptail_title" --yesno "$message" 15 75
telemetry=$?
}
whiptail_check_exitstatus() {
case $1 in
1)
@@ -431,6 +451,12 @@ whiptail_end_settings() {
done
fi
if [[ $telemetry -eq 0 ]]; then
__append_end_msg "SOC Telemetry: enabled"
else
__append_end_msg "SOC Telemetry: disabled"
fi
# ADVANCED
if [[ $MANAGERADV == 'ADVANCED' ]]; then
__append_end_msg "Advanced Manager Settings:"