Merge remote-tracking branch 'origin/2.4/dev' into kaffytaffy

m0duspwnens
2024-04-10 13:14:13 -04:00
30 changed files with 709 additions and 39 deletions

View File

@@ -1,3 +1,5 @@
{% if '2.4' in salt['cp.get_file_str']('/etc/soversion') %}
{% import_yaml '/opt/so/saltstack/local/pillar/global/soc_global.sls' as SOC_GLOBAL %}
{% if SOC_GLOBAL.global.airgap %}
{% set UPDATE_DIR='/tmp/soagupdate/SecurityOnion' %}
@@ -68,3 +70,19 @@ copy_so-firewall_sbin:
- source: {{UPDATE_DIR}}/salt/manager/tools/sbin/so-firewall
- force: True
- preserve: True
copy_so-yaml_sbin:
file.copy:
- name: /usr/sbin/so-yaml.py
- source: {{UPDATE_DIR}}/salt/manager/tools/sbin/so-yaml.py
- force: True
- preserve: True
{% else %}
fix_23_soup_sbin:
cmd.run:
- name: curl -s -f -o /usr/sbin/soup https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.3/main/salt/common/tools/sbin/soup
fix_23_soup_salt:
cmd.run:
- name: curl -s -f -o /opt/so/saltstack/default/salt/common/tools/sbin/soup https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.3/main/salt/common/tools/sbin/soup
{% endif %}

View File

@@ -248,6 +248,14 @@ get_random_value() {
head -c 5000 /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w $length | head -n 1
}
get_agent_count() {
if [ -f /opt/so/log/agents/agentstatus.log ]; then
AGENTCOUNT=$(cat /opt/so/log/agents/agentstatus.log | grep -wF active | awk '{print $2}')
else
AGENTCOUNT=0
fi
}
gpg_rpm_import() {
if [[ $is_oracle ]]; then
if [[ "$WHATWOULDYOUSAYYAHDOHERE" == "setup" ]]; then
@@ -329,7 +337,7 @@ lookup_salt_value() {
local=""
fi
salt-call --no-color ${kind}.get ${group}${key} --out=${output} ${local}
salt-call -lerror --no-color ${kind}.get ${group}${key} --out=${output} ${local}
}
lookup_pillar() {
@@ -570,8 +578,9 @@ sync_options() {
set_version
set_os
salt_minion_count
get_agent_count
echo "$VERSION/$OS/$(uname -r)/$MINIONCOUNT/$(read_feat)"
echo "$VERSION/$OS/$(uname -r)/$MINIONCOUNT:$AGENTCOUNT/$(read_feat)"
}
systemctl_func() {

View File

@@ -198,6 +198,8 @@ if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|req.LocalMeta.host.ip" # known issue in GH
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|sendmail" # zeek
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|stats.log"
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Unknown column" # Elastalert errors from running EQL queries
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|parsing_exception" # Elastalert EQL parsing issue. Temp.
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|context deadline exceeded"
fi
@@ -233,6 +235,7 @@ exclude_log "curator.log" # ignore since Curator has been removed
exclude_log "playbook.log" # Playbook is removed as of 2.4.70, logs may still be on disk
exclude_log "mysqld.log" # MySQL is removed as of 2.4.70, logs may still be on disk
exclude_log "soctopus.log" # Soctopus is removed as of 2.4.70, logs may still be on disk
exclude_log "agentstatus.log" # ignore this log since it tracks agents in error state
for log_file in $(cat /tmp/log_check_files); do
status "Checking log file $log_file"

View File

@@ -0,0 +1,29 @@
{
"package": {
"name": "winlog",
"version": ""
},
"name": "windows-defender",
"namespace": "default",
"description": "Windows Defender - Operational logs",
"policy_id": "endpoints-initial",
"inputs": {
"winlogs-winlog": {
"enabled": true,
"streams": {
"winlog.winlog": {
"enabled": true,
"vars": {
"channel": "Microsoft-Windows-Windows Defender/Operational",
"data_stream.dataset": "winlog.winlog",
"preserve_original_event": false,
"providers": [],
"ignore_older": "72h",
"language": 0,
"tags": [] }
}
}
}
},
"force": true
}

View File

@@ -16,6 +16,9 @@
"paths": [
"/var/log/auth.log*",
"/var/log/secure*"
],
"tags": [
"so-grid-node"
]
}
},
@@ -25,6 +28,9 @@
"paths": [
"/var/log/messages*",
"/var/log/syslog*"
],
"tags": [
"so-grid-node"
]
}
}

View File

@@ -16,6 +16,9 @@
"paths": [
"/var/log/auth.log*",
"/var/log/secure*"
],
"tags": [
"so-grid-node"
]
}
},
@@ -25,6 +28,9 @@
"paths": [
"/var/log/messages*",
"/var/log/syslog*"
],
"tags": [
"so-grid-node"
]
}
}

View File

@@ -2402,6 +2402,50 @@ elasticsearch:
set_priority:
priority: 50
min_age: 30d
so-logs-cef_x_log:
index_sorting: False
index_template:
index_patterns:
- "logs-cef.log-*"
template:
settings:
index:
lifecycle:
name: so-logs-cef.log-logs
number_of_replicas: 0
composed_of:
- "logs-cef.log@package"
- "logs-cef.log@custom"
- "so-fleet_globals-1"
- "so-fleet_agent_id_verification-1"
priority: 501
data_stream:
hidden: false
allow_custom_routing: false
policy:
phases:
cold:
actions:
set_priority:
priority: 0
min_age: 30d
delete:
actions:
delete: {}
min_age: 365d
hot:
actions:
rollover:
max_age: 30d
max_primary_shard_size: 50gb
set_priority:
priority: 100
min_age: 0ms
warm:
actions:
set_priority:
priority: 50
min_age: 30d
so-logs-checkpoint_x_firewall:
index_sorting: False
index_template:

View File

@@ -83,6 +83,7 @@
{ "date": { "if": "ctx.event?.module == 'system'", "field": "event.created", "target_field": "@timestamp", "formats": ["yyyy-MM-dd'T'HH:mm:ss.SSSSSS'Z'"] } },
{ "community_id":{ "if": "ctx.event?.dataset == 'endpoint.events.network'", "ignore_failure":true } },
{ "set": { "if": "ctx.event?.module == 'fim'", "override": true, "field": "event.module", "value": "file_integrity" } },
{ "rename": { "if": "ctx.winlog?.provider_name == 'Microsoft-Windows-Windows Defender'", "ignore_missing": true, "field": "winlog.event_data.Threat Name", "target_field": "winlog.event_data.threat_name" } },
{ "remove": { "field": [ "message2", "type", "fields", "category", "module", "dataset", "event.dataset_temp", "dataset_tag_temp", "module_temp" ], "ignore_missing": true, "ignore_failure": true } }
],
"on_failure": [

View File

@@ -366,6 +366,7 @@ elasticsearch:
so-logs-azure_x_signinlogs: *indexSettings
so-logs-azure_x_springcloudlogs: *indexSettings
so-logs-barracuda_x_waf: *indexSettings
so-logs-cef_x_log: *indexSettings
so-logs-cisco_asa_x_log: *indexSettings
so-logs-cisco_ftd_x_log: *indexSettings
so-logs-cisco_ios_x_log: *indexSettings

View File

@@ -30,7 +30,8 @@
"type": "keyword"
},
"author": {
"type": "text"
"ignore_above": 1024,
"type": "keyword"
},
"description": {
"type": "text"

View File

@@ -28,7 +28,7 @@ global:
description: Used for handling of authentication cookies.
global: True
airgap:
description: Sets airgap mode.
description: Airgapped systems do not have network connectivity to the internet. This setting reflects how the grid was configured during initial setup. While it is technically possible to manually switch systems between airgap and non-airgap, there are nuances and additional steps involved; for that reason, this setting is marked read-only. Contact your support representative for guidance if you need to change this setting.
global: True
readonly: True
imagerepo:

View File

@@ -27,6 +27,15 @@ repo_log_dir:
- user
- group
agents_log_dir:
file.directory:
- name: /opt/so/log/agents
- user: root
- group: root
- recurse:
- user
- group
yara_log_dir:
file.directory:
- name: /opt/so/log/yarasync
@@ -101,6 +110,17 @@ so-repo-sync:
- hour: '{{ MANAGERMERGED.reposync.hour }}'
- minute: '{{ MANAGERMERGED.reposync.minute }}'
so_fleetagent_status:
cron.present:
- name: /usr/sbin/so-elasticagent-status > /opt/so/log/agents/agentstatus.log 2>&1
- identifier: so_fleetagent_status
- user: root
- minute: '*/5'
- hour: '*'
- daymonth: '*'
- month: '*'
- dayweek: '*'
socore_own_saltstack:
file.directory:
- name: /opt/so/saltstack

View File

@@ -0,0 +1,10 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
. /usr/sbin/so-common
curl -s -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/agent_status" | jq .

View File

@@ -17,13 +17,16 @@ def showUsage(args):
print('Usage: {} <COMMAND> <YAML_FILE> [ARGS...]'.format(sys.argv[0]))
print(' General commands:')
print(' append - Append a list item to a yaml key, if it exists and is a list. Requires KEY and LISTITEM args.')
print(' add - Add a new key and set its value. Fails if key already exists. Requires KEY and VALUE args.')
print(' remove - Removes a yaml key, if it exists. Requires KEY arg.')
print(' replace - Replaces (or adds) a new key and set its value. Requires KEY and VALUE args.')
print(' help - Prints this usage information.')
print('')
print(' Where:')
print(' YAML_FILE - Path to the file that will be modified. Ex: /opt/so/conf/service/conf.yaml')
print(' KEY - YAML key, does not support \' or " characters at this time. Ex: level1.level2')
print(' LISTITEM - Item to add to the list.')
print(' VALUE - Value to set for a given key')
print(' LISTITEM - Item to append to a given key\'s list value')
sys.exit(1)
@@ -37,6 +40,7 @@ def writeYaml(filename, content):
file = open(filename, "w")
return yaml.dump(content, file)
def appendItem(content, key, listItem):
pieces = key.split(".", 1)
if len(pieces) > 1:
@@ -51,6 +55,30 @@ def appendItem(content, key, listItem):
print("The key provided does not exist. No action was taken on the file.")
return 1
def convertType(value):
if len(value) > 0 and (not value.startswith("0") or len(value) == 1):
if "." in value:
try:
value = float(value)
return value
except ValueError:
pass
try:
value = int(value)
return value
except ValueError:
pass
lowered_value = value.lower()
if lowered_value == "false":
return False
elif lowered_value == "true":
return True
return value
def append(args):
if len(args) != 3:
print('Missing filename, key arg, or list item to append', file=sys.stderr)
@@ -62,11 +90,41 @@ def append(args):
listItem = args[2]
content = loadYaml(filename)
appendItem(content, key, listItem)
appendItem(content, key, convertType(listItem))
writeYaml(filename, content)
return 0
def addKey(content, key, value):
pieces = key.split(".", 1)
if len(pieces) > 1:
if not pieces[0] in content:
content[pieces[0]] = {}
addKey(content[pieces[0]], pieces[1], value)
elif key in content:
raise KeyError("key already exists")
else:
content[key] = value
def add(args):
if len(args) != 3:
print('Missing filename, key arg, and/or value', file=sys.stderr)
showUsage(None)
return
filename = args[0]
key = args[1]
value = args[2]
content = loadYaml(filename)
addKey(content, key, convertType(value))
writeYaml(filename, content)
return 0
def removeKey(content, key):
pieces = key.split(".", 1)
if len(pieces) > 1:
@@ -91,6 +149,24 @@ def remove(args):
return 0
def replace(args):
if len(args) != 3:
print('Missing filename, key arg, and/or value', file=sys.stderr)
showUsage(None)
return
filename = args[0]
key = args[1]
value = args[2]
content = loadYaml(filename)
removeKey(content, key)
addKey(content, key, convertType(value))
writeYaml(filename, content)
return 0
def main():
args = sys.argv[1:]
@@ -100,8 +176,10 @@ def main():
commands = {
"help": showUsage,
"add": add,
"append": append,
"remove": remove,
"replace": replace,
}
code = 1
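A minimal usage sketch of the new add and replace commands (the file path and key names are illustrative, borrowed from the help text's examples; values pass through convertType, so "123" and "true" are stored as an integer and a boolean, while leading-zero strings such as "0123" stay strings):
# add creates a new key and fails if the key already exists
so-yaml.py add /opt/so/conf/service/conf.yaml level1.newkey 123
# replace removes any existing key first, then writes the new value
so-yaml.py replace /opt/so/conf/service/conf.yaml level1.level2 true
# append adds an item to an existing list value
so-yaml.py append /opt/so/conf/service/conf.yaml level1.somelist item1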

View File

@@ -42,6 +42,14 @@ class TestRemove(unittest.TestCase):
sysmock.assert_called()
self.assertIn(mock_stdout.getvalue(), "Usage:")
def test_remove_missing_arg(self):
with patch('sys.exit', new=MagicMock()) as sysmock:
with patch('sys.stderr', new=StringIO()) as mock_stdout:
sys.argv = ["cmd", "help"]
soyaml.remove(["file"])
sysmock.assert_called()
self.assertIn(mock_stdout.getvalue(), "Missing filename or key arg\n")
def test_remove(self):
filename = "/tmp/so-yaml_test-remove.yaml"
file = open(filename, "w")
@@ -106,6 +114,14 @@ class TestRemove(unittest.TestCase):
sysmock.assert_called_once_with(1)
self.assertIn(mock_stdout.getvalue(), "Missing filename or key arg\n")
def test_append_missing_arg(self):
with patch('sys.exit', new=MagicMock()) as sysmock:
with patch('sys.stderr', new=StringIO()) as mock_stdout:
sys.argv = ["cmd", "help"]
soyaml.append(["file", "key"])
sysmock.assert_called()
self.assertIn(mock_stdout.getvalue(), "Missing filename, key arg, or list item to append\n")
def test_append(self):
filename = "/tmp/so-yaml_test-remove.yaml"
file = open(filename, "w")
@@ -201,3 +217,146 @@ class TestRemove(unittest.TestCase):
soyaml.main()
sysmock.assert_called()
self.assertEqual(mock_stdout.getvalue(), "The existing value for the given key is not a list. No action was taken on the file.\n")
def test_add_key(self):
content = {}
soyaml.addKey(content, "foo", 123)
self.assertEqual(content, {"foo": 123})
try:
soyaml.addKey(content, "foo", "bar")
self.assertFail("expected key error since key already exists")
except KeyError:
pass
try:
soyaml.addKey(content, "foo.bar", 123)
self.assertFail("expected type error since key parent value is not a map")
except TypeError:
pass
content = {}
soyaml.addKey(content, "foo", "bar")
self.assertEqual(content, {"foo": "bar"})
soyaml.addKey(content, "badda.badda", "boom")
self.assertEqual(content, {"foo": "bar", "badda": {"badda": "boom"}})
def test_add_missing_arg(self):
with patch('sys.exit', new=MagicMock()) as sysmock:
with patch('sys.stderr', new=StringIO()) as mock_stdout:
sys.argv = ["cmd", "help"]
soyaml.add(["file", "key"])
sysmock.assert_called()
self.assertIn(mock_stdout.getvalue(), "Missing filename, key arg, and/or value\n")
def test_add(self):
filename = "/tmp/so-yaml_test-add.yaml"
file = open(filename, "w")
file.write("{key1: { child1: 123, child2: abc }, key2: false, key3: [a,b,c]}")
file.close()
soyaml.add([filename, "key4", "d"])
file = open(filename, "r")
actual = file.read()
file.close()
expected = "key1:\n child1: 123\n child2: abc\nkey2: false\nkey3:\n- a\n- b\n- c\nkey4: d\n"
self.assertEqual(actual, expected)
def test_add_nested(self):
filename = "/tmp/so-yaml_test-add.yaml"
file = open(filename, "w")
file.write("{key1: { child1: 123, child2: [a,b,c] }, key2: false, key3: [e,f,g]}")
file.close()
soyaml.add([filename, "key1.child3", "d"])
file = open(filename, "r")
actual = file.read()
file.close()
expected = "key1:\n child1: 123\n child2:\n - a\n - b\n - c\n child3: d\nkey2: false\nkey3:\n- e\n- f\n- g\n"
self.assertEqual(actual, expected)
def test_add_nested_deep(self):
filename = "/tmp/so-yaml_test-add.yaml"
file = open(filename, "w")
file.write("{key1: { child1: 123, child2: { deep1: 45 } }, key2: false, key3: [e,f,g]}")
file.close()
soyaml.add([filename, "key1.child2.deep2", "d"])
file = open(filename, "r")
actual = file.read()
file.close()
expected = "key1:\n child1: 123\n child2:\n deep1: 45\n deep2: d\nkey2: false\nkey3:\n- e\n- f\n- g\n"
self.assertEqual(actual, expected)
def test_replace_missing_arg(self):
with patch('sys.exit', new=MagicMock()) as sysmock:
with patch('sys.stderr', new=StringIO()) as mock_stdout:
sys.argv = ["cmd", "help"]
soyaml.replace(["file", "key"])
sysmock.assert_called()
self.assertIn(mock_stdout.getvalue(), "Missing filename, key arg, and/or value\n")
def test_replace(self):
filename = "/tmp/so-yaml_test-add.yaml"
file = open(filename, "w")
file.write("{key1: { child1: 123, child2: abc }, key2: false, key3: [a,b,c]}")
file.close()
soyaml.replace([filename, "key2", True])
file = open(filename, "r")
actual = file.read()
file.close()
expected = "key1:\n child1: 123\n child2: abc\nkey2: true\nkey3:\n- a\n- b\n- c\n"
self.assertEqual(actual, expected)
def test_replace_nested(self):
filename = "/tmp/so-yaml_test-add.yaml"
file = open(filename, "w")
file.write("{key1: { child1: 123, child2: [a,b,c] }, key2: false, key3: [e,f,g]}")
file.close()
soyaml.replace([filename, "key1.child2", "d"])
file = open(filename, "r")
actual = file.read()
file.close()
expected = "key1:\n child1: 123\n child2: d\nkey2: false\nkey3:\n- e\n- f\n- g\n"
self.assertEqual(actual, expected)
def test_replace_nested_deep(self):
filename = "/tmp/so-yaml_test-add.yaml"
file = open(filename, "w")
file.write("{key1: { child1: 123, child2: { deep1: 45 } }, key2: false, key3: [e,f,g]}")
file.close()
soyaml.replace([filename, "key1.child2.deep1", 46])
file = open(filename, "r")
actual = file.read()
file.close()
expected = "key1:\n child1: 123\n child2:\n deep1: 46\nkey2: false\nkey3:\n- e\n- f\n- g\n"
self.assertEqual(actual, expected)
def test_convert(self):
self.assertEqual(soyaml.convertType("foo"), "foo")
self.assertEqual(soyaml.convertType("foo.bar"), "foo.bar")
self.assertEqual(soyaml.convertType("123"), 123)
self.assertEqual(soyaml.convertType("0"), 0)
self.assertEqual(soyaml.convertType("00"), "00")
self.assertEqual(soyaml.convertType("0123"), "0123")
self.assertEqual(soyaml.convertType("123.456"), 123.456)
self.assertEqual(soyaml.convertType("0123.456"), "0123.456")
self.assertEqual(soyaml.convertType("true"), True)
self.assertEqual(soyaml.convertType("TRUE"), True)
self.assertEqual(soyaml.convertType("false"), False)
self.assertEqual(soyaml.convertType("FALSE"), False)
self.assertEqual(soyaml.convertType(""), "")

View File

@@ -229,7 +229,7 @@ check_local_mods() {
# {% endraw %}
check_pillar_items() {
local pillar_output=$(salt-call pillar.items --out=json)
local pillar_output=$(salt-call pillar.items -lerror --out=json)
cond=$(jq '.local | has("_errors")' <<< "$pillar_output")
if [[ "$cond" == "true" ]]; then
@@ -357,6 +357,7 @@ preupgrade_changes() {
[[ "$INSTALLEDVERSION" == 2.4.30 ]] && up_to_2.4.40
[[ "$INSTALLEDVERSION" == 2.4.40 ]] && up_to_2.4.50
[[ "$INSTALLEDVERSION" == 2.4.50 ]] && up_to_2.4.60
[[ "$INSTALLEDVERSION" == 2.4.60 ]] && up_to_2.4.70
true
}
@@ -373,6 +374,7 @@ postupgrade_changes() {
[[ "$POSTVERSION" == 2.4.30 ]] && post_to_2.4.40
[[ "$POSTVERSION" == 2.4.40 ]] && post_to_2.4.50
[[ "$POSTVERSION" == 2.4.50 ]] && post_to_2.4.60
[[ "$POSTVERSION" == 2.4.60 ]] && post_to_2.4.70
true
}
@@ -435,6 +437,11 @@ post_to_2.4.60() {
POSTVERSION=2.4.60
}
post_to_2.4.70() {
echo "Nothing to apply"
POSTVERSION=2.4.70
}
repo_sync() {
echo "Sync the local repo."
su socore -c '/usr/sbin/so-repo-sync' || fail "Unable to complete so-repo-sync."
@@ -574,6 +581,116 @@ up_to_2.4.60() {
INSTALLEDVERSION=2.4.60
}
up_to_2.4.70() {
playbook_migration
toggle_telemetry
INSTALLEDVERSION=2.4.70
}
toggle_telemetry() {
if [[ -z $UNATTENDED && $is_airgap -ne 0 ]]; then
cat << ASSIST_EOF
--------------- SOC Telemetry ---------------
The Security Onion development team could use your help! Enabling SOC
Telemetry will help the team understand which UI features are being
used and enables informed prioritization of future development.
Adjust this setting at any time via the SOC Configuration screen.
Documentation: https://docs.securityonion.net/en/2.4/telemetry.html
ASSIST_EOF
echo -n "Continue the upgrade with SOC Telemetry enabled [Y/n]? "
read -r input
input=$(echo "${input,,}" | xargs echo -n)
echo ""
if [[ ${#input} -eq 0 || "$input" == "yes" || "$input" == "y" || "$input" == "yy" ]]; then
echo "Thank you for helping improve Security Onion!"
else
if so-yaml.py replace /opt/so/saltstack/local/pillar/soc/soc_soc.sls soc.telemetryEnabled false; then
echo "Disabled SOC Telemetry."
else
fail "Failed to disable SOC Telemetry; aborting."
fi
fi
echo ""
fi
}
playbook_migration() {
# Start SOC Detections migration
mkdir -p /nsm/backup/detections-migration/{suricata,sigma/rules,elastalert}
# Remove cronjobs
crontab -l | grep -v 'so-playbook-sync_cron' | crontab -
crontab -l | grep -v 'so-playbook-ruleupdate_cron' | crontab -
if grep -A 1 'playbook:' /opt/so/saltstack/local/pillar/minions/* | grep -q 'enabled: True'; then
# Check for active Elastalert rules
active_rules_count=$(find /opt/so/rules/elastalert/playbook/ -type f -name "*.yaml" | wc -l)
if [[ "$active_rules_count" -gt 0 ]]; then
# Prompt the user to AGREE if active Elastalert rules found
echo
echo "$active_rules_count Active Elastalert/Playbook rules found."
echo "In preparation for the new Detections module, they will be backed up and then disabled."
echo
echo "If you would like to proceed, then type AGREE and press ENTER."
echo
# Read user input
read INPUT
if [ "${INPUT^^}" != 'AGREE' ]; then fail "SOUP canceled."; fi
echo "Backing up the Elastalert rules..."
rsync -av --stats /opt/so/rules/elastalert/playbook/*.yaml /nsm/backup/detections-migration/elastalert/
# Verify that rsync completed successfully
if [[ $? -eq 0 ]]; then
# Delete the Elastalert rules
rm -f /opt/so/rules/elastalert/playbook/*.yaml
echo "Active Elastalert rules have been backed up."
else
fail "Error: rsync failed to copy the files. Active Elastalert rules have not been backed up."
fi
fi
echo
echo "Exporting Sigma rules from Playbook..."
MYSQLPW=$(awk '/mysql:/ {print $2}' /opt/so/saltstack/local/pillar/secrets.sls)
docker exec so-mysql sh -c "exec mysql -uroot -p${MYSQLPW} -D playbook -sN -e \"SELECT id, value FROM custom_values WHERE value LIKE '%View Sigma%'\"" | while read -r id value; do
echo -e "$value" > "/nsm/backup/detections-migration/sigma/rules/$id.yaml"
done || fail "Failed to export Sigma rules..."
echo
echo "Exporting Sigma Filters from Playbook..."
docker exec so-mysql sh -c "exec mysql -uroot -p${MYSQLPW} -D playbook -sN -e \"SELECT issues.subject as title, custom_values.value as filter FROM issues JOIN custom_values ON issues.id = custom_values.customized_id WHERE custom_values.value LIKE '%sofilter%'\"" > /nsm/backup/detections-migration/sigma/custom-filters.txt || fail "Failed to export Custom Sigma Filters."
echo
echo "Backing up Playbook database..."
docker exec so-mysql sh -c "mysqldump -uroot -p${MYSQLPW} --databases playbook > /tmp/playbook-dump" || fail "Failed to dump Playbook database."
docker cp so-mysql:/tmp/playbook-dump /nsm/backup/detections-migration/sigma/playbook-dump.sql || fail "Failed to backup Playbook database."
fi
echo
echo "Stopping Playbook services & cleaning up..."
for container in so-playbook so-mysql so-soctopus; do
if [ -n "$(docker ps -q -f name=^${container}$)" ]; then
docker stop $container
fi
done
sed -i '/so-playbook\|so-soctopus\|so-mysql/d' /opt/so/conf/so-status/so-status.conf
rm -f /usr/sbin/so-playbook-* /usr/sbin/so-soctopus-* /usr/sbin/so-mysql-*
echo
echo "Playbook Migration is complete...."
}
determine_elastic_agent_upgrade() {
if [[ $is_airgap -eq 0 ]]; then
update_elastic_agent_airgap
@@ -756,7 +873,7 @@ verify_latest_update_script() {
else
echo "You are not running the latest soup version. Updating soup and its components. This might take multiple runs to complete."
salt-call state.apply common.soup_scripts queue=True -linfo --file-root=$UPDATE_DIR/salt --local
salt-call state.apply common.soup_scripts queue=True -lerror --file-root=$UPDATE_DIR/salt --local --out-file=/dev/null
# Verify that soup scripts updated as expected
get_soup_script_hashes
@@ -837,7 +954,6 @@ main() {
echo "### Preparing soup at $(date) ###"
echo ""
set_os
check_salt_master_status 1 || fail "Could not talk to salt master: Please run 'systemctl status salt-master' to ensure the salt-master service is running and check the log at /opt/so/log/salt/master."

View File

@@ -52,6 +52,15 @@ socsaltdir:
- mode: 770
- makedirs: True
socanalytics:
file.managed:
- name: /opt/so/conf/soc/analytics.js
- source: salt://soc/files/soc/analytics.js
- user: 939
- group: 939
- mode: 600
- show_changes: False
socconfig:
file.managed:
- name: /opt/so/conf/soc/soc.json

View File

@@ -1,5 +1,6 @@
soc:
enabled: False
telemetryEnabled: true
config:
logFilename: /opt/sensoroni/logs/sensoroni-server.log
logLevel: info
@@ -87,12 +88,13 @@ soc:
- log.id.uid
- network.community_id
- event.dataset
':kratos:audit':
':kratos:':
- soc_timestamp
- http_request.headers.x-real-ip
- identity_id
- http_request.headers.user-agent
- event.dataset
- msg
'::conn':
- soc_timestamp
- source.ip
@@ -1220,6 +1222,17 @@ soc:
- event_data.destination.port
- event_data.process.executable
- event_data.process.pid
':sigma:':
- soc_timestamp
- rule.name
- event.severity_label
- event_data.event.dataset
- event_data.source.ip
- event_data.source.port
- event_data.destination.host
- event_data.destination.port
- event_data.process.executable
- event_data.process.pid
server:
bindAddress: 0.0.0.0:9822
baseUrl: /
@@ -1236,11 +1249,16 @@ soc:
elastalertengine:
allowRegex: ''
autoUpdateEnabled: true
autoEnabledSigmaRules:
- core+critical
- securityonion-resources+critical
- securityonion-resources+high
communityRulesImportFrequencySeconds: 86400
denyRegex: ''
elastAlertRulesFolder: /opt/sensoroni/elastalert
reposFolder: /opt/sensoroni/sigma/repos
rulesFingerprintFile: /opt/sensoroni/fingerprints/sigma.fingerprint
stateFilePath: /opt/so/conf/soc/fingerprints/elastalertengine.state
rulesRepos:
- repo: https://github.com/Security-Onion-Solutions/securityonion-resources
license: Elastic-2.0
@@ -1301,6 +1319,7 @@ soc:
- repo: https://github.com/Security-Onion-Solutions/securityonion-yara
license: DRL
yaraRulesFolder: /opt/sensoroni/yara/rules
stateFilePath: /opt/so/conf/soc/fingerprints/strelkaengine.state
suricataengine:
allowRegex: ''
autoUpdateEnabled: true
@@ -1308,6 +1327,7 @@ soc:
communityRulesFile: /nsm/rules/suricata/emerging-all.rules
denyRegex: ''
rulesFingerprintFile: /opt/sensoroni/fingerprints/emerging-all.fingerprint
stateFilePath: /opt/so/conf/soc/fingerprints/suricataengine.state
client:
enableReverseLookup: false
docsUrl: /docs/
@@ -1652,21 +1672,42 @@ soc:
- name: Overview
description: Overview of all events
query: '* | groupby event.category | groupby -sankey event.category event.module | groupby event.module | groupby -sankey event.module event.dataset | groupby event.dataset | groupby observer.name | groupby host.name | groupby source.ip | groupby destination.ip | groupby destination.port'
- name: SOC Auth
description: SOC (Security Onion Console) authentication logs
- name: SOC Logins
description: SOC (Security Onion Console) logins
query: 'event.dataset:kratos.audit AND msg:*authenticated* | groupby http_request.headers.x-real-ip | groupby -sankey http_request.headers.x-real-ip identity_id | groupby identity_id | groupby http_request.headers.user-agent'
- name: Elastalerts
description: Elastalert logs
query: '_index: "*:elastalert*" | groupby rule_name | groupby alert_info.type'
- name: SOC Login Failures
description: SOC (Security Onion Console) login failures
query: 'event.dataset:kratos.audit AND msg:*Encountered*self-service*login*error* | groupby http_request.headers.x-real-ip | groupby -sankey http_request.headers.x-real-ip http_request.headers.user-agent | groupby http_request.headers.user-agent'
- name: Alerts
description: Overview of all alerts
query: 'tags:alert | groupby event.module* | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby rule.name | groupby event.severity | groupby destination_geo.organization_name'
- name: NIDS Alerts
description: NIDS (Network Intrusion Detection System) alerts
query: 'event.category:network AND tags:alert | groupby rule.category | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby rule.name | groupby rule.uuid | groupby rule.gid | groupby destination_geo.organization_name'
- name: Sysmon Overview
description: Overview of all Sysmon data types
query: 'event.dataset:windows.sysmon_operational | groupby event.action | groupby -sankey event.action host.name | groupby host.name | groupby -sankey host.name user.name | groupby user.name | groupby event.category event.action | groupby dns.question.name | groupby process.executable | groupby file.name | groupby source.ip | groupby destination.ip | groupby destination.port'
- name: Elastic Agent Overview
description: Overview of all events from Elastic Agents
query: 'event.module:endpoint | groupby event.dataset | groupby host.name | groupby -sankey host.name user.name | groupby user.name | groupby -sankey user.name process.name | groupby process.name'
- name: Elastic Agent API Events
description: API (Application Programming Interface) events from Elastic Agents
query: 'event.dataset:endpoint.events.api | groupby host.name | groupby -sankey host.name user.name | groupby user.name | groupby -sankey user.name process.name | groupby process.name | groupby process.Ext.api.name'
- name: Elastic Agent File Events
description: File events from Elastic Agents
query: 'event.dataset:endpoint.events.file | groupby host.name | groupby -sankey host.name user.name | groupby user.name | groupby -sankey user.name process.name | groupby process.name | groupby event.action | groupby file.path'
- name: Elastic Agent Library Events
description: Library events from Elastic Agents
query: 'event.dataset:endpoint.events.library | groupby host.name | groupby -sankey host.name user.name | groupby user.name | groupby -sankey user.name process.name | groupby process.name | groupby event.action | groupby dll.path | groupby dll.code_signature.status | groupby dll.code_signature.subject_name'
- name: Elastic Agent Network Events
description: Network events from Elastic Agents
query: 'event.dataset:endpoint.events.network | groupby host.name | groupby -sankey host.name user.name | groupby user.name | groupby -sankey user.name process.name | groupby process.name | groupby event.action | groupby source.ip | groupby destination.ip | groupby destination.port'
- name: Elastic Agent Process Events
description: Process events from Elastic Agents
query: 'event.dataset:endpoint.events.process | groupby host.name | groupby -sankey host.name user.name | groupby user.name | groupby -sankey user.name process.parent.name | groupby process.parent.name | groupby -sankey process.parent.name process.name | groupby process.name | groupby event.action | groupby process.working_directory'
- name: Elastic Agent Registry Events
description: Registry events from Elastic Agents
query: 'event.dataset:endpoint.events.registry | groupby host.name | groupby -sankey host.name user.name | groupby user.name | groupby -sankey user.name process.name | groupby process.name | groupby event.action | groupby registry.path'
- name: Elastic Agent Security Events
description: Security events from Elastic Agents
query: 'event.dataset:endpoint.events.security | groupby host.name | groupby -sankey host.name user.name | groupby user.name | groupby -sankey user.name process.executable | groupby process.executable | groupby event.action | groupby event.outcome'
- name: Host Overview
description: Overview of all host data types
query: '((event.category:registry OR event.category:host OR event.category:process OR event.category:driver OR event.category:configuration) OR (event.category:file AND _exists_:process.executable) OR (event.category:network AND _exists_:host.name)) | groupby event.dataset* event.category* event.action* | groupby event.type | groupby -sankey event.type host.name | groupby host.name | groupby user.name | groupby file.name | groupby process.executable'
@@ -1685,24 +1726,18 @@ soc:
- name: Host Network & Process Mappings
description: Network activity mapped to originating processes
query: 'event.category: network AND _exists_:process.executable | groupby event.action | groupby -sankey event.action host.name | groupby host.name | groupby -sankey host.name user.name | groupby user.name | groupby event.dataset* event.type* event.action* | groupby dns.question.name | groupby process.executable | groupby process.name | groupby source.ip | groupby destination.ip | groupby destination.port'
- name: Host API Events
description: API (Application Programming Interface) events from endpoints
query: 'event.dataset:endpoint.events.api | groupby host.name | groupby -sankey host.name user.name | groupby user.name | groupby -sankey user.name process.name | groupby process.name | groupby process.Ext.api.name'
- name: Host Library Events
description: Library events from endpoints
query: 'event.dataset:endpoint.events.library | groupby host.name | groupby -sankey host.name user.name | groupby user.name | groupby -sankey user.name process.name | groupby process.name | groupby event.action | groupby dll.path | groupby dll.code_signature.status | groupby dll.code_signature.subject_name'
- name: Host Security Events
description: Security events from endpoints
query: 'event.dataset:endpoint.events.security | groupby host.name | groupby -sankey host.name user.name | groupby user.name | groupby -sankey user.name process.executable | groupby process.executable | groupby event.action | groupby event.outcome'
- name: Sysmon Overview
description: Overview of all Sysmon data types
query: 'event.dataset:windows.sysmon_operational | groupby event.action | groupby -sankey event.action host.name | groupby host.name | groupby -sankey host.name user.name | groupby user.name | groupby event.category event.action | groupby dns.question.name | groupby process.executable | groupby file.name | groupby source.ip | groupby destination.ip | groupby destination.port'
- name: Strelka
description: Strelka file analysis
query: 'event.module:strelka | groupby file.mime_type | groupby -sankey file.mime_type file.source | groupby file.source | groupby -sankey file.source file.name | groupby file.name'
- name: Zeek Notice
description: Zeek notice logs
query: 'event.dataset:zeek.notice | groupby notice.note | groupby -sankey notice.note source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby notice.message | groupby notice.sub_message | groupby source_geo.organization_name | groupby destination_geo.organization_name'
- name: Connections and Metadata with community_id
description: Network connections that include community_id
query: '_exists_:network.community_id | groupby event.module* | groupby -sankey event.module* event.dataset | groupby event.dataset | groupby source.ip source.port destination.ip destination.port | groupby network.protocol | groupby source_geo.organization_name source.geo.country_name | groupby destination_geo.organization_name destination.geo.country_name | groupby rule.name rule.category event.severity_label | groupby dns.query.name | groupby http.virtual_host http.uri | groupby notice.note notice.message notice.sub_message | groupby source.ip host.hostname user.name event.action event.type process.executable process.pid'
- name: Connections and Metadata with Community ID
description: Network connections that include network.community_id
query: '_exists_:network.community_id | groupby event.module* | groupby -sankey event.module* event.dataset | groupby event.dataset | groupby source.ip | groupby destination.ip | groupby destination.port | groupby network.protocol | groupby source_geo.organization_name | groupby source.geo.country_name | groupby destination_geo.organization_name | groupby destination.geo.country_name | groupby rule.name rule.category event.severity_label | groupby dns.query.name | groupby http.virtual_host http.uri | groupby notice.note notice.message notice.sub_message | groupby source.ip host.hostname user.name event.action event.type process.executable process.pid'
- name: Connections seen by Zeek or Suricata
description: Network connections logged by Zeek or Suricata
query: 'tags:conn | groupby source.ip | groupby destination.ip | groupby destination.port | groupby -sankey destination.port network.protocol | groupby network.protocol | groupby network.transport | groupby connection.history | groupby connection.state | groupby connection.state_description | groupby source.geo.country_name | groupby destination.geo.country_name | groupby client.ip_bytes | groupby server.ip_bytes | groupby client.oui'
@@ -1903,6 +1938,17 @@ soc:
- event_data.destination.port
- event_data.process.executable
- event_data.process.pid
':sigma:':
- soc_timestamp
- rule.name
- event.severity_label
- event_data.event.dataset
- event_data.source.ip
- event_data.source.port
- event_data.destination.host
- event_data.destination.port
- event_data.process.executable
- event_data.process.pid
':strelka:':
- soc_timestamp
- file.name
@@ -2076,11 +2122,11 @@ soc:
query: "so_detection.isEnabled:false"
- name: "Detection Type - Suricata (NIDS)"
query: "so_detection.language:suricata"
- name: "Detection Type - Sigma - All"
- name: "Detection Type - Sigma (Elastalert) - All"
query: "so_detection.language:sigma"
- name: "Detection Type - Sigma - Windows"
- name: "Detection Type - Sigma (Elastalert) - Windows"
query: 'so_detection.language:sigma AND so_detection.content: "*product: windows*"'
- name: "Detection Type - Yara (Strelka)"
- name: "Detection Type - YARA (Strelka)"
query: "so_detection.language:yara"
- name: "Security Onion - Grid Detections"
query: "so_detection.ruleset:securityonion-resources"

View File

@@ -8,6 +8,7 @@
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'docker/docker.map.jinja' import DOCKER %}
{% from 'soc/merged.map.jinja' import DOCKER_EXTRA_HOSTS %}
{% from 'soc/merged.map.jinja' import SOCMERGED %}
include:
- soc.config
@@ -31,6 +32,9 @@ so-soc:
- /nsm/soc/uploads:/nsm/soc/uploads:rw
- /opt/so/log/soc/:/opt/sensoroni/logs/:rw
- /opt/so/conf/soc/soc.json:/opt/sensoroni/sensoroni.json:ro
{% if SOCMERGED.telemetryEnabled and not GLOBALS.airgap %}
- /opt/so/conf/soc/analytics.js:/opt/sensoroni/html/js/analytics.js:ro
{% endif %}
- /opt/so/conf/soc/motd.md:/opt/sensoroni/html/motd.md:ro
- /opt/so/conf/soc/banner.md:/opt/sensoroni/html/login/banner.md:ro
- /opt/so/conf/soc/sigma_so_pipeline.yaml:/opt/sensoroni/sigma_so_pipeline.yaml:ro
@@ -67,6 +71,7 @@ so-soc:
- file: socdatadir
- file: soclogdir
- file: socconfig
- file: socanalytics
- file: socmotd
- file: socbanner
- file: soccustom

View File

@@ -0,0 +1,5 @@
(function(w,d,s,l,i){w[l]=w[l]||[];w[l].push({'gtm.start':
new Date().getTime(),event:'gtm.js'});var f=d.getElementsByTagName(s)[0],
j=d.createElement(s),dl=l!='dataLayer'?'&l='+l:'';j.async=true;j.src=
'https://www.googletagmanager.com/gtm.js?id='+i+dl;f.parentNode.insertBefore(j,f);
})(window,document,'script','dataLayer','GTM-TM46SL7T');

View File

@@ -12,6 +12,10 @@ To see all the latest features and fixes in this version of Security Onion, clic
Want the best hardware for your enterprise deployment? Check out our [enterprise appliances](https://securityonionsolutions.com/hardware/)!
## Premium Support
Experiencing difficulties and need priority support or remote assistance? We offer a [premium support plan](https://securityonionsolutions.com/support/) to assist corporate, educational, and government organizations.
## Customize This Space
Make this area your own by customizing the content in the [Config](/#/config?s=soc.files.soc.motd__md) interface.

View File

@@ -79,3 +79,12 @@ transformations:
- type: logsource
product: windows
category: driver_load
- id: linux_security_add-fields
type: add_condition
conditions:
event.module: 'system'
event.dataset: 'system.auth'
rule_conditions:
- type: logsource
product: linux
service: auth

View File

@@ -30,6 +30,11 @@
{# since cases is not a valid soc config item and only used for the map files, remove it from being placed in the config #}
{% do SOCMERGED.config.server.modules.pop('cases') %}
{# do not automatically enable Sigma rules if install is Eval or Import #}
{% if grains['role'] in ['so-eval', 'so-import'] %}
{% do SOCMERGED.config.server.modules.elastalertengine.update({'autoEnabledSigmaRules': []}) %}
{% endif %}
{# remove these modules if detections is disabled #}
{% if not SOCMERGED.config.server.client.detectionsEnabled %}
{% do SOCMERGED.config.server.modules.pop('elastalertengine') %}

View File

@@ -2,6 +2,11 @@ soc:
enabled:
description: You can enable or disable SOC.
advanced: True
telemetryEnabled:
title: SOC Telemetry
description: When this setting is enabled and the grid is not in airgap mode, SOC will provide feature usage data to the Security Onion development team via Google Analytics. This data helps Security Onion developers determine which product features are being used and can also provide insight into improving the user interface. When changing this setting, wait for the grid to fully synchronize and then perform a hard browser refresh on SOC, to force the browser cache to update and reflect the new setting.
global: True
helpLink: telemetry.html
files:
soc:
banner__md:
@@ -83,6 +88,11 @@ soc:
global: True
advanced: True
helpLink: sigma.html
autoEnabledSigmaRules:
description: 'Sigma rules to automatically enable on initial import. Format is $Ruleset+$Level - for example, for the core community ruleset and critical level rules: core+critical'
global: True
advanced: True
helpLink: sigma.html
denyRegex:
description: 'Regex used to filter imported Sigma rules. Deny regex takes precedence over the Allow regex setting.'
global: True

View File

@@ -11,6 +11,7 @@ telegraf:
quiet: 'false'
scripts:
eval:
- agentstatus.sh
- checkfiles.sh
- influxdbsize.sh
- lasthighstate.sh
@@ -23,6 +24,7 @@ telegraf:
- zeekcaptureloss.sh
- zeekloss.sh
standalone:
- agentstatus.sh
- checkfiles.sh
- eps.sh
- influxdbsize.sh
@@ -38,6 +40,7 @@ telegraf:
- zeekloss.sh
- features.sh
manager:
- agentstatus.sh
- influxdbsize.sh
- lasthighstate.sh
- os.sh
@@ -46,6 +49,7 @@ telegraf:
- sostatus.sh
- features.sh
managersearch:
- agentstatus.sh
- eps.sh
- influxdbsize.sh
- lasthighstate.sh

View File

@@ -56,6 +56,7 @@ so-telegraf:
- /opt/so/log/raid:/var/log/raid:ro
- /opt/so/log/sostatus:/var/log/sostatus:ro
- /opt/so/log/salt:/var/log/salt:ro
- /opt/so/log/agents:/var/log/agents:ro
{% if DOCKER.containers['so-telegraf'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-telegraf'].custom_bind_mounts %}
- {{ BIND }}

View File

@@ -0,0 +1,34 @@
#!/bin/bash
#
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
# if this script isn't already running
if [[ ! "`pidof -x $(basename $0) -o %PPID`" ]]; then
LOGFILE=/var/log/agents/agentstatus.log
# Check to see if the file is there yet so we don't break install verification since there is a 5 minute delay for this file to show up
if [ -f $LOGFILE ]; then
ONLINE=$(cat $LOGFILE | grep -wF online | awk '{print $2}' | tr -d ',')
ERROR=$(cat $LOGFILE | grep -wF error | awk '{print $2}' | tr -d ',')
INACTIVE=$(cat $LOGFILE | grep -wF inactive | awk '{print $2}' | tr -d ',')
OFFLINE=$(cat $LOGFILE | grep -wF offline | awk '{print $2}' | tr -d ',')
UPDATING=$(cat $LOGFILE | grep -wF updating | awk '{print $2}' | tr -d ',')
UNENROLLED=$(cat $LOGFILE | grep -wF unenrolled | awk '{print $2}' | tr -d ',')
OTHER=$(cat $LOGFILE | grep -wF other | awk '{print $2}' | tr -d ',')
EVENTS=$(cat $LOGFILE | grep -wF events | awk '{print $2}' | tr -d ',')
TOTAL=$(cat $LOGFILE | grep -wF total | awk '{print $2}' | tr -d ',')
ALL=$(cat $LOGFILE | grep -wF all | awk '{print $2}' | tr -d ',')
ACTIVE=$(cat $LOGFILE | grep -wF active | awk '{print $2}')
echo "agentstatus online=$ONLINE,error=$ERROR,inactive=$INACTIVE,offline=$OFFLINE,updating=$UPDATING,unenrolled=$UNENROLLED,other=$OTHER,events=$EVENTS,total=$TOTAL,all=$ALL,active=$ACTIVE"
fi
fi
exit 0
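As a rough illustration — the exact JSON layout written to /opt/so/log/agents/agentstatus.log by the so_fleetagent_status cron job, and the counts below, are assumptions — log lines such as "online": 5, "offline": 1, and "active": 5 would be reduced to a single line of fields for telegraf to collect:
agentstatus online=5,error=0,inactive=0,offline=1,updating=0,unenrolled=0,other=0,events=0,total=6,all=6,active=5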

View File

@@ -1259,6 +1259,10 @@ soc_pillar() {
" server:"\
" srvKey: '$SOCSRVKEY'"\
"" > "$soc_pillar_file"
if [[ $telemetry -ne 0 ]]; then
echo " telemetryEnabled: false" >> $soc_pillar_file
fi
}
telegraf_pillar() {

View File

@@ -447,6 +447,7 @@ if ! [[ -f $install_opt_file ]]; then
get_redirect
# Does the user want to allow access to the UI?
collect_so_allow
[[ ! $is_airgap ]] && whiptail_accept_telemetry
whiptail_end_settings
elif [[ $is_standalone ]]; then
waitforstate=true
@@ -468,6 +469,7 @@ if ! [[ -f $install_opt_file ]]; then
collect_webuser_inputs
get_redirect
collect_so_allow
[[ ! $is_airgap ]] && whiptail_accept_telemetry
whiptail_end_settings
elif [[ $is_manager ]]; then
info "Setting up as node type manager"
@@ -488,6 +490,7 @@ if ! [[ -f $install_opt_file ]]; then
collect_webuser_inputs
get_redirect
collect_so_allow
[[ ! $is_airgap ]] && whiptail_accept_telemetry
whiptail_end_settings
elif [[ $is_managersearch ]]; then
info "Setting up as node type managersearch"
@@ -508,6 +511,7 @@ if ! [[ -f $install_opt_file ]]; then
collect_webuser_inputs
get_redirect
collect_so_allow
[[ ! $is_airgap ]] && whiptail_accept_telemetry
whiptail_end_settings
elif [[ $is_sensor ]]; then
info "Setting up as node type sensor"
@@ -597,6 +601,7 @@ if ! [[ -f $install_opt_file ]]; then
collect_webuser_inputs
get_redirect
collect_so_allow
[[ ! $is_airgap ]] && whiptail_accept_telemetry
whiptail_end_settings
elif [[ $is_receiver ]]; then

View File

@@ -144,6 +144,26 @@ whiptail_cancel() {
exit 1
}
whiptail_accept_telemetry() {
[ -n "$TESTING" ] && return
read -r -d '' message <<- EOM
The Security Onion development team could use your help! Enabling SOC
Telemetry will help the team understand which UI features are being
used and enables informed prioritization of future development.
Adjust this setting at any time via the SOC Configuration screen.
Documentation: https://docs.securityonion.net/en/2.4/telemetry.html
Enable SOC Telemetry to help improve future releases?
EOM
whiptail --title "$whiptail_title" --yesno "$message" 15 75
telemetry=$?
}
whiptail_check_exitstatus() {
case $1 in
1)
@@ -431,6 +451,14 @@ whiptail_end_settings() {
done
fi
if [[ ! $is_airgap ]]; then
if [[ $telemetry -eq 0 ]]; then
__append_end_msg "SOC Telemetry: enabled"
else
__append_end_msg "SOC Telemetry: disabled"
fi
fi
# ADVANCED
if [[ $MANAGERADV == 'ADVANCED' ]]; then
__append_end_msg "Advanced Manager Settings:"