Compare commits

..

17 Commits

Author SHA1 Message Date
Matthew Wright
90eee49ab6 whitespace issue pt2 2026-02-19 16:35:35 -05:00
Matthew Wright
f025886b31 whitespace issue 2026-02-19 16:33:40 -05:00
Matthew Wright
7fa01f5fd5 added new funcs to so-yaml.py to support gemini tests 2026-02-19 16:20:44 -05:00
Jorge Reyes
93f52453b4 Merge pull request #15499 from Security-Onion-Solutions/reyesj2-patch-15
rework autosoup for intermediate upgrades
2026-02-19 09:08:00 -06:00
Jorge Reyes
a9307aa308 Clarify duration for Elasticsearch upgrade verification
Added a note about the potential duration of the Elasticsearch upgrade verification process.
2026-02-19 08:31:26 -06:00
coreyogburn
8fc3011f92 Merge pull request #15501 from Security-Onion-Solutions/cogburn/protocols
Add OpenAI Protocols
2026-02-18 14:34:10 -07:00
Corey Ogburn
911c9d56db Add OpenAI Protocols 2026-02-18 14:32:18 -07:00
Josh Patterson
c1273c3d2c Merge pull request #15500 from Security-Onion-Solutions/bravo
upgrade docker
2026-02-18 16:29:50 -05:00
Matthew Wright
3349c1a936 Merge pull request #15492 from Security-Onion-Solutions/mwright/investigate-refactor
Assistant: Investigated Query Toggle Filter
2026-02-18 15:04:33 -05:00
reyesj2
58c0a9183c unmount current agupdate dir, before final upgrade on airgap 2026-02-18 10:04:32 -06:00
Jorge Reyes
7dfd212519 Merge pull request #15497 from Security-Onion-Solutions/revert-15465-reyesj2/iso-soup
Revert "allow network installs to use ISO for faster soupin"
2026-02-18 10:04:16 -06:00
Jorge Reyes
b8fb0fa735 Revert "allow network installs to use ISO for faster soupin" 2026-02-18 10:02:24 -06:00
Jorge Reyes
e6f767b613 Merge pull request #15496 from Security-Onion-Solutions/revert-15468-reyesj2/iso-soup
Revert "don't set is_airgap when using nonairgap_useiso: not a true airgap sy…"
2026-02-18 10:02:13 -06:00
Jorge Reyes
d00fb4ccf7 Revert "don't set is_airgap when using nonairgap_useiso: not a true airgap sy…" 2026-02-18 09:42:12 -06:00
Josh Patterson
a29eff37a0 Merge pull request #15494 from Security-Onion-Solutions/bravo
fix sensor and heavynode first highstate failure
2026-02-18 09:32:37 -05:00
reyesj2
534a0ad41f clean up ES version compatibility check and autosoups 2026-02-17 16:20:11 -06:00
Matthew Wright
3d1a2c12ec add investigated query toggle filter 2026-02-17 13:17:12 -05:00
6 changed files with 691 additions and 131 deletions

View File

@@ -117,4 +117,4 @@ sos_docker_net:
com.docker.network.bridge.enable_ip_masquerade: 'true' com.docker.network.bridge.enable_ip_masquerade: 'true'
com.docker.network.bridge.enable_icc: 'true' com.docker.network.bridge.enable_icc: 'true'
com.docker.network.bridge.host_binding_ipv4: '0.0.0.0' com.docker.network.bridge.host_binding_ipv4: '0.0.0.0'
- unless: ip l | grep sobridge - unless: 'docker network ls | grep sobridge'

View File

@@ -9,6 +9,7 @@ import os
import sys import sys
import time import time
import yaml import yaml
import json
lockFile = "/tmp/so-yaml.lock" lockFile = "/tmp/so-yaml.lock"
@@ -16,19 +17,24 @@ lockFile = "/tmp/so-yaml.lock"
def showUsage(args): def showUsage(args):
print('Usage: {} <COMMAND> <YAML_FILE> [ARGS...]'.format(sys.argv[0]), file=sys.stderr) print('Usage: {} <COMMAND> <YAML_FILE> [ARGS...]'.format(sys.argv[0]), file=sys.stderr)
print(' General commands:', file=sys.stderr) print(' General commands:', file=sys.stderr)
print(' append - Append a list item to a yaml key, if it exists and is a list. Requires KEY and LISTITEM args.', file=sys.stderr) print(' append - Append a list item to a yaml key, if it exists and is a list. Requires KEY and LISTITEM args.', file=sys.stderr)
print(' removelistitem - Remove a list item from a yaml key, if it exists and is a list. Requires KEY and LISTITEM args.', file=sys.stderr) print(' appendlistobject - Append an object to a yaml list key. Requires KEY and JSON_OBJECT args.', file=sys.stderr)
print(' add - Add a new key and set its value. Fails if key already exists. Requires KEY and VALUE args.', file=sys.stderr) print(' removelistitem - Remove a list item from a yaml key, if it exists and is a list. Requires KEY and LISTITEM args.', file=sys.stderr)
print(' get - Displays (to stdout) the value stored in the given key. Requires KEY arg.', file=sys.stderr) print(' replacelistobject - Replace a list object based on a condition. Requires KEY, CONDITION_FIELD, CONDITION_VALUE, and JSON_OBJECT args.', file=sys.stderr)
print(' remove - Removes a yaml key, if it exists. Requires KEY arg.', file=sys.stderr) print(' add - Add a new key and set its value. Fails if key already exists. Requires KEY and VALUE args.', file=sys.stderr)
print(' replace - Replaces (or adds) a new key and set its value. Requires KEY and VALUE args.', file=sys.stderr) print(' get - Displays (to stdout) the value stored in the given key. Requires KEY arg.', file=sys.stderr)
print(' help - Prints this usage information.', file=sys.stderr) print(' remove - Removes a yaml key, if it exists. Requires KEY arg.', file=sys.stderr)
print(' replace - Replaces (or adds) a new key and set its value. Requires KEY and VALUE args.', file=sys.stderr)
print(' help - Prints this usage information.', file=sys.stderr)
print('', file=sys.stderr) print('', file=sys.stderr)
print(' Where:', file=sys.stderr) print(' Where:', file=sys.stderr)
print(' YAML_FILE - Path to the file that will be modified. Ex: /opt/so/conf/service/conf.yaml', file=sys.stderr) print(' YAML_FILE - Path to the file that will be modified. Ex: /opt/so/conf/service/conf.yaml', file=sys.stderr)
print(' KEY - YAML key, does not support \' or " characters at this time. Ex: level1.level2', file=sys.stderr) print(' KEY - YAML key, does not support \' or " characters at this time. Ex: level1.level2', file=sys.stderr)
print(' VALUE - Value to set for a given key. Can be a literal value or file:<path> to load from a YAML file.', file=sys.stderr) print(' VALUE - Value to set for a given key. Can be a literal value or file:<path> to load from a YAML file.', file=sys.stderr)
print(' LISTITEM - Item to append to a given key\'s list value. Can be a literal value or file:<path> to load from a YAML file.', file=sys.stderr) print(' LISTITEM - Item to append to a given key\'s list value. Can be a literal value or file:<path> to load from a YAML file.', file=sys.stderr)
print(' JSON_OBJECT - JSON string representing an object to append to a list.', file=sys.stderr)
print(' CONDITION_FIELD - Field name to match in list items (e.g., "name").', file=sys.stderr)
print(' CONDITION_VALUE - Value to match for the condition field.', file=sys.stderr)
sys.exit(1) sys.exit(1)
@@ -122,6 +128,52 @@ def append(args):
return 0 return 0
def appendListObjectItem(content, key, listObject):
    """Append listObject to the list stored at the dot-delimited key in content.

    Recurses one level per dot in the key. Returns 0 on success and 1 on
    error (key missing at any level, or the value at the key is not a list);
    an error message is printed to stderr in the failure cases.
    """
    pieces = key.split(".", 1)
    if len(pieces) > 1:
        try:
            child = content[pieces[0]]
        except KeyError:
            # Intermediate key missing: report instead of raising an uncaught KeyError.
            print("The key provided does not exist. No action was taken on the file.", file=sys.stderr)
            return 1
        # Propagate the nested result so callers can detect deep failures.
        return appendListObjectItem(child, pieces[1], listObject)
    try:
        if not isinstance(content[key], list):
            raise AttributeError("Value is not a list")
        content[key].append(listObject)
        return 0
    except AttributeError:
        print("The existing value for the given key is not a list. No action was taken on the file.", file=sys.stderr)
        return 1
    except KeyError:
        print("The key provided does not exist. No action was taken on the file.", file=sys.stderr)
        return 1
def appendlistobject(args):
    """CLI command: append a JSON object to the list at KEY in a YAML file.

    args: [filename, key, json_object_string]. Returns 0 on success, 1 on
    any error (bad arg count, invalid JSON, non-object JSON, missing key,
    or non-list value at the key).
    """
    if len(args) != 3:
        print('Missing filename, key arg, or JSON object to append', file=sys.stderr)
        showUsage(None)
        return 1
    filename = args[0]
    key = args[1]
    jsonString = args[2]
    try:
        # Parse the JSON string into a Python dictionary
        listObject = json.loads(jsonString)
    except json.JSONDecodeError as e:
        print(f'Invalid JSON string: {e}', file=sys.stderr)
        return 1
    # Verify that the parsed content is a dictionary (object)
    if not isinstance(listObject, dict):
        print('The JSON string must represent an object (dictionary), not an array or primitive value.', file=sys.stderr)
        return 1
    content = loadYaml(filename)
    # Only persist the file when the append succeeded; also surface the
    # failure code to the caller (mirrors replacelistobject's handling).
    result = appendListObjectItem(content, key, listObject)
    if result != 1:
        writeYaml(filename, content)
    return result if result is not None else 0
def removelistitem(args): def removelistitem(args):
if len(args) != 3: if len(args) != 3:
print('Missing filename, key arg, or list item to remove', file=sys.stderr) print('Missing filename, key arg, or list item to remove', file=sys.stderr)
@@ -139,6 +191,68 @@ def removelistitem(args):
return 0 return 0
def replaceListObjectByCondition(content, key, conditionField, conditionValue, newObject):
    """Replace the first dict in the list at the dot-delimited key whose
    conditionField equals conditionValue with newObject.

    Returns 0 on success and 1 on error (key missing at any level, value at
    the key is not a list, or no matching item); errors are printed to stderr.
    """
    pieces = key.split(".", 1)
    if len(pieces) > 1:
        try:
            child = content[pieces[0]]
        except KeyError:
            # Intermediate key missing: report instead of raising an uncaught KeyError.
            print("The key provided does not exist. No action was taken on the file.", file=sys.stderr)
            return 1
        # Propagate the nested result so the caller can skip the file write on failure.
        return replaceListObjectByCondition(child, pieces[1], conditionField, conditionValue, newObject)
    try:
        if not isinstance(content[key], list):
            raise AttributeError("Value is not a list")
        # Find and replace the first item that matches the condition
        for i, item in enumerate(content[key]):
            if isinstance(item, dict) and item.get(conditionField) == conditionValue:
                content[key][i] = newObject
                return 0
        print(f"No list item found with {conditionField}={conditionValue}. No action was taken on the file.", file=sys.stderr)
        return 1
    except AttributeError:
        print("The existing value for the given key is not a list. No action was taken on the file.", file=sys.stderr)
        return 1
    except KeyError:
        print("The key provided does not exist. No action was taken on the file.", file=sys.stderr)
        return 1
def replacelistobject(args):
    """CLI command: replace a matching object inside a YAML list.

    Expects args = [filename, key, condition_field, condition_value,
    json_object_string]. Writes the file only when the replacement
    succeeded; returns 0 on success, 1 on any error.
    """
    if len(args) != 5:
        print('Missing filename, key arg, condition field, condition value, or JSON object', file=sys.stderr)
        showUsage(None)
        return 1

    filename, key, conditionField, conditionValue, jsonString = args

    # Decode the replacement object from its JSON string form.
    try:
        newObject = json.loads(jsonString)
    except json.JSONDecodeError as e:
        print(f'Invalid JSON string: {e}', file=sys.stderr)
        return 1

    # Lists and primitives are rejected: only an object may replace a list item.
    if not isinstance(newObject, dict):
        print('The JSON string must represent an object (dictionary), not an array or primitive value.', file=sys.stderr)
        return 1

    content = loadYaml(filename)
    outcome = replaceListObjectByCondition(content, key, conditionField, conditionValue, newObject)
    if outcome != 1:
        writeYaml(filename, content)
    return 0 if outcome is None else outcome
def addKey(content, key, value): def addKey(content, key, value):
pieces = key.split(".", 1) pieces = key.split(".", 1)
if len(pieces) > 1: if len(pieces) > 1:
@@ -247,7 +361,9 @@ def main():
"help": showUsage, "help": showUsage,
"add": add, "add": add,
"append": append, "append": append,
"appendlistobject": appendlistobject,
"removelistitem": removelistitem, "removelistitem": removelistitem,
"replacelistobject": replacelistobject,
"get": get, "get": get,
"remove": remove, "remove": remove,
"replace": replace, "replace": replace,

View File

@@ -580,3 +580,340 @@ class TestRemoveListItem(unittest.TestCase):
soyaml.main() soyaml.main()
sysmock.assert_called() sysmock.assert_called()
self.assertEqual("The existing value for the given key is not a list. No action was taken on the file.\n", mock_stderr.getvalue()) self.assertEqual("The existing value for the given key is not a list. No action was taken on the file.\n", mock_stderr.getvalue())
class TestAppendListObject(unittest.TestCase):
    """Tests for the appendlistobject command: success paths at several
    nesting depths, plus argument, JSON, key, and type error handling."""

    def test_appendlistobject_missing_arg(self):
        # Too few args: usage is printed to stderr and sys.exit is invoked.
        with patch('sys.exit', new=MagicMock()) as sysmock:
            with patch('sys.stderr', new=StringIO()) as mock_stderr:
                sys.argv = ["cmd", "help"]
                soyaml.appendlistobject(["file", "key"])
                sysmock.assert_called()
                self.assertIn("Missing filename, key arg, or JSON object to append", mock_stderr.getvalue())

    def test_appendlistobject(self):
        # Append an object to a top-level list key and compare the rewritten YAML.
        filename = "/tmp/so-yaml_test-appendlistobject.yaml"
        file = open(filename, "w")
        file.write("{key1: { child1: 123 }, key2: [{name: item1, value: 10}]}")
        file.close()

        json_obj = '{"name": "item2", "value": 20}'
        soyaml.appendlistobject([filename, "key2", json_obj])

        file = open(filename, "r")
        actual = file.read()
        file.close()

        expected = "key1:\n child1: 123\nkey2:\n- name: item1\n value: 10\n- name: item2\n value: 20\n"
        self.assertEqual(actual, expected)

    def test_appendlistobject_nested(self):
        # Append to a list one level deep (dot-delimited key).
        filename = "/tmp/so-yaml_test-appendlistobject.yaml"
        file = open(filename, "w")
        file.write("{key1: { child1: [{name: a, id: 1}], child2: abc }, key2: false}")
        file.close()

        json_obj = '{"name": "b", "id": 2}'
        soyaml.appendlistobject([filename, "key1.child1", json_obj])

        file = open(filename, "r")
        actual = file.read()
        file.close()

        # YAML doesn't guarantee key order in dictionaries, so check for content
        self.assertIn("child1:", actual)
        self.assertIn("name: a", actual)
        self.assertIn("id: 1", actual)
        self.assertIn("name: b", actual)
        self.assertIn("id: 2", actual)
        self.assertIn("child2: abc", actual)
        self.assertIn("key2: false", actual)

    def test_appendlistobject_nested_deep(self):
        # Append to a list two levels deep.
        filename = "/tmp/so-yaml_test-appendlistobject.yaml"
        file = open(filename, "w")
        file.write("{key1: { child1: 123, child2: { deep1: 45, deep2: [{x: 1}] } }, key2: false}")
        file.close()

        json_obj = '{"x": 2, "y": 3}'
        soyaml.appendlistobject([filename, "key1.child2.deep2", json_obj])

        file = open(filename, "r")
        actual = file.read()
        file.close()

        expected = "key1:\n child1: 123\n child2:\n deep1: 45\n deep2:\n - x: 1\n - x: 2\n y: 3\nkey2: false\n"
        self.assertEqual(actual, expected)

    def test_appendlistobject_invalid_json(self):
        # Malformed JSON argument is rejected with exit code 1.
        filename = "/tmp/so-yaml_test-appendlistobject.yaml"
        file = open(filename, "w")
        file.write("{key1: [{name: item1}]}")
        file.close()

        with patch('sys.stderr', new=StringIO()) as mock_stderr:
            result = soyaml.appendlistobject([filename, "key1", "{invalid json"])
            self.assertEqual(result, 1)
            self.assertIn("Invalid JSON string:", mock_stderr.getvalue())

    def test_appendlistobject_not_dict(self):
        filename = "/tmp/so-yaml_test-appendlistobject.yaml"
        file = open(filename, "w")
        file.write("{key1: [{name: item1}]}")
        file.close()

        with patch('sys.stderr', new=StringIO()) as mock_stderr:
            # Try to append an array instead of an object
            result = soyaml.appendlistobject([filename, "key1", "[1, 2, 3]"])
            self.assertEqual(result, 1)
            self.assertIn("The JSON string must represent an object (dictionary)", mock_stderr.getvalue())

    def test_appendlistobject_not_dict_primitive(self):
        filename = "/tmp/so-yaml_test-appendlistobject.yaml"
        file = open(filename, "w")
        file.write("{key1: [{name: item1}]}")
        file.close()

        with patch('sys.stderr', new=StringIO()) as mock_stderr:
            # Try to append a primitive value
            result = soyaml.appendlistobject([filename, "key1", "123"])
            self.assertEqual(result, 1)
            self.assertIn("The JSON string must represent an object (dictionary)", mock_stderr.getvalue())

    def test_appendlistobject_key_noexist(self):
        # Missing top-level key: error goes to stderr via the main() entry point.
        filename = "/tmp/so-yaml_test-appendlistobject.yaml"
        file = open(filename, "w")
        file.write("{key1: [{name: item1}]}")
        file.close()

        with patch('sys.exit', new=MagicMock()) as sysmock:
            with patch('sys.stderr', new=StringIO()) as mock_stderr:
                sys.argv = ["cmd", "appendlistobject", filename, "key2", '{"name": "item2"}']
                soyaml.main()
                sysmock.assert_called()
                self.assertEqual("The key provided does not exist. No action was taken on the file.\n", mock_stderr.getvalue())

    def test_appendlistobject_key_noexist_deep(self):
        # Missing nested key: same error path, one level down.
        filename = "/tmp/so-yaml_test-appendlistobject.yaml"
        file = open(filename, "w")
        file.write("{key1: { child1: [{name: a}] }}")
        file.close()

        with patch('sys.exit', new=MagicMock()) as sysmock:
            with patch('sys.stderr', new=StringIO()) as mock_stderr:
                sys.argv = ["cmd", "appendlistobject", filename, "key1.child2", '{"name": "b"}']
                soyaml.main()
                sysmock.assert_called()
                self.assertEqual("The key provided does not exist. No action was taken on the file.\n", mock_stderr.getvalue())

    def test_appendlistobject_key_nonlist(self):
        # Target key holds a scalar, not a list: reject with message.
        filename = "/tmp/so-yaml_test-appendlistobject.yaml"
        file = open(filename, "w")
        file.write("{key1: { child1: 123 }}")
        file.close()

        with patch('sys.exit', new=MagicMock()) as sysmock:
            with patch('sys.stderr', new=StringIO()) as mock_stderr:
                sys.argv = ["cmd", "appendlistobject", filename, "key1", '{"name": "item"}']
                soyaml.main()
                sysmock.assert_called()
                self.assertEqual("The existing value for the given key is not a list. No action was taken on the file.\n", mock_stderr.getvalue())

    def test_appendlistobject_key_nonlist_deep(self):
        # Deeply nested scalar target: same non-list rejection.
        filename = "/tmp/so-yaml_test-appendlistobject.yaml"
        file = open(filename, "w")
        file.write("{key1: { child1: 123, child2: { deep1: 45 } }}")
        file.close()

        with patch('sys.exit', new=MagicMock()) as sysmock:
            with patch('sys.stderr', new=StringIO()) as mock_stderr:
                sys.argv = ["cmd", "appendlistobject", filename, "key1.child2.deep1", '{"name": "item"}']
                soyaml.main()
                sysmock.assert_called()
                self.assertEqual("The existing value for the given key is not a list. No action was taken on the file.\n", mock_stderr.getvalue())
class TestReplaceListObject(unittest.TestCase):
    """Tests for the replacelistobject command: condition-based replacement at
    several nesting depths, plus argument, JSON, key, and type error handling."""

    def test_replacelistobject_missing_arg(self):
        # Too few args: usage is printed to stderr and sys.exit is invoked.
        with patch('sys.exit', new=MagicMock()) as sysmock:
            with patch('sys.stderr', new=StringIO()) as mock_stderr:
                sys.argv = ["cmd", "help"]
                soyaml.replacelistobject(["file", "key", "field"])
                sysmock.assert_called()
                self.assertIn("Missing filename, key arg, condition field, condition value, or JSON object", mock_stderr.getvalue())

    def test_replacelistobject(self):
        # Replace the item whose name matches; the new object may add fields.
        filename = "/tmp/so-yaml_test-replacelistobject.yaml"
        file = open(filename, "w")
        file.write("{key1: [{name: item1, value: 10}, {name: item2, value: 20}]}")
        file.close()

        json_obj = '{"name": "item2", "value": 25, "extra": "field"}'
        soyaml.replacelistobject([filename, "key1", "name", "item2", json_obj])

        file = open(filename, "r")
        actual = file.read()
        file.close()

        expected = "key1:\n- name: item1\n value: 10\n- extra: field\n name: item2\n value: 25\n"
        self.assertEqual(actual, expected)

    def test_replacelistobject_nested(self):
        # Replace inside a list one level deep (dot-delimited key).
        filename = "/tmp/so-yaml_test-replacelistobject.yaml"
        file = open(filename, "w")
        file.write("{key1: { child1: [{id: '1', status: active}, {id: '2', status: inactive}] }}")
        file.close()

        json_obj = '{"id": "2", "status": "active", "updated": true}'
        soyaml.replacelistobject([filename, "key1.child1", "id", "2", json_obj])

        file = open(filename, "r")
        actual = file.read()
        file.close()

        expected = "key1:\n child1:\n - id: '1'\n status: active\n - id: '2'\n status: active\n updated: true\n"
        self.assertEqual(actual, expected)

    def test_replacelistobject_nested_deep(self):
        # Replace inside a list two levels deep.
        filename = "/tmp/so-yaml_test-replacelistobject.yaml"
        file = open(filename, "w")
        file.write("{key1: { child1: 123, child2: { deep1: 45, deep2: [{name: a, val: 1}, {name: b, val: 2}] } }}")
        file.close()

        json_obj = '{"name": "b", "val": 99}'
        soyaml.replacelistobject([filename, "key1.child2.deep2", "name", "b", json_obj])

        file = open(filename, "r")
        actual = file.read()
        file.close()

        expected = "key1:\n child1: 123\n child2:\n deep1: 45\n deep2:\n - name: a\n val: 1\n - name: b\n val: 99\n"
        self.assertEqual(actual, expected)

    def test_replacelistobject_invalid_json(self):
        # Malformed JSON argument is rejected with exit code 1.
        filename = "/tmp/so-yaml_test-replacelistobject.yaml"
        file = open(filename, "w")
        file.write("{key1: [{name: item1}]}")
        file.close()

        with patch('sys.stderr', new=StringIO()) as mock_stderr:
            result = soyaml.replacelistobject([filename, "key1", "name", "item1", "{invalid json"])
            self.assertEqual(result, 1)
            self.assertIn("Invalid JSON string:", mock_stderr.getvalue())

    def test_replacelistobject_not_dict(self):
        # A JSON array replacement payload is rejected.
        filename = "/tmp/so-yaml_test-replacelistobject.yaml"
        file = open(filename, "w")
        file.write("{key1: [{name: item1}]}")
        file.close()

        with patch('sys.stderr', new=StringIO()) as mock_stderr:
            result = soyaml.replacelistobject([filename, "key1", "name", "item1", "[1, 2, 3]"])
            self.assertEqual(result, 1)
            self.assertIn("The JSON string must represent an object (dictionary)", mock_stderr.getvalue())

    def test_replacelistobject_condition_not_found(self):
        # No list item matches the condition: error is reported and the
        # file must remain unmodified.
        filename = "/tmp/so-yaml_test-replacelistobject.yaml"
        file = open(filename, "w")
        file.write("{key1: [{name: item1, value: 10}, {name: item2, value: 20}]}")
        file.close()

        with patch('sys.stderr', new=StringIO()) as mock_stderr:
            json_obj = '{"name": "item3", "value": 30}'
            result = soyaml.replacelistobject([filename, "key1", "name", "item3", json_obj])
            self.assertEqual(result, 1)
            self.assertIn("No list item found with name=item3", mock_stderr.getvalue())

        # Verify file was not modified
        file = open(filename, "r")
        actual = file.read()
        file.close()
        self.assertIn("item1", actual)
        self.assertIn("item2", actual)
        self.assertNotIn("item3", actual)

    def test_replacelistobject_key_noexist(self):
        # Missing top-level key: error goes to stderr via the main() entry point.
        filename = "/tmp/so-yaml_test-replacelistobject.yaml"
        file = open(filename, "w")
        file.write("{key1: [{name: item1}]}")
        file.close()

        with patch('sys.exit', new=MagicMock()) as sysmock:
            with patch('sys.stderr', new=StringIO()) as mock_stderr:
                sys.argv = ["cmd", "replacelistobject", filename, "key2", "name", "item1", '{"name": "item2"}']
                soyaml.main()
                sysmock.assert_called()
                self.assertEqual("The key provided does not exist. No action was taken on the file.\n", mock_stderr.getvalue())

    def test_replacelistobject_key_noexist_deep(self):
        # Missing nested key: same error path, one level down.
        filename = "/tmp/so-yaml_test-replacelistobject.yaml"
        file = open(filename, "w")
        file.write("{key1: { child1: [{name: a}] }}")
        file.close()

        with patch('sys.exit', new=MagicMock()) as sysmock:
            with patch('sys.stderr', new=StringIO()) as mock_stderr:
                sys.argv = ["cmd", "replacelistobject", filename, "key1.child2", "name", "a", '{"name": "b"}']
                soyaml.main()
                sysmock.assert_called()
                self.assertEqual("The key provided does not exist. No action was taken on the file.\n", mock_stderr.getvalue())

    def test_replacelistobject_key_nonlist(self):
        # Target key holds a mapping, not a list: reject with message.
        filename = "/tmp/so-yaml_test-replacelistobject.yaml"
        file = open(filename, "w")
        file.write("{key1: { child1: 123 }}")
        file.close()

        with patch('sys.exit', new=MagicMock()) as sysmock:
            with patch('sys.stderr', new=StringIO()) as mock_stderr:
                sys.argv = ["cmd", "replacelistobject", filename, "key1", "name", "item", '{"name": "item"}']
                soyaml.main()
                sysmock.assert_called()
                self.assertEqual("The existing value for the given key is not a list. No action was taken on the file.\n", mock_stderr.getvalue())

    def test_replacelistobject_key_nonlist_deep(self):
        # Deeply nested scalar target: same non-list rejection.
        filename = "/tmp/so-yaml_test-replacelistobject.yaml"
        file = open(filename, "w")
        file.write("{key1: { child1: 123, child2: { deep1: 45 } }}")
        file.close()

        with patch('sys.exit', new=MagicMock()) as sysmock:
            with patch('sys.stderr', new=StringIO()) as mock_stderr:
                sys.argv = ["cmd", "replacelistobject", filename, "key1.child2.deep1", "name", "item", '{"name": "item"}']
                soyaml.main()
                sysmock.assert_called()
                self.assertEqual("The existing value for the given key is not a list. No action was taken on the file.\n", mock_stderr.getvalue())

    def test_replacelistobject_string_condition_value(self):
        # Condition value compared as a string against a string field.
        filename = "/tmp/so-yaml_test-replacelistobject.yaml"
        file = open(filename, "w")
        file.write("{key1: [{name: item1, value: 10}, {name: item2, value: 20}]}")
        file.close()

        json_obj = '{"name": "item1", "value": 15}'
        soyaml.replacelistobject([filename, "key1", "name", "item1", json_obj])

        file = open(filename, "r")
        actual = file.read()
        file.close()

        expected = "key1:\n- name: item1\n value: 15\n- name: item2\n value: 20\n"
        self.assertEqual(actual, expected)

    def test_replacelistobject_numeric_condition_value(self):
        # ids are quoted in the fixture, so the match is still string-typed.
        filename = "/tmp/so-yaml_test-replacelistobject.yaml"
        file = open(filename, "w")
        file.write("{key1: [{id: '1', status: active}, {id: '2', status: inactive}]}")
        file.close()

        json_obj = '{"id": "1", "status": "updated"}'
        soyaml.replacelistobject([filename, "key1", "id", "1", json_obj])

        file = open(filename, "r")
        actual = file.read()
        file.close()

        expected = "key1:\n- id: '1'\n status: updated\n- id: '2'\n status: inactive\n"
        self.assertEqual(actual, expected)

View File

@@ -93,6 +93,10 @@ check_err() {
161) 161)
echo 'Required intermediate Elasticsearch upgrade not complete' echo 'Required intermediate Elasticsearch upgrade not complete'
;; ;;
170)
echo "Intermediate upgrade completed successfully to $next_step_so_version, but next soup to Security Onion $originally_requested_so_version could not be started automatically."
echo "Start soup again manually to continue the upgrade to Security Onion $originally_requested_so_version."
;;
*) *)
echo 'Unhandled error' echo 'Unhandled error'
echo "$err_msg" echo "$err_msg"
@@ -154,7 +158,7 @@ EOF
echo "Ensure you verify the ISO that you downloaded." echo "Ensure you verify the ISO that you downloaded."
exit 0 exit 0
else else
echo "Device has been mounted!" echo "Device has been mounted! $(cat /tmp/soagupdate/SecurityOnion/VERSION)"
fi fi
else else
echo "Could not find Security Onion ISO content at ${ISOLOC}" echo "Could not find Security Onion ISO content at ${ISOLOC}"
@@ -165,7 +169,7 @@ EOF
} }
airgap_update_dockers() { airgap_update_dockers() {
if [[ $is_airgap -eq 0 ]] || [[ $nonairgap_useiso -eq 0 ]]; then if [[ $is_airgap -eq 0 ]] || [[ ! -z "$ISOLOC" ]]; then
# Let's copy the tarball # Let's copy the tarball
if [[ ! -f $AGDOCKER/registry.tar ]]; then if [[ ! -f $AGDOCKER/registry.tar ]]; then
echo "Unable to locate registry. Exiting" echo "Unable to locate registry. Exiting"
@@ -200,24 +204,13 @@ update_registry() {
check_airgap() { check_airgap() {
# See if this is an airgap install # See if this is an airgap install
AIRGAP=$(cat /opt/so/saltstack/local/pillar/global/soc_global.sls | grep airgap: | awk '{print $2}' | tr '[:upper:]' '[:lower:]') AIRGAP=$(cat /opt/so/saltstack/local/pillar/global/soc_global.sls | grep airgap: | awk '{print $2}' | tr '[:upper:]' '[:lower:]')
if [[ ! -z "$ISOLOC" ]]; then
# flag to use ISO for non-airgap installs, won't be used everywhere is_airgap -eq 0 is used. Used to speed up network soups by using local storage for large files.
nonairgap_useiso=0
else
nonairgap_useiso=1
fi
if [[ "$AIRGAP" == "true" ]]; then if [[ "$AIRGAP" == "true" ]]; then
is_airgap=0 is_airgap=0
else
is_airgap=1
fi
# use ISO if its airgap install OR ISOLOC was set with -f <path>
if [[ "$AIRGAP" == "true" ]] || [[ $nonairgap_useiso -eq 0 ]]; then
UPDATE_DIR=/tmp/soagupdate/SecurityOnion UPDATE_DIR=/tmp/soagupdate/SecurityOnion
AGDOCKER=/tmp/soagupdate/docker AGDOCKER=/tmp/soagupdate/docker
AGREPO=/tmp/soagupdate/minimal/Packages AGREPO=/tmp/soagupdate/minimal/Packages
else
is_airgap=1
fi fi
} }
@@ -1398,7 +1391,7 @@ so-yaml.py removelistitem /etc/salt/master file_roots.base /opt/so/rules/nids
} }
determine_elastic_agent_upgrade() { determine_elastic_agent_upgrade() {
if [[ $is_airgap -eq 0 ]] || [[ $nonairgap_useiso -eq 0 ]]; then if [[ $is_airgap -eq 0 ]]; then
update_elastic_agent_airgap update_elastic_agent_airgap
else else
set +e set +e
@@ -1695,115 +1688,218 @@ verify_latest_update_script() {
verify_es_version_compatibility() { verify_es_version_compatibility() {
local es_required_version_statefile="/opt/so/state/so_es_required_upgrade_version.txt" local es_required_version_statefile_base="/opt/so/state/so_es_required_upgrade_version"
local es_verification_script="/tmp/so_intermediate_upgrade_verification.sh" local es_verification_script="/tmp/so_intermediate_upgrade_verification.sh"
# supported upgrade paths for SO-ES versions local is_active_intermediate_upgrade=1
declare -A es_upgrade_map=( # supported upgrade paths for SO-ES versions
["8.14.3"]="8.17.3 8.18.4 8.18.6 8.18.8" declare -A es_upgrade_map=(
["8.17.3"]="8.18.4 8.18.6 8.18.8" ["8.14.3"]="8.17.3 8.18.4 8.18.6 8.18.8"
["8.18.4"]="8.18.6 8.18.8 9.0.8" ["8.17.3"]="8.18.4 8.18.6 8.18.8"
["8.18.6"]="8.18.8 9.0.8" ["8.18.4"]="8.18.6 8.18.8 9.0.8"
["8.18.8"]="9.0.8" ["8.18.6"]="8.18.8 9.0.8"
) ["8.18.8"]="9.0.8"
)
# Elasticsearch MUST upgrade through these versions # Elasticsearch MUST upgrade through these versions
declare -A es_to_so_version=( declare -A es_to_so_version=(
["8.18.8"]="2.4.190-20251024" ["8.18.8"]="2.4.190-20251024"
) )
# Get current Elasticsearch version # Get current Elasticsearch version
if es_version_raw=$(so-elasticsearch-query / --fail --retry 5 --retry-delay 10); then if es_version_raw=$(so-elasticsearch-query / --fail --retry 5 --retry-delay 10); then
es_version=$(echo "$es_version_raw" | jq -r '.version.number' ) es_version=$(echo "$es_version_raw" | jq -r '.version.number' )
else
echo "Could not determine current Elasticsearch version to validate compatibility with post soup Elasticsearch version."
exit 160
fi
if ! target_es_version=$(so-yaml.py get $UPDATE_DIR/salt/elasticsearch/defaults.yaml elasticsearch.version | sed -n '1p'); then
# so-yaml.py failed to get the ES version from upgrade versions elasticsearch/defaults.yaml file. Likely they are upgrading to an SO version older than 2.4.110 prior to the ES version pinning and should be OKAY to continue with the upgrade.
# if so-yaml.py failed to get the ES version AND the version we are upgrading to is newer than 2.4.110 then we should bail
if [[ $(cat $UPDATE_DIR/VERSION | cut -d'.' -f3) > 110 ]]; then
echo "Couldn't determine the target Elasticsearch version (post soup version) to ensure compatibility with current Elasticsearch version. Exiting"
exit 160
fi
# allow upgrade to version < 2.4.110 without checking ES version compatibility
return 0
fi
# if this statefile exists then we have done an intermediate upgrade and we need to ensure that ALL ES nodes have been upgraded to the version in the statefile before allowing soup to continue
if [[ -f "$es_required_version_statefile" ]]; then
# required so verification script should have already been created
if [[ ! -f "$es_verification_script" ]]; then
create_intermediate_upgrade_verification_script $es_verification_script
fi
local es_required_version_statefile_value=$(cat $es_required_version_statefile)
echo -e "\n##############################################################################################################################\n"
echo "A previously required intermediate Elasticsearch upgrade was detected. Verifying that all Searchnodes/Heavynodes have successfully upgraded Elasticsearch to $es_required_version_statefile_value before proceeding with soup to avoid potential data loss!"
# create script using version in statefile
timeout --foreground 4000 bash "$es_verification_script" "$es_required_version_statefile_value" "$es_required_version_statefile"
if [[ $? -ne 0 ]]; then
echo -e "\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
echo "A previous required intermediate Elasticsearch upgrade to $es_required_version_statefile_value has yet to successfully complete across the grid. Please allow time for all Searchnodes/Heavynodes to have upgraded Elasticsearch to $es_required_version_statefile_value before running soup again to avoid potential data loss!"
echo -e "\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
exit 161
fi
echo -e "\n##############################################################################################################################\n"
fi
if [[ " ${es_upgrade_map[$es_version]} " =~ " $target_es_version " || "$es_version" == "$target_es_version" ]]; then
# supported upgrade
return 0
else
compatible_versions=${es_upgrade_map[$es_version]}
if [[ -z "$compatible_versions" ]]; then
# If current ES version is not explicitly defined in the upgrade map, we know they have an intermediate upgrade to do.
# We default to the lowest ES version defined in es_to_so_version as $first_es_required_version
local first_es_required_version=$(printf '%s\n' "${!es_to_so_version[@]}" | sort -V | head -n1)
next_step_so_version=${es_to_so_version[$first_es_required_version]}
required_es_upgrade_version="$first_es_required_version"
else else
next_step_so_version=${es_to_so_version[${compatible_versions##* }]} echo "Could not determine current Elasticsearch version to validate compatibility with post soup Elasticsearch version."
required_es_upgrade_version="${compatible_versions##* }"
exit 160
fi fi
echo -e "\n##############################################################################################################################\n"
echo -e "You are currently running Security Onion $INSTALLEDVERSION. You will need to update to version $next_step_so_version before updating to $(cat $UPDATE_DIR/VERSION).\n"
echo "$required_es_upgrade_version" > "$es_required_version_statefile" if ! target_es_version_raw=$(so-yaml.py get $UPDATE_DIR/salt/elasticsearch/defaults.yaml elasticsearch.version); then
# so-yaml.py failed to get the ES version from upgrade versions elasticsearch/defaults.yaml file. Likely they are upgrading to an SO version older than 2.4.110 prior to the ES version pinning and should be OKAY to continue with the upgrade.
# We expect to upgrade to the latest compatiable minor version of ES # if so-yaml.py failed to get the ES version AND the version we are upgrading to is newer than 2.4.110 then we should bail
create_intermediate_upgrade_verification_script $es_verification_script if [[ $(cat $UPDATE_DIR/VERSION | cut -d'.' -f3) > 110 ]]; then
echo "Couldn't determine the target Elasticsearch version (post soup version) to ensure compatibility with current Elasticsearch version. Exiting"
if [[ $is_airgap -eq 0 ]]; then exit 160
echo "You can download the $next_step_so_version ISO image from https://download.securityonion.net/file/securityonion/securityonion-$next_step_so_version.iso" fi
echo "*** Once you have updated to $next_step_so_version, you can then run soup again to update to $(cat $UPDATE_DIR/VERSION). ***"
echo -e "\n##############################################################################################################################\n" # allow upgrade to version < 2.4.110 without checking ES version compatibility
exit 160 return 0
else else
# preserve BRANCH value if set originally target_es_version=$(sed -n '1p' <<< "$target_es_version_raw")
if [[ -n "$BRANCH" ]]; then
local originally_requested_so_version="$BRANCH"
else
local originally_requested_so_version="2.4/main"
fi
echo "Starting automated intermediate upgrade to $next_step_so_version."
echo "After completion, the system will automatically attempt to upgrade to the latest version."
echo -e "\n##############################################################################################################################\n"
exec bash -c "BRANCH=$next_step_so_version soup -y && BRANCH=$next_step_so_version soup -y && \
echo -e \"\n##############################################################################################################################\n\" && \
echo -e \"Verifying Elasticsearch was successfully upgraded to $required_es_upgrade_version across the grid. This part can take a while as Searchnodes/Heavynodes sync up with the Manager! \n\nOnce verification completes the next soup will begin automatically. If verification takes longer than 1 hour it will stop waiting and your grid will remain at $next_step_so_version. Allowing for all Searchnodes/Heavynodes to upgrade Elasticsearch to the required version on their own time.\n\" \
&& timeout --foreground 4000 bash /tmp/so_intermediate_upgrade_verification.sh $required_es_upgrade_version $es_required_version_statefile && \
echo -e \"\n##############################################################################################################################\n\" \
&& BRANCH=$originally_requested_so_version soup -y && BRANCH=$originally_requested_so_version soup -y"
fi fi
fi
for statefile in "${es_required_version_statefile_base}"-*; do
[[ -f $statefile ]] || continue
local es_required_version_statefile_value=$(cat "$statefile")
if [[ "$es_required_version_statefile_value" == "$target_es_version" ]]; then
echo "Intermediate upgrade to ES $target_es_version is in progress. Skipping Elasticsearch version compatibility check."
is_active_intermediate_upgrade=0
continue
fi
# use sort to check if es_required_statefile_value is < the current es_version.
if [[ "$(printf '%s\n' $es_required_version_statefile_value $es_version | sort -V | head -n1)" == "$es_required_version_statefile_value" ]]; then
rm -f "$statefile"
continue
fi
if [[ ! -f "$es_verification_script" ]]; then
create_intermediate_upgrade_verification_script "$es_verification_script"
fi
echo -e "\n##############################################################################################################################\n"
echo "A previously required intermediate Elasticsearch upgrade was detected. Verifying that all Searchnodes/Heavynodes have successfully upgraded Elasticsearch to $es_required_version_statefile_value before proceeding with soup to avoid potential data loss! This command can take up to an hour to complete."
timeout --foreground 4000 bash "$es_verification_script" "$es_required_version_statefile_value" "$statefile"
if [[ $? -ne 0 ]]; then
echo -e "\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
echo "A previous required intermediate Elasticsearch upgrade to $es_required_version_statefile_value has yet to successfully complete across the grid. Please allow time for all Searchnodes/Heavynodes to have upgraded Elasticsearch to $es_required_version_statefile_value before running soup again to avoid potential data loss!"
echo -e "\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
exit 161
fi
echo -e "\n##############################################################################################################################\n"
done
# if current soup is an intermediate upgrade we can skip the upgrade map check below
if [[ $is_active_intermediate_upgrade -eq 0 ]]; then
return 0
fi
if [[ " ${es_upgrade_map[$es_version]} " =~ " $target_es_version " || "$es_version" == "$target_es_version" ]]; then
# supported upgrade
return 0
else
compatible_versions=${es_upgrade_map[$es_version]}
if [[ -z "$compatible_versions" ]]; then
# If current ES version is not explicitly defined in the upgrade map, we know they have an intermediate upgrade to do.
# We default to the lowest ES version defined in es_to_so_version as $first_es_required_version
local first_es_required_version=$(printf '%s\n' "${!es_to_so_version[@]}" | sort -V | head -n1)
next_step_so_version=${es_to_so_version[$first_es_required_version]}
required_es_upgrade_version="$first_es_required_version"
else
next_step_so_version=${es_to_so_version[${compatible_versions##* }]}
required_es_upgrade_version="${compatible_versions##* }"
fi
echo -e "\n##############################################################################################################################\n"
echo -e "You are currently running Security Onion $INSTALLEDVERSION. You will need to update to version $next_step_so_version before updating to $(cat $UPDATE_DIR/VERSION).\n"
es_required_version_statefile="${es_required_version_statefile_base}-${required_es_upgrade_version}"
echo "$required_es_upgrade_version" > "$es_required_version_statefile"
# We expect to upgrade to the latest compatiable minor version of ES
create_intermediate_upgrade_verification_script "$es_verification_script"
if [[ $is_airgap -eq 0 ]]; then
run_airgap_intermediate_upgrade
else
if [[ ! -z $ISOLOC ]]; then
originally_requested_iso_location="$ISOLOC"
fi
# Make sure ISOLOC is not set. Network installs that used soup -f would have ISOLOC set.
unset ISOLOC
run_network_intermediate_upgrade
fi
fi
}
run_airgap_intermediate_upgrade() {
local originally_requested_so_version=$(cat $UPDATE_DIR/VERSION)
# preserve ISOLOC value, so we can try to use it post intermediate upgrade
local originally_requested_iso_location="$ISOLOC"
# make sure a fresh ISO gets mounted
unmount_update
echo "You can download the $next_step_so_version ISO image from https://download.securityonion.net/file/securityonion/securityonion-$next_step_so_version.iso"
echo -e "\nIf you have the next ISO / USB ready, enter the path now eg. /dev/sdd, /home/onion/securityonion-$next_step_so_version.iso:"
while [[ -z "$next_iso_location" ]] || [[ ! -f "$next_iso_location" && ! -b "$next_iso_location" ]]; do
# List removable devices if any are present
local removable_devices=$(lsblk -no PATH,SIZE,TYPE,MOUNTPOINTS,RM | awk '$NF==1')
if [[ -n "$removable_devices" ]]; then
echo "PATH SIZE TYPE MOUNTPOINTS RM"
echo "$removable_devices"
fi
read -rp "Device/ISO Path (or 'exit' to quit): " next_iso_location
if [[ "${next_iso_location,,}" == "exit" ]]; then
echo "Exiting soup. Before reattempting to upgrade to $originally_requested_so_version, please first upgrade to $next_step_so_version to ensure Elasticsearch can properly update through the required versions."
exit 160
fi
if [[ ! -f "$next_iso_location" && ! -b "$next_iso_location" ]]; then
echo "$next_iso_location is not a valid file or block device."
next_iso_location=""
fi
done
echo "Using $next_iso_location for required intermediary upgrade."
exec bash <<EOF
ISOLOC=$next_iso_location soup -y && \
ISOLOC=$next_iso_location soup -y && \
echo -e "\n##############################################################################################################################\n" && \
echo -e "Verifying Elasticsearch was successfully upgraded to $required_es_upgrade_version across the grid. This part can take a while as Searchnodes/Heavynodes sync up with the Manager! \n\nOnce verification completes the next soup will begin automatically. If verification takes longer than 1 hour it will stop waiting and your grid will remain at $next_step_so_version. Allowing for all Searchnodes/Heavynodes to upgrade Elasticsearch to the required version on their own time.\n" && \
timeout --foreground 4000 bash /tmp/so_intermediate_upgrade_verification.sh $required_es_upgrade_version $es_required_version_statefile && \
echo -e "\n##############################################################################################################################\n" && \
# automatically start the next soup if the original ISO isn't using the same block device we just used
if [[ -n "$originally_requested_iso_location" ]] && [[ "$originally_requested_iso_location" != "$next_iso_location" ]]; then
umount /tmp/soagupdate
ISOLOC=$originally_requested_iso_location soup -y && \
ISOLOC=$originally_requested_iso_location soup -y
else
echo "Could not automatically start next soup to $originally_requested_so_version. Soup will now exit here at $(cat /etc/soversion)" && \
exit 170
fi
echo -e "\n##############################################################################################################################\n"
EOF
}
run_network_intermediate_upgrade() {
# preserve BRANCH value if set originally
if [[ -n "$BRANCH" ]]; then
local originally_requested_so_branch="$BRANCH"
else
local originally_requested_so_branch="2.4/main"
fi
echo "Starting automated intermediate upgrade to $next_step_so_version."
echo "After completion, the system will automatically attempt to upgrade to the latest version."
echo -e "\n##############################################################################################################################\n"
exec bash << EOF
BRANCH=$next_step_so_version soup -y && \
BRANCH=$next_step_so_version soup -y && \
echo -e "\n##############################################################################################################################\n" && \
echo -e "Verifying Elasticsearch was successfully upgraded to $required_es_upgrade_version across the grid. This part can take a while as Searchnodes/Heavynodes sync up with the Manager! \n\nOnce verification completes the next soup will begin automatically. If verification takes longer than 1 hour it will stop waiting and your grid will remain at $next_step_so_version. Allowing for all Searchnodes/Heavynodes to upgrade Elasticsearch to the required version on their own time.\n" && \
timeout --foreground 4000 bash /tmp/so_intermediate_upgrade_verification.sh $required_es_upgrade_version $es_required_version_statefile && \
echo -e "\n##############################################################################################################################\n" && \
if [[ -n "$originally_requested_iso_location" ]]; then
# nonairgap soup that used -f originally, runs intermediate upgrade using network + BRANCH, later coming back to the original ISO for the last soup
ISOLOC=$originally_requested_iso_location soup -y && \
ISOLOC=$originally_requested_iso_location soup -y
else
BRANCH=$originally_requested_so_branch soup -y && \
BRANCH=$originally_requested_so_branch soup -y
fi
echo -e "\n##############################################################################################################################\n"
EOF
} }
create_intermediate_upgrade_verification_script() { create_intermediate_upgrade_verification_script() {
@@ -2016,10 +2112,15 @@ main() {
MINION_ROLE=$(lookup_role) MINION_ROLE=$(lookup_role)
echo "Found that Security Onion $INSTALLEDVERSION is currently installed." echo "Found that Security Onion $INSTALLEDVERSION is currently installed."
echo "" echo ""
if [[ $is_airgap -eq 0 ]] || [[ $nonairgap_useiso -eq 0 ]]; then if [[ $is_airgap -eq 0 ]]; then
# Let's mount the ISO since this is airgap or non-airgap with -f used # Let's mount the ISO since this is airgap
airgap_mounted airgap_mounted
else else
# if not airgap but -f was used
if [[ ! -z "$ISOLOC" ]]; then
airgap_mounted
AGDOCKER=/tmp/soagupdate/docker
fi
echo "Cloning Security Onion github repo into $UPDATE_DIR." echo "Cloning Security Onion github repo into $UPDATE_DIR."
echo "Removing previous upgrade sources." echo "Removing previous upgrade sources."
rm -rf $UPDATE_DIR rm -rf $UPDATE_DIR
@@ -2029,6 +2130,7 @@ main() {
echo "Verifying we have the latest soup script." echo "Verifying we have the latest soup script."
verify_latest_update_script verify_latest_update_script
echo "Verifying Elasticsearch version compatibility before upgrading."
verify_es_version_compatibility verify_es_version_compatibility
echo "Let's see if we need to update Security Onion." echo "Let's see if we need to update Security Onion."
@@ -2039,8 +2141,7 @@ main() {
upgrade_check_salt upgrade_check_salt
set -e set -e
if [[ $is_airgap -eq 0 ]] || [[ $nonairgap_useiso -eq 0 ]]; then if [[ $is_airgap -eq 0 ]]; then
# non-airgap with -f used can do an initial ISO repo update and so-repo-sync cron job will sync any diff later via network
update_airgap_repo update_airgap_repo
dnf clean all dnf clean all
check_os_updates check_os_updates

View File

@@ -2380,6 +2380,10 @@ soc:
exclusive: true exclusive: true
enablesToggles: enablesToggles:
- acknowledged - acknowledged
- name: investigated
filter: event.investigated:true
enabled: false
exclusive: false
queries: queries:
- name: 'Group By Name, Module' - name: 'Group By Name, Module'
query: '* | groupby rule.name event.module* event.severity_label rule.uuid' query: '* | groupby rule.name event.module* event.severity_label rule.uuid'

View File

@@ -683,6 +683,8 @@ soc:
options: options:
- securityonion_ai_cloud - securityonion_ai_cloud
- gemini - gemini
- openai_responses
- openai_chat
- field: apiUrl - field: apiUrl
label: API URL label: API URL
required: False required: False