Merge branch '2.4/dev' into mergeback

Mike Reeves
2023-12-06 13:38:53 -05:00
committed by GitHub
27 changed files with 509 additions and 98 deletions

HOTFIX
View File

@@ -1 +0,0 @@
20231204

View File

@@ -1 +1 @@
2.4.30
2.4.40

View File

@@ -109,6 +109,7 @@ if [[ $EXCLUDE_STARTUP_ERRORS == 'Y' ]]; then
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|timeout exceeded" # server not yet ready (telegraf waiting on elasticsearch)
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|influxsize kbytes" # server not yet ready (telegraf waiting on influx)
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|expected field at" # server not yet ready (telegraf waiting on health data)
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|connection timed out" # server not yet ready (telegraf plugin unable to connect)
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|cached the public key" # server not yet ready (salt minion waiting on key acceptance)
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|no ingest nodes" # server not yet ready (logstash waiting on elastic)
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|failed to poll" # server not yet ready (sensoroni waiting on soc)

View File

@@ -10,6 +10,7 @@ elasticfleet:
logging:
zeek:
excluded:
- analyzer
- broker
- capture_loss
- cluster

View File

@@ -2,8 +2,8 @@
"description" : "common.nids",
"processors" : [
{ "convert": { "if": "ctx.rule.uuid != null", "field": "rule.uuid", "type": "integer" } },
{ "set": { "if": "ctx.rule?.uuid < 1000000", "field": "rule.reference", "value": "https://www.snort.org/search?query={{rule.gid}}-{{rule.uuid}}" } },
{ "set": { "if": "ctx.rule?.uuid > 1999999", "field": "rule.reference", "value": "https://doc.emergingthreats.net/{{rule.uuid}}" } },
{ "set": { "if": "ctx.rule?.uuid < 1000000", "field": "rule.reference", "value": "https://www.snort.org/rule_docs/{{rule.gid}}-{{rule.uuid}}" } },
{ "set": { "if": "ctx.rule?.uuid > 1999999", "field": "rule.reference", "value": "https://community.emergingthreats.net" } },
{ "convert": { "if": "ctx.rule.uuid != null", "field": "rule.uuid", "type": "string" } },
{ "dissect": { "if": "ctx.rule.name != null", "field": "rule.name", "pattern" : "%{rule_type} %{rest_of_rulename} ", "ignore_failure": true } },
{ "set": { "if": "ctx.rule_type == 'GPL'", "field": "rule.ruleset", "value": "Snort GPL" } },

View File

@@ -12,7 +12,9 @@
{ "rename": { "field": "message2.last_alert", "target_field": "ssl.last_alert", "ignore_missing": true } },
{ "rename": { "field": "message2.next_protocol", "target_field": "ssl.next_protocol", "ignore_missing": true } },
{ "rename": { "field": "message2.established", "target_field": "ssl.established", "ignore_missing": true } },
{ "rename": { "field": "message2.cert_chain_fuids", "target_field": "ssl.certificate.chain_fuids", "ignore_missing": true } },
{ "rename": { "if": "ctx.message2?.cert_chain_fps != null", "field": "message2.cert_chain_fps", "target_field": "tls.server.hash.sha256", "ignore_missing": true } },
{ "rename": { "field": "message2?.cert_chain_fuids", "target_field": "ssl.certificate.chain_fuids", "ignore_missing": true } },
{ "rename": { "if": "ctx.message2?.client_cert_chain_fps != null", "field": "message2.client_cert_chain_fps", "target_field": "tls.client.hash.sha256", "ignore_failure": true, "ignore_missing": true } },
{ "rename": { "field": "message2.client_cert_chain_fuids", "target_field": "ssl.client.certificate.chain_fuids", "ignore_missing": true } },
{ "rename": { "field": "message2.subject", "target_field": "ssl.certificate.subject", "ignore_missing": true } },
{ "rename": { "field": "message2.issuer", "target_field": "ssl.certificate.issuer", "ignore_missing": true } },
@@ -21,6 +23,18 @@
{ "rename": { "field": "message2.validation_status","target_field": "ssl.validation_status", "ignore_missing": true } },
{ "rename": { "field": "message2.ja3", "target_field": "hash.ja3", "ignore_missing": true } },
{ "rename": { "field": "message2.ja3s", "target_field": "hash.ja3s", "ignore_missing": true } },
{ "foreach":
{
"if": "ctx?.tls?.client?.hash?.sha256 !=null",
"field": "tls.client.hash.sha256",
"processor": {
"append": {
"field": "hash.sha256",
"value": "{{_ingest._value}}"
}
}
}
},
{ "pipeline": { "name": "zeek.common_ssl" } }
]
}
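
The added `foreach`/`append` processors copy each client certificate fingerprint from `tls.client.hash.sha256` into the event's top-level `hash.sha256` field. A minimal Python sketch of that behavior (not part of the commit; the actual processing happens in the ingest pipeline):

```python
# Sketch: append every client cert fingerprint to the event's hash.sha256 list.
def append_client_cert_hashes(event):
    fps = event.get("tls", {}).get("client", {}).get("hash", {}).get("sha256")
    if fps is not None:
        event.setdefault("hash", {}).setdefault("sha256", []).extend(fps)
    return event

event = {"tls": {"client": {"hash": {"sha256": ["aa11...", "bb22..."]}}}}
print(append_client_cert_hashes(event)["hash"]["sha256"])  # ['aa11...', 'bb22...']
```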

View File

@@ -41,6 +41,7 @@
{ "rename": { "field": "message2.basic_constraints.ca", "target_field": "x509.basic_constraints.ca", "ignore_missing": true } },
{ "dot_expander": { "field": "basic_constraints.path_length", "path": "message2", "ignore_failure": true } },
{ "rename": { "field": "message2.basic_constraints.path_length", "target_field": "x509.basic_constraints.path_length", "ignore_missing": true } },
{ "rename": { "field": "message2.fingerprint", "target_field": "hash.sha256", "ignore_missing": true } },
{ "pipeline": { "name": "zeek.common_ssl" } }
]
}

View File

@@ -16,12 +16,12 @@ lockFile = "/tmp/so-yaml.lock"
def showUsage(args):
print('Usage: {} <COMMAND> <YAML_FILE> [ARGS...]'.format(sys.argv[0]))
print(' General commands:')
print(' remove - Removes a yaml top-level key, if it exists. Requires KEY arg.')
print(' remove - Removes a yaml key, if it exists. Requires KEY arg.')
print(' help - Prints this usage information.')
print('')
print(' Where:')
print(' YAML_FILE - Path to the file that will be modified. Ex: /opt/so/conf/service/conf.yaml')
print(' KEY - Top level key only, does not support dot-notations for nested keys at this time. Ex: level1')
print(' KEY - YAML key, does not support \' or " characters at this time. Ex: level1.level2')
sys.exit(1)
@@ -36,6 +36,14 @@ def writeYaml(filename, content):
return yaml.dump(content, file)
def removeKey(content, key):
pieces = key.split(".", 1)
if len(pieces) > 1:
removeKey(content[pieces[0]], pieces[1])
else:
content.pop(key, None)
def remove(args):
if len(args) != 2:
print('Missing filename or key arg', file=sys.stderr)
@@ -43,11 +51,12 @@ def remove(args):
return
filename = args[0]
key = args[1]
content = loadYaml(filename)
content.pop(args[1], None)
removeKey(content, key)
writeYaml(filename, content)
return 0
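
The new `removeKey` helper is what lets `remove` accept dot-notation such as `key1.child2.deep1`: it descends one level per dot and pops the final key if it exists. A standalone sketch of the same logic run against a plain dict (so-yaml.py itself loads and rewrites the YAML file around this call):

```python
# Mirrors the removeKey() added above, applied to an in-memory dict.
def remove_key(content, key):
    pieces = key.split(".", 1)
    if len(pieces) > 1:
        remove_key(content[pieces[0]], pieces[1])
    else:
        content.pop(key, None)

doc = {"key1": {"child1": 123, "child2": {"deep1": 45, "deep2": "ab"}}, "key2": False}
remove_key(doc, "key1.child2.deep1")
print(doc)  # {'key1': {'child1': 123, 'child2': {'deep2': 'ab'}}, 'key2': False}
```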

View File

@@ -57,6 +57,36 @@ class TestRemove(unittest.TestCase):
expected = "key2: false\n"
self.assertEqual(actual, expected)
def test_remove_nested(self):
filename = "/tmp/so-yaml_test-remove.yaml"
file = open(filename, "w")
file.write("{key1: { child1: 123, child2: abc }, key2: false}")
file.close()
soyaml.remove([filename, "key1.child2"])
file = open(filename, "r")
actual = file.read()
file.close()
expected = "key1:\n child1: 123\nkey2: false\n"
self.assertEqual(actual, expected)
def test_remove_nested_deep(self):
filename = "/tmp/so-yaml_test-remove.yaml"
file = open(filename, "w")
file.write("{key1: { child1: 123, child2: { deep1: 45, deep2: ab } }, key2: false}")
file.close()
soyaml.remove([filename, "key1.child2.deep1"])
file = open(filename, "r")
actual = file.read()
file.close()
expected = "key1:\n child1: 123\n child2:\n deep2: ab\nkey2: false\n"
self.assertEqual(actual, expected)
def test_remove_missing_args(self):
with patch('sys.exit', new=MagicMock()) as sysmock:
with patch('sys.stderr', new=StringIO()) as mock_stdout:

View File

@@ -27,6 +27,12 @@ sensoroni:
spamhaus:
lookup_host: zen.spamhaus.org
nameservers: []
sublime_platform:
base_url: https://api.platform.sublimesecurity.com
api_key:
live_flow: False
mailbox_email_address:
message_source_id:
urlscan:
base_url: https://urlscan.io/api/v1/
api_key:

View File

@@ -6,19 +6,20 @@ Security Onion provides a means for performing data analysis on varying inputs.
The built-in analyzers support the following observable types:
| Name | Domain | Hash | IP | Mail | Other | URI | URL | User Agent |
| ------------------------|--------|-------|-------|-------|-------|-------|-------|-------|
| Alienvault OTX |&check; |&check;|&check;|&cross;|&cross;|&cross;|&check;|&cross;|
| EmailRep |&cross; |&cross;|&cross;|&check;|&cross;|&cross;|&cross;|&cross;|
| Greynoise |&cross; |&cross;|&check;|&cross;|&cross;|&cross;|&cross;|&cross;|
| LocalFile |&check; |&check;|&check;|&cross;|&check;|&cross;|&check;|&cross;|
| Malware Hash Registry |&cross; |&check;|&cross;|&cross;|&cross;|&cross;|&check;|&cross;|
| Pulsedive |&check; |&check;|&check;|&cross;|&cross;|&check;|&check;|&check;|
| Spamhaus |&cross; |&cross;|&check;|&cross;|&cross;|&cross;|&cross;|&cross;|
| Urlhaus |&cross; |&cross;|&cross;|&cross;|&cross;|&cross;|&check;|&cross;|
| Urlscan |&cross; |&cross;|&cross;|&cross;|&cross;|&cross;|&check;|&cross;|
| Virustotal |&check; |&check;|&check;|&cross;|&cross;|&cross;|&check;|&cross;|
| WhoisLookup |&check; |&cross;|&cross;|&cross;|&cross;|&check;|&cross;|&cross;|
| Name | Domain | EML | Hash | IP | Mail | Other | URI | URL | User Agent |
| ------------------------|--------|-------|-------|-------|-------|-------|-------|-------|-------|
| Alienvault OTX |&check; |&cross;|&check;|&check;|&cross;|&cross;|&cross;|&check;|&cross;|
| EmailRep |&cross; |&cross;|&cross;|&cross;|&check;|&cross;|&cross;|&cross;|&cross;|
| Greynoise |&cross; |&cross;|&cross;|&check;|&cross;|&cross;|&cross;|&cross;|&cross;|
| LocalFile |&check; |&cross;|&check;|&check;|&cross;|&check;|&cross;|&check;|&cross;|
| Malware Hash Registry |&cross; |&cross;|&check;|&cross;|&cross;|&cross;|&cross;|&check;|&cross;|
| Pulsedive |&check; |&cross;|&check;|&check;|&cross;|&cross;|&check;|&check;|&check;|
| Spamhaus |&cross; |&cross;|&cross;|&check;|&cross;|&cross;|&cross;|&cross;|&cross;|
| Sublime Platform |&cross; |&check;|&cross;|&cross;|&cross;|&cross;|&cross;|&cross;|&cross;|
| Urlhaus |&cross; |&cross;|&cross;|&cross;|&cross;|&cross;|&cross;|&check;|&cross;|
| Urlscan |&cross; |&cross;|&cross;|&cross;|&cross;|&cross;|&cross;|&check;|&cross;|
| Virustotal |&check; |&cross;|&check;|&check;|&cross;|&cross;|&cross;|&check;|&cross;|
| WhoisLookup |&check; |&cross;|&cross;|&cross;|&cross;|&cross;|&check;|&cross;|&cross;|
## Authentication
@@ -29,10 +30,11 @@ Many analyzers require authentication, via an API key or similar. The table belo
[AlienVault OTX](https://otx.alienvault.com/api) |&check;|
[EmailRep](https://emailrep.io/key) |&check;|
[GreyNoise](https://www.greynoise.io/plans/community) |&check;|
LocalFile |&cross;|
[LocalFile](https://github.com/Security-Onion-Solutions/securityonion/tree/fix/sublime_analyzer_documentation/salt/sensoroni/files/analyzers/localfile) |&cross;|
[Malware Hash Registry](https://hash.cymru.com/docs_whois) |&cross;|
[Pulsedive](https://pulsedive.com/api/) |&check;|
[Spamhaus](https://www.spamhaus.org/dbl/) |&cross;|
[Sublime Platform](https://sublime.security) |&check;|
[Urlhaus](https://urlhaus.abuse.ch/) |&cross;|
[Urlscan](https://urlscan.io/docs/api/) |&check;|
[VirusTotal](https://developers.virustotal.com/reference/overview) |&check;|

View File

@@ -0,0 +1,24 @@
# Sublime
## Description
Submit a base64-encoded EML file to Sublime Platform for analysis.
## Configuration Requirements
In SOC, navigate to `Administration`, toggle `Show all configurable settings, including advanced settings.`, and navigate to `sensoroni` -> `analyzers` -> `sublime_platform`.
![image](https://github.com/Security-Onion-Solutions/securityonion/assets/16829864/a914f59d-c09f-40b6-ae8b-d644df236b81)
The following configuration options are available for this analyzer:
``api_key`` - API key used for communication with the Sublime Platform API (Required)
``base_url`` - URL used for communication with Sublime Platform. If no value is supplied, the default of `https://api.platform.sublimesecurity.com` will be used.
The following options relate to [Live Flow](https://docs.sublimesecurity.com/reference/analyzerawmessageliveflow-1) analysis only:
``live_flow`` - Determines if live flow analysis should be used. Defaults to `False`.
``mailbox_email_address`` - The mailbox address to use during live flow analysis. (Required for live flow analysis)
``message_source_id`` - The ID of the message source to use during live flow analysis. (Required for live flow analysis)
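
For reference, these settings end up in the analyzer's configuration and determine which Sublime Platform endpoint is called. The sketch below mirrors `buildReq()` from `sublime.py` in this commit; the `api_key` value is a placeholder:

```python
# Sketch of how the options above drive the request the analyzer builds.
conf = {
    "base_url": "https://api.platform.sublimesecurity.com",
    "api_key": "REPLACE_ME",
    "live_flow": False,
    "mailbox_email_address": "",
    "message_source_id": "",
}

def build_request(conf, raw_eml_b64):
    headers = {"Authorization": "Bearer " + conf["api_key"]}
    if str(conf["live_flow"]).lower() == "true":
        uri = "/v1/live-flow/raw-messages/analyze"
        data = {"create_mailbox": True,
                "mailbox_email_address": str(conf["mailbox_email_address"]),
                "message_source_id": str(conf["message_source_id"]),
                "raw_message": raw_eml_b64}
    else:
        uri = "/v0/messages/analyze"
        data = {"raw_message": raw_eml_b64, "run_active_detection_rules": True}
    return conf["base_url"] + uri, headers, data

print(build_request(conf, "RnJvbTogQWxpY2U...")[0])  # .../v0/messages/analyze
```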

View File

@@ -0,0 +1,2 @@
requests>=2.27.1
pyyaml>=6.0

View File

@@ -0,0 +1,7 @@
{
"name": "Sublime",
"version": "0.1",
"author": "Security Onion Solutions",
"description": "This analyzer analyzes an email with Sublime Security to determine if it is considered malicious.",
"supportedTypes" : ["eml"]
}

View File

@@ -0,0 +1,83 @@
import json
import requests
import sys
import os
import helpers
import argparse
def checkConfigRequirements(conf):
if "api_key" not in conf or len(conf['api_key']) == 0:
sys.exit(126)
def buildReq(conf, artifact_value):
headers = {"Authorization": "Bearer " + conf['api_key']}
base_url = conf['base_url']
if str(conf['live_flow']).lower() == "true":
uri = "/v1/live-flow/raw-messages/analyze"
data = {"create_mailbox": True, "mailbox_email_address": str(conf['mailbox_email_address']), "message_source_id": str(conf['message_source_id']), "raw_message": artifact_value}
else:
uri = "/v0/messages/analyze"
data = {"raw_message": artifact_value,
"run_active_detection_rules": True}
url = base_url + uri
return url, headers, data
def sendReq(url, headers, data):
response = requests.request('POST',
url=url,
headers=headers,
data=json.dumps(data)).json()
return response
def prepareResults(raw):
matched = []
if "rule_results" in raw:
for r in raw["rule_results"]:
if r["matched"] is True:
matched.append(r)
if len(matched) > 0:
raw = matched
status = "threat"
summary = "malicious"
else:
raw = "No rules matched."
status = "ok"
summary = "harmless"
elif "flagged_rules" in raw:
if raw["flagged_rules"] is not None:
status = "threat"
summary = "malicious"
else:
status = "ok"
summary = "harmless"
results = {'response': raw, 'status': status, 'summary': summary}
return results
def analyze(conf, input):
checkConfigRequirements(conf)
meta = helpers.loadMetadata(__file__)
data = helpers.parseArtifact(input)
helpers.checkSupportedType(meta, data["artifactType"])
request = buildReq(conf, data["value"])
response = sendReq(request[0], request[1], request[2])
return prepareResults(response)
def main():
dir = os.path.dirname(os.path.realpath(__file__))
parser = argparse.ArgumentParser(description="Submit an email to Sublime Platform's EML Analyzer")
parser.add_argument('artifact', help='the artifact represented in JSON format')
parser.add_argument('-c', '--config', metavar="CONFIG_FILE", default=dir + "/sublime.yaml", help='optional config file to use instead of the default config file')
args = parser.parse_args()
if args.artifact:
results = analyze(helpers.loadConfig(args.config), args.artifact)
print(json.dumps(results))
if __name__ == "__main__":
main()
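
A condensed, self-contained sketch of the classification performed by `prepareResults()` above: any matched rule (or any non-null `flagged_rules` in a live-flow response) marks the message as a threat; otherwise it is reported as harmless. This simplification is for illustration only and is not part of the commit:

```python
# Simplified restatement of the threat/harmless decision in prepareResults().
def classify(raw):
    if "rule_results" in raw:
        matched = [r for r in raw["rule_results"] if r.get("matched")]
        return ("threat", "malicious") if matched else ("ok", "harmless")
    if raw.get("flagged_rules") is not None:
        return ("threat", "malicious")
    return ("ok", "harmless")

print(classify({"rule_results": [{"matched": True}]}))   # ('threat', 'malicious')
print(classify({"flagged_rules": None}))                  # ('ok', 'harmless')
```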

View File

@@ -0,0 +1,5 @@
base_url: "{{ salt['pillar.get']('sensoroni:analyzers:sublime_platform:base_url', 'https://api.platform.sublimesecurity.com') }}"
api_key: "{{ salt['pillar.get']('sensoroni:analyzers:sublime_platform:api_key', '') }}"
live_flow: "{{ salt['pillar.get']('sensoroni:analyzers:sublime_platform:live_flow', 'False') }}"
mailbox_email_address: "{{ salt['pillar.get']('sensoroni:analyzers:sublime_platform:mailbox_email_address', '') }}"
message_source_id: "{{ salt['pillar.get']('sensoroni:analyzers:sublime_platform:message_source_id', '') }}"

View File

@@ -0,0 +1,188 @@
from io import StringIO
import sys
from unittest.mock import patch, MagicMock
from sublime import sublime
import json
import unittest
class TestSublimePlatformMethods(unittest.TestCase):
def test_main_missing_input(self):
with patch('sys.exit', new=MagicMock()) as sysmock:
with patch('sys.stderr', new=StringIO()) as mock_stderr:
sys.argv = ["cmd"]
sublime.main()
self.assertEqual(mock_stderr.getvalue(), '''usage: cmd [-h] [-c CONFIG_FILE] artifact\ncmd: error: the following arguments are required: artifact\n''')
sysmock.assert_called_once_with(2)
def test_main_success(self):
output = {"foo": "bar"}
with patch('sys.stdout', new=StringIO()) as mock_stdout:
with patch('sublime.sublime.analyze', new=MagicMock(return_value=output)) as mock:
sys.argv = ["cmd", "input"]
sublime.main()
expected = '{"foo": "bar"}\n'
self.assertEqual(mock_stdout.getvalue(), expected)
mock.assert_called_once()
def test_checkKeyNonexistent(self):
conf = {"not_a_key": "abcd12345"}
with self.assertRaises(SystemExit) as cm:
sublime.checkConfigRequirements(conf)
self.assertEqual(cm.exception.code, 126)
def test_buildReqLiveFlow(self):
conf = {'base_url': 'https://api.platform.sublimesecurity.com', 'api_key': 'abcd12345', 'live_flow': True, 'mailbox_email_address': 'user@test.local', 'message_source_id': 'abcd1234'}
artifact_value = "abcd1234"
result = sublime.buildReq(conf, artifact_value)
self.assertEqual("https://api.platform.sublimesecurity.com/v1/live-flow/raw-messages/analyze", result[0])
self.assertEqual({'Authorization': 'Bearer abcd12345'}, result[1])
def test_buildReqNotLiveFlow(self):
conf = {'base_url': 'https://api.platform.sublimesecurity.com', 'api_key': 'abcd12345', 'live_flow': False, 'mailbox_email_address': 'user@test.local'}
artifact_value = "abcd1234"
result = sublime.buildReq(conf, artifact_value)
self.assertEqual("https://api.platform.sublimesecurity.com/v0/messages/analyze", result[0])
self.assertEqual({'Authorization': 'Bearer abcd12345'}, result[1])
def test_prepareResultsRuleResultsMatched(self):
raw = ''' {
"rule_results": [{
"rule": {
"id": "9147f589-39d5-4dd0-a0ee-00433a6e2632",
"name": "AnonymousFox Indicators",
"source": "type.inbound\\nand regex.icontains(sender.email.email, \\"(anonymous|smtp)fox-\\"))\\n",
"severity": "medium"
},
"matched": true,
"success": true,
"error": null,
"external_errors": null,
"execution_time": 0.000071679
}]}'''
results = sublime.prepareResults(json.loads(raw))
print(results)
self.assertEqual(results["response"], json.loads(raw)["rule_results"])
self.assertEqual(results["summary"], "malicious")
self.assertEqual(results["status"], "threat")
def test_prepareResultsRuleResultsNotMatched(self):
raw = ''' {
"rule_results": [{
"rule": {
"id": "9147f589-39d5-4dd0-a0ee-00433a6e2632",
"name": "AnonymousFox Indicators",
"source": "type.inbound and regex.icontains(.value, \\"(anonymous|smtp)fox-\\"))\\n",
"severity": "medium"
},
"matched": false,
"success": true,
"error": null,
"external_errors": null,
"execution_time": 0.000071679
}]}'''
results = sublime.prepareResults(json.loads(raw))
print(results)
self.assertEqual(results["response"], "No rules matched.")
self.assertEqual(results["summary"], "harmless")
self.assertEqual(results["status"], "ok")
def test_prepareResultsLiveFlowMatched(self):
raw = '''{
"canonical_id": "fb8b46e3317ac7d5036c6b21517d363634293c6d4f6bf1b1e67548c80948a1c6",
"flagged_rules": [
{
"actions": null,
"active": true,
"active_updated_at": "2023-08-09T14:58:25.669495Z",
"attack_types": [
"Credential Phishing",
"Malware/Ransomware"
],
"authors": null,
"created_at": "2023-08-09 01:00:25.642489+00",
"created_by_api_request_id": null,
"created_by_org_id": null,
"created_by_org_name": null,
"created_by_user_id": null,
"created_by_user_name": null,
"description": "Recursively scans files and archives to detect HTML smuggling techniques.\\n",
"detection_methods": [
"Archive analysis",
"File analysis",
"HTML analysis",
"Javascript analysis"
],
"exclusion_mql": null,
"false_positives": null,
"feed_external_rule_id": "0b0fed36-735a-50f1-bf10-6673237a4623",
"feed_id": "4e5d7da3-d566-4910-a613-f00709702240",
"full_type": "detection_rule",
"id": "537bf73d-a4f0-4389-b2a1-272192efa0d5",
"immutable": true,
"internal_type": null,
"label": null,
"maturity": null,
"name": "Attachment: HTML smuggling with unescape",
"org_id": "dac92af8-2bd6-4861-9ee1-a04e713e3ae2",
"references": [
"https://www.microsoft.com/security/blog/2021/11/11/html-smuggling-surges-highly-evasive-loader"
],
"severity": "high",
"source_md5": "b68388617d78ccc20075ca8fffc7e3f8",
"tactics_and_techniques": [
"Evasion",
"HTML smuggling",
"Scripting"
],
"tags": null,
"type": "detection",
"updated_at": "2023-11-01 15:25:47.212056+00",
"user_provided_tags": [
]
}
],
"message_id": "0071b1ac-d7ca-4e37-91c5-068a96b9dda8",
"raw_message_id": "1dc90473-b028-4754-942c-476cfb1ca2ff"
}'''
results = sublime.prepareResults(json.loads(raw))
print(results)
self.assertEqual(results["response"], json.loads(raw))
self.assertEqual(results["summary"], "malicious")
self.assertEqual(results["status"], "threat")
def test_prepareResultsLiveFlowNotMatched(self):
raw = '''{
"canonical_id": "092459fa0d9edd5d8e2d0ccf3af50120c63ec58717a8cfdeb15854706940346f",
"flagged_rules": null,
"message_id": "1e8693b4-bf44-4cb9-ac9a-85fc2a99eeb8",
"raw_message_id": "5d2f03c2-86e1-47d8-81ae-620ecb5c6553"
}'''
results = sublime.prepareResults(json.loads(raw))
self.assertEqual(results["response"], json.loads(raw))
self.assertEqual(results["summary"], "harmless")
self.assertEqual(results["status"], "ok")
def test_sendReq(self):
with patch('requests.request', new=MagicMock(return_value=MagicMock())) as mock:
url = "https://api.platform.sublimesecurity.com/v1/live-flow/raw-messages/analyze"
headers = {'Authorization': 'Bearer abcd12345'}
data = {"create_mailbox": True, "mailbox_email_address": "user@test.local", "message_source_id": "abcd1234", "raw_message": "abcd1234"}
url = "https://api.platform.sublimesecurity.com/v1/live-flow/raw-messages/analyze"
response = sublime.sendReq(url=url, headers=headers, data=data)
mock.assert_called_once_with('POST', url=url, headers=headers, data=json.dumps(data))
self.assertIsNotNone(response)
def test_analyze(self):
output = '{"message_id":"abcd1234","raw_message_id":"abcd1234","canonical_id":"abcd1234","flagged_rules":null}'
artifactInput = '{"value":"RnJvbTogQWxpY2UgPGFsaWNlQGV4YW1wbGUuY29tPgpUbzogQm9iIDxib2JA","artifactType":"eml"}'
conf = {'base_url': 'https://api.platform.sublimesecurity.com', 'api_key': 'abcd12345', 'live_flow': False, 'mailbox_email_address': 'user@test.local'}
with patch('sublime.sublime.sendReq', new=MagicMock(return_value=json.loads(output))) as mock:
results = sublime.analyze(conf, artifactInput)
print(results)
self.assertEqual(results["summary"], "harmless")
mock.assert_called_once()

View File

@@ -128,6 +128,42 @@ sensoroni:
sensitive: False
advanced: True
forcedTypes: "[]string"
sublime_platform:
api_key:
description: API key for the Sublime Platform analyzer.
helpLink: cases.html
global: False
sensitive: True
advanced: True
forcedType: string
base_url:
description: Base URL for the Sublime Platform analyzer.
helpLink: cases.html
global: False
sensitive: False
advanced: True
forcedType: string
live_flow:
description: Determines if live flow analysis is used.
helpLink: cases.html
global: False
sensitive: False
advanced: True
forcedType: bool
mailbox_email_address:
description: Source mailbox address used for live flow analysis.
helpLink: cases.html
global: False
sensitive: False
advanced: True
forcedType: string
message_source_id:
description: ID of the message source used for live flow analysis.
helpLink: cases.html
global: False
sensitive: False
advanced: True
forcedType: string
urlscan:
api_key:
description: API key for the Urlscan analyzer.

View File

@@ -1240,7 +1240,7 @@ soc:
showSubtitle: true
- name: HTTP
description: HTTP with exe downloads
query: 'tags:http AND (file.resp_mime_types:dosexec OR file.resp_mime_types:executable) | groupby http.virtual_host'
query: 'tags:http AND file.resp_mime_types:*exec* | groupby http.virtual_host'
showSubtitle: true
- name: Intel
description: Intel framework hits grouped by indicator
@@ -1675,6 +1675,7 @@ soc:
labels:
- autonomous-system
- domain
- eml
- file
- filename
- fqdn

View File

@@ -37,12 +37,14 @@ function poll() {
function respond() {
file="$QUEUE_DIR/$1.response"
tmpfile="${file}.tmp"
response=$2
touch "$file"
chmod 660 "$file"
chown "$QUEUE_OWNER:$QUEUE_GROUP" "$file"
echo "$response" > "$file"
touch "$tmpfile"
chmod 660 "$tmpfile"
chown "$QUEUE_OWNER:$QUEUE_GROUP" "$tmpfile"
echo "$response" > "$tmpfile"
mv $tmpfile $file
}
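
respond() now writes the response to a temporary file and renames it into place, so a poller can never read a partially written response file. The same pattern in Python, for illustration only (permission and ownership handling from the original is omitted):

```python
import os

# Write-to-temp-then-rename: os.replace() swaps the file in atomically,
# so readers see either the old response or the complete new one.
def respond(queue_dir, request_id, response):
    final = os.path.join(queue_dir, f"{request_id}.response")
    tmp = final + ".tmp"
    with open(tmp, "w") as f:
        f.write(response + "\n")
    os.replace(tmp, final)

os.makedirs("/tmp/queue-demo", exist_ok=True)
respond("/tmp/queue-demo", "42", "ok")
print(open("/tmp/queue-demo/42.response").read())
```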
function list_minions() {