diff --git a/salt/sensoroni/defaults.yaml b/salt/sensoroni/defaults.yaml index 1a5c7beef..4ebd666a9 100644 --- a/salt/sensoroni/defaults.yaml +++ b/salt/sensoroni/defaults.yaml @@ -1,44 +1,58 @@ -sensoroni: - enabled: False - config: - analyze: - enabled: False - timeout_ms: 900000 - parallel_limit: 5 - node_checkin_interval_ms: 10000 - sensoronikey: - soc_host: - analyzers: - emailrep: - base_url: https://emailrep.io/ - api_key: - greynoise: - base_url: https://api.greynoise.io/ - api_key: - api_version: community - localfile: - file_path: [] - otx: - base_url: https://otx.alienvault.com/api/v1/ - api_key: - pulsedive: - base_url: https://pulsedive.com/api/ - api_key: - spamhaus: - lookup_host: zen.spamhaus.org - nameservers: [] - sublime_platform: - base_url: https://api.platform.sublimesecurity.com - api_key: - live_flow: False - mailbox_email_address: - message_source_id: - urlscan: - base_url: https://urlscan.io/api/v1/ - api_key: - enabled: False - visibility: public - timeout: 180 - virustotal: - base_url: https://www.virustotal.com/api/v3/search?query= - api_key: +sensoroni: + enabled: False + config: + analyze: + enabled: False + timeout_ms: 900000 + parallel_limit: 5 + node_checkin_interval_ms: 10000 + sensoronikey: + soc_host: + analyzers: + echotrail: + base_url: https://api.echotrail.io/insights/ + api_key: + elasticsearch: + base_url: + auth_user: + auth_pwd: + num_results: 10 + api_key: + index: _all + time_delta_minutes: 14400 + timestamp_field_name: '@timestamp' + map: {} + cert_path: + emailrep: + base_url: https://emailrep.io/ + api_key: + greynoise: + base_url: https://api.greynoise.io/ + api_key: + api_version: community + localfile: + file_path: [] + otx: + base_url: https://otx.alienvault.com/api/v1/ + api_key: + pulsedive: + base_url: https://pulsedive.com/api/ + api_key: + spamhaus: + lookup_host: zen.spamhaus.org + nameservers: [] + sublime_platform: + base_url: https://api.platform.sublimesecurity.com + api_key: + live_flow: False + 
mailbox_email_address: + message_source_id: + urlscan: + base_url: https://urlscan.io/api/v1/ + api_key: + enabled: False + visibility: public + timeout: 180 + virustotal: + base_url: https://www.virustotal.com/api/v3/search?query= + api_key: diff --git a/salt/sensoroni/files/analyzers/echotrail/README.md b/salt/sensoroni/files/analyzers/echotrail/README.md new file mode 100644 index 000000000..349f872bc --- /dev/null +++ b/salt/sensoroni/files/analyzers/echotrail/README.md @@ -0,0 +1,25 @@ +# EchoTrail + + +## Description +Submit a filename, hash, commandline to EchoTrail for analysis + +## Configuration Requirements + +In SOC, navigate to `Administration`, toggle `Show all configurable settings, including advanced settings.`, and navigate to `sensoroni` -> `analyzers` -> `echotrail`. + +![echotrail](https://github.com/RyHoa/securityonion/assets/129560634/43b55869-1fba-4907-8418-c0745c37237b) + + +The following configuration options are available for: + +``api_key`` - API key used for communication with the Echotrail API (Required) + +This value should be set in the ``sensoroni`` pillar, like so: + +``` +sensoroni: + analyzers: + echotrail: + api_key: $yourapikey +``` diff --git a/salt/sensoroni/files/analyzers/echotrail/echotrail.json b/salt/sensoroni/files/analyzers/echotrail/echotrail.json new file mode 100644 index 000000000..081643b0c --- /dev/null +++ b/salt/sensoroni/files/analyzers/echotrail/echotrail.json @@ -0,0 +1,10 @@ +{ + "name": "Echotrail", + "version": "0.1", + "author": "Security Onion Solutions", + "description": "This analyzer queries Echotrail to see if a related filename, hash, or commandline is considered malicious.", + "supportedTypes" : ["filename","hash","commandline"], + "baseUrl": "https://api.echotrail.io/insights/" + } + + \ No newline at end of file diff --git a/salt/sensoroni/files/analyzers/echotrail/echotrail.py b/salt/sensoroni/files/analyzers/echotrail/echotrail.py new file mode 100644 index 000000000..11d8931be --- /dev/null 
+++ b/salt/sensoroni/files/analyzers/echotrail/echotrail.py @@ -0,0 +1,67 @@ +import json +import os +import sys +import requests +import helpers +import argparse + +# for test usage: +# python3 echotrail.py '{"artifactType":"hash", "value":"438b6ccd84f4dd32d9684ed7d58fd7d1e5a75fe3f3d12ab6c788e6bb0ffad5e7"}' +# You will need to provide an API key in the .yaml file. + +def checkConfigRequirements(conf): + if not conf['api_key']: + sys.exit(126) + else: + return True + + +def sendReq(conf, observ_value): + # send a get requests using a user-provided API key and the API url + url = conf['base_url'] + observ_value + headers = {'x-api-key': conf['api_key']} + response = requests.request('GET', url=url, headers=headers) + return response.json() + + +def prepareResults(raw): + # checking for the 'filenames' key alone does + # not work when querying by filename. + # So, we can account for a hash query, a filename query, + # and anything else with these if statements. + if 'filenames' in raw.keys(): + summary = raw['filenames'][0][0] + elif 'tags' in raw.keys(): + summary = raw['tags'][0][0] + else: + summary = 'inconclusive' + status = 'info' + return {'response': raw, 'summary': summary, 'status': status} + + +def analyze(conf, input): + # put all of our methods together and return a properly formatted output. 
+ checkConfigRequirements(conf) + meta = helpers.loadMetadata(__file__) + data = helpers.parseArtifact(input) + helpers.checkSupportedType(meta, data['artifactType']) + response = sendReq(conf, data['value']) + return prepareResults(response) + + +def main(): + dir = os.path.dirname(os.path.realpath(__file__)) + parser = argparse.ArgumentParser( + description='Search Echotrail for a given artifact') + parser.add_argument( + 'artifact', help='the artifact represented in JSON format') + parser.add_argument('-c', '--config', metavar='CONFIG_FILE', default=dir + '/echotrail.yaml', + help='optional config file to use instead of the default config file') + args = parser.parse_args() + if args.artifact: + results = analyze(helpers.loadConfig(args.config), args.artifact) + print(json.dumps(results)) + + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/salt/sensoroni/files/analyzers/echotrail/echotrail.yaml b/salt/sensoroni/files/analyzers/echotrail/echotrail.yaml new file mode 100644 index 000000000..f5db57f24 --- /dev/null +++ b/salt/sensoroni/files/analyzers/echotrail/echotrail.yaml @@ -0,0 +1,3 @@ +base_url: "{{ salt['pillar.get']('sensoroni:analyzers:echotrail:base_url', 'https://api.echotrail.io/insights/') }}" +api_key: "{{ salt['pillar.get']('sensoroni:analyzers:echotrail:api_key', '') }}" + diff --git a/salt/sensoroni/files/analyzers/echotrail/echotrail_test.py b/salt/sensoroni/files/analyzers/echotrail/echotrail_test.py new file mode 100644 index 000000000..53b816cd4 --- /dev/null +++ b/salt/sensoroni/files/analyzers/echotrail/echotrail_test.py @@ -0,0 +1,61 @@ +from io import StringIO +import sys +from unittest.mock import patch, MagicMock +import unittest +import echotrail +import helpers + + +class TestEchoTrailMethods(unittest.TestCase): + def test_main_success(self): + with patch('sys.stdout', new=StringIO()) as mock_cmd: + with patch('echotrail.analyze', new=MagicMock(return_value={'test': 'val'})) as mock: + sys.argv = ["test", 
"test"] + echotrail.main() + expected = '{"test": "val"}\n' + self.assertEqual(mock_cmd.getvalue(), expected) + mock.assert_called_once() + + def test_main_missing_input(self): + with patch('sys.exit', new=MagicMock()) as sysmock: + with patch('sys.stderr', new=StringIO()) as mock_stderr: + sys.argv = ["cmd"] + echotrail.main() + self.assertEqual(mock_stderr.getvalue(), "usage: cmd [-h] [-c CONFIG_FILE] artifact\ncmd: error: the following arguments are required: artifact\n") + + def test_checkConfigRequirements(self): + conf = {'base_url': 'https://www.randurl.xyz/', 'api_key':''} + with self.assertRaises(SystemExit) as cm: + echotrail.checkConfigRequirements(conf) + self.assertEqual(cm.exception.code, 126) + + def test_sendReq(self): + with patch('requests.request', new=MagicMock(return_value=MagicMock())) as mock: + response = echotrail.sendReq(conf={'base_url': 'https://www.randurl.xyz/', 'api_key':'randkey'}, observ_value='example_data') + self.assertIsNotNone(response) + + def test_prepareResults_noinput(self): + raw = {} + sim_results = {'response': raw, + 'status': 'info', 'summary': 'inconclusive'} + results = echotrail.prepareResults(raw) + self.assertEqual(results, sim_results) + + def test_prepareResults_none(self): + raw = {'query_status': 'no_result'} + sim_results = {'response': raw, + 'status': 'info', 'summary': 'inconclusive'} + results = echotrail.prepareResults(raw) + self.assertEqual(results, sim_results) + + def test_analyze(self): + sendReqOutput = {'threat': 'no_result'} + input = '{"artifactType":"hash", "value":"1234"}' + prepareResultOutput = {'response': '', + 'summary': 'inconclusive', 'status': 'info'} + conf = {"api_key": "xyz"} + + with patch('echotrail.sendReq', new=MagicMock(return_value=sendReqOutput)) as mock: + with patch('echotrail.prepareResults', new=MagicMock(return_value=prepareResultOutput)) as mock2: + results = echotrail.analyze(conf, input) + self.assertEqual(results["summary"], "inconclusive") diff --git 
a/salt/sensoroni/files/analyzers/echotrail/requirements.txt b/salt/sensoroni/files/analyzers/echotrail/requirements.txt new file mode 100644 index 000000000..925ada01e --- /dev/null +++ b/salt/sensoroni/files/analyzers/echotrail/requirements.txt @@ -0,0 +1,2 @@ +requests>=2.31.0 +pyyaml>=6.0 diff --git a/salt/sensoroni/files/analyzers/echotrail/source-packages/PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl b/salt/sensoroni/files/analyzers/echotrail/source-packages/PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl new file mode 100644 index 000000000..dac873718 Binary files /dev/null and b/salt/sensoroni/files/analyzers/echotrail/source-packages/PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl differ diff --git a/salt/sensoroni/files/analyzers/echotrail/source-packages/certifi-2023.11.17-py3-none-any.whl b/salt/sensoroni/files/analyzers/echotrail/source-packages/certifi-2023.11.17-py3-none-any.whl new file mode 100644 index 000000000..de0787f64 Binary files /dev/null and b/salt/sensoroni/files/analyzers/echotrail/source-packages/certifi-2023.11.17-py3-none-any.whl differ diff --git a/salt/sensoroni/files/analyzers/echotrail/source-packages/charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl b/salt/sensoroni/files/analyzers/echotrail/source-packages/charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl new file mode 100644 index 000000000..b1cd02e9d Binary files /dev/null and b/salt/sensoroni/files/analyzers/echotrail/source-packages/charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl differ diff --git a/salt/sensoroni/files/analyzers/echotrail/source-packages/idna-3.6-py3-none-any.whl b/salt/sensoroni/files/analyzers/echotrail/source-packages/idna-3.6-py3-none-any.whl new file mode 100644 index 000000000..fdf65ae30 Binary files /dev/null and 
b/salt/sensoroni/files/analyzers/echotrail/source-packages/idna-3.6-py3-none-any.whl differ diff --git a/salt/sensoroni/files/analyzers/echotrail/source-packages/requests-2.31.0-py3-none-any.whl b/salt/sensoroni/files/analyzers/echotrail/source-packages/requests-2.31.0-py3-none-any.whl new file mode 100644 index 000000000..bfd5d2ea9 Binary files /dev/null and b/salt/sensoroni/files/analyzers/echotrail/source-packages/requests-2.31.0-py3-none-any.whl differ diff --git a/salt/sensoroni/files/analyzers/echotrail/source-packages/urllib3-2.1.0-py3-none-any.whl b/salt/sensoroni/files/analyzers/echotrail/source-packages/urllib3-2.1.0-py3-none-any.whl new file mode 100644 index 000000000..0951ac354 Binary files /dev/null and b/salt/sensoroni/files/analyzers/echotrail/source-packages/urllib3-2.1.0-py3-none-any.whl differ diff --git a/salt/sensoroni/files/analyzers/elasticsearch/README.md b/salt/sensoroni/files/analyzers/elasticsearch/README.md new file mode 100644 index 000000000..6020c92ad --- /dev/null +++ b/salt/sensoroni/files/analyzers/elasticsearch/README.md @@ -0,0 +1,58 @@ +# Elasticsearch +Elasticsearch returns an informational breakdown of the queried observable. + +## Overview +Elasticsearch facilitates queries within the user's database. User can use these observable type: hash, domain, file, filename, fqdn, gimphash, IP, mail, mail_subject, regexp, registry, telfhash, tlsh, uri_path, URL, and user-agent values. + +## Description +Configure and submit the field you want to search for in your database. Ex: domain, hash, IP, or URL + +## Requirement +An API key or User Credentials is necessary for utilizing Elasticsearch. + +## Configuration Requirements + +In SOC, navigate to `Administration`, toggle `Show all configurable settings, including advanced settings.`, and navigate to `sensoroni` -> `analyzers` -> `elasticsearch`. 
+ +![image](https://github.com/RyHoa/securityonion/assets/129560634/31c612d3-39f8-4d9e-881b-210c87a56b50) + + +The following configuration options are available for: + +``api_key`` - API key used for communication with the Elasticsearch API (Optional if auth_user and auth_pwd are used) + +``auth_user`` - Username used for communication with Elasticsearch + +``auth_pwd`` - Password used for communication with Elasticsearch + +``base_url`` - URL that connect to Elasticsearch VM on port 9200. Example format :"https://:9200 + +``index`` - The index of the data in Elasticsearch database. Default value is _all. + +``num_results`` - The max number of results will be displayed. Default value is 10. + +``time_delta_minutes`` - Range of time the users want the data in minutes. The value is in minutes and will be converted to days. Defaults value is is 1440. + +``timestamp_field_name`` - The name of your timestamp field name. Default value is @timestamp. + +``map`` - This is the dictionary of the field name in the user's Elasticsearch database. Example value {"hash":"userhashfieldname"}. This value will map the Security Onion hash field name to user hash field name. 
+ +``cert_path`` - This is the path to the certificate in the host for authentication purpose (Required) + +This value should be set in the ``sensoroni`` pillar, like so: + +``` +sensoroni: + analyzers: + elasticsearch: + base_url:$yourbase_url + api_key: $yourapi_key + numResults:$yournum_results + auth_user:$yourauth_user + auth_pwd:$yourauth_pwd + index:$yourindex + timeDeltaMinutes:$yourtime_delta_minutes + timestampFieldName:$yourtimestamp_field_name + cert_path:$yourcert_path + map:$yourmap +``` diff --git a/salt/sensoroni/files/analyzers/elasticsearch/elasticsearch.json b/salt/sensoroni/files/analyzers/elasticsearch/elasticsearch.json new file mode 100644 index 000000000..c0d14de56 --- /dev/null +++ b/salt/sensoroni/files/analyzers/elasticsearch/elasticsearch.json @@ -0,0 +1,9 @@ +{ + "name": "Elastic Search", + "version": "0.1", + "author": "Security Onion Solutions", + "description": "Queries an ElasticSearch instance for specified field values.", + "supportedTypes": ["hash", "ip", "domain"] +} + + \ No newline at end of file diff --git a/salt/sensoroni/files/analyzers/elasticsearch/elasticsearch.py b/salt/sensoroni/files/analyzers/elasticsearch/elasticsearch.py new file mode 100644 index 000000000..90d3f44de --- /dev/null +++ b/salt/sensoroni/files/analyzers/elasticsearch/elasticsearch.py @@ -0,0 +1,138 @@ +from datetime import datetime, timedelta +import argparse +import requests +import helpers +import json +import sys +import os + +# As it stands, this analyzer does not support querying for mixed-case fields without disregarding case completely. +# So the current version will only support querying for all-lowercase alphanumerical values. + +# default usage is: +# python3 elasticsearch.py '{"artifactType":"hash", "value":"*"}' + +# To use outside of a Security Onion box, pass in '-c test.yaml' at the end +# of the above command to give this analyzer some test values. You may edit the +# values in the test.yaml file freely. 
+ + +def checkConfigRequirements(conf): + # if the user hasn't given valid configurables, quit. + if not conf['num_results']: + sys.exit(126) + if not conf['time_delta_minutes']: + sys.exit(126) + if (not conf['auth_user'] or not conf['auth_pwd']) and not conf['api_key']: + sys.exit(126) + if not conf['index']: + sys.exit(126) + if not conf['base_url']: + sys.exit(126) + if not conf['timestamp_field_name']: + sys.exit(126) + if not conf['cert_path']: + sys.exit(126) + return True + + +def buildReq(conf, input): + # structure a query to send to the Elasticsearch machine + # based off of user configurable values + num_results = conf['num_results'] + + if conf['map'] != None: + mappings = conf['map'] + else: + mappings = dict() + + cur_time = datetime.now() + start_time = cur_time - timedelta(minutes=int(conf['time_delta_minutes'])) + + if input['artifactType'] in mappings: + type = mappings[input['artifactType']] + else: + type = input['artifactType'] + + query = { + "from": 0, + "size": num_results, + "query": { + "bool": { + "must": [{ + "wildcard": { + type: input['value'], + }, + } + ], + "filter": { + "range": { + conf['timestamp_field_name']: { + "gte": start_time.strftime('%Y-%m-%dT%H:%M:%S'), + "lte": cur_time.strftime('%Y-%m-%dT%H:%M:%S') + } + } + } + } + } + } + + return json.dumps(query) + + +def sendReq(conf, query): + # send configured query with even more user specification + headers = {} + url = conf['base_url'] + conf['index'] + '/_search' + uname = conf['auth_user'] + pwd = conf['auth_pwd'] + apikey = conf['api_key'] + cert_path = conf['cert_path'] + + if pwd and uname: + headers = { + 'Content-Type': 'application/json', + } + response = requests.post(str(url), auth=( + uname, pwd), verify=cert_path, data=query, headers=headers) + elif apikey: + headers = { + 'Content-Type': 'application/json', + 'Authorization': f"Apikey {apikey}" + } + response = requests.post( + str(url), verify=cert_path, data=query, headers=headers) + + return response.json() + 
+ +def prepareResults(raw): + # returns raw API response, amount of hits found, and status of request in order + summary = f"Documents returned: {len(raw['hits']['hits'])}" + status = 'info' + return {'response': raw, 'summary': summary, 'status': status} + + +def analyze(conf, input): + checkConfigRequirements(conf) + data = json.loads(input) + query = buildReq(conf, data) + response = sendReq(conf, query) + return prepareResults(response) + + +def main(): + dir = os.path.dirname(os.path.realpath(__file__)) + parser = argparse.ArgumentParser( + description='Search Elastic Search for a given artifact?') + parser.add_argument('artifact', help='required artifact') + parser.add_argument('-c', '--config', metavar='CONFIG_FILE', default=dir + '/elasticsearch.yaml', + help='optional config file to use instead of the default config file') + args = parser.parse_args() + if args.artifact: + results = analyze(helpers.loadConfig(args.config), args.artifact) + print(json.dumps(results)) + + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/salt/sensoroni/files/analyzers/elasticsearch/elasticsearch.yaml b/salt/sensoroni/files/analyzers/elasticsearch/elasticsearch.yaml new file mode 100644 index 000000000..2633569ad --- /dev/null +++ b/salt/sensoroni/files/analyzers/elasticsearch/elasticsearch.yaml @@ -0,0 +1,10 @@ +base_url: "{{ salt['pillar.get']('sensoroni:analyzers:elasticsearch:base_url', '') }}" +auth_user: "{{ salt['pillar.get']('sensoroni:analyzers:elasticsearch:auth_user', '') }}" +auth_pwd: "{{ salt['pillar.get']('sensoroni:analyzers:elasticsearch:auth_pwd', '') }}" +num_results: "{{ salt['pillar.get']('sensoroni:analyzers:elasticsearch:num_results', 10) }}" +api_key: "{{ salt['pillar.get']('sensoroni:analyzers:elasticsearch:api_key', '') }}" +index: "{{ salt['pillar.get']('sensoroni:analyzers:elasticsearch:index', '_all') }}" +time_delta_minutes: "{{ salt['pillar.get']('sensoroni:analyzers:elasticsearch:time_delta_minutes', 14400) }}" 
+timestamp_field_name: "{{ salt['pillar.get']('sensoroni:analyzers:elasticsearch:timestamp_field_name', '@timestamp') }}" +map: {{ salt['pillar.get']('sensoroni:analyzers:elasticsearch:map', '') }} +cert_path: "{{ salt['pillar.get']('sensoroni:analyzers:elasticsearch:cert_path', '') }}" diff --git a/salt/sensoroni/files/analyzers/elasticsearch/elasticsearch_test.py b/salt/sensoroni/files/analyzers/elasticsearch/elasticsearch_test.py new file mode 100644 index 000000000..43177fdfc --- /dev/null +++ b/salt/sensoroni/files/analyzers/elasticsearch/elasticsearch_test.py @@ -0,0 +1,194 @@ +from io import StringIO +import sys +from unittest.mock import patch, MagicMock +import unittest +import elasticsearch +import helpers +import json +from datetime import datetime, timedelta + +class TestElasticSearchMethods(unittest.TestCase): + '''Test that the analyzer main method work as expect when not given enough input''' + def test_main_missing_input(self): + with patch('sys.exit', new=MagicMock()) as sysmock: + with patch('sys.stderr', new=StringIO()) as mock_stderr: + sys.argv = ["cmd"] + elasticsearch.main() + self.assertEqual(mock_stderr.getvalue(), "usage: cmd [-h] [-c CONFIG_FILE] artifact\ncmd: error: the following arguments are required: artifact\n") + sysmock.assert_called_once_with(2) + + '''Test that analyzer main method work as expect when all required input is given''' + def test_main_success(self): + conf = {"base_url":"test", "auth_user":"test", "auth_pwd":"test", "num_results":10,"api_key":"test","index":"test","time_delta_minutes": 14400,"timestamp_field_name":"test", "map":{}, "cert_path":""} + with patch('elasticsearch.helpers.loadConfig', new=MagicMock(return_value=conf))as mock_yaml: + with patch('sys.stdout', new=StringIO()) as mock_cmd: + with patch('elasticsearch.analyze', new=MagicMock(return_value={'foo': 'bar'})) as mock: + sys.argv = ["cmd", "conf"] + elasticsearch.main() + expected = '{"foo": "bar"}\n' + self.assertEqual(mock_cmd.getvalue(), 
expected) + mock.assert_called_once() + mock_yaml.assert_called_once() + + '''Test that checks for empty and none values in configurables''' + def test_checkConfigRequirements(self): + conf = {"base_url":"", "auth_user":"", "auth_pwd":"", "num_results":None,"api_key":"","index":"","time_delta_minutes": None,"timestamp_field_name":"", "map":{}, "cert_path":""} + with self.assertRaises(SystemExit) as cm: + elasticsearch.checkConfigRequirements(conf) + self.assertEqual(cm.exception.code, 126) + + + '''Test that checks buildReq method, by comparing a mock buildReq result with an expectedQuery, used a mock object to simulate an expectedQuery + since Elasticsearch buildReq uses values in the config''' + def test_buildReq(self): + numberOfResults = 1 + observableType = "hash" + expectedQuery = { + "from": 0, + "size": numberOfResults, + "query": { + "bool": { + "must": [{ + "wildcard": { + observableType: observableType, + }, + } + ], + "filter": { + "range": { + "@timestamp": { + "gte": ('2023-11-29T14:23:45'), + "lte": ('2023-11-29T14:23:45') + } + } + } + } + } + } + with patch('elasticsearch.buildReq', new=MagicMock(return_value=expectedQuery)) as mock: + response = elasticsearch.buildReq(observableType,numberOfResults) + self.assertEqual(json.dumps(response), json.dumps(expectedQuery)) + mock.assert_called_once() + + def test_wrongbuildReq(self): + result={'map':'123','artifactType':'hash','timestamp_field_name':'abc', 'time_delta_minutes':14400, 'num_results':10,'value':'0' } + cur_time = datetime.now() + start_time = cur_time - timedelta(minutes=result['time_delta_minutes']) + query=elasticsearch.buildReq(result, result) + comparequery=json.dumps({ + "from": 0, + "size":10, + "query": { + "bool":{ + "must": [{ + "wildcard": { + 'hash': result['value'], + }, + } + ], + "filter":{ + "range":{ + result['timestamp_field_name']:{ + "gte": start_time.strftime('%Y-%m-%dT%H:%M:%S'), + "lte": cur_time.strftime('%Y-%m-%dT%H:%M:%S') + } + } + } + } + } + + }) + 
self.assertEqual(query, comparequery ) + + def test_rightbuildReq(self): + result={'map':{'hash':'testingHash'},'artifactType':'hash','timestamp_field_name':'abc', 'time_delta_minutes':14400, 'num_results':10,'value':'0'} + cur_time = datetime.now() + start_time = cur_time - timedelta(minutes=result['time_delta_minutes']) + query=elasticsearch.buildReq(result, result) + comparequery=json.dumps({ + "from": 0, + "size": 10, + "query": { + "bool":{ + "must":[{ + "wildcard": { + result['map'][result['artifactType']]: result['value'], + }, + } + ] + , + "filter":{ + "range":{ + result['timestamp_field_name']:{ + "gte": start_time.strftime('%Y-%m-%dT%H:%M:%S'), + "lte": cur_time.strftime('%Y-%m-%dT%H:%M:%S') + } + } + } + } + } + }) + self.assertEqual(query, comparequery ) + + def test_rightbuildReq100result(self): + result={'map':{'hash':'testingHash'},'artifactType':'hash','timestamp_field_name':'abc', 'time_delta_minutes':14400, 'num_results':100,'value':'0'} + cur_time = datetime.now() + start_time = cur_time - timedelta(minutes=result['time_delta_minutes']) + query=elasticsearch.buildReq(result, result) + comparequery=json.dumps({ + "from": 0, + "size": 100, + "query": { + "bool":{ + "must":[{ + "wildcard": { + result['map'][result['artifactType']]: result['value'], + }, + } + ] + , + "filter":{ + "range":{ + result['timestamp_field_name']:{ + "gte": start_time.strftime('%Y-%m-%dT%H:%M:%S'), + "lte": cur_time.strftime('%Y-%m-%dT%H:%M:%S') + } + } + } + } + } + }) + self.assertEqual(query, comparequery ) + + + '''Test that checks sendReq method to expect a response from a requests.post''' + def test_sendReq(self): + conf = {"base_url":"test", "auth_user":"test", "auth_pwd":"test", "api_key":"test","index":"test", "cert_path":""} + with patch('requests.post', new=MagicMock(return_value=MagicMock())) as mock: + response = elasticsearch.sendReq(conf, 'example_query') + self.assertIsNotNone(response) + + '''Test that checks prepareResults method, by comparing a mock 
prepareResults return_value with an expectedResult''' + def test_prepareResults(self): + summary = "Documents returned: 5" + status = 'info' + raw = {'_id': "0", "hash": "123"} + expectedResult = {'response': raw, 'summary': summary, 'status': status} + + with patch('elasticsearch.prepareResults', new=MagicMock(return_value=expectedResult)) as mock: + response = elasticsearch.prepareResults(raw) + self.assertEqual(expectedResult, response) + mock.assert_called_once() + + '''Test that checks analyze method, simulated sendReq and prepareResults with 2 mock objects and variables sendReqOutput and prepareResultOutput, + input created for analyze method call and then we compared results['summary'] with 'Documents returned: 5' ''' + def test_analyze(self): + sendReqOutput = {'_id': "0", "hash": "123"} + input = '{"artifactType":"hash", "value":"123"}' + prepareResultOutput = {'response': {'_id': "0", "hash": "123"},'summary': "Documents returned: 5", 'status': 'info'} + conf = {"base_url":"test", "auth_user":"test", "auth_pwd":"test", "num_results":10,"api_key":"test","index":"test","time_delta_minutes": 14400,"timestamp_field_name":"test", "map":{}, "cert_path":"test"} + with patch('elasticsearch.sendReq', new=MagicMock(return_value=sendReqOutput)) as mock: + with patch('elasticsearch.prepareResults', new=MagicMock(return_value=prepareResultOutput)) as mock2: + results = elasticsearch.analyze(conf, input) + self.assertEqual(results["summary"], "Documents returned: 5") + mock.assert_called_once() \ No newline at end of file diff --git a/salt/sensoroni/files/analyzers/elasticsearch/requirements.txt b/salt/sensoroni/files/analyzers/elasticsearch/requirements.txt new file mode 100644 index 000000000..43b300e1d --- /dev/null +++ b/salt/sensoroni/files/analyzers/elasticsearch/requirements.txt @@ -0,0 +1,3 @@ +requests>=2.31.0 +pyyaml>=6.0 +urllib3>=2.1.0 \ No newline at end of file diff --git 
a/salt/sensoroni/files/analyzers/elasticsearch/source-packages/PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl b/salt/sensoroni/files/analyzers/elasticsearch/source-packages/PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl new file mode 100644 index 000000000..dac873718 Binary files /dev/null and b/salt/sensoroni/files/analyzers/elasticsearch/source-packages/PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl differ diff --git a/salt/sensoroni/files/analyzers/elasticsearch/source-packages/certifi-2023.11.17-py3-none-any.whl b/salt/sensoroni/files/analyzers/elasticsearch/source-packages/certifi-2023.11.17-py3-none-any.whl new file mode 100644 index 000000000..de0787f64 Binary files /dev/null and b/salt/sensoroni/files/analyzers/elasticsearch/source-packages/certifi-2023.11.17-py3-none-any.whl differ diff --git a/salt/sensoroni/files/analyzers/elasticsearch/source-packages/charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl b/salt/sensoroni/files/analyzers/elasticsearch/source-packages/charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl new file mode 100644 index 000000000..b1cd02e9d Binary files /dev/null and b/salt/sensoroni/files/analyzers/elasticsearch/source-packages/charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl differ diff --git a/salt/sensoroni/files/analyzers/elasticsearch/source-packages/idna-3.6-py3-none-any.whl b/salt/sensoroni/files/analyzers/elasticsearch/source-packages/idna-3.6-py3-none-any.whl new file mode 100644 index 000000000..fdf65ae30 Binary files /dev/null and b/salt/sensoroni/files/analyzers/elasticsearch/source-packages/idna-3.6-py3-none-any.whl differ diff --git a/salt/sensoroni/files/analyzers/elasticsearch/source-packages/requests-2.31.0-py3-none-any.whl b/salt/sensoroni/files/analyzers/elasticsearch/source-packages/requests-2.31.0-py3-none-any.whl new file mode 100644 index 
000000000..bfd5d2ea9 Binary files /dev/null and b/salt/sensoroni/files/analyzers/elasticsearch/source-packages/requests-2.31.0-py3-none-any.whl differ diff --git a/salt/sensoroni/files/analyzers/elasticsearch/source-packages/urllib3-2.1.0-py3-none-any.whl b/salt/sensoroni/files/analyzers/elasticsearch/source-packages/urllib3-2.1.0-py3-none-any.whl new file mode 100644 index 000000000..0951ac354 Binary files /dev/null and b/salt/sensoroni/files/analyzers/elasticsearch/source-packages/urllib3-2.1.0-py3-none-any.whl differ diff --git a/salt/sensoroni/files/analyzers/malwarebazaar/README.md b/salt/sensoroni/files/analyzers/malwarebazaar/README.md new file mode 100644 index 000000000..6ec28f79e --- /dev/null +++ b/salt/sensoroni/files/analyzers/malwarebazaar/README.md @@ -0,0 +1,5 @@ +# Malwarebazaar + +## Description +Submit a gimphash, hash, tlsh, telfhash to Malwarebazaar for analysis. + diff --git a/salt/sensoroni/files/analyzers/malwarebazaar/__init__.py b/salt/sensoroni/files/analyzers/malwarebazaar/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/salt/sensoroni/files/analyzers/malwarebazaar/malwarebazaar.json b/salt/sensoroni/files/analyzers/malwarebazaar/malwarebazaar.json new file mode 100644 index 000000000..7eb43f5ba --- /dev/null +++ b/salt/sensoroni/files/analyzers/malwarebazaar/malwarebazaar.json @@ -0,0 +1,8 @@ +{ + "name": "Malwarebazaar", + "version": "0.1", + "author": "Security Onion Solutions", + "description": "This analyzer queries Malwarebazaar to see if a hash, gimphash, tlsh, or telfhash is considered malicious.", + "supportedTypes" : ["gimphash","hash","tlsh", "telfhash"], + "baseUrl": "https://mb-api.abuse.ch/api/v1/" + } \ No newline at end of file diff --git a/salt/sensoroni/files/analyzers/malwarebazaar/malwarebazaar.py b/salt/sensoroni/files/analyzers/malwarebazaar/malwarebazaar.py new file mode 100644 index 000000000..1c2b93d1d --- /dev/null +++ b/salt/sensoroni/files/analyzers/malwarebazaar/malwarebazaar.py @@ 
"""MalwareBazaar analyzer for Sensoroni.

Supports querying for hash, gimphash, tlsh, and telfhash.
Usage: python3 malwarebazaar.py '{"artifactType":"x", "value":"y"}'
"""
import json
import sys


def buildReq(observ_type, observ_value):
    """Build the MalwareBazaar POST payload for the given observable.

    gimphash/telfhash/tlsh each have a dedicated query name
    ('get_<type>'); everything else (plain hashes) uses 'get_info'.
    """
    unique_types = {'gimphash', 'telfhash', 'tlsh'}
    qtype = 'get_' + observ_type if observ_type in unique_types else 'get_info'
    return {'query': qtype, observ_type: observ_value}


def sendReq(meta, query):
    """POST the compiled query to the API and return the decoded JSON body."""
    # Imported lazily so this module can be loaded (and unit tested)
    # without the analyzer runtime dependencies installed.
    import requests
    response = requests.post(meta['baseUrl'], query)
    return response.json()


def isInJson(data, target_string, maxdepth=0):
    """Recursively search a decoded JSON object for an occurrence of a string.

    Returns True when target_string occurs (case-insensitively) inside any
    string value nested in data. Recursion is capped at an arbitrary depth
    of 1000. maxdepth defaults to 0 so callers may omit it (the original
    required it, which made the call in prepareResults raise TypeError).
    """
    if maxdepth > 1000:
        return False

    if isinstance(data, dict):
        values = data.values()
    elif isinstance(data, list):
        values = data
    else:
        return False

    for value in values:
        if isinstance(value, (dict, list)):
            # recursive call into nested containers
            if isInJson(value, target_string, maxdepth + 1):
                return True
        elif isinstance(value, str) and target_string in value.lower():
            # found target string
            return True

    return False


def prepareResults(raw):
    """Parse the raw API response and return {'response', 'summary', 'status'}.

    The status is derived from the highest per-vendor maliciousness
    indicator, normalized to a score out of 100.
    """
    if raw == {}:
        return {'response': raw, 'summary': 'internal_failure', 'status': 'caution'}

    if raw['query_status'] != 'ok':
        return {'response': raw, 'summary': 'no result', 'status': 'info'}

    parsed = raw['data'][0]
    vendor_data = parsed['vendor_intel']

    # Summary: prefer the signature, then the first tag, then YOROI's
    # detection name. .get() avoids KeyError on absent keys, and the
    # default prevents the UnboundLocalError the original hit when
    # none of the three fields were present.
    summary = ''
    if parsed.get('signature'):
        summary = parsed['signature']
    elif parsed.get('tags'):
        summary = str(parsed['tags'][0])
    elif vendor_data.get('YOROI_YOMI'):
        summary = vendor_data['YOROI_YOMI']['detection']

    # Gauge vendors to approximate a status, normalized to a value out of
    # 100; only the highest indicator value is kept.
    score = 0
    if 'vxCube' in vendor_data:
        score = max(score, int(vendor_data['vxCube']['maliciousness']))
    if 'Triage' in vendor_data:
        score = max(score, int(vendor_data['Triage']['score']) * 10)
    if 'DocGuard' in vendor_data:
        score = max(score, int(vendor_data['DocGuard']['alertlevel']) * 10)
    if 'YOROI_YOMI' in vendor_data:
        score = max(score, int(float(vendor_data['YOROI_YOMI']['score'])) * 100)
    if 'Inquest' in vendor_data and vendor_data['Inquest']['verdict'] == 'MALICIOUS':
        score = max(score, 100)
    if 'ReversingLabs' in vendor_data and vendor_data['ReversingLabs']['status'] == 'MALICIOUS':
        score = max(score, 100)
    if 'Spamhaus_HBL' in vendor_data and vendor_data['Spamhaus_HBL'][0]['detection'] == 'MALICIOUS':
        score = max(score, 100)

    # Compute status: any vendor saying "MALICIOUS" anywhere in the
    # response is treated as a threat regardless of score.
    if score >= 75 or isInJson(raw, 'malicious'):
        status = 'threat'
    elif score >= 50:
        status = 'caution'
    elif score >= 25:
        status = 'info'
    else:
        status = 'ok'

    return {'response': raw, 'summary': summary, 'status': status}


def analyze(input):
    """Validate the observable, query MalwareBazaar, and return the verdict."""
    import helpers  # lazy: project-local module, resolved by the analyzer runtime
    data = json.loads(input)
    meta = helpers.loadMetadata(__file__)
    helpers.checkSupportedType(meta, data["artifactType"])

    if data['artifactType'] in ('tlsh', 'gimphash', 'telfhash'):
        # For TLSH/telfhash/gimphash we query twice: the first query
        # resolves the fuzzy hash to a concrete sha256; the second
        # fetches the more detailed report for that sha256.
        initialRaw = sendReq(meta, buildReq(data['artifactType'], data['value']))
        if initialRaw['query_status'] != 'ok':
            # Invalid fuzzy hash: do not double-query.
            return prepareResults(initialRaw)
        data['artifactType'] = 'hash'
        data['value'] = initialRaw['data'][0]['sha256_hash']

    response = sendReq(meta, buildReq(data['artifactType'], data['value']))
    return prepareResults(response)


def main():
    """CLI entry point: expects a single JSON argument, prints the verdict."""
    if len(sys.argv) == 2:
        print(json.dumps(analyze(sys.argv[1])))
    else:
        print("ERROR: Input is not in proper JSON format")


if __name__ == '__main__':
    main()
from io import StringIO
import sys
import unittest
from unittest.mock import MagicMock, patch

import malwarebazaar


class TestMalwarebazaarMethods(unittest.TestCase):
    """Unit tests for the MalwareBazaar analyzer helpers."""

    def test_main_missing_input(self):
        # No CLI argument: main() must print the usage error.
        with patch('sys.stdout', new=StringIO()) as mock_cmd:
            sys.argv = ["cmd"]
            malwarebazaar.main()
        self.assertEqual(mock_cmd.getvalue(),
                         'ERROR: Input is not in proper JSON format\n')

    def test_main_success(self):
        # One argument: main() prints the JSON-encoded analyze() result.
        with patch('sys.stdout', new=StringIO()) as mock_cmd, \
                patch('malwarebazaar.analyze',
                      new=MagicMock(return_value={'test': 'val'})) as mock_analyze:
            sys.argv = ["cmd", "input"]
            malwarebazaar.main()
            self.assertEqual(mock_cmd.getvalue(), '{"test": "val"}\n')
            mock_analyze.assert_called_once()

    def test_analyze(self):
        """Drive analyze() for hash, tlsh, and gimphash inputs with sendReq
        and prepareResults mocked, verifying the mocked verdict is returned."""
        send_req_output = {'threat': 'no_result', 'query_status': 'ok',
                           'data': [{'sha256_hash': 'notavalidhash'}]}
        prepare_output = {'response': '', 'summary': 'no result', 'status': 'info'}
        observables = ['{"artifactType":"hash", "value":"1234"}',
                       '{"artifactType":"tlsh", "value":"1234"}',
                       '{"artifactType":"gimphash", "value":"1234"}']

        with patch('malwarebazaar.sendReq',
                   new=MagicMock(return_value=send_req_output)) as mock_send, \
                patch('malwarebazaar.prepareResults',
                      new=MagicMock(return_value=prepare_output)):
            for observable in observables:
                verdict = malwarebazaar.analyze(observable)
                self.assertEqual(verdict["summary"], prepare_output['summary'])
                self.assertEqual(verdict["status"], "info")
            mock_send.assert_called()

    def test_prepareResults_illegal_search_term(self):
        # An illegal search term maps to an informational "no result".
        raw = {'query_status': 'illegal_search_term'}
        self.assertEqual(malwarebazaar.prepareResults(raw),
                         {'response': raw, 'status': 'info', 'summary': 'no result'})

    def test_buildReqGimqhash(self):
        self.assertEqual(malwarebazaar.buildReq('gimphash', ''),
                         {'query': 'get_gimphash', 'gimphash': ''})

    def test_buildReqHash(self):
        self.assertEqual(malwarebazaar.buildReq('hash', ''),
                         {'query': 'get_info', 'hash': ''})

    def test_buildReqtlshhash(self):
        self.assertEqual(malwarebazaar.buildReq('tlsh', ''),
                         {'query': 'get_tlsh', 'tlsh': ''})
No newline at end of file diff --git a/salt/sensoroni/files/analyzers/malwarebazaar/requirements.txt b/salt/sensoroni/files/analyzers/malwarebazaar/requirements.txt new file mode 100644 index 000000000..925ada01e --- /dev/null +++ b/salt/sensoroni/files/analyzers/malwarebazaar/requirements.txt @@ -0,0 +1,2 @@ +requests>=2.31.0 +pyyaml>=6.0 diff --git a/salt/sensoroni/files/analyzers/malwarebazaar/source-packages/PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl b/salt/sensoroni/files/analyzers/malwarebazaar/source-packages/PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl new file mode 100644 index 000000000..dac873718 Binary files /dev/null and b/salt/sensoroni/files/analyzers/malwarebazaar/source-packages/PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl differ diff --git a/salt/sensoroni/files/analyzers/malwarebazaar/source-packages/certifi-2023.11.17-py3-none-any.whl b/salt/sensoroni/files/analyzers/malwarebazaar/source-packages/certifi-2023.11.17-py3-none-any.whl new file mode 100644 index 000000000..de0787f64 Binary files /dev/null and b/salt/sensoroni/files/analyzers/malwarebazaar/source-packages/certifi-2023.11.17-py3-none-any.whl differ diff --git a/salt/sensoroni/files/analyzers/malwarebazaar/source-packages/charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl b/salt/sensoroni/files/analyzers/malwarebazaar/source-packages/charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl new file mode 100644 index 000000000..b1cd02e9d Binary files /dev/null and b/salt/sensoroni/files/analyzers/malwarebazaar/source-packages/charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl differ diff --git a/salt/sensoroni/files/analyzers/malwarebazaar/source-packages/idna-3.6-py3-none-any.whl b/salt/sensoroni/files/analyzers/malwarebazaar/source-packages/idna-3.6-py3-none-any.whl new file mode 100644 index 000000000..fdf65ae30 Binary 
files /dev/null and b/salt/sensoroni/files/analyzers/malwarebazaar/source-packages/idna-3.6-py3-none-any.whl differ diff --git a/salt/sensoroni/files/analyzers/malwarebazaar/source-packages/requests-2.31.0-py3-none-any.whl b/salt/sensoroni/files/analyzers/malwarebazaar/source-packages/requests-2.31.0-py3-none-any.whl new file mode 100644 index 000000000..bfd5d2ea9 Binary files /dev/null and b/salt/sensoroni/files/analyzers/malwarebazaar/source-packages/requests-2.31.0-py3-none-any.whl differ diff --git a/salt/sensoroni/files/analyzers/malwarebazaar/source-packages/urllib3-2.1.0-py3-none-any.whl b/salt/sensoroni/files/analyzers/malwarebazaar/source-packages/urllib3-2.1.0-py3-none-any.whl new file mode 100644 index 000000000..0951ac354 Binary files /dev/null and b/salt/sensoroni/files/analyzers/malwarebazaar/source-packages/urllib3-2.1.0-py3-none-any.whl differ diff --git a/salt/sensoroni/files/analyzers/threatfox/README.md b/salt/sensoroni/files/analyzers/threatfox/README.md new file mode 100644 index 000000000..e3b80918b --- /dev/null +++ b/salt/sensoroni/files/analyzers/threatfox/README.md @@ -0,0 +1,6 @@ +# Threatfox + +## Description +Submit a domain, hash, IP, or URL to Threatfox for analysis. 
+ + diff --git a/salt/sensoroni/files/analyzers/threatfox/requirements.txt b/salt/sensoroni/files/analyzers/threatfox/requirements.txt new file mode 100644 index 000000000..925ada01e --- /dev/null +++ b/salt/sensoroni/files/analyzers/threatfox/requirements.txt @@ -0,0 +1,2 @@ +requests>=2.31.0 +pyyaml>=6.0 diff --git a/salt/sensoroni/files/analyzers/threatfox/source-packages/PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl b/salt/sensoroni/files/analyzers/threatfox/source-packages/PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl new file mode 100644 index 000000000..dac873718 Binary files /dev/null and b/salt/sensoroni/files/analyzers/threatfox/source-packages/PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl differ diff --git a/salt/sensoroni/files/analyzers/threatfox/source-packages/certifi-2023.11.17-py3-none-any.whl b/salt/sensoroni/files/analyzers/threatfox/source-packages/certifi-2023.11.17-py3-none-any.whl new file mode 100644 index 000000000..de0787f64 Binary files /dev/null and b/salt/sensoroni/files/analyzers/threatfox/source-packages/certifi-2023.11.17-py3-none-any.whl differ diff --git a/salt/sensoroni/files/analyzers/threatfox/source-packages/charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl b/salt/sensoroni/files/analyzers/threatfox/source-packages/charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl new file mode 100644 index 000000000..b1cd02e9d Binary files /dev/null and b/salt/sensoroni/files/analyzers/threatfox/source-packages/charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl differ diff --git a/salt/sensoroni/files/analyzers/threatfox/source-packages/idna-3.6-py3-none-any.whl b/salt/sensoroni/files/analyzers/threatfox/source-packages/idna-3.6-py3-none-any.whl new file mode 100644 index 000000000..fdf65ae30 Binary files /dev/null and 
"""ThreatFox analyzer for Sensoroni.

Supports querying for a hash, IP, or domain.
Usage: python3 threatfox.py '{"artifactType":"x", "value":"y"}'
"""
import json
import sys


def buildReq(observ_type, observ_value):
    """Build the ThreatFox POST payload for the given observable.

    Hashes use the 'search_hash' query; IPs and domains use the generic
    'search_ioc' query.
    """
    if observ_type == 'hash':
        return {'query': 'search_hash', 'hash': observ_value}
    if observ_type in ('ip', 'domain'):
        return {'query': 'search_ioc', 'search_term': observ_value}
    # The original fell through and raised UnboundLocalError for any
    # other type; fail with an explicit, catchable error instead.
    raise ValueError('unsupported artifact type: ' + observ_type)


def sendReq(meta, query):
    """POST the JSON-encoded query to the API and return the decoded body."""
    # Imported lazily so this module can be loaded (and unit tested)
    # without the analyzer runtime dependencies installed.
    import requests
    response = requests.post(meta['baseUrl'], json.dumps(query))
    return response.json()


def prepareResults(raw):
    """Gauge threat level from ThreatFox's confidence level.

    Returns {'response', 'summary', 'status'}.
    """
    if raw != {} and raw['query_status'] == 'ok':
        parsed = raw['data'][0]

        # Prefer the short threat type; fall back to its description.
        if parsed['threat_type'] != '':
            summary = parsed['threat_type']
        else:
            summary = parsed['threat_type_desc']

        confidence = parsed['confidence_level']
        if confidence > 75:
            status = 'threat'
        elif confidence > 50:
            status = 'caution'
        elif confidence > 25:
            status = 'info'
        else:
            status = 'ok'
    elif raw != {} and raw['query_status'] in ['no_result', 'illegal_search_term',
                                               'illegl_hash', 'illegal_hash']:
        # 'illegal_hash' added: the original only matched the misspelled
        # 'illegl_hash', so real illegal-hash responses were misreported
        # as internal failures. The old string is kept for safety.
        status = 'info'
        summary = 'no result'
    else:
        # Unrecognized or empty response: report an internal failure.
        raw = {}
        status = 'caution'
        summary = 'internal_failure'

    return {'response': raw, 'summary': summary, 'status': status}


def analyze(input):
    """Validate the observable, query ThreatFox, and return the verdict."""
    import helpers  # lazy: project-local module, resolved by the analyzer runtime
    data = json.loads(input)
    meta = helpers.loadMetadata(__file__)
    helpers.checkSupportedType(meta, data["artifactType"])
    query = buildReq(data['artifactType'], data['value'])
    return prepareResults(sendReq(meta, query))


def main():
    """CLI entry point: expects a single JSON argument, prints the verdict."""
    if len(sys.argv) == 2:
        print(json.dumps(analyze(sys.argv[1])))
    else:
        print("ERROR: Input is not in proper JSON format")


if __name__ == '__main__':
    main()
from io import StringIO
import sys
import unittest
from unittest.mock import MagicMock, patch

import threatfox


class TestThreatfoxMethods(unittest.TestCase):
    """Unit tests for the ThreatFox analyzer helpers.

    main() is exercised by rewriting sys.argv directly (the analyzer does
    not use argparse).
    """

    def test_main_missing_input(self):
        # No CLI argument: main() must print the usage error.
        with patch('sys.stdout', new=StringIO()) as mock_cmd:
            sys.argv = ["cmd"]
            threatfox.main()
        self.assertEqual(mock_cmd.getvalue(),
                         'ERROR: Input is not in proper JSON format\n')

    def test_main_success(self):
        # One argument: main() prints the JSON-encoded analyze() result.
        with patch('sys.stdout', new=StringIO()) as mock_cmd, \
                patch('threatfox.analyze',
                      new=MagicMock(return_value={'test': 'val'})) as mock_analyze:
            sys.argv = ["cmd", "input"]
            threatfox.main()
            self.assertEqual(mock_cmd.getvalue(), '{"test": "val"}\n')
            mock_analyze.assert_called_once()

    def test_buildReqHash(self):
        self.assertEqual(
            threatfox.buildReq('hash', '2151c4b970eff0071948dbbc19066aa4'),
            {'query': 'search_hash', 'hash': '2151c4b970eff0071948dbbc19066aa4'})

    def test_buildReqIP(self):
        self.assertEqual(
            threatfox.buildReq('ip', '139.180.203.104:443'),
            {'query': 'search_ioc', 'search_term': '139.180.203.104:443'})

    def test_buildReqDomain(self):
        self.assertEqual(
            threatfox.buildReq('domain', 'https://google.com'),
            {'query': 'search_ioc', 'search_term': 'https://google.com'})

    def test_buildReqFalse(self):
        self.assertNotEqual(
            threatfox.buildReq('hash', '2151c4b970eff0071948dbbc19066aa4'), {})

    def test_sendReq(self):
        # Simulate the API: sendReq should return whatever the mocked
        # POST's .json() yields (only checked to be non-None).
        with patch('requests.post', new=MagicMock(return_value=MagicMock())):
            response = threatfox.sendReq(
                {'baseUrl': 'https://www.randurl.xyz'}, 'example_data')
        self.assertIsNotNone(response)

    def test_prepareResults_noinput(self):
        # No/improper input maps to an internal failure.
        raw = {}
        self.assertEqual(
            threatfox.prepareResults(raw),
            {'response': raw, 'status': 'caution', 'summary': 'internal_failure'})

    def test_prepareResults_none(self):
        # An empty result set maps to an informational "no result".
        raw = {'query_status': 'no_result'}
        self.assertEqual(
            threatfox.prepareResults(raw),
            {'response': raw, 'status': 'info', 'summary': 'no result'})

    def test_prepareResults_illegal_search_term(self):
        raw = {'query_status': 'illegal_search_term'}
        self.assertEqual(
            threatfox.prepareResults(raw),
            {'response': raw, 'status': 'info', 'summary': 'no result'})

    def test_prepareResults_threat(self):
        # A high confidence level must yield a "threat" verdict.
        raw = {'query_status': 'ok',
               'data': [{'threat_type': 'threat', 'confidence_level': 94}]}
        self.assertEqual(
            threatfox.prepareResults(raw),
            {'response': raw, 'summary': 'threat', 'status': 'threat'})

    def test_prepareResults_error(self):
        self.assertEqual(
            threatfox.prepareResults({}),
            {'response': {}, 'status': 'caution', 'summary': 'internal_failure'})

    def test_analyze(self):
        """analyze() with sendReq and prepareResults mocked: the mocked
        verdict must be passed straight through."""
        with patch('threatfox.sendReq',
                   new=MagicMock(return_value={'threat': 'no_result'})) as mock_send, \
                patch('threatfox.prepareResults',
                      new=MagicMock(return_value={'response': '',
                                                  'summary': 'no result',
                                                  'status': ''})):
            results = threatfox.analyze('{"artifactType":"hash", "value":"1234"}')
        self.assertEqual(results["summary"], "no result")
        mock_send.assert_called_once()
- advanced: True - helpLink: grid.html - config: - analyze: - enabled: - description: Enable or disable the analyzer. - advanced: True - helpLink: cases.html - timeout_ms: - description: Timeout period for the analyzer. - advanced: True - helpLink: cases.html - parallel_limit: - description: Parallel limit for the analyzer. - advanced: True - helpLink: cases.html - node_checkin_interval_ms: - description: Interval in ms to checkin to the soc_host. - advanced: True - helpLink: grid.html - node_description: - description: Description of the specific node. - helpLink: grid.html - node: True - forcedType: string - sensoronikey: - description: Shared key for sensoroni authentication. - helpLink: grid.html - global: True - sensitive: True - advanced: True - soc_host: - description: Host for sensoroni agents to connect to. - helpLink: grid.html - global: True - advanced: True - analyzers: - emailrep: - api_key: - description: API key for the EmailRep analyzer. - helpLink: cases.html - global: False - sensitive: True - advanced: True - forcedType: string - base_url: - description: Base URL for the EmailRep analyzer. - helpLink: cases.html - global: False - sensitive: False - advanced: True - forcedType: string - greynoise: - api_key: - description: API key for the GreyNoise analyzer. - helpLink: cases.html - global: False - sensitive: True - advanced: True - forcedType: string - api_version: - description: API version for the GreyNoise analyzer. - helpLink: cases.html - global: False - sensitive: False - advanced: True - forcedType: string - base_url: - description: Base URL for the GreyNoise analyzer. - helpLink: cases.html - global: False - sensitive: False - advanced: True - forcedType: string - localfile: - file_path: - description: File path for the LocalFile analyzer. - helpLink: cases.html - global: False - sensitive: False - advanced: True - forcedType: "[]string" - otx: - api_key: - description: API key for the OTX analyzer. 
- helpLink: cases.html - global: False - sensitive: True - advanced: True - forcedType: string - base_url: - description: Base URL for the OTX analyzer. - helpLink: cases.html - global: False - sensitive: False - advanced: True - forcedType: string - pulsedive: - api_key: - description: API key for the Pulsedive analyzer. - helpLink: cases.html - global: False - sensitive: True - advanced: True - forcedType: string - base_url: - description: Base URL for the Pulsedive analyzer. - helpLink: cases.html - global: False - sensitive: False - advanced: True - forcedType: string - spamhaus: - lookup_host: - description: Host to use for lookups. - helpLink: cases.html - global: False - sensitive: False - advanced: True - forcedType: string - nameservers: - description: Nameservers used for queries. - helpLink: cases.html - global: False - sensitive: False - advanced: True - forcedTypes: "[]string" - sublime_platform: - api_key: - description: API key for the Sublime Platform analyzer. - helpLink: cases.html - global: False - sensitive: True - advanced: True - forcedType: string - base_url: - description: Base URL for the Sublime Platform analyzer. - helpLink: cases.html - global: False - sensitive: False - advanced: True - forcedType: string - live_flow: - description: Determines if live flow analysis is used. - helpLink: cases.html - global: False - sensitive: False - advanced: True - forcedType: bool - mailbox_email_address: - description: Source mailbox address used for live flow analysis. - helpLink: cases.html - global: False - sensitive: False - advanced: True - forcedType: string - message_source_id: - description: ID of the message source used for live flow analysis. - helpLink: cases.html - global: False - sensitive: False - advanced: True - forcedType: string - urlscan: - api_key: - description: API key for the Urlscan analyzer. 
- helpLink: cases.html - global: False - sensitive: True - advanced: True - forcedType: string - base_url: - description: Base URL for the Urlscan analyzer. - helpLink: cases.html - global: False - sensitive: False - advanced: True - forcedType: string - enabled: - description: Analyzer enabled - helpLink: cases.html - global: False - sensitive: False - advanced: True - forcedType: bool - timeout: - description: Timeout for the Urlscan analyzer. - helpLink: cases.html - global: False - sensitive: False - advanced: True - forcedType: int - visibility: - description: Type of visibility. - helpLink: cases.html - global: False - sensitive: False - advanced: True - forcedType: string - virustotal: - api_key: - description: API key for the VirusTotal analyzer. - helpLink: cases.html - global: False - sensitive: True - advanced: True - forcedType: string - base_url: - description: Base URL for the VirusTotal analyzer. - helpLink: cases.html - global: False - sensitive: False - advanced: True - forcedType: string +sensoroni: + enabled: + description: Enable or disable Sensoroni. + advanced: True + helpLink: grid.html + config: + analyze: + enabled: + description: Enable or disable the analyzer. + advanced: True + helpLink: cases.html + timeout_ms: + description: Timeout period for the analyzer. + advanced: True + helpLink: cases.html + parallel_limit: + description: Parallel limit for the analyzer. + advanced: True + helpLink: cases.html + node_checkin_interval_ms: + description: Interval in ms to checkin to the soc_host. + advanced: True + helpLink: grid.html + node_description: + description: Description of the specific node. + helpLink: grid.html + node: True + forcedType: string + sensoronikey: + description: Shared key for sensoroni authentication. + helpLink: grid.html + global: True + sensitive: True + advanced: True + soc_host: + description: Host for sensoroni agents to connect to. 
+ helpLink: grid.html + global: True + advanced: True + analyzers: + echotrail: + api_key: + description: API key for the Echotrail analyzer. + helpLink: sensoroni.html + global: False + sensitive: True + advanced: True + forcedType: string + base_url: + description: Base URL for the Echotrail analyzer. + helpLink: sensoroni.html + global: False + sensitive: False + advanced: True + forcedType: string + elasticsearch: + api_key: + description: API key for the Elasticsearch analyzer. + helpLink: sensoroni.html + global: False + sensitive: True + advanced: True + forcedType: string + base_url: + description: Connection URL for the Elasticsearch analyzer. + helpLink: sensoroni.html + global: False + sensitive: False + advanced: True + forcedType: string + auth_user: + description: Username for the Elasticsearch analyzer. + helpLink: sensoroni.html + global: False + sensitive: False + advanced: True + forcedType: string + auth_pwd: + description: User password for the Elasticsearch analyzer. + helpLink: sensoroni.html + global: False + sensitive: True + advanced: True + forcedType: string + num_results: + description: Number of documents to return for the Elasticsearch analyzer. + helpLink: sensoroni.html + global: False + sensitive: False + advanced: True + forcedType: string + index: + description: Search index for the Elasticsearch analyzer. + helpLink: sensoroni.html + global: False + sensitive: False + advanced: True + forcedType: string + time_delta_minutes: + description: Time (in minutes) to search back for the Elasticsearch analyzer. + helpLink: sensoroni.html + global: False + sensitive: False + advanced: True + forcedType: int + timestamp_field_name: + description: Specified name for a documents' timestamp field for the Elasticsearch analyzer. + helpLink: sensoroni.html + global: False + sensitive: False + advanced: True + forcedType: string + map: + description: Map between observable types and search field for the Elasticsearch analyzer. 
+ helpLink: sensoroni.html + global: False + sensitive: False + advanced: True + forcedType: string + cert_path: + description: Path to a TLS certificate for the Elasticsearch analyzer. + helpLink: sensoroni.html + global: False + sensitive: False + advanced: True + forcedType: string + emailrep: + api_key: + description: API key for the EmailRep analyzer. + helpLink: cases.html + global: False + sensitive: True + advanced: True + forcedType: string + base_url: + description: Base URL for the EmailRep analyzer. + helpLink: cases.html + global: False + sensitive: False + advanced: True + forcedType: string + greynoise: + api_key: + description: API key for the GreyNoise analyzer. + helpLink: cases.html + global: False + sensitive: True + advanced: True + forcedType: string + api_version: + description: API version for the GreyNoise analyzer. + helpLink: cases.html + global: False + sensitive: False + advanced: True + forcedType: string + base_url: + description: Base URL for the GreyNoise analyzer. + helpLink: cases.html + global: False + sensitive: False + advanced: True + forcedType: string + localfile: + file_path: + description: File path for the LocalFile analyzer. + helpLink: cases.html + global: False + sensitive: False + advanced: True + forcedType: "[]string" + otx: + api_key: + description: API key for the OTX analyzer. + helpLink: cases.html + global: False + sensitive: True + advanced: True + forcedType: string + base_url: + description: Base URL for the OTX analyzer. + helpLink: cases.html + global: False + sensitive: False + advanced: True + forcedType: string + pulsedive: + api_key: + description: API key for the Pulsedive analyzer. + helpLink: cases.html + global: False + sensitive: True + advanced: True + forcedType: string + base_url: + description: Base URL for the Pulsedive analyzer. + helpLink: cases.html + global: False + sensitive: False + advanced: True + forcedType: string + spamhaus: + lookup_host: + description: Host to use for lookups. 
+ helpLink: cases.html + global: False + sensitive: False + advanced: True + forcedType: string + nameservers: + description: Nameservers used for queries. + helpLink: cases.html + global: False + sensitive: False + advanced: True + forcedTypes: "[]string" + sublime_platform: + api_key: + description: API key for the Sublime Platform analyzer. + helpLink: cases.html + global: False + sensitive: True + advanced: True + forcedType: string + base_url: + description: Base URL for the Sublime Platform analyzer. + helpLink: cases.html + global: False + sensitive: False + advanced: True + forcedType: string + live_flow: + description: Determines if live flow analysis is used. + helpLink: cases.html + global: False + sensitive: False + advanced: True + forcedType: bool + mailbox_email_address: + description: Source mailbox address used for live flow analysis. + helpLink: cases.html + global: False + sensitive: False + advanced: True + forcedType: string + message_source_id: + description: ID of the message source used for live flow analysis. + helpLink: cases.html + global: False + sensitive: False + advanced: True + forcedType: string + urlscan: + api_key: + description: API key for the Urlscan analyzer. + helpLink: cases.html + global: False + sensitive: True + advanced: True + forcedType: string + base_url: + description: Base URL for the Urlscan analyzer. + helpLink: cases.html + global: False + sensitive: False + advanced: True + forcedType: string + enabled: + description: Analyzer enabled + helpLink: cases.html + global: False + sensitive: False + advanced: True + forcedType: bool + timeout: + description: Timeout for the Urlscan analyzer. + helpLink: cases.html + global: False + sensitive: False + advanced: True + forcedType: int + visibility: + description: Type of visibility. + helpLink: cases.html + global: False + sensitive: False + advanced: True + forcedType: string + virustotal: + api_key: + description: API key for the VirusTotal analyzer. 
+ helpLink: cases.html + global: False + sensitive: True + advanced: True + forcedType: string + base_url: + description: Base URL for the VirusTotal analyzer. + helpLink: cases.html + global: False + sensitive: False + advanced: True + forcedType: string