pushing everything at once

Jackson
2023-12-13 13:45:48 -05:00
parent 5d3f2298b6
commit 81e4fe78e7
49 changed files with 1392 additions and 261 deletions

View File

@@ -9,6 +9,20 @@ sensoroni:
sensoronikey:
soc_host:
analyzers:
echotrail:
base_url: https://api.echotrail.io/insights/
api_key:
elasticsearch:
base_url:
auth_user:
auth_pwd:
num_results: 10
api_key:
index: _all
time_delta_minutes: 14400
timestamp_field_name: '@timestamp'
map: {}
cert_path:
emailrep:
base_url: https://emailrep.io/
api_key:

View File

@@ -0,0 +1,25 @@
# EchoTrail
## Description
Submit a filename, hash, or command line to EchoTrail for analysis.
## Configuration Requirements
In SOC, navigate to `Administration`, toggle `Show all configurable settings, including advanced settings.`, and navigate to `sensoroni` -> `analyzers` -> `echotrail`.
![echotrail](https://github.com/RyHoa/securityonion/assets/129560634/43b55869-1fba-4907-8418-c0745c37237b)
The following configuration options are available:
``api_key`` - API key used for communication with the EchoTrail API (Required)
This value should be set in the ``sensoroni`` pillar, like so:
```
sensoroni:
  analyzers:
    echotrail:
      api_key: $yourapikey
```
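For a quick local test of the configured key, the same lookup the analyzer performs can be reproduced directly. This is a minimal sketch mirroring `sendReq()` in `echotrail.py`; `YOUR_API_KEY` is a placeholder, and the hash is the sample value from the analyzer's test-usage comment:
```
import json
import requests

# Minimal sketch of the analyzer's EchoTrail lookup (mirrors sendReq in echotrail.py).
# YOUR_API_KEY is a placeholder; replace it with a real EchoTrail API key.
conf = {
    'base_url': 'https://api.echotrail.io/insights/',
    'api_key': 'YOUR_API_KEY',
}
observable = '438b6ccd84f4dd32d9684ed7d58fd7d1e5a75fe3f3d12ab6c788e6bb0ffad5e7'

response = requests.get(conf['base_url'] + observable,
                        headers={'x-api-key': conf['api_key']})
print(json.dumps(response.json(), indent=2))
```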

View File

@@ -0,0 +1,10 @@
{
"name": "Echotrail",
"version": "0.1",
"author": "Security Onion Solutions",
"description": "This analyzer queries Echotrail to see if a related filename, hash, or commandline is considered malicious.",
"supportedTypes" : ["filename","hash","commandline"],
"baseUrl": "https://api.echotrail.io/insights/"
}

View File

@@ -0,0 +1,67 @@
import json
import os
import sys
import requests
import helpers
import argparse
# for test usage:
# python3 echotrail.py '{"artifactType":"hash", "value":"438b6ccd84f4dd32d9684ed7d58fd7d1e5a75fe3f3d12ab6c788e6bb0ffad5e7"}'
# You will need to provide an API key in the .yaml file.
def checkConfigRequirements(conf):
if not conf['api_key']:
sys.exit(126)
else:
return True
def sendReq(conf, observ_value):
    # send a GET request using the user-provided API key and the API URL
url = conf['base_url'] + observ_value
headers = {'x-api-key': conf['api_key']}
response = requests.request('GET', url=url, headers=headers)
return response.json()
def prepareResults(raw):
# checking for the 'filenames' key alone does
# not work when querying by filename.
# So, we can account for a hash query, a filename query,
# and anything else with these if statements.
if 'filenames' in raw.keys():
summary = raw['filenames'][0][0]
elif 'tags' in raw.keys():
summary = raw['tags'][0][0]
else:
summary = 'inconclusive'
status = 'info'
return {'response': raw, 'summary': summary, 'status': status}
def analyze(conf, input):
# put all of our methods together and return a properly formatted output.
checkConfigRequirements(conf)
meta = helpers.loadMetadata(__file__)
data = helpers.parseArtifact(input)
helpers.checkSupportedType(meta, data['artifactType'])
response = sendReq(conf, data['value'])
return prepareResults(response)
def main():
dir = os.path.dirname(os.path.realpath(__file__))
parser = argparse.ArgumentParser(
description='Search Echotrail for a given artifact')
parser.add_argument(
'artifact', help='the artifact represented in JSON format')
parser.add_argument('-c', '--config', metavar='CONFIG_FILE', default=dir + '/echotrail.yaml',
help='optional config file to use instead of the default config file')
args = parser.parse_args()
if args.artifact:
results = analyze(helpers.loadConfig(args.config), args.artifact)
print(json.dumps(results))
if __name__ == '__main__':
main()

View File

@@ -0,0 +1,3 @@
base_url: "{{ salt['pillar.get']('sensoroni:analyzers:echotrail:base_url', 'https://api.echotrail.io/insights/') }}"
api_key: "{{ salt['pillar.get']('sensoroni:analyzers:echotrail:api_key', '') }}"

View File

@@ -0,0 +1,61 @@
from io import StringIO
import sys
from unittest.mock import patch, MagicMock
import unittest
import echotrail
import helpers
class TestEchoTrailMethods(unittest.TestCase):
def test_main_success(self):
with patch('sys.stdout', new=StringIO()) as mock_cmd:
with patch('echotrail.analyze', new=MagicMock(return_value={'test': 'val'})) as mock:
sys.argv = ["test", "test"]
echotrail.main()
expected = '{"test": "val"}\n'
self.assertEqual(mock_cmd.getvalue(), expected)
mock.assert_called_once()
def test_main_missing_input(self):
with patch('sys.exit', new=MagicMock()) as sysmock:
with patch('sys.stderr', new=StringIO()) as mock_stderr:
sys.argv = ["cmd"]
echotrail.main()
self.assertEqual(mock_stderr.getvalue(), "usage: cmd [-h] [-c CONFIG_FILE] artifact\ncmd: error: the following arguments are required: artifact\n")
def test_checkConfigRequirements(self):
conf = {'base_url': 'https://www.randurl.xyz/', 'api_key':''}
with self.assertRaises(SystemExit) as cm:
echotrail.checkConfigRequirements(conf)
self.assertEqual(cm.exception.code, 126)
def test_sendReq(self):
with patch('requests.request', new=MagicMock(return_value=MagicMock())) as mock:
response = echotrail.sendReq(conf={'base_url': 'https://www.randurl.xyz/', 'api_key':'randkey'}, observ_value='example_data')
self.assertIsNotNone(response)
def test_prepareResults_noinput(self):
raw = {}
sim_results = {'response': raw,
'status': 'info', 'summary': 'inconclusive'}
results = echotrail.prepareResults(raw)
self.assertEqual(results, sim_results)
def test_prepareResults_none(self):
raw = {'query_status': 'no_result'}
sim_results = {'response': raw,
'status': 'info', 'summary': 'inconclusive'}
results = echotrail.prepareResults(raw)
self.assertEqual(results, sim_results)
def test_analyze(self):
sendReqOutput = {'threat': 'no_result'}
input = '{"artifactType":"hash", "value":"1234"}'
prepareResultOutput = {'response': '',
'summary': 'inconclusive', 'status': 'info'}
conf = {"api_key": "xyz"}
with patch('echotrail.sendReq', new=MagicMock(return_value=sendReqOutput)) as mock:
with patch('echotrail.prepareResults', new=MagicMock(return_value=prepareResultOutput)) as mock2:
results = echotrail.analyze(conf, input)
self.assertEqual(results["summary"], "inconclusive")

View File

@@ -0,0 +1,2 @@
requests>=2.31.0
pyyaml>=6.0

View File

@@ -0,0 +1,58 @@
# Elasticsearch
Elasticsearch returns an informational breakdown of the queried observable.
## Overview
Elasticsearch facilitates queries within the user's database. Users can query with the following observable types: hash, domain, file, filename, fqdn, gimphash, IP, mail, mail_subject, regexp, registry, telfhash, tlsh, uri_path, URL, and user-agent.
## Description
Configure and submit the field you want to search for in your database, e.g. domain, hash, IP, or URL.
## Requirement
An API key or user credentials are required to use Elasticsearch.
## Configuration Requirements
In SOC, navigate to `Administration`, toggle `Show all configurable settings, including advanced settings.`, and navigate to `sensoroni` -> `analyzers` -> `elasticsearch`.
![image](https://github.com/RyHoa/securityonion/assets/129560634/31c612d3-39f8-4d9e-881b-210c87a56b50)
The following configuration options are available:
``api_key`` - API key used for communication with the Elasticsearch API (Optional if auth_user and auth_pwd are used)
``auth_user`` - Username used for communication with Elasticsearch
``auth_pwd`` - Password used for communication with Elasticsearch
``base_url`` - URL used to connect to the Elasticsearch instance on port 9200. Example format: ``https://<your IP address>:9200/``
``index`` - The index of the data in the Elasticsearch database. Default value is ``_all``.
``num_results`` - The maximum number of results to display. Default value is 10.
``time_delta_minutes`` - How far back to search for data, in minutes. Default value is 14400.
``timestamp_field_name`` - The name of the timestamp field in your documents. Default value is ``@timestamp``.
``map`` - A dictionary mapping Security Onion field names to field names in the user's Elasticsearch database. Example value: ``{"hash": "userhashfieldname"}``, which maps the Security Onion ``hash`` field name to the user's hash field name.
``cert_path`` - Path to the certificate on the host, used to verify the connection (Required)
These values should be set in the ``sensoroni`` pillar, like so:
```
sensoroni:
  analyzers:
    elasticsearch:
      base_url: $yourbase_url
      api_key: $yourapi_key
      num_results: $yournum_results
      auth_user: $yourauth_user
      auth_pwd: $yourauth_pwd
      index: $yourindex
      time_delta_minutes: $yourtime_delta_minutes
      timestamp_field_name: $yourtimestamp_field_name
      cert_path: $yourcert_path
      map: $yourmap
```
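As a rough illustration of how `map`, `num_results`, `time_delta_minutes`, and `timestamp_field_name` combine, the sketch below mirrors the query construction in `buildReq()` from `elasticsearch.py`; the mapped field name `winlog.event_data.Hashes` is only a hypothetical example of what a `map` entry might point to:
```
import json
from datetime import datetime, timedelta

# Sketch of the wildcard query the analyzer builds (mirrors buildReq in elasticsearch.py).
# 'winlog.event_data.Hashes' is a hypothetical example of a mapped field name.
conf = {
    'num_results': 10,
    'time_delta_minutes': 14400,
    'timestamp_field_name': '@timestamp',
    'map': {'hash': 'winlog.event_data.Hashes'},
}
artifact = {'artifactType': 'hash', 'value': 'somehashvalue'}

# translate the Security Onion observable type into the user's field name
field = conf['map'].get(artifact['artifactType'], artifact['artifactType'])
now = datetime.now()
start = now - timedelta(minutes=conf['time_delta_minutes'])
query = {
    'from': 0,
    'size': conf['num_results'],
    'query': {
        'bool': {
            'must': [{'wildcard': {field: artifact['value']}}],
            'filter': {
                'range': {
                    conf['timestamp_field_name']: {
                        'gte': start.strftime('%Y-%m-%dT%H:%M:%S'),
                        'lte': now.strftime('%Y-%m-%dT%H:%M:%S'),
                    }
                }
            },
        }
    },
}
print(json.dumps(query, indent=2))
```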

View File

@@ -0,0 +1,9 @@
{
"name": "Elastic Search",
"version": "0.1",
"author": "Security Onion Solutions",
"description": "Queries an ElasticSearch instance for specified field values.",
"supportedTypes": ["hash", "ip", "domain"]
}

View File

@@ -0,0 +1,138 @@
from datetime import datetime, timedelta
import argparse
import requests
import helpers
import json
import sys
import os
# As it stands, this analyzer does not support querying for mixed-case fields without disregarding case completely.
# So the current version will only support querying for all-lowercase alphanumerical values.
# default usage is:
# python3 elasticsearch.py '{"artifactType":"hash", "value":"*"}'
# To use outside of a Security Onion box, pass in '-c test.yaml' at the end
# of the above command to give this analyzer some test values. You may edit the
# values in the test.yaml file freely.
def checkConfigRequirements(conf):
# if the user hasn't given valid configurables, quit.
if not conf['num_results']:
sys.exit(126)
if not conf['time_delta_minutes']:
sys.exit(126)
if (not conf['auth_user'] or not conf['auth_pwd']) and not conf['api_key']:
sys.exit(126)
if not conf['index']:
sys.exit(126)
if not conf['base_url']:
sys.exit(126)
if not conf['timestamp_field_name']:
sys.exit(126)
if not conf['cert_path']:
sys.exit(126)
return True
def buildReq(conf, input):
# structure a query to send to the Elasticsearch machine
# based off of user configurable values
num_results = conf['num_results']
if conf['map'] != None:
mappings = conf['map']
else:
mappings = dict()
cur_time = datetime.now()
start_time = cur_time - timedelta(minutes=int(conf['time_delta_minutes']))
if input['artifactType'] in mappings:
type = mappings[input['artifactType']]
else:
type = input['artifactType']
query = {
"from": 0,
"size": num_results,
"query": {
"bool": {
"must": [{
"wildcard": {
type: input['value'],
},
}
],
"filter": {
"range": {
conf['timestamp_field_name']: {
"gte": start_time.strftime('%Y-%m-%dT%H:%M:%S'),
"lte": cur_time.strftime('%Y-%m-%dT%H:%M:%S')
}
}
}
}
}
}
return json.dumps(query)
def sendReq(conf, query):
    # send the configured query, authenticating with either basic auth or an API key
headers = {}
url = conf['base_url'] + conf['index'] + '/_search'
uname = conf['auth_user']
pwd = conf['auth_pwd']
apikey = conf['api_key']
cert_path = conf['cert_path']
if pwd and uname:
headers = {
'Content-Type': 'application/json',
}
response = requests.post(str(url), auth=(
uname, pwd), verify=cert_path, data=query, headers=headers)
elif apikey:
headers = {
'Content-Type': 'application/json',
'Authorization': f"Apikey {apikey}"
}
response = requests.post(
str(url), verify=cert_path, data=query, headers=headers)
return response.json()
def prepareResults(raw):
    # return the raw API response, the number of hits found, and the request status
summary = f"Documents returned: {len(raw['hits']['hits'])}"
status = 'info'
return {'response': raw, 'summary': summary, 'status': status}
def analyze(conf, input):
checkConfigRequirements(conf)
data = json.loads(input)
query = buildReq(conf, data)
response = sendReq(conf, query)
return prepareResults(response)
def main():
dir = os.path.dirname(os.path.realpath(__file__))
parser = argparse.ArgumentParser(
        description='Search Elasticsearch for a given artifact')
parser.add_argument('artifact', help='required artifact')
parser.add_argument('-c', '--config', metavar='CONFIG_FILE', default=dir + '/elasticsearch.yaml',
help='optional config file to use instead of the default config file')
args = parser.parse_args()
if args.artifact:
results = analyze(helpers.loadConfig(args.config), args.artifact)
print(json.dumps(results))
if __name__ == '__main__':
main()

View File

@@ -0,0 +1,10 @@
base_url: "{{ salt['pillar.get']('sensoroni:analyzers:elasticsearch:base_url', '') }}"
auth_user: "{{ salt['pillar.get']('sensoroni:analyzers:elasticsearch:auth_user', '') }}"
auth_pwd: "{{ salt['pillar.get']('sensoroni:analyzers:elasticsearch:auth_pwd', '') }}"
num_results: "{{ salt['pillar.get']('sensoroni:analyzers:elasticsearch:num_results', 10) }}"
api_key: "{{ salt['pillar.get']('sensoroni:analyzers:elasticsearch:api_key', '') }}"
index: "{{ salt['pillar.get']('sensoroni:analyzers:elasticsearch:index', '_all') }}"
time_delta_minutes: "{{ salt['pillar.get']('sensoroni:analyzers:elasticsearch:time_delta_minutes', 14400) }}"
timestamp_field_name: "{{ salt['pillar.get']('sensoroni:analyzers:elasticsearch:timestamp_field_name', '@timestamp') }}"
map: {{ salt['pillar.get']('sensoroni:analyzers:elasticsearch:map', '') }}
cert_path: "{{ salt['pillar.get']('sensoroni:analyzers:elasticsearch:cert_path', '') }}"

View File

@@ -0,0 +1,194 @@
from io import StringIO
import sys
from unittest.mock import patch, MagicMock
import unittest
import elasticsearch
import helpers
import json
from datetime import datetime, timedelta
class TestElasticSearchMethods(unittest.TestCase):
    '''Test that the analyzer's main method works as expected when not given enough input'''
def test_main_missing_input(self):
with patch('sys.exit', new=MagicMock()) as sysmock:
with patch('sys.stderr', new=StringIO()) as mock_stderr:
sys.argv = ["cmd"]
elasticsearch.main()
self.assertEqual(mock_stderr.getvalue(), "usage: cmd [-h] [-c CONFIG_FILE] artifact\ncmd: error: the following arguments are required: artifact\n")
sysmock.assert_called_once_with(2)
    '''Test that the analyzer's main method works as expected when all required input is given'''
def test_main_success(self):
conf = {"base_url":"test", "auth_user":"test", "auth_pwd":"test", "num_results":10,"api_key":"test","index":"test","time_delta_minutes": 14400,"timestamp_field_name":"test", "map":{}, "cert_path":""}
with patch('elasticsearch.helpers.loadConfig', new=MagicMock(return_value=conf))as mock_yaml:
with patch('sys.stdout', new=StringIO()) as mock_cmd:
with patch('elasticsearch.analyze', new=MagicMock(return_value={'foo': 'bar'})) as mock:
sys.argv = ["cmd", "conf"]
elasticsearch.main()
expected = '{"foo": "bar"}\n'
self.assertEqual(mock_cmd.getvalue(), expected)
mock.assert_called_once()
mock_yaml.assert_called_once()
'''Test that checks for empty and none values in configurables'''
def test_checkConfigRequirements(self):
conf = {"base_url":"", "auth_user":"", "auth_pwd":"", "num_results":None,"api_key":"","index":"","time_delta_minutes": None,"timestamp_field_name":"", "map":{}, "cert_path":""}
with self.assertRaises(SystemExit) as cm:
elasticsearch.checkConfigRequirements(conf)
self.assertEqual(cm.exception.code, 126)
    '''Test the buildReq method by comparing a mocked buildReq result with an expectedQuery; a mock object simulates the expectedQuery
    since the Elasticsearch buildReq uses values from the config'''
def test_buildReq(self):
numberOfResults = 1
observableType = "hash"
expectedQuery = {
"from": 0,
"size": numberOfResults,
"query": {
"bool": {
"must": [{
"wildcard": {
observableType: observableType,
},
}
],
"filter": {
"range": {
"@timestamp": {
"gte": ('2023-11-29T14:23:45'),
"lte": ('2023-11-29T14:23:45')
}
}
}
}
}
}
with patch('elasticsearch.buildReq', new=MagicMock(return_value=expectedQuery)) as mock:
response = elasticsearch.buildReq(observableType,numberOfResults)
self.assertEqual(json.dumps(response), json.dumps(expectedQuery))
mock.assert_called_once()
def test_wrongbuildReq(self):
result={'map':'123','artifactType':'hash','timestamp_field_name':'abc', 'time_delta_minutes':14400, 'num_results':10,'value':'0' }
cur_time = datetime.now()
start_time = cur_time - timedelta(minutes=result['time_delta_minutes'])
query=elasticsearch.buildReq(result, result)
comparequery=json.dumps({
"from": 0,
"size":10,
"query": {
"bool":{
"must": [{
"wildcard": {
'hash': result['value'],
},
}
],
"filter":{
"range":{
result['timestamp_field_name']:{
"gte": start_time.strftime('%Y-%m-%dT%H:%M:%S'),
"lte": cur_time.strftime('%Y-%m-%dT%H:%M:%S')
}
}
}
}
}
})
self.assertEqual(query, comparequery )
def test_rightbuildReq(self):
result={'map':{'hash':'testingHash'},'artifactType':'hash','timestamp_field_name':'abc', 'time_delta_minutes':14400, 'num_results':10,'value':'0'}
cur_time = datetime.now()
start_time = cur_time - timedelta(minutes=result['time_delta_minutes'])
query=elasticsearch.buildReq(result, result)
comparequery=json.dumps({
"from": 0,
"size": 10,
"query": {
"bool":{
"must":[{
"wildcard": {
result['map'][result['artifactType']]: result['value'],
},
}
]
,
"filter":{
"range":{
result['timestamp_field_name']:{
"gte": start_time.strftime('%Y-%m-%dT%H:%M:%S'),
"lte": cur_time.strftime('%Y-%m-%dT%H:%M:%S')
}
}
}
}
}
})
self.assertEqual(query, comparequery )
def test_rightbuildReq100result(self):
result={'map':{'hash':'testingHash'},'artifactType':'hash','timestamp_field_name':'abc', 'time_delta_minutes':14400, 'num_results':100,'value':'0'}
cur_time = datetime.now()
start_time = cur_time - timedelta(minutes=result['time_delta_minutes'])
query=elasticsearch.buildReq(result, result)
comparequery=json.dumps({
"from": 0,
"size": 100,
"query": {
"bool":{
"must":[{
"wildcard": {
result['map'][result['artifactType']]: result['value'],
},
}
]
,
"filter":{
"range":{
result['timestamp_field_name']:{
"gte": start_time.strftime('%Y-%m-%dT%H:%M:%S'),
"lte": cur_time.strftime('%Y-%m-%dT%H:%M:%S')
}
}
}
}
}
})
self.assertEqual(query, comparequery )
'''Test that checks sendReq method to expect a response from a requests.post'''
def test_sendReq(self):
conf = {"base_url":"test", "auth_user":"test", "auth_pwd":"test", "api_key":"test","index":"test", "cert_path":""}
with patch('requests.post', new=MagicMock(return_value=MagicMock())) as mock:
response = elasticsearch.sendReq(conf, 'example_query')
self.assertIsNotNone(response)
'''Test that checks prepareResults method, by comparing a mock prepareResults return_value with an expectedResult'''
def test_prepareResults(self):
summary = "Documents returned: 5"
status = 'info'
raw = {'_id': "0", "hash": "123"}
expectedResult = {'response': raw, 'summary': summary, 'status': status}
with patch('elasticsearch.prepareResults', new=MagicMock(return_value=expectedResult)) as mock:
response = elasticsearch.prepareResults(raw)
self.assertEqual(expectedResult, response)
mock.assert_called_once()
    '''Test the analyze method: sendReq and prepareResults are simulated with two mock objects and the variables sendReqOutput and prepareResultOutput;
    an input is created for the analyze call and results['summary'] is compared with 'Documents returned: 5' '''
def test_analyze(self):
sendReqOutput = {'_id': "0", "hash": "123"}
input = '{"artifactType":"hash", "value":"123"}'
prepareResultOutput = {'response': {'_id': "0", "hash": "123"},'summary': "Documents returned: 5", 'status': 'info'}
conf = {"base_url":"test", "auth_user":"test", "auth_pwd":"test", "num_results":10,"api_key":"test","index":"test","time_delta_minutes": 14400,"timestamp_field_name":"test", "map":{}, "cert_path":"test"}
with patch('elasticsearch.sendReq', new=MagicMock(return_value=sendReqOutput)) as mock:
with patch('elasticsearch.prepareResults', new=MagicMock(return_value=prepareResultOutput)) as mock2:
results = elasticsearch.analyze(conf, input)
self.assertEqual(results["summary"], "Documents returned: 5")
mock.assert_called_once()

View File

@@ -0,0 +1,3 @@
requests>=2.31.0
pyyaml>=6.0
urllib3>=2.1.0

View File

@@ -0,0 +1,5 @@
# Malwarebazaar
## Description
Submit a gimphash, hash, tlsh, or telfhash to Malwarebazaar for analysis.
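The query format the analyzer sends is shown below as a minimal sketch based on `buildReq()`/`sendReq()` in `malwarebazaar.py`; `YOUR_SHA256_HASH` is a placeholder. A plain hash uses the `get_info` query, while gimphash, tlsh, and telfhash use the corresponding `get_<type>` queries instead.
```
import json
import requests

# Minimal sketch of the analyzer's Malwarebazaar lookup (mirrors buildReq/sendReq in malwarebazaar.py).
# YOUR_SHA256_HASH is a placeholder; replace it with a real hash.
query = {'query': 'get_info', 'hash': 'YOUR_SHA256_HASH'}
response = requests.post('https://mb-api.abuse.ch/api/v1/', query)
print(json.dumps(response.json(), indent=2))
```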

View File

@@ -0,0 +1,8 @@
{
"name": "Malwarebazaar",
"version": "0.1",
"author": "Security Onion Solutions",
"description": "This analyzer queries Malwarebazaar to see if a hash, gimphash, tlsh, or telfhash is considered malicious.",
"supportedTypes" : ["gimphash","hash","tlsh", "telfhash"],
"baseUrl": "https://mb-api.abuse.ch/api/v1/"
}

View File

@@ -0,0 +1,157 @@
import requests
import helpers
import json
import sys
# supports querying for hash, gimphash, tlsh, and telfhash
# usage is as follows:
# python3 malwarebazaar.py '{"artifactType":"x", "value":"y"}'
def buildReq(observ_type, observ_value):
# determine correct query type to send based off of observable type
unique_types = {'gimphash': 1, 'telfhash': 1, 'tlsh': 1}
if observ_type in unique_types:
qtype = 'get_' + observ_type
else:
qtype = 'get_info'
return {'query': qtype, observ_type: observ_value}
def sendReq(meta, query):
# send a post request with our compiled query to the API
url = meta['baseUrl']
response = requests.post(url, query)
return response.json()
def isInJson(data, target_string, maxdepth=0):
    # recursively searches a JSON object for an occurrence of a string
# depth limiter (arbitrary value of 1000)
if maxdepth > 1000:
return False
if isinstance(data, dict):
for key, value in data.items():
if isinstance(value, (dict, list)):
# recursive call
if isInJson(value, target_string, maxdepth + 1):
return True
elif isinstance(value, str) and target_string in value.lower():
# found target string
return True
elif isinstance(data, list):
for item in data:
if isinstance(item, (dict, list)):
# recursive call
if isInJson(item, target_string, maxdepth + 1):
return True
elif isinstance(item, str) and target_string in item.lower():
# found target string
return True
return False
def prepareResults(raw):
# parse raw API response, gauge threat level and return status and a short summary
if raw == {}:
status = 'caution'
summary = 'internal_failure'
elif raw['query_status'] == 'ok':
parsed = raw['data'][0]
vendor_data = parsed['vendor_intel']
# get summary
        if parsed['signature']:
            summary = parsed['signature']
        elif parsed['tags']:
            summary = str(parsed['tags'][0])
        elif 'YOROI_YOMI' in vendor_data and vendor_data['YOROI_YOMI']:
            summary = vendor_data['YOROI_YOMI']['detection']
        else:
            # fall back when no signature, tags, or vendor detection is available
            summary = 'inconclusive'
# gauge vendors to determine an approximation of status, normalized to a value out of 100
# only updates score if it finds a higher indicator value
score = 0
if 'vxCube' in vendor_data:
score = int(vendor_data['vxCube']['maliciousness'])
if 'Triage' in vendor_data:
score = int(vendor_data['Triage']['score'])*10 if int(
vendor_data['Triage']['score'])*10 > score else score
if 'DocGuard' in vendor_data:
score = int(vendor_data['DocGuard']['alertlevel'])*10 if int(
vendor_data['DocGuard']['alertlevel'])*10 > score else score
if 'YOROI_YOMI' in vendor_data:
score = int(float(vendor_data['YOROI_YOMI']['score']))*100 if int(
float(vendor_data['YOROI_YOMI']['score']))*100 > score else score
if 'Inquest' in vendor_data and vendor_data['Inquest']['verdict'] == 'MALICIOUS':
score = 100 if 100 > score else score
if 'ReversingLabs' in vendor_data and vendor_data['ReversingLabs']['status'] == 'MALICIOUS':
score = 100 if 100 > score else score
if 'Spamhaus_HBL' in vendor_data and vendor_data['Spamhaus_HBL'][0]['detection'] == 'MALICIOUS':
score = 100 if 100 > score else score
# compute status
if score >= 75 or isInJson(raw, 'MALICIOUS'.lower()):
# if score >= 75:
status = 'threat'
elif score >= 50:
status = 'caution'
elif score >= 25:
status = 'info'
else:
status = 'ok'
elif raw['query_status'] != 'ok':
status = 'info'
summary = 'no result'
return {'response': raw, 'summary': summary, 'status': status}
def analyze(input):
# put all of our methods together, pass them input, and return
# properly formatted json/python dict output
data = json.loads(input)
meta = helpers.loadMetadata(__file__)
helpers.checkSupportedType(meta, data["artifactType"])
if (data['artifactType'] == 'tlsh' or data['artifactType'] == 'gimphash' or data['artifactType'] == 'telfhash'):
# To get accurate reporting for TLSH, telfhash and gimphash, we deem it necessary to query
# twice for the sake of retrieving more specific data.
initialQuery = buildReq(data['artifactType'], data['value'])
initialRaw = sendReq(meta, initialQuery)
# To prevent double-querying when a tlsh/gimphash is invalid, this if statement is necessary.
if initialRaw['query_status'] == 'ok':
# Setting artifactType and value to our new re-query arguments
# to get a more detailed report.
data['artifactType'] = 'hash'
data['value'] = initialRaw['data'][0]['sha256_hash']
else:
return prepareResults(initialRaw)
query = buildReq(data['artifactType'], data['value'])
response = sendReq(meta, query)
return prepareResults(response)
def main():
if len(sys.argv) == 2:
results = analyze(sys.argv[1])
print(json.dumps(results))
else:
print("ERROR: Input is not in proper JSON format")
if __name__ == '__main__':
main()

View File

@@ -0,0 +1,66 @@
from io import StringIO
import sys
from unittest.mock import patch, MagicMock
import malwarebazaar
import unittest
class TestMalwarebazaarMethods(unittest.TestCase):
def test_main_missing_input(self):
with patch('sys.stdout', new=StringIO()) as mock_cmd:
sys.argv = ["cmd"]
malwarebazaar.main()
self.assertEqual(mock_cmd.getvalue(),
'ERROR: Input is not in proper JSON format\n')
def test_main_success(self):
with patch('sys.stdout', new=StringIO()) as mock_cmd:
with patch('malwarebazaar.analyze', new=MagicMock(return_value={'test': 'val'})) as mock:
sys.argv = ["cmd", "input"]
malwarebazaar.main()
expected = '{"test": "val"}\n'
self.assertEqual(mock_cmd.getvalue(), expected)
mock.assert_called_once()
def test_analyze(self):
"""simulated sendReq and prepareResults with 2 mock objects and variables sendReqOutput and prepareResultOutput,
input created for analyze method call and then we compared results['summary'] with 'no result' """
sendReqOutput = {'threat': 'no_result',"query_status":"ok",'data':[{'sha256_hash':'notavalidhash'}]}
input = '{"artifactType":"hash", "value":"1234"}'
input2 ='{"artifactType":"tlsh", "value":"1234"}'
input3='{"artifactType":"gimphash", "value":"1234"}'
prepareResultOutput = {'response': '',
'summary': 'no result', 'status': 'info'}
with patch('malwarebazaar.sendReq', new=MagicMock(return_value=sendReqOutput)) as mock:
with patch('malwarebazaar.prepareResults', new=MagicMock(return_value=prepareResultOutput)) as mock2:
results = malwarebazaar.analyze(input)
results2 = malwarebazaar.analyze(input2)
results3 =malwarebazaar.analyze(input3)
self.assertEqual(results["summary"],prepareResultOutput['summary'])
self.assertEqual(results2["summary"], prepareResultOutput['summary'])
self.assertEqual(results3["summary"], prepareResultOutput['summary'])
self.assertEqual(results["status"], "info")
self.assertEqual(results2["status"], "info")
self.assertEqual(results3["status"], "info")
mock.assert_called()
def test_prepareResults_illegal_search_term(self):
# illegal search term
raw = {'query_status': 'illegal_search_term'}
expected = {'response': raw, 'status': 'info', 'summary': 'no result'}
results = malwarebazaar.prepareResults(raw)
self.assertEqual(results, expected)
def test_buildReqGimqhash(self):
result = malwarebazaar.buildReq('gimphash', '')
self.assertEqual(
result, {'query': 'get_gimphash', 'gimphash': ''})
def test_buildReqHash(self):
result = malwarebazaar.buildReq('hash', '')
self.assertEqual(
result, {'query': 'get_info', 'hash': ''})
def test_buildReqtlshhash(self):
result = malwarebazaar.buildReq('tlsh', '')
self.assertEqual(
result, {'query': 'get_tlsh', 'tlsh': ''})

View File

@@ -0,0 +1,2 @@
requests>=2.31.0
pyyaml>=6.0

View File

@@ -0,0 +1,6 @@
# Threatfox
## Description
Submit a domain, hash, or IP address to Threatfox for analysis.
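The query format the analyzer sends is sketched below, based on `buildReq()`/`sendReq()` in `threatfox.py`; hashes use the `search_hash` query while IPs and domains use `search_ioc`. The IP value is the sample used in the analyzer's tests.
```
import json
import requests

# Minimal sketch of the analyzer's Threatfox lookup (mirrors buildReq/sendReq in threatfox.py).
# Hashes would instead use: {'query': 'search_hash', 'hash': '<md5 or sha256>'}
query = {'query': 'search_ioc', 'search_term': '139.180.203.104:443'}
response = requests.post('https://threatfox-api.abuse.ch/api/v1/', json.dumps(query))
print(json.dumps(response.json(), indent=2))
```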

View File

@@ -0,0 +1,2 @@
requests>=2.31.0
pyyaml>=6.0

View File

@@ -0,0 +1,10 @@
{
"name": "Threatfox",
"version": "0.1",
"author": "Security Onion Solutions",
"description": "This analyzer queries Threatfox to see if a domain, hash, or IP is considered malicious.",
"supportedTypes" : ["domain","hash","ip"],
"baseUrl": "https://threatfox-api.abuse.ch/api/v1/"
}

View File

@@ -0,0 +1,74 @@
import requests
import helpers
import json
import sys
def buildReq(observ_type, observ_value):
# supports hash, ip, and domain. determines which query type to send.
if observ_type == 'hash':
qterms = {'query': 'search_hash', 'hash': observ_value}
elif observ_type == 'ip' or observ_type == 'domain':
qterms = {'query': 'search_ioc', 'search_term': observ_value}
return qterms
def sendReq(meta, query):
# send a post request based off of our compiled query
url = meta['baseUrl']
response = requests.post(url, json.dumps(query))
return response.json()
def prepareResults(raw):
# gauge threat level based off of threatfox's confidence level
if raw != {} and raw['query_status'] == 'ok':
parsed = raw['data'][0]
# get summary
if parsed['threat_type'] != '':
summary = parsed['threat_type']
else:
summary = parsed['threat_type_desc']
if parsed['confidence_level'] > 75:
status = 'threat'
elif parsed['confidence_level'] > 50:
status = 'caution'
elif parsed['confidence_level'] > 25:
status = 'info'
else:
status = 'ok'
elif raw != {} and raw['query_status'] in ['no_result', 'illegal_search_term', 'illegl_hash']:
status = 'info'
summary = 'no result'
else:
raw = {}
status = 'caution'
summary = 'internal_failure'
results = {'response': raw, 'summary': summary, 'status': status}
return results
def analyze(input):
# put all of our methods together, pass them input, and return
# properly formatted json/python dict output
data = json.loads(input)
meta = helpers.loadMetadata(__file__)
helpers.checkSupportedType(meta, data["artifactType"])
query = buildReq(data['artifactType'], data['value'])
response = sendReq(meta, query)
return prepareResults(response)
def main():
if len(sys.argv) == 2:
results = analyze(sys.argv[1])
print(json.dumps(results))
else:
print("ERROR: Input is not in proper JSON format")
if __name__ == '__main__':
main()

View File

@@ -0,0 +1,121 @@
from io import StringIO
import sys
from unittest.mock import patch, MagicMock
import threatfox
import unittest
class TestThreatfoxMethods(unittest.TestCase):
# This should 1. create a fake cmd input with no args
# and 2. hit the else statement in main. It then
# compares the console output to a hardcoded string.
# DOES NOT WORK WITH ARGPARSE/MAIN METHOD
def test_main_missing_input(self):
with patch('sys.stdout', new=StringIO()) as mock_cmd:
sys.argv = ["cmd"]
threatfox.main()
self.assertEqual(mock_cmd.getvalue(),
'ERROR: Input is not in proper JSON format\n')
# This should 1. create a fake cmd input with 1 arg
# and 2. hit the if statement in main which runs a mock
# analyze method with return value of {'test': 'val'}.
# threatfox.main() should then print that to the console,
# which is then asserted equal against an expected value.
def test_main_success(self):
with patch('sys.stdout', new=StringIO()) as mock_cmd:
with patch('threatfox.analyze', new=MagicMock(return_value={'test': 'val'})) as mock:
sys.argv = ["cmd", "input"]
threatfox.main()
expected = '{"test": "val"}\n'
self.assertEqual(mock_cmd.getvalue(), expected)
mock.assert_called_once()
# result stores the output of the buildReq method
# comparing result with expected output
def test_buildReqHash(self):
result = threatfox.buildReq('hash', '2151c4b970eff0071948dbbc19066aa4')
self.assertEqual(
result, {'query': 'search_hash', 'hash': '2151c4b970eff0071948dbbc19066aa4'})
def test_buildReqIP(self):
result = threatfox.buildReq('ip', '139.180.203.104:443')
self.assertEqual(
result, {'query': 'search_ioc', 'search_term': '139.180.203.104:443'})
def test_buildReqDomain(self):
result = threatfox.buildReq('domain', 'https://google.com')
self.assertEqual(
result, {'query': 'search_ioc', 'search_term': 'https://google.com'})
def test_buildReqFalse(self):
result = threatfox.buildReq('hash', '2151c4b970eff0071948dbbc19066aa4')
self.assertNotEqual(result, {})
# simulate API response and makes sure sendReq gives a response
# we are just checking if sendReq gives back anything
def test_sendReq(self):
with patch('requests.post', new=MagicMock(return_value=MagicMock())) as mock:
response = threatfox.sendReq(
{'baseUrl': 'https://www.randurl.xyz'}, 'example_data')
self.assertIsNotNone(response)
# result stores the output of the prepareResults method
# comparing result with expected output
def test_prepareResults_noinput(self):
# no/improper given input
raw = {}
sim_results = {'response': raw, 'status': 'caution',
'summary': 'internal_failure'}
results = threatfox.prepareResults(raw)
self.assertEqual(results, sim_results)
def test_prepareResults_none(self):
# no results
raw = {'query_status': 'no_result'}
sim_results = {'response': raw,
'status': 'info', 'summary': 'no result'}
results = threatfox.prepareResults(raw)
self.assertEqual(results, sim_results)
def test_prepareResults_illegal_search_term(self):
# illegal search term
raw = {'query_status': 'illegal_search_term'}
expected = {'response': raw, 'status': 'info', 'summary': 'no result'}
results = threatfox.prepareResults(raw)
self.assertEqual(results, expected)
def test_prepareResults_threat(self):
# threat exists
raw = {'query_status': 'ok', 'data': [
{'threat_type': 'threat', 'confidence_level': 94}]}
sim_results = {'response': raw,
'summary': 'threat', 'status': 'threat'}
results = threatfox.prepareResults(raw)
self.assertEqual(results, sim_results)
def test_prepareResults_error(self):
raw = {}
sim_results = {'response': raw, 'status': 'caution',
'summary': 'internal_failure'}
results = threatfox.prepareResults(raw)
self.assertEqual(results, sim_results)
def test_analyze(self):
"""simulated sendReq and prepareResults with 2 mock objects and variables sendReqOutput and prepareResultOutput,
input created for analyze method call and then we compared results['summary'] with 'no result' """
sendReqOutput = {'threat': 'no_result'}
input = '{"artifactType":"hash", "value":"1234"}'
prepareResultOutput = {'response': '',
'summary': 'no result', 'status': ''}
with patch('threatfox.sendReq', new=MagicMock(return_value=sendReqOutput)) as mock:
with patch('threatfox.prepareResults', new=MagicMock(return_value=prepareResultOutput)) as mock2:
results = threatfox.analyze(input)
self.assertEqual(results["summary"], "no result")
mock.assert_called_once()

View File

@@ -38,6 +38,92 @@ sensoroni:
global: True
advanced: True
analyzers:
echotrail:
api_key:
description: API key for the Echotrail analyzer.
helpLink: sensoroni.html
global: False
sensitive: True
advanced: True
forcedType: string
base_url:
description: Base URL for the Echotrail analyzer.
helpLink: sensoroni.html
global: False
sensitive: False
advanced: True
forcedType: string
elasticsearch:
api_key:
description: API key for the Elasticsearch analyzer.
helpLink: sensoroni.html
global: False
sensitive: True
advanced: True
forcedType: string
base_url:
description: Connection URL for the Elasticsearch analyzer.
helpLink: sensoroni.html
global: False
sensitive: False
advanced: True
forcedType: string
auth_user:
description: Username for the Elasticsearch analyzer.
helpLink: sensoroni.html
global: False
sensitive: False
advanced: True
forcedType: string
auth_pwd:
description: User password for the Elasticsearch analyzer.
helpLink: sensoroni.html
global: False
sensitive: True
advanced: True
forcedType: string
num_results:
description: Number of documents to return for the Elasticsearch analyzer.
helpLink: sensoroni.html
global: False
sensitive: False
advanced: True
forcedType: string
index:
description: Search index for the Elasticsearch analyzer.
helpLink: sensoroni.html
global: False
sensitive: False
advanced: True
forcedType: string
time_delta_minutes:
description: Time (in minutes) to search back for the Elasticsearch analyzer.
helpLink: sensoroni.html
global: False
sensitive: False
advanced: True
forcedType: int
timestamp_field_name:
description: Name of the timestamp field in the documents for the Elasticsearch analyzer.
helpLink: sensoroni.html
global: False
sensitive: False
advanced: True
forcedType: string
map:
description: Map between observable types and search field for the Elasticsearch analyzer.
helpLink: sensoroni.html
global: False
sensitive: False
advanced: True
forcedType: string
cert_path:
description: Path to a TLS certificate for the Elasticsearch analyzer.
helpLink: sensoroni.html
global: False
sensitive: False
advanced: True
forcedType: string
emailrep:
api_key:
description: API key for the EmailRep analyzer.