Mirror of https://github.com/Security-Onion-Solutions/securityonion.git (synced 2025-12-06 17:22:49 +01:00)
Merge pull request #12003 from HoangLongVu/2.4/dev
2.4/dev Analyzers for Threatfox, MalwareBazaar, Echotrail, Elasticsearch
@@ -9,6 +9,20 @@ sensoroni:
  sensoronikey:
  soc_host:
  analyzers:
    echotrail:
      base_url: https://api.echotrail.io/insights/
      api_key:
    elasticsearch:
      base_url:
      auth_user:
      auth_pwd:
      num_results: 10
      api_key:
      index: _all
      time_delta_minutes: 14400
      timestamp_field_name: '@timestamp'
      map: {}
      cert_path:
    emailrep:
      base_url: https://emailrep.io/
      api_key:
@@ -9,13 +9,17 @@ The built-in analyzers support the following observable types:

| Name                  | Domain | EML | Hash | IP | Mail | Other | URI | URL | User Agent |
|-----------------------|--------|-----|------|----|------|-------|-----|-----|------------|
| Alienvault OTX        | ✓ | ✗ | ✓ | ✓ | ✗ | ✗ | ✗ | ✓ | ✗ |
| EchoTrail             | ✗ | ✗ | ✓ | ✗ | ✗ | ✓ | ✗ | ✗ | ✗ |
| EmailRep              | ✗ | ✗ | ✗ | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ |
| Elasticsearch         | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
| Greynoise             | ✗ | ✗ | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ |
| LocalFile             | ✓ | ✗ | ✓ | ✓ | ✗ | ✓ | ✗ | ✓ | ✗ |
| Malware Hash Registry | ✗ | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ | ✓ | ✗ |
| MalwareBazaar         | ✗ | ✗ | ✓ | ✗ | ✗ | ✓ | ✗ | ✗ | ✗ |
| Pulsedive             | ✓ | ✗ | ✓ | ✓ | ✗ | ✗ | ✓ | ✓ | ✓ |
| Spamhaus              | ✗ | ✗ | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ |
| Sublime Platform      | ✗ | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ |
| ThreatFox             | ✓ | ✗ | ✓ | ✓ | ✗ | ✗ | ✗ | ✗ | ✗ |
| Urlhaus               | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✓ | ✗ |
| Urlscan               | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✗ | ✓ | ✗ |
| Virustotal            | ✓ | ✗ | ✓ | ✓ | ✗ | ✗ | ✗ | ✓ | ✗ |

@@ -28,13 +32,17 @@ Many analyzers require authentication, via an API key or similar. The table below

| Name | Authn Req'd |
|------|-------------|
| [AlienVault OTX](https://otx.alienvault.com/api) | ✓ |
| [EchoTrail](https://www.echotrail.io/docs/quickstart) | ✓ |
| [EmailRep](https://emailrep.io/key) | ✓ |
| [Elasticsearch](https://www.elastic.co/guide/en/elasticsearch/reference/7.17/setting-up-authentication.html) | ✓ |
| [GreyNoise](https://www.greynoise.io/plans/community) | ✓ |
| [LocalFile](https://github.com/Security-Onion-Solutions/securityonion/tree/fix/sublime_analyzer_documentation/salt/sensoroni/files/analyzers/localfile) | ✗ |
| [Malware Hash Registry](https://hash.cymru.com/docs_whois) | ✗ |
| [MalwareBazaar](https://bazaar.abuse.ch/) | ✗ |
| [Pulsedive](https://pulsedive.com/api/) | ✓ |
| [Spamhaus](https://www.spamhaus.org/dbl/) | ✗ |
| [Sublime Platform](https://sublime.security) | ✓ |
| [ThreatFox](https://threatfox.abuse.ch/) | ✗ |
| [Urlhaus](https://urlhaus.abuse.ch/) | ✗ |
| [Urlscan](https://urlscan.io/docs/api/) | ✓ |
| [VirusTotal](https://developers.virustotal.com/reference/overview) | ✓ |
25  salt/sensoroni/files/analyzers/echotrail/README.md  Normal file
@@ -0,0 +1,25 @@
# EchoTrail

## Description
Submit a filename, hash, or commandline to EchoTrail for analysis.

## Configuration Requirements

In SOC, navigate to `Administration`, toggle `Show all configurable settings, including advanced settings.`, and navigate to `sensoroni` -> `analyzers` -> `echotrail`.



The following configuration options are available:

``api_key`` - API key used for communication with the Echotrail API (Required)

This value should be set in the ``sensoroni`` pillar, like so:

```
sensoroni:
  analyzers:
    echotrail:
      api_key: $yourapikey
```
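For a quick local smoke test, the analyzer's functions can also be driven directly from Python. This is a minimal sketch, assuming it runs from the analyzer directory (so the bundled `helpers` module resolves) and that `echotrail.yaml` carries a valid API key; the hash value is the one from the test-usage comment in `echotrail.py` below:

```
# Minimal sketch: query EchoTrail for a hash via the analyzer's
# analyze() entry point. Assumes helpers and a configured
# echotrail.yaml are present in the working directory.
import json
import echotrail
import helpers

artifact = json.dumps({"artifactType": "hash",
                       "value": "438b6ccd84f4dd32d9684ed7d58fd7d1e5a75fe3f3d12ab6c788e6bb0ffad5e7"})
conf = helpers.loadConfig('echotrail.yaml')  # must contain a valid api_key
print(json.dumps(echotrail.analyze(conf, artifact)))
```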
10  salt/sensoroni/files/analyzers/echotrail/echotrail.json  Normal file
@@ -0,0 +1,10 @@
{
  "name": "Echotrail",
  "version": "0.1",
  "author": "Security Onion Solutions",
  "description": "This analyzer queries Echotrail to see if a related filename, hash, or commandline is considered malicious.",
  "supportedTypes" : ["filename","hash","commandline"],
  "baseUrl": "https://api.echotrail.io/insights/"
}
67  salt/sensoroni/files/analyzers/echotrail/echotrail.py  Normal file
@@ -0,0 +1,67 @@
import json
import os
import sys
import requests
import helpers
import argparse


# for test usage:
# python3 echotrail.py '{"artifactType":"hash", "value":"438b6ccd84f4dd32d9684ed7d58fd7d1e5a75fe3f3d12ab6c788e6bb0ffad5e7"}'
# You will need to provide an API key in the .yaml file.
def checkConfigRequirements(conf):
    if not conf['api_key']:
        sys.exit(126)
    else:
        return True


def sendReq(conf, observ_value):
    # send a GET request using a user-provided API key and the API URL
    url = conf['base_url'] + observ_value
    headers = {'x-api-key': conf['api_key']}
    response = requests.request('GET', url=url, headers=headers)
    return response.json()


def prepareResults(raw):
    # Checking for the 'filenames' key alone does not work when
    # querying by filename, so account for a hash query, a filename
    # query, and anything else with these if statements.
    if 'filenames' in raw.keys():
        summary = raw['filenames'][0][0]
    elif 'tags' in raw.keys():
        summary = raw['tags'][0][0]
    else:
        summary = 'inconclusive'
    status = 'info'
    return {'response': raw, 'summary': summary, 'status': status}


def analyze(conf, input):
    # put all of our methods together and return properly formatted output
    checkConfigRequirements(conf)
    meta = helpers.loadMetadata(__file__)
    data = helpers.parseArtifact(input)
    helpers.checkSupportedType(meta, data['artifactType'])
    response = sendReq(conf, data['value'])
    return prepareResults(response)


def main():
    dir = os.path.dirname(os.path.realpath(__file__))
    parser = argparse.ArgumentParser(
        description='Search Echotrail for a given artifact')
    parser.add_argument(
        'artifact', help='the artifact represented in JSON format')
    parser.add_argument('-c', '--config', metavar='CONFIG_FILE', default=dir + '/echotrail.yaml',
                        help='optional config file to use instead of the default config file')
    args = parser.parse_args()
    if args.artifact:
        results = analyze(helpers.loadConfig(args.config), args.artifact)
        print(json.dumps(results))


if __name__ == '__main__':
    main()
3  salt/sensoroni/files/analyzers/echotrail/echotrail.yaml  Normal file
@@ -0,0 +1,3 @@
base_url: "{{ salt['pillar.get']('sensoroni:analyzers:echotrail:base_url', 'https://api.echotrail.io/insights/') }}"
api_key: "{{ salt['pillar.get']('sensoroni:analyzers:echotrail:api_key', '') }}"
78  salt/sensoroni/files/analyzers/echotrail/echotrail_test.py  Normal file
@@ -0,0 +1,78 @@
from io import StringIO
import sys
from unittest.mock import patch, MagicMock
import unittest
import echotrail


class TestEchoTrailMethods(unittest.TestCase):
    def test_main_success(self):
        with patch('sys.stdout', new=StringIO()) as mock_cmd:
            with patch('echotrail.analyze', new=MagicMock(return_value={'test': 'val'})) as mock:
                sys.argv = ["test", "test"]
                echotrail.main()
                expected = '{"test": "val"}\n'
                self.assertEqual(mock_cmd.getvalue(), expected)
                mock.assert_called_once()

    def test_main_missing_input(self):
        with patch('sys.exit', new=MagicMock()) as sysmock:
            with patch('sys.stderr', new=StringIO()) as mock_stderr:
                sys.argv = ["cmd"]
                echotrail.main()
                self.assertEqual(mock_stderr.getvalue(), "usage: cmd [-h] [-c CONFIG_FILE] artifact\ncmd: error: the following arguments are required: artifact\n")
                sysmock.assert_called_once()

    def test_checkConfigRequirements(self):
        conf = {'base_url': 'https://www.randurl.xyz/', 'api_key': ''}
        with self.assertRaises(SystemExit) as cm:
            echotrail.checkConfigRequirements(conf)
        self.assertEqual(cm.exception.code, 126)

    def test_sendReq(self):
        with patch('requests.request', new=MagicMock(return_value=MagicMock())) as mock:
            response = echotrail.sendReq(conf={'base_url': 'https://www.randurl.xyz/', 'api_key': 'randkey'}, observ_value='example_data')
            self.assertIsNotNone(response)
            mock.assert_called_once()

    def test_prepareResults_noinput(self):
        raw = {}
        sim_results = {'response': raw,
                       'status': 'info', 'summary': 'inconclusive'}
        results = echotrail.prepareResults(raw)
        self.assertEqual(results, sim_results)

    def test_prepareResults_none(self):
        raw = {'query_status': 'no_result'}
        sim_results = {'response': raw,
                       'status': 'info', 'summary': 'inconclusive'}
        results = echotrail.prepareResults(raw)
        self.assertEqual(results, sim_results)

    def test_prepareResults_filenames(self):
        raw = {'filenames': [["abc.exe", "def.exe"], ["abc.exe", "def.exe"]]}
        sim_results = {'response': raw,
                       'status': 'info', 'summary': 'abc.exe'}
        results = echotrail.prepareResults(raw)
        self.assertEqual(results, sim_results)

    def test_prepareResults_tags(self):
        raw = {'tags': [["tag1", "tag2"], ["tag1", "tag2"]]}
        sim_results = {'response': raw,
                       'status': 'info', 'summary': 'tag1'}
        results = echotrail.prepareResults(raw)
        self.assertEqual(results, sim_results)

    def test_analyze(self):
        sendReqOutput = {'threat': 'no_result'}
        input = '{"artifactType":"hash", "value":"1234"}'
        prepareResultOutput = {'response': '',
                               'summary': 'inconclusive', 'status': 'info'}
        conf = {"api_key": "xyz"}

        with patch('echotrail.sendReq', new=MagicMock(return_value=sendReqOutput)) as mock:
            with patch('echotrail.prepareResults', new=MagicMock(return_value=prepareResultOutput)) as mock2:
                results = echotrail.analyze(conf, input)
                self.assertEqual(results["summary"], "inconclusive")
                mock2.assert_called_once()
                mock.assert_called_once()
@@ -0,0 +1,2 @@
requests>=2.31.0
pyyaml>=6.0
58  salt/sensoroni/files/analyzers/elasticsearch/README.md  Normal file
@@ -0,0 +1,58 @@
# Elasticsearch
Elasticsearch returns an informational breakdown of the queried observable.

## Overview
Elasticsearch facilitates queries against the user's own database. The following observable types can be used: hash, domain, file, filename, fqdn, gimphash, IP, mail, mail_subject, regexp, registry, telfhash, tlsh, uri_path, URL, and user-agent values.

## Description
Configure and submit the field you want to search for in your database, e.g. domain, hash, IP, or URL.

## Requirement
An API key or user credentials are necessary for utilizing Elasticsearch.

## Configuration Requirements

In SOC, navigate to `Administration`, toggle `Show all configurable settings, including advanced settings.`, and navigate to `sensoroni` -> `analyzers` -> `elasticsearch`.



The following configuration options are available:

``api_key`` - API key used for communication with the Elasticsearch API (Optional if auth_user and auth_pwd are used)

``auth_user`` - Username used for communication with Elasticsearch

``auth_pwd`` - Password used for communication with Elasticsearch

``base_url`` - URL that connects to the Elasticsearch VM on port 9200. Example format: "https://<your IP address>:9200"

``index`` - The index of the data in the Elasticsearch database. Default value is _all.

``num_results`` - The maximum number of results that will be displayed. Default value is 10.

``time_delta_minutes`` - How far back, in minutes, to search for data. Default value is 14400.

``timestamp_field_name`` - The name of your timestamp field. Default value is @timestamp.

``map`` - A dictionary mapping Security Onion field names to the field names in the user's Elasticsearch database. Example value: {"hash": "userhashfieldname"}, which maps the Security Onion hash field name to the user's hash field name.

``cert_path`` - The path on the host to the certificate used for authentication (Required)

These values should be set in the ``sensoroni`` pillar, like so:

```
sensoroni:
  analyzers:
    elasticsearch:
      base_url: $yourbase_url
      api_key: $yourapi_key
      num_results: $yournum_results
      auth_user: $yourauth_user
      auth_pwd: $yourauth_pwd
      index: $yourindex
      time_delta_minutes: $yourtime_delta_minutes
      timestamp_field_name: $yourtimestamp_field_name
      cert_path: $yourcert_path
      map: $yourmap
```
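To make the search behavior concrete, here is a sketch of the query body the analyzer assembles for a hash lookup (per `buildReq()` in `elasticsearch.py` below), assuming the defaults above (`num_results` 10, `timestamp_field_name` `@timestamp`, `time_delta_minutes` 14400) and no `map` override; the hash value is illustrative only:

```
# Sketch of the Elasticsearch query body built for a hash lookup,
# assuming default settings; the searched value is a placeholder.
import json
from datetime import datetime, timedelta

cur_time = datetime.now()
start_time = cur_time - timedelta(minutes=14400)  # default look-back window
query = {
    "from": 0,
    "size": 10,  # num_results default
    "query": {
        "bool": {
            # field name comes from map when set, else the artifactType
            "must": [{"wildcard": {"hash": "1234*"}}],
            "filter": {
                "range": {
                    "@timestamp": {
                        "gte": start_time.strftime('%Y-%m-%dT%H:%M:%S'),
                        "lte": cur_time.strftime('%Y-%m-%dT%H:%M:%S')
                    }
                }
            }
        }
    }
}
print(json.dumps(query, indent=2))
```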
@@ -0,0 +1,9 @@
{
  "name": "Elasticsearch",
  "version": "0.1",
  "author": "Security Onion Solutions",
  "description": "Queries an Elasticsearch instance for specified field values.",
  "supportedTypes": ["hash", "ip", "domain", "other"]
}
138  salt/sensoroni/files/analyzers/elasticsearch/elasticsearch.py  Normal file
@@ -0,0 +1,138 @@
from datetime import datetime, timedelta
import argparse
import requests
import helpers
import json
import sys
import os

# As it stands, this analyzer does not support querying for mixed-case fields without disregarding case completely.
# So the current version will only support querying for all-lowercase alphanumerical values.

# default usage is:
# python3 elasticsearch.py '{"artifactType":"hash", "value":"*"}'

# To use outside of a Security Onion box, pass in '-c test.yaml' at the end
# of the above command to give this analyzer some test values. You may edit the
# values in the test.yaml file freely.


def checkConfigRequirements(conf):
    # if the user hasn't given valid configurables, quit
    if not conf['num_results']:
        sys.exit(126)
    if not conf['time_delta_minutes']:
        sys.exit(126)
    if (not conf['auth_user'] or not conf['auth_pwd']) and not conf['api_key']:
        sys.exit(126)
    if not conf['index']:
        sys.exit(126)
    if not conf['base_url']:
        sys.exit(126)
    if not conf['timestamp_field_name']:
        sys.exit(126)
    if not conf['cert_path']:
        sys.exit(126)
    return True


def buildReq(conf, input):
    # structure a query to send to the Elasticsearch machine
    # based off of user-configurable values
    num_results = conf['num_results']

    if conf['map'] is not None:
        mappings = conf['map']
    else:
        mappings = dict()

    cur_time = datetime.now()
    start_time = cur_time - timedelta(minutes=int(conf['time_delta_minutes']))

    if input['artifactType'] in mappings:
        type = mappings[input['artifactType']]
    else:
        type = input['artifactType']

    query = {
        "from": 0,
        "size": num_results,
        "query": {
            "bool": {
                "must": [{
                    "wildcard": {
                        type: input['value'],
                    },
                }
                ],
                "filter": {
                    "range": {
                        conf['timestamp_field_name']: {
                            "gte": start_time.strftime('%Y-%m-%dT%H:%M:%S'),
                            "lte": cur_time.strftime('%Y-%m-%dT%H:%M:%S')
                        }
                    }
                }
            }
        }
    }

    return json.dumps(query)


def sendReq(conf, query):
    # send the configured query with even more user specification
    headers = {}
    url = conf['base_url'] + conf['index'] + '/_search'
    uname = conf['auth_user']
    pwd = conf['auth_pwd']
    apikey = conf['api_key']
    cert_path = conf['cert_path']

    if pwd and uname:
        headers = {
            'Content-Type': 'application/json',
        }
        response = requests.post(str(url), auth=(
            uname, pwd), verify=cert_path, data=query, headers=headers)
    elif apikey:
        headers = {
            'Content-Type': 'application/json',
            'Authorization': f"Apikey {apikey}"
        }
        response = requests.post(
            str(url), verify=cert_path, data=query, headers=headers)

    return response.json()


def prepareResults(raw):
    # return the raw API response, the number of hits found, and the status of the request
    summary = f"Documents returned: {len(raw['hits']['hits'])}"
    status = 'info'
    return {'response': raw, 'summary': summary, 'status': status}


def analyze(conf, input):
    checkConfigRequirements(conf)
    data = json.loads(input)
    query = buildReq(conf, data)
    response = sendReq(conf, query)
    return prepareResults(response)


def main():
    dir = os.path.dirname(os.path.realpath(__file__))
    parser = argparse.ArgumentParser(
        description='Search Elasticsearch for a given artifact')
    parser.add_argument('artifact', help='required artifact')
    parser.add_argument('-c', '--config', metavar='CONFIG_FILE', default=dir + '/elasticsearch.yaml',
                        help='optional config file to use instead of the default config file')
    args = parser.parse_args()
    if args.artifact:
        results = analyze(helpers.loadConfig(args.config), args.artifact)
        print(json.dumps(results))


if __name__ == '__main__':
    main()
@@ -0,0 +1,10 @@
base_url: "{{ salt['pillar.get']('sensoroni:analyzers:elasticsearch:base_url', '') }}"
auth_user: "{{ salt['pillar.get']('sensoroni:analyzers:elasticsearch:auth_user', '') }}"
auth_pwd: "{{ salt['pillar.get']('sensoroni:analyzers:elasticsearch:auth_pwd', '') }}"
num_results: "{{ salt['pillar.get']('sensoroni:analyzers:elasticsearch:num_results', 10) }}"
api_key: "{{ salt['pillar.get']('sensoroni:analyzers:elasticsearch:api_key', '') }}"
index: "{{ salt['pillar.get']('sensoroni:analyzers:elasticsearch:index', '_all') }}"
time_delta_minutes: "{{ salt['pillar.get']('sensoroni:analyzers:elasticsearch:time_delta_minutes', 14400) }}"
timestamp_field_name: "{{ salt['pillar.get']('sensoroni:analyzers:elasticsearch:timestamp_field_name', '@timestamp') }}"
map: {{ salt['pillar.get']('sensoroni:analyzers:elasticsearch:map', '') }}
cert_path: "{{ salt['pillar.get']('sensoroni:analyzers:elasticsearch:cert_path', '') }}"
@@ -0,0 +1,251 @@
from io import StringIO
import sys
from unittest.mock import patch, MagicMock
import unittest
import elasticsearch
import json
from datetime import datetime, timedelta


class TestElasticSearchMethods(unittest.TestCase):

    '''Test that the analyzer main method works as expected when not given enough input'''
    def test_main_missing_input(self):
        with patch('sys.exit', new=MagicMock()) as sysmock:
            with patch('sys.stderr', new=StringIO()) as mock_stderr:
                sys.argv = ["cmd"]
                elasticsearch.main()
                self.assertEqual(mock_stderr.getvalue(), "usage: cmd [-h] [-c CONFIG_FILE] artifact\ncmd: error: the following arguments are required: artifact\n")
                sysmock.assert_called_once_with(2)

    '''Test that the analyzer main method works as expected when all required input is given'''
    def test_main_success(self):
        conf = {"base_url": "test", "auth_user": "test", "auth_pwd": "test", "api_key": "test", "index": "test", "time_delta_minutes": 14400, "map": {}, "cert_path": ""}
        with patch('elasticsearch.helpers.loadConfig', new=MagicMock(return_value=conf)) as mock_yaml:
            with patch('sys.stdout', new=StringIO()) as mock_cmd:
                with patch('elasticsearch.analyze', new=MagicMock(return_value={'foo': 'bar'})) as mock:
                    sys.argv = ["cmd", "conf"]
                    elasticsearch.main()
                    expected = '{"foo": "bar"}\n'
                    self.assertEqual(mock_cmd.getvalue(), expected)
                    mock.assert_called_once()
                    mock_yaml.assert_called_once()

    '''Tests that check for empty and None values in the configurables'''
    def test_checkConfigRequirements_no_num_results(self):
        conf = {"base_url": "https://baseurl", "auth_user": "test",
                "auth_pwd": "test", "num_results": None, "api_key": "abcd1234",
                "index": "_all", "time_delta_minutes": 12345, "timestamp_field_name": "@timestamp",
                "map": {"test": "test"}, "cert_path": "/cert"}
        with self.assertRaises(SystemExit) as cm:
            elasticsearch.checkConfigRequirements(conf)
        self.assertEqual(cm.exception.code, 126)

    def test_checkConfigRequirements_no_delta(self):
        conf = {"base_url": "https://baseurl", "auth_user": "test",
                "auth_pwd": "test", "num_results": 1, "api_key": "abcd1234",
                "index": "_all", "time_delta_minutes": None, "timestamp_field_name": "@timestamp",
                "map": {"test": "test"}, "cert_path": "/cert"}
        with self.assertRaises(SystemExit) as cm:
            elasticsearch.checkConfigRequirements(conf)
        self.assertEqual(cm.exception.code, 126)

    def test_checkConfigRequirements_no_auth_user(self):
        conf = {"base_url": "https://baseurl", "auth_user": None, "auth_pwd": "test",
                "num_results": "1", "api_key": None, "index": "_all", "time_delta_minutes": 12345,
                "timestamp_field_name": "@timestamp", "map": {"test": "test"}, "cert_path": "/cert"}
        with self.assertRaises(SystemExit) as cm:
            elasticsearch.checkConfigRequirements(conf)
        self.assertEqual(cm.exception.code, 126)

    def test_checkConfigRequirements_no_index(self):
        conf = {"base_url": "https://baseurl", "auth_user": "test", "auth_pwd": "test",
                "num_results": "1", "api_key": "abcd1234", "index": None, "time_delta_minutes": 12345,
                "timestamp_field_name": "@timestamp", "map": {"test": "test"}, "cert_path": "/cert"}
        with self.assertRaises(SystemExit) as cm:
            elasticsearch.checkConfigRequirements(conf)
        self.assertEqual(cm.exception.code, 126)

    def test_checkConfigRequirements_no_base_url(self):
        conf = {"base_url": None, "auth_user": "test", "auth_pwd": "test", "num_results": "1",
                "api_key": "abcd1234", "index": "_all", "time_delta_minutes": 12345,
                "timestamp_field_name": "@timestamp", "map": {"test": "test"}, "cert_path": "/cert"}
        with self.assertRaises(SystemExit) as cm:
            elasticsearch.checkConfigRequirements(conf)
        self.assertEqual(cm.exception.code, 126)

    def test_checkConfigRequirements_no_timestamp_field_name(self):
        conf = {"base_url": "https://baseurl", "auth_user": "test", "auth_pwd": "test", "num_results": "1",
                "api_key": "abcd1234", "index": "_all", "time_delta_minutes": 12345,
                "timestamp_field_name": None, "map": {"test": "test"}, "cert_path": "/cert"}
        with self.assertRaises(SystemExit) as cm:
            elasticsearch.checkConfigRequirements(conf)
        self.assertEqual(cm.exception.code, 126)

    def test_checkConfigRequirements_no_cert_path(self):
        conf = {"base_url": "https://baseurl", "auth_user": "test", "auth_pwd": "test", "num_results": "1",
                "api_key": "abcd1234", "index": "_all", "time_delta_minutes": 12345, "timestamp_field_name": "@timestamp",
                "map": {"test": "test"}, "cert_path": None}
        with self.assertRaises(SystemExit) as cm:
            elasticsearch.checkConfigRequirements(conf)
        self.assertEqual(cm.exception.code, 126)

    '''Test that checks the buildReq method by comparing a mocked buildReq result with an expectedQuery; a mock object simulates
    the expectedQuery since the Elasticsearch buildReq uses values from the config'''
    def test_buildReq(self):
        numberOfResults = 1
        observableType = "hash"
        expectedQuery = {
            "from": 0,
            "size": numberOfResults,
            "query": {
                "bool": {
                    "must": [{
                        "wildcard": {
                            observableType: observableType,
                        },
                    }
                    ],
                    "filter": {
                        "range": {
                            "@timestamp": {
                                "gte": ('2023-11-29T14:23:45'),
                                "lte": ('2023-11-29T14:23:45')
                            }
                        }
                    }
                }
            }
        }
        with patch('elasticsearch.buildReq', new=MagicMock(return_value=expectedQuery)) as mock:
            response = elasticsearch.buildReq(observableType, numberOfResults)
            self.assertEqual(json.dumps(response), json.dumps(expectedQuery))
            mock.assert_called_once()

    def test_wrongbuildReq(self):
        mapping = None
        result = {'map': mapping, 'artifactType': 'hash', 'timestamp_field_name': 'abc', 'time_delta_minutes': 14400, 'num_results': 10, 'value': '0'}
        cur_time = datetime.now()
        start_time = cur_time - timedelta(minutes=result['time_delta_minutes'])
        query = elasticsearch.buildReq(result, result)
        comparequery = json.dumps({
            "from": 0,
            "size": 10,
            "query": {
                "bool": {
                    "must": [{
                        "wildcard": {
                            'hash': result['value'],
                        },
                    }
                    ],
                    "filter": {
                        "range": {
                            result['timestamp_field_name']: {
                                "gte": start_time.strftime('%Y-%m-%dT%H:%M:%S'),
                                "lte": cur_time.strftime('%Y-%m-%dT%H:%M:%S')
                            }
                        }
                    }
                }
            }
        })
        self.assertEqual(query, comparequery)

    def test_rightbuildReq(self):
        result = {'map': {'hash': 'testingHash'}, 'artifactType': 'hash', 'timestamp_field_name': 'abc', 'time_delta_minutes': 14400, 'num_results': 10, 'value': '0'}
        cur_time = datetime.now()
        start_time = cur_time - timedelta(minutes=result['time_delta_minutes'])
        query = elasticsearch.buildReq(result, result)
        comparequery = json.dumps({
            "from": 0,
            "size": 10,
            "query": {
                "bool": {
                    "must": [{
                        "wildcard": {
                            result['map'][result['artifactType']]: result['value'],
                        },
                    }],
                    "filter": {
                        "range": {
                            result['timestamp_field_name']: {
                                "gte": start_time.strftime('%Y-%m-%dT%H:%M:%S'),
                                "lte": cur_time.strftime('%Y-%m-%dT%H:%M:%S')
                            }
                        }
                    }
                }
            }
        })
        self.assertEqual(query, comparequery)

    def test_rightbuildReq100result(self):
        result = {'map': {'hash': 'testingHash'}, 'artifactType': 'hash', 'timestamp_field_name': 'abc', 'time_delta_minutes': 14400, 'num_results': 100, 'value': '0'}
        cur_time = datetime.now()
        start_time = cur_time - timedelta(minutes=result['time_delta_minutes'])
        query = elasticsearch.buildReq(result, result)
        comparequery = json.dumps({
            "from": 0,
            "size": 100,
            "query": {
                "bool": {
                    "must": [{
                        "wildcard": {
                            result['map'][result['artifactType']]: result['value'],
                        },
                    }],
                    "filter": {
                        "range": {
                            result['timestamp_field_name']: {
                                "gte": start_time.strftime('%Y-%m-%dT%H:%M:%S'),
                                "lte": cur_time.strftime('%Y-%m-%dT%H:%M:%S')
                            }
                        }
                    }
                }
            }
        })
        self.assertEqual(query, comparequery)

    '''Tests that check the sendReq method, expecting a response from requests.post'''
    def test_sendReq_user_password(self):
        conf = {"base_url": "test", "auth_user": "test", "auth_pwd": "test", "api_key": "test", "index": "test", "cert_path": ""}
        with patch('requests.post', new=MagicMock(return_value=MagicMock())) as mock:
            response = elasticsearch.sendReq(conf, 'example_query')
            self.assertIsNotNone(response)
            mock.assert_called_once()

    def test_sendReq_apikey(self):
        conf = {"base_url": "test", "auth_user": None, "auth_pwd": None, "api_key": "abcd1234", "index": "test", "cert_path": ""}
        with patch('requests.post', new=MagicMock(return_value=MagicMock())) as mock:
            response = elasticsearch.sendReq(conf, 'example_query')
            self.assertIsNotNone(response)
            mock.assert_called_once()

    '''Test that checks the prepareResults method by comparing its output with an expected result'''
    def test_prepareResults(self):
        raw = {"hits": {"hits": [{"_id": 0, "hash": "123"}]}}
        results = elasticsearch.prepareResults(raw)
        self.assertEqual(results["response"], raw)
        self.assertEqual(results["summary"], "Documents returned: 1")
        self.assertEqual(results["status"], "info")

    '''Test that checks the analyze method: sendReq and prepareResults are simulated with two mock objects and the variables
    sendReqOutput and prepareResultOutput; an input is created for the analyze call and results['summary'] is compared with 'Documents returned: 5' '''
    def test_analyze(self):
        sendReqOutput = {'_id': "0", "hash": "123"}
        input = '{"artifactType": "hash", "value": "123"}'
        prepareResultOutput = {'response': {'_id': "0", "hash": "123"}, 'summary': "Documents returned: 5", 'status': 'info'}
        conf = {"base_url": "test", "auth_user": "test", "auth_pwd": "test", "num_results": 10, "api_key": "test", "index": "test",
                "time_delta_minutes": 14400, "timestamp_field_name": "test", "map": {}, "cert_path": "test"}
        with patch('elasticsearch.sendReq', new=MagicMock(return_value=sendReqOutput)) as mock:
            with patch('elasticsearch.prepareResults', new=MagicMock(return_value=prepareResultOutput)) as mock2:
                results = elasticsearch.analyze(conf, input)
                self.assertEqual(results["summary"], "Documents returned: 5")
                mock.assert_called_once()
                mock2.assert_called_once()
@@ -0,0 +1,3 @@
requests>=2.31.0
pyyaml>=6.0
urllib3>=2.1.0
5  salt/sensoroni/files/analyzers/malwarebazaar/README.md  Normal file
@@ -0,0 +1,5 @@
# Malwarebazaar

## Description
Submit a gimphash, hash, tlsh, or telfhash to Malwarebazaar for analysis.
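As a rough illustration, the analyzer can be exercised directly with a JSON artifact. A minimal sketch, assuming it runs where the package import used by the bundled tests resolves; the hash value is a placeholder:

```
# Minimal sketch: look up a sample hash on MalwareBazaar, following the
# usage comment in malwarebazaar.py below. The hash is a placeholder.
import json
from malwarebazaar import malwarebazaar  # import path used by the bundled tests

artifact = json.dumps({"artifactType": "hash",
                       "value": "d41d8cd98f00b204e9800998ecf8427e"})  # placeholder
results = malwarebazaar.analyze(artifact)
print(results['status'], results['summary'])
```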
@@ -0,0 +1,8 @@
{
  "name": "Malwarebazaar",
  "version": "0.1",
  "author": "Security Onion Solutions",
  "description": "This analyzer queries Malwarebazaar to see if a hash, gimphash, tlsh, or telfhash is considered malicious.",
  "supportedTypes" : ["gimphash","hash","tlsh", "telfhash"],
  "baseUrl": "https://mb-api.abuse.ch/api/v1/"
}
156  salt/sensoroni/files/analyzers/malwarebazaar/malwarebazaar.py  Executable file
@@ -0,0 +1,156 @@
import requests
import helpers
import json
import sys

# supports querying for hash, gimphash, tlsh, and telfhash
# usage is as follows:
# python3 malwarebazaar.py '{"artifactType":"x", "value":"y"}'


def buildReq(observ_type, observ_value):
    # determine the correct query type to send based off of the observable type
    unique_types = {'gimphash': 1, 'telfhash': 1, 'tlsh': 1}
    if observ_type in unique_types:
        qtype = 'get_' + observ_type
    else:
        qtype = 'get_info'
    return {'query': qtype, observ_type: observ_value}


def sendReq(meta, query):
    # send a POST request with our compiled query to the API
    url = meta['baseUrl']
    response = requests.post(url, query)
    return response.json()


def isInJson(data, target_string, maxdepth=1000, tail=0):
    # recursively searches a JSON object for an occurrence of a string
    # depth limiter (arbitrary default value of 1000)
    if tail > maxdepth:
        return False

    if isinstance(data, dict):
        for key, value in data.items():
            if isinstance(value, (dict, list)):
                # recursive call
                if isInJson(value, target_string, maxdepth, tail + 1):
                    return True
            elif isinstance(value, str) and target_string in value.lower():
                # found target string
                return True

    elif isinstance(data, list):
        for item in data:
            if isinstance(item, (dict, list)):
                # recursive call
                if isInJson(item, target_string, maxdepth, tail + 1):
                    return True
            elif isinstance(item, str) and target_string in item.lower():
                # found target string
                return True

    return False


def prepareResults(raw):
    # parse the raw API response, gauge the threat level,
    # and return a status and a short summary
    if raw == {}:
        status = 'caution'
        summary = 'internal_failure'
    elif raw['query_status'] == 'ok':
        parsed = raw['data'][0]
        vendor_data = parsed['vendor_intel']

        # get summary
        if 'signature' in parsed:
            summary = parsed['signature']
        elif 'tags' in parsed:
            summary = str(parsed['tags'][0])
        elif 'YOROI_YOMI' in vendor_data:
            summary = vendor_data['YOROI_YOMI']['detection']

        # gauge vendors to determine an approximation of status,
        # normalized to a value out of 100;
        # only updates the score if it finds a higher indicator value
        score = 0
        vendor_info_list = [
            ('vxCube', 'maliciousness', int),
            ('Triage', 'score', lambda x: int(x) * 10),
            ('DocGuard', 'alertlevel', lambda x: int(x) * 10),
            ('YOROI_YOMI', 'score', lambda x: int(float(x)) * 100),
            ('Inquest', 'verdict', lambda x: 100 if x == 'MALICIOUS' else 0),
            ('ReversingLabs', 'status',
             lambda x: 100 if x == 'MALICIOUS' else 0),
            ('Spamhaus_HBL', 'detection',
             lambda x: 100 if x == 'MALICIOUS' else 0),
        ]
        for vendor, key, transform in vendor_info_list:
            if vendor in vendor_data and key in vendor_data[vendor]:
                value = vendor_data[vendor][key]
                score = max(score, transform(value))
        # ensure the score is at least 0 (or some default value)
        score = max(score, 0)

        # compute status
        if score >= 75 or isInJson(raw, 'MALICIOUS'.lower(), 1001):
            status = 'threat'
        elif score >= 50:
            status = 'caution'
        elif score >= 25:
            status = 'info'
        else:
            status = 'ok'
    elif raw['query_status'] != 'ok':
        status = 'info'
        summary = 'no result'

    return {'response': raw, 'summary': summary, 'status': status}


def analyze(input):
    # put all of our methods together, pass them input, and return
    # properly formatted json/python dict output
    data = json.loads(input)
    meta = helpers.loadMetadata(__file__)
    helpers.checkSupportedType(meta, data["artifactType"])

    if (data['artifactType'] == 'tlsh' or data['artifactType'] == 'gimphash'
            or data['artifactType'] == 'telfhash'):
        # To get accurate reporting for TLSH, telfhash and gimphash,
        # we deem it necessary to query twice
        # for the sake of retrieving more specific data.

        initialQuery = buildReq(data['artifactType'], data['value'])
        initialRaw = sendReq(meta, initialQuery)

        # To prevent double-querying when a tlsh/gimphash is invalid,
        # this if statement is necessary.
        if initialRaw['query_status'] == 'ok':
            # Setting artifactType and value to our new re-query arguments
            # to get a more detailed report.
            data['artifactType'] = 'hash'
            data['value'] = initialRaw['data'][0]['sha256_hash']
        else:
            return prepareResults(initialRaw)

    query = buildReq(data['artifactType'], data['value'])
    response = sendReq(meta, query)
    return prepareResults(response)


def main():
    if len(sys.argv) == 2:
        results = analyze(sys.argv[1])
        print(json.dumps(results))
    else:
        print("ERROR: Input is not in proper JSON format")


if __name__ == '__main__':
    main()
@@ -0,0 +1,245 @@
from io import StringIO
import sys
from unittest.mock import patch, MagicMock
from malwarebazaar import malwarebazaar
import unittest


class TestMalwarebazaarMethods(unittest.TestCase):
    def test_main_missing_input(self):
        with patch('sys.stdout', new=StringIO()) as mock_cmd:
            sys.argv = ["cmd"]
            malwarebazaar.main()
            self.assertEqual(mock_cmd.getvalue(),
                             'ERROR: Input is not in proper JSON format\n')

    def test_main_success(self):
        with patch('sys.stdout', new=StringIO()) as mock_cmd:
            with patch('malwarebazaar.malwarebazaar.analyze',
                       new=MagicMock(return_value={'test': 'val'})) as mock:
                sys.argv = ["cmd", "input"]
                malwarebazaar.main()
                expected = '{"test": "val"}\n'
                self.assertEqual(mock_cmd.getvalue(), expected)
                mock.assert_called_once()

    def test_isInJson_tail_greater_than_max_depth(self):
        max_depth = 1000
        tail = 2000
        test_string = "helo"
        input_json = {
            "value": "test",
            "test": "value",
            "arr": ["Foo", "Bar", "Hello"],
            "dict1": {"key1": "val", "key2": "helo"}
        }
        self.assertEqual(malwarebazaar.isInJson(input_json, test_string, max_depth, tail), False)

    def test_isInJson_string_found_in_dict(self):
        test_string = "helo"
        input_json = {
            "value": "test",
            "test": "value",
            "arr": ["Foo", "Bar", "Hello"],
            "dict1": {"key1": "val", "key2": "helo"}
        }
        self.assertEqual(malwarebazaar.isInJson(input_json, test_string), True)

    def test_isInJson_dict_in_list(self):
        max_depth = 1000
        tail = 1
        test_string = "helo"
        input_json = {
            "key1": "test",
            "key2": "value",
            "key3": ["Foo", "Bar", "Hello"],
            "nested_list": [{"key1": "val", "key2": "helo"}]
        }
        self.assertEqual(malwarebazaar.isInJson(input_json, test_string, max_depth, tail), True)

    def test_isInJson_string_found_in_arr(self):
        test_string = "helo"
        input_json = {
            "value": "test",
            "test": "value",
            "arr": ["Foo", "Bar", "helo"],
            "dict1": {"Hello": "val", "key": "val"}
        }
        self.assertEqual(malwarebazaar.isInJson(input_json, test_string), True)

    def test_isInJson_string_not_found(self):
        test_string = "ValNotInJSON"
        input_json = {
            "value": "test",
            "test": "value",
            "arr": ["Foo", "Bar", "helo"],
            "dict1": {"Hello": "val", "key": "val"}
        }
        self.assertEqual(malwarebazaar.isInJson(input_json, test_string), False)

    def test_analyze(self):
        """sendReq and prepareResults are simulated with two mock objects
        and the variables sendReqOutput and prep_res_sim;
        inputs are created for the analyze method calls
        and each result's summary is compared with 'no result'."""
        sendReqOutput = {'threat': 'no_result', "query_status": "ok",
                         'data': [{'sha256_hash': 'notavalidhash'}]}
        input = '{"artifactType": "hash", "value": "1234"}'
        input2 = '{"artifactType": "tlsh", "value": "1234"}'
        input3 = '{"artifactType": "gimphash", "value": "1234"}'
        prep_res_sim = {'response': '',
                        'summary': 'no result', 'status': 'info'}

        with patch('malwarebazaar.malwarebazaar.sendReq',
                   new=MagicMock(return_value=sendReqOutput)) as mock:
            with patch('malwarebazaar.malwarebazaar.prepareResults',
                       new=MagicMock(return_value=prep_res_sim)) as mock2:
                results = malwarebazaar.analyze(input)
                results2 = malwarebazaar.analyze(input2)
                results3 = malwarebazaar.analyze(input3)
                self.assertEqual(results["summary"], prep_res_sim['summary'])
                self.assertEqual(results2["summary"], prep_res_sim['summary'])
                self.assertEqual(results3["summary"], prep_res_sim['summary'])
                self.assertEqual(results["status"], "info")
                self.assertEqual(results2["status"], "info")
                self.assertEqual(results3["status"], "info")
                mock2.assert_called()
                mock.assert_called()

    def test_analyze_result(self):
        """sendReq and prepareResults are simulated with two mock objects
        and the variables sendReqOutput and prep_res_sim;
        inputs are created for the analyze method calls
        and each result is compared with the simulated prepareResults output."""
        sendReqOutput = {'threat': 'threat', "query_status": "notok", 'data': [
            {'sha256_hash': 'validhash'}]}
        input = '{"artifactType": "hash", "value": "1234"}'
        input2 = '{"artifactType": "tlsh", "value": "1234"}'
        input3 = '{"artifactType": "gimphash", "value": "1234"}'
        prep_res_sim = {'response': '',
                        'summary': 'Bad', 'status': 'threat'}

        with patch('malwarebazaar.malwarebazaar.sendReq',
                   new=MagicMock(return_value=sendReqOutput)) as mock:
            with patch('malwarebazaar.malwarebazaar.prepareResults',
                       new=MagicMock(return_value=prep_res_sim)) as mock2:
                results = malwarebazaar.analyze(input)
                results2 = malwarebazaar.analyze(input2)
                results3 = malwarebazaar.analyze(input3)
                self.assertEqual(results["summary"], prep_res_sim['summary'])
                self.assertEqual(results2["summary"], prep_res_sim['summary'])
                self.assertEqual(results3["summary"], prep_res_sim['summary'])
                self.assertEqual(results["status"], "threat")
                self.assertEqual(results2["status"], "threat")
                self.assertEqual(results3["status"], "threat")
                mock2.assert_called()
                mock.assert_called()

    def test_prepareResults_illegal_search_term(self):
        # illegal search term
        raw = {'query_status': 'illegal_search_term'}
        expected = {'response': raw, 'status': 'info', 'summary': 'no result'}
        results = malwarebazaar.prepareResults(raw)
        self.assertEqual(results, expected)

    def test_prepareResults_empty(self):
        # raw is empty
        raw = {}
        expected = {'response': raw, 'status': 'caution',
                    'summary': 'internal_failure'}
        results = malwarebazaar.prepareResults(raw)
        self.assertEqual(results, expected)

    def test_prepareResults_threat(self):
        # ReversingLabs reports MALICIOUS -> threat
        raw = {'query_status': 'ok', 'data': [{'sha256_hash': 'validhash',
                                               'vendor_intel':
                                               {'ReversingLabs':
                                                {'status':
                                                 'MALICIOUS'}},
                                               'signature': 'abcd1234',
                                               'tags': ['tag1']}]}
        expected = {'response': raw, 'status': 'threat', 'summary': 'abcd1234'}
        results = malwarebazaar.prepareResults(raw)
        self.assertEqual(results, expected)

    def test_prepareResults_caution(self):
        # Triage score of 6 (normalized to 60) -> caution
        raw = {'query_status': 'ok', 'data': [{'sha256_hash': 'validhash',
                                               'vendor_intel':
                                               {'Triage': {'score': '6'}},
                                               'signature': 'abcd1234',
                                               'tags': ['tag1']}]}
        expected = {'response': raw,
                    'status': 'caution', 'summary': 'abcd1234'}
        results = malwarebazaar.prepareResults(raw)
        self.assertEqual(results, expected)

    def test_prepareResults_info(self):
        # Triage score of 3 (normalized to 30) -> info
        raw = {'query_status': 'ok', 'data': [{'sha256_hash': 'validhash',
                                               'vendor_intel':
                                               {'Triage': {'score': '3'}},
                                               'signature': 'abcd1234',
                                               'tags': ['tag1']}]}
        expected = {'response': raw, 'status': 'info', 'summary': 'abcd1234'}
        results = malwarebazaar.prepareResults(raw)
        self.assertEqual(results, expected)

    def test_prepareResults_ok(self):
        # Triage score of 1 (normalized to 10) -> ok
        raw = {'query_status': 'ok', 'data': [{'sha256_hash': 'validhash',
                                               'vendor_intel':
                                               {'Triage': {'score': '1'}},
                                               'signature': 'abcd1234',
                                               'tags': ['tag1']}]}
        expected = {'response': raw, 'status': 'ok', 'summary': 'abcd1234'}
        results = malwarebazaar.prepareResults(raw)
        self.assertEqual(results, expected)

    def test_prepareResults_ok_tags(self):
        # no signature present, so summary falls back to the first tag
        raw = {'query_status': 'ok', 'data': [{'sha256_hash': 'validhash',
                                               'vendor_intel':
                                               {'Triage': {'score': '1'}},
                                               'tags': ['tag1']}]}
        expected = {'response': raw, 'status': 'ok', 'summary': 'tag1'}
        results = malwarebazaar.prepareResults(raw)
        self.assertEqual(results, expected)

    def test_prepareResults_ok_yomi(self):
        # no signature or tags, so summary falls back to the YOROI_YOMI detection
        raw = {'query_status': 'ok',
               'data': [{'sha256_hash': 'validhash',
                         'vendor_intel':
                         {'YOROI_YOMI':
                          {'detection':
                           'detection1',
                           'summary': '0.1'}}}]}
        expected = {'response': raw, 'status': 'ok', 'summary': 'detection1'}
        results = malwarebazaar.prepareResults(raw)
        self.assertEqual(results, expected)

    def test_buildReqGimqhash(self):
        result = malwarebazaar.buildReq('gimphash', '')
        self.assertEqual(
            result, {'query': 'get_gimphash', 'gimphash': ''})

    def test_buildReqHash(self):
        result = malwarebazaar.buildReq('hash', '')
        self.assertEqual(
            result, {'query': 'get_info', 'hash': ''})

    def test_buildReqtlshhash(self):
        result = malwarebazaar.buildReq('tlsh', '')
        self.assertEqual(
            result, {'query': 'get_tlsh', 'tlsh': ''})

    # simulate an API response and make sure sendReq gives a response;
    # we are just checking if sendReq gives back anything
    def test_sendReq(self):
        with patch('requests.post',
                   new=MagicMock(return_value=MagicMock())) as mock:
            response = malwarebazaar.sendReq(
                {'baseUrl': 'https://www.randurl.xyz'}, 'example_data')
            self.assertIsNotNone(response)
            mock.assert_called_once()
@@ -0,0 +1,2 @@
requests>=2.31.0
pyyaml>=6.0
@@ -6,7 +6,7 @@ Submit a base64-encoded EML file to Sublime Platform for analysis.

## Configuration Requirements
In SOC, navigate to `Administration`, toggle `Show all configurable settings, including advanced settings.`, and navigate to `sensoroni` -> `analyzers` -> `sublime_platform`.

- 
+ 


The following configuration options are available:
6  salt/sensoroni/files/analyzers/threatfox/README.md  Normal file
@@ -0,0 +1,6 @@
# Threatfox

## Description
Submit a domain, hash, IP, or URL to Threatfox for analysis.
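As a quick illustration, the analyzer's `analyze()` entry point (defined in `threatfox.py` below) can be called directly with a JSON artifact. A minimal sketch; the IP:port value mirrors the one used in the bundled tests, and a live run performs a real API call:

```
# Minimal sketch: submit an IOC to ThreatFox via the analyzer's
# analyze() entry point, equivalent to 'python3 threatfox.py <json>'.
import json
import threatfox

artifact = json.dumps({"artifactType": "ip", "value": "139.180.203.104:443"})
print(json.dumps(threatfox.analyze(artifact)))
```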
@@ -0,0 +1,2 @@
requests>=2.31.0
pyyaml>=6.0
10  salt/sensoroni/files/analyzers/threatfox/threatfox.json  Normal file
@@ -0,0 +1,10 @@
{
  "name": "Threatfox",
  "version": "0.1",
  "author": "Security Onion Solutions",
  "description": "This analyzer queries Threatfox to see if a domain, hash, or IP is considered malicious.",
  "supportedTypes" : ["domain","hash","ip"],
  "baseUrl": "https://threatfox-api.abuse.ch/api/v1/"
}
74  salt/sensoroni/files/analyzers/threatfox/threatfox.py  Normal file
@@ -0,0 +1,74 @@
import requests
import helpers
import json
import sys


def buildReq(observ_type, observ_value):
    # supports hash, ip, and domain; determines which query type to send
    if observ_type == 'hash':
        qterms = {'query': 'search_hash', 'hash': observ_value}
    elif observ_type == 'ip' or observ_type == 'domain':
        qterms = {'query': 'search_ioc', 'search_term': observ_value}
    return qterms


def sendReq(meta, query):
    # send a POST request based off of our compiled query
    url = meta['baseUrl']
    response = requests.post(url, json.dumps(query))
    return response.json()


def prepareResults(raw):
    # gauge threat level based off of ThreatFox's confidence level
    if raw != {} and raw['query_status'] == 'ok':
        parsed = raw['data'][0]

        # get summary
        if parsed['threat_type'] != '':
            summary = parsed['threat_type']
        else:
            summary = parsed['threat_type_desc']

        if parsed['confidence_level'] > 75:
            status = 'threat'
        elif parsed['confidence_level'] > 50:
            status = 'caution'
        elif parsed['confidence_level'] > 25:
            status = 'info'
        else:
            status = 'ok'
    elif raw != {} and raw['query_status'] in ['no_result', 'illegal_search_term', 'illegal_hash']:
        status = 'info'
        summary = 'no result'
    else:
        raw = {}
        status = 'caution'
        summary = 'internal_failure'

    results = {'response': raw, 'summary': summary, 'status': status}
    return results


def analyze(input):
    # put all of our methods together, pass them input, and return
    # properly formatted json/python dict output
    data = json.loads(input)
    meta = helpers.loadMetadata(__file__)
    helpers.checkSupportedType(meta, data["artifactType"])
    query = buildReq(data['artifactType'], data['value'])
    response = sendReq(meta, query)
    return prepareResults(response)


def main():
    if len(sys.argv) == 2:
        results = analyze(sys.argv[1])
        print(json.dumps(results))
    else:
        print("ERROR: Input is not in proper JSON format")


if __name__ == '__main__':
    main()
163  salt/sensoroni/files/analyzers/threatfox/threatfox_test.py  Normal file
@@ -0,0 +1,163 @@
from io import StringIO
import sys
from unittest.mock import patch, MagicMock
import threatfox
import unittest


class TestThreatfoxMethods(unittest.TestCase):
    # This should 1. create a fake cmd input with no args
    # and 2. hit the else statement in main. It then
    # compares the console output to a hardcoded string.

    # DOES NOT WORK WITH ARGPARSE/MAIN METHOD

    def test_main_missing_input(self):
        with patch('sys.stdout', new=StringIO()) as mock_cmd:
            sys.argv = ["cmd"]
            threatfox.main()
            self.assertEqual(mock_cmd.getvalue(),
                             'ERROR: Input is not in proper JSON format\n')

    # This should 1. create a fake cmd input with 1 arg
    # and 2. hit the if statement in main, which runs a mock
    # analyze method with a return value of {'test': 'val'}.
    # threatfox.main() should then print that to the console,
    # which is then asserted equal against an expected value.

    def test_main_success(self):
        with patch('sys.stdout', new=StringIO()) as mock_cmd:
            with patch('threatfox.analyze', new=MagicMock(return_value={'test': 'val'})) as mock:
                sys.argv = ["cmd", "input"]
                threatfox.main()
                expected = '{"test": "val"}\n'
                self.assertEqual(mock_cmd.getvalue(), expected)
                mock.assert_called_once()

    # result stores the output of the buildReq method;
    # comparing result with the expected output
    def test_buildReqHash(self):
        result = threatfox.buildReq('hash', '2151c4b970eff0071948dbbc19066aa4')
        self.assertEqual(
            result, {'query': 'search_hash', 'hash': '2151c4b970eff0071948dbbc19066aa4'})

    def test_buildReqIP(self):
        result = threatfox.buildReq('ip', '139.180.203.104:443')
        self.assertEqual(
            result, {'query': 'search_ioc', 'search_term': '139.180.203.104:443'})

    def test_buildReqDomain(self):
        result = threatfox.buildReq('domain', 'https://google.com')
        self.assertEqual(
            result, {'query': 'search_ioc', 'search_term': 'https://google.com'})

    def test_buildReqFalse(self):
        result = threatfox.buildReq('hash', '2151c4b970eff0071948dbbc19066aa4')
        self.assertNotEqual(result, {})

    # simulate an API response and make sure sendReq gives a response;
    # we are just checking if sendReq gives back anything
    def test_sendReq(self):
        with patch('requests.post', new=MagicMock(return_value=MagicMock())) as mock:
            response = threatfox.sendReq(
                {'baseUrl': 'https://www.randurl.xyz'}, 'example_data')
            self.assertIsNotNone(response)
            mock.assert_called_once()

    # result stores the output of the prepareResults method; comparing result with the expected output
    def test_prepareResults_noinput(self):
        # no/improper given input
        raw = {}
        sim_results = {'response': raw, 'status': 'caution',
                       'summary': 'internal_failure'}
        results = threatfox.prepareResults(raw)
        self.assertEqual(results, sim_results)

    def test_prepareResults_none(self):
        # no results
        raw = {'query_status': 'no_result'}
        sim_results = {'response': raw,
                       'status': 'info', 'summary': 'no result'}
        results = threatfox.prepareResults(raw)
        self.assertEqual(results, sim_results)

    def test_prepareResults_illegal_search_term(self):
        # illegal search term
        raw = {'query_status': 'illegal_search_term'}
        expected = {'response': raw, 'status': 'info', 'summary': 'no result'}
        results = threatfox.prepareResults(raw)
        self.assertEqual(results, expected)

    def test_prepareResults_threat(self):
        # threat exists
        raw = {'query_status': 'ok', 'data': [
            {'threat_type': 'threat', 'confidence_level': 94}]}
        sim_results = {'response': raw,
                       'summary': 'threat', 'status': 'threat'}
        results = threatfox.prepareResults(raw)
        self.assertEqual(results, sim_results)

    def test_prepareResults_threat_type_does_not_exist(self):
        # threat type does not exist
        raw = {'query_status': 'ok', 'data': [
            {'threat_type': '', 'threat_type_desc': 'description', 'confidence_level': 0}]}
        sim_results = {'response': raw,
                       'summary': 'description', 'status': 'ok'}
        results = threatfox.prepareResults(raw)
        self.assertEqual(results, sim_results)

    def test_prepareResults_threat_type_25_or_less(self):
        # confidence level of 25 or less
        raw = {'query_status': 'ok', 'data': [
            {'threat_type': 'threat', 'confidence_level': 25}]}
        sim_results = {'response': raw,
                       'summary': 'threat', 'status': 'ok'}
        results = threatfox.prepareResults(raw)
        self.assertEqual(results, sim_results)

    def test_prepareResults_threat_type_greater_than_25(self):
        # confidence level greater than 25
        raw = {'query_status': 'ok', 'data': [
            {'threat_type': 'threat', 'confidence_level': 26}]}
        sim_results = {'response': raw,
                       'summary': 'threat', 'status': 'info'}
        results = threatfox.prepareResults(raw)
        self.assertEqual(results, sim_results)

    def test_prepareResults_threat_type_greater_than_50(self):
        # confidence level greater than 50
        raw = {'query_status': 'ok', 'data': [
            {'threat_type': 'threat', 'confidence_level': 51}]}
        sim_results = {'response': raw,
                       'summary': 'threat', 'status': 'caution'}
        results = threatfox.prepareResults(raw)
        self.assertEqual(results, sim_results)

    def test_prepareResults_threat_type_greater_than_75(self):
        # confidence level greater than 75
        raw = {'query_status': 'ok', 'data': [
            {'threat_type': 'threat', 'confidence_level': 76}]}
        sim_results = {'response': raw,
                       'summary': 'threat', 'status': 'threat'}
        results = threatfox.prepareResults(raw)
        self.assertEqual(results, sim_results)

    def test_prepareResults_error(self):
        raw = {}
        sim_results = {'response': raw, 'status': 'caution',
                       'summary': 'internal_failure'}
        results = threatfox.prepareResults(raw)
        self.assertEqual(results, sim_results)

    def test_analyze(self):
        """sendReq and prepareResults are simulated with two mock objects and the variables sendReqOutput and prepareResultOutput;
        an input is created for the analyze method call and results['summary'] is compared with 'no result'."""
        sendReqOutput = {'threat': 'no_result'}
        input = '{"artifactType":"hash", "value":"1234"}'
        prepareResultOutput = {'response': '',
                               'summary': 'no result', 'status': ''}
        with patch('threatfox.sendReq', new=MagicMock(return_value=sendReqOutput)) as mock:
            with patch('threatfox.prepareResults', new=MagicMock(return_value=prepareResultOutput)) as mock2:
                results = threatfox.analyze(input)
                self.assertEqual(results["summary"], "no result")
                mock.assert_called_once()
                mock2.assert_called_once()
@@ -38,6 +38,92 @@ sensoroni:
      global: True
      advanced: True
  analyzers:
    echotrail:
      api_key:
        description: API key for the Echotrail analyzer.
        helpLink: sensoroni.html
        global: False
        sensitive: True
        advanced: False
        forcedType: string
      base_url:
        description: Base URL for the Echotrail analyzer.
        helpLink: sensoroni.html
        global: False
        sensitive: False
        advanced: False
        forcedType: string
    elasticsearch:
      api_key:
        description: API key for the Elasticsearch analyzer.
        helpLink: sensoroni.html
        global: False
        sensitive: True
        advanced: True
        forcedType: string
      base_url:
        description: Connection URL for the Elasticsearch analyzer.
        helpLink: sensoroni.html
        global: False
        sensitive: False
        advanced: False
        forcedType: string
      auth_user:
        description: Username for the Elasticsearch analyzer.
        helpLink: sensoroni.html
        global: False
        sensitive: False
        advanced: False
        forcedType: string
      auth_pwd:
        description: User password for the Elasticsearch analyzer.
        helpLink: sensoroni.html
        global: False
        sensitive: True
        advanced: False
        forcedType: string
      num_results:
        description: Number of documents to return for the Elasticsearch analyzer.
        helpLink: sensoroni.html
        global: False
        sensitive: False
        advanced: True
        forcedType: string
      index:
        description: Search index for the Elasticsearch analyzer.
        helpLink: sensoroni.html
        global: False
        sensitive: False
        advanced: False
        forcedType: string
      time_delta_minutes:
        description: Time (in minutes) to search back for the Elasticsearch analyzer.
        helpLink: sensoroni.html
        global: False
        sensitive: False
        advanced: True
        forcedType: int
      timestamp_field_name:
        description: Specified name for a document's timestamp field for the Elasticsearch analyzer.
        helpLink: sensoroni.html
        global: False
        sensitive: False
        advanced: True
        forcedType: string
      map:
        description: Map between observable types and search fields for the Elasticsearch analyzer.
        helpLink: sensoroni.html
        global: False
        sensitive: False
        advanced: False
        forcedType: string
      cert_path:
        description: Path to a TLS certificate for the Elasticsearch analyzer.
        helpLink: sensoroni.html
        global: False
        sensitive: False
        advanced: False
        forcedType: string
    emailrep:
      api_key:
        description: API key for the EmailRep analyzer.