diff --git a/.github/workflows/pythontest.yml b/.github/workflows/pythontest.yml new file mode 100644 index 000000000..c0e692730 --- /dev/null +++ b/.github/workflows/pythontest.yml @@ -0,0 +1,31 @@ +name: python-test + +on: [push, pull_request] + +jobs: + build: + + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + python-version: ["3.10"] + python-code-path: ["salt/sensoroni/files/analyzers"] + + steps: + - uses: actions/checkout@v3 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v3 + with: + python-version: ${{ matrix.python-version }} + - name: Install dependencies + run: | + python -m pip install --upgrade pip + python -m pip install flake8 pytest pytest-cov + find . -name requirements.txt -exec pip install -r {} \; + - name: Lint with flake8 + run: | + flake8 ${{ matrix.python-code-path }} --show-source --max-complexity=12 --doctests --max-line-length=200 --statistics + - name: Test with pytest + run: | + pytest ${{ matrix.python-code-path }} --cov=${{ matrix.python-code-path }} --doctest-modules --cov-report=term --cov-fail-under=90 --cov-config=${{ matrix.python-code-path }}/pytest.ini diff --git a/.gitignore b/.gitignore index 19447927b..7f446a041 100644 --- a/.gitignore +++ b/.gitignore @@ -56,4 +56,15 @@ $RECYCLE.BIN/ # Windows shortcuts *.lnk -# End of https://www.gitignore.io/api/macos,windows \ No newline at end of file +# End of https://www.gitignore.io/api/macos,windows + +# Pytest output +__pycache__ +.pytest_cache +.coverage +*.pyc +.venv + +# Analyzer dev/test config files +*_dev.yaml +site-packages \ No newline at end of file diff --git a/salt/sensoroni/files/analyzers/README.md b/salt/sensoroni/files/analyzers/README.md new file mode 100644 index 000000000..fe311725a --- /dev/null +++ b/salt/sensoroni/files/analyzers/README.md @@ -0,0 +1,248 @@ +# Security Onion Analyzers + +Security Onion provides a means for performing data analysis on varying inputs. This data can be any data of interest sourced from event logs. Examples include hostnames, IP addresses, file hashes, URLs, etc. The analysis is conducted by one or more analyzers that understand that type of input. Analyzers come with the default installation of Security Onion. However, additional analyzers can also be added to extend the analysis across other areas or data types. + +## Supported Observable Types +The built-in analyzers support the following observable types: + +| Name | Domain | Hash | IP | JA3 | Mail | Other | URI | URL | User Agent | | ------------------------|--------|-------|-------|-------|-------|-------|-------|-------|------------| | Alienvault OTX |✓ |✓|✓|✗|✗|✗|✗|✓|✗| | EmailRep |✗ |✗|✗|✗|✓|✗|✗|✗|✗| | Greynoise |✗ |✗|✓|✗|✗|✗|✗|✗|✗| | JA3er |✗ |✗|✗|✓|✗|✗|✗|✗|✗| | LocalFile |✓ |✓|✓|✓|✗|✓|✗|✓|✗| | Pulsedive |✓ |✓|✓|✗|✗|✗|✓|✓|✓| | Spamhaus |✗ |✗|✓|✗|✗|✗|✗|✗|✗| | Urlhaus |✗ |✗|✗|✗|✗|✗|✗|✓|✗| | Urlscan |✗ |✗|✗|✗|✗|✗|✗|✓|✗| | Virustotal |✓ |✓|✓|✗|✗|✗|✗|✓|✗| + +## Developer Guide + +### Python + +Analyzers are Python modules, and can be made up of a single .py script for simpler analyzers, or a more complex set of scripts organized within nested directories. + +The Python language was chosen because of its wide adoption in the security industry, its ease of development and testing, and the abundance of developers with Python skills. + +Specifically, analyzers must be compatible with Python 3.10. + +For more information about Python, see the [Python Documentation](https://docs.python.org).
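+ +As a quick illustration of the simplest possible shape, a hypothetical single-script analyzer might look like the following (illustrative only; the exact input, output, and exit code conventions are detailed later in this document): + +```python +# Hypothetical minimal analyzer, for illustration only. +import json +import sys + + +def analyze(artifact): + # ... query a service or local data source using artifact["value"] ... + return {"response": {}, "summary": "harmless", "status": "ok"} + + +if __name__ == "__main__": + # Sensoroni passes the artifact as a single JSON string argument + print(json.dumps(analyze(json.loads(sys.argv[1])))) +```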
+ +### Development + +Custom analyzers should be developed outside of the Security Onion cluster, in a proper software development environment, with version control or other backup mechanisms in place. The analyzer can be developed, unit tested, and integration tested without the need for a Security Onion installation. Once satisfied with the analyzer's functionality, the analyzer directory should be copied to the Security Onion manager node. + +Developing an analyzer directly on a Security Onion manager node is strongly discouraged, as loss of source code (and time and effort) can occur, should the manager node suffer a catastrophic failure with disk storage loss. + +For best results, avoid long, complicated functions in favor of short, discrete functions. This has several benefits: + +- Easier to troubleshoot +- Easier to maintain +- Easier to unit test +- Easier for other developers to review + +### Linting + +Source code should adhere to the [PEP 8 - Style Guide for Python Code](https://peps.python.org/pep-0008/). Developers can use the default configuration of `flake8` to validate conformance, or run the included `build.sh` inside the analyzers directory. Note that linting conformance is mandatory for analyzers that are contributed back to the Security Onion project. + +### Testing + +Python's [unittest](https://docs.python.org/3/library/unittest.html) library can be used for covering analyzer code with unit tests. Unit tests are encouraged for custom analyzers, and mandatory for public analyzers submitted back to the Security Onion project. + +If you are new to unit testing, please see the included `urlhaus_test.py` as an example. + +Unit test files should be named with a `_test.py` suffix, as in `urlhaus_test.py`. + + +### Analyzer Package Structure + +Deployment of a custom analyzer entails copying the analyzer source directory and dependency wheel archives to the Security Onion manager node. The destination locations can be found inside the `securityonion` salt source directory tree. Using the [Saltstack](https://github.com/saltstack/salt) directory pattern allows developers to add their own analyzers with minimal additional effort when upgrading to newer versions of Security Onion. When the _sensoroni_ salt state executes, it will merge the default analyzers with any local analyzers, and copy the merged analyzers into the `/opt/so/conf/sensoroni` directory. + +Do not modify files in the `/opt/so/conf/sensoroni` directory! This is a generated directory and changes made inside it will be automatically erased on a frequent interval. + +On a Security Onion manager, custom analyzers should be placed inside the `/opt/so/saltstack/local/salt/sensoroni` directory, as described in the next section.
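+ +For example, a custom analyzer developed in a directory named `myanalyzer` (a placeholder name) could be copied onto the manager node as follows; the `files/analyzers` subpath follows the directory tree shown in the next section: + +```bash +# On the manager node, after transferring the analyzer directory to it: +sudo mkdir -p /opt/so/saltstack/local/salt/sensoroni/files/analyzers +sudo cp -r myanalyzer /opt/so/saltstack/local/salt/sensoroni/files/analyzers/ +```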
+ +#### Directory Tree + +From within the default saltstack directory, the following files and directories exist: + +``` +salt + |- sensoroni + |- files + |- analyzers + |- urlhaus <- Example of an existing analyzer + | |- source-packages <- Contains wheel package bundles for this analyzer's dependencies + | |- site-packages <- Auto-generated site-packages directory (or used for custom dependencies) + | |- requirements.txt <- List of all dependencies needed for this analyzer + | |- urlhaus.py <- Source code for the analyzer + | |- urlhaus_test.py <- Unit tests for the analyzer source code + | |- urlhaus.json <- Metadata for the analyzer + | |- __init__.py <- Package initialization file, often empty + | + |- build.sh <- Simple CI tool for validating linting and unit tests + |- helpers.py <- Common functions shared by many analyzers + |- helpers_test.py <- Unit tests for the shared source code + |- pytest.ini <- Configuration options for flake8 and pytest + |- README.md <- The file you are currently reading +``` + +Custom analyzers should conform to this same structure, but instead of being placed in the `/opt/so/saltstack/default` directory tree, they should be placed in the `/opt/so/saltstack/local` directory tree. This ensures future Security Onion upgrades will not overwrite customizations. Shared files like `build.sh` and `helpers.py` do not need to be duplicated. They can remain in the _default_ directory tree. Only new or modified files should exist in the _local_ directory tree. + +#### Metadata + +Each analyzer has certain metadata that describes the function of the analyzer, required inputs, artifact compatibility, optional configuration options, analyzer version, and other important details. This file is static and is not intended to be used for dynamic or custom configuration options. It should only be modified by the author of the analyzer. + +The following example describes the urlhaus metadata content: + +``` +{ + "name": "Urlhaus", <- Unique human-friendly name of this analyzer + "version": "0.1", <- The version of the analyzer + "author": "Security Onion Solutions", <- Author's name, and/or email or other contact information + "description": "This analyzer queries URLHaus...", <- A brief, concise description of the analyzer + "supportedTypes" : ["url"], <- List of types that must match the SOC observable types + "baseUrl": "https://urlhaus-api.abuse.ch/v1/url/" <- Optional hardcoded data used by the analyzer +} +``` + +The `supportedTypes` values should only contain the types that this analyzer can work with. In the case of the URLHaus analyzer, we know that it works with URLs. So adding "hash" to this list wouldn't make sense, since URLHaus doesn't provide information about file hashes. If an analyzer does not support a particular type then it will not show up in the analyzer results in SOC for that observable being analyzed. This is intentional, to eliminate unnecessary screen clutter in SOC. To find a list of available values for the `supportedTypes` field, log in to SOC and, inside of a Case, click the + button on the Observables tab. You will see a list of types, and each of those can be used in this metadata field, when applicable to the analyzer. + +#### Dependencies + +Analyzers will often require the use of third-party packages. For example, if an analyzer needs to make a request to a remote server via HTTPS, then the `requests` package will likely be used.
Each analyzer will contain a `requirements.txt` file, in which all third-party dependencies can be specified, following the Python [Requirements File Specification](https://pip.pypa.io/en/stable/reference/requirements-file-format/). + +Additionally, to support airgapped users, the dependency packages themselves, and any transitive dependencies, should be placed inside the `source-packages` directory. To obtain the full hierarchy of dependencies, execute the following command: + +```bash +pip download -r /requirements.txt -d /source-packages +``` + + +### Analyzer Architecture + +The Sensoroni Docker container is responsible for executing analyzers. Only the manager's Sensoroni container will process analyzer jobs. Other nodes in the grid, such as sensors and search nodes, will not be assigned analyzer jobs. + +When the Sensoroni Docker container starts, the `/opt/so/conf/sensoroni/analyzer` directory is mapped into the container. The initialization of the Sensoroni Analyze module will scan that directory for any subdirectories. Each valid subdirectory will be added as an available analyzer. + +The analyzer itself will only run when a user in SOC enqueues an analyzer job, such as via the Cases -> Observables tab. When the Sensoroni node is ready to run the job, it will execute the Python interpreter separately for each loaded analyzer. The command line resembles the following: + +```bash +python -m urlhaus '{"artifactType":"url","value":"https://bigbadbotnet.invalid",...}' +``` + +It is up to each analyzer to determine whether the provided input is compatible with that analyzer. This is assisted by the analyzer metadata, as described earlier in this document, with the use of the `supportedTypes` list. + +Once the analyzer completes its work, it must terminate promptly. See the following sections for more details on the expected internal behavior of the analyzer. + +#### Configuration + +Analyzers may need dynamic configuration data, such as credentials or other secrets, in order to complete their function. Optional configuration files can provide this information, and are expected to reside in the analyzer's directory. Configuration files are typically written in YAML syntax for ease of modification. + +Configuration files for analyzers included with Security Onion will be pillarized, meaning they derive their custom values from the Saltstack pillar data. For example, an analyzer that requires a user-supplied credential might contain a config file resembling the following, where Jinja templating syntax is used to extract Salt pillar data: + +```yaml +username: {{ salt['pillar.get']('sensoroni:analyzers:myanalyzer:username', '') }} +password: {{ salt['pillar.get']('sensoroni:analyzers:myanalyzer:password', '') }} +``` + +Sensoroni will not provide any inputs to the analyzer during execution, other than the artifact input in JSON format. However, developers will likely need to test the analyzer outside of Sensoroni and without Jinja templating; therefore, an alternate config file should normally be supplied as the configuration argument during testing. Analyzers should allow for this additional command line argument, but by default should automatically read a configuration file stored in the analyzer's directory. + +#### Exit Code + +If an analyzer determines it cannot or should not operate on the input then the analyzer should return an exit code of `126`. + +If an analyzer does attempt to operate against the input then the exit code should be 0, regardless of the outcome. The outcome, be it an error, a confirmed threat detection, or perhaps an unknown outcome, should be noted in the output of the analyzer.
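+ +The shared `helpers.py` in this directory follows this convention when checking whether an analyzer supports the incoming artifact type: + +```python +def checkSupportedType(meta, artifact_type): + if artifact_type not in meta['supportedTypes']: + sys.exit(126) # input not supported; decline the job + else: + return True +```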
+ +#### Output + +The outcome of the analyzer is reflected in the analyzer's output to `stdout`. The output must be JSON formatted, and should contain the following fields. + +`summary`: A very short summary of the outcome. This should be under 50 characters, otherwise it will be truncated when displayed on the Analyzer job list. + +`status`: One of the following status values, whichever most appropriately reflects the outcome: +- `ok`: The analyzer has concluded that the provided input is not a known threat. +- `info`: This analyzer provides informative data, but does not attempt to conclude whether the input is a threat. +- `caution`: The data provided is inconclusive. Analysts should review this information further. This can be used in error scenarios, such as if the analyzer fails to complete, perhaps due to a remote service being offline. +- `threat`: The analyzer has detected that the input is likely related to a threat. + +`error`: [Optional] If the analyzer encounters an unrecoverable error, those details, useful for administrators to troubleshoot the problem, should be placed in this field. + +Additional fields are allowed, and should contain data that is specific to the analyzer. + +Below is an example of a _urlhaus_ analyzer output. Note that the urlhaus raw JSON is added to a custom field called "response". + +```json +{ + "response": { + "blacklists": { + "spamhaus_dbl": "not listed", + "surbl": "not listed" + }, + "date_added": "2022-04-07 12:39:14 UTC", + "host": "abeibaba.com", + "id": "2135795", + "larted": "false", + "last_online": null, + "payloads": null, + "query_status": "ok", + "reporter": "switchcert", + "tags": [ + "Flubot" + ], + "takedown_time_seconds": null, + "threat": "malware_download", + "url": "https://abeibaba.com/ian/?redacted", + "url_status": "offline", + "urlhaus_reference": "https://urlhaus.abuse.ch/url/2135795/" + }, + "status": "threat", + "summary": "malware_download" +} +``` + +Users in SOC will be able to view the entire JSON output, therefore it is important that sensitive information, such as credentials or other secrets, is excluded from the output. + +#### Internationalization + +Some of the built-in analyzers use snake_case summary values, instead of human-friendly words or phrases. These are identifiers that the SOC UI will use to look up a localized translation for the user. The use of these identifiers is not required for custom analyzers. In fact, in order for an identifier to be properly localized, the translations must exist in the SOC product, which is out of scope of this development guide. That said, the following generic translations might be useful for custom analyzers: + +| Identifier | English | | ------------------ | -------------------------- | | `malicious` | Malicious | | `suspicious` | Suspicious | | `harmless` | Harmless | | `internal_failure` | Analyzer Internal Failure | | `timeout` | Remote Host Timed Out | + +#### Timeout + +It is expected that analyzers will finish quickly, but there is a default timeout in place that will abort the analyzer if the timeout is exceeded. By default that timeout is 15 minutes (900000 milliseconds), but it can be customized via the `sensoroni:analyze_timeout_ms` salt pillar.
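+ +For example, to reduce the timeout to 5 minutes (300000 milliseconds), the pillar value could be set as follows (a sketch; place it alongside your other sensoroni pillar settings): + +```yaml +sensoroni: + analyze_timeout_ms: 300000 +```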
+ + +## Contributing + +Review the Security Onion project [contribution guidelines](https://github.com/Security-Onion-Solutions/securityonion/blob/master/CONTRIBUTING.md) if you are considering contributing an analyzer to the Security Onion project. + +#### Procedure + +In order to make a custom analyzer into a permanent Security Onion analyzer, the following steps need to be taken: + +1. Fork the [securityonion GitHub repository](https://github.com/Security-Onion-Solutions/securityonion) +2. Copy your custom analyzer directory to the forked project, under the `securityonion/salt/sensoroni/files/analyzers` directory. +3. Ensure the contribution requirements in the following section are met. +4. Submit a [pull request](https://github.com/Security-Onion-Solutions/securityonion/pulls) to merge your GitHub fork back into the `securityonion` _dev_ branch. + +#### Requirements + +The following requirements must be satisfied in order for analyzer pull requests to be accepted into the Security Onion GitHub project: + +- Analyzer contributions must not contain licensed dependencies or source code that is incompatible with the [GPLv2 licensing](https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html). +- All source code must pass the `flake8` lint check. This ensures source code conforms to the same style guides as the other analyzers. The Security Onion project will automatically run the linter after each push to a `securityonion` repository fork, and again when submitting a pull request. Failed lint checks will result in the submitter being sent an automated email message. +- All source code must include accompanying unit test coverage. The Security Onion project will automatically run the unit tests after each push to a `securityonion` repository fork, and again when submitting a pull request. Failed unit tests, or insufficient unit test coverage, will result in the submitter being sent an automated email message. +- Documentation of the analyzer, its input requirements, conditions for operation, and other relevant information must be clearly written in an accompanying analyzer metadata file. This file is described in more detail earlier in this document. +- Source code must be well-written and be free of security defects that can put users or their data at unnecessary risk. + + diff --git a/salt/sensoroni/files/analyzers/build.sh b/salt/sensoroni/files/analyzers/build.sh new file mode 100755 index 000000000..17e53c6a7 --- /dev/null +++ b/salt/sensoroni/files/analyzers/build.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +HOME_DIR=$(dirname "$0") +TARGET_DIR=${1:-.} + +PATH=$PATH:/usr/local/bin + +if ! which pytest &> /dev/null || ! which flake8 &> /dev/null ; then + echo "Missing dependencies. 
Consider running the following command:" + echo " python -m pip install flake8 pytest pytest-cov" + exit 1 +fi + +flake8 "$TARGET_DIR" "--config=${HOME_DIR}/pytest.ini" +pytest "$TARGET_DIR" "--cov-config=${HOME_DIR}/pytest.ini" "--cov=$TARGET_DIR" --doctest-modules --cov-report=term --cov-fail-under=100 diff --git a/salt/sensoroni/files/analyzers/emailrep/__init__.py b/salt/sensoroni/files/analyzers/emailrep/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/salt/sensoroni/files/analyzers/emailrep/emailrep.json b/salt/sensoroni/files/analyzers/emailrep/emailrep.json new file mode 100644 index 000000000..cfd0656d8 --- /dev/null +++ b/salt/sensoroni/files/analyzers/emailrep/emailrep.json @@ -0,0 +1,7 @@ +{ + "name": "EmailRep", + "version": "0.1", + "author": "Security Onion Solutions", + "description": "This analyzer queries the EmailRep API for email address reputation information", + "supportedTypes" : ["email", "mail"] +} diff --git a/salt/sensoroni/files/analyzers/emailrep/emailrep.py b/salt/sensoroni/files/analyzers/emailrep/emailrep.py new file mode 100755 index 000000000..4e9a8fee9 --- /dev/null +++ b/salt/sensoroni/files/analyzers/emailrep/emailrep.py @@ -0,0 +1,67 @@ +import json +import os +import sys +import requests +import helpers +import argparse + + +def checkConfigRequirements(conf): + if "api_key" not in conf: + sys.exit(126) + else: + return True + + +def sendReq(conf, meta, email): + url = conf['base_url'] + email + headers = {"Key": conf['api_key']} + response = requests.request('GET', url=url, headers=headers) + return response.json() + + +def prepareResults(raw): + # Default to an internal failure unless a known response shape is matched below + status = "caution" + summary = "internal_failure" + if "suspicious" in raw: + if raw['suspicious'] is True: + status = "caution" + summary = "suspicious" + elif raw['suspicious'] is False: + status = "ok" + summary = "harmless" + elif "status" in raw: + if raw["reason"] == "invalid email": + status = "caution" + summary = "Invalid email address." + elif "exceeded daily limit" in raw["reason"]: + status = "caution" + summary = "Exceeded daily request limit."
+ else: + status = "caution" + summary = "internal_failure" + results = {'response': raw, 'summary': summary, 'status': status} + return results + + +def analyze(conf, input): + checkConfigRequirements(conf) + meta = helpers.loadMetadata(__file__) + data = helpers.parseArtifact(input) + helpers.checkSupportedType(meta, data["artifactType"]) + response = sendReq(conf, meta, data["value"]) + return prepareResults(response) + + +def main(): + dir = os.path.dirname(os.path.realpath(__file__)) + parser = argparse.ArgumentParser(description='Search EmailRep for a given artifact') + parser.add_argument('artifact', help='the artifact represented in JSON format') + parser.add_argument('-c', '--config', metavar="CONFIG_FILE", default=dir + "/emailrep.yaml", help='optional config file to use instead of the default config file') + + args = parser.parse_args() + if args.artifact: + results = analyze(helpers.loadConfig(args.config), args.artifact) + print(json.dumps(results)) + + +if __name__ == "__main__": + main() diff --git a/salt/sensoroni/files/analyzers/emailrep/emailrep.yaml b/salt/sensoroni/files/analyzers/emailrep/emailrep.yaml new file mode 100644 index 000000000..360fdc540 --- /dev/null +++ b/salt/sensoroni/files/analyzers/emailrep/emailrep.yaml @@ -0,0 +1,2 @@ +base_url: https://emailrep.io/ +api_key: "{{ salt['pillar.get']('sensoroni:analyzers:emailrep:api_key', '') }}" diff --git a/salt/sensoroni/files/analyzers/emailrep/emailrep_test.py b/salt/sensoroni/files/analyzers/emailrep/emailrep_test.py new file mode 100644 index 000000000..ecd6010b0 --- /dev/null +++ b/salt/sensoroni/files/analyzers/emailrep/emailrep_test.py @@ -0,0 +1,85 @@ +from io import StringIO +import sys +from unittest.mock import patch, MagicMock +from emailrep import emailrep +import unittest + + +class TestEmailRepMethods(unittest.TestCase): + + def test_main_missing_input(self): + with patch('sys.exit', new=MagicMock()) as sysmock: + with patch('sys.stderr', new=StringIO()) as mock_stderr: + sys.argv = ["cmd"] + emailrep.main() + self.assertEqual(mock_stderr.getvalue(), "usage: cmd [-h] [-c CONFIG_FILE] artifact\ncmd: error: the following arguments are required: artifact\n") + sysmock.assert_called_once_with(2) + + def test_main_success(self): + output = {"foo": "bar"} + with patch('sys.stdout', new=StringIO()) as mock_stdout: + with patch('emailrep.emailrep.analyze', new=MagicMock(return_value=output)) as mock: + sys.argv = ["cmd", "input"] + emailrep.main() + expected = '{"foo": "bar"}\n' + self.assertEqual(mock_stdout.getvalue(), expected) + mock.assert_called_once() + + def test_checkConfigRequirements_not_present(self): + conf = {"not_a_file_path": "blahblah"} + with self.assertRaises(SystemExit) as cm: + emailrep.checkConfigRequirements(conf) + self.assertEqual(cm.exception.code, 126) + + def test_sendReq(self): + with patch('requests.request', new=MagicMock(return_value=MagicMock())) as mock: + meta = {} + conf = {"base_url": "https://myurl/", "api_key": "abcd1234"} + email = "test@abc.com" + response = emailrep.sendReq(conf=conf, meta=meta, email=email) + mock.assert_called_once_with("GET", headers={"Key": "abcd1234"}, url="https://myurl/test@abc.com") + self.assertIsNotNone(response) + + def test_prepareResults_invalidEmail(self): + raw = {"status": "fail", "reason": "invalid email"} + results = emailrep.prepareResults(raw) + self.assertEqual(results["response"], raw) + self.assertEqual(results["summary"], "Invalid email address.") + self.assertEqual(results["status"], "caution") + + def 
test_prepareResults_not_suspicious(self): + raw = {"email": "notsus@domain.com", "reputation": "high", "suspicious": False, "references": 21, "details": {"blacklisted": False, "malicious_activity": False, "profiles": ["twitter"]}} + results = emailrep.prepareResults(raw) + self.assertEqual(results["response"], raw) + self.assertEqual(results["summary"], "harmless") + self.assertEqual(results["status"], "ok") + + def test_prepareResults_suspicious(self): + raw = {"email": "sus@domain.com", "reputation": "none", "suspicious": True, "references": 0, "details": {"blacklisted": False, "malicious_activity": False, "profiles": []}} + results = emailrep.prepareResults(raw) + self.assertEqual(results["response"], raw) + self.assertEqual(results["summary"], "suspicious") + self.assertEqual(results["status"], "caution") + + def test_prepareResults_exceeded_limit(self): + raw = {"status": "fail", "reason": "exceeded daily limit. please wait 24 hrs or visit emailrep.io/key for an api key."} + results = emailrep.prepareResults(raw) + self.assertEqual(results["response"], raw) + self.assertEqual(results["summary"], "Exceeded daily request limit.") + self.assertEqual(results["status"], "caution") + + def test_prepareResults_error(self): + raw = {} + results = emailrep.prepareResults(raw) + self.assertEqual(results["response"], raw) + self.assertEqual(results["summary"], "internal_failure") + self.assertEqual(results["status"], "caution") + + def test_analyze(self): + output = {"email": "sus@domain.com", "reputation": "none", "suspicious": True, "references": 0, "details": {"blacklisted": False, "malicious_activity": False, "profiles": []}} + artifactInput = '{"value":"sus@domain.com","artifactType":"email"}' + conf = {"base_url": "myurl/", "api_key": "abcd1234"} + with patch('emailrep.emailrep.sendReq', new=MagicMock(return_value=output)) as mock: + results = emailrep.analyze(conf, artifactInput) + self.assertEqual(results["summary"], "suspicious") + mock.assert_called_once() diff --git a/salt/sensoroni/files/analyzers/emailrep/requirements.txt b/salt/sensoroni/files/analyzers/emailrep/requirements.txt new file mode 100644 index 000000000..a8980057f --- /dev/null +++ b/salt/sensoroni/files/analyzers/emailrep/requirements.txt @@ -0,0 +1,2 @@ +requests>=2.27.1 +pyyaml>=6.0 diff --git a/salt/sensoroni/files/analyzers/emailrep/source-packages/PyYAML-6.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl b/salt/sensoroni/files/analyzers/emailrep/source-packages/PyYAML-6.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl new file mode 100644 index 000000000..1dfb5c2d3 Binary files /dev/null and b/salt/sensoroni/files/analyzers/emailrep/source-packages/PyYAML-6.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl differ diff --git a/salt/sensoroni/files/analyzers/emailrep/source-packages/certifi-2021.10.8-py2.py3-none-any.whl b/salt/sensoroni/files/analyzers/emailrep/source-packages/certifi-2021.10.8-py2.py3-none-any.whl new file mode 100644 index 000000000..fbcb86b5f Binary files /dev/null and b/salt/sensoroni/files/analyzers/emailrep/source-packages/certifi-2021.10.8-py2.py3-none-any.whl differ diff --git a/salt/sensoroni/files/analyzers/emailrep/source-packages/charset_normalizer-2.0.12-py3-none-any.whl b/salt/sensoroni/files/analyzers/emailrep/source-packages/charset_normalizer-2.0.12-py3-none-any.whl new file mode 100644 index 000000000..17a2dfbeb Binary files /dev/null 
and b/salt/sensoroni/files/analyzers/emailrep/source-packages/charset_normalizer-2.0.12-py3-none-any.whl differ diff --git a/salt/sensoroni/files/analyzers/emailrep/source-packages/idna-3.3-py3-none-any.whl b/salt/sensoroni/files/analyzers/emailrep/source-packages/idna-3.3-py3-none-any.whl new file mode 100644 index 000000000..060541bc9 Binary files /dev/null and b/salt/sensoroni/files/analyzers/emailrep/source-packages/idna-3.3-py3-none-any.whl differ diff --git a/salt/sensoroni/files/analyzers/emailrep/source-packages/requests-2.27.1-py2.py3-none-any.whl b/salt/sensoroni/files/analyzers/emailrep/source-packages/requests-2.27.1-py2.py3-none-any.whl new file mode 100644 index 000000000..807fc6110 Binary files /dev/null and b/salt/sensoroni/files/analyzers/emailrep/source-packages/requests-2.27.1-py2.py3-none-any.whl differ diff --git a/salt/sensoroni/files/analyzers/emailrep/source-packages/urllib3-1.26.9-py2.py3-none-any.whl b/salt/sensoroni/files/analyzers/emailrep/source-packages/urllib3-1.26.9-py2.py3-none-any.whl new file mode 100644 index 000000000..5019453dd Binary files /dev/null and b/salt/sensoroni/files/analyzers/emailrep/source-packages/urllib3-1.26.9-py2.py3-none-any.whl differ diff --git a/salt/sensoroni/files/analyzers/greynoise/__init__.py b/salt/sensoroni/files/analyzers/greynoise/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/salt/sensoroni/files/analyzers/greynoise/greynoise.json b/salt/sensoroni/files/analyzers/greynoise/greynoise.json new file mode 100644 index 000000000..76cef3324 --- /dev/null +++ b/salt/sensoroni/files/analyzers/greynoise/greynoise.json @@ -0,0 +1,7 @@ +{ + "name": "Greynoise IP Analyzer", + "version": "0.1", + "author": "Security Onion Solutions", + "description": "This analyzer queries Greynoise for context around an IP address", + "supportedTypes" : ["ip"] +} diff --git a/salt/sensoroni/files/analyzers/greynoise/greynoise.py b/salt/sensoroni/files/analyzers/greynoise/greynoise.py new file mode 100755 index 000000000..deeef5414 --- /dev/null +++ b/salt/sensoroni/files/analyzers/greynoise/greynoise.py @@ -0,0 +1,78 @@ +import json +import os +import sys +import requests +import helpers +import argparse + + +def checkConfigRequirements(conf): + if "api_key" not in conf or len(conf['api_key']) == 0: + sys.exit(126) + else: + return True + + +def sendReq(conf, meta, ip): + url = conf['base_url'] + if conf['api_version'] == 'community': + url = url + 'v3/community/' + ip + elif conf['api_version'] in ('investigate', 'automate'): + url = url + 'v2/noise/context/' + ip + headers = {"key": conf['api_key']} + response = requests.request('GET', url=url, headers=headers) + return response.json() + + +def prepareResults(raw): + # Default to an internal failure unless a known response shape is matched below + status = "caution" + summary = "internal_failure" + if "message" in raw: + if "Success" in raw["message"]: + if "classification" in raw: + if "benign" in raw['classification']: + status = "ok" + summary = "harmless" + elif "malicious" in raw['classification']: + status = "threat" + summary = "malicious" + elif "unknown" in raw['classification']: + status = "caution" + summary = "Results found." + elif "IP not observed scanning the internet or contained in RIOT data set." in raw["message"]: + status = "ok" + summary = "no_results" + elif "Request is not a valid routable IPv4 address" in raw["message"]: + status = "caution" + summary = "Invalid IP address."
+ else: + status = "info" + summary = raw["message"] + else: + status = "caution" + summary = "internal_failure" + results = {'response': raw, 'summary': summary, 'status': status} + return results + + +def analyze(conf, input): + checkConfigRequirements(conf) + meta = helpers.loadMetadata(__file__) + data = helpers.parseArtifact(input) + helpers.checkSupportedType(meta, data["artifactType"]) + response = sendReq(conf, meta, data["value"]) + return prepareResults(response) + + +def main(): + dir = os.path.dirname(os.path.realpath(__file__)) + parser = argparse.ArgumentParser(description='Search Greynoise for a given artifact') + parser.add_argument('artifact', help='the artifact represented in JSON format') + parser.add_argument('-c', '--config', metavar="CONFIG_FILE", default=dir + "/greynoise.yaml", help='optional config file to use instead of the default config file') + + args = parser.parse_args() + if args.artifact: + results = analyze(helpers.loadConfig(args.config), args.artifact) + print(json.dumps(results)) + + +if __name__ == "__main__": + main() diff --git a/salt/sensoroni/files/analyzers/greynoise/greynoise.yaml b/salt/sensoroni/files/analyzers/greynoise/greynoise.yaml new file mode 100644 index 000000000..aee4f961a --- /dev/null +++ b/salt/sensoroni/files/analyzers/greynoise/greynoise.yaml @@ -0,0 +1,3 @@ +base_url: https://api.greynoise.io/ +api_key: "{{ salt['pillar.get']('sensoroni:analyzers:greynoise:api_key', '') }}" +api_version: "{{ salt['pillar.get']('sensoroni:analyzers:greynoise:api_version', 'community') }}" diff --git a/salt/sensoroni/files/analyzers/greynoise/greynoise_test.py b/salt/sensoroni/files/analyzers/greynoise/greynoise_test.py new file mode 100644 index 000000000..768107adb --- /dev/null +++ b/salt/sensoroni/files/analyzers/greynoise/greynoise_test.py @@ -0,0 +1,117 @@ +from io import StringIO +import sys +from unittest.mock import patch, MagicMock +from greynoise import greynoise +import unittest + + +class TestGreynoiseMethods(unittest.TestCase): + + def test_main_missing_input(self): + with patch('sys.exit', new=MagicMock()) as sysmock: + with patch('sys.stderr', new=StringIO()) as mock_stderr: + sys.argv = ["cmd"] + greynoise.main() + self.assertEqual(mock_stderr.getvalue(), "usage: cmd [-h] [-c CONFIG_FILE] artifact\ncmd: error: the following arguments are required: artifact\n") + sysmock.assert_called_once_with(2) + + def test_main_success(self): + output = {"foo": "bar"} + with patch('sys.stdout', new=StringIO()) as mock_stdout: + with patch('greynoise.greynoise.analyze', new=MagicMock(return_value=output)) as mock: + sys.argv = ["cmd", "input"] + greynoise.main() + expected = '{"foo": "bar"}\n' + self.assertEqual(mock_stdout.getvalue(), expected) + mock.assert_called_once() + + def test_checkConfigRequirements_not_present(self): + conf = {"not_a_file_path": "blahblah"} + with self.assertRaises(SystemExit) as cm: + greynoise.checkConfigRequirements(conf) + self.assertEqual(cm.exception.code, 126) + + def test_sendReq_community(self): + with patch('requests.request', new=MagicMock(return_value=MagicMock())) as mock: + meta = {} + conf = {"base_url": "https://myurl/", "api_key": "abcd1234", "api_version": "community"} + ip = "192.168.1.1" + response = greynoise.sendReq(conf=conf, meta=meta, ip=ip) + mock.assert_called_once_with("GET", headers={'key': 'abcd1234'}, url="https://myurl/v3/community/192.168.1.1") + self.assertIsNotNone(response) + + def test_sendReq_investigate(self): + with patch('requests.request', 
new=MagicMock(return_value=MagicMock())) as mock: + meta = {} + conf = {"base_url": "https://myurl/", "api_key": "abcd1234", "api_version": "investigate"} + ip = "192.168.1.1" + response = greynoise.sendReq(conf=conf, meta=meta, ip=ip) + mock.assert_called_once_with("GET", headers={'key': 'abcd1234'}, url="https://myurl/v2/noise/context/192.168.1.1") + self.assertIsNotNone(response) + + def test_sendReq_automate(self): + with patch('requests.request', new=MagicMock(return_value=MagicMock())) as mock: + meta = {} + conf = {"base_url": "https://myurl/", "api_key": "abcd1234", "api_version": "automate"} + ip = "192.168.1.1" + response = greynoise.sendReq(conf=conf, meta=meta, ip=ip) + mock.assert_called_once_with("GET", headers={'key': 'abcd1234'}, url="https://myurl/v2/noise/context/192.168.1.1") + self.assertIsNotNone(response) + + def test_prepareResults_invalidIP(self): + raw = {"message": "Request is not a valid routable IPv4 address"} + results = greynoise.prepareResults(raw) + self.assertEqual(results["response"], raw) + self.assertEqual(results["summary"], "Invalid IP address.") + self.assertEqual(results["status"], "caution") + + def test_prepareResults_not_found(self): + raw = {"ip": "192.190.1.1", "noise": "false", "riot": "false", "message": "IP not observed scanning the internet or contained in RIOT data set."} + results = greynoise.prepareResults(raw) + self.assertEqual(results["response"], raw) + self.assertEqual(results["summary"], "no_results") + self.assertEqual(results["status"], "ok") + + def test_prepareResults_benign(self): + raw = {"ip": "8.8.8.8", "noise": "false", "riot": "true", "classification": "benign", "name": "Google Public DNS", "link": "https://viz.gn.io", "last_seen": "2022-04-26", "message": "Success"} + results = greynoise.prepareResults(raw) + self.assertEqual(results["response"], raw) + self.assertEqual(results["summary"], "harmless") + self.assertEqual(results["status"], "ok") + + def test_prepareResults_malicious(self): + raw = {"ip": "121.142.87.218", "noise": "true", "riot": "false", "classification": "malicious", "name": "unknown", "link": "https://viz.gn.io", "last_seen": "2022-04-26", "message": "Success"} + results = greynoise.prepareResults(raw) + self.assertEqual(results["response"], raw) + self.assertEqual(results["summary"], "malicious") + self.assertEqual(results["status"], "threat") + + def test_prepareResults_unknown(self): + raw = {"ip": "221.4.62.149", "noise": "true", "riot": "false", "classification": "unknown", "name": "unknown", "link": "https://viz.gn.io", "last_seen": "2022-04-26", "message": "Success"} + results = greynoise.prepareResults(raw) + self.assertEqual(results["response"], raw) + self.assertEqual(results["summary"], "Results found.") + self.assertEqual(results["status"], "caution") + + def test_prepareResults_unknown_message(self): + raw = {"message": "unknown"} + results = greynoise.prepareResults(raw) + self.assertEqual(results["response"], raw) + self.assertEqual(results["summary"], "unknown") + self.assertEqual(results["status"], "info") + + def test_prepareResults_error(self): + raw = {} + results = greynoise.prepareResults(raw) + self.assertEqual(results["response"], raw) + self.assertEqual(results["summary"], "internal_failure") + self.assertEqual(results["status"], "caution") + + def test_analyze(self): + output = {"ip": "221.4.62.149", "noise": "true", "riot": "false", "classification": "unknown", "name": "unknown", "link": "https://viz.gn.io", "last_seen": "2022-04-26", "message": "Success"} + artifactInput = 
'{"value":"221.4.62.149","artifactType":"ip"}' + conf = {"base_url": "myurl/", "api_key": "abcd1234", "api_version": "community"} + with patch('greynoise.greynoise.sendReq', new=MagicMock(return_value=output)) as mock: + results = greynoise.analyze(conf, artifactInput) + self.assertEqual(results["summary"], "Results found.") + mock.assert_called_once() diff --git a/salt/sensoroni/files/analyzers/greynoise/requirements.txt b/salt/sensoroni/files/analyzers/greynoise/requirements.txt new file mode 100644 index 000000000..a8980057f --- /dev/null +++ b/salt/sensoroni/files/analyzers/greynoise/requirements.txt @@ -0,0 +1,2 @@ +requests>=2.27.1 +pyyaml>=6.0 diff --git a/salt/sensoroni/files/analyzers/greynoise/source-packages/PyYAML-6.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl b/salt/sensoroni/files/analyzers/greynoise/source-packages/PyYAML-6.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl new file mode 100644 index 000000000..1dfb5c2d3 Binary files /dev/null and b/salt/sensoroni/files/analyzers/greynoise/source-packages/PyYAML-6.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl differ diff --git a/salt/sensoroni/files/analyzers/greynoise/source-packages/certifi-2021.10.8-py2.py3-none-any.whl b/salt/sensoroni/files/analyzers/greynoise/source-packages/certifi-2021.10.8-py2.py3-none-any.whl new file mode 100644 index 000000000..fbcb86b5f Binary files /dev/null and b/salt/sensoroni/files/analyzers/greynoise/source-packages/certifi-2021.10.8-py2.py3-none-any.whl differ diff --git a/salt/sensoroni/files/analyzers/greynoise/source-packages/charset_normalizer-2.0.12-py3-none-any.whl b/salt/sensoroni/files/analyzers/greynoise/source-packages/charset_normalizer-2.0.12-py3-none-any.whl new file mode 100644 index 000000000..17a2dfbeb Binary files /dev/null and b/salt/sensoroni/files/analyzers/greynoise/source-packages/charset_normalizer-2.0.12-py3-none-any.whl differ diff --git a/salt/sensoroni/files/analyzers/greynoise/source-packages/idna-3.3-py3-none-any.whl b/salt/sensoroni/files/analyzers/greynoise/source-packages/idna-3.3-py3-none-any.whl new file mode 100644 index 000000000..060541bc9 Binary files /dev/null and b/salt/sensoroni/files/analyzers/greynoise/source-packages/idna-3.3-py3-none-any.whl differ diff --git a/salt/sensoroni/files/analyzers/greynoise/source-packages/requests-2.27.1-py2.py3-none-any.whl b/salt/sensoroni/files/analyzers/greynoise/source-packages/requests-2.27.1-py2.py3-none-any.whl new file mode 100644 index 000000000..807fc6110 Binary files /dev/null and b/salt/sensoroni/files/analyzers/greynoise/source-packages/requests-2.27.1-py2.py3-none-any.whl differ diff --git a/salt/sensoroni/files/analyzers/greynoise/source-packages/urllib3-1.26.9-py2.py3-none-any.whl b/salt/sensoroni/files/analyzers/greynoise/source-packages/urllib3-1.26.9-py2.py3-none-any.whl new file mode 100644 index 000000000..5019453dd Binary files /dev/null and b/salt/sensoroni/files/analyzers/greynoise/source-packages/urllib3-1.26.9-py2.py3-none-any.whl differ diff --git a/salt/sensoroni/files/analyzers/helpers.py b/salt/sensoroni/files/analyzers/helpers.py new file mode 100644 index 000000000..f4ef4a1e6 --- /dev/null +++ b/salt/sensoroni/files/analyzers/helpers.py @@ -0,0 +1,28 @@ +import json +import os +import sys +import yaml + + +def checkSupportedType(meta, artifact_type): + if artifact_type not in meta['supportedTypes']: + sys.exit(126) + else: + return 
True + + +def parseArtifact(artifact): + data = json.loads(artifact) + return data + + +def loadMetadata(file): + dir = os.path.dirname(os.path.realpath(file)) + filename = os.path.realpath(file).rsplit('/', 1)[1].split('.')[0] + with open(str(dir + "/" + filename + ".json"), "r") as metafile: + return json.load(metafile) + + +def loadConfig(path): + with open(str(path), "r") as conffile: + return yaml.safe_load(conffile) diff --git a/salt/sensoroni/files/analyzers/helpers_test.py b/salt/sensoroni/files/analyzers/helpers_test.py new file mode 100644 index 000000000..c10ff00d5 --- /dev/null +++ b/salt/sensoroni/files/analyzers/helpers_test.py @@ -0,0 +1,35 @@ +from unittest.mock import patch, MagicMock +import helpers +import os +import unittest + + +class TestHelpersMethods(unittest.TestCase): + + def test_checkSupportedType(self): + with patch('sys.exit', new=MagicMock()) as mock: + meta = {"supportedTypes": ["ip", "foo"]} + result = helpers.checkSupportedType(meta, "ip") + self.assertTrue(result) + mock.assert_not_called() + + result = helpers.checkSupportedType(meta, "bar") + self.assertFalse(result) + mock.assert_called_once_with(126) + + def test_loadMetadata(self): + dir = os.path.dirname(os.path.realpath(__file__)) + input = dir + '/urlhaus/urlhaus.py' + data = helpers.loadMetadata(input) + self.assertEqual(data["name"], "Urlhaus") + + def test_loadConfig(self): + dir = os.path.dirname(os.path.realpath(__file__)) + data = helpers.loadConfig(dir + "/virustotal/virustotal.yaml") + self.assertEqual(data["base_url"], "https://www.virustotal.com/api/v3/search?query=") + + def test_parseArtifact(self): + input = '{"value":"foo","artifactType":"bar"}' + data = helpers.parseArtifact(input) + self.assertEqual(data["artifactType"], "bar") + self.assertEqual(data["value"], "foo") diff --git a/salt/sensoroni/files/analyzers/ja3er/__init__.py b/salt/sensoroni/files/analyzers/ja3er/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/salt/sensoroni/files/analyzers/ja3er/ja3er.json b/salt/sensoroni/files/analyzers/ja3er/ja3er.json new file mode 100644 index 000000000..de072d0b7 --- /dev/null +++ b/salt/sensoroni/files/analyzers/ja3er/ja3er.json @@ -0,0 +1,7 @@ +{ + "name": "JA3er Hash Search", + "version": "0.1", + "author": "Security Onion Solutions", + "description": "This analyzer queries JA3er user agents and sightings", + "supportedTypes" : ["ja3"] +} diff --git a/salt/sensoroni/files/analyzers/ja3er/ja3er.py b/salt/sensoroni/files/analyzers/ja3er/ja3er.py new file mode 100755 index 000000000..330a8dd66 --- /dev/null +++ b/salt/sensoroni/files/analyzers/ja3er/ja3er.py @@ -0,0 +1,53 @@ +import json +import os +import requests +import helpers +import argparse + + +def sendReq(conf, meta, hash): + url = conf['base_url'] + hash + response = requests.request('GET', url) + return response.json() + + +def prepareResults(raw): + if "error" in raw: + if "Sorry" in raw["error"]: + status = "ok" + summary = "No results found." + elif "Invalid hash" in raw["error"]: + status = "caution" + summary = "Invalid hash." + else: + status = "caution" + summary = "internal_failure" + else: + status = "info" + summary = "Results found." 
+ results = {'response': raw, 'summary': summary, 'status': status} + return results + + +def analyze(conf, input): + meta = helpers.loadMetadata(__file__) + data = helpers.parseArtifact(input) + helpers.checkSupportedType(meta, data["artifactType"]) + response = sendReq(conf, meta, data["value"]) + return prepareResults(response) + + +def main(): + dir = os.path.dirname(os.path.realpath(__file__)) + parser = argparse.ArgumentParser(description='Search JA3er for a given artifact') + parser.add_argument('artifact', help='the artifact represented in JSON format') + parser.add_argument('-c', '--config', metavar="CONFIG_FILE", default=dir + "/ja3er.yaml", help='optional config file to use instead of the default config file') + + args = parser.parse_args() + if args.artifact: + results = analyze(helpers.loadConfig(args.config), args.artifact) + print(json.dumps(results)) + + +if __name__ == "__main__": + main() diff --git a/salt/sensoroni/files/analyzers/ja3er/ja3er.yaml b/salt/sensoroni/files/analyzers/ja3er/ja3er.yaml new file mode 100644 index 000000000..40d6f64dd --- /dev/null +++ b/salt/sensoroni/files/analyzers/ja3er/ja3er.yaml @@ -0,0 +1 @@ +base_url: https://ja3er.com/search/ diff --git a/salt/sensoroni/files/analyzers/ja3er/ja3er_test.py b/salt/sensoroni/files/analyzers/ja3er/ja3er_test.py new file mode 100644 index 000000000..8ad22ac69 --- /dev/null +++ b/salt/sensoroni/files/analyzers/ja3er/ja3er_test.py @@ -0,0 +1,65 @@ +from io import StringIO +import sys +from unittest.mock import patch, MagicMock +from ja3er import ja3er +import unittest + + +class TestJa3erMethods(unittest.TestCase): + + def test_main_missing_input(self): + with patch('sys.exit', new=MagicMock()) as sysmock: + with patch('sys.stderr', new=StringIO()) as mock_stderr: + sys.argv = ["cmd"] + ja3er.main() + self.assertEqual(mock_stderr.getvalue(), "usage: cmd [-h] [-c CONFIG_FILE] artifact\ncmd: error: the following arguments are required: artifact\n") + sysmock.assert_called_once_with(2) + + def test_main_success(self): + output = {"foo": "bar"} + with patch('sys.stdout', new=StringIO()) as mock_stdout: + with patch('ja3er.ja3er.analyze', new=MagicMock(return_value=output)) as mock: + sys.argv = ["cmd", "input"] + ja3er.main() + expected = '{"foo": "bar"}\n' + self.assertEqual(mock_stdout.getvalue(), expected) + mock.assert_called_once() + + def test_sendReq(self): + with patch('requests.request', new=MagicMock(return_value=MagicMock())) as mock: + meta = {} + conf = {"base_url": "myurl/"} + hash = "abcd1234" + response = ja3er.sendReq(conf=conf, meta=meta, hash=hash) + mock.assert_called_once_with("GET", "myurl/abcd1234") + self.assertIsNotNone(response) + + def test_prepareResults_none(self): + raw = {"error": "Sorry no values found"} + results = ja3er.prepareResults(raw) + self.assertEqual(results["response"], raw) + self.assertEqual(results["summary"], "No results found.") + self.assertEqual(results["status"], "ok") + + def test_prepareResults_invalidHash(self): + raw = {"error": "Invalid hash"} + results = ja3er.prepareResults(raw) + self.assertEqual(results["response"], raw) + self.assertEqual(results["summary"], "Invalid hash.") + self.assertEqual(results["status"], "caution") + + def test_prepareResults_info(self): + raw = [{"User-Agent": "Blah/5.0", "Count": 24874, "Last_seen": "2022-04-08 16:18:38"}, {"Comment": "Brave browser v1.36.122\n\n", "Reported": "2022-03-28 20:26:42"}] + results = ja3er.prepareResults(raw) + self.assertEqual(results["response"], raw) + self.assertEqual(results["summary"], 
"Results found.") + self.assertEqual(results["status"], "info") + + def test_analyze(self): + output = {"info": "Results found."} + artifactInput = '{"value":"abcd1234","artifactType":"ja3"}' + conf = {"base_url": "myurl/"} + with patch('ja3er.ja3er.sendReq', new=MagicMock(return_value=output)) as mock: + results = ja3er.analyze(conf, artifactInput) + self.assertEqual(results["summary"], "Results found.") + mock.assert_called_once() diff --git a/salt/sensoroni/files/analyzers/ja3er/requirements.txt b/salt/sensoroni/files/analyzers/ja3er/requirements.txt new file mode 100644 index 000000000..a8980057f --- /dev/null +++ b/salt/sensoroni/files/analyzers/ja3er/requirements.txt @@ -0,0 +1,2 @@ +requests>=2.27.1 +pyyaml>=6.0 diff --git a/salt/sensoroni/files/analyzers/ja3er/source-packages/PyYAML-6.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl b/salt/sensoroni/files/analyzers/ja3er/source-packages/PyYAML-6.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl new file mode 100644 index 000000000..1dfb5c2d3 Binary files /dev/null and b/salt/sensoroni/files/analyzers/ja3er/source-packages/PyYAML-6.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl differ diff --git a/salt/sensoroni/files/analyzers/ja3er/source-packages/certifi-2021.10.8-py2.py3-none-any.whl b/salt/sensoroni/files/analyzers/ja3er/source-packages/certifi-2021.10.8-py2.py3-none-any.whl new file mode 100644 index 000000000..fbcb86b5f Binary files /dev/null and b/salt/sensoroni/files/analyzers/ja3er/source-packages/certifi-2021.10.8-py2.py3-none-any.whl differ diff --git a/salt/sensoroni/files/analyzers/ja3er/source-packages/charset_normalizer-2.0.12-py3-none-any.whl b/salt/sensoroni/files/analyzers/ja3er/source-packages/charset_normalizer-2.0.12-py3-none-any.whl new file mode 100644 index 000000000..17a2dfbeb Binary files /dev/null and b/salt/sensoroni/files/analyzers/ja3er/source-packages/charset_normalizer-2.0.12-py3-none-any.whl differ diff --git a/salt/sensoroni/files/analyzers/ja3er/source-packages/idna-3.3-py3-none-any.whl b/salt/sensoroni/files/analyzers/ja3er/source-packages/idna-3.3-py3-none-any.whl new file mode 100644 index 000000000..060541bc9 Binary files /dev/null and b/salt/sensoroni/files/analyzers/ja3er/source-packages/idna-3.3-py3-none-any.whl differ diff --git a/salt/sensoroni/files/analyzers/ja3er/source-packages/requests-2.27.1-py2.py3-none-any.whl b/salt/sensoroni/files/analyzers/ja3er/source-packages/requests-2.27.1-py2.py3-none-any.whl new file mode 100644 index 000000000..807fc6110 Binary files /dev/null and b/salt/sensoroni/files/analyzers/ja3er/source-packages/requests-2.27.1-py2.py3-none-any.whl differ diff --git a/salt/sensoroni/files/analyzers/ja3er/source-packages/urllib3-1.26.9-py2.py3-none-any.whl b/salt/sensoroni/files/analyzers/ja3er/source-packages/urllib3-1.26.9-py2.py3-none-any.whl new file mode 100644 index 000000000..5019453dd Binary files /dev/null and b/salt/sensoroni/files/analyzers/ja3er/source-packages/urllib3-1.26.9-py2.py3-none-any.whl differ diff --git a/salt/sensoroni/files/analyzers/localfile/__init__.py b/salt/sensoroni/files/analyzers/localfile/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/salt/sensoroni/files/analyzers/localfile/localfile.json b/salt/sensoroni/files/analyzers/localfile/localfile.json new file mode 100644 index 000000000..5dd379ff4 --- /dev/null +++ 
b/salt/sensoroni/files/analyzers/localfile/localfile.json @@ -0,0 +1,7 @@ +{ + "name": "Local File Analyzer", + "version": "0.1", + "author": "Security Onion Solutions", + "description": "This analyzer queries one or more local CSV files for a value, then returns all columns within matching rows.", + "supportedTypes" : ["domain", "hash", "ip", "other", "url"] +} diff --git a/salt/sensoroni/files/analyzers/localfile/localfile.py b/salt/sensoroni/files/analyzers/localfile/localfile.py new file mode 100755 index 000000000..745c4b9b6 --- /dev/null +++ b/salt/sensoroni/files/analyzers/localfile/localfile.py @@ -0,0 +1,79 @@ +import json +import helpers +import os +import sys +import argparse +import csv + + +def checkConfigRequirements(conf): + if "file_path" not in conf or len(conf['file_path']) == 0: + sys.exit(126) + else: + return True + + +def searchFile(artifact, csvfiles): + dir = os.path.dirname(os.path.realpath(__file__)) + found = [] + for f in csvfiles: + filename = dir + "/" + f + with open(filename, "r") as csvfile: + csvdata = csv.DictReader(csvfile) + for row in csvdata: + first_key = list(row.keys())[0] + if artifact in row[first_key]: + row.update({"filename": filename}) + found.append(row) + if len(found) != 0: + if len(found) == 1: + results = found[0] + else: + results = found + else: + results = "No results" + + return results + + +def prepareResults(raw): + if len(raw) > 0: + if "No results" in raw: + status = "ok" + summary = "no_results" + else: + status = "info" + summary = "One or more matches found." + else: + raw = {} + status = "caution" + summary = "internal_failure" + response = raw + results = {'response': response, 'status': status, 'summary': summary} + return results + + +def analyze(conf, input): + checkConfigRequirements(conf) + meta = helpers.loadMetadata(__file__) + data = helpers.parseArtifact(input) + helpers.checkSupportedType(meta, data["artifactType"]) + search = searchFile(data["value"], conf['file_path']) + results = prepareResults(search) + return results + + +def main(): + dir = os.path.dirname(os.path.realpath(__file__)) + parser = argparse.ArgumentParser(description='Search CSV file for a given artifact') + parser.add_argument('artifact', help='the artifact represented in JSON format') + parser.add_argument('-c', '--config', metavar="CONFIG_FILE", default=dir + "/localfile.yaml", help='optional config file to use instead of the default config file') + + args = parser.parse_args() + if args.artifact: + results = analyze(helpers.loadConfig(args.config), args.artifact) + print(json.dumps(results)) + + +if __name__ == "__main__": + main() diff --git a/salt/sensoroni/files/analyzers/localfile/localfile.yaml b/salt/sensoroni/files/analyzers/localfile/localfile.yaml new file mode 100644 index 000000000..69740c379 --- /dev/null +++ b/salt/sensoroni/files/analyzers/localfile/localfile.yaml @@ -0,0 +1 @@ +file_path: [] diff --git a/salt/sensoroni/files/analyzers/localfile/localfile_test.csv b/salt/sensoroni/files/analyzers/localfile/localfile_test.csv new file mode 100644 index 000000000..bf16d3378 --- /dev/null +++ b/salt/sensoroni/files/analyzers/localfile/localfile_test.csv @@ -0,0 +1,4 @@ +indicator,description,reference +abcd1234,This is a test!,Testing +abcd1234,This is another test!,Testing +192.168.1.1,Yet another test!,Testing diff --git a/salt/sensoroni/files/analyzers/localfile/localfile_test.py b/salt/sensoroni/files/analyzers/localfile/localfile_test.py new file mode 100644 index 000000000..66e4820e1 --- /dev/null +++ 
b/salt/sensoroni/files/analyzers/localfile/localfile_test.py @@ -0,0 +1,119 @@ +from io import StringIO +import sys +from unittest.mock import patch, MagicMock +from localfile import localfile +import unittest + + +class TestLocalfileMethods(unittest.TestCase): + + def test_main_missing_input(self): + with patch('sys.exit', new=MagicMock()) as sysmock: + with patch('sys.stderr', new=StringIO()) as mock_stderr: + sys.argv = ["cmd"] + localfile.main() + self.assertEqual(mock_stderr.getvalue(), "usage: cmd [-h] [-c CONFIG_FILE] artifact\ncmd: error: the following arguments are required: artifact\n") + sysmock.assert_called_once_with(2) + + def test_main_success(self): + output = {"foo": "bar"} + with patch('sys.stdout', new=StringIO()) as mock_stdout: + with patch('localfile.localfile.analyze', new=MagicMock(return_value=output)) as mock: + sys.argv = ["cmd", "input"] + localfile.main() + expected = '{"foo": "bar"}\n' + self.assertEqual(mock_stdout.getvalue(), expected) + mock.assert_called_once() + + def test_checkConfigRequirements_present(self): + conf = {"file_path": "['intel.csv']"} + self.assertTrue(localfile.checkConfigRequirements(conf)) + + def test_checkConfigRequirements_not_present(self): + conf = {"not_a_file_path": "blahblah"} + with self.assertRaises(SystemExit) as cm: + localfile.checkConfigRequirements(conf) + self.assertEqual(cm.exception.code, 126) + + def test_checkConfigRequirements_empty(self): + conf = {"file_path": ""} + with self.assertRaises(SystemExit) as cm: + localfile.checkConfigRequirements(conf) + self.assertEqual(cm.exception.code, 126) + + def test_searchFile_multiple_found(self): + artifact = "abcd1234" + results = localfile.searchFile(artifact, ["localfile_test.csv"]) + self.assertEqual(results[0]["indicator"], "abcd1234") + self.assertEqual(results[0]["description"], "This is a test!") + self.assertEqual(results[0]["reference"], "Testing") + self.assertEqual(results[1]["indicator"], "abcd1234") + self.assertEqual(results[1]["description"], "This is another test!") + + def test_searchFile_single_found(self): + artifact = "192.168.1.1" + results = localfile.searchFile(artifact, ["localfile_test.csv"]) + self.assertEqual(results["indicator"], "192.168.1.1") + self.assertEqual(results["description"], "Yet another test!") + self.assertEqual(results["reference"], "Testing") + + def test_searchFile_not_found(self): + artifact = "youcan'tfindme" + results = localfile.searchFile(artifact, ["localfile_test.csv"]) + self.assertEqual(results, "No results") + + def test_prepareResults_none(self): + raw = "No results" + results = localfile.prepareResults(raw) + self.assertEqual(results["response"], raw) + self.assertEqual(results["summary"], "no_results") + self.assertEqual(results["status"], "ok") + + def test_prepareResults_ok(self): + raw = [ + { + "description": "This is one BAD piece of malware!", + "filename": "/opt/sensoroni/analyzers/localfile/intel.csv", + "indicator": "abc1234", + "reference": "https://myintelservice" + }, + { + "filename": "/opt/sensoroni/analyzers/localfile/random.csv", + "randomcol1": "myothervalue", + "randomcol2": "myotherothervalue", + "value": "abc1234" + } + ] + results = localfile.prepareResults(raw) + self.assertEqual(results["response"], raw) + self.assertEqual(results["summary"], "One or more matches found.") + self.assertEqual(results["status"], "info") + + def test_prepareResults_error(self): + raw = {} + results = localfile.prepareResults(raw) + self.assertEqual(results["response"], raw) + self.assertEqual(results["summary"], 
"internal_failure") + self.assertEqual(results["status"], "caution") + + def test_analyze(self): + output = [ + { + "description": "This is one BAD piece of malware!", + "filename": "/opt/sensoroni/analyzers/localfile/intel.csv", + "indicator": "abc1234", + "reference": "https://myintelservice" + }, + { + "filename": "/opt/sensoroni/analyzers/localfile/random.csv", + "randomcol1": "myothervalue", + "randomcol2": "myotherothervalue", + "value": "abc1234" + } + ] + artifactInput = '{"value":"foo","artifactType":"url"}' + conf = {"file_path": "/home/intel.csv"} + with patch('localfile.localfile.searchFile', new=MagicMock(return_value=output)) as mock: + results = localfile.analyze(conf, artifactInput) + self.assertEqual(results["summary"], "One or more matches found.") + mock.assert_called_once() diff --git a/salt/sensoroni/files/analyzers/localfile/requirements.txt b/salt/sensoroni/files/analyzers/localfile/requirements.txt new file mode 100644 index 000000000..a8980057f --- /dev/null +++ b/salt/sensoroni/files/analyzers/localfile/requirements.txt @@ -0,0 +1,2 @@ +requests>=2.27.1 +pyyaml>=6.0 diff --git a/salt/sensoroni/files/analyzers/localfile/source-packages/PyYAML-6.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl b/salt/sensoroni/files/analyzers/localfile/source-packages/PyYAML-6.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl new file mode 100644 index 000000000..1dfb5c2d3 Binary files /dev/null and b/salt/sensoroni/files/analyzers/localfile/source-packages/PyYAML-6.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl differ diff --git a/salt/sensoroni/files/analyzers/localfile/source-packages/certifi-2021.10.8-py2.py3-none-any.whl b/salt/sensoroni/files/analyzers/localfile/source-packages/certifi-2021.10.8-py2.py3-none-any.whl new file mode 100644 index 000000000..fbcb86b5f Binary files /dev/null and b/salt/sensoroni/files/analyzers/localfile/source-packages/certifi-2021.10.8-py2.py3-none-any.whl differ diff --git a/salt/sensoroni/files/analyzers/localfile/source-packages/charset_normalizer-2.0.12-py3-none-any.whl b/salt/sensoroni/files/analyzers/localfile/source-packages/charset_normalizer-2.0.12-py3-none-any.whl new file mode 100644 index 000000000..17a2dfbeb Binary files /dev/null and b/salt/sensoroni/files/analyzers/localfile/source-packages/charset_normalizer-2.0.12-py3-none-any.whl differ diff --git a/salt/sensoroni/files/analyzers/localfile/source-packages/idna-3.3-py3-none-any.whl b/salt/sensoroni/files/analyzers/localfile/source-packages/idna-3.3-py3-none-any.whl new file mode 100644 index 000000000..060541bc9 Binary files /dev/null and b/salt/sensoroni/files/analyzers/localfile/source-packages/idna-3.3-py3-none-any.whl differ diff --git a/salt/sensoroni/files/analyzers/localfile/source-packages/requests-2.27.1-py2.py3-none-any.whl b/salt/sensoroni/files/analyzers/localfile/source-packages/requests-2.27.1-py2.py3-none-any.whl new file mode 100644 index 000000000..807fc6110 Binary files /dev/null and b/salt/sensoroni/files/analyzers/localfile/source-packages/requests-2.27.1-py2.py3-none-any.whl differ diff --git a/salt/sensoroni/files/analyzers/localfile/source-packages/urllib3-1.26.9-py2.py3-none-any.whl b/salt/sensoroni/files/analyzers/localfile/source-packages/urllib3-1.26.9-py2.py3-none-any.whl new file mode 100644 index 000000000..5019453dd Binary files /dev/null and 
b/salt/sensoroni/files/analyzers/localfile/source-packages/urllib3-1.26.9-py2.py3-none-any.whl differ diff --git a/salt/sensoroni/files/analyzers/otx/__init__.py b/salt/sensoroni/files/analyzers/otx/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/salt/sensoroni/files/analyzers/otx/otx.json b/salt/sensoroni/files/analyzers/otx/otx.json new file mode 100644 index 000000000..f1cdea4c3 --- /dev/null +++ b/salt/sensoroni/files/analyzers/otx/otx.json @@ -0,0 +1,7 @@ +{ + "name": "Alienvault OTX", + "version": "0.1", + "author": "Security Onion Solutions", + "description": "This analyzer queries Alienvault OTX for a domain, hash, IP, or URL, then returns a report for it.", + "supportedTypes" : ["domain", "hash", "ip", "url"] +} diff --git a/salt/sensoroni/files/analyzers/otx/otx.py b/salt/sensoroni/files/analyzers/otx/otx.py new file mode 100755 index 000000000..2d4e8e592 --- /dev/null +++ b/salt/sensoroni/files/analyzers/otx/otx.py @@ -0,0 +1,88 @@ +import json +import requests +import helpers +import sys +import os +import argparse + + +def buildReq(conf, artifact_type, artifact_value): + headers = {"X-OTX-API-KEY": conf["api_key"]} + base_url = conf['base_url'] + if artifact_type == "ip": + uri = "indicators/IPv4/" + elif artifact_type == "url": + uri = "indicators/url/" + elif artifact_type == "domain": + uri = "indicators/domain/" + elif artifact_type == "hash": + uri = "indicators/file/" + section = "/general" + url = base_url + uri + artifact_value + section + return url, headers + + +def checkConfigRequirements(conf): + if "api_key" not in conf or len(conf['api_key']) == 0: + sys.exit(126) + else: + return True + + +def sendReq(url, headers): + response = requests.request('GET', url, headers=headers) + return response.json() + + +def prepareResults(response): + if len(response) != 0: + raw = response + if 'reputation' in raw: + reputation = raw["reputation"] + if reputation == 0: + status = "ok" + summaryinfo = "harmless" + elif reputation > 0 and reputation < 50: + status = "ok" + summaryinfo = "Likely Harmless" + elif reputation >= 50 and reputation < 75: + status = "caution" + summaryinfo = "suspicious" + elif reputation >= 75 and reputation <= 100: + status = "threat" + summaryinfo = "malicious" + else: + status = "info" + summaryinfo = "Analysis complete." 
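+ # An empty response from OTX is treated as an internal failure below; a response without a 'reputation' field gets the generic informational verdict above.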
+ else: + raw = {} + status = "caution" + summaryinfo = "internal_failure" + results = {'response': raw, 'status': status, 'summary': summaryinfo} + return results + + +def analyze(conf, input): + checkConfigRequirements(conf) + meta = helpers.loadMetadata(__file__) + data = helpers.parseArtifact(input) + helpers.checkSupportedType(meta, data["artifactType"]) + request = buildReq(conf, data["artifactType"], data["value"]) + response = sendReq(request[0], request[1]) + return prepareResults(response) + + +def main(): + dir = os.path.dirname(os.path.realpath(__file__)) + parser = argparse.ArgumentParser(description='Search Alienvault OTX for a given artifact') + parser.add_argument('artifact', help='the artifact represented in JSON format') + parser.add_argument('-c', '--config', metavar="CONFIG_FILE", default=dir + "/otx.yaml", help='optional config file to use instead of the default config file') + + args = parser.parse_args() + if args.artifact: + results = analyze(helpers.loadConfig(args.config), args.artifact) + print(json.dumps(results)) + + +if __name__ == "__main__": + main() diff --git a/salt/sensoroni/files/analyzers/otx/otx.yaml b/salt/sensoroni/files/analyzers/otx/otx.yaml new file mode 100644 index 000000000..5b7dfa6a8 --- /dev/null +++ b/salt/sensoroni/files/analyzers/otx/otx.yaml @@ -0,0 +1,2 @@ +base_url: https://otx.alienvault.com/api/v1/ +api_key: "{{ salt['pillar.get']('sensoroni:analyzers:otx:api_key', '') }}" diff --git a/salt/sensoroni/files/analyzers/otx/otx_test.py b/salt/sensoroni/files/analyzers/otx/otx_test.py new file mode 100644 index 000000000..6f5764ca4 --- /dev/null +++ b/salt/sensoroni/files/analyzers/otx/otx_test.py @@ -0,0 +1,250 @@ +from io import StringIO +import sys +from unittest.mock import patch, MagicMock +from otx import otx +import unittest + + +class TestOtxMethods(unittest.TestCase): + + def test_main_missing_input(self): + with patch('sys.exit', new=MagicMock()) as sysmock: + with patch('sys.stderr', new=StringIO()) as mock_stderr: + sys.argv = ["cmd"] + otx.main() + self.assertEqual(mock_stderr.getvalue(), "usage: cmd [-h] [-c CONFIG_FILE] artifact\ncmd: error: the following arguments are required: artifact\n") + sysmock.assert_called_once_with(2) + + def test_main_success(self): + output = {"foo": "bar"} + with patch('sys.stdout', new=StringIO()) as mock_stdout: + with patch('otx.otx.analyze', new=MagicMock(return_value=output)) as mock: + sys.argv = ["cmd", "input"] + otx.main() + expected = '{"foo": "bar"}\n' + self.assertEqual(mock_stdout.getvalue(), expected) + mock.assert_called_once() + + def test_checkConfigRequirements(self): + conf = {"not_a_key": "abcd12345"} + with self.assertRaises(SystemExit) as cm: + otx.checkConfigRequirements(conf) + self.assertEqual(cm.exception.code, 126) + + def test_buildReq_domain(self): + conf = {'base_url': 'https://myurl/', 'api_key': 'abcd12345'} + artifact_type = "domain" + artifact_value = "abc.com" + result = otx.buildReq(conf, artifact_type, artifact_value) + self.assertEqual("https://myurl/indicators/domain/abc.com/general", result[0]) + self.assertEqual({'X-OTX-API-KEY': 'abcd12345'}, result[1]) + + def test_buildReq_hash(self): + conf = {'base_url': 'https://myurl/', 'api_key': 'abcd12345'} + artifact_type = "hash" + artifact_value = "abcd1234" + result = otx.buildReq(conf, artifact_type, artifact_value) + self.assertEqual("https://myurl/indicators/file/abcd1234/general", result[0]) + self.assertEqual({'X-OTX-API-KEY': 'abcd12345'}, result[1]) + + def test_buildReq_ip(self): + conf = {'base_url':
'https://myurl/', 'api_key': 'abcd12345'} + artifact_type = "ip" + artifact_value = "192.168.1.1" + result = otx.buildReq(conf, artifact_type, artifact_value) + self.assertEqual("https://myurl/indicators/IPv4/192.168.1.1/general", result[0]) + self.assertEqual({'X-OTX-API-KEY': 'abcd12345'}, result[1]) + + def test_buildReq_url(self): + conf = {'base_url': 'https://myurl/', 'api_key': 'abcd12345'} + artifact_type = "url" + artifact_value = "https://abc.com" + result = otx.buildReq(conf, artifact_type, artifact_value) + self.assertEqual("https://myurl/indicators/url/https://abc.com/general", result[0]) + self.assertEqual({'X-OTX-API-KEY': 'abcd12345'}, result[1]) + + def test_sendReq(self): + with patch('requests.request', new=MagicMock(return_value=MagicMock())) as mock: + url = "https://myurl=" + response = otx.sendReq(url, headers={"x-apikey": "xyz"}) + mock.assert_called_once_with("GET", "https://myurl=", headers={"x-apikey": "xyz"}) + self.assertIsNotNone(response) + + def test_prepareResults_harmless(self): + raw = { + "whois": "http://whois.domaintools.com/192.168.1.1", + "reputation": 0, + "indicator": "192.168.1.1", + "type": "IPv4", + "pulse_info": { + "count": 0, + "pulses": [], + "related": { + "alienvault": { + "adversary": [], + "malware_families": [] + } + } + }, + "false_positive": [], + "sections": [ + "general" + ] + } + results = otx.prepareResults(raw) + self.assertEqual(results["response"], raw) + self.assertEqual(results["summary"], "harmless") + self.assertEqual(results["status"], "ok") + + def test_prepareResults_likely_harmless(self): + raw = { + "whois": "http://whois.domaintools.com/192.168.1.1", + "reputation": 49, + "indicator": "192.168.1.1", + "type": "IPv4", + "pulse_info": { + "count": 0, + "pulses": [], + "related": { + "alienvault": { + "adversary": [], + "malware_families": [] + } + } + }, + "false_positive": [], + "sections": [ + "general" + ] + } + results = otx.prepareResults(raw) + self.assertEqual(results["response"], raw) + self.assertEqual(results["summary"], "Likely Harmless") + self.assertEqual(results["status"], "ok") + + def test_prepareResults_suspicious(self): + raw = { + "whois": "http://whois.domaintools.com/192.168.1.1", + "reputation": 50, + "indicator": "192.168.1.1", + "type": "IPv4", + "pulse_info": { + "count": 0, + "pulses": [], + "related": { + "alienvault": { + "adversary": [], + "malware_families": [] + } + } + }, + "false_positive": [], + "sections": [ + "general" + ] + } + results = otx.prepareResults(raw) + self.assertEqual(results["response"], raw) + self.assertEqual(results["summary"], "suspicious") + self.assertEqual(results["status"], "caution") + + def test_prepareResults_threat(self): + raw = { + "whois": "http://whois.domaintools.com/192.168.1.1", + "reputation": 75, + "indicator": "192.168.1.1", + "type": "IPv4", + "pulse_info": { + "count": 0, + "pulses": [], + "related": { + "alienvault": { + "adversary": [], + "malware_families": [] + } + } + }, + "false_positive": [], + "sections": [ + "general" + ] + } + results = otx.prepareResults(raw) + self.assertEqual(results["response"], raw) + self.assertEqual(results["summary"], "malicious") + self.assertEqual(results["status"], "threat") + + def test_prepareResults_undetermined(self): + raw = { + "alexa": "", + "base_indicator": {}, + "domain": "Unavailable", + "false_positive": [], + "hostname": "Unavailable", + "indicator": "http://192.168.1.1", + "pulse_info": { + "count": 0, + "pulses": [], + "references": [], + "related": { + "alienvault": { + "adversary": [], + 
"industries": [], + "malware_families": [], + "unique_indicators": 0 + }, + "other": { + "adversary": [], + "industries": [], + "malware_families": [], + "unique_indicators": 0 + } + } + }, + "sections": [ + "general" + ], + "type": "url", + "type_title": "URL", + "validation": [] + } + results = otx.prepareResults(raw) + self.assertEqual(results["response"], raw) + self.assertEqual(results["summary"], "Analysis complete.") + self.assertEqual(results["status"], "info") + + def test_prepareResults_error(self): + raw = {} + results = otx.prepareResults(raw) + self.assertEqual(results["response"], raw) + self.assertEqual(results["summary"], "internal_failure") + self.assertEqual(results["status"], "caution") + + def test_analyze(self): + output = { + "whois": "http://whois.domaintools.com/192.168.1.1", + "reputation": 0, + "indicator": "192.168.1.1", + "type": "IPv4", + "pulse_info": { + "count": 0, + "pulses": [], + "related": { + "alienvault": { + "adversary": [], + "malware_families": [] + } + } + }, + "false_positive": [], + "sections": [ + "general" + ] + } + + artifactInput = '{"value":"192.168.1.1","artifactType":"ip"}' + conf = {"base_url": "https://myurl/", "api_key": "xyz"} + with patch('otx.otx.sendReq', new=MagicMock(return_value=output)) as mock: + results = otx.analyze(conf, artifactInput) + self.assertEqual(results["summary"], "harmless") + mock.assert_called_once() diff --git a/salt/sensoroni/files/analyzers/otx/requirements.txt b/salt/sensoroni/files/analyzers/otx/requirements.txt new file mode 100644 index 000000000..a8980057f --- /dev/null +++ b/salt/sensoroni/files/analyzers/otx/requirements.txt @@ -0,0 +1,2 @@ +requests>=2.27.1 +pyyaml>=6.0 diff --git a/salt/sensoroni/files/analyzers/otx/source-packages/PyYAML-6.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl b/salt/sensoroni/files/analyzers/otx/source-packages/PyYAML-6.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl new file mode 100644 index 000000000..1dfb5c2d3 Binary files /dev/null and b/salt/sensoroni/files/analyzers/otx/source-packages/PyYAML-6.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl differ diff --git a/salt/sensoroni/files/analyzers/otx/source-packages/certifi-2021.10.8-py2.py3-none-any.whl b/salt/sensoroni/files/analyzers/otx/source-packages/certifi-2021.10.8-py2.py3-none-any.whl new file mode 100644 index 000000000..fbcb86b5f Binary files /dev/null and b/salt/sensoroni/files/analyzers/otx/source-packages/certifi-2021.10.8-py2.py3-none-any.whl differ diff --git a/salt/sensoroni/files/analyzers/otx/source-packages/charset_normalizer-2.0.12-py3-none-any.whl b/salt/sensoroni/files/analyzers/otx/source-packages/charset_normalizer-2.0.12-py3-none-any.whl new file mode 100644 index 000000000..17a2dfbeb Binary files /dev/null and b/salt/sensoroni/files/analyzers/otx/source-packages/charset_normalizer-2.0.12-py3-none-any.whl differ diff --git a/salt/sensoroni/files/analyzers/otx/source-packages/idna-3.3-py3-none-any.whl b/salt/sensoroni/files/analyzers/otx/source-packages/idna-3.3-py3-none-any.whl new file mode 100644 index 000000000..060541bc9 Binary files /dev/null and b/salt/sensoroni/files/analyzers/otx/source-packages/idna-3.3-py3-none-any.whl differ diff --git a/salt/sensoroni/files/analyzers/otx/source-packages/requests-2.27.1-py2.py3-none-any.whl b/salt/sensoroni/files/analyzers/otx/source-packages/requests-2.27.1-py2.py3-none-any.whl new file mode 100644 
index 000000000..807fc6110 Binary files /dev/null and b/salt/sensoroni/files/analyzers/otx/source-packages/requests-2.27.1-py2.py3-none-any.whl differ diff --git a/salt/sensoroni/files/analyzers/otx/source-packages/urllib3-1.26.9-py2.py3-none-any.whl b/salt/sensoroni/files/analyzers/otx/source-packages/urllib3-1.26.9-py2.py3-none-any.whl new file mode 100644 index 000000000..5019453dd Binary files /dev/null and b/salt/sensoroni/files/analyzers/otx/source-packages/urllib3-1.26.9-py2.py3-none-any.whl differ diff --git a/salt/sensoroni/files/analyzers/pulsedive/__init__.py b/salt/sensoroni/files/analyzers/pulsedive/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/salt/sensoroni/files/analyzers/pulsedive/pulsedive.json b/salt/sensoroni/files/analyzers/pulsedive/pulsedive.json new file mode 100644 index 000000000..87d4889e7 --- /dev/null +++ b/salt/sensoroni/files/analyzers/pulsedive/pulsedive.json @@ -0,0 +1,7 @@ +{ + "name": "Pulsedive", + "version": "0.1", + "author": "Security Onion Solutions", + "description": "This analyzer queries Pulsedive for context around an observable.", + "supportedTypes": ["domain", "ip", "hash", "uri_path", "url", "user-agent"] +} diff --git a/salt/sensoroni/files/analyzers/pulsedive/pulsedive.py b/salt/sensoroni/files/analyzers/pulsedive/pulsedive.py new file mode 100644 index 000000000..719d760e6 --- /dev/null +++ b/salt/sensoroni/files/analyzers/pulsedive/pulsedive.py @@ -0,0 +1,107 @@ +import json +import requests +import argparse +import helpers +import os +import sys + + +def checkConfigRequirements(conf): + if "api_key" not in conf or len(conf['api_key']) == 0: + sys.exit(126) + else: + return True + + +def buildReq(conf, artifactType, artifactValue): + indicatorTypes = ["domain", "hash", "ip", "url"]  # indicator lookups use info.php; other supported types use the explore.php search below + if artifactType in indicatorTypes: + url = conf['base_url'] + '/info.php' + params = {"key": conf["api_key"], "indicator": artifactValue} + else: + if artifactType == "uri_path": + query = "http.location=" + artifactValue + url = conf['base_url'] + '/explore.php' + elif artifactType == "user-agent": + query = "http.useragent_normaliser=" + artifactValue + url = conf['base_url'] + '/explore.php' + params = {"key": conf["api_key"], "q": query, "limit": 100} + + return url, params + + +def sendReq(url, params): + response = requests.request('GET', url, params=params) + return response.json() + + +def prepareResults(raw): + classified = [] + classification = { + "high": "malicious", + "medium": "suspicious", + "low": "harmless", + "none": "none", + "unknown": "unknown" + } + + if raw: + if 'results' in raw: + if raw['results'] == []: + classified.append("no_results") + else: + for r in raw['results']: + risk = r['risk'] + classified.append(classification.get(risk)) + else: + classified.append(classification.get(raw['risk'])) + + if classified.count('malicious') > 0: + summary = "malicious" + status = "threat" + elif classified.count('suspicious') > 0: + summary = "suspicious" + status = "caution" + elif classified.count('harmless') > 0: + summary = "harmless" + status = "ok" + elif classified.count('none') > 0: + summary = "harmless" + status = "ok" + elif classified.count('unknown') > 0: + summary = "" + status = "unknown" + elif classified.count('no_results') > 0: + summary = "no_results" + status = "ok" + else: + summary = "internal_failure" + status = "caution" + results = {'response': raw, 'summary': summary, 'status': status} + return results + + +def analyze(conf, input): + checkConfigRequirements(conf) + meta =
helpers.loadMetadata(__file__) + data = helpers.parseArtifact(input) + helpers.checkSupportedType(meta, data["artifactType"]) + request = buildReq(conf, data["artifactType"], data["value"]) + response = sendReq(request[0], request[1]) + return prepareResults(response) + + +def main(): + dir = os.path.dirname(os.path.realpath(__file__)) + parser = argparse.ArgumentParser(description='Search Pulsedive for a given artifact') + parser.add_argument('artifact', help='the artifact represented in JSON format') + parser.add_argument('-c', '--config', metavar="CONFIG_FILE", default=dir + "/pulsedive.yaml", help='optional config file to use instead of the default config file') + + args = parser.parse_args() + if args.artifact: + results = analyze(helpers.loadConfig(args.config), args.artifact) + print(json.dumps(results)) + + +if __name__ == "__main__": + main() diff --git a/salt/sensoroni/files/analyzers/pulsedive/pulsedive.yaml b/salt/sensoroni/files/analyzers/pulsedive/pulsedive.yaml new file mode 100644 index 000000000..c29f61ad8 --- /dev/null +++ b/salt/sensoroni/files/analyzers/pulsedive/pulsedive.yaml @@ -0,0 +1,2 @@ +base_url: https://pulsedive.com/api/ +api_key: "{{ salt['pillar.get']('sensoroni:analyzers:pulsedive:api_key', '') }}" diff --git a/salt/sensoroni/files/analyzers/pulsedive/pulsedive_test.py b/salt/sensoroni/files/analyzers/pulsedive/pulsedive_test.py new file mode 100644 index 000000000..47b60efdd --- /dev/null +++ b/salt/sensoroni/files/analyzers/pulsedive/pulsedive_test.py @@ -0,0 +1,121 @@ +from io import StringIO +import sys +from unittest.mock import patch, MagicMock +from pulsedive import pulsedive +import unittest + + +class TestPulsediveMethods(unittest.TestCase): + + def test_main_missing_input(self): + with patch('sys.exit', new=MagicMock()) as sysmock: + with patch('sys.stderr', new=StringIO()) as mock_stderr: + sys.argv = ["cmd"] + pulsedive.main() + self.assertEqual(mock_stderr.getvalue(), "usage: cmd [-h] [-c CONFIG_FILE] artifact\ncmd: error: the following arguments are required: artifact\n") + sysmock.assert_called_once_with(2) + + def test_main_success(self): + output = {"foo": "bar"} + with patch('sys.stdout', new=StringIO()) as mock_stdout: + with patch('pulsedive.pulsedive.analyze', new=MagicMock(return_value=output)) as mock: + sys.argv = ["cmd", "input"] + pulsedive.main() + expected = '{"foo": "bar"}\n' + self.assertEqual(mock_stdout.getvalue(), expected) + mock.assert_called_once() + + def test_checkConfigRequirements(self): + conf = {"not_a_key": "abcd12345"} + with self.assertRaises(SystemExit) as cm: + pulsedive.checkConfigRequirements(conf) + self.assertEqual(cm.exception.code, 126) + + def test_buildReq_domain(self): + conf = {"api_key": "xyz", "base_url": "https://myurl"} + artifactType = "domain" + artifactValue = "pulsedive.com" + result = pulsedive.buildReq(conf, artifactType, artifactValue) + self.assertEqual("https://myurl/info.php", result[0]) + self.assertEqual({"key": "xyz", "indicator": "pulsedive.com"}, result[1]) + + def test_buildReq_uri_path(self): + conf = {"api_key": "xyz", "base_url": "https://myurl"} + artifactType = "uri_path" + artifactValue = "/main.php" + result = pulsedive.buildReq(conf, artifactType, artifactValue) + self.assertEqual("https://myurl/explore.php", result[0]) + self.assertEqual({"key": "xyz", "q": "http.location=/main.php", "limit": 100}, result[1]) + + def test_sendReq(self): + with patch('requests.request', new=MagicMock(return_value=MagicMock())) as mock: + url = 'https://myurl/api/' + params = {"key":
"abcd1234", "q": "http.location=/main.php", "limit": 100} + response = pulsedive.sendReq(url=url, params=params) + mock.assert_called_once_with("GET", "https://myurl/api/", params={"key": "abcd1234", "q": "http.location=/main.php", "limit": 100}) + self.assertIsNotNone(response) + + def test_prepareResults_risk_high(self): + raw = {"results": [{"risk": "high"}]} + results = pulsedive.prepareResults(raw) + self.assertEqual(results["response"], raw) + self.assertEqual(results["summary"], "malicious") + self.assertEqual(results["status"], "threat") + + def test_prepareResults_risk_med(self): + raw = {"results": [{"risk": "medium"}]} + results = pulsedive.prepareResults(raw) + self.assertEqual(results["response"], raw) + self.assertEqual(results["summary"], "suspicious") + self.assertEqual(results["status"], "caution") + + def test_prepareResults_risk_low(self): + raw = {"results": [{"risk": "low"}]} + results = pulsedive.prepareResults(raw) + self.assertEqual(results["response"], raw) + self.assertEqual(results["summary"], "harmless") + self.assertEqual(results["status"], "ok") + + def test_prepareResults_risk_none(self): + raw = {"results": [{"risk": "none"}]} + results = pulsedive.prepareResults(raw) + self.assertEqual(results["response"], raw) + self.assertEqual(results["summary"], "harmless") + self.assertEqual(results["status"], "ok") + + def test_prepareResults_risk_unknown(self): + raw = {"results": [{"risk": "unknown"}]} + results = pulsedive.prepareResults(raw) + self.assertEqual(results["response"], raw) + self.assertEqual(results["summary"], "") + self.assertEqual(results["status"], "unknown") + + def test_prepareResults_no_results(self): + raw = {"results": []} + results = pulsedive.prepareResults(raw) + self.assertEqual(results["response"], raw) + self.assertEqual(results["summary"], "no_results") + self.assertEqual(results["status"], "ok") + + def test_prepareResults_risk_none_indicator(self): + raw = {"iid": "1234", "risk": "none"} + results = pulsedive.prepareResults(raw) + self.assertEqual(results["response"], raw) + self.assertEqual(results["summary"], "harmless") + self.assertEqual(results["status"], "ok") + + def test_prepareResults_error(self): + raw = {} + results = pulsedive.prepareResults(raw) + self.assertEqual(results["response"], raw) + self.assertEqual(results["summary"], "internal_failure") + self.assertEqual(results["status"], "caution") + + def test_analyze(self): + output = {"results": [{"risk": "low"}]} + artifactInput = '{"value":"chrome","artifactType":"user-agent"}' + conf = {"api_key": "xyz", "base_url": "https://myurl"} + with patch('pulsedive.pulsedive.sendReq', new=MagicMock(return_value=output)) as mock: + results = pulsedive.analyze(conf, artifactInput) + self.assertEqual(results["summary"], "harmless") + mock.assert_called_once() diff --git a/salt/sensoroni/files/analyzers/pulsedive/requirements.txt b/salt/sensoroni/files/analyzers/pulsedive/requirements.txt new file mode 100644 index 000000000..a8980057f --- /dev/null +++ b/salt/sensoroni/files/analyzers/pulsedive/requirements.txt @@ -0,0 +1,2 @@ +requests>=2.27.1 +pyyaml>=6.0 diff --git a/salt/sensoroni/files/analyzers/pulsedive/source-packages/PyYAML-6.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl b/salt/sensoroni/files/analyzers/pulsedive/source-packages/PyYAML-6.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl new file mode 100644 index 000000000..1dfb5c2d3 Binary files /dev/null and 
b/salt/sensoroni/files/analyzers/pulsedive/source-packages/PyYAML-6.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl differ diff --git a/salt/sensoroni/files/analyzers/pulsedive/source-packages/certifi-2021.10.8-py2.py3-none-any.whl b/salt/sensoroni/files/analyzers/pulsedive/source-packages/certifi-2021.10.8-py2.py3-none-any.whl new file mode 100644 index 000000000..fbcb86b5f Binary files /dev/null and b/salt/sensoroni/files/analyzers/pulsedive/source-packages/certifi-2021.10.8-py2.py3-none-any.whl differ diff --git a/salt/sensoroni/files/analyzers/pulsedive/source-packages/charset_normalizer-2.0.12-py3-none-any.whl b/salt/sensoroni/files/analyzers/pulsedive/source-packages/charset_normalizer-2.0.12-py3-none-any.whl new file mode 100644 index 000000000..17a2dfbeb Binary files /dev/null and b/salt/sensoroni/files/analyzers/pulsedive/source-packages/charset_normalizer-2.0.12-py3-none-any.whl differ diff --git a/salt/sensoroni/files/analyzers/pulsedive/source-packages/idna-3.3-py3-none-any.whl b/salt/sensoroni/files/analyzers/pulsedive/source-packages/idna-3.3-py3-none-any.whl new file mode 100644 index 000000000..060541bc9 Binary files /dev/null and b/salt/sensoroni/files/analyzers/pulsedive/source-packages/idna-3.3-py3-none-any.whl differ diff --git a/salt/sensoroni/files/analyzers/pulsedive/source-packages/requests-2.27.1-py2.py3-none-any.whl b/salt/sensoroni/files/analyzers/pulsedive/source-packages/requests-2.27.1-py2.py3-none-any.whl new file mode 100644 index 000000000..807fc6110 Binary files /dev/null and b/salt/sensoroni/files/analyzers/pulsedive/source-packages/requests-2.27.1-py2.py3-none-any.whl differ diff --git a/salt/sensoroni/files/analyzers/pulsedive/source-packages/urllib3-1.26.9-py2.py3-none-any.whl b/salt/sensoroni/files/analyzers/pulsedive/source-packages/urllib3-1.26.9-py2.py3-none-any.whl new file mode 100644 index 000000000..5019453dd Binary files /dev/null and b/salt/sensoroni/files/analyzers/pulsedive/source-packages/urllib3-1.26.9-py2.py3-none-any.whl differ diff --git a/salt/sensoroni/files/analyzers/pytest.ini b/salt/sensoroni/files/analyzers/pytest.ini new file mode 100644 index 000000000..e74b52dd1 --- /dev/null +++ b/salt/sensoroni/files/analyzers/pytest.ini @@ -0,0 +1,27 @@ +[flake8] +exclude = + .venv + */site-packages/, + */source-packages/, + */__pycache__ +show_source = true +max_complexity = 12 +max_line_length = 200 +statistics = true +doctests = true + +[pytest] +python_files = *_test.py +python_classes = Test +python_functions = test_* +norecursedirs = site-packages + +[report] +exclude_lines = + if __name__ == .__main__.: + +show_missing = True +omit = + *_test.py, + */site-packages/*, + */source-packages/* diff --git a/salt/sensoroni/files/analyzers/spamhaus/__init__.py b/salt/sensoroni/files/analyzers/spamhaus/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/salt/sensoroni/files/analyzers/spamhaus/requirements.txt b/salt/sensoroni/files/analyzers/spamhaus/requirements.txt new file mode 100644 index 000000000..9ec0d5539 --- /dev/null +++ b/salt/sensoroni/files/analyzers/spamhaus/requirements.txt @@ -0,0 +1,2 @@ +dnspython>=2.2.1 +pyyaml>=6.0 diff --git a/salt/sensoroni/files/analyzers/spamhaus/source-packages/PyYAML-6.0-cp37-cp37m-macosx_10_9_x86_64.whl b/salt/sensoroni/files/analyzers/spamhaus/source-packages/PyYAML-6.0-cp37-cp37m-macosx_10_9_x86_64.whl new file mode 100644 index 000000000..b390ce52f Binary files /dev/null and 
b/salt/sensoroni/files/analyzers/spamhaus/source-packages/PyYAML-6.0-cp37-cp37m-macosx_10_9_x86_64.whl differ diff --git a/salt/sensoroni/files/analyzers/spamhaus/source-packages/dnspython-2.2.1-py3-none-any.whl b/salt/sensoroni/files/analyzers/spamhaus/source-packages/dnspython-2.2.1-py3-none-any.whl new file mode 100644 index 000000000..645d5bb5b Binary files /dev/null and b/salt/sensoroni/files/analyzers/spamhaus/source-packages/dnspython-2.2.1-py3-none-any.whl differ diff --git a/salt/sensoroni/files/analyzers/spamhaus/spamhaus.json b/salt/sensoroni/files/analyzers/spamhaus/spamhaus.json new file mode 100644 index 000000000..abeced090 --- /dev/null +++ b/salt/sensoroni/files/analyzers/spamhaus/spamhaus.json @@ -0,0 +1,7 @@ +{ + "name": "Spamhaus", + "version": "0.1", + "author": "Security Onion Solutions", + "description": "This analyzer queries Spamhaus to see if an IP is considered malicious.", + "supportedTypes" : ["ip"] +} diff --git a/salt/sensoroni/files/analyzers/spamhaus/spamhaus.py b/salt/sensoroni/files/analyzers/spamhaus/spamhaus.py new file mode 100644 index 000000000..cf6bfa198 --- /dev/null +++ b/salt/sensoroni/files/analyzers/spamhaus/spamhaus.py @@ -0,0 +1,83 @@ +import argparse +import dns.resolver +import dns.reversename +import json +import os +import helpers + + +def resolve(config, meta, ip): + value = str(dns.reversename.from_address(ip)).replace("in-addr.arpa.", config["lookup_host"] + ".") + resolver = dns.resolver.Resolver() + if len(config["nameservers"]) > 0 and len(config["nameservers"][0]) > 0: + resolver.nameservers = config["nameservers"] + try: + responses = resolver.resolve(value) + except dns.resolver.NXDOMAIN: + responses = [] + + return responses + + +def prepareResults(responses): + resultMap = { + "127.0.0.2": {'severity': 200, 'summary': 'spam', 'status': 'caution'}, + "127.0.0.3": {'severity': 200, 'summary': 'spam', 'status': 'caution'}, + "127.0.0.4": {'severity': 300, 'summary': 'malicious', 'status': 'threat'}, + "127.0.0.5": {'severity': 300, 'summary': 'malicious', 'status': 'threat'}, + "127.0.0.6": {'severity': 300, 'summary': 'malicious', 'status': 'threat'}, + "127.0.0.7": {'severity': 300, 'summary': 'malicious', 'status': 'threat'}, + "127.0.0.10": {'severity': 100, 'summary': 'suspicious', 'status': 'caution'}, + "127.0.0.11": {'severity': 100, 'summary': 'suspicious', 'status': 'caution'}, + + "127.0.1.2": {'severity': 200, 'summary': 'spam', 'status': 'caution'}, + "127.0.1.4": {'severity': 250, 'summary': 'phishing', 'status': 'threat'}, + "127.0.1.5": {'severity': 300, 'summary': 'malicious', 'status': 'threat'}, + "127.0.1.6": {'severity': 300, 'summary': 'malicious', 'status': 'threat'}, + "127.0.1.102": {'severity': 200, 'summary': 'spam', 'status': 'caution'}, + "127.0.1.103": {'severity': 200, 'summary': 'spam', 'status': 'caution'}, + "127.0.1.104": {'severity': 300, 'summary': 'malicious', 'status': 'threat'}, + "127.0.1.105": {'severity': 300, 'summary': 'malicious', 'status': 'threat'}, + "127.0.1.106": {'severity': 300, 'summary': 'malicious', 'status': 'threat'}, + "127.0.1.107": {'severity': 100, 'summary': 'suspicious', 'status': 'caution'}, + + "127.255.255.252": {'severity': 1, 'summary': 'internal_failure', 'status': 'caution'}, + "127.255.255.254": {'severity': 2, 'summary': 'internal_failure', 'status': 'caution'}, + "127.255.255.255": {'severity': 3, 'summary': 'excessive_usage', 'status': 'caution'}, + } + + raw = [] + currentResult = {'severity': 0, 'summary': 'harmless', 'status': 'ok'} + for response in 
responses: + raw.append(response.to_text()) + if response.address in resultMap: + result = resultMap[response.address] + if currentResult is None or currentResult['severity'] < result['severity']: + currentResult = result + + currentResult['response'] = raw + return currentResult + + +def analyze(config, input): + meta = helpers.loadMetadata(__file__) + data = helpers.parseArtifact(input) + helpers.checkSupportedType(meta, data["artifactType"]) + response = resolve(config, meta, data["value"]) + return prepareResults(response) + + +def main(): + dir = os.path.dirname(os.path.realpath(__file__)) + parser = argparse.ArgumentParser(description='Search Spamhaus for an IP') + parser.add_argument('artifact', help='the artifact represented in JSON format') + parser.add_argument('-c', '--config', metavar="CONFIG_FILE", default=dir + "/spamhaus.yaml", help='optional config file to use instead of the default config file') + + args = parser.parse_args() + if args.artifact: + results = analyze(helpers.loadConfig(args.config), args.artifact) + print(json.dumps(results)) + + +if __name__ == "__main__": + main() diff --git a/salt/sensoroni/files/analyzers/spamhaus/spamhaus.yaml b/salt/sensoroni/files/analyzers/spamhaus/spamhaus.yaml new file mode 100644 index 000000000..271cf27a0 --- /dev/null +++ b/salt/sensoroni/files/analyzers/spamhaus/spamhaus.yaml @@ -0,0 +1,2 @@ +lookup_host: zen.spamhaus.org +nameservers: ["{{ salt['pillar.get']('sensoroni:analyzers:spamhaus:nameserver', '') }}"] \ No newline at end of file diff --git a/salt/sensoroni/files/analyzers/spamhaus/spamhaus_test.py b/salt/sensoroni/files/analyzers/spamhaus/spamhaus_test.py new file mode 100644 index 000000000..b62024444 --- /dev/null +++ b/salt/sensoroni/files/analyzers/spamhaus/spamhaus_test.py @@ -0,0 +1,126 @@ +from io import StringIO +import dns +import sys +from unittest.mock import patch, MagicMock +from spamhaus import spamhaus +import unittest + + +class FakeAnswer: + address = '' + + def __init__(self, ip='127.0.0.1'): + self.address = ip + + def to_text(self): + return str(self.address) + + +class TestSpamhausMethods(unittest.TestCase): + + def test_main_missing_input(self): + with patch('sys.exit', new=MagicMock()) as sysmock: + with patch('sys.stderr', new=StringIO()) as mock_stderr: + sys.argv = ["cmd"] + spamhaus.main() + self.assertEqual(mock_stderr.getvalue(), "usage: cmd [-h] [-c CONFIG_FILE] artifact\ncmd: error: the following arguments are required: artifact\n") + sysmock.assert_called_once_with(2) + + def test_main_success(self): + output = {"foo": "bar"} + with patch('sys.stdout', new=StringIO()) as mock_stdout: + with patch('spamhaus.spamhaus.analyze', new=MagicMock(return_value=output)) as mock: + sys.argv = ["cmd", "input"] + spamhaus.main() + expected = '{"foo": "bar"}\n' + self.assertEqual(mock_stdout.getvalue(), expected) + mock.assert_called_once() + + def test_resolve(self): + with patch('dns.resolver.Resolver.resolve', new=MagicMock(return_value=MagicMock())) as mock: + meta = {} + conf = {"nameservers": ["1.2.3.4"], "lookup_host": "some.host"} + response = spamhaus.resolve(config=conf, meta=meta, ip="127.0.0.1") + mock.assert_called_once_with("1.0.0.127.some.host.") + self.assertIsNotNone(response) + + def test_resolve_not_found(self): + mock = MagicMock() + mock.side_effect = dns.resolver.NXDOMAIN + with patch('dns.resolver.Resolver.resolve', new=mock): + meta = {} + conf = {"nameservers": ["1.2.3.4"], "lookup_host": "some.host"} + response = spamhaus.resolve(config=conf, meta=meta, ip="127.0.0.1") + 
mock.assert_called_once_with("1.0.0.127.some.host.") + self.assertIsNotNone(response) + + def test_prepareResults_ok_multiple(self): + raw = [FakeAnswer("127.0.0.0"), FakeAnswer("127.0.0.1")] + results = spamhaus.prepareResults(raw) + self.assertEqual(results["response"], ['127.0.0.0', '127.0.0.1']) + self.assertEqual(results["summary"], "harmless") + self.assertEqual(results["status"], "ok") + + def test_prepareResults_failure2(self): + raw = [FakeAnswer("127.255.255.252")] + results = spamhaus.prepareResults(raw) + self.assertEqual(results["response"], ['127.255.255.252']) + self.assertEqual(results["summary"], "internal_failure") + self.assertEqual(results["status"], "caution") + + def test_prepareResults_failure4(self): + raw = [FakeAnswer("127.255.255.254")] + results = spamhaus.prepareResults(raw) + self.assertEqual(results["response"], ['127.255.255.254']) + self.assertEqual(results["summary"], "internal_failure") + self.assertEqual(results["status"], "caution") + + def test_prepareResults_excessive(self): + raw = [FakeAnswer("127.255.255.255")] + results = spamhaus.prepareResults(raw) + self.assertEqual(results["response"], ['127.255.255.255']) + self.assertEqual(results["summary"], "excessive_usage") + self.assertEqual(results["status"], "caution") + + def test_prepareResults_sus_multiple(self): + raw = [FakeAnswer("127.0.0.10"), FakeAnswer("127.0.0.11")] + results = spamhaus.prepareResults(raw) + self.assertEqual(results["response"], ['127.0.0.10', '127.0.0.11']) + self.assertEqual(results["summary"], "suspicious") + self.assertEqual(results["status"], "caution") + + def test_prepareResults_spam_multiple(self): + raw = [FakeAnswer("127.0.0.2")] + results = spamhaus.prepareResults(raw) + self.assertEqual(results["response"], ['127.0.0.2']) + self.assertEqual(results["summary"], "spam") + self.assertEqual(results["status"], "caution") + + def test_prepareResults_threat_multiple(self): + raw = [FakeAnswer("127.0.0.1"), FakeAnswer("127.0.0.4")] + results = spamhaus.prepareResults(raw) + self.assertEqual(results["response"], ['127.0.0.1', '127.0.0.4']) + self.assertEqual(results["summary"], "malicious") + self.assertEqual(results["status"], "threat") + + def test_prepareResults_threat(self): + raw = [FakeAnswer("127.0.0.4")] + results = spamhaus.prepareResults(raw) + self.assertEqual(results["response"], ['127.0.0.4']) + self.assertEqual(results["summary"], "malicious") + self.assertEqual(results["status"], "threat") + + def test_prepareResults_error(self): + raw = [] + results = spamhaus.prepareResults(raw) + self.assertEqual(results["response"], []) + self.assertEqual(results["summary"], "harmless") + self.assertEqual(results["status"], "ok") + + def test_analyze(self): + output = [FakeAnswer()] + artifactInput = '{"value":"1.2.3.4","artifactType":"ip"}' + with patch('spamhaus.spamhaus.resolve', new=MagicMock(return_value=output)) as mock: + results = spamhaus.analyze({}, artifactInput) + self.assertEqual(results["summary"], "harmless") + mock.assert_called_once() diff --git a/salt/sensoroni/files/analyzers/urlhaus/__init__.py b/salt/sensoroni/files/analyzers/urlhaus/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/salt/sensoroni/files/analyzers/urlhaus/requirements.txt b/salt/sensoroni/files/analyzers/urlhaus/requirements.txt new file mode 100644 index 000000000..a8980057f --- /dev/null +++ b/salt/sensoroni/files/analyzers/urlhaus/requirements.txt @@ -0,0 +1,2 @@ +requests>=2.27.1 +pyyaml>=6.0 diff --git 
a/salt/sensoroni/files/analyzers/urlhaus/source-packages/certifi-2021.10.8-py2.py3-none-any.whl b/salt/sensoroni/files/analyzers/urlhaus/source-packages/certifi-2021.10.8-py2.py3-none-any.whl new file mode 100644 index 000000000..fbcb86b5f Binary files /dev/null and b/salt/sensoroni/files/analyzers/urlhaus/source-packages/certifi-2021.10.8-py2.py3-none-any.whl differ diff --git a/salt/sensoroni/files/analyzers/urlhaus/source-packages/charset_normalizer-2.0.12-py3-none-any.whl b/salt/sensoroni/files/analyzers/urlhaus/source-packages/charset_normalizer-2.0.12-py3-none-any.whl new file mode 100644 index 000000000..17a2dfbeb Binary files /dev/null and b/salt/sensoroni/files/analyzers/urlhaus/source-packages/charset_normalizer-2.0.12-py3-none-any.whl differ diff --git a/salt/sensoroni/files/analyzers/urlhaus/source-packages/idna-3.3-py3-none-any.whl b/salt/sensoroni/files/analyzers/urlhaus/source-packages/idna-3.3-py3-none-any.whl new file mode 100644 index 000000000..060541bc9 Binary files /dev/null and b/salt/sensoroni/files/analyzers/urlhaus/source-packages/idna-3.3-py3-none-any.whl differ diff --git a/salt/sensoroni/files/analyzers/urlhaus/source-packages/requests-2.27.1-py2.py3-none-any.whl b/salt/sensoroni/files/analyzers/urlhaus/source-packages/requests-2.27.1-py2.py3-none-any.whl new file mode 100644 index 000000000..807fc6110 Binary files /dev/null and b/salt/sensoroni/files/analyzers/urlhaus/source-packages/requests-2.27.1-py2.py3-none-any.whl differ diff --git a/salt/sensoroni/files/analyzers/urlhaus/source-packages/urllib3-1.26.9-py2.py3-none-any.whl b/salt/sensoroni/files/analyzers/urlhaus/source-packages/urllib3-1.26.9-py2.py3-none-any.whl new file mode 100644 index 000000000..5019453dd Binary files /dev/null and b/salt/sensoroni/files/analyzers/urlhaus/source-packages/urllib3-1.26.9-py2.py3-none-any.whl differ diff --git a/salt/sensoroni/files/analyzers/urlhaus/urlhaus.json b/salt/sensoroni/files/analyzers/urlhaus/urlhaus.json new file mode 100644 index 000000000..d9cf1dce0 --- /dev/null +++ b/salt/sensoroni/files/analyzers/urlhaus/urlhaus.json @@ -0,0 +1,8 @@ +{ + "name": "Urlhaus", + "version": "0.1", + "author": "Security Onion Solutions", + "description": "This analyzer queries URLHaus to see if a URL is considered malicious.", + "supportedTypes" : ["url"], + "baseUrl": "https://urlhaus-api.abuse.ch/v1/url/" +} diff --git a/salt/sensoroni/files/analyzers/urlhaus/urlhaus.py b/salt/sensoroni/files/analyzers/urlhaus/urlhaus.py new file mode 100644 index 000000000..3c326d3b0 --- /dev/null +++ b/salt/sensoroni/files/analyzers/urlhaus/urlhaus.py @@ -0,0 +1,52 @@ +import json +import requests +import sys +import helpers + + +def buildReq(artifact_value): + return {"url": artifact_value} + + +def sendReq(meta, payload): + url = meta['baseUrl'] + response = requests.request('POST', url, data=payload) + return response.json() + + +def prepareResults(raw): + if 'threat' in raw: + summary = raw['threat'] + status = "threat" + elif 'query_status' in raw: + summary = raw['query_status'] + if summary == 'no_results': + status = "ok" + else: + status = "caution" + else: + summary = "internal_failure" + status = "caution" + results = {'response': raw, 'summary': summary, 'status': status} + return results + + +def analyze(input): + meta = helpers.loadMetadata(__file__) + data = helpers.parseArtifact(input) + helpers.checkSupportedType(meta, data["artifactType"]) + payload = buildReq(data["value"]) + response = sendReq(meta, payload) + return prepareResults(response) + + +def main(): + if 
len(sys.argv) == 2: + results = analyze(sys.argv[1]) + print(json.dumps(results)) + else: + print("ERROR: Missing input JSON") + + +if __name__ == "__main__": + main() diff --git a/salt/sensoroni/files/analyzers/urlhaus/urlhaus_test.py b/salt/sensoroni/files/analyzers/urlhaus/urlhaus_test.py new file mode 100644 index 000000000..ae4584ee5 --- /dev/null +++ b/salt/sensoroni/files/analyzers/urlhaus/urlhaus_test.py @@ -0,0 +1,72 @@ +from io import StringIO +import sys +from unittest.mock import patch, MagicMock +from urlhaus import urlhaus +import unittest + + +class TestUrlhausMethods(unittest.TestCase): + + def test_main_missing_input(self): + with patch('sys.stdout', new=StringIO()) as mock_stdout: + sys.argv = ["cmd"] + urlhaus.main() + self.assertEqual(mock_stdout.getvalue(), "ERROR: Missing input JSON\n") + + def test_main_success(self): + output = {"foo": "bar"} + with patch('sys.stdout', new=StringIO()) as mock_stdout: + with patch('urlhaus.urlhaus.analyze', new=MagicMock(return_value=output)) as mock: + sys.argv = ["cmd", "input"] + urlhaus.main() + expected = '{"foo": "bar"}\n' + self.assertEqual(mock_stdout.getvalue(), expected) + mock.assert_called_once() + + def test_buildReq(self): + result = urlhaus.buildReq("test") + self.assertEqual("test", result["url"]) + + def test_sendReq(self): + with patch('requests.request', new=MagicMock(return_value=MagicMock())) as mock: + meta = {"baseUrl": "myurl"} + response = urlhaus.sendReq(meta, "mypayload") + mock.assert_called_once_with("POST", "myurl", data="mypayload") + self.assertIsNotNone(response) + + def test_prepareResults_none(self): + raw = {"query_status": "no_results"} + results = urlhaus.prepareResults(raw) + self.assertEqual(results["response"], raw) + self.assertEqual(results["summary"], "no_results") + self.assertEqual(results["status"], "ok") + + def test_prepareResults_invalidUrl(self): + raw = {"query_status": "invalid_url"} + results = urlhaus.prepareResults(raw) + self.assertEqual(results["response"], raw) + self.assertEqual(results["summary"], "invalid_url") + self.assertEqual(results["status"], "caution") + + def test_prepareResults_threat(self): + raw = {"query_status": "invalid_url"} # This is overridden in this scenario + raw["threat"] = "bad_actor" + results = urlhaus.prepareResults(raw) + self.assertEqual(results["response"], raw) + self.assertEqual(results["summary"], "bad_actor") + self.assertEqual(results["status"], "threat") + + def test_prepareResults_error(self): + raw = {} + results = urlhaus.prepareResults(raw) + self.assertEqual(results["response"], raw) + self.assertEqual(results["summary"], "internal_failure") + self.assertEqual(results["status"], "caution") + + def test_analyze(self): + output = {"threat": "malware_download"} + artifactInput = '{"value":"foo","artifactType":"url"}' + with patch('urlhaus.urlhaus.sendReq', new=MagicMock(return_value=output)) as mock: + results = urlhaus.analyze(artifactInput) + self.assertEqual(results["summary"], "malware_download") + mock.assert_called_once() diff --git a/salt/sensoroni/files/analyzers/urlscan/__init__.py b/salt/sensoroni/files/analyzers/urlscan/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/salt/sensoroni/files/analyzers/urlscan/requirements.txt b/salt/sensoroni/files/analyzers/urlscan/requirements.txt new file mode 100644 index 000000000..a8980057f --- /dev/null +++ b/salt/sensoroni/files/analyzers/urlscan/requirements.txt @@ -0,0 +1,2 @@ +requests>=2.27.1 +pyyaml>=6.0 diff --git
a/salt/sensoroni/files/analyzers/urlscan/urlscan.json b/salt/sensoroni/files/analyzers/urlscan/urlscan.json new file mode 100644 index 000000000..75e85bdbf --- /dev/null +++ b/salt/sensoroni/files/analyzers/urlscan/urlscan.json @@ -0,0 +1,7 @@ +{ + "name": "Urlscan", + "version": "0.1", + "author": "Security Onion Solutions", + "description": "This analyzer submits a URL to Urlscan for context around an observable.", + "supportedTypes" : ["url"] +} diff --git a/salt/sensoroni/files/analyzers/urlscan/urlscan.py b/salt/sensoroni/files/analyzers/urlscan/urlscan.py new file mode 100755 index 000000000..94c3ec8db --- /dev/null +++ b/salt/sensoroni/files/analyzers/urlscan/urlscan.py @@ -0,0 +1,91 @@ +import json +import requests +import helpers +import sys +import os +import argparse +import time + + +def checkConfigRequirements(conf): + if "enabled" in conf: + if "api_key" not in conf or len(conf['api_key']) == 0: + sys.exit(126) + else: + return True + else: + sys.exit(126) + + +def buildReq(conf, artifact_type, artifact_value): + headers = {"API-Key": conf["api_key"]} + url = conf['base_url'] + 'scan/' + visibility = conf['visibility'] + data = {"url": artifact_value, "visibility": visibility} + return url, headers, data + + +def getReport(conf, report_url): + report = requests.request('GET', report_url) + timeout = int(conf.get('timeout', 300))  # the YAML template renders this value as a string, so cast before comparing + counter = 0 + while report.status_code == 404: + time.sleep(2) + counter += 2 + if counter >= timeout: + break + report = requests.request('GET', report_url) + return report + + +def sendReq(url, headers, data): + submission = requests.request('POST', url=url, headers=headers, data=data).json() + report_url = submission['api'] + return report_url + + +def prepareResults(raw): + if raw and "verdicts" in raw: + if raw["verdicts"]["overall"]["malicious"] is True: + status = "threat" + summary = "malicious" + elif raw["verdicts"]["overall"]["score"] > 0: + status = "caution" + summary = "suspicious" + else: + status = "info" + summary = "Scan complete."
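+ # A missing or verdict-less report means the scan never completed within the timeout; the else below flags it as an internal failure.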
+ else: + status = "caution" + summary = "internal_failure" + + results = {'response': raw, 'status': status, 'summary': summary} + return results + + +def analyze(conf, input): + checkConfigRequirements(conf) + meta = helpers.loadMetadata(__file__) + data = helpers.parseArtifact(input) + helpers.checkSupportedType(meta, data["artifactType"]) + request = buildReq(conf, data["artifactType"], data["value"]) + report_url = sendReq(request[0], request[1], request[2]) + time.sleep(10) + report = getReport(conf, report_url) + return prepareResults(report.json()) + + +def main(): + dir = os.path.dirname(os.path.realpath(__file__)) + parser = argparse.ArgumentParser(description='Search Urlscan for a given artifact') + parser.add_argument('artifact', help='the artifact represented in JSON format') + parser.add_argument('-c', '--config', metavar="CONFIG_FILE", default=dir + "/urlscan.yaml", help='optional config file to use instead of the default config file') + + args = parser.parse_args() + if args.artifact: + results = analyze(helpers.loadConfig(args.config), args.artifact) + print(json.dumps(results)) + + +if __name__ == "__main__": + main() diff --git a/salt/sensoroni/files/analyzers/urlscan/urlscan.yaml b/salt/sensoroni/files/analyzers/urlscan/urlscan.yaml new file mode 100644 index 000000000..986a61359 --- /dev/null +++ b/salt/sensoroni/files/analyzers/urlscan/urlscan.yaml @@ -0,0 +1,5 @@ +base_url: https://urlscan.io/api/v1/ +api_key: "{{ salt['pillar.get']('sensoroni:analyzers:urlscan:api_key', '') }}" +enabled: "{{ salt['pillar.get']('sensoroni:analyzers:urlscan:enabled', 'False') }}" +visibility: "{{ salt['pillar.get']('sensoroni:analyzers:urlscan:visibility', 'public') }}" +timeout: "{{ salt['pillar.get']('sensoroni:analyzers:urlscan:timeout', '180') }}" diff --git a/salt/sensoroni/files/analyzers/urlscan/urlscan_test.py b/salt/sensoroni/files/analyzers/urlscan/urlscan_test.py new file mode 100644 index 000000000..487e6dbe3 --- /dev/null +++ b/salt/sensoroni/files/analyzers/urlscan/urlscan_test.py @@ -0,0 +1,121 @@ +from io import StringIO +import sys +from unittest.mock import patch, MagicMock, PropertyMock, call +from urlscan import urlscan +import unittest + + +class TestUrlScanMethods(unittest.TestCase): + + def test_main_missing_input(self): + with patch('sys.exit', new=MagicMock()) as sysmock: + with patch('sys.stderr', new=StringIO()) as mock_stderr: + sys.argv = ["cmd"] + urlscan.main() + self.assertEqual(mock_stderr.getvalue(), "usage: cmd [-h] [-c CONFIG_FILE] artifact\ncmd: error: the following arguments are required: artifact\n") + sysmock.assert_called_once_with(2) + + def test_main_success(self): + output = {"foo": "bar"} + with patch('sys.stdout', new=StringIO()) as mock_stdout: + with patch('urlscan.urlscan.analyze', new=MagicMock(return_value=output)) as mock: + sys.argv = ["cmd", "input"] + urlscan.main() + expected = '{"foo": "bar"}\n' + self.assertEqual(mock_stdout.getvalue(), expected) + mock.assert_called_once() + + def test_checkConfigRequirements_notEnabled(self): + conf = {"not_a_key": "abcd12345"} + with self.assertRaises(SystemExit) as cm: + urlscan.checkConfigRequirements(conf) + self.assertEqual(cm.exception.code, 126) + + def test_checkConfigRequirements_noApikey(self): + conf = {"enabled": True, "not_a_key": "abcd12345"} + with self.assertRaises(SystemExit) as cm: + urlscan.checkConfigRequirements(conf) + self.assertEqual(cm.exception.code, 126) + + def test_checkConfigRequirements_Exist(self): + conf = {"enabled": True, "api_key": "abcd12345"} +
config_exists = urlscan.checkConfigRequirements(conf) + self.assertTrue(config_exists) + + def test_buildReq(self): + conf = {'base_url': 'https://myurl/api/v1/', 'api_key': 'abcd12345', 'visibility': 'public'} + artifact_type = "url" + artifact_value = "https://abc.com" + result = urlscan.buildReq(conf, artifact_type, artifact_value) + self.assertEqual("https://myurl/api/v1/scan/", result[0]) + self.assertEqual({'API-Key': 'abcd12345'}, result[1]) + + def test_sendReq(self): + with patch('requests.request', new=MagicMock(return_value=MagicMock())) as mock: + headers = {"API-Key": "abcd1234"} + data = {"url": "https://urlscan.io", "visibility": "public"} + response = urlscan.sendReq("https://myurl", headers=headers, data=data) + mock.assert_called_once_with("POST", url="https://myurl", headers={"API-Key": "abcd1234"}, data={"url": "https://urlscan.io", "visibility": "public"}) + self.assertIsNotNone(response) + + def test_getReport_noRetry(self): + output_report = MagicMock() + type(output_report).status_code = PropertyMock(return_value=404) + output_report_body = {"requests": "body"} + output_report.json.return_value = output_report_body + with patch('requests.request', new=MagicMock(return_value=output_report)) as mock: + result = urlscan.getReport({'timeout': 0}, "https://abc.com/report") + self.assertEqual(404, result.status_code) + mock.assert_called_once() + + def test_getReport_withRetry(self): + output_report = MagicMock() + type(output_report).status_code = PropertyMock(return_value=404) + output_report_body = {"requests": "body"} + output_report.json.return_value = output_report_body + with patch('requests.request', new=MagicMock(return_value=output_report)) as mock: + result = urlscan.getReport({'timeout': 3}, "https://abc.com/report") + self.assertEqual(404, result.status_code) + mock.assert_has_calls([call('GET', 'https://abc.com/report'), call('GET', 'https://abc.com/report')]) + + def test_prepareResults_sus(self): + raw = {"requests": [{"request": {"requestId": "1"}}], "verdicts": {"overall": {"score": 50, "malicious": False, "hasVerdicts": False}}} + results = urlscan.prepareResults(raw) + self.assertEqual(results["response"], raw) + self.assertEqual(results["summary"], "suspicious") + self.assertEqual(results["status"], "caution") + + def test_prepareResults_mal(self): + raw = {"requests": [{"request": {"requestId": "2"}}], "verdicts": {"overall": {"score": 100, "malicious": True, "hasVerdicts": False}}} + results = urlscan.prepareResults(raw) + self.assertEqual(results["response"], raw) + self.assertEqual(results["summary"], "malicious") + self.assertEqual(results["status"], "threat") + + def test_prepareResults_info(self): + raw = {"requests": [{"request": {"requestId": "3"}}], "verdicts": {"overall": {"score": 0, "malicious": False, "hasVerdicts": False}}} + results = urlscan.prepareResults(raw) + self.assertEqual(results["response"], raw) + self.assertEqual(results["summary"], "Scan complete.") + self.assertEqual(results["status"], "info") + + def test_prepareResults_error(self): + raw = {} + results = urlscan.prepareResults(raw) + self.assertEqual(results["response"], raw) + self.assertEqual(results["summary"], "internal_failure") + self.assertEqual(results["status"], "caution") + + def test_analyze(self): + output_req = "https://myurl/report" + output_report = MagicMock() + output_report_body = {"requests": [{"request": {"requestId": "3"}}], "verdicts": {"overall": {"score": 0, "malicious": False, "hasVerdicts": False}}} + output_report.json.return_value = 
output_report_body + artifactInput = '{"value":"https://abc.com","artifactType":"url"}' + conf = {'enabled': True, 'base_url': 'https://myurl/api/v1/', 'api_key': 'abcd12345', 'visibility': 'public'} + with patch('urlscan.urlscan.sendReq', new=MagicMock(return_value=output_req)) as mock_req: + with patch('urlscan.urlscan.getReport', new=MagicMock(return_value=output_report)) as mock_report: + with patch('time.sleep', new=MagicMock()):  # skip the analyzer's 10-second scan wait during unit tests + results = urlscan.analyze(conf, artifactInput) + self.assertEqual(results["summary"], "Scan complete.") + mock_req.assert_called_once() + mock_report.assert_called_once() diff --git a/salt/sensoroni/files/analyzers/virustotal/__init__.py b/salt/sensoroni/files/analyzers/virustotal/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/salt/sensoroni/files/analyzers/virustotal/requirements.txt b/salt/sensoroni/files/analyzers/virustotal/requirements.txt new file mode 100644 index 000000000..a8980057f --- /dev/null +++ b/salt/sensoroni/files/analyzers/virustotal/requirements.txt @@ -0,0 +1,2 @@ +requests>=2.27.1 +pyyaml>=6.0 diff --git a/salt/sensoroni/files/analyzers/virustotal/source-packages/PyYAML-6.0-cp37-cp37m-macosx_10_9_x86_64.whl b/salt/sensoroni/files/analyzers/virustotal/source-packages/PyYAML-6.0-cp37-cp37m-macosx_10_9_x86_64.whl new file mode 100644 index 000000000..b390ce52f Binary files /dev/null and b/salt/sensoroni/files/analyzers/virustotal/source-packages/PyYAML-6.0-cp37-cp37m-macosx_10_9_x86_64.whl differ diff --git a/salt/sensoroni/files/analyzers/virustotal/source-packages/certifi-2021.10.8-py2.py3-none-any.whl b/salt/sensoroni/files/analyzers/virustotal/source-packages/certifi-2021.10.8-py2.py3-none-any.whl new file mode 100644 index 000000000..fbcb86b5f Binary files /dev/null and b/salt/sensoroni/files/analyzers/virustotal/source-packages/certifi-2021.10.8-py2.py3-none-any.whl differ diff --git a/salt/sensoroni/files/analyzers/virustotal/source-packages/charset_normalizer-2.0.12-py3-none-any.whl b/salt/sensoroni/files/analyzers/virustotal/source-packages/charset_normalizer-2.0.12-py3-none-any.whl new file mode 100644 index 000000000..17a2dfbeb Binary files /dev/null and b/salt/sensoroni/files/analyzers/virustotal/source-packages/charset_normalizer-2.0.12-py3-none-any.whl differ diff --git a/salt/sensoroni/files/analyzers/virustotal/source-packages/idna-3.3-py3-none-any.whl b/salt/sensoroni/files/analyzers/virustotal/source-packages/idna-3.3-py3-none-any.whl new file mode 100644 index 000000000..060541bc9 Binary files /dev/null and b/salt/sensoroni/files/analyzers/virustotal/source-packages/idna-3.3-py3-none-any.whl differ diff --git a/salt/sensoroni/files/analyzers/virustotal/source-packages/requests-2.27.1-py2.py3-none-any.whl b/salt/sensoroni/files/analyzers/virustotal/source-packages/requests-2.27.1-py2.py3-none-any.whl new file mode 100644 index 000000000..807fc6110 Binary files /dev/null and b/salt/sensoroni/files/analyzers/virustotal/source-packages/requests-2.27.1-py2.py3-none-any.whl differ diff --git a/salt/sensoroni/files/analyzers/virustotal/source-packages/urllib3-1.26.9-py2.py3-none-any.whl b/salt/sensoroni/files/analyzers/virustotal/source-packages/urllib3-1.26.9-py2.py3-none-any.whl new file mode 100644 index 000000000..5019453dd Binary files /dev/null and b/salt/sensoroni/files/analyzers/virustotal/source-packages/urllib3-1.26.9-py2.py3-none-any.whl differ diff --git a/salt/sensoroni/files/analyzers/virustotal/virustotal.json b/salt/sensoroni/files/analyzers/virustotal/virustotal.json new file mode 100644 index 000000000..e26d67b4b --- /dev/null
+++ b/salt/sensoroni/files/analyzers/virustotal/virustotal.json @@ -0,0 +1,7 @@ +{ + "name": "VirusTotal", + "version": "0.1", + "author": "Security Onion Solutions", + "description": "This analyzer queries VirusTotal to see if a string value is considered malicious.", + "supportedTypes" : ["domain", "hash", "ip", "url"] +} diff --git a/salt/sensoroni/files/analyzers/virustotal/virustotal.py b/salt/sensoroni/files/analyzers/virustotal/virustotal.py new file mode 100644 index 000000000..f06c5d14f --- /dev/null +++ b/salt/sensoroni/files/analyzers/virustotal/virustotal.py @@ -0,0 +1,91 @@ +import json +import requests +import argparse +import helpers +import os +import sys + + +def checkConfigRequirements(conf): + if "api_key" not in conf or len(conf['api_key']) == 0: + sys.exit(126) + else: + return True + + +def buildHeaders(conf): + headers = {"x-apikey": conf["api_key"]} + return headers + + +def sendReq(conf, meta, payload, headers): + url = conf['base_url'] + response = requests.request('GET', url + payload, headers=headers) + return response.json() + + +def prepareResults(raw): + malicious = 0 + harmless = 0 + undetected = 0 + suspicious = 0 + timeout = 0 + + if "data" in raw: + entries = raw["data"] + for data in entries: + if "attributes" in data: + attrs = data["attributes"] + if "last_analysis_stats" in attrs: + stats = attrs["last_analysis_stats"] + if len(stats) > 0: + suspicious += stats["suspicious"] + malicious += stats["malicious"] + harmless += stats["harmless"] + undetected += stats["undetected"] + timeout += stats["timeout"] + + if malicious > 0: + summary = "malicious" + status = "threat" + elif suspicious > 0: + summary = "suspicious" + status = "caution" + elif timeout > 0: + summary = "timeout" + status = "caution" + elif harmless > 0 or undetected > 0: + summary = "harmless" + status = "ok" + else: + summary = "internal_failure" + status = "caution" + + results = {'response': raw, 'summary': summary, 'status': status} + return results + + +def analyze(conf, input): + checkConfigRequirements(conf) + meta = helpers.loadMetadata(__file__) + data = helpers.parseArtifact(input) + helpers.checkSupportedType(meta, data["artifactType"]) + headers = buildHeaders(conf) + response = sendReq(conf, meta, data["value"], headers) + return prepareResults(response) + + +def main(): + dir = os.path.dirname(os.path.realpath(__file__)) + parser = argparse.ArgumentParser(description='Search VirusTotal for a given artifact') + parser.add_argument('artifact', help='the artifact represented in JSON format') + parser.add_argument('-c', '--config', metavar="CONFIG_FILE", default=dir + "/virustotal.yaml", help='optional config file to use instead of the default config file') + + args = parser.parse_args() + if args.artifact: + results = analyze(helpers.loadConfig(args.config), args.artifact) + print(json.dumps(results)) + + +if __name__ == "__main__": + main() diff --git a/salt/sensoroni/files/analyzers/virustotal/virustotal.yaml b/salt/sensoroni/files/analyzers/virustotal/virustotal.yaml new file mode 100644 index 000000000..0eb017842 --- /dev/null +++ b/salt/sensoroni/files/analyzers/virustotal/virustotal.yaml @@ -0,0 +1,2 @@ +base_url: https://www.virustotal.com/api/v3/search?query= +api_key: "{{ salt['pillar.get']('sensoroni:analyzers:virustotal:api_key', '') }}" \ No newline at end of file diff --git a/salt/sensoroni/files/analyzers/virustotal/virustotal_test.py b/salt/sensoroni/files/analyzers/virustotal/virustotal_test.py new file mode 100644 index 000000000..103e137ce --- /dev/null +++ 
b/salt/sensoroni/files/analyzers/virustotal/virustotal_test.py @@ -0,0 +1,155 @@ +from io import StringIO +import sys +from unittest.mock import patch, MagicMock +from virustotal import virustotal +import unittest + + +class TestVirusTotalMethods(unittest.TestCase): + + def test_main_missing_input(self): + with patch('sys.exit', new=MagicMock()) as sysmock: + with patch('sys.stderr', new=StringIO()) as mock_stderr: + sys.argv = ["cmd"] + virustotal.main() + self.assertEqual(mock_stderr.getvalue(), "usage: cmd [-h] [-c CONFIG_FILE] artifact\ncmd: error: the following arguments are required: artifact\n") + sysmock.assert_called_once_with(2) + + def test_main_success(self): + output = {"foo": "bar"} + with patch('sys.stdout', new=StringIO()) as mock_stdout: + with patch('virustotal.virustotal.analyze', new=MagicMock(return_value=output)) as mock: + sys.argv = ["cmd", "input"] + virustotal.main() + expected = '{"foo": "bar"}\n' + self.assertEqual(mock_stdout.getvalue(), expected) + mock.assert_called_once() + + def test_checkConfigRequirements(self): + conf = {"not_a_key": "abcd12345"} + with self.assertRaises(SystemExit) as cm: + virustotal.checkConfigRequirements(conf) + self.assertEqual(cm.exception.code, 126) + + def test_buildHeaders(self): + result = virustotal.buildHeaders({"api_key": "xyz"}) + self.assertEqual("xyz", result["x-apikey"]) + + def test_sendReq(self): + with patch('requests.request', new=MagicMock(return_value=MagicMock())) as mock: + meta = {} + conf = {"base_url": "myurl="} + response = virustotal.sendReq(conf=conf, meta=meta, payload="mypayload", headers={"x-apikey": "xyz"}) + mock.assert_called_once_with("GET", "myurl=mypayload", headers={"x-apikey": "xyz"}) + self.assertIsNotNone(response) + + def test_prepareResults_timeout(self): + raw = {"data": [{"attributes": {"last_analysis_stats": { + "harmless": 1, + "malicious": 0, + "suspicious": 0, + "undetected": 1, + "timeout": 1 + }}}, {"attributes": {"last_analysis_stats": { + "harmless": 7, + "malicious": 0, + "suspicious": 0, + "undetected": 11, + "timeout": 0 + }}}]} + results = virustotal.prepareResults(raw) + self.assertEqual(results["response"], raw) + self.assertEqual(results["summary"], "timeout") + self.assertEqual(results["status"], "caution") + + def test_prepareResults_ok_multiple(self): + raw = {"data": [{"attributes": {"last_analysis_stats": { + "harmless": 1, + "malicious": 0, + "suspicious": 0, + "undetected": 0, + "timeout": 0 + }}}, {"attributes": {"last_analysis_stats": { + "harmless": 7, + "malicious": 0, + "suspicious": 0, + "undetected": 11, + "timeout": 0 + }}}]} + results = virustotal.prepareResults(raw) + self.assertEqual(results["response"], raw) + self.assertEqual(results["summary"], "harmless") + self.assertEqual(results["status"], "ok") + + def test_prepareResults_sus_multiple(self): + raw = {"data": [{"attributes": {"last_analysis_stats": { + "harmless": 10, + "malicious": 0, + "suspicious": 2, + "undetected": 0, + "timeout": 0 + }}}, {"attributes": {"last_analysis_stats": { + "harmless": 76, + "malicious": 0, + "suspicious": 1, + "undetected": 11, + "timeout": 0 + }}}]} + results = virustotal.prepareResults(raw) + self.assertEqual(results["response"], raw) + self.assertEqual(results["summary"], "suspicious") + self.assertEqual(results["status"], "caution") + + def test_prepareResults_threat_multiple(self): + raw = {"data": [{"attributes": {"last_analysis_stats": { + "harmless": 1, + "malicious": 0, + "suspicious": 0, + "undetected": 0, + "timeout": 0 + }}}, {"attributes": 
{"last_analysis_stats": { + "harmless": 76, + "malicious": 5, + "suspicious": 1, + "undetected": 11, + "timeout": 0 + }}}]} + results = virustotal.prepareResults(raw) + self.assertEqual(results["response"], raw) + self.assertEqual(results["summary"], "malicious") + self.assertEqual(results["status"], "threat") + + def test_prepareResults_threat(self): + raw = {"data": [{"attributes": {"last_analysis_stats": { + "harmless": 76, + "malicious": 5, + "suspicious": 1, + "undetected": 11, + "timeout": 0 + }}}]} + results = virustotal.prepareResults(raw) + self.assertEqual(results["response"], raw) + self.assertEqual(results["summary"], "malicious") + self.assertEqual(results["status"], "threat") + + def test_prepareResults_error(self): + raw = {} + results = virustotal.prepareResults(raw) + self.assertEqual(results["response"], raw) + self.assertEqual(results["summary"], "internal_failure") + self.assertEqual(results["status"], "caution") + + def test_analyze(self): + output = {"data": [{"attributes": {"last_analysis_stats": { + "harmless": 0, + "malicious": 0, + "suspicious": 0, + "undetected": 1, + "timeout": 0 + }}}]} + artifactInput = '{"value":"foo","artifactType":"url"}' + conf = {"api_key": "xyz"} + with patch('virustotal.virustotal.sendReq', new=MagicMock(return_value=output)) as mock: + results = virustotal.analyze(conf, artifactInput) + self.assertEqual(results["summary"], "harmless") + mock.assert_called_once() diff --git a/salt/sensoroni/files/sensoroni.json b/salt/sensoroni/files/sensoroni.json index 743021a7d..04f2abf93 100644 --- a/salt/sensoroni/files/sensoroni.json +++ b/salt/sensoroni/files/sensoroni.json @@ -2,6 +2,8 @@ {%- set DESCRIPTION = salt['pillar.get']('sensoroni:node_description', '') %} {%- set MODEL = salt['grains.get']('sosmodel', '') %} {%- set ADDRESS = salt['pillar.get']('sensoroni:node_address') %} +{%- set ANALYZE_TIMEOUT_MS = salt['pillar.get']('sensoroni:analyze_timeout_ms', 900000) %} +{%- set ANALYZE_PARALLEL_LIMIT = salt['pillar.get']('sensoroni:analyze_parallel_limit', 5) %} {%- set SENSORONIKEY = salt['pillar.get']('global:sensoronikey', '') %} {%- set CHECKININTERVALMS = salt['pillar.get']('sensoroni:node_checkin_interval_ms', 10000) %} {%- set ROLE = grains.id.split('_') | last %} @@ -11,6 +13,12 @@ {%- set STENODEFAULT = False %} {%- endif %} {%- set STENOENABLED = salt['pillar.get']('steno:enabled', STENODEFAULT) %} +{%- if ROLE in ['eval', 'standalone', 'import', 'manager', 'managersearch'] %} +{%- set ANALYZEDEFAULT = True %} +{%- else %} +{%- set ANALYZEDEFAULT = False %} +{%- endif %} +{%- set ANALYZEENABLED = salt['pillar.get']('sensoroni:analyze_enabled', ANALYZEDEFAULT) %} { "logFilename": "/opt/sensoroni/logs/sensoroni.log", "logLevel":"info", @@ -24,6 +32,12 @@ "serverUrl": "https://{{ URLBASE }}/sensoroniagents", "verifyCert": false, "modules": { +{%- if ANALYZEENABLED %} + "analyze": { + "timeoutMs": {{ ANALYZE_TIMEOUT_MS }}, + "parallelLimit": {{ ANALYZE_PARALLEL_LIMIT }} + }, +{%- endif %} "importer": {}, "statickeyauth": { "apiKey": "{{ SENSORONIKEY }}" diff --git a/salt/sensoroni/init.sls b/salt/sensoroni/init.sls index 1405c72bf..6d49d33ab 100644 --- a/salt/sensoroni/init.sls +++ b/salt/sensoroni/init.sls @@ -18,6 +18,13 @@ sensoroniagentconf: - mode: 600 - template: jinja +analyzersdir: + file.directory: + - name: /opt/so/conf/sensoroni/analyzers + - user: 939 + - group: 939 + - makedirs: True + sensoronilog: file.directory: - name: /opt/so/log/sensoroni @@ -25,6 +32,15 @@ sensoronilog: - group: 939 - makedirs: True 
+analyzerscripts: + file.recurse: + - name: /opt/so/conf/sensoroni/analyzers + - user: 939 + - group: 939 + - file_mode: 755 + - template: jinja + - source: salt://sensoroni/files/analyzers + so-sensoroni: docker_container.running: - image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-soc:{{ VERSION }} @@ -35,6 +51,7 @@ so-sensoroni: - /nsm/import:/nsm/import:rw - /nsm/pcapout:/nsm/pcapout:rw - /opt/so/conf/sensoroni/sensoroni.json:/opt/sensoroni/sensoroni.json:ro + - /opt/so/conf/sensoroni/analyzers:/opt/sensoroni/analyzers:rw + - /opt/so/log/sensoroni:/opt/sensoroni/logs:rw - watch: - file: /opt/so/conf/sensoroni/sensoroni.json diff --git a/salt/soc/files/soc/dashboards.queries.json b/salt/soc/files/soc/dashboards.queries.json new file mode 100644 index 000000000..14247721c --- /dev/null +++ b/salt/soc/files/soc/dashboards.queries.json @@ -0,0 +1,45 @@ +[ + { "name": "Overview", "description": "Show all events grouped by dataset, module, category, sensor, and IP addresses", "query": "* | groupby event.dataset | groupby event.module | groupby event.category | groupby observer.name | groupby source.ip | groupby destination.ip"}, + { "name": "Elastalerts", "description": "", "query": "_type:elastalert | groupby rule.name"}, + { "name": "Alerts", "description": "Show all alerts", "query": "event.dataset: alert | groupby event.module | groupby rule.name | groupby event.severity | groupby source.ip | groupby destination.ip | groupby destination.port"}, + { "name": "NIDS Alerts", "description": "NIDS alerts", "query": "event.category: network AND event.dataset: alert | groupby rule.category | groupby rule.gid | groupby rule.uuid | groupby rule.name | groupby source.ip | groupby destination.ip | groupby destination.port"}, + { "name": "Wazuh/OSSEC", "description": "Wazuh/OSSEC HIDS alerts and logs", "query": "event.module:ossec | groupby rule.category | groupby rule.uuid | groupby rule.name | groupby agent.id | groupby agent.name | groupby log.full"}, + { "name": "Sysmon", "description": "Sysmon logs", "query": "event.module:sysmon | groupby event.dataset | groupby user.name | groupby process.executable | groupby process.command_line | groupby process.parent.command_line"}, + { "name": "Strelka", "description": "Strelka logs", "query": "event.module:strelka | groupby file.mime_type | groupby file.name | groupby file.source"}, + { "name": "Zeek Notice", "description": "Zeek Notice logs", "query": "event.dataset:notice | groupby notice.note | groupby notice.message | groupby notice.sub_message | groupby source.ip | groupby destination.ip | groupby destination.port"}, + { "name": "Connections", "description": "Connection logs", "query": "event.dataset:conn | groupby source.ip | groupby destination.ip | groupby destination.port | groupby network.protocol | groupby network.transport | groupby connection.history | groupby connection.state | groupby connection.state_description | groupby source.geo.country_name | groupby destination.geo.country_name"}, + { "name": "DCE_RPC", "description": "DCE_RPC logs", "query": "event.dataset:dce_rpc | groupby dce_rpc.operation | groupby dce_rpc.endpoint | groupby dce_rpc.named_pipe | groupby source.ip | groupby destination.ip | groupby destination.port"}, + { "name": "DHCP", "description": "Dynamic Host Configuration Protocol leases", "query": "event.dataset:dhcp | groupby host.hostname | groupby host.domain | groupby dhcp.message_types | groupby client.address | groupby server.address"}, + { "name": "DNP3", "description": "DNP3 logs", "query": "event.dataset:dnp3 | groupby dnp3.fc_request | groupby
dnp3.fc_reply | groupby dnp3.iin | groupby source.ip | groupby destination.ip | groupby destination.port"}, + { "name": "DNS", "description": "Domain Name System queries", "query": "event.dataset:dns | groupby dns.query.name | groupby dns.query.type_name | groupby dns.response.code_name | groupby source.ip | groupby destination.ip | groupby destination.port"}, + { "name": "DPD", "description": "Dynamic Protocol Detection errors", "query": "event.dataset:dpd | groupby error.reason | groupby source.ip | groupby destination.ip | groupby destination.port | groupby network.protocol"}, + { "name": "Files", "description": "Files seen in network traffic", "query": "event.dataset:file | groupby file.mime_type | groupby file.source | groupby file.bytes.total | groupby source.ip | groupby destination.ip"}, + { "name": "FTP", "description": "File Transfer Protocol logs", "query": "event.dataset:ftp | groupby ftp.command | groupby ftp.argument | groupby ftp.user | groupby source.ip | groupby destination.ip | groupby destination.port"}, + { "name": "HTTP", "description": "Hyper Text Transport Protocol logs", "query": "event.dataset:http | groupby http.method | groupby http.status_code | groupby http.status_message | groupby http.useragent | groupby http.virtual_host | groupby http.uri | groupby source.ip | groupby destination.ip | groupby destination.port"}, + { "name": "Intel", "description": "Zeek Intel framework hits", "query": "event.dataset:intel | groupby intel.indicator | groupby intel.indicator_type | groupby intel.seen_where | groupby source.ip | groupby destination.ip | groupby destination.port"}, + { "name": "IRC", "description": "Internet Relay Chat logs", "query": "event.dataset:irc | groupby irc.command.type | groupby irc.username | groupby irc.nickname | groupby irc.command.value | groupby irc.command.info | groupby source.ip | groupby destination.ip | groupby destination.port"}, + { "name": "KERBEROS", "description": "KERBEROS logs", "query": "event.dataset:kerberos | groupby kerberos.service | groupby kerberos.client | groupby kerberos.request_type | groupby source.ip | groupby destination.ip | groupby destination.port"}, + { "name": "MODBUS", "description": "MODBUS logs", "query": "event.dataset:modbus | groupby modbus.function | groupby source.ip | groupby destination.ip | groupby destination.port"}, + { "name": "MYSQL", "description": "MYSQL logs", "query": "event.dataset:mysql | groupby mysql.command | groupby mysql.argument | groupby mysql.success | groupby mysql.response | groupby mysql.rows | groupby source.ip | groupby destination.ip | groupby destination.port"}, + { "name": "NOTICE", "description": "Zeek notice logs", "query": "event.dataset:notice | groupby notice.note | groupby notice.message | groupby notice.sub_message | groupby source.ip | groupby destination.ip | groupby destination.port"}, + { "name": "NTLM", "description": "NTLM logs", "query": "event.dataset:ntlm | groupby ntlm.server.dns.name | groupby ntlm.server.nb.name | groupby ntlm.server.tree.name | groupby ntlm.success | groupby source.ip | groupby destination.ip | groupby destination.port"}, + { "name": "Osquery Live Queries", "description": "Osquery Live Query results", "query": "event.dataset:live_query | groupby host.hostname"}, + { "name": "PE", "description": "PE files list", "query": "event.dataset:pe | groupby file.machine | groupby file.os | groupby file.subsystem | groupby file.section_names | groupby file.is_exe | groupby file.is_64bit"}, + { "name": "RADIUS", "description": "RADIUS logs", "query": 
"event.dataset:radius | groupby user.name.keyword | groupby source.ip | groupby destination.ip | groupby destination.port"}, + { "name": "RDP", "description": "RDP logs", "query": "event.dataset:rdp | groupby client.name | groupby source.ip | groupby destination.ip | groupby destination.port"}, + { "name": "RFB", "description": "RFB logs", "query": "event.dataset:rfb | groupby rfb.desktop.name.keyword | groupby source.ip | groupby destination.ip | groupby destination.port"}, + { "name": "Signatures", "description": "Zeek signatures", "query": "event.dataset:signatures | groupby signature_id"}, + { "name": "SIP", "description": "SIP logs", "query": "event.dataset:sip | groupby client.user_agent | groupby sip.method | groupby sip.uri | groupby source.ip | groupby destination.ip | groupby destination.port"}, + { "name": "SMB_Files", "description": "SMB files", "query": "event.dataset:smb_files | groupby file.action | groupby file.path | groupby file.name | groupby source.ip | groupby destination.ip | groupby destination.port"}, + { "name": "SMB_Mapping", "description": "SMB mapping logs", "query": "event.dataset:smb_mapping | groupby smb.share_type | groupby smb.path | groupby smb.service | groupby source.ip | groupby destination.ip | groupby destination.port"}, + { "name": "SMTP", "description": "SMTP logs", "query": "event.dataset:smtp | groupby smtp.from | groupby smtp.recipient_to | groupby smtp.subject | groupby source.ip | groupby destination.ip | groupby destination.port"}, + { "name": "SNMP", "description": "SNMP logs", "query": "event.dataset:snmp | groupby snmp.community | groupby snmp.version | groupby source.ip | groupby destination.ip | groupby destination.port"}, + { "name": "Software", "description": "List of software seen on the network by Zeek", "query": "event.dataset:software | groupby software.type | groupby software.name | groupby source.ip"}, + { "name": "SSH", "description": "SSH connections seen by Zeek", "query": "event.dataset:ssh | groupby ssh.client | groupby ssh.server | groupby ssh.direction | groupby ssh.version | groupby ssh.hassh_version | groupby source.ip | groupby destination.ip | groupby destination.port"}, + { "name": "SSL", "description": "SSL logs", "query": "event.dataset:ssl | groupby ssl.version | groupby ssl.validation_status | groupby ssl.server_name | groupby ssl.certificate.issuer | groupby ssl.certificate.subject | groupby source.ip | groupby destination.ip | groupby destination.port"}, + { "name": "SYSLOG", "description": "SYSLOG logs", "query": "event.dataset:syslog | groupby syslog.severity_label | groupby syslog.facility_label | groupby network.protocol | groupby source.ip | groupby destination.ip | groupby destination.port"}, + { "name": "Tunnel", "description": "Tunnels seen by Zeek", "query": "event.dataset:tunnel | groupby tunnel.type | groupby event.action | groupby source.ip | groupby destination.ip | groupby destination.port"}, + { "name": "Weird", "description": "Weird network traffic seen by Zeek", "query": "event.dataset:weird | groupby weird.name | groupby weird.additional_info | groupby source.ip | groupby destination.ip | groupby destination.port "}, + { "name": "x509", "description": "x.509 certificates seen by Zeek", "query": "event.dataset:x509 | groupby x509.certificate.key.length | groupby x509.san_dns | groupby x509.certificate.key.type | groupby x509.certificate.subject | groupby x509.certificate.issuer"}, + { "name": "Firewall", "description": "Firewall logs", "query": "event.dataset:firewall | groupby rule.action | 
groupby interface.name | groupby network.transport | groupby source.ip | groupby destination.ip | groupby destination.port"} +] \ No newline at end of file diff --git a/salt/soc/files/soc/hunt.queries.json b/salt/soc/files/soc/hunt.queries.json index 5a76e0fa1..01692ad0b 100644 --- a/salt/soc/files/soc/hunt.queries.json +++ b/salt/soc/files/soc/hunt.queries.json @@ -1,67 +1,67 @@ [ - { "name": "Default Query", "description": "Show all events grouped by the origin host", "query": "* | groupby observer.name"}, - { "name": "Log Type", "description": "Show all events grouped by module and dataset", "query": "* | groupby event.module event.dataset"}, - { "name": "SOC Auth", "description": "Users authenticated to SOC grouped by IP address and identity", "query": "event.module:kratos AND event.dataset:audit AND msg:authenticated | groupby http_request.headers.x-real-ip identity_id"}, - { "name": "Elastalerts", "description": "", "query": "_type:elastalert | groupby rule.name"}, - { "name": "Alerts", "description": "Show all alerts grouped by alert source", "query": "event.dataset: alert | groupby event.module"}, - { "name": "NIDS Alerts", "description": "Show all NIDS alerts grouped by alert", "query": "event.category: network AND event.dataset: alert | groupby rule.category rule.gid rule.uuid rule.name"}, - { "name": "Wazuh/OSSEC Alerts", "description": "Show all Wazuh alerts at Level 5 or higher grouped by category", "query": "event.module:ossec AND event.dataset:alert AND rule.level:>4 | groupby rule.category rule.name"}, - { "name": "Wazuh/OSSEC Alerts", "description": "Show all Wazuh alerts at Level 4 or lower grouped by category", "query": "event.module:ossec AND event.dataset:alert AND rule.level:<5 | groupby rule.category rule.name"}, - { "name": "Wazuh/OSSEC Users and Commands", "description": "Show all Wazuh alerts grouped by username and command line", "query": "event.module:ossec AND event.dataset:alert | groupby user.escalated.keyword process.command_line"}, - { "name": "Wazuh/OSSEC Processes", "description": "Show all Wazuh alerts grouped by process name", "query": "event.module:ossec AND event.dataset:alert | groupby process.name"}, - { "name": "Sysmon Events", "description": "Show all Sysmon logs grouped by event type", "query": "event.module:sysmon | groupby event.dataset"}, - { "name": "Sysmon Usernames", "description": "Show all Sysmon logs grouped by username", "query": "event.module:sysmon | groupby event.dataset, user.name.keyword"}, - { "name": "Strelka", "description": "Show all Strelka logs grouped by file type", "query": "event.module:strelka | groupby file.mime_type"}, - { "name": "Zeek Notice", "description": "Show notices from Zeek", "query": "event.dataset:notice | groupby notice.note notice.message"}, - { "name": "Connections", "description": "Connections grouped by IP and Port", "query": "event.dataset:conn | groupby source.ip destination.ip network.protocol destination.port"}, - { "name": "Connections", "description": "Connections grouped by Service", "query": "event.dataset:conn | groupby network.protocol destination.port"}, - { "name": "Connections", "description": "Connections grouped by destination country", "query": "event.dataset:conn | groupby destination.geo.country_name"}, - { "name": "Connections", "description": "Connections grouped by source country", "query": "event.dataset:conn | groupby source.geo.country_name"}, - { "name": "DCE_RPC", "description": "DCE_RPC grouped by operation", "query": "event.dataset:dce_rpc | groupby dce_rpc.operation"}, - { 
"name": "DHCP", "description": "DHCP leases", "query": "event.dataset:dhcp | groupby host.hostname client.address"}, - { "name": "DHCP", "description": "DHCP grouped by message type", "query": "event.dataset:dhcp | groupby dhcp.message_types"}, - { "name": "DNP3", "description": "DNP3 grouped by reply", "query": "event.dataset:dnp3 | groupby dnp3.fc_reply"}, - { "name": "DNS", "description": "DNS queries grouped by port", "query": "event.dataset:dns | groupby dns.query.name destination.port"}, - { "name": "DNS", "description": "DNS queries grouped by type", "query": "event.dataset:dns | groupby dns.query.type_name destination.port"}, - { "name": "DNS", "description": "DNS queries grouped by response code", "query": "event.dataset:dns | groupby dns.response.code_name destination.port"}, - { "name": "DNS", "description": "DNS highest registered domain", "query": "event.dataset:dns | groupby dns.highest_registered_domain.keyword destination.port"}, - { "name": "DNS", "description": "DNS grouped by parent domain", "query": "event.dataset:dns | groupby dns.parent_domain.keyword destination.port"}, - { "name": "DPD", "description": "Dynamic Protocol Detection errors", "query": "event.dataset:dpd | groupby error.reason"}, - { "name": "Files", "description": "Files grouped by mimetype", "query": "event.dataset:file | groupby file.mime_type source.ip"}, - { "name": "Files", "description": "Files grouped by source", "query": "event.dataset:file | groupby file.source source.ip"}, - { "name": "FTP", "description": "FTP grouped by command and argument", "query": "event.dataset:ftp | groupby ftp.command ftp.argument"}, - { "name": "FTP", "description": "FTP grouped by username and argument", "query": "event.dataset:ftp | groupby ftp.user ftp.argument"}, - { "name": "HTTP", "description": "HTTP grouped by destination port", "query": "event.dataset:http | groupby destination.port"}, - { "name": "HTTP", "description": "HTTP grouped by status code and message", "query": "event.dataset:http | groupby http.status_code http.status_message"}, - { "name": "HTTP", "description": "HTTP grouped by method and user agent", "query": "event.dataset:http | groupby http.method http.useragent"}, - { "name": "HTTP", "description": "HTTP grouped by virtual host", "query": "event.dataset:http | groupby http.virtual_host"}, - { "name": "HTTP", "description": "HTTP with exe downloads", "query": "event.dataset:http AND (file.resp_mime_types:dosexec OR file.resp_mime_types:executable) | groupby http.virtual_host"}, - { "name": "Intel", "description": "Intel framework hits grouped by indicator", "query": "event.dataset:intel | groupby intel.indicator.keyword"}, - { "name": "IRC", "description": "IRC grouped by command", "query": "event.dataset:irc | groupby irc.command.type"}, - { "name": "KERBEROS", "description": "KERBEROS grouped by service", "query": "event.dataset:kerberos | groupby kerberos.service"}, - { "name": "MODBUS", "description": "MODBUS grouped by function", "query": "event.dataset:modbus | groupby modbus.function"}, - { "name": "MYSQL", "description": "MYSQL grouped by command", "query": "event.dataset:mysql | groupby mysql.command"}, - { "name": "NOTICE", "description": "Zeek notice logs grouped by note and message", "query": "event.dataset:notice | groupby notice.note notice.message"}, - { "name": "NTLM", "description": "NTLM grouped by computer name", "query": "event.dataset:ntlm | groupby ntlm.server.dns.name"}, - { "name": "Osquery Live Queries", "description": "Osquery Live Query results grouped by computer 
name", "query": "event.dataset:live_query | groupby host.hostname"}, - { "name": "PE", "description": "PE files list", "query": "event.dataset:pe | groupby file.machine file.os file.subsystem"}, - { "name": "RADIUS", "description": "RADIUS grouped by username", "query": "event.dataset:radius | groupby user.name.keyword"}, - { "name": "RDP", "description": "RDP grouped by client name", "query": "event.dataset:rdp | groupby client.name"}, - { "name": "RFB", "description": "RFB grouped by desktop name", "query": "event.dataset:rfb | groupby rfb.desktop.name.keyword"}, - { "name": "Signatures", "description": "Zeek signatures grouped by signature id", "query": "event.dataset:signatures | groupby signature_id"}, - { "name": "SIP", "description": "SIP grouped by user agent", "query": "event.dataset:sip | groupby client.user_agent"}, - { "name": "SMB_Files", "description": "SMB files grouped by action", "query": "event.dataset:smb_files | groupby file.action"}, - { "name": "SMB_Mapping", "description": "SMB mapping grouped by path", "query": "event.dataset:smb_mapping | groupby smb.path"}, - { "name": "SMTP", "description": "SMTP grouped by subject", "query": "event.dataset:smtp | groupby smtp.subject"}, - { "name": "SNMP", "description": "SNMP grouped by version and string", "query": "event.dataset:snmp | groupby snmp.community snmp.version"}, - { "name": "Software", "description": "List of software seen on the network", "query": "event.dataset:software | groupby software.type software.name"}, - { "name": "SSH", "description": "SSH grouped by version and client", "query": "event.dataset:ssh | groupby ssh.version ssh.client"}, - { "name": "SSL", "description": "SSL grouped by version and server name", "query": "event.dataset:ssl | groupby ssl.version ssl.server_name"}, - { "name": "SYSLOG", "description": "SYSLOG grouped by severity and facility ", "query": "event.dataset:syslog | groupby syslog.severity_label syslog.facility_label"}, - { "name": "Tunnel", "description": "Tunnels grouped by type and action", "query": "event.dataset:tunnel | groupby tunnel.type event.action"}, - { "name": "Weird", "description": "Zeek weird log grouped by name", "query": "event.dataset:weird | groupby weird.name"}, - { "name": "x509", "description": "x.509 grouped by key length and name", "query": "event.dataset:x509 | groupby x509.certificate.key.length x509.san_dns"}, - { "name": "x509", "description": "x.509 grouped by name and issuer", "query": "event.dataset:x509 | groupby x509.san_dns x509.certificate.issuer"}, - { "name": "x509", "description": "x.509 grouped by name and subject", "query": "event.dataset:x509 | groupby x509.san_dns x509.certificate.subject"}, - { "name": "Firewall", "description": "Firewall events grouped by action", "query": "event.dataset:firewall | groupby rule.action"} + { "name": "Default Query", "showSubtitle": true, "showSubtitle": true, "description": "Show all events grouped by the origin host", "query": "* | groupby observer.name"}, + { "name": "Log Type", "showSubtitle": true, "description": "Show all events grouped by module and dataset", "query": "* | groupby event.module event.dataset"}, + { "name": "SOC Auth", "showSubtitle": true, "description": "Users authenticated to SOC grouped by IP address and identity", "query": "event.module:kratos AND event.dataset:audit AND msg:authenticated | groupby http_request.headers.x-real-ip identity_id"}, + { "name": "Elastalerts", "showSubtitle": true, "description": "", "query": "_type:elastalert | groupby rule.name"}, + { "name": 
"Alerts", "showSubtitle": true, "description": "Show all alerts grouped by alert source", "query": "event.dataset: alert | groupby event.module"}, + { "name": "NIDS Alerts", "showSubtitle": true, "description": "Show all NIDS alerts grouped by alert", "query": "event.category: network AND event.dataset: alert | groupby rule.category rule.gid rule.uuid rule.name"}, + { "name": "Wazuh/OSSEC Alerts", "showSubtitle": true, "description": "Show all Wazuh alerts at Level 5 or higher grouped by category", "query": "event.module:ossec AND event.dataset:alert AND rule.level:>4 | groupby rule.category rule.name"}, + { "name": "Wazuh/OSSEC Alerts", "showSubtitle": true, "description": "Show all Wazuh alerts at Level 4 or lower grouped by category", "query": "event.module:ossec AND event.dataset:alert AND rule.level:<5 | groupby rule.category rule.name"}, + { "name": "Wazuh/OSSEC Users and Commands", "showSubtitle": true, "description": "Show all Wazuh alerts grouped by username and command line", "query": "event.module:ossec AND event.dataset:alert | groupby user.escalated.keyword process.command_line"}, + { "name": "Wazuh/OSSEC Processes", "showSubtitle": true, "description": "Show all Wazuh alerts grouped by process name", "query": "event.module:ossec AND event.dataset:alert | groupby process.name"}, + { "name": "Sysmon Events", "showSubtitle": true, "description": "Show all Sysmon logs grouped by event type", "query": "event.module:sysmon | groupby event.dataset"}, + { "name": "Sysmon Usernames", "showSubtitle": true, "description": "Show all Sysmon logs grouped by username", "query": "event.module:sysmon | groupby event.dataset, user.name.keyword"}, + { "name": "Strelka", "showSubtitle": true, "description": "Show all Strelka logs grouped by file type", "query": "event.module:strelka | groupby file.mime_type"}, + { "name": "Zeek Notice", "showSubtitle": true, "description": "Show notices from Zeek", "query": "event.dataset:notice | groupby notice.note notice.message"}, + { "name": "Connections", "showSubtitle": true, "description": "Connections grouped by IP and Port", "query": "event.dataset:conn | groupby source.ip destination.ip network.protocol destination.port"}, + { "name": "Connections", "showSubtitle": true, "description": "Connections grouped by Service", "query": "event.dataset:conn | groupby network.protocol destination.port"}, + { "name": "Connections", "showSubtitle": true, "description": "Connections grouped by destination country", "query": "event.dataset:conn | groupby destination.geo.country_name"}, + { "name": "Connections", "showSubtitle": true, "description": "Connections grouped by source country", "query": "event.dataset:conn | groupby source.geo.country_name"}, + { "name": "DCE_RPC", "showSubtitle": true, "description": "DCE_RPC grouped by operation", "query": "event.dataset:dce_rpc | groupby dce_rpc.operation"}, + { "name": "DHCP", "showSubtitle": true, "description": "DHCP leases", "query": "event.dataset:dhcp | groupby host.hostname client.address"}, + { "name": "DHCP", "showSubtitle": true, "description": "DHCP grouped by message type", "query": "event.dataset:dhcp | groupby dhcp.message_types"}, + { "name": "DNP3", "showSubtitle": true, "description": "DNP3 grouped by reply", "query": "event.dataset:dnp3 | groupby dnp3.fc_reply"}, + { "name": "DNS", "showSubtitle": true, "description": "DNS queries grouped by port", "query": "event.dataset:dns | groupby dns.query.name destination.port"}, + { "name": "DNS", "showSubtitle": true, "description": "DNS queries grouped by 
type", "query": "event.dataset:dns | groupby dns.query.type_name destination.port"}, + { "name": "DNS", "showSubtitle": true, "description": "DNS queries grouped by response code", "query": "event.dataset:dns | groupby dns.response.code_name destination.port"}, + { "name": "DNS", "showSubtitle": true, "description": "DNS highest registered domain", "query": "event.dataset:dns | groupby dns.highest_registered_domain.keyword destination.port"}, + { "name": "DNS", "showSubtitle": true, "description": "DNS grouped by parent domain", "query": "event.dataset:dns | groupby dns.parent_domain.keyword destination.port"}, + { "name": "DPD", "showSubtitle": true, "description": "Dynamic Protocol Detection errors", "query": "event.dataset:dpd | groupby error.reason"}, + { "name": "Files", "showSubtitle": true, "description": "Files grouped by mimetype", "query": "event.dataset:file | groupby file.mime_type source.ip"}, + { "name": "Files", "showSubtitle": true, "description": "Files grouped by source", "query": "event.dataset:file | groupby file.source source.ip"}, + { "name": "FTP", "showSubtitle": true, "description": "FTP grouped by command and argument", "query": "event.dataset:ftp | groupby ftp.command ftp.argument"}, + { "name": "FTP", "showSubtitle": true, "description": "FTP grouped by username and argument", "query": "event.dataset:ftp | groupby ftp.user ftp.argument"}, + { "name": "HTTP", "showSubtitle": true, "description": "HTTP grouped by destination port", "query": "event.dataset:http | groupby destination.port"}, + { "name": "HTTP", "showSubtitle": true, "description": "HTTP grouped by status code and message", "query": "event.dataset:http | groupby http.status_code http.status_message"}, + { "name": "HTTP", "showSubtitle": true, "description": "HTTP grouped by method and user agent", "query": "event.dataset:http | groupby http.method http.useragent"}, + { "name": "HTTP", "showSubtitle": true, "description": "HTTP grouped by virtual host", "query": "event.dataset:http | groupby http.virtual_host"}, + { "name": "HTTP", "showSubtitle": true, "description": "HTTP with exe downloads", "query": "event.dataset:http AND (file.resp_mime_types:dosexec OR file.resp_mime_types:executable) | groupby http.virtual_host"}, + { "name": "Intel", "showSubtitle": true, "description": "Intel framework hits grouped by indicator", "query": "event.dataset:intel | groupby intel.indicator.keyword"}, + { "name": "IRC", "showSubtitle": true, "description": "IRC grouped by command", "query": "event.dataset:irc | groupby irc.command.type"}, + { "name": "KERBEROS", "showSubtitle": true, "description": "KERBEROS grouped by service", "query": "event.dataset:kerberos | groupby kerberos.service"}, + { "name": "MODBUS", "showSubtitle": true, "description": "MODBUS grouped by function", "query": "event.dataset:modbus | groupby modbus.function"}, + { "name": "MYSQL", "showSubtitle": true, "description": "MYSQL grouped by command", "query": "event.dataset:mysql | groupby mysql.command"}, + { "name": "NOTICE", "showSubtitle": true, "description": "Zeek notice logs grouped by note and message", "query": "event.dataset:notice | groupby notice.note notice.message"}, + { "name": "NTLM", "showSubtitle": true, "description": "NTLM grouped by computer name", "query": "event.dataset:ntlm | groupby ntlm.server.dns.name"}, + { "name": "Osquery Live Queries", "showSubtitle": true, "description": "Osquery Live Query results grouped by computer name", "query": "event.dataset:live_query | groupby host.hostname"}, + { "name": "PE", 
"showSubtitle": true, "description": "PE files list", "query": "event.dataset:pe | groupby file.machine file.os file.subsystem"}, + { "name": "RADIUS", "showSubtitle": true, "description": "RADIUS grouped by username", "query": "event.dataset:radius | groupby user.name.keyword"}, + { "name": "RDP", "showSubtitle": true, "description": "RDP grouped by client name", "query": "event.dataset:rdp | groupby client.name"}, + { "name": "RFB", "showSubtitle": true, "description": "RFB grouped by desktop name", "query": "event.dataset:rfb | groupby rfb.desktop.name.keyword"}, + { "name": "Signatures", "showSubtitle": true, "description": "Zeek signatures grouped by signature id", "query": "event.dataset:signatures | groupby signature_id"}, + { "name": "SIP", "showSubtitle": true, "description": "SIP grouped by user agent", "query": "event.dataset:sip | groupby client.user_agent"}, + { "name": "SMB_Files", "showSubtitle": true, "description": "SMB files grouped by action", "query": "event.dataset:smb_files | groupby file.action"}, + { "name": "SMB_Mapping", "showSubtitle": true, "description": "SMB mapping grouped by path", "query": "event.dataset:smb_mapping | groupby smb.path"}, + { "name": "SMTP", "showSubtitle": true, "description": "SMTP grouped by subject", "query": "event.dataset:smtp | groupby smtp.subject"}, + { "name": "SNMP", "showSubtitle": true, "description": "SNMP grouped by version and string", "query": "event.dataset:snmp | groupby snmp.community snmp.version"}, + { "name": "Software", "showSubtitle": true, "description": "List of software seen on the network", "query": "event.dataset:software | groupby software.type software.name"}, + { "name": "SSH", "showSubtitle": true, "description": "SSH grouped by version and client", "query": "event.dataset:ssh | groupby ssh.version ssh.client"}, + { "name": "SSL", "showSubtitle": true, "description": "SSL grouped by version and server name", "query": "event.dataset:ssl | groupby ssl.version ssl.server_name"}, + { "name": "SYSLOG", "showSubtitle": true, "description": "SYSLOG grouped by severity and facility ", "query": "event.dataset:syslog | groupby syslog.severity_label syslog.facility_label"}, + { "name": "Tunnel", "showSubtitle": true, "description": "Tunnels grouped by type and action", "query": "event.dataset:tunnel | groupby tunnel.type event.action"}, + { "name": "Weird", "showSubtitle": true, "description": "Zeek weird log grouped by name", "query": "event.dataset:weird | groupby weird.name"}, + { "name": "x509", "showSubtitle": true, "description": "x.509 grouped by key length and name", "query": "event.dataset:x509 | groupby x509.certificate.key.length x509.san_dns"}, + { "name": "x509", "showSubtitle": true, "description": "x.509 grouped by name and issuer", "query": "event.dataset:x509 | groupby x509.san_dns x509.certificate.issuer"}, + { "name": "x509", "showSubtitle": true, "description": "x.509 grouped by name and subject", "query": "event.dataset:x509 | groupby x509.san_dns x509.certificate.subject"}, + { "name": "Firewall", "showSubtitle": true, "description": "Firewall events grouped by action", "query": "event.dataset:firewall | groupby rule.action"} ] diff --git a/salt/soc/files/soc/soc.json b/salt/soc/files/soc/soc.json index 9b034ad57..7f08b79ba 100644 --- a/salt/soc/files/soc/soc.json +++ b/salt/soc/files/soc/soc.json @@ -217,6 +217,7 @@ "case": { "mostRecentlyUsedLimit": 5, "renderAbbreviatedCount": 30, + "analyzerNodeId": "{{ grains.host | lower }}", "presets": { "artifactType": {{ presets_artifacttype | json }}, 
"category": {{ presets_category | json }},