Mirror of https://github.com/Security-Onion-Solutions/securityonion.git, synced 2025-12-07 17:52:46 +01:00.
.github/DISCUSSION_TEMPLATE/2-4.yml (vendored, 1 line changed)
@@ -31,6 +31,7 @@ body:
        - 2.4.160
        - 2.4.170
        - 2.4.180
+       - 2.4.190
        - Other (please provide detail below)
  validations:
    required: true
@@ -1,17 +1,17 @@
-### 2.4.170-20250812 ISO image released on 2025/08/12
+### 2.4.180-20250916 ISO image released on 2025/09/17

### Download and Verify

-2.4.170-20250812 ISO image:
-https://download.securityonion.net/file/securityonion/securityonion-2.4.170-20250812.iso
+2.4.180-20250916 ISO image:
+https://download.securityonion.net/file/securityonion/securityonion-2.4.180-20250916.iso

-MD5: 50ECAAD05736298452DECEAE074FA773
-SHA1: 1B1EB520DE61ECC4BF34E512DAFE307317D7666A
-SHA256: 87D176A48A58BAD1C2D57196F999BED23DE9B526226E3754F0C166C866CCDC1A
+MD5: DE93880E38DE4BE45D05A41E1745CB1F
+SHA1: AEA6948911E50A4A38E8729E0E965C565402E3FC
+SHA256: C9BD8CA071E43B048ABF9ED145B87935CB1D4BB839B2244A06FAD1BBA8EAC84A

Signature for ISO image:
-https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.170-20250812.iso.sig
+https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.180-20250916.iso.sig

Signing key:
https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.4/main/KEYS

@@ -25,22 +25,22 @@ wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.

Download the signature file for the ISO:
```
-wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.170-20250812.iso.sig
+wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.180-20250916.iso.sig
```

Download the ISO image:
```
-wget https://download.securityonion.net/file/securityonion/securityonion-2.4.170-20250812.iso
+wget https://download.securityonion.net/file/securityonion/securityonion-2.4.180-20250916.iso
```

Verify the downloaded ISO image using the signature file:
```
-gpg --verify securityonion-2.4.170-20250812.iso.sig securityonion-2.4.170-20250812.iso
+gpg --verify securityonion-2.4.180-20250916.iso.sig securityonion-2.4.180-20250916.iso
```

The output should show "Good signature" and the Primary key fingerprint should match what's shown below:
```
-gpg: Signature made Fri 08 Aug 2025 06:24:56 PM EDT using RSA key ID FE507013
+gpg: Signature made Tue 16 Sep 2025 06:30:19 PM EDT using RSA key ID FE507013
gpg: Good signature from "Security Onion Solutions, LLC <info@securityonionsolutions.com>"
gpg: WARNING: This key is not certified with a trusted signature!
gpg: There is no indication that the signature belongs to the owner.
```
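On a fresh system the signing key must be imported before the `gpg --verify` step will report a good signature. A minimal sketch of the complete flow for the 2.4.180 image, using the KEYS URL from the notes above:
```
# Download and import the Security Onion signing key, then verify the ISO.
wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.4/main/KEYS
gpg --import KEYS
gpg --verify securityonion-2.4.180-20250916.iso.sig securityonion-2.4.180-20250916.iso
```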
@@ -263,6 +263,8 @@ base:
    - kafka.nodes
    - kafka.soc_kafka
    - stig.soc_stig
    - elasticfleet.soc_elasticfleet
    - elasticfleet.adv_elasticfleet

  '*_import':
    - node_data.ips
@@ -268,6 +268,13 @@ for log_file in $(cat /tmp/log_check_files); do
  tail -n $RECENT_LOG_LINES $log_file > /tmp/log_check
  check_for_errors
done
+# Look for OOM specific errors in /var/log/messages which can lead to odd behavior / test failures
+if [[ -f /var/log/messages ]]; then
+  status "Checking log file /var/log/messages"
+  if journalctl --since "24 hours ago" | grep -iE 'out of memory|oom-kill'; then
+    RESULT=1
+  fi
+fi

# Cleanup temp files
rm -f /tmp/log_check_files
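The added check leans on grep's exit status: grep exits 0 only when at least one line matches, so the inner `if` sets RESULT=1 exactly when an OOM event appears in the journal (and, since `-q` is not used, the matching lines also land in the test output). A standalone sketch of the same pattern:
```
# grep exits 0 on match and 1 on no match; -q suppresses the matched lines.
if journalctl --since "24 hours ago" | grep -iqE 'out of memory|oom-kill'; then
    echo "OOM events found in the last 24 hours"
fi
```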
@@ -21,7 +21,7 @@
      "paths": [
        "/opt/so/log/agents/agent-monitor.log"
      ],
-     "data_stream.dataset": "agent-monitor",
+     "data_stream.dataset": "agentmonitor",
      "pipeline": "elasticagent.monitor",
      "parsers": "",
      "exclude_files": [
@@ -36,7 +36,7 @@
      "harvester_limit": 0,
      "fingerprint": true,
      "fingerprint_offset": 0,
-     "fingerprint_length": 1024,
+     "fingerprint_length": 64,
      "file_identity_native": false,
      "exclude_lines": [],
      "include_lines": []
@@ -23,14 +23,28 @@ function update_logstash_outputs() {
}
function update_kafka_outputs() {
    # Make sure SSL configuration is included in policy updates for Kafka output. SSL is configured in so-elastic-fleet-setup
-    SSL_CONFIG=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs/so-manager_kafka" | jq -r '.item.ssl')
-
-    JSON_STRING=$(jq -n \
-        --arg UPDATEDLIST "$NEW_LIST_JSON" \
-        --argjson SSL_CONFIG "$SSL_CONFIG" \
-        '{"name": "grid-kafka","type": "kafka","hosts": $UPDATEDLIST,"is_default": true,"is_default_monitoring": true,"config_yaml": "","ssl": $SSL_CONFIG}')
-    # Update Kafka outputs
-    curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_kafka" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" | jq
+    if kafka_policy=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs/so-manager_kafka" --fail 2>/dev/null); then
+        SSL_CONFIG=$(echo "$kafka_policy" | jq -r '.item.ssl')
+        if SECRETS=$(echo "$kafka_policy" | jq -er '.item.secrets' 2>/dev/null); then
+            # Update policy when fleet has secrets enabled
+            JSON_STRING=$(jq -n \
+                --arg UPDATEDLIST "$NEW_LIST_JSON" \
+                --argjson SSL_CONFIG "$SSL_CONFIG" \
+                --argjson SECRETS "$SECRETS" \
+                '{"name": "grid-kafka","type": "kafka","hosts": $UPDATEDLIST,"is_default": true,"is_default_monitoring": true,"config_yaml": "","ssl": $SSL_CONFIG,"secrets": $SECRETS}')
+        else
+            # Update policy when fleet has secrets disabled or policy hasn't been force updated
+            JSON_STRING=$(jq -n \
+                --arg UPDATEDLIST "$NEW_LIST_JSON" \
+                --argjson SSL_CONFIG "$SSL_CONFIG" \
+                '{"name": "grid-kafka","type": "kafka","hosts": $UPDATEDLIST,"is_default": true,"is_default_monitoring": true,"config_yaml": "","ssl": $SSL_CONFIG}')
+        fi
+        # Update Kafka outputs
+        curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_kafka" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" | jq
+    else
+        printf "Failed to get current Kafka output policy..."
+        exit 1
+    fi
}

{% if GLOBALS.pipeline == "KAFKA" %}
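The `jq -er '.item.secrets'` test works because `-e` derives jq's exit status from the last output: 0 for normal output, non-zero when the filter yields `null` or `false`. That lets the shell branch on whether the Fleet policy actually contains a `secrets` block. A minimal standalone sketch:
```
# jq -e exits non-zero when the filter produces null/false, so a shell `if`
# can branch on the presence of a JSON field.
policy='{"item":{"ssl":{"verification_mode":"full"}}}'
if secrets=$(echo "$policy" | jq -er '.item.secrets' 2>/dev/null); then
    echo "policy has secrets: $secrets"
else
    echo "policy has no secrets block"
fi
```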
@@ -5,46 +5,78 @@
# Elastic License 2.0.

{% from 'vars/globals.map.jinja' import GLOBALS %}
-{% if GLOBALS.role in ['so-manager', 'so-standalone', 'so-managersearch'] %}
+{% if GLOBALS.role in ['so-manager', 'so-standalone', 'so-managersearch', 'so-managerhype'] %}

. /usr/sbin/so-common

+force=false
+while [[ $# -gt 0 ]]; do
+  case $1 in
+    -f|--force)
+      force=true
+      shift
+      ;;
+    *)
+      echo "Unknown option $1"
+      echo "Usage: $0 [-f|--force]"
+      exit 1
+      ;;
+  esac
+done
+
# Check to make sure that Kibana API is up & ready
RETURN_CODE=0
wait_for_web_response "http://localhost:5601/api/fleet/settings" "fleet" 300 "curl -K /opt/so/conf/elasticsearch/curl.config"
RETURN_CODE=$?

if [[ "$RETURN_CODE" != "0" ]]; then
-    printf "Kibana API not accessible, can't setup Elastic Fleet output policy for Kafka..."
-    exit 1
+    echo -e "\nKibana API not accessible, can't setup Elastic Fleet output policy for Kafka...\n"
+    exit 1
fi

-output=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs" | jq -r .items[].id)
+KAFKACRT=$(openssl x509 -in /etc/pki/elasticfleet-kafka.crt)
+KAFKAKEY=$(openssl rsa -in /etc/pki/elasticfleet-kafka.key)
+KAFKACA=$(openssl x509 -in /etc/pki/tls/certs/intca.crt)
+KAFKA_OUTPUT_VERSION="2.6.0"

-if ! echo "$output" | grep -q "so-manager_kafka"; then
-    KAFKACRT=$(openssl x509 -in /etc/pki/elasticfleet-kafka.crt)
-    KAFKAKEY=$(openssl rsa -in /etc/pki/elasticfleet-kafka.key)
-    KAFKACA=$(openssl x509 -in /etc/pki/tls/certs/intca.crt)
-    KAFKA_OUTPUT_VERSION="2.6.0"
+if ! kafka_output=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs/so-manager_kafka" --fail 2>/dev/null); then
    # Create a new output policy for Kafka. Default is disabled 'is_default: false & is_default_monitoring: false'
    JSON_STRING=$( jq -n \
        --arg KAFKACRT "$KAFKACRT" \
        --arg KAFKAKEY "$KAFKAKEY" \
        --arg KAFKACA "$KAFKACA" \
        --arg MANAGER_IP "{{ GLOBALS.manager_ip }}:9092" \
        --arg KAFKA_OUTPUT_VERSION "$KAFKA_OUTPUT_VERSION" \
-        '{ "name": "grid-kafka", "id": "so-manager_kafka", "type": "kafka", "hosts": [ $MANAGER_IP ], "is_default": false, "is_default_monitoring": false, "config_yaml": "", "ssl": { "certificate_authorities": [ $KAFKACA ], "certificate": $KAFKACRT, "key": $KAFKAKEY, "verification_mode": "full" }, "proxy_id": null, "client_id": "Elastic", "version": $KAFKA_OUTPUT_VERSION, "compression": "none", "auth_type": "ssl", "partition": "round_robin", "round_robin": { "group_events": 10 }, "topics":[{"topic":"default-securityonion"}], "headers": [ { "key": "", "value": "" } ], "timeout": 30, "broker_timeout": 30, "required_acks": 1 }'
+        '{"name":"grid-kafka", "id":"so-manager_kafka","type":"kafka","hosts":[ $MANAGER_IP ],"is_default":false,"is_default_monitoring":false,"config_yaml":"","ssl":{"certificate_authorities":[ $KAFKACA ],"certificate": $KAFKACRT ,"key":"","verification_mode":"full"},"proxy_id":null,"client_id":"Elastic","version": $KAFKA_OUTPUT_VERSION ,"compression":"none","auth_type":"ssl","partition":"round_robin","round_robin":{"group_events":10},"topics":[{"topic":"default-securityonion"}],"headers":[{"key":"","value":""}],"timeout":30,"broker_timeout":30,"required_acks":1,"secrets":{"ssl":{"key": $KAFKAKEY }}}'
    )
-    curl -sK /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/outputs" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" -o /dev/null
-    refresh_output=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs" | jq -r .items[].id)
-
-    if ! echo "$refresh_output" | grep -q "so-manager_kafka"; then
-        echo -e "\nFailed to setup Elastic Fleet output policy for Kafka...\n"
-        exit 1
-    elif echo "$refresh_output" | grep -q "so-manager_kafka"; then
-        echo -e "\nSuccessfully setup Elastic Fleet output policy for Kafka...\n"
-    fi
-elif echo "$output" | grep -q "so-manager_kafka"; then
-    echo -e "\nElastic Fleet output policy for Kafka already exists...\n"
+    if ! response=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/outputs" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" --fail 2>/dev/null); then
+        echo -e "\nFailed to setup Elastic Fleet output policy for Kafka...\n"
+        exit 1
+    else
+        echo -e "\nSuccessfully setup Elastic Fleet output policy for Kafka...\n"
+        exit 0
+    fi
+elif kafka_output=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs/so-manager_kafka" --fail 2>/dev/null) && [[ "$force" == "true" ]]; then
+    # force an update to Kafka policy. Keep the current value of Kafka output policy (enabled/disabled).
+    ENABLED_DISABLED=$(echo "$kafka_output" | jq -e .item.is_default)
+    HOSTS=$(echo "$kafka_output" | jq -r '.item.hosts')
+    JSON_STRING=$( jq -n \
+        --arg KAFKACRT "$KAFKACRT" \
+        --arg KAFKAKEY "$KAFKAKEY" \
+        --arg KAFKACA "$KAFKACA" \
+        --arg ENABLED_DISABLED "$ENABLED_DISABLED"\
+        --arg KAFKA_OUTPUT_VERSION "$KAFKA_OUTPUT_VERSION" \
+        --argjson HOSTS "$HOSTS" \
+        '{"name":"grid-kafka","type":"kafka","hosts":$HOSTS,"is_default":$ENABLED_DISABLED,"is_default_monitoring":$ENABLED_DISABLED,"config_yaml":"","ssl":{"certificate_authorities":[ $KAFKACA ],"certificate": $KAFKACRT ,"key":"","verification_mode":"full"},"proxy_id":null,"client_id":"Elastic","version": $KAFKA_OUTPUT_VERSION ,"compression":"none","auth_type":"ssl","partition":"round_robin","round_robin":{"group_events":10},"topics":[{"topic":"default-securityonion"}],"headers":[{"key":"","value":""}],"timeout":30,"broker_timeout":30,"required_acks":1,"secrets":{"ssl":{"key": $KAFKAKEY }}}'
+    )
+    if ! response=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_kafka" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" --fail 2>/dev/null); then
+        echo -e "\nFailed to force update to Elastic Fleet output policy for Kafka...\n"
+        exit 1
+    else
+        echo -e "\nForced update to Elastic Fleet output policy for Kafka...\n"
+    fi
+else
+    echo -e "\nElastic Fleet output policy for Kafka already exists...\n"
fi
{% else %}
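Both versions build the policy body with `jq -n --arg`, which JSON-escapes its value; that is what lets multi-line PEM material (certificates, keys) be embedded safely without hand-rolled quoting. A minimal sketch of the pattern with a hypothetical certificate variable:
```
# jq --arg JSON-escapes the value, so multi-line PEM data survives intact.
# CERT below is a hypothetical placeholder, not a real certificate.
CERT="-----BEGIN CERTIFICATE-----
MIIB...
-----END CERTIFICATE-----"
jq -n --arg cert "$CERT" '{"ssl": {"certificate": $cert}}'
```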
@@ -1323,7 +1323,7 @@ elasticsearch:
        set_priority:
          priority: 50
        min_age: 30d
-  so-logs-agent-monitor:
+  so-elastic-agent-monitor:
    index_sorting: false
    index_template:
      composed_of:
@@ -1335,10 +1335,8 @@ elasticsearch:
      data_stream:
        allow_custom_routing: false
        hidden: false
-      ignore_missing_component_templates:
-      - logs-agent-monitor@custom
      index_patterns:
-      - logs-agent-monitor-*
+      - logs-agentmonitor-*
      priority: 501
      template:
        mappings:
@@ -1350,7 +1348,7 @@ elasticsearch:
        settings:
          index:
            lifecycle:
-              name: so-logs-agent-monitor-logs
+              name: so-elastic-agent-monitor-logs
            mapping:
              total_fields:
                limit: 5000
@@ -4175,7 +4173,7 @@ elasticsearch:
      hot:
        actions:
          rollover:
-            max_age: 1d
+            max_age: 30d
            max_primary_shard_size: 50gb
        set_priority:
          priority: 100
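With this change, an index in the hot phase rolls over when either condition is met: it reaches 30 days of age or a primary shard reaches 50gb. To see which condition an index is waiting on, Elasticsearch's ILM explain API can be queried; a hedged sketch, assuming Elasticsearch is reachable on localhost:9200 using the same curl config these scripts use for Kibana:
```
# Show ILM phase/action/age for the agentmonitor indices (assumed endpoint).
curl -sK /opt/so/conf/elasticsearch/curl.config -L \
  "https://localhost:9200/logs-agentmonitor-*/_ilm/explain" | jq '.indices[] | {phase, action, age}'
```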
salt/elasticsearch/files/ingest/common.ip_validation (new file, 22 lines)
@@ -0,0 +1,22 @@
{
  "processors": [
    {
      "convert": {
        "field": "_ingest._value",
        "type": "ip",
        "target_field": "_ingest._temp_ip",
        "ignore_failure": true
      }
    },
    {
      "append": {
        "field": "temp._valid_ips",
        "allow_duplicates": false,
        "value": [
          "{{{_ingest._temp_ip}}}"
        ],
        "ignore_failure": true
      }
    }
  ]
}
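Within a `foreach` processor, each array element is exposed as `_ingest._value`; the `convert` with `"type": "ip"` fails silently (via `ignore_failure`) on anything that is not a valid IPv4/IPv6 address, so only valid addresses get appended to `temp._valid_ips`. For manual testing the pipeline can be loaded by hand; a sketch assuming an Elasticsearch node reachable locally without auth (lab setup) and the repo root as the working directory:
```
# Load the IP-validation pipeline under the name the dns pipeline references.
curl -s -X PUT "http://localhost:9200/_ingest/pipeline/common.ip_validation" \
  -H 'Content-Type: application/json' \
  --data-binary @salt/elasticsearch/files/ingest/common.ip_validation
```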
@@ -24,7 +24,7 @@
  { "rename": { "if": "ctx.winlog?.provider_name == 'Microsoft-Windows-Windows Defender'", "ignore_missing": true, "field": "winlog.event_data.Threat Name", "target_field": "winlog.event_data.threat_name" } },
  { "set": { "if": "ctx?.metadata?.kafka != null" , "field": "kafka.id", "value": "{{metadata.kafka.partition}}{{metadata.kafka.offset}}{{metadata.kafka.timestamp}}", "ignore_failure": true } },
  {"append": {"field":"related.ip","value":["{{source.ip}}","{{destination.ip}}"],"allow_duplicates":false,"if":"ctx?.event?.dataset == 'endpoint.events.network' && ctx?.source?.ip != null","ignore_failure":true}},
-  {"foreach": {"field":"host.ip","processor":{"append":{"field":"related.ip","value":"{{_ingest._value}}","allow_duplicates":false}},"if":"ctx?.event?.module == 'endpoint'","description":"Extract IPs from Elastic Agent events (host.ip) and adds them to related.ip"}},
+  {"foreach": {"field":"host.ip","processor":{"append":{"field":"related.ip","value":"{{_ingest._value}}","allow_duplicates":false}},"if":"ctx?.event?.module == 'endpoint' && ctx?.host?.ip != null","ignore_missing":true, "description":"Extract IPs from Elastic Agent events (host.ip) and adds them to related.ip"}},
  { "remove": { "field": [ "message2", "type", "fields", "category", "module", "dataset", "event.dataset_temp", "dataset_tag_temp", "module_temp", "datastream_dataset_temp" ], "ignore_missing": true, "ignore_failure": true } }
]
}
@@ -21,7 +21,10 @@
  { "rename": { "field": "message2.RA", "target_field": "dns.recursion.available", "ignore_missing": true } },
  { "rename": { "field": "message2.Z", "target_field": "dns.reserved", "ignore_missing": true } },
  { "rename": { "field": "message2.answers", "target_field": "dns.answers.name", "ignore_missing": true } },
-  { "script": { "lang": "painless", "if": "ctx.dns != null && ctx.dns.answers != null && ctx.dns.answers.name != null", "source": "def ips = []; for (item in ctx.dns.answers.name) { if (item =~ /^(?:[0-9]{1,3}\\.){3}[0-9]{1,3}$/ || item =~ /^([a-fA-F0-9:]+:+)+[a-fA-F0-9]+$/) { ips.add(item); } } ctx.dns.resolved_ip = ips;" } },
+  { "foreach": {"field": "dns.answers.name","processor": {"pipeline": {"name": "common.ip_validation"}},"if": "ctx.dns != null && ctx.dns.answers != null && ctx.dns.answers.name != null","ignore_failure": true}},
+  { "foreach": {"field": "temp._valid_ips","processor": {"append": {"field": "dns.resolved_ip","allow_duplicates": false,"value": "{{{_ingest._value}}}","ignore_failure": true}},"ignore_failure": true}},
+  { "script": { "source": "if (ctx.dns.resolved_ip != null && ctx.dns.resolved_ip instanceof List) {\n ctx.dns.resolved_ip.removeIf(item -> item == null || item.toString().trim().isEmpty());\n }","ignore_failure": true }},
+  { "remove": {"field": ["temp"], "ignore_missing": true ,"ignore_failure": true } },
  { "rename": { "field": "message2.TTLs", "target_field": "dns.ttls", "ignore_missing": true } },
  { "rename": { "field": "message2.rejected", "target_field": "dns.query.rejected", "ignore_missing": true } },
  { "script": { "lang": "painless", "source": "ctx.dns.query.length = ctx.dns.query.name.length()", "ignore_failure": true } },
@@ -91,7 +91,7 @@ COMMIT
-A INPUT -m conntrack --ctstate INVALID -j DROP
-A INPUT -p icmp -j ACCEPT
-A INPUT -j LOGGING
-{% if GLOBALS.role in ['so-hypervisor', 'so-managerhyper'] -%}
+{% if GLOBALS.role in ['so-hypervisor', 'so-managerhype'] -%}
-A FORWARD -m state --state RELATED,ESTABLISHED -j ACCEPT
-A FORWARD -i br0 -o br0 -j ACCEPT
{%- endif %}
@@ -4,6 +4,9 @@
# Elastic License 2.0.

# We do not import GLOBALS in this state because it is called during setup
include:
  - salt.mine_functions
  - salt.minion.service_file

down_original_mgmt_interface:
  cmd.run:
@@ -28,29 +31,13 @@ wait_for_br0_ip:
    - timeout: 95
    - onchanges:
      - cmd: down_original_mgmt_interface

{% if grains.role == 'so-hypervisor' %}

update_mine_functions:
  file.managed:
    - name: /etc/salt/minion.d/mine_functions.conf
    - contents: |
        mine_interval: 25
        mine_functions:
          network.ip_addrs:
            - interface: br0
        {%- if role in ['so-eval','so-import','so-manager','so-managerhype','so-managersearch','so-standalone'] %}
          x509.get_pem_entries:
            - glob_path: '/etc/pki/ca.crt'
        {% endif %}
    - onchanges:
      - cmd: wait_for_br0_ip
    - onchanges_in:
      - file: salt_minion_service_unit_file
      - file: mine_functions

restart_salt_minion_service:
  service.running:
    - name: salt-minion
    - enable: True
    - listen:
      - file: update_mine_functions

{% endif %}
      - file: mine_functions
@@ -590,9 +590,6 @@ post_to_2.4.160() {
}

post_to_2.4.170() {
-  echo "Regenerating Elastic Agent Installers"
-  /sbin/so-elastic-agent-gen-installers
-
  # Update kibana default space
  salt-call state.apply kibana.config queue=True
  echo "Updating Kibana default space"
@@ -602,7 +599,12 @@ post_to_2.4.170() {
}

post_to_2.4.180() {
-  echo "Nothing to apply"
+  echo "Regenerating Elastic Agent Installers"
+  /sbin/so-elastic-agent-gen-installers
+
+  # Force update to Kafka output policy
+  /usr/sbin/so-kafka-fleet-output-policy --force
+
  POSTVERSION=2.4.180
}
@@ -857,14 +859,13 @@ up_to_2.4.170() {
    touch /opt/so/saltstack/local/pillar/$state/adv_$state.sls /opt/so/saltstack/local/pillar/$state/soc_$state.sls
  done

-  # Elastic Update for this release, so download Elastic Agent files
-  determine_elastic_agent_upgrade
-
  INSTALLEDVERSION=2.4.170
}

up_to_2.4.180() {
-  echo "Nothing to do for 2.4.180"
+  # Elastic Update for this release, so download Elastic Agent files
+  determine_elastic_agent_upgrade
+
  INSTALLEDVERSION=2.4.180
}
@@ -3,7 +3,7 @@
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

-# this state was seperated from salt.minion state since it is called during setup
+# this state was separated from salt.minion state since it is called during setup
# GLOBALS are imported in the salt.minion state and that is not available at that point in setup
# this state is included in the salt.minion state
@@ -1,18 +1,22 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'salt/map.jinja' import UPGRADECOMMAND with context %}
{% from 'salt/map.jinja' import SALTVERSION %}
{% from 'salt/map.jinja' import INSTALLEDSALTVERSION %}
{% from 'salt/map.jinja' import SALTPACKAGES %}
{% from 'salt/map.jinja' import SYSTEMD_UNIT_FILE %}
{% import_yaml 'salt/minion.defaults.yaml' as SALTMINION %}

include:
  - salt.python_modules
  - salt.patch.x509_v2
  - salt
  - systemd.reload
  - repo.client
  - salt.mine_functions
  - salt.minion.service_file
{% if GLOBALS.role in GLOBALS.manager_roles %}
  - ca
{% endif %}
@@ -94,17 +98,6 @@ enable_startup_states:
    - regex: '^startup_states: highstate$'
    - unless: pgrep so-setup

-# prior to 2.4.30 this managed file would restart the salt-minion service when updated
-# since this file is currently only adding a delay service start
-# it is not required to restart the service
-salt_minion_service_unit_file:
-  file.managed:
-    - name: {{ SYSTEMD_UNIT_FILE }}
-    - source: salt://salt/service/salt-minion.service.jinja
-    - template: jinja
-    - onchanges_in:
-      - module: systemd_reload

{% endif %}

# this has to be outside the if statement above since there are <requisite>_in calls to this state
salt/salt/minion/service_file.sls (new file, 26 lines)
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
{% from 'salt/map.jinja' import SALTVERSION %}
|
||||
{% from 'salt/map.jinja' import INSTALLEDSALTVERSION %}
|
||||
{% from 'salt/map.jinja' import SYSTEMD_UNIT_FILE %}
|
||||
|
||||
include:
|
||||
- systemd.reload
|
||||
|
||||
{% if INSTALLEDSALTVERSION|string == SALTVERSION|string %}
|
||||
|
||||
# prior to 2.4.30 this managed file would restart the salt-minion service when updated
|
||||
# since this file is currently only adding a delay service start
|
||||
# it is not required to restart the service
|
||||
salt_minion_service_unit_file:
|
||||
file.managed:
|
||||
- name: {{ SYSTEMD_UNIT_FILE }}
|
||||
- source: salt://salt/service/salt-minion.service.jinja
|
||||
- template: jinja
|
||||
- onchanges_in:
|
||||
- module: systemd_reload
|
||||
|
||||
{% endif %}
|
||||
@@ -34,6 +34,8 @@ sensoroni:
      api_version: community
    localfile:
      file_path: []
+    malwarebazaar:
+      api_key:
    otx:
      base_url: https://otx.alienvault.com/api/v1/
      api_key:
@@ -49,12 +51,16 @@ sensoroni:
      live_flow: False
      mailbox_email_address:
      message_source_id:
+    threatfox:
+      api_key:
    urlscan:
      base_url: https://urlscan.io/api/v1/
      api_key:
      enabled: False
      visibility: public
      timeout: 180
+    urlhaus:
+      api_key:
    virustotal:
      base_url: https://www.virustotal.com/api/v3/search?query=
      api_key:
@@ -35,15 +35,15 @@ Many analyzers require authentication, via an API key or similar. The table belo
[EchoTrail](https://www.echotrail.io/docs/quickstart) |✓|
[EmailRep](https://emailrep.io/key) |✓|
[Elasticsearch](https://www.elastic.co/guide/en/elasticsearch/reference/7.17/setting-up-authentication.html) |✓|
-[GreyNoise](https://www.greynoise.io/plans/community) |✓|
+[GreyNoise (community)](https://www.greynoise.io/plans/community) |✗|
[LocalFile](https://github.com/Security-Onion-Solutions/securityonion/tree/fix/sublime_analyzer_documentation/salt/sensoroni/files/analyzers/localfile) |✗|
[Malware Hash Registry](https://hash.cymru.com/docs_whois) |✗|
-[MalwareBazaar](https://bazaar.abuse.ch/) |✗|
+[MalwareBazaar](https://bazaar.abuse.ch/) |✓|
[Pulsedive](https://pulsedive.com/api/) |✓|
[Spamhaus](https://www.spamhaus.org/dbl/) |✗|
[Sublime Platform](https://sublime.security) |✓|
-[ThreatFox](https://threatfox.abuse.ch/) |✗|
-[Urlhaus](https://urlhaus.abuse.ch/) |✗|
+[ThreatFox](https://threatfox.abuse.ch/) |✓|
+[Urlhaus](https://urlhaus.abuse.ch/) |✓|
[Urlscan](https://urlscan.io/docs/api/) |✓|
[VirusTotal](https://developers.virustotal.com/reference/overview) |✓|
[WhoisLookup](https://github.com/meeb/whoisit) |✗|
@@ -1,24 +0,0 @@
# EchoTrail


## Description
Submit a filename, hash, or commandline to EchoTrail for analysis.

## Configuration Requirements

In SOC, navigate to `Administration`, toggle `Show all configurable settings, including advanced settings.`, and navigate to `sensoroni` -> `analyzers` -> `echotrail`.


The following configuration options are available:

``api_key`` - API key used for communication with the EchoTrail API (Required)

This value should be set in the ``sensoroni`` pillar, like so:

```
sensoroni:
  analyzers:
    echotrail:
      api_key: $yourapikey
```
@@ -1,10 +0,0 @@
{
  "name": "Echotrail",
  "version": "0.1",
  "author": "Security Onion Solutions",
  "description": "This analyzer queries Echotrail to see if a related filename, hash, or commandline is considered malicious.",
  "supportedTypes" : ["filename","hash","commandline"],
  "baseUrl": "https://api.echotrail.io/insights/"
}
@@ -1,67 +0,0 @@
import json
import os
import sys
import requests
import helpers
import argparse


# for test usage:
# python3 echotrail.py '{"artifactType":"hash", "value":"438b6ccd84f4dd32d9684ed7d58fd7d1e5a75fe3f3d12ab6c788e6bb0ffad5e7"}'
# You will need to provide an API key in the .yaml file.
def checkConfigRequirements(conf):
    if not conf['api_key']:
        sys.exit(126)
    else:
        return True


def sendReq(conf, observ_value):
    # send a GET request using a user-provided API key and the API url
    url = conf['base_url'] + observ_value
    headers = {'x-api-key': conf['api_key']}
    response = requests.request('GET', url=url, headers=headers)
    return response.json()


def prepareResults(raw):
    # checking for the 'filenames' key alone does
    # not work when querying by filename.
    # So, we can account for a hash query, a filename query,
    # and anything else with these if statements.
    if 'filenames' in raw.keys():
        summary = raw['filenames'][0][0]
    elif 'tags' in raw.keys():
        summary = raw['tags'][0][0]
    else:
        summary = 'inconclusive'
    status = 'info'
    return {'response': raw, 'summary': summary, 'status': status}


def analyze(conf, input):
    # put all of our methods together and return a properly formatted output.
    checkConfigRequirements(conf)
    meta = helpers.loadMetadata(__file__)
    data = helpers.parseArtifact(input)
    helpers.checkSupportedType(meta, data['artifactType'])
    response = sendReq(conf, data['value'])
    return prepareResults(response)


def main():
    dir = os.path.dirname(os.path.realpath(__file__))
    parser = argparse.ArgumentParser(
        description='Search Echotrail for a given artifact')
    parser.add_argument(
        'artifact', help='the artifact represented in JSON format')
    parser.add_argument('-c', '--config', metavar='CONFIG_FILE', default=dir + '/echotrail.yaml',
                        help='optional config file to use instead of the default config file')
    args = parser.parse_args()
    if args.artifact:
        results = analyze(helpers.loadConfig(args.config), args.artifact)
        print(json.dumps(results))


if __name__ == '__main__':
    main()
@@ -1,3 +0,0 @@
base_url: "{{ salt['pillar.get']('sensoroni:analyzers:echotrail:base_url', 'https://api.echotrail.io/insights/') }}"
api_key: "{{ salt['pillar.get']('sensoroni:analyzers:echotrail:api_key', '') }}"
@@ -1,78 +0,0 @@
from io import StringIO
import sys
from unittest.mock import patch, MagicMock
import unittest
import echotrail


class TestEchoTrailMethods(unittest.TestCase):
    def test_main_success(self):
        with patch('sys.stdout', new=StringIO()) as mock_cmd:
            with patch('echotrail.analyze', new=MagicMock(return_value={'test': 'val'})) as mock:
                sys.argv = ["test", "test"]
                echotrail.main()
                expected = '{"test": "val"}\n'
                self.assertEqual(mock_cmd.getvalue(), expected)
                mock.assert_called_once()

    def test_main_missing_input(self):
        with patch('sys.exit', new=MagicMock()) as sysmock:
            with patch('sys.stderr', new=StringIO()) as mock_stderr:
                sys.argv = ["cmd"]
                echotrail.main()
                self.assertEqual(mock_stderr.getvalue(), "usage: cmd [-h] [-c CONFIG_FILE] artifact\ncmd: error: the following arguments are required: artifact\n")
                sysmock.assert_called_once()

    def test_checkConfigRequirements(self):
        conf = {'base_url': 'https://www.randurl.xyz/', 'api_key': ''}
        with self.assertRaises(SystemExit) as cm:
            echotrail.checkConfigRequirements(conf)
        self.assertEqual(cm.exception.code, 126)

    def test_sendReq(self):
        with patch('requests.request', new=MagicMock(return_value=MagicMock())) as mock:
            response = echotrail.sendReq(conf={'base_url': 'https://www.randurl.xyz/', 'api_key': 'randkey'}, observ_value='example_data')
            self.assertIsNotNone(response)
            mock.assert_called_once()

    def test_prepareResults_noinput(self):
        raw = {}
        sim_results = {'response': raw,
                       'status': 'info', 'summary': 'inconclusive'}
        results = echotrail.prepareResults(raw)
        self.assertEqual(results, sim_results)

    def test_prepareResults_none(self):
        raw = {'query_status': 'no_result'}
        sim_results = {'response': raw,
                       'status': 'info', 'summary': 'inconclusive'}
        results = echotrail.prepareResults(raw)
        self.assertEqual(results, sim_results)

    def test_prepareResults_filenames(self):
        raw = {'filenames': [["abc.exe", "def.exe"], ["abc.exe", "def.exe"]]}
        sim_results = {'response': raw,
                       'status': 'info', 'summary': 'abc.exe'}
        results = echotrail.prepareResults(raw)
        self.assertEqual(results, sim_results)

    def test_prepareResults_tags(self):
        raw = {'tags': [["tag1", "tag2"], ["tag1", "tag2"]]}
        sim_results = {'response': raw,
                       'status': 'info', 'summary': 'tag1'}
        results = echotrail.prepareResults(raw)
        self.assertEqual(results, sim_results)

    def test_analyze(self):
        sendReqOutput = {'threat': 'no_result'}
        input = '{"artifactType":"hash", "value":"1234"}'
        prepareResultOutput = {'response': '',
                               'summary': 'inconclusive', 'status': 'info'}
        conf = {"api_key": "xyz"}

        with patch('echotrail.sendReq', new=MagicMock(return_value=sendReqOutput)) as mock:
            with patch('echotrail.prepareResults', new=MagicMock(return_value=prepareResultOutput)) as mock2:
                results = echotrail.analyze(conf, input)
                self.assertEqual(results["summary"], "inconclusive")
                mock2.assert_called_once()
                mock.assert_called_once()
@@ -1,2 +0,0 @@
requests>=2.31.0
pyyaml>=6.0
Binary files not shown (26 files).
@@ -1,6 +1,6 @@
{
  "name": "Greynoise IP Analyzer",
-  "version": "0.1",
+  "version": "0.2",
  "author": "Security Onion Solutions",
  "description": "This analyzer queries Greynoise for context around an IP address",
  "supportedTypes" : ["ip"]
@@ -7,6 +7,10 @@ import argparse


def checkConfigRequirements(conf):
+    # Community API doesn't require API key
+    if conf.get('api_version') == 'community':
+        return True
+    # Other API versions require API key
    if "api_key" not in conf or len(conf['api_key']) == 0:
        sys.exit(126)
    else:
@@ -17,10 +21,12 @@ def sendReq(conf, meta, ip):
    url = conf['base_url']
    if conf['api_version'] == 'community':
        url = url + 'v3/community/' + ip
-    elif conf['api_version'] == 'investigate' or 'automate':
+        # Community API doesn't use API key
+        response = requests.request('GET', url=url)
+    elif conf['api_version'] in ['investigate', 'automate']:
        url = url + 'v2/noise/context/' + ip
-        headers = {"key": conf['api_key']}
-        response = requests.request('GET', url=url, headers=headers)
+        headers = {"key": conf['api_key']}
+        response = requests.request('GET', url=url, headers=headers)
    return response.json()
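The corrected branches map onto the two GreyNoise endpoints the analyzer targets: the community lookup takes no API key, while the investigate/automate context lookup sends the key in a `key` header. Equivalent raw requests are sketched below, assuming the default GreyNoise base URL (the script actually reads `base_url` from its config, so the host is an assumption) and YOUR_KEY as a placeholder:
```
# Community endpoint: no API key header (first branch).
curl -s "https://api.greynoise.io/v3/community/8.8.8.8"
# Paid (investigate/automate) endpoint: key sent in the "key" header (second branch).
curl -s -H "key: YOUR_KEY" "https://api.greynoise.io/v2/noise/context/8.8.8.8"
```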
@@ -31,13 +31,31 @@ class TestGreynoiseMethods(unittest.TestCase):
            greynoise.checkConfigRequirements(conf)
        self.assertEqual(cm.exception.code, 126)

+    def test_checkConfigRequirements_community_no_key(self):
+        conf = {"api_version": "community"}
+        # Should not raise exception for community version
+        result = greynoise.checkConfigRequirements(conf)
+        self.assertTrue(result)
+
+    def test_checkConfigRequirements_investigate_no_key(self):
+        conf = {"api_version": "investigate"}
+        with self.assertRaises(SystemExit) as cm:
+            greynoise.checkConfigRequirements(conf)
+        self.assertEqual(cm.exception.code, 126)
+
+    def test_checkConfigRequirements_investigate_with_key(self):
+        conf = {"api_version": "investigate", "api_key": "test_key"}
+        result = greynoise.checkConfigRequirements(conf)
+        self.assertTrue(result)
+
    def test_sendReq_community(self):
        with patch('requests.request', new=MagicMock(return_value=MagicMock())) as mock:
            meta = {}
-            conf = {"base_url": "https://myurl/", "api_key": "abcd1234", "api_version": "community"}
+            conf = {"base_url": "https://myurl/", "api_version": "community"}
            ip = "192.168.1.1"
            response = greynoise.sendReq(conf=conf, meta=meta, ip=ip)
-            mock.assert_called_once_with("GET", headers={'key': 'abcd1234'}, url="https://myurl/v3/community/192.168.1.1")
+            # Community API should not include headers
+            mock.assert_called_once_with("GET", url="https://myurl/v3/community/192.168.1.1")
            self.assertIsNotNone(response)

    def test_sendReq_investigate(self):
@@ -115,3 +133,16 @@ class TestGreynoiseMethods(unittest.TestCase):
            results = greynoise.analyze(conf, artifactInput)
            self.assertEqual(results["summary"], "suspicious")
            mock.assert_called_once()
+
+    def test_analyze_community_no_key(self):
+        output = {"ip": "8.8.8.8", "noise": "false", "riot": "true",
+                  "classification": "benign", "name": "Google Public DNS",
+                  "link": "https://viz.gn.io", "last_seen": "2022-04-26",
+                  "message": "Success"}
+        artifactInput = '{"value":"8.8.8.8","artifactType":"ip"}'
+        conf = {"base_url": "myurl/", "api_version": "community"}
+        with patch('greynoise.greynoise.sendReq', new=MagicMock(return_value=output)) as mock:
+            results = greynoise.analyze(conf, artifactInput)
+            self.assertEqual(results["summary"], "harmless")
+            self.assertEqual(results["status"], "ok")
+            mock.assert_called_once()
Binary files not shown (20 files).
@@ -1,6 +1,6 @@
{
  "name": "Malwarebazaar",
-  "version": "0.1",
+  "version": "0.2",
  "author": "Security Onion Solutions",
  "description": "This analyzer queries Malwarebazaar to see if a hash, gimphash, tlsh, or telfhash is considered malicious.",
  "supportedTypes" : ["gimphash","hash","tlsh", "telfhash"],
@@ -2,12 +2,21 @@ import requests
import helpers
import json
import sys
+import os
+import argparse

# supports querying for hash, gimphash, tlsh, and telfhash
# usage is as follows:
# python3 malwarebazaar.py '{"artifactType":"x", "value":"y"}'


+def checkConfigRequirements(conf):
+    if not conf.get('api_key'):
+        sys.exit(126)
+    else:
+        return True
+
+
def buildReq(observ_type, observ_value):
    # determine correct query type to send based off of observable type
    unique_types = {'gimphash': 1, 'telfhash': 1, 'tlsh': 1}
@@ -18,10 +27,13 @@ def buildReq(observ_type, observ_value):
    return {'query': qtype, observ_type: observ_value}


-def sendReq(meta, query):
+def sendReq(conf, meta, query):
    # send a post request with our compiled query to the API
    url = meta['baseUrl']
-    response = requests.post(url, query)
+    headers = {}
+    if conf.get('api_key'):
+        headers['Auth-Key'] = conf['api_key']
+    response = requests.post(url, query, headers=headers)
    return response.json()


@@ -113,10 +125,11 @@ def prepareResults(raw):
    return {'response': raw, 'summary': summary, 'status': status}


-def analyze(input):
+def analyze(conf, input):
    # put all of our methods together, pass them input, and return
    # properly formatted json/python dict output
-    data = json.loads(input)
+    checkConfigRequirements(conf)
+    data = helpers.parseArtifact(input)
    meta = helpers.loadMetadata(__file__)
    helpers.checkSupportedType(meta, data["artifactType"])

@@ -127,7 +140,7 @@ def analyze(input):
    # twice for the sake of retrieving more specific data.

    initialQuery = buildReq(data['artifactType'], data['value'])
-    initialRaw = sendReq(meta, initialQuery)
+    initialRaw = sendReq(conf, meta, initialQuery)

    # To prevent double-querying when a tlsh/gimphash is invalid,
    # this if statement is necessary.
@@ -140,16 +153,22 @@ def analyze(input):
        return prepareResults(initialRaw)

    query = buildReq(data['artifactType'], data['value'])
-    response = sendReq(meta, query)
+    response = sendReq(conf, meta, query)
    return prepareResults(response)


def main():
-    if len(sys.argv) == 2:
-        results = analyze(sys.argv[1])
+    dir = os.path.dirname(os.path.realpath(__file__))
+    parser = argparse.ArgumentParser(
+        description='Search MalwareBazaar for a given artifact')
+    parser.add_argument(
+        'artifact', help='the artifact represented in JSON format')
+    parser.add_argument('-c', '--config', metavar='CONFIG_FILE', default=dir + '/malwarebazaar.yaml',
+                        help='optional config file to use instead of the default config file')
+    args = parser.parse_args()
+    if args.artifact:
+        results = analyze(helpers.loadConfig(args.config), args.artifact)
        print(json.dumps(results))
-    else:
-        print("ERROR: Input is not in proper JSON format")


if __name__ == '__main__':
@@ -0,0 +1 @@
api_key: "{{ salt['pillar.get']('sensoroni:analyzers:malwarebazaar:api_key', '') }}"
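The reworked `sendReq` posts the query as form data and, when an API key is configured, adds an `Auth-Key` header. An equivalent raw request is sketched below, assuming MalwareBazaar's public endpoint (the script actually reads `baseUrl` from its metadata file, which is not shown in this diff); YOUR_KEY and the hash are placeholders:
```
# Form-POST a hash lookup with the Auth-Key header the analyzer now sends.
curl -s -H "Auth-Key: YOUR_KEY" \
  -d "query=get_info" -d "hash=<sha256>" \
  "https://mb-api.abuse.ch/api/v1/"
```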
@@ -6,22 +6,18 @@ import unittest


class TestMalwarebazaarMethods(unittest.TestCase):
-    def test_main_missing_input(self):
-        with patch('sys.stdout', new=StringIO()) as mock_cmd:
-            sys.argv = ["cmd"]
-            malwarebazaar.main()
-            self.assertEqual(mock_cmd.getvalue(),
-                             'ERROR: Input is not in proper JSON format\n')
-
    def test_main_success(self):
-        with patch('sys.stdout', new=StringIO()) as mock_cmd:
-            with patch('malwarebazaar.malwarebazaar.analyze',
-                       new=MagicMock(return_value={'test': 'val'})) as mock:
-                sys.argv = ["cmd", "input"]
-                malwarebazaar.main()
-                expected = '{"test": "val"}\n'
-                self.assertEqual(mock_cmd.getvalue(), expected)
-                mock.assert_called_once()
+        output = {"test": "val"}
+        config = {"api_key": "test_key"}
+        with patch('sys.stdout', new=StringIO()) as mock_stdout:
+            with patch('malwarebazaar.malwarebazaar.analyze', new=MagicMock(return_value=output)) as mock_analyze:
+                with patch('helpers.loadConfig', new=MagicMock(return_value=config)) as mock_config:
+                    sys.argv = ["cmd", "input"]
+                    malwarebazaar.main()
+                    expected = '{"test": "val"}\n'
+                    self.assertEqual(mock_stdout.getvalue(), expected)
+                    mock_analyze.assert_called_once()
+                    mock_config.assert_called_once()

    def test_isInJson_tail_greater_than_max_depth(self):
        max_depth = 1000
@@ -84,6 +80,7 @@ class TestMalwarebazaarMethods(unittest.TestCase):
        and then we compared results['summary'] with 'no result' """
        sendReqOutput = {'threat': 'no_result', "query_status": "ok",
                         'data': [{'sha256_hash': 'notavalidhash'}]}
+        config = {"api_key": "test_key"}
        input = '{"artifactType": "hash", "value": "1234"}'
        input2 = '{"artifactType": "tlsh", "value": "1234"}'
        input3 = '{"artifactType": "gimphash", "value": "1234"}'
@@ -94,9 +91,9 @@ class TestMalwarebazaarMethods(unittest.TestCase):
                   new=MagicMock(return_value=sendReqOutput)) as mock:
            with patch('malwarebazaar.malwarebazaar.prepareResults',
                       new=MagicMock(return_value=prep_res_sim)) as mock2:
-                results = malwarebazaar.analyze(input)
-                results2 = malwarebazaar.analyze(input2)
-                results3 = malwarebazaar.analyze(input3)
+                results = malwarebazaar.analyze(config, input)
+                results2 = malwarebazaar.analyze(config, input2)
+                results3 = malwarebazaar.analyze(config, input3)
                self.assertEqual(results["summary"], prep_res_sim['summary'])
                self.assertEqual(results2["summary"], prep_res_sim['summary'])
                self.assertEqual(results3["summary"], prep_res_sim['summary'])
@@ -113,6 +110,7 @@ class TestMalwarebazaarMethods(unittest.TestCase):
        and then we compared results['summary'] with 'no result' """
        sendReqOutput = {'threat': 'threat', "query_status": "notok", 'data': [
            {'sha256_hash': 'validhash'}]}
+        config = {"api_key": "test_key"}
        input = '{"artifactType": "hash", "value": "1234"}'
        input2 = '{"artifactType": "tlsh", "value": "1234"}'
        input3 = '{"artifactType": "gimphash", "value": "1234"}'
@@ -123,9 +121,9 @@ class TestMalwarebazaarMethods(unittest.TestCase):
                   new=MagicMock(return_value=sendReqOutput)) as mock:
            with patch('malwarebazaar.malwarebazaar.prepareResults',
                       new=MagicMock(return_value=prep_res_sim)) as mock2:
-                results = malwarebazaar.analyze(input)
-                results2 = malwarebazaar.analyze(input2)
-                results3 = malwarebazaar.analyze(input3)
+                results = malwarebazaar.analyze(config, input)
+                results2 = malwarebazaar.analyze(config, input2)
+                results3 = malwarebazaar.analyze(config, input3)
                self.assertEqual(results["summary"], prep_res_sim['summary'])
                self.assertEqual(results2["summary"], prep_res_sim['summary'])
                self.assertEqual(results3["summary"], prep_res_sim['summary'])
@@ -239,7 +237,18 @@ class TestMalwarebazaarMethods(unittest.TestCase):
    def test_sendReq(self):
        with patch('requests.post',
                   new=MagicMock(return_value=MagicMock())) as mock:
+            conf = {"api_key": "test_key"}
            response = malwarebazaar.sendReq(
-                {'baseUrl': 'https://www.randurl.xyz'}, 'example_data')
+                conf, {'baseUrl': 'https://www.randurl.xyz'}, 'example_data')
            self.assertIsNotNone(response)
            mock.assert_called_once()
+
+    def test_checkConfigRequirements_valid(self):
+        config = {"api_key": "test_key"}
+        self.assertTrue(malwarebazaar.checkConfigRequirements(config))
+
+    def test_checkConfigRequirements_missing_key(self):
+        config = {}
+        with self.assertRaises(SystemExit) as cm:
+            malwarebazaar.checkConfigRequirements(config)
+        self.assertEqual(cm.exception.code, 126)
Binary files not shown (21 files).
Some files were not shown because too many files have changed in this diff.