Merge remote-tracking branch 'remotes/origin/dev' into feature/suripillar

Author: m0duspwnens
Date: 2020-06-26 13:09:15 -04:00
20 changed files with 242 additions and 19 deletions

View File

@@ -44,6 +44,10 @@ firewall:
    ips:
      delete:
      insert:
  strelka_frontend:
    ips:
      delete:
      insert:
  syslog:
    ips:
      delete:
@@ -59,4 +63,4 @@ firewall:
  wazuh_authd:
    ips:
      delete:
      insert:
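For reference, every role in this pillar follows the same ips/delete/insert schema; a populated strelka_frontend entry would look something like the following (the address is purely illustrative, not part of the commit):

firewall:
  strelka_frontend:
    ips:
      delete:
      insert:
        - 10.1.2.3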

View File

@@ -38,6 +38,11 @@ do
      FULLROLE="beats_endpoint"
      SKIP=1
      ;;
    f)
      FULLROLE="strelka_frontend"
      SKIP=1
      ;;
    i) IP=$OPTARG
      ;;
    o)
@@ -72,6 +77,7 @@ if [ "$SKIP" -eq 0 ]; then
  echo ""
  echo "[a] - Analyst - ports 80/tcp and 443/tcp"
  echo "[b] - Logstash Beat - port 5044/tcp"
  echo "[f] - Strelka frontend - port 57314/tcp"
  echo "[o] - Osquery endpoint - port 8090/tcp"
  echo "[s] - Syslog device - 514/tcp/udp"
  echo "[w] - Wazuh agent - port 1514/tcp/udp"
@@ -86,6 +92,8 @@ if [ "$SKIP" -eq 0 ]; then
    FULLROLE=analyst
  elif [ "$ROLE" == "b" ]; then
    FULLROLE=beats_endpoint
  elif [ "$ROLE" == "f" ]; then
    FULLROLE=strelka_frontend
  elif [ "$ROLE" == "o" ]; then
    FULLROLE=osquery_endpoint
  elif [ "$ROLE" == "w" ]; then
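Together these hunks wire the new role into both paths: -f on the command line, or choice [f] from the interactive menu. Assuming so-allow keeps the getopts interface shown above, granting a host access to the Strelka frontend non-interactively would be something like:

# -f selects the strelka_frontend role; -i supplies the address (illustrative IP)
sudo so-allow -f -i 10.1.2.3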

View File

@@ -50,7 +50,7 @@ done
if [ $SKIP -ne 1 ]; then
  # List indices
  echo
  curl {{ MASTERIP }}:9200/_cat/indices?v&pretty
  curl {{ MASTERIP }}:9200/_cat/indices?v
  echo
  # Inform user we are about to delete all data
  echo
@@ -63,18 +63,54 @@ if [ $SKIP -ne 1 ]; then
  if [ "$INPUT" != "AGREE" ] ; then exit 0; fi
fi
/usr/sbin/so-filebeat-stop
/usr/sbin/so-logstash-stop
# Check to see if Logstash/Filebeat are running
LS_ENABLED=$(so-status | grep logstash)
FB_ENABLED=$(so-status | grep filebeat)
EA_ENABLED=$(so-status | grep elastalert)
if [ ! -z "$FB_ENABLED" ]; then
  /usr/sbin/so-filebeat-stop
fi
if [ ! -z "$LS_ENABLED" ]; then
  /usr/sbin/so-logstash-stop
fi
if [ ! -z "$EA_ENABLED" ]; then
  /usr/sbin/so-elastalert-stop
fi
# Delete data
echo "Deleting data..."
INDXS=$(curl -s -XGET {{ MASTERIP }}:9200/_cat/indices?v | egrep 'logstash|elastalert' | awk '{ print $3 }')
INDXS=$(curl -s -XGET {{ MASTERIP }}:9200/_cat/indices?v | egrep 'logstash|elastalert|so-' | awk '{ print $3 }')
for INDX in ${INDXS}
do
  curl -XDELETE "{{ MASTERIP }}:9200/${INDX}" > /dev/null 2>&1
done
/usr/sbin/so-logstash-start
/usr/sbin/so-filebeat-start
# Start Logstash/Filebeat
if [ ! -z "$FB_ENABLED" ]; then
  /usr/sbin/so-filebeat-start
fi
if [ ! -z "$LS_ENABLED" ]; then
  /usr/sbin/so-logstash-start
fi
if [ ! -z "$EA_ENABLED" ]; then
  /usr/sbin/so-elastalert-start
fi
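Beyond guarding the stops and starts so only services that so-status actually reports get bounced, the egrep pattern now also matches the newer so-* index names. A quick post-run check (placeholder as in the script) should come back empty:

curl -s "{{ MASTERIP }}:9200/_cat/indices?v" | egrep 'logstash|elastalert|so-'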

View File

@@ -21,8 +21,8 @@ clone_to_tmp() {
  # Make a temp location for the files
  mkdir /tmp/sogh
  cd /tmp/sogh
  #git clone -b dev https://github.com/Security-Onion-Solutions/securityonion-saltstack.git
  git clone https://github.com/Security-Onion-Solutions/securityonion-saltstack.git
  #git clone -b dev https://github.com/Security-Onion-Solutions/securityonion.git
  git clone https://github.com/Security-Onion-Solutions/securityonion.git
  cd /tmp
}

View File

@@ -0,0 +1,84 @@
#!/bin/bash

output_dir="/opt/so/saltstack/default/salt/strelka/rules"
#mkdir -p $output_dir
repos="$output_dir/repos.txt"
ignorefile="$output_dir/ignore.txt"

deletecounter=0
newcounter=0
updatecounter=0

gh_status=$(curl -s -o /dev/null -w "%{http_code}" http://github.com)

if [ "$gh_status" == "200" ] || [ "$gh_status" == "301" ]; then
  while IFS= read -r repo; do
    # Remove old repo if existing bc of previous error condition or unexpected disruption
    repo_name=`echo $repo | awk -F '/' '{print $NF}'`
    [ -d $repo_name ] && rm -rf $repo_name

    # Clone repo and make appropriate directories for rules
    git clone $repo
    echo "Analyzing rules from $repo_name..."
    mkdir -p $output_dir/$repo_name
    [ -f $repo_name/LICENSE ] && cp $repo_name/LICENSE $output_dir/$repo_name

    # Copy over rules
    for i in $(find $repo_name -name "*.yar*"); do
      rule_name=$(echo $i | awk -F '/' '{print $NF}')
      repo_sum=$(sha256sum $i | awk '{print $1}')
      # Check rules against those in ignore list -- don't copy if ignored.
      if ! grep -iq $rule_name $ignorefile; then
        existing_rules=$(find $output_dir/$repo_name/ -name $rule_name | wc -l)
        # For existing rules, check to see if they need to be updated, by comparing checksums
        if [ $existing_rules -gt 0 ]; then
          local_sum=$(sha256sum $output_dir/$repo_name/$rule_name | awk '{print $1}')
          if [ "$repo_sum" != "$local_sum" ]; then
            echo "Checksums do not match!"
            echo "Updating $rule_name..."
            cp $i $output_dir/$repo_name
            ((updatecounter++))
          fi
        else
          # If rule doesn't exist already, we'll add it
          echo "Adding new rule: $rule_name..."
          cp $i $output_dir/$repo_name
          ((newcounter++))
        fi
      fi
    done

    # Check to see if we have any old rules that need to be removed
    for i in $(find $output_dir/$repo_name -name "*.yar*" | awk -F '/' '{print $NF}'); do
      is_repo_rule=$(find $repo_name -name "$i" | wc -l)
      if [ $is_repo_rule -eq 0 ]; then
        echo "Could not find $i in source $repo_name repo...removing from $output_dir/$repo_name..."
        rm $output_dir/$repo_name/$i
        ((deletecounter++))
      fi
    done
    #rm -rf $repo_name
  done < $repos

  echo "Done!"
  if [ "$newcounter" -gt 0 ]; then
    echo "$newcounter new rules added."
  fi
  if [ "$updatecounter" -gt 0 ]; then
    echo "$updatecounter rules updated."
  fi
  if [ "$deletecounter" -gt 0 ]; then
    echo "$deletecounter rules removed because they were deprecated or don't exist in the source repo."
  fi
else
  echo "Server returned $gh_status status code."
  echo "No connectivity to Github...exiting..."
  exit 1
fi
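Operationally, this script reads one Git repository URL per line from repos.txt, clones each repo, and syncs every *.yar/*.yara file into the Salt-served rules directory, skipping anything named in ignore.txt; the counters report how many rules were added, updated, or pruned. Both control files are seeded later in this commit, so a manual run is simply:

# repos.txt lists one repo URL per line (this commit seeds it with
# https://github.com/Neo23x0/signature-base); ignore.txt lists rule
# filenames to skip, one per line.
sudo so-yara-update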

View File

@@ -30,6 +30,7 @@
    { "rename": { "field": "message2.extracted", "target_field": "file.extracted.filename", "ignore_missing": true } },
    { "rename": { "field": "message2.extracted_cutoff", "target_field": "file.extracted.cutoff", "ignore_missing": true } },
    { "rename": { "field": "message2.extracted_size", "target_field": "file.extracted.size", "ignore_missing": true } },
    { "set": { "field": "dataset", "value": "file" } },
    { "pipeline": { "name": "zeek.common" } }
  ]
}
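The added set processor stamps each record with dataset: file before handing off to zeek.common, which is what the updated Hunt queries later in this commit key on. One way to sanity-check it, assuming the pipeline is registered under a name like zeek.file (the name is not shown in this diff) and zeek.common is also loaded:

curl -s -XPOST "localhost:9200/_ingest/pipeline/zeek.file/_simulate" \
  -H 'Content-Type: application/json' \
  -d '{ "docs": [ { "_source": { "message2": { "extracted": "HTTP-Fabc123.exe" } } } ] }'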

View File

@@ -52,6 +52,9 @@ role:
  osquery_endpoint:
    portgroups:
      - {{ portgroups.fleet_api }}
  strelka_frontend:
    portgroups:
      - {{ portgroups.strelka_frontend }}
  syslog:
    portgroups:
      - {{ portgroups.syslog }}
@@ -466,4 +469,4 @@ role:
      - {{ portgroups.all }}
  localhost:
    portgroups:
      - {{ portgroups.all }}

View File

@@ -74,6 +74,9 @@ firewall:
  ssh:
    tcp:
      - 22
  strelka_frontend:
    tcp:
      - 57314
  syslog:
    tcp:
      - 514
@@ -89,4 +92,4 @@ firewall:
      - 55000
  wazuh_authd:
    tcp:
      - 1515
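Port 57314/tcp here backs the strelka_frontend portgroup referenced by the firewall role earlier in this commit. Once the rules apply, reachability from an allowed host can be spot-checked with:

# Illustrative check from a host granted the strelka_frontend role:
nc -zv <master-ip> 57314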

View File

@@ -107,8 +107,8 @@
{ "name": "DNS", "description": "DNS queries grouped by response code", "query": "event.dataset:dns | groupby dns.response.code_name destination.port"},
{ "name": "DNS", "description": "DNS highest registered domain", "query": "event.dataset:dns | groupby dns.highest_registered_domain.keyword destination.port"},
{ "name": "DNS", "description": "DNS grouped by parent domain", "query": "event.dataset:dns | groupby dns.parent_domain.keyword destination.port"},
{ "name": "Files", "description": "Files grouped by mimetype", "query": "event.dataset:files | groupby file.mime_type source.ip"},
{ "name": "Files", "description": "Files grouped by source", "query": "event.dataset:files | groupby file.source source.ip"},
{ "name": "Files", "description": "Files grouped by mimetype", "query": "event.dataset:file | groupby file.mime_type source.ip"},
{ "name": "Files", "description": "Files grouped by source", "query": "event.dataset:file | groupby file.source source.ip"},
{ "name": "FTP", "description": "FTP grouped by argument", "query": "event.dataset:ftp | groupby ftp.argument"},
{ "name": "FTP", "description": "FTP grouped by command", "query": "event.dataset:ftp | groupby ftp.command"},
{ "name": "FTP", "description": "FTP grouped by username", "query": "event.dataset:ftp | groupby ftp.user"},

View File

@@ -4,6 +4,7 @@
[es]
es_url = http://{{MASTER}}:9200
es_ip = {{MASTER}}
es_user = YOURESUSER
es_pass = YOURESPASS
es_index_pattern = so-*
@@ -62,6 +63,7 @@ slack_webhook = YOURSLACKWEBHOOK
playbook_url = https://{{MASTER}}/playbook
playbook_key = de6639318502476f2fa5aa06f43f51fb389a3d7f
playbook_verifycert = no
playbook_unit_test_index = playbook-testing
[log]
logfile = /var/log/SOCtopus/soctopus.log

View File

@@ -15,6 +15,7 @@
{%- set MASTER = grains['master'] %}
{%- set MASTERIP = salt['pillar.get']('static:masterip', '') %}
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
{%- set STRELKA_RULES = salt['pillar.get']('strelka:rules', '1') -%}
# Strelka config
strelkaconfdir:
@@ -24,6 +25,13 @@ strelkaconfdir:
    - group: 939
    - makedirs: True

strelkarulesdir:
  file.directory:
    - name: /opt/so/conf/strelka/rules
    - user: 939
    - group: 939
    - makedirs: True

# Sync dynamic config to conf dir
strelkasync:
  file.recurse:
@@ -33,6 +41,21 @@ strelkasync:
    - group: 939
    - template: jinja

{%- if STRELKA_RULES == 1 %}
strelka_yara_update:
  cron.present:
    - user: root
    - name: '[ -d /opt/so/saltstack/default/salt/strelka/rules/ ] && /usr/sbin/so-yara-update > /dev/null 2>&1'
    - hour: '7'

strelkarules:
  file.recurse:
    - name: /opt/so/conf/strelka/rules
    - source: salt://strelka/rules
    - user: 939
    - group: 939
{%- endif %}

strelkadatadir:
  file.directory:
    - name: /nsm/strelka
@@ -87,7 +110,7 @@ strelka_backend:
    - image: {{ MASTER }}:5000/soshybridhunter/so-strelka-backend:{{ VERSION }}
    - binds:
      - /opt/so/conf/strelka/backend/:/etc/strelka/:ro
      - /opt/so/conf/strelka/backend/yara:/etc/yara/:ro
      - /opt/so/conf/strelka/rules/:/etc/yara/:ro
    - name: so-strelka-backend
    - command: strelka-backend
    - restart_policy: on-failure
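The moving pieces here: a root cron job refreshes the rule sources on the master each morning, the strelkarules file.recurse state fans them out to /opt/so/conf/strelka/rules, and the backend container now mounts that directory as /etc/yara. After a highstate, the cron entry can be confirmed with:

# Note that Salt's cron.present leaves unspecified time fields as '*',
# so with only hour set this fires throughout the 07:00 hour.
crontab -l -u root | grep so-yara-update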

View File

@@ -0,0 +1,4 @@
generic_anomalies.yar
general_cloaking.yar
thor_inverse_matches.yar
yara_mixed_ext_vars.yar

View File

@@ -0,0 +1 @@
https://github.com/Neo23x0/signature-base

View File

@@ -0,0 +1,6 @@
#!/bin/bash
# Gzip the eve logs
find /nsm/suricata/eve*.json -type f -printf '%T@\t%p\n' | sort -t $'\t' -g | head -n -1 | cut -d $'\t' -f 2- | xargs nice gzip
# TODO Add stats log
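The one-liner packs several steps together: it lists each eve log with its mtime, sorts oldest-first, drops the newest file (Suricata is still writing to it), and gzips the rest at reduced CPU priority. An equivalent spelled-out form:

find /nsm/suricata/eve*.json -type f -printf '%T@\t%p\n' |  # emit "<mtime><TAB><path>"
  sort -t $'\t' -g |    # numeric sort on the mtime column, oldest first
  head -n -1 |          # drop the last line, i.e. the newest log
  cut -d $'\t' -f 2- |  # strip the mtime column, keep just the path
  xargs nice gzip       # compress the remainder at low priority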

View File

@@ -71,6 +71,21 @@ surirulesync:
    - user: 940
    - group: 940

surilogscript:
  file.managed:
    - name: /usr/local/bin/surilogcompress
    - source: salt://suricata/cron/surilogcompress
    - mode: 755

/usr/local/bin/surilogcompress:
  cron.present:
    - user: suricata
    - minute: '17'
    - hour: '*'
    - daymonth: '*'
    - month: '*'
    - dayweek: '*'

suriconfigsync:
  file.managed:
    - name: /opt/so/conf/suricata/suricata.yaml
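Because the state ID is the script's install path, cron.present manages that exact command for the suricata user; with minute 17 and wildcards everywhere else it runs hourly. The rendered entry should be approximately:

# 17 * * * * /usr/local/bin/surilogcompress
crontab -l -u suricata | grep surilogcompress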

View File

@@ -15,7 +15,9 @@ if [ $CHECKIT == 2 ]; then
else
  CURRENTPACKETS=${RESULT[9]}
  PASTPACKETS=${RESULT[19]}
  TOTAL=$(($CURRENTPACKETS - $PASTPACKETS))
  TOTALCURRENT=$(($CURRENTPACKETS + $CURRENTDROP))
  TOTALPAST=$(($PASTPACKETS + $PASTDROP))
  TOTAL=$(($TOTALCURRENT - $TOTALPAST))
  LOSS=$(echo $DROPPED $TOTAL / p | dc)
  echo "suridrop drop=$LOSS"
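The recalculation fixes the denominator: the old TOTAL counted only received packets, understating loss because dropped packets never increment the received counter (CURRENTDROP, PASTDROP, and DROPPED come from the elided context above). A worked example with hypothetical counters:

# Hypothetical counters for one polling interval:
CURRENTPACKETS=1100000; CURRENTDROP=10000
PASTPACKETS=100000; PASTDROP=1000
# Old denominator, received packets only:
echo $(( CURRENTPACKETS - PASTPACKETS ))                              # 1000000
# New denominator, received plus dropped on both samples:
echo $(( (CURRENTPACKETS + CURRENTDROP) - (PASTPACKETS + PASTDROP) )) # 1009000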

View File

@@ -7,7 +7,7 @@
{%- set DOMAINSTATS = salt['pillar.get']('master:domainstats', '0') -%}
{%- set FLEETMASTER = salt['pillar.get']('static:fleet_master', False) -%}
{%- set FLEETNODE = salt['pillar.get']('static:fleet_node', False) -%}
{%- set STRELKA = salt['pillar.get']('static:strelka', '0') -%}
{%- set STRELKA = salt['pillar.get']('strelka:enabled', '0') -%}
base:

View File

@@ -953,9 +953,11 @@ master_static() {
    " fleet_hostname: N/A"\
    " fleet_ip: N/A"\
    " sensoronikey: $SENSORONIKEY"\
    " strelka: $STRELKA"\
    " wazuh: $WAZUH"\
    " masterupdate: $MASTERUPDATES"\
    " wazuh: $WAZUH"\
    " masterupdate: $MASTERUPDATES"\
    "strelka:"\
    " enabled: $STRELKA"\
    " rules: $STRELKARULES"\
    "elastic:"\
    " features: False" > "$static_pillar"
@@ -1620,3 +1622,7 @@ es_heapsize() {
    export NODE_ES_HEAP_SIZE
  fi
}

strelka_yara_update() {
  so-yara-update
}
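These two hunks give strelka its own pillar section (matching the strelka:enabled lookup added to top.sls earlier in this commit) and wrap so-yara-update so setup can invoke it by function name. With both options enabled, the pillar emitted by the heredoc above would look roughly like:

strelka:
  enabled: 1
  rules: 1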

View File

@@ -255,6 +255,12 @@ fi
if [[ $is_master ]]; then
  whiptail_components_adv_warning
  whiptail_enable_components
  if [[ $STRELKA == 1 ]]; then
    whiptail_strelka_rules
    if [[ $STRELKARULES == 1 ]]; then
      strelka_yara_update
    fi
  fi
  collect_webuser_inputs
  get_redirect
fi

View File

@@ -1028,6 +1028,25 @@ whiptail_shard_count() {
}

whiptail_strelka_rules() {
  [ -n "$TESTING" ] && return

  whiptail --title "Security Onion Setup" --yesno "Do you want to enable the default YARA rules for Strelka?" 8 75

  local exitstatus=$?
  if [ $exitstatus == 0 ]; then
    export STRELKARULES=1
  else
    local exitstatus=$?
    whiptail_check_exitstatus $exitstatus
    export STRELKARULES
  fi
}

whiptail_suricata_pins() {
  [ -n "$TESTING" ] && return