Merge branch 'dev' into feature/turbo-proxy

# Conflicts:
#	setup/so-functions
William Wernert committed 2020-05-26 16:37:08 -04:00
17 changed files with 213 additions and 40 deletions

View File

@@ -28,20 +28,83 @@ salttmp:
- group: 939
- makedirs: True
# Install packages needed for the sensor
sensorpkgs:
# Install epel
{% if grains['os'] == 'CentOS' %}
epel:
pkg.installed:
- skip_suggestions: False
- skip_suggestions: True
- pkgs:
- epel-release
{% endif %}
# Install common packages
{% if grains['os'] != 'CentOS' %}
commonpkgs:
pkg.installed:
- skip_suggestions: True
- pkgs:
- apache2-utils
- wget
- ntpdate
- jq
- python3-docker
- docker-ce
- curl
- ca-certificates
- software-properties-common
- apt-transport-https
- openssl
- netcat
- python3-mysqldb
- sqlite3
- argon2
- libssl-dev
- python3-dateutil
- python3-m2crypto
heldpackages:
pkg.installed:
- pkgs:
- containerd.io: 1.2.13-2
- docker-ce: 5:19.03.9~3-0~ubuntu-bionic
- hold: True
- update_holds: True
{% else %}
commonpkgs:
pkg.installed:
- skip_suggestions: True
- pkgs:
- wget
- ntpdate
- bind-utils
- jq
- net-tools
- tcpdump
- httpd-tools
- curl
- sqlite
- argon2
- mariadb-devel
- nmap-ncat
- python3
- python36-docker
- python36-dateutil
- python36-m2crypto
- python36-mysql
- yum-utils
- device-mapper-persistent-data
- lvm2
- openssl
heldpackages:
pkg.installed:
- pkgs:
- containerd.io: 1.2.13-3.2.el7
- docker-ce: 3:19.03.9-3.el7
- hold: True
- update_holds: True
{% endif %}
# Always keep these packages up to date
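
This hunk pins containerd.io and docker-ce to known-good versions and asks Salt to hold them: on Ubuntu, Salt implements the hold via apt-mark, and on CentOS it relies on the yum versionlock plugin. A quick manual check that the pins took effect (standard apt/yum commands, not part of this diff):

    apt-mark showhold        # Ubuntu: should list containerd.io and docker-ce
    yum versionlock list     # CentOS: should list containerd.io-1.2.13-3.2.el7 and docker-ce-3:19.03.9-3.el7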

View File

@@ -166,8 +166,7 @@ cat << EOF
What elasticsearch index do you want to use?
Below are the default Index Patterns used in Security Onion:
*:logstash-*
*:logstash-beats-*
*:so-ids-*
*:elastalert_status*
EOF

View File

@@ -24,9 +24,8 @@ actions:
disable_action: False
filters:
- filtertype: pattern
kind: prefix
value: logstash-
exclude:
kind: regex
value: '^(logstash-.*|so-.*)$'
- filtertype: age
source: name
direction: older
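
Switching the filtertype from a prefix match to a regex lets one curator action cover both logstash- and so- indices. The pattern can be sanity-checked against sample index names (grep -E shown here; the names are made up for illustration):

    printf 'logstash-ids-2020.05.26\nso-zeek-2020.05.26\nelastalert_status\n' | grep -E '^(logstash-.*|so-.*)$'
    # matches the first two names only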

View File

@@ -20,8 +20,8 @@ actions:
disable_action: False
filters:
- filtertype: pattern
kind: prefix
value: logstash-
kind: regex
value: '^(logstash-.*|so-.*)$'
- filtertype: space
source: creation_date
use_age: True

View File

@@ -33,17 +33,17 @@ LOG="/opt/so/log/curator/so-curator-closed-delete.log"
# Check for 2 conditions:
# 1. Are Elasticsearch indices using more disk space than LOG_SIZE_LIMIT?
# 2. Are there any closed logstash- indices that we can delete?
# 2. Are there any closed logstash- or so- indices that we can delete?
# If both conditions are true, keep on looping until one of the conditions is false.
while [[ $(du -hs --block-size=1GB /nsm/elasticsearch/nodes | awk '{print $1}' ) -gt "{{LOG_SIZE_LIMIT}}" ]] &&
curl -s {{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices | grep "^ close logstash-" > /dev/null; do
curl -s {{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices | grep -E "^ close (logstash-|so-)" > /dev/null; do
# We need to determine OLDEST_INDEX.
# First, get the list of closed indices that are prefixed with "logstash-".
# First, get the list of closed indices that are prefixed with "logstash-" or "so-".
# For example: logstash-ids-YYYY.MM.DD
# Then, sort by date by telling sort to use hyphen as delimiter and then sort on the third field.
# Finally, select the first entry in that sorted list.
OLDEST_INDEX=$(curl -s {{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices | grep "^ close logstash-" | awk '{print $2}' | sort -t- -k3 | head -1)
OLDEST_INDEX=$(curl -s {{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices | grep -E "^ close (logstash-|so-)" | awk '{print $2}' | sort -t- -k3 | head -1)
# Now that we've determined OLDEST_INDEX, ask Elasticsearch to delete it.
curl -XDELETE {{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/${OLDEST_INDEX}
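
The OLDEST_INDEX selection assumes the date is the third hyphen-delimited field (logstash-ids-YYYY.MM.DD). That sort step can be exercised on its own with hypothetical index names:

    printf 'logstash-ids-2020.05.26\nlogstash-ids-2020.05.24\nso-zeek-2020.05.25\n' | sort -t- -k3 | head -1
    # prints logstash-ids-2020.05.24, the oldest by date field

Note that an index name containing an extra hyphen would shift the date out of field 3 and skew the sort.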

View File

@@ -40,7 +40,7 @@ hive_alert_config:
title: '{match[rule][name]}'
type: 'NIDS'
source: 'SecurityOnion'
description: "`Hunting Pivot:` \n\n <https://{{MASTER}}/#/hunt?q=event.module%3A%20suricata%20AND%20rule.uuid%3A{match[rule][uuid]}%20%7C%20groupby%20source.ip%20destination.ip%20rule.name> \n\n `Kibana Dashboard:` \n\n <https://{{MASTER}}/kibana/app/kibana#/dashboard/ed6f7e20-e060-11e9-8f0c-2ddbf5ed9290?_g=(refreshInterval:(display:Off,pause:!f,value:0),time:(from:now-24h,mode:quick,to:now))&_a=(columns:!(_source),index:'*:logstash-*',interval:auto,query:(query_string:(analyze_wildcard:!t,query:'sid:')),sort:!('@timestamp',desc))> \n\n `IPs: `{match[source][ip]}:{match[source][port]} --> {match[destination][ip]}:{match[destination][port]} \n\n `Signature:`{match[rule][rule]}"
description: "`Hunting Pivot:` \n\n <https://{{MASTER}}/#/hunt?q=event.module%3A%20suricata%20AND%20rule.uuid%3A{match[rule][uuid]}%20%7C%20groupby%20source.ip%20destination.ip%20rule.name> \n\n `Kibana Dashboard - Signature Drilldown:` \n\n <https://{{MASTER}}/kibana/app/kibana#/dashboard/ed6f7e20-e060-11e9-8f0c-2ddbf5ed9290?_g=(refreshInterval:(display:Off,pause:!f,value:0),time:(from:now-24h,mode:quick,to:now))&_a=(columns:!(_source),index:'*:logstash-*',interval:auto,query:(query_string:(analyze_wildcard:!t,query:'sid:')),sort:!('@timestamp',desc))> \n\n `Kibana Dashboard - Community_ID:` \n\n <https://{{MASTER}}/kibana/app/kibana#/dashboard/30d0ac90-729f-11ea-8dd2-9d8795a1200b?_g=(filters:!(('$state':(store:globalState),meta:(alias:!n,disabled:!f,index:'*:so-*',key:network.community_id,negate:!f,params:(query:'{match[network][community_id]}'),type:phrase),query:(match_phrase:(network.community_id:'{match[network][community_id]}')))),refreshInterval:(pause:!t,value:0),time:(from:now-7d,to:now))> \n\n `IPs: `{match[source][ip]}:{match[source][port]} --> {match[destination][ip]}:{match[destination][port]} \n\n `Signature:`{match[rule][rule]}"
severity: 2
tags: ['{match[rule][uuid]}','{match[source][ip]}','{match[destination][ip]}']
tlp: 3

View File

@@ -22,3 +22,7 @@ transport.bind_host: 0.0.0.0
transport.publish_host: {{ nodeip }}
transport.publish_port: 9300
{%- endif %}
cluster.routing.allocation.disk.threshold_enabled: true
cluster.routing.allocation.disk.watermark.low: 95%
cluster.routing.allocation.disk.watermark.high: 98%
cluster.routing.allocation.disk.watermark.flood_stage: 98%
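
These watermarks are set well above the Elasticsearch defaults (85/90/95%), deferring relocation and read-only index blocks until the disk is nearly full. On a running node the effective values can be confirmed over the REST API (localhost:9200 assumed here):

    curl -s 'localhost:9200/_cluster/settings?include_defaults=true&flat_settings=true' | grep watermark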

View File

@@ -38,7 +38,7 @@
{ "rename": { "field": "module", "target_field": "event.module", "ignore_missing": true } },
{ "rename": { "field": "dataset", "target_field": "event.dataset", "ignore_missing": true } },
{ "rename": { "field": "category", "target_field": "event.category", "ignore_missing": true } },
{ "rename": { "field": "message2.community_id", "target_field": "network.community_id", "ignore_missing": true } },
{ "rename": { "field": "message2.community_id", "target_field": "network.community_id", "ignore_failure": true, "ignore_missing": true } },
{
"remove": {
"field": [ "index_name_prefix", "message2", "type" ],

View File

@@ -31,7 +31,7 @@
{ "rename": { "field": "message3.columns.remote_port", "target_field": "remote.port", "ignore_missing": true } },
{ "rename": { "field": "message3.columns.process_name", "target_field": "process.name", "ignore_missing": true } },
{ "rename": { "field": "message3.columns.eventid", "target_field": "event.code", "ignore_missing": true } },
{ "set": { "if": "ctx.message3.columns.?data != null", "field": "dataset", "value": "wel-{{message3.columns.source}}", "override": true } },
{ "set": { "if": "ctx.message3.columns?.data != null", "field": "dataset", "value": "wel-{{message3.columns.source}}", "override": true } },
{ "rename": { "field": "message3.columns.winlog.EventData.SubjectUserName", "target_field": "user.name", "ignore_missing": true } },
{ "rename": { "field": "message3.columns.winlog.EventData.destinationHostname", "target_field": "destination.hostname", "ignore_missing": true } },
{ "rename": { "field": "message3.columns.winlog.EventData.destinationIp", "target_field": "destination.ip", "ignore_missing": true } },

View File

@@ -15,6 +15,7 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
RETURN_CODE=0
ELASTICSEARCH_HOST=$1
ELASTICSEARCH_PORT=9200
@@ -46,7 +47,9 @@ fi
cd ${ELASTICSEARCH_INGEST_PIPELINES}
echo "Loading pipelines..."
for i in *; do echo $i; curl ${ELASTICSEARCH_AUTH} -XPUT http://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}/_ingest/pipeline/$i -H 'Content-Type: application/json' -d@$i 2>/dev/null; echo; done
for i in *; do echo $i; RESPONSE=$(curl ${ELASTICSEARCH_AUTH} -XPUT http://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}/_ingest/pipeline/$i -H 'Content-Type: application/json' -d@$i 2>/dev/null); echo $RESPONSE; if [[ "$RESPONSE" == *"error"* ]]; then RETURN_CODE=1; fi; done
echo
cd - >/dev/null
exit $RETURN_CODE
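
Capturing each curl response means a single failed pipeline upload now flips RETURN_CODE instead of being silently swallowed, so callers can react to partial failures. A hedged usage sketch, assuming the script is installed as so-elasticsearch-pipelines (the file name is not shown in this diff):

    if ! so-elasticsearch-pipelines localhost; then
      echo 'one or more ingest pipelines failed to load' >&2
    fi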

View File

@@ -32,7 +32,7 @@
"dateRangeMinutes": 1440,
"mostRecentlyUsedLimit": 5,
"eventFields": {
"default": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "log.id.uid" ],
"default": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "log.id.uid", "network.community_id" ],
"bro_conn": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "protocol", "service", "log.id.uid" ],
"bro_dce_rpc": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "endpoint", "named_pipe", "operation", "log.id.uid" ],
"bro_dhcp": ["soc_timestamp", "source.ip", "destination.ip", "domain_name", "hostname", "message_types", "log.id.uid" ],

View File

@@ -1,9 +1,9 @@
{%- set ip = salt['pillar.get']('static:masterip', '') %}
{%- set MASTER = salt['pillar.get']('master:url_base', '') %}
{%- set HIVEKEY = salt['pillar.get']('static:hivekey', '') %}
{%- set CORTEXKEY = salt['pillar.get']('static:cortexorguserkey', '') %}
[es]
es_url = http://{{ip}}:9200
es_url = http://{{MASTER}}:9200
es_user = YOURESUSER
es_pass = YOURESPASS
es_index_pattern = so-*
@@ -11,7 +11,7 @@ es_verifycert = no
[cortex]
auto_analyze_alerts = no
cortex_url = https://{{ip}}/cortex/
cortex_url = https://{{MASTER}}/cortex/
cortex_key = {{ CORTEXKEY }}
supported_analyzers = Urlscan_io_Search,CERTatPassiveDNS
@@ -32,7 +32,7 @@ grr_user = YOURGRRUSER
grr_pass = YOURGRRPASS
[hive]
hive_url = https://{{ip}}/thehive/
hive_url = https://{{MASTER}}/thehive/
hive_key = {{ HIVEKEY }}
hive_tlp = 3
hive_verifycert = no
@@ -59,7 +59,7 @@ slack_url = YOURSLACKWORKSPACE
slack_webhook = YOURSLACKWEBHOOK
[playbook]
playbook_url = https://{{ip}}/playbook
playbook_url = https://{{MASTER}}/playbook
playbook_key = de6639318502476f2fa5aa06f43f51fb389a3d7f
playbook_verifycert = no
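
Building these URLs from the master's url_base pillar rather than its raw IP keeps the links valid on hostname-based deployments. The value substituted for {{MASTER}} can be inspected on a minion with salt-call:

    salt-call pillar.get master:url_base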

View File

@@ -112,5 +112,5 @@ strelka_filestream:
strelka_zeek_extracted_sync:
cron.present:
- user: root
- name: mv /nsm/zeek/extracted/complete/* /nsm/strelka
- name: [ -d /nsm/zeek/extracted/complete/ ] && mv /nsm/zeek/extracted/complete/* /nsm/strelka/ > /dev/null 2>&1
- minute: '*'
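
The directory test stops cron from erroring every minute when Zeek has not created the extraction directory yet; an empty directory still leaves the glob unexpanded, which is what the stderr redirect swallows. A stricter variant that also handles the empty case cleanly (illustrative, GNU findutils/coreutils assumed, not part of this diff):

    find /nsm/zeek/extracted/complete/ -mindepth 1 -maxdepth 1 -exec mv -t /nsm/strelka/ {} +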

View File

@@ -0,0 +1,75 @@
#!/bin/bash
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
TESTING=true
address_type=DHCP
ADMINUSER=onionuser
ADMINPASS1=onionuser
ADMINPASS2=onionuser
BASICBRO=7
BASICSURI=7
# BLOGS=
BNICS=eth1
BROVERSION=ZEEK
# CURCLOSEDAYS=
# EVALADVANCED=BASIC
GRAFANA=1
# HELIXAPIKEY=
HNMASTER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
HNSENSOR=inherit
HOSTNAME=standalone
install_type=STANDALONE
# LSINPUTBATCHCOUNT=
# LSINPUTTHREADS=
# LSPIPELINEBATCH=
# LSPIPELINEWORKERS=
MASTERADV=BASIC
MASTERUPDATES=1
# MDNS=
# MGATEWAY=
# MIP=
# MMASK=
MNIC=eth0
# MSEARCH=
# MSRV=
# MTU=
NAVIGATOR=1
NIDS=Suricata
# NODE_ES_HEAP_SIZE=
# NODE_LS_HEAP_SIZE=
NODESETUP=NODEBASIC
NSMSETUP=BASIC
NODEUPDATES=MASTER
# OINKCODE=
OSQUERY=1
# PATCHSCHEDULEDAYS=
# PATCHSCHEDULEHOURS=
PATCHSCHEDULENAME=auto
PLAYBOOK=1
# REDIRECTHOST=
REDIRECTINFO=IP
RULESETUP=ETOPEN
# SHARDCOUNT=
SOREMOTEPASS1=onionuser
SOREMOTEPASS2=onionuser
STRELKA=1
THEHIVE=1
WAZUH=1
WEBUSER=onionuser@somewhere.invalid
WEBPASSWD1=onionuser
WEBPASSWD2=onionuser
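
This new file preseeds every setup prompt so an install can run unattended; TESTING=true also makes the completion dialogs return immediately (see whiptail_setup_complete below). Judging from the so-setup changes later in this commit, it would be invoked by passing the file's basename as the second argument, something like the following (assuming the file is saved as automation/standalone under the setup directory; the path is not shown in this diff):

    ./so-setup iso standalone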

View File

@@ -19,7 +19,7 @@ source ./so-whiptail
source ./so-variables
source ./so-common-functions
SOVERSION=1.3.0
SOVERSION=1.4.0
accept_salt_key_remote() {
systemctl restart salt-minion
@@ -514,7 +514,7 @@ detect_os() {
# Install bind-utils so the host command exists
if ! command -v host > /dev/null 2>&1; then
echo "Installing required packages to run installer"
yum -y install bind-utils >> "$setup_log" 2>&1
yum -y install bind-utils yum-plugin-versionlock >> "$setup_log" 2>&1
fi
@@ -583,7 +583,9 @@ docker_install() {
{
yum clean expire-cache;
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo;
yum -y install docker-ce;
yum -y install docker-ce-19.03.9-3.el7 containerd.io-1.2.6-3.el7;
yum versionlock docker-ce-19.03.9-3.el7;
yum versionlock containerd.io-1.2.6-3.el7
} >> "$setup_log" 2>&1
else
@@ -687,7 +689,7 @@ docker_seed_registry() {
# Tag it with the new registry destination
docker tag soshybridhunter/"$i" "$HOSTNAME":5000/soshybridhunter/"$i"
docker push "$HOSTNAME":5000/soshybridhunter/"$i"
docker rmi soshybridhunter/"$i"
#docker rmi soshybridhunter/"$i"
} >> "$setup_log" 2>&1
done
else
@@ -1120,7 +1122,7 @@ saltify() {
yum -y update exclude=salt*;
systemctl enable salt-minion;
} >> "$setup_log" 2>&1
echo "exclude=salt*" >> /etc/yum.conf
yum versionlock salt*
else
DEBIAN_FRONTEND=noninteractive apt-get -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" upgrade >> "$setup_log" 2>&1
@@ -1279,8 +1281,6 @@ set_progress_str() {
'----'\
"$percentage% - ${progress_bar_text^^}"\
"----" >> "$setup_log" 2>&1
sleep 5
}
sensor_pillar() {
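
Several of these hunks replace the old exclude=salt* approach with yum versionlock, so pinned packages survive a plain yum update without a global exclude. For reference, the plugin's routine commands (standard yum-plugin-versionlock):

    yum versionlock list                       # show current lock entries
    yum versionlock delete <entry-from-list>   # drop one lock before a deliberate upgrade
    yum versionlock clear                      # remove all locks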

View File

@@ -24,6 +24,36 @@ source ./so-variables
setup_type=$1
export setup_type
automation=$2
automated=no
function progress() {
if [ $automated == no ]; then
whiptail --title "Security Onion Install" --gauge 'Please wait while installing' 6 60 0
fi
}
if [[ -f automation/$automation && $(basename $automation) == $automation ]]; then
echo "Preselecting variable values based on automated setup: $automation"
source automation/$automation
automated=yes
attempt=1
attempts=60
ip a | grep "$MNIC:" | grep "state UP"
while [ $? -ne 0 ]; do
if [ $attempt -gt $attempts ]; then
echo "Network unavailable - setup cannot continue"
exit 1
fi
echo "Waiting for network to come up (attempt $attempt of $attempts)"
attempt=$((attempt + 1))
sleep 10;
ip a | grep "$MNIC:" | grep "state UP"
done
fi
case "$setup_type" in
iso | network) # Accepted values
echo "Beginning Security Onion $setup_type install"
@@ -495,7 +525,7 @@ fi
set_progress_str 95 'Verifying setup'
salt-call -l info state.highstate >> $setup_log 2>&1
} | whiptail --title "Hybrid Hunter Install" --gauge "Please wait while installing" 6 60 0
} | progress
success=$(tail -10 $setup_log | grep Failed | awk '{ print $2}')
if [[ "$success" = 0 ]]; then

View File

@@ -958,7 +958,7 @@ whiptail_setup_complete() {
[ -n "$TESTING" ] && return
whiptail --title "Security Onion Setup" --msgbox "Finished $install_type install. Press ENTER to reboot." 8 75
whiptail --title "Security Onion Setup" --msgbox "Finished $install_type install. Press Ok to reboot." 8 75
install_cleanup >> $setup_log 2>&1
}
@@ -967,7 +967,7 @@ whiptail_setup_failed() {
[ -n "$TESTING" ] && return
whiptail --title "Security Onion Setup" --msgbox "Install had a problem. Please see $setup_log for details. Press ENTER to reboot." 8 75
whiptail --title "Security Onion Setup" --msgbox "Install had a problem. Please see $setup_log for details. Press Ok to reboot." 8 75
install_cleanup >> $setup_log 2>&1
}