From c74ace89bae108dbccd7f4f75241fb5058ef7ef6 Mon Sep 17 00:00:00 2001 From: Josh Brower Date: Thu, 21 May 2020 14:34:00 -0400 Subject: [PATCH 01/76] Initial support - Ingest community_id --- salt/elasticsearch/files/ingest/zeek.common | 9 +++++++++ salt/elasticsearch/files/ingest/zeek.http | 1 + salt/elasticsearch/files/ingest/zeek.notice | 2 +- salt/elasticsearch/files/ingest/zeek.smtp | 1 + 4 files changed, 12 insertions(+), 1 deletion(-) diff --git a/salt/elasticsearch/files/ingest/zeek.common b/salt/elasticsearch/files/ingest/zeek.common index c31625db6..5859b354b 100644 --- a/salt/elasticsearch/files/ingest/zeek.common +++ b/salt/elasticsearch/files/ingest/zeek.common @@ -7,6 +7,15 @@ { "dot_expander": { "field": "id.orig_p", "path": "message2", "ignore_failure": true } }, { "dot_expander": { "field": "id.resp_h", "path": "message2", "ignore_failure": true } }, { "dot_expander": { "field": "id.resp_p", "path": "message2", "ignore_failure": true } }, + + {"convert":{"field":"message2.id.orig_p","type":"string"}}, + {"convert":{"field":"message2.id.resp_p","type":"string"}}, + + {"community_id": {"if": "ctx.network?.transport != null", "field":["message2.id.orig_h","message2.id.orig_p","message2.id.resp_h","message2.id.resp_p","network.transport"],"target_field":"network.community_id"}}, + + {"convert":{"field":"message2.id.orig_p","type":"integer"}}, + {"convert":{"field":"message2.id.resp_p","type":"integer"}}, + { "rename": { "field": "message2.id.orig_h", "target_field": "source.ip", "ignore_missing": true } }, { "rename": { "field": "message2.id.orig_p", "target_field": "source.port", "ignore_missing": true } }, { "rename": { "field": "message2.id.resp_h", "target_field": "destination.ip", "ignore_missing": true } }, diff --git a/salt/elasticsearch/files/ingest/zeek.http b/salt/elasticsearch/files/ingest/zeek.http index a1354044c..3368e45e1 100644 --- a/salt/elasticsearch/files/ingest/zeek.http +++ b/salt/elasticsearch/files/ingest/zeek.http @@ -29,6 +29,7 
@@ { "script": { "lang": "painless", "source": "ctx.uri_length = ctx.uri.length()", "ignore_failure": true } }, { "script": { "lang": "painless", "source": "ctx.useragent_length = ctx.useragent.length()", "ignore_failure": true } }, { "script": { "lang": "painless", "source": "ctx.virtual_host_length = ctx.virtual_host.length()", "ignore_failure": true } }, + { "set": { "field": "network.transport", "value": "tcp" } }, { "pipeline": { "name": "zeek.common" } } ] } diff --git a/salt/elasticsearch/files/ingest/zeek.notice b/salt/elasticsearch/files/ingest/zeek.notice index 4e54f325d..b662393f6 100644 --- a/salt/elasticsearch/files/ingest/zeek.notice +++ b/salt/elasticsearch/files/ingest/zeek.notice @@ -6,7 +6,7 @@ { "rename": { "field": "message2.fuid", "target_field": "log.id.fuid", "ignore_missing": true } }, { "rename": { "field": "message2.mime", "target_field": "file.mimetype", "ignore_missing": true } }, { "rename": { "field": "message2.desc", "target_field": "file.description", "ignore_missing": true } }, - { "rename": { "field": "message2.proto", "target_field": "network.protocol", "ignore_missing": true } }, + { "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_missing": true } }, { "rename": { "field": "message2.note", "target_field": "notice.note", "ignore_missing": true } }, { "rename": { "field": "message2.msg", "target_field": "notice.message", "ignore_missing": true } }, { "rename": { "field": "message2.sub", "target_field": "notice.sub_message", "ignore_missing": true } }, diff --git a/salt/elasticsearch/files/ingest/zeek.smtp b/salt/elasticsearch/files/ingest/zeek.smtp index 473b4cce5..9bfb1e3e1 100644 --- a/salt/elasticsearch/files/ingest/zeek.smtp +++ b/salt/elasticsearch/files/ingest/zeek.smtp @@ -25,6 +25,7 @@ { "rename": { "field": "message2.tls", "target_field": "smtp.tls", "ignore_missing": true } }, { "rename": { "field": "message2.fuids", "target_field": "log.id.fuids", "ignore_missing": true } }, { 
"rename": { "field": "message2.is_webmail", "target_field": "smtp.is_webmail", "ignore_missing": true } }, + { "set": { "field": "network.transport", "value": "tcp" } }, { "pipeline": { "name": "zeek.common" } } ] } From bff86ea802ad731524441452409417ae743b4cce Mon Sep 17 00:00:00 2001 From: Josh Brower Date: Thu, 21 May 2020 14:35:25 -0400 Subject: [PATCH 02/76] zeek.common ingest parser fix --- salt/elasticsearch/files/ingest/zeek.common | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/salt/elasticsearch/files/ingest/zeek.common b/salt/elasticsearch/files/ingest/zeek.common index 5859b354b..b0ac0d12a 100644 --- a/salt/elasticsearch/files/ingest/zeek.common +++ b/salt/elasticsearch/files/ingest/zeek.common @@ -6,16 +6,8 @@ { "dot_expander": { "field": "id.orig_h", "path": "message2", "ignore_failure": true } }, { "dot_expander": { "field": "id.orig_p", "path": "message2", "ignore_failure": true } }, { "dot_expander": { "field": "id.resp_h", "path": "message2", "ignore_failure": true } }, - { "dot_expander": { "field": "id.resp_p", "path": "message2", "ignore_failure": true } }, - - {"convert":{"field":"message2.id.orig_p","type":"string"}}, - {"convert":{"field":"message2.id.resp_p","type":"string"}}, - + { "dot_expander": { "field": "id.resp_p", "path": "message2", "ignore_failure": true } }, {"community_id": {"if": "ctx.network?.transport != null", "field":["message2.id.orig_h","message2.id.orig_p","message2.id.resp_h","message2.id.resp_p","network.transport"],"target_field":"network.community_id"}}, - - {"convert":{"field":"message2.id.orig_p","type":"integer"}}, - {"convert":{"field":"message2.id.resp_p","type":"integer"}}, - { "rename": { "field": "message2.id.orig_h", "target_field": "source.ip", "ignore_missing": true } }, { "rename": { "field": "message2.id.orig_p", "target_field": "source.port", "ignore_missing": true } }, { "rename": { "field": "message2.id.resp_h", "target_field": "destination.ip", "ignore_missing": true } }, 
From 967148890cc2e66f30c9fdfbfd5b48fccf9288a9 Mon Sep 17 00:00:00 2001 From: William Wernert Date: Fri, 22 May 2020 11:39:32 -0400 Subject: [PATCH 03/76] [fix] Only prompt user about network install on network install --- setup/proxies/docker.conf | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 setup/proxies/docker.conf diff --git a/setup/proxies/docker.conf b/setup/proxies/docker.conf new file mode 100644 index 000000000..9ab2c4b4c --- /dev/null +++ b/setup/proxies/docker.conf @@ -0,0 +1,2 @@ +[Service] +ExecStart=/usr/bin/dockerd /usr/bin/dockerd -H fd:// --registry-mirror "$proxy_addr" From b029d9aca62b9bafd9fb2f1915287bb195967c2b Mon Sep 17 00:00:00 2001 From: William Wernert Date: Fri, 22 May 2020 11:40:16 -0400 Subject: [PATCH 04/76] [feat] Initial work for running setup through proxy --- setup/so-functions | 49 ++++++++++++++++++++++++++++++++++++++++++++++ setup/so-setup | 7 ++++++- setup/so-whiptail | 2 +- so-setup-network | 4 ++-- 4 files changed, 58 insertions(+), 4 deletions(-) diff --git a/setup/so-functions b/setup/so-functions index a20953035..1e7bf7951 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -948,6 +948,35 @@ node_pillar() { cat "$pillar_file" >> "$setup_log" 2>&1 } +parse_options() { + case "$1" in + --turbo=*) + local proxy + proxy=$(echo "$1" | awk -F'--turbo=' '{print $2}') + use_proxy "http://$proxy" + ;; + --proxy=*) + echo "Unimplimented" + return + + if [[ $2 != --proxy-user=* ]] || [[ $3 != --proxy-pass=* ]]; then + echo "Invalid options passed for proxy. 
Order is --proxy-user= --proxy-pass=" + else + local proxy + local user + local password + proxy=$(echo "$1" | awk -F'--proxy=' '{print $2}') + user=$(echo "$2" | awk -F'--proxy-user=' '{print $2}') + password=$(echo "$3" | awk -F'--proxy-pass=' '{print $2}') + + use_proxy "$proxy" "$user" "$password" + fi + ;; + *) + echo "Invalid option" + esac +} + patch_pillar() { local pillar_file=$temp_install_dir/pillar/minions/$MINION_ID.sls @@ -1443,6 +1472,26 @@ update_packages() { fi } +use_proxy() { + local proxy_addr=$1 + #TODO: add options for username + pass + + if [[ $OS == 'centos' ]]; then + printf '%s\n'\ + "proxy=\"$proxy_addr\"" >> /etc/yum.conf + else + printf '%s\n'\ + "Acquire {"\ + "HTTP::proxy \"$proxy_addr\";"\ + "HTTPS::proxy \"$proxy_addr\";"\ + "}" > /etc/apt/apt.conf.d/proxy.conf + fi + mkdir -p /etc/systemd/system/docker.service.d + printf '%s\n'\ + "[Service]"\ + "ExecStart=/usr/bin/dockerd /usr/bin/dockerd -H fd:// --registry-mirror \"$proxy_addr\"" > /etc/systemd/system/docker.service.d/docker.conf +} + ls_heapsize() { if [ "$total_mem" -ge 32000 ]; then diff --git a/setup/so-setup b/setup/so-setup index 406d69763..5ec148208 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -48,7 +48,7 @@ fi setterm -blank 0 -if (whiptail_you_sure); then +if [ "$setup_type" == 'iso' ] || (whiptail_you_sure); then true else echo "User cancelled setup." >> $setup_log 2>&1 @@ -241,6 +241,11 @@ fi whiptail_make_changes +if [[ $# -gt 1 ]]; then + set -- "${@:2}" + parse_options "$@" +fi + if [[ "$setup_type" == 'iso' ]]; then # Init networking so rest of install works set_hostname_iso diff --git a/setup/so-whiptail b/setup/so-whiptail index cfe00b67b..559f791b7 100755 --- a/setup/so-whiptail +++ b/setup/so-whiptail @@ -1048,7 +1048,7 @@ whiptail_you_sure() { [ -n "$TESTING" ] && return - whiptail --title "Security Onion Setup" --yesno "Are you sure you want to install Security Onion over the internet?" 
8 75 + whiptail --title "Security Onion Setup" --yesno "Are you sure you want to continue a network install of Security Onion?" 8 75 local exitstatus=$? return $exitstatus diff --git a/so-setup-network b/so-setup-network index ae9af4ffa..2528ff14b 100755 --- a/so-setup-network +++ b/so-setup-network @@ -15,6 +15,6 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . -cd setup +cd setup || exit -./so-setup network +./so-setup network "$@" From c790b3827db56c3916752ee4bc1cf85650feea56 Mon Sep 17 00:00:00 2001 From: William Wernert Date: Fri, 22 May 2020 13:25:17 -0400 Subject: [PATCH 05/76] [fix] yum.conf proxy should not contain quotes --- setup/so-functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup/so-functions b/setup/so-functions index 1e7bf7951..38b0b5b9a 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1478,7 +1478,7 @@ use_proxy() { if [[ $OS == 'centos' ]]; then printf '%s\n'\ - "proxy=\"$proxy_addr\"" >> /etc/yum.conf + "proxy=$proxy_addr" >> /etc/yum.conf else printf '%s\n'\ "Acquire {"\ From 4b505827b19c9ec23d139753da0d19ab37fb1937 Mon Sep 17 00:00:00 2001 From: William Wernert Date: Fri, 22 May 2020 13:31:58 -0400 Subject: [PATCH 06/76] [fix] Use correct ports for acng and docker registry --- setup/so-functions | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/setup/so-functions b/setup/so-functions index 38b0b5b9a..934ff6cc1 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1478,18 +1478,18 @@ use_proxy() { if [[ $OS == 'centos' ]]; then printf '%s\n'\ - "proxy=$proxy_addr" >> /etc/yum.conf + "proxy=$proxy_addr:3412" >> /etc/yum.conf else printf '%s\n'\ "Acquire {"\ - "HTTP::proxy \"$proxy_addr\";"\ - "HTTPS::proxy \"$proxy_addr\";"\ + "HTTP::proxy \"$proxy_addr:3412\";"\ + "HTTPS::proxy \"$proxy_addr:3412\";"\ "}" > /etc/apt/apt.conf.d/proxy.conf fi - mkdir -p /etc/systemd/system/docker.service.d printf '%s\n'\ - 
"[Service]"\ - "ExecStart=/usr/bin/dockerd /usr/bin/dockerd -H fd:// --registry-mirror \"$proxy_addr\"" > /etc/systemd/system/docker.service.d/docker.conf + "{"\ + " \"registry-mirrors\": [\"$proxy_addr:5000\"]"\ + "}" > /etc/docker/daemon.json } ls_heapsize() { From 78f6261fccfae63331e38e494296209fb018ff49 Mon Sep 17 00:00:00 2001 From: William Wernert Date: Fri, 22 May 2020 14:00:01 -0400 Subject: [PATCH 07/76] [fix] acng should be port 3142 not 3412 --- setup/so-functions | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/setup/so-functions b/setup/so-functions index 934ff6cc1..303590d55 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1478,12 +1478,12 @@ use_proxy() { if [[ $OS == 'centos' ]]; then printf '%s\n'\ - "proxy=$proxy_addr:3412" >> /etc/yum.conf + "proxy=$proxy_addr:3142" >> /etc/yum.conf else printf '%s\n'\ "Acquire {"\ - "HTTP::proxy \"$proxy_addr:3412\";"\ - "HTTPS::proxy \"$proxy_addr:3412\";"\ + "HTTP::proxy \"$proxy_addr:3142\";"\ + "HTTPS::proxy \"$proxy_addr:3142\";"\ "}" > /etc/apt/apt.conf.d/proxy.conf fi printf '%s\n'\ From 579b6229358f9fb11fa7e1d8a9692feddd1879e8 Mon Sep 17 00:00:00 2001 From: William Wernert Date: Fri, 22 May 2020 14:36:55 -0400 Subject: [PATCH 08/76] [fix] Don't overwrite daemon.json, and only accept turbo mode on correct install types --- setup/so-functions | 35 +++++++++++++++++++---------------- 1 file changed, 19 insertions(+), 16 deletions(-) diff --git a/setup/so-functions b/setup/so-functions index 303590d55..ccfd59629 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -616,9 +616,10 @@ docker_registry() { echo "Setting up Docker Registry" >> "$setup_log" 2>&1 mkdir -p /etc/docker >> "$setup_log" 2>&1 # Make the host use the master docker registry + if [ -n "$TURBO" ]; then local proxy="$TURBO"; else local proxy="$MSRV"; fi printf '%s\n'\ "{"\ - " \"registry-mirrors\": [\"https://$MSRV:5000\"]"\ + " \"registry-mirrors\": [\"https://$proxy:5000\"]"\ "}" > 
/etc/docker/daemon.json echo "Docker Registry Setup - Complete" >> "$setup_log" 2>&1 @@ -951,9 +952,15 @@ node_pillar() { parse_options() { case "$1" in --turbo=*) - local proxy - proxy=$(echo "$1" | awk -F'--turbo=' '{print $2}') - use_proxy "http://$proxy" + if [[ $is_master || $is_helix ]]; then + local proxy + proxy=$(echo "$1" | awk -F'--turbo=' '{print $2}') + proxy_addr="http://$proxy" + use_proxy "$proxy_addr" + TURBO="$proxy_addr" + else + echo "turbo is not supported on this install type" >> $setup_log 2>&1 + fi ;; --proxy=*) echo "Unimplimented" @@ -963,13 +970,13 @@ parse_options() { echo "Invalid options passed for proxy. Order is --proxy-user= --proxy-pass=" else local proxy - local user - local password + local proxy_user + local proxy_password proxy=$(echo "$1" | awk -F'--proxy=' '{print $2}') - user=$(echo "$2" | awk -F'--proxy-user=' '{print $2}') - password=$(echo "$3" | awk -F'--proxy-pass=' '{print $2}') + proxy_user=$(echo "$2" | awk -F'--proxy-user=' '{print $2}') + proxy_password=$(echo "$3" | awk -F'--proxy-pass=' '{print $2}') - use_proxy "$proxy" "$user" "$password" + use_proxy "$proxy" "$proxy_user" "$proxy_password" fi ;; *) @@ -1478,18 +1485,14 @@ use_proxy() { if [[ $OS == 'centos' ]]; then printf '%s\n'\ - "proxy=$proxy_addr:3142" >> /etc/yum.conf + "proxy=${proxy_addr}:3142" >> /etc/yum.conf else printf '%s\n'\ "Acquire {"\ - "HTTP::proxy \"$proxy_addr:3142\";"\ - "HTTPS::proxy \"$proxy_addr:3142\";"\ + "HTTP::proxy \"${proxy_addr}:3142\";"\ + "HTTPS::proxy \"${proxy_addr}:3142\";"\ "}" > /etc/apt/apt.conf.d/proxy.conf fi - printf '%s\n'\ - "{"\ - " \"registry-mirrors\": [\"$proxy_addr:5000\"]"\ - "}" > /etc/docker/daemon.json } ls_heapsize() { From 19cd57bce0f2c72fd1aeab9e13503ce40cb77a27 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Fri, 22 May 2020 14:44:57 -0400 Subject: [PATCH 09/76] Support automated setup --- setup/automation/pm_standalone_defaults | 75 +++++++++++++++++++++++++ setup/so-functions | 3 + setup/so-setup | 6 
++ 3 files changed, 84 insertions(+) create mode 100644 setup/automation/pm_standalone_defaults diff --git a/setup/automation/pm_standalone_defaults b/setup/automation/pm_standalone_defaults new file mode 100644 index 000000000..b5a6258ff --- /dev/null +++ b/setup/automation/pm_standalone_defaults @@ -0,0 +1,75 @@ +#!/bin/bash + +# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+ +TESTING=true + +address_type=DHCP +ADMINUSER=onionuser +ADMINPASS1=onionuser +ADMINPASS2=onionuser +BASICBRO=7 +BASICSURI=7 +# BLOGS= +BNICS=eth1 +BROVERSION=ZEEK +# CURCLOSEDAYS= +# EVALADVANCED=BASIC +GRAFANA=1 +# HELIXAPIKEY= +HNMASTER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12 +HNSENSOR=inherit +HOSTNAME=standalone +install_type=STANDALONE +# LSINPUTBATCHCOUNT= +# LSINPUTTHREADS= +# LSPIPELINEBATCH= +# LSPIPELINEWORKERS= +MASTERADV=BASIC +MASTERUPDATES=1 +# MDNS= +# MGATEWAY= +# MIP= +# MMASK= +MNIC=eth0 +# MSEARCH= +# MSRV= +# MTU= +NAVIGATOR=1 +NIDS=Suricata +# NODE_ES_HEAP_SIZE= +# NODE_LS_HEAP_SIZE= +NODESETUP=NODEBASIC +NSMSETUP=BASIC +NODEUPDATES=MASTER +# OINKCODE= +OSQUERY=1 +# PATCHSCHEDULEDAYS= +# PATCHSCHEDULEHOURS= +PATCHSCHEDULENAME=auto +PLAYBOOK=1 +# REDIRECTHOST= +REDIRECTINFO=IP +RULESETUP=ETOPEN +# SHARDCOUNT= +SOREMOTEPASS1=onionuser +SOREMOTEPASS2=onionuser +STRELKA=1 +THEHIVE=1 +WAZUH=1 +WEBUSER=onionuser@somewhere.invalid +WEBPASSWD1=onionuser +WEBPASSWD2=onionuser \ No newline at end of file diff --git a/setup/so-functions b/setup/so-functions index a20953035..185f41d89 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -550,6 +550,9 @@ detect_os() { disable_onion_user() { # Disable the default account cause security. 
usermod -L onion + + # Remove the automated setup script from crontab, if it exists + crontab -u onion -r } disable_misc_network_features() { diff --git a/setup/so-setup b/setup/so-setup index 406d69763..3e54d3f66 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -24,6 +24,12 @@ source ./so-variables setup_type=$1 export setup_type +automation=$2 +if [[ -f automation/$automation && $(basename $automation) == $automation ]]; then + echo "Preselecting variable values based on automated setup: $automation" + source automation/$automation +fi + case "$setup_type" in iso | network) # Accepted values echo "Beginning Security Onion $setup_type install" From 862631b93a1a8d84b333448a12be9eefeae6cc38 Mon Sep 17 00:00:00 2001 From: William Wernert Date: Fri, 22 May 2020 14:47:00 -0400 Subject: [PATCH 10/76] [fix] Add space in daemon.json --- setup/so-functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup/so-functions b/setup/so-functions index ccfd59629..083b9db61 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -619,7 +619,7 @@ docker_registry() { if [ -n "$TURBO" ]; then local proxy="$TURBO"; else local proxy="$MSRV"; fi printf '%s\n'\ "{"\ - " \"registry-mirrors\": [\"https://$proxy:5000\"]"\ + " \"registry-mirrors\": [ \"https://$proxy:5000\" ]"\ "}" > /etc/docker/daemon.json echo "Docker Registry Setup - Complete" >> "$setup_log" 2>&1 From 08564ed6d4e6807c0a51aa429542bfe350f5bfea Mon Sep 17 00:00:00 2001 From: William Wernert Date: Fri, 22 May 2020 14:50:42 -0400 Subject: [PATCH 11/76] [feat] Trim quotes from arguments --- setup/so-functions | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/setup/so-functions b/setup/so-functions index 083b9db61..8c911c3d7 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -972,9 +972,9 @@ parse_options() { local proxy local proxy_user local proxy_password - proxy=$(echo "$1" | awk -F'--proxy=' '{print $2}') - proxy_user=$(echo "$2" | awk -F'--proxy-user=' '{print 
$2}') - proxy_password=$(echo "$3" | awk -F'--proxy-pass=' '{print $2}') + proxy=$(echo "$1" | tr -d '"' | awk -F'--proxy=' '{print $2}') + proxy_user=$(echo "$2" | tr -d '"' | awk -F'--proxy-user=' '{print $2}') + proxy_password=$(echo "$3" | tr -d '"' | awk -F'--proxy-pass=' '{print $2}') use_proxy "$proxy" "$proxy_user" "$proxy_password" fi From ad96baab551c3f1970ded8a990369183513fd6e6 Mon Sep 17 00:00:00 2001 From: William Wernert Date: Fri, 22 May 2020 14:50:58 -0400 Subject: [PATCH 12/76] [feat] Trim quotes from arguments --- setup/so-functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup/so-functions b/setup/so-functions index 8c911c3d7..b29d4adef 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -954,7 +954,7 @@ parse_options() { --turbo=*) if [[ $is_master || $is_helix ]]; then local proxy - proxy=$(echo "$1" | awk -F'--turbo=' '{print $2}') + proxy=$(echo "$1" | tr -d '"' | awk -F'--turbo=' '{print $2}') proxy_addr="http://$proxy" use_proxy "$proxy_addr" TURBO="$proxy_addr" From d9feb0d95c3c463a6f15c76c6e77f40eb8c4ae70 Mon Sep 17 00:00:00 2001 From: William Wernert Date: Fri, 22 May 2020 15:09:04 -0400 Subject: [PATCH 13/76] [fix] Don't repeat protocol in daemon.json --- setup/so-functions | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/setup/so-functions b/setup/so-functions index b29d4adef..4e01c935c 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -616,10 +616,10 @@ docker_registry() { echo "Setting up Docker Registry" >> "$setup_log" 2>&1 mkdir -p /etc/docker >> "$setup_log" 2>&1 # Make the host use the master docker registry - if [ -n "$TURBO" ]; then local proxy="$TURBO"; else local proxy="$MSRV"; fi + if [ -n "$TURBO" ]; then local proxy="$TURBO"; else local proxy="https://$MSRV"; fi printf '%s\n'\ "{"\ - " \"registry-mirrors\": [ \"https://$proxy:5000\" ]"\ + " \"registry-mirrors\": [ \"$proxy:5000\" ]"\ "}" > /etc/docker/daemon.json echo "Docker Registry Setup - Complete" >> 
"$setup_log" 2>&1 From 29726e957f408835eaf4b6ef016e19e0d7fd6b9d Mon Sep 17 00:00:00 2001 From: William Wernert Date: Fri, 22 May 2020 15:16:57 -0400 Subject: [PATCH 14/76] [fix] Redirect parse_options output to setup log --- setup/so-setup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup/so-setup b/setup/so-setup index 5ec148208..4b0a4d6f6 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -243,7 +243,7 @@ whiptail_make_changes if [[ $# -gt 1 ]]; then set -- "${@:2}" - parse_options "$@" + parse_options "$@" >> $setup_log 2>&1 fi if [[ "$setup_type" == 'iso' ]]; then From 13423bed18eca71c101252a0dc7d4683051eb5fb Mon Sep 17 00:00:00 2001 From: William Wernert Date: Fri, 22 May 2020 16:35:16 -0400 Subject: [PATCH 15/76] [fix] Add indent in proxy.conf --- setup/so-functions | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/setup/so-functions b/setup/so-functions index 04b2652a5..ddf265153 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1492,8 +1492,8 @@ use_proxy() { else printf '%s\n'\ "Acquire {"\ - "HTTP::proxy \"${proxy_addr}:3142\";"\ - "HTTPS::proxy \"${proxy_addr}:3142\";"\ + " HTTP::proxy \"${proxy_addr}:3142\";"\ + " HTTPS::proxy \"${proxy_addr}:3142\";"\ "}" > /etc/apt/apt.conf.d/proxy.conf fi } From 56f5fbdf6b095445094de6a379b80fb2d690b085 Mon Sep 17 00:00:00 2001 From: Josh Brower Date: Fri, 22 May 2020 17:11:08 -0400 Subject: [PATCH 16/76] Ingest pipeline commid fix for conn logs --- salt/elasticsearch/files/ingest/common | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/elasticsearch/files/ingest/common b/salt/elasticsearch/files/ingest/common index e70d5e2d8..a65742f99 100644 --- a/salt/elasticsearch/files/ingest/common +++ b/salt/elasticsearch/files/ingest/common @@ -38,7 +38,7 @@ { "rename": { "field": "module", "target_field": "event.module", "ignore_missing": true } }, { "rename": { "field": "dataset", "target_field": "event.dataset", "ignore_missing": true } }, { 
"rename": { "field": "category", "target_field": "event.category", "ignore_missing": true } }, - { "rename": { "field": "message2.community_id", "target_field": "network.community_id", "ignore_missing": true } }, + { "rename": { "field": "message2.community_id", "target_field": "network.community_id", "ignore_failure": true, "ignore_missing": true } }, { "remove": { "field": [ "index_name_prefix", "message2", "type" ], From 1c207afb31bd321e25a1e7e72586b7df438c3870 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Sat, 23 May 2020 12:00:24 -0400 Subject: [PATCH 17/76] Define packages in the common state --- salt/common/init.sls | 85 ++++++++++++++++++++++++++++++++++++++++---- setup/so-functions | 12 ++++--- 2 files changed, 85 insertions(+), 12 deletions(-) diff --git a/salt/common/init.sls b/salt/common/init.sls index 82ac4a062..a8c791c7f 100644 --- a/salt/common/init.sls +++ b/salt/common/init.sls @@ -28,21 +28,92 @@ salttmp: - group: 939 - makedirs: True -# Install packages needed for the sensor -sensorpkgs: +# Install epel +{% if grains['os'] == 'CentOS' %} +epel: pkg.installed: - - skip_suggestions: False + - skip_suggestions: True + - pkgs: + - epel-release +{% endif %} + +# Install common packages +commonpkgs: + pkg.installed: + - skip_suggestions: True - pkgs: - - wget - - jq {% if grains['os'] != 'CentOS' %} - apache2-utils + - wget + - jq + - python3-docker + - docker-ce + - curl + - ca-certificates + - software-properties-common + - apt-transport-https + - openssl + - netcat + - python3-mysqldb + - sqlite3 + - argon2 + - libssl-dev + - python3-dateutil + - python3-m2crypto + - python3-mysqldb + - salt-minion: 2019.2.5+ds-1 + - hold: True + - update_holds: True + {% if grains['role'] == 'so-master' or grains['role'] == 'so-eval' or grains['role'] == 'so-helix' or grains['role'] == 'so-mastersearch' or grains['role'] == 'so-standalone' %} + - salt-master: 2019.2.5+ds-1 + - hold: True + - update_holds: True + - containerd.io: 1.2.13-2 + - hold: True + - 
update_holds: True + - docker-ce: 5:19.03.9~3-0~ubuntu-bionic + - hold: True + - update_holds: True + {% endif %} + - containerd.io + - docker-ce {% else %} - - net-tools + - wget + - bind-utils + - jq - tcpdump - httpd-tools + - net-tools + - curl + - sqlite + - argon2 + - maridb-devel + - nmap-ncat + - python3 + - python36-docker + - python36-dateutil + - python36-m2crypto + - python36-mysql + - yum-utils + - device-mapper-persistent-data + - lvm2 + - openssl + - salt-minion: 2019.2.5 + - hold: True + - update_holds: True + {% if grains['role'] == 'so-master' or grains['role'] == 'so-eval' or grains['role'] == 'so-helix' or grains['role'] == 'so-mastersearch' or grains['role'] == 'so-standalone' %} + - salt-master: 2019.2.5 + - hold: True + - update_holds: True {% endif %} - + - containerd.io: 1.2.6-3 + - hold: True + - update_holds: True + - docker-ce: 19.03.9-3 + - hold: True + - update_holds: True + {% endif %}%} + # Always keep these packages up to date alwaysupdated: diff --git a/setup/so-functions b/setup/so-functions index ddf265153..702c3cebd 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -19,7 +19,7 @@ source ./so-whiptail source ./so-variables source ./so-common-functions -SOVERSION=1.3.0 +SOVERSION=1.4.0 accept_salt_key_remote() { systemctl restart salt-minion @@ -514,7 +514,7 @@ detect_os() { # Install bind-utils so the host command exists if ! 
command -v host > /dev/null 2>&1; then echo "Installing required packages to run installer" - yum -y install bind-utils >> "$setup_log" 2>&1 + yum -y install bind-utils yum-plugin-versionlock >> "$setup_log" 2>&1 fi @@ -583,7 +583,9 @@ docker_install() { { yum clean expire-cache; yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo; - yum -y install docker-ce; + yum -y install docker-ce-19.03.9-3 containerd.io-1.2.6-3; + yum versionlock docker-ce; + yum versionlock containerd.io } >> "$setup_log" 2>&1 else @@ -687,7 +689,7 @@ docker_seed_registry() { # Tag it with the new registry destination docker tag soshybridhunter/"$i" "$HOSTNAME":5000/soshybridhunter/"$i" docker push "$HOSTNAME":5000/soshybridhunter/"$i" - docker rmi soshybridhunter/"$i" + #docker rmi soshybridhunter/"$i" } >> "$setup_log" 2>&1 done else @@ -1107,7 +1109,7 @@ saltify() { yum -y update exclude=salt*; systemctl enable salt-minion; } >> "$setup_log" 2>&1 - echo "exclude=salt*" >> /etc/yum.conf + yum versionlock salt* else DEBIAN_FRONTEND=noninteractive apt-get -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" upgrade >> "$setup_log" 2>&1 From 31b3563fb3608bc9b104bd6456bca84bbe8e5f81 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Sun, 24 May 2020 10:56:30 -0400 Subject: [PATCH 18/76] Fix package versions --- salt/common/init.sls | 8 ++++---- setup/so-functions | 6 +++--- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/salt/common/init.sls b/salt/common/init.sls index a8c791c7f..17cbb1608 100644 --- a/salt/common/init.sls +++ b/salt/common/init.sls @@ -98,18 +98,18 @@ commonpkgs: - device-mapper-persistent-data - lvm2 - openssl - - salt-minion: 2019.2.5 + - salt-minion: 2019.2.5.el7 - hold: True - update_holds: True {% if grains['role'] == 'so-master' or grains['role'] == 'so-eval' or grains['role'] == 'so-helix' or grains['role'] == 'so-mastersearch' or grains['role'] == 'so-standalone' %} - - salt-master: 2019.2.5 + - 
salt-master: 2019.2.5.el7 - hold: True - update_holds: True {% endif %} - - containerd.io: 1.2.6-3 + - containerd.io: 1.2.6-3.el7 - hold: True - update_holds: True - - docker-ce: 19.03.9-3 + - docker-ce: 19.03.9-3.el7 - hold: True - update_holds: True {% endif %}%} diff --git a/setup/so-functions b/setup/so-functions index 702c3cebd..bf6db26be 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -583,9 +583,9 @@ docker_install() { { yum clean expire-cache; yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo; - yum -y install docker-ce-19.03.9-3 containerd.io-1.2.6-3; - yum versionlock docker-ce; - yum versionlock containerd.io + yum -y install docker-ce-19.03.9-3.el7 containerd.io-1.2.6-3.el7; + yum versionlock docker-ce-19.03.9-3.el7; + yum versionlock containerd.io-1.2.6-3.el7 } >> "$setup_log" 2>&1 else From ad97092589f8d46600a4ce695d83155895cac84d Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Sun, 24 May 2020 21:38:37 -0400 Subject: [PATCH 19/76] When automating setup installation wait for network availability --- setup/so-setup | 1 + 1 file changed, 1 insertion(+) diff --git a/setup/so-setup b/setup/so-setup index b3c141fae..9384c5e96 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -28,6 +28,7 @@ automation=$2 if [[ -f automation/$automation && $(basename $automation) == $automation ]]; then echo "Preselecting variable values based on automated setup: $automation" source automation/$automation + sleep 30 # Re-implement with network availability probe fi case "$setup_type" in From e09027e7954016e43a3f0b01ac7109e43fccaeab Mon Sep 17 00:00:00 2001 From: Wes Lambert Date: Tue, 26 May 2020 13:00:03 +0000 Subject: [PATCH 20/76] update Strelka cron --- salt/strelka/init.sls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/strelka/init.sls b/salt/strelka/init.sls index 8bdbd8274..a9842924d 100644 --- a/salt/strelka/init.sls +++ b/salt/strelka/init.sls @@ -112,5 +112,5 @@ strelka_filestream: 
strelka_zeek_extracted_sync: cron.present: - user: root - - name: mv /nsm/zeek/extracted/complete/* /nsm/strelka + - name: [ -d /nsm/zeek/extracted/complete/ ] && mv /nsm/zeek/extracted/complete/* /nsm/strelka/ > /dev/null 2>&1 - minute: '*' From 330de4624955d2fbe0bce04869fb55f3e6376dbe Mon Sep 17 00:00:00 2001 From: Wes Lambert Date: Tue, 26 May 2020 13:19:15 +0000 Subject: [PATCH 21/76] update SOCtopus config for hostname/ip --- salt/soctopus/files/SOCtopus.conf | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/salt/soctopus/files/SOCtopus.conf b/salt/soctopus/files/SOCtopus.conf index f2415d010..e5878cb70 100644 --- a/salt/soctopus/files/SOCtopus.conf +++ b/salt/soctopus/files/SOCtopus.conf @@ -1,9 +1,9 @@ -{%- set ip = salt['pillar.get']('static:masterip', '') %} +{%- set MASTER = salt['pillar.get']('master:url_base', '') %} {%- set HIVEKEY = salt['pillar.get']('static:hivekey', '') %} {%- set CORTEXKEY = salt['pillar.get']('static:cortexorguserkey', '') %} [es] -es_url = http://{{ip}}:9200 +es_url = http://{{MASTER}}:9200 es_user = YOURESUSER es_pass = YOURESPASS es_index_pattern = so-* @@ -11,7 +11,7 @@ es_verifycert = no [cortex] auto_analyze_alerts = no -cortex_url = https://{{ip}}/cortex/ +cortex_url = https://{{MASTER}}/cortex/ cortex_key = {{ CORTEXKEY }} supported_analyzers = Urlscan_io_Search,CERTatPassiveDNS @@ -32,7 +32,7 @@ grr_user = YOURGRRUSER grr_pass = YOURGRRPASS [hive] -hive_url = https://{{ip}}/thehive/ +hive_url = https://{{MASTER}}/thehive/ hive_key = {{ HIVEKEY }} hive_tlp = 3 hive_verifycert = no @@ -59,7 +59,7 @@ slack_url = YOURSLACKWORKSPACE slack_webhook = YOURSLACKWEBHOOK [playbook] -playbook_url = https://{{ip}}/playbook +playbook_url = https://{{MASTER}}/playbook playbook_key = de6639318502476f2fa5aa06f43f51fb389a3d7f playbook_verifycert = no From c28936d9e42ea3a23f5f17c49b0379981cbb6705 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Tue, 26 May 2020 09:40:29 -0400 Subject: [PATCH 22/76] temporarily 
abort automated installation --- setup/so-setup | 1 + 1 file changed, 1 insertion(+) diff --git a/setup/so-setup b/setup/so-setup index 9384c5e96..69c5763f9 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -27,6 +27,7 @@ export setup_type automation=$2 if [[ -f automation/$automation && $(basename $automation) == $automation ]]; then echo "Preselecting variable values based on automated setup: $automation" + exit 1 source automation/$automation sleep 30 # Re-implement with network availability probe fi From 3eb62287ac6c2195cf2536bc4909a150ca232b6e Mon Sep 17 00:00:00 2001 From: Wes Lambert Date: Tue, 26 May 2020 14:00:16 +0000 Subject: [PATCH 23/76] update Curator config for index transition --- salt/curator/files/action/close.yml | 5 ++--- salt/curator/files/action/delete.yml | 4 ++-- salt/curator/files/bin/so-curator-closed-delete-delete | 8 ++++---- 3 files changed, 8 insertions(+), 9 deletions(-) diff --git a/salt/curator/files/action/close.yml b/salt/curator/files/action/close.yml index dfe5519e8..a65e9af3d 100644 --- a/salt/curator/files/action/close.yml +++ b/salt/curator/files/action/close.yml @@ -24,9 +24,8 @@ actions: disable_action: False filters: - filtertype: pattern - kind: prefix - value: logstash- - exclude: + kind: regex + value: '^(logstash-.*|so-.*)$' - filtertype: age source: name direction: older diff --git a/salt/curator/files/action/delete.yml b/salt/curator/files/action/delete.yml index e6f2f3833..030bbbfac 100644 --- a/salt/curator/files/action/delete.yml +++ b/salt/curator/files/action/delete.yml @@ -20,8 +20,8 @@ actions: disable_action: False filters: - filtertype: pattern - kind: prefix - value: logstash- + kind: regex + value: '^(logstash-.*|so-.*)$' - filtertype: space source: creation_date use_age: True diff --git a/salt/curator/files/bin/so-curator-closed-delete-delete b/salt/curator/files/bin/so-curator-closed-delete-delete index b0ec62424..3d397defc 100755 --- a/salt/curator/files/bin/so-curator-closed-delete-delete +++ 
b/salt/curator/files/bin/so-curator-closed-delete-delete @@ -33,17 +33,17 @@ LOG="/opt/so/log/curator/so-curator-closed-delete.log" # Check for 2 conditions: # 1. Are Elasticsearch indices using more disk space than LOG_SIZE_LIMIT? -# 2. Are there any closed logstash- indices that we can delete? +# 2. Are there any closed logstash-, or so- indices that we can delete? # If both conditions are true, keep on looping until one of the conditions is false. while [[ $(du -hs --block-size=1GB /nsm/elasticsearch/nodes | awk '{print $1}' ) -gt "{{LOG_SIZE_LIMIT}}" ]] && -curl -s {{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices | grep "^ close logstash-" > /dev/null; do +curl -s {{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices | grep -E "^ close (logstash-|so-)" > /dev/null; do # We need to determine OLDEST_INDEX. - # First, get the list of closed indices that are prefixed with "logstash-". + # First, get the list of closed indices that are prefixed with "logstash-" or "so-". # For example: logstash-ids-YYYY.MM.DD # Then, sort by date by telling sort to use hyphen as delimiter and then sort on the third field. # Finally, select the first entry in that sorted list. - OLDEST_INDEX=$(curl -s {{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices | grep "^ close logstash-" | awk '{print $2}' | sort -t- -k3 | head -1) + OLDEST_INDEX=$(curl -s {{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices | grep -E "^ close (logstash-|so-)" | awk '{print $2}' | sort -t- -k3 | head -1) # Now that we've determined OLDEST_INDEX, ask Elasticsearch to delete it. 
curl -XDELETE {{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/${OLDEST_INDEX} From 97306d3acdbf41e311c52f9afb5fdf1e9d39c4a6 Mon Sep 17 00:00:00 2001 From: Wes Lambert Date: Tue, 26 May 2020 14:05:33 +0000 Subject: [PATCH 24/76] rename indices --- salt/common/tools/sbin/so-elastalert-create | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/salt/common/tools/sbin/so-elastalert-create b/salt/common/tools/sbin/so-elastalert-create index fbe9527a7..0270503bf 100755 --- a/salt/common/tools/sbin/so-elastalert-create +++ b/salt/common/tools/sbin/so-elastalert-create @@ -166,8 +166,7 @@ cat << EOF What elasticsearch index do you want to use? Below are the default Index Patterns used in Security Onion: -*:logstash-* -*:logstash-beats-* +*:so-ids-* *:elastalert_status* EOF From 0e51ab41cf83df46eeeac5ee3a1cebd6f4022251 Mon Sep 17 00:00:00 2001 From: Wes Lambert Date: Tue, 26 May 2020 14:18:58 +0000 Subject: [PATCH 25/76] Update ES watermark settings --- salt/elasticsearch/files/elasticsearch.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/salt/elasticsearch/files/elasticsearch.yml b/salt/elasticsearch/files/elasticsearch.yml index 73f3c9239..271ef40cf 100644 --- a/salt/elasticsearch/files/elasticsearch.yml +++ b/salt/elasticsearch/files/elasticsearch.yml @@ -22,3 +22,7 @@ transport.bind_host: 0.0.0.0 transport.publish_host: {{ nodeip }} transport.publish_port: 9300 {%- endif %} +cluster.routing.allocation.disk.threshold_enabled: true +cluster.routing.allocation.disk.watermark.low: 95% +cluster.routing.allocation.disk.watermark.high: 98% +cluster.routing.allocation.disk.watermark.flood_stage: 98% From 87407c9acefc7766f1f12ec1772822d49f965816 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 26 May 2020 11:21:59 -0400 Subject: [PATCH 26/76] rework common init.sls --- salt/common/init.sls | 72 +++++++++++++++++++------------------------- 1 file changed, 31 insertions(+), 41 deletions(-) diff --git a/salt/common/init.sls b/salt/common/init.sls index 
17cbb1608..77801dd1a 100644 --- a/salt/common/init.sls +++ b/salt/common/init.sls @@ -38,11 +38,11 @@ epel: {% endif %} # Install common packages +{% if grains['os'] != 'CentOS' %} commonpkgs: pkg.installed: - skip_suggestions: True - pkgs: - {% if grains['os'] != 'CentOS' %} - apache2-utils - wget - jq @@ -61,23 +61,19 @@ commonpkgs: - python3-dateutil - python3-m2crypto - python3-mysqldb - - salt-minion: 2019.2.5+ds-1 - - hold: True - - update_holds: True - {% if grains['role'] == 'so-master' or grains['role'] == 'so-eval' or grains['role'] == 'so-helix' or grains['role'] == 'so-mastersearch' or grains['role'] == 'so-standalone' %} - - salt-master: 2019.2.5+ds-1 - - hold: True - - update_holds: True +heldpackages: + pkg.installed: + - pkgs: - containerd.io: 1.2.13-2 - - hold: True - - update_holds: True - docker-ce: 5:19.03.9~3-0~ubuntu-bionic - - hold: True - - update_holds: True - {% endif %} - - containerd.io - - docker-ce - {% else %} + - hold: True + - update_holds: True + +{% else %} +commonpkgs: + pkg.installed: + - skip_suggestions: True + - pkgs: - wget - bind-utils - jq @@ -87,33 +83,27 @@ commonpkgs: - curl - sqlite - argon2 - - maridb-devel + - mariadb-devel - nmap-ncat - python3 - - python36-docker - - python36-dateutil - - python36-m2crypto - - python36-mysql - - yum-utils - - device-mapper-persistent-data - - lvm2 - - openssl - - salt-minion: 2019.2.5.el7 - - hold: True - - update_holds: True - {% if grains['role'] == 'so-master' or grains['role'] == 'so-eval' or grains['role'] == 'so-helix' or grains['role'] == 'so-mastersearch' or grains['role'] == 'so-standalone' %} - - salt-master: 2019.2.5.el7 - - hold: True - - update_holds: True - {% endif %} - - containerd.io: 1.2.6-3.el7 - - hold: True - - update_holds: True - - docker-ce: 19.03.9-3.el7 - - hold: True - - update_holds: True - {% endif %}%} - + - python36-docker + - python36-dateutil + - python36-m2crypto + - python36-mysql + - yum-utils + - device-mapper-persistent-data + - lvm2 + - 
openssl + +heldpackages: + pkg.installed: + - pkgs: + - containerd.io: 1.2.13-3.2.el7 + - docker-ce: 3:19.03.9-3.el7 + - hold: True + - update_holds: True +{% endif %} + # Always keep these packages up to date alwaysupdated: From 2cb7464086c8208efa7184d935af78cd586c8686 Mon Sep 17 00:00:00 2001 From: Josh Brower Date: Tue, 26 May 2020 12:01:58 -0400 Subject: [PATCH 27/76] Add TheHive communityid link --- salt/elastalert/files/rules/so/nids2hive.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/elastalert/files/rules/so/nids2hive.yaml b/salt/elastalert/files/rules/so/nids2hive.yaml index 68a173fcd..097511d56 100644 --- a/salt/elastalert/files/rules/so/nids2hive.yaml +++ b/salt/elastalert/files/rules/so/nids2hive.yaml @@ -40,7 +40,7 @@ hive_alert_config: title: '{match[rule][name]}' type: 'NIDS' source: 'SecurityOnion' - description: "`Hunting Pivot:` \n\n \n\n `Kibana Dashboard:` \n\n \n\n `IPs: `{match[source][ip]}:{match[source][port]} --> {match[destination][ip]}:{match[destination][port]} \n\n `Signature:`{match[rule][rule]}" + description: "`Hunting Pivot:` \n\n \n\n `Kibana Dashboard - Signature Drilldown:` \n\n \n\n `Kibana Dashboard - Community_ID:` \n\n \n\n `IPs: `{match[source][ip]}:{match[source][port]} --> {match[destination][ip]}:{match[destination][port]} \n\n `Signature:`{match[rule][rule]}" severity: 2 tags: ['{match[rule][uuid]}','{match[source][ip]}','{match[destination][ip]}'] tlp: 3 From 7a657d122989cdc4545ea01c3e1c49fd10a64f18 Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Tue, 26 May 2020 12:58:01 -0400 Subject: [PATCH 28/76] add Community ID to default fields in bottom data table in Hunt --- salt/soc/files/soc/soc.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/soc/files/soc/soc.json b/salt/soc/files/soc/soc.json index 6b76e622c..76770e2bd 100644 --- a/salt/soc/files/soc/soc.json +++ b/salt/soc/files/soc/soc.json @@ -32,7 +32,7 @@ "dateRangeMinutes": 1440, "mostRecentlyUsedLimit": 5, 
"eventFields": { - "default": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "log.id.uid" ], + "default": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "log.id.uid", "network.community_id" ], "bro_conn": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "protocol", "service", "log.id.uid" ], "bro_dce_rpc": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "endpoint", "named_pipe", "operation", "log.id.uid" ], "bro_dhcp": ["soc_timestamp", "source.ip", "destination.ip", "domain_name", "hostname", "message_types", "log.id.uid" ], From 8723f8785ec2043f06945b3fd0b1d6ca424e5f11 Mon Sep 17 00:00:00 2001 From: Josh Brower Date: Tue, 26 May 2020 13:05:56 -0400 Subject: [PATCH 29/76] osquery pipeline fix and fail state if errors --- salt/elasticsearch/files/ingest/osquery.query_result | 2 +- salt/elasticsearch/files/so-elasticsearch-pipelines | 5 ++++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/salt/elasticsearch/files/ingest/osquery.query_result b/salt/elasticsearch/files/ingest/osquery.query_result index e9cdbe2d3..5b37655f9 100644 --- a/salt/elasticsearch/files/ingest/osquery.query_result +++ b/salt/elasticsearch/files/ingest/osquery.query_result @@ -31,7 +31,7 @@ { "rename": { "field": "message3.columns.remote_port", "target_field": "remote.port", "ignore_missing": true } }, { "rename": { "field": "message3.columns.process_name", "target_field": "process.name", "ignore_missing": true } }, { "rename": { "field": "message3.columns.eventid", "target_field": "event.code", "ignore_missing": true } }, - { "set": { "if": "ctx.message3.columns.?data != null", "field": "dataset", "value": "wel-{{message3.columns.source}}", "override": true } }, + { "set": { "if": "ctx.message3.columns?.data != null", "field": "dataset", "value": "wel-{{message3.columns.source}}", "override": true } }, { "rename": { "field": 
"message3.columns.winlog.EventData.SubjectUserName", "target_field": "user.name", "ignore_missing": true } }, { "rename": { "field": "message3.columns.winlog.EventData.destinationHostname", "target_field": "destination.hostname", "ignore_missing": true } }, { "rename": { "field": "message3.columns.winlog.EventData.destinationIp", "target_field": "destination.ip", "ignore_missing": true } }, diff --git a/salt/elasticsearch/files/so-elasticsearch-pipelines b/salt/elasticsearch/files/so-elasticsearch-pipelines index b1b6db158..514054359 100755 --- a/salt/elasticsearch/files/so-elasticsearch-pipelines +++ b/salt/elasticsearch/files/so-elasticsearch-pipelines @@ -15,6 +15,7 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . +RETURN_CODE=0 ELASTICSEARCH_HOST=$1 ELASTICSEARCH_PORT=9200 @@ -46,7 +47,9 @@ fi cd ${ELASTICSEARCH_INGEST_PIPELINES} echo "Loading pipelines..." -for i in *; do echo $i; curl ${ELASTICSEARCH_AUTH} -XPUT http://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}/_ingest/pipeline/$i -H 'Content-Type: application/json' -d@$i 2>/dev/null; echo; done +for i in *; do echo $i; RESPONSE=$(curl ${ELASTICSEARCH_AUTH} -XPUT http://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}/_ingest/pipeline/$i -H 'Content-Type: application/json' -d@$i 2>/dev/null); echo $RESPONSE; if [[ "$RESPONSE" == *"error"* ]]; then RETURN_CODE=1; fi; done echo cd - >/dev/null + +exit $RETURN_CODE \ No newline at end of file From d260224dfea4c7b8c384911a452d641d47a432a8 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 26 May 2020 13:39:36 -0400 Subject: [PATCH 30/76] Update reboot language --- setup/so-whiptail | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/setup/so-whiptail b/setup/so-whiptail index 559f791b7..72455fc9e 100755 --- a/setup/so-whiptail +++ b/setup/so-whiptail @@ -958,7 +958,7 @@ whiptail_setup_complete() { [ -n "$TESTING" ] && return - whiptail --title "Security Onion Setup" --msgbox "Finished 
$install_type install. Press ENTER to reboot." 8 75 + whiptail --title "Security Onion Setup" --msgbox "Finished $install_type install. Press Ok to reboot." 8 75 install_cleanup >> $setup_log 2>&1 } @@ -967,7 +967,7 @@ whiptail_setup_failed() { [ -n "$TESTING" ] && return - whiptail --title "Security Onion Setup" --msgbox "Install had a problem. Please see $setup_log for details. Press ENTER to reboot." 8 75 + whiptail --title "Security Onion Setup" --msgbox "Install had a problem. Please see $setup_log for details. Press Ok to reboot." 8 75 install_cleanup >> $setup_log 2>&1 } From 41935996d4384849fd4ffe93924d2c2fb6bd9ff7 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 26 May 2020 13:48:36 -0400 Subject: [PATCH 31/76] add ntpdate/ntp --- salt/common/init.sls | 2 ++ 1 file changed, 2 insertions(+) diff --git a/salt/common/init.sls b/salt/common/init.sls index 77801dd1a..08ae7fa72 100644 --- a/salt/common/init.sls +++ b/salt/common/init.sls @@ -45,6 +45,7 @@ commonpkgs: - pkgs: - apache2-utils - wget + - ntp - jq - python3-docker - docker-ce @@ -75,6 +76,7 @@ commonpkgs: - skip_suggestions: True - pkgs: - wget + - ntpdate - bind-utils - jq - tcpdump From 21cd66d109c458b8aa335d3e93143fb9555a8d94 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 26 May 2020 14:05:47 -0400 Subject: [PATCH 32/76] add ntpdate/ntp --- salt/common/init.sls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/common/init.sls b/salt/common/init.sls index 08ae7fa72..09d71114b 100644 --- a/salt/common/init.sls +++ b/salt/common/init.sls @@ -45,7 +45,7 @@ commonpkgs: - pkgs: - apache2-utils - wget - - ntp + - ntpdate - jq - python3-docker - docker-ce From 92c6a524965308d0b66c0abd52a9baccbf4a1706 Mon Sep 17 00:00:00 2001 From: William Wernert Date: Tue, 26 May 2020 14:49:22 -0400 Subject: [PATCH 33/76] [feat][WIP] Add functionality to run setup through a proxy --- setup/so-functions | 56 +++++++++++++++++++++++++++++++--------------- 1 file changed, 38 insertions(+), 
18 deletions(-) diff --git a/setup/so-functions b/setup/so-functions index 4e01c935c..fed162596 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -550,6 +550,9 @@ detect_os() { disable_onion_user() { # Disable the default account cause security. usermod -L onion + + # Remove the automated setup script from crontab, if it exists + crontab -u onion -r } disable_misc_network_features() { @@ -955,29 +958,47 @@ parse_options() { if [[ $is_master || $is_helix ]]; then local proxy proxy=$(echo "$1" | tr -d '"' | awk -F'--turbo=' '{print $2}') - proxy_addr="http://$proxy" - use_proxy "$proxy_addr" - TURBO="$proxy_addr" + proxy_url="http://$proxy" + TURBO="$proxy_url" + use_turbo_proxy "$TURBO" else echo "turbo is not supported on this install type" >> $setup_log 2>&1 fi ;; --proxy=*) - echo "Unimplimented" - return + local proxy + proxy=$(echo "$1" | tr -d '"' | awk -F'--proxy=' '{print $2}') - if [[ $2 != --proxy-user=* ]] || [[ $3 != --proxy-pass=* ]]; then - echo "Invalid options passed for proxy. Order is --proxy-user= --proxy-pass=" - else - local proxy + local proxy_protocol + proxy_protocol=$(echo "$proxy" |tr -d '"' | awk 'match($0, /http|https/) { print substr($0, RSTART, RLENGTH) }') + + if [[ ! 
$proxy_protocol =~ ^(http|https) ]]; then + echo "Invalid proxy protocol" + echo "Ignoring proxy" + return + fi + + if [[ $2 == --proxy-user=* && $3 == --proxy-pass=* ]]; then local proxy_user local proxy_password - proxy=$(echo "$1" | tr -d '"' | awk -F'--proxy=' '{print $2}') proxy_user=$(echo "$2" | tr -d '"' | awk -F'--proxy-user=' '{print $2}') proxy_password=$(echo "$3" | tr -d '"' | awk -F'--proxy-pass=' '{print $2}') - - use_proxy "$proxy" "$proxy_user" "$proxy_password" + + local proxy_addr + proxy_addr=$(echo "$proxy" | tr -d '"' | awk -F'http\:\/\/|https\:\/\/' '{print $2}') + + export http_proxy="${proxy_protocol}://${proxy_user}:${proxy_password}@${proxy_addr}" + + elif [[ (-z $2 || -z $3) && (-n $2 || -n $3) || ($2 != --proxy-user=* || $3 != --proxy-pass=*) ]]; then + echo "Invalid options passed for proxy. Order is --proxy-user= --proxy-pass=" + echo "Ignoring proxy" + return + + else + export http_proxy="$proxy" fi + + export {https,ftp,rsync,all}_proxy="$http_proxy" ;; *) echo "Invalid option" @@ -1479,18 +1500,17 @@ update_packages() { fi } -use_proxy() { - local proxy_addr=$1 +use_turbo_proxy() { + local proxy_url=$1 #TODO: add options for username + pass if [[ $OS == 'centos' ]]; then - printf '%s\n'\ - "proxy=${proxy_addr}:3142" >> /etc/yum.conf + printf '%s\n' "proxy=${proxy_url}:3142" >> /etc/yum.conf else printf '%s\n'\ "Acquire {"\ - "HTTP::proxy \"${proxy_addr}:3142\";"\ - "HTTPS::proxy \"${proxy_addr}:3142\";"\ + " HTTP::proxy \"${proxy_url}:3142\";"\ + " HTTPS::proxy \"${proxy_url}:3142\";"\ "}" > /etc/apt/apt.conf.d/proxy.conf fi } From b748a8669474b66091f48d162023e9b1913a6eae Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 26 May 2020 15:12:00 -0400 Subject: [PATCH 34/76] Suricata 5 initial commit --- salt/suricata/files/suricata.yaml | 873 ++++++---- salt/suricata/files/suricataDEPRICATED.yaml | 1726 +++++++++++++++++++ 2 files changed, 2240 insertions(+), 359 deletions(-) create mode 100644 
salt/suricata/files/suricataDEPRICATED.yaml diff --git a/salt/suricata/files/suricata.yaml b/salt/suricata/files/suricata.yaml index 5a0121b63..ebebe0138 100644 --- a/salt/suricata/files/suricata.yaml +++ b/salt/suricata/files/suricata.yaml @@ -1,28 +1,28 @@ %YAML 1.1 --- -{%- set interface = salt['pillar.get']('sensor:interface', 'bond0') %} -{%- if grains['role'] == 'so-eval' %} -{%- set MTU = 1500 %} -{%- elif grains['role'] == 'so-helix' %} -{%- set MTU = 9000 %} -{%- else %} -{%- set MTU = salt['pillar.get']('sensor:mtu', '1500') %} -{%- endif %} -{%- if salt['pillar.get']('sensor:homenet') %} - {%- set homenet = salt['pillar.get']('sensor:hnsensor', '') %} -{%- else %} - {%- set homenet = salt['pillar.get']('static:hnmaster', '') %} -{%- endif %} + {%- set interface = salt['pillar.get']('sensor:interface', 'bond0') %} + {%- if grains['role'] == 'so-eval' %} + {%- set MTU = 1500 %} + {%- elif grains['role'] == 'so-helix' %} + {%- set MTU = 9000 %} + {%- else %} + {%- set MTU = salt['pillar.get']('sensor:mtu', '1500') %} + {%- endif %} + {%- if salt['pillar.get']('sensor:homenet') %} + {%- set homenet = salt['pillar.get']('sensor:hnsensor', '') %} + {%- else %} + {%- set homenet = salt['pillar.get']('static:hnmaster', '') %} + {%- endif %} # Suricata configuration file. 
In addition to the comments describing all # options in this file, full documentation can be found at: -# https://redmine.openinfosecfoundation.org/projects/suricata/wiki/Suricatayaml +# https://suricata.readthedocs.io/en/latest/configuration/suricata-yaml.html ## ## Step 1: inform Suricata about your network ## vars: - # more specifc is better for alert accuracy and performance + # more specific is better for alert accuracy and performance address-groups: HOME_NET: "[{{ homenet }}]" #HOME_NET: "[192.168.0.0/16]" @@ -39,6 +39,7 @@ vars: DNS_SERVERS: "$HOME_NET" TELNET_SERVERS: "$HOME_NET" AIM_SERVERS: "$EXTERNAL_NET" + DC_SERVERS: "$HOME_NET" DNP3_SERVER: "$HOME_NET" DNP3_CLIENT: "$HOME_NET" MODBUS_CLIENT: "$HOME_NET" @@ -55,23 +56,11 @@ vars: MODBUS_PORTS: 502 FILE_DATA_PORTS: "[$HTTP_PORTS,110,143]" FTP_PORTS: 21 - + VXLAN_PORTS: 4789 + TEREDO_PORTS: 3544 ## -## Step 2: select the rules to enable or disable -## - -default-rule-path: /etc/suricata/rules -rule-files: - - all.rules - -classification-file: /etc/suricata/classification.config -reference-config-file: /etc/suricata/reference.config -# threshold-file: /usr/local/etc/suricata/threshold.config - - -## -## Step 3: select outputs to enable +## Step 2: select outputs to enable ## # The default logging directory. Any log or output file will be @@ -85,6 +74,13 @@ stats: # The interval field (in seconds) controls at what interval # the loggers are invoked. interval: 30 + # Add decode events as stats. + #decoder-events: true + # Decoder event prefix in stats. Has been 'decoder' before, but that leads + # to missing events in the eve.stats records. See issue #2225. + #decoder-events-prefix: "decoder.event" + # Add stream events as stats. + #stream-events: false # Configure the type of alert (and other) logging you would like. 
outputs: @@ -100,9 +96,6 @@ outputs: enabled: yes filetype: regular #regular|syslog|unix_dgram|unix_stream|redis filename: eve.json - rotate-interval: day - community-id: true - community-id-seed: 0 #prefix: "@cee: " # prefix to prepend to each log entry # the following are valid when type: syslog above #identity: "suricata" @@ -124,63 +117,141 @@ outputs: # pipelining: # enabled: yes ## set enable to yes to enable query pipelining # batch-size: 10 ## number of entry to keep in buffer + + # Include top level metadata. Default yes. + #metadata: no + + # include the name of the input pcap file in pcap file processing mode + pcap-file: false + + # Community Flow ID + # Adds a 'community_id' field to EVE records. These are meant to give + # a records a predictable flow id that can be used to match records to + # output of other tools such as Bro. + # + # Takes a 'seed' that needs to be same across sensors and tools + # to make the id less predictable. + + # enable/disable the community id feature. + community-id: true + # Seed value for the ID output. Valid values are 0-65535. + community-id-seed: 0 + + # HTTP X-Forwarded-For support by adding an extra field or overwriting + # the source or destination IP address (depending on flow direction) + # with the one reported in the X-Forwarded-For HTTP header. This is + # helpful when reviewing alerts for traffic that is being reverse + # or forward proxied. + xff: + enabled: no + # Two operation modes are available, "extra-data" and "overwrite". + mode: extra-data + # Two proxy deployments are supported, "reverse" and "forward". In + # a "reverse" deployment the IP address used is the last one, in a + # "forward" deployment the first IP address is used. + deployment: reverse + # Header name where the actual IP address will be reported, if more + # than one IP address is present, the last IP address will be the + # one taken into consideration. 
+ header: X-Forwarded-For + types: - alert: - # payload: yes # enable dumping payload in Base64 - # payload-buffer-size: 4kb # max size of payload buffer to output in eve-log - # payload-printable: yes # enable dumping payload in printable (lossy) format - # packet: yes # enable dumping of packet (without stream segments) - # http-body: yes # enable dumping of http body in Base64 - # http-body-printable: yes # enable dumping of http body in printable format - metadata: - app-layer: false - flow: false - rule: - metadata: true - raw: true + payload: no # enable dumping payload in Base64 + payload-buffer-size: 4kb # max size of payload buffer to output in eve-log + payload-printable: yes # enable dumping payload in printable (lossy) format + packet: yes # enable dumping of packet (without stream segments) + metadata: + app-layer: false + flow: false + rule: + metadata: true + raw: true + + # http-body: yes # Requires metadata; enable dumping of http body in Base64 + # http-body-printable: yes # Requires metadata; enable dumping of http body in printable format # Enable the logging of tagged packets for rules using the # "tag" keyword. tagged-packets: no - - # HTTP X-Forwarded-For support by adding an extra field or overwriting - # the source or destination IP address (depending on flow direction) - # with the one reported in the X-Forwarded-For HTTP header. This is - # helpful when reviewing alerts for traffic that is being reverse - # or forward proxied. - xff: - enabled: no - # Two operation modes are available, "extra-data" and "overwrite". - mode: extra-data - # Two proxy deployments are supported, "reverse" and "forward". In - # a "reverse" deployment the IP address used is the last one, in a - # "forward" deployment the first IP address is used. - deployment: reverse - # Header name where the actual IP address will be reported, if more - # than one IP address is present, the last IP address will be the - # one taken into consideration. 
- header: X-Forwarded-For + #- anomaly: + # Anomaly log records describe unexpected conditions such + # as truncated packets, packets with invalid IP/UDP/TCP + # length values, and other events that render the packet + # invalid for further processing or describe unexpected + # behavior on an established stream. Networks which + # experience high occurrences of anomalies may experience + # packet processing degradation. + # + # Anomalies are reported for the following: + # 1. Decode: Values and conditions that are detected while + # decoding individual packets. This includes invalid or + # unexpected values for low-level protocol lengths as well + # as stream related events (TCP 3-way handshake issues, + # unexpected sequence number, etc). + # 2. Stream: This includes stream related events (TCP + # 3-way handshake issues, unexpected sequence number, + # etc). + # 3. Application layer: These denote application layer + # specific conditions that are unexpected, invalid or are + # unexpected given the application monitoring state. + # + # By default, anomaly logging is disabled. When anomaly + # logging is enabled, applayer anomaly reporting is + # enabled. + # enabled: no + # + # Choose one or more types of anomaly logging and whether to enable + # logging of the packet header for packet anomalies. 
+ # types: + # decode: no + # stream: no + # applayer: yes + #packethdr: no #- http: - # extended: no # enable this for extended logging information + # extended: yes # enable this for extended logging information # custom allows additional http fields to be included in eve-log # the example below adds three additional fields when uncommented #custom: [Accept-Encoding, Accept-Language, Authorization] + # set this value to one and only one among {both, request, response} + # to dump all http headers for every http request and/or response + # dump-all-headers: none #- dns: - # control logging of queries and answers - # default yes, no to disable - # query: no # enable logging of DNS queries - # answer: no # enable logging of DNS answers - # control which RR types are logged - # all enabled if custom not specified - #custom: [a, aaaa, cname, mx, ns, ptr, txt] + # This configuration uses the new DNS logging format, + # the old configuration is still available: + # https://suricata.readthedocs.io/en/latest/output/eve/eve-json-output.html#dns-v1-format + + # As of Suricata 5.0, version 2 of the eve dns output + # format is the default. + #version: 2 + + # Enable/disable this logger. Default: enabled. + #enabled: yes + + # Control logging of requests and responses: + # - requests: enable logging of DNS queries + # - responses: enable logging of DNS answers + # By default both requests and responses are logged. + #requests: no + #responses: no + + # Format of answer logging: + # - detailed: array item per answer + # - grouped: answers aggregated by type + # Default: all + #formats: [detailed, grouped] + + # Types to log, based on the query type. + # Default: all. 
+ #types: [a, aaaa, cname, mx, ns, ptr, txt] #- tls: - # extended: no # enable this for extended logging information + # extended: yes # enable this for extended logging information # output TLS transaction where the session is resumed using a # session id #session-resumption: no # custom allows to control which tls fields that are included # in eve-log - #custom: [subject, issuer, session_resumed, serial, fingerprint, sni, version, not_before, not_after, certificate, chain] + #custom: [subject, issuer, session_resumed, serial, fingerprint, sni, version, not_before, not_after, certificate, chain, ja3, ja3s] #- files: # force-magic: no # force logging magic on all logged files # force logging of checksums, available hash functions are md5, @@ -190,7 +261,7 @@ outputs: # alerts: yes # log alerts that caused drops # flows: all # start or all: 'start' logs only a single drop # # per flow direction. All logs each dropped pkt. - #- smtp: + - smtp: #extended: yes # enable this for extended logging information # this includes: bcc, message-id, subject, x_mailer, user-agent # custom fields logging from the list: @@ -204,60 +275,42 @@ outputs: #md5: [body, subject] #- dnp3 + #- ftp + #- rdp #- nfs - #- ssh: - #- stats: - # totals: yes # stats for all threads merged together - # threads: no # per thread stats - # deltas: no # include delta values + #- smb + #- tftp + #- ikev2 + #- krb5 + #- snmp + #- sip + #- dhcp: + # enabled: yes + # When extended mode is on, all DHCP messages are logged + # with full detail. When extended mode is off (the + # default), just enough information to map a MAC address + # to an IP address is logged. 
+ # extended: no + #- ssh + - stats: + totals: yes # stats for all threads merged together + threads: no # per thread stats + deltas: no # include delta values # bi-directional flows - #- flow: + #- flow # uni-directional flows #- netflow - # Vars log flowbits and other packet and flow vars - #- vars - # alert output for use with Barnyard2 + # Metadata event type. Triggered whenever a pktvar is saved + # and will include the pktvars, flowvars, flowbits and + # flowints. + #- metadata + + # deprecated - unified2 alert format for use with Barnyard2 - unified2-alert: enabled: no - filename: unified2.alert - - # File size limit. Can be specified in kb, mb, gb. Just a number - # is parsed as bytes. - #limit: 32mb - - # By default unified2 log files have the file creation time (in - # unix epoch format) appended to the filename. Set this to yes to - # disable this behaviour. - #nostamp: no - - # Sensor ID field of unified2 alerts. - #sensor-id: 0 - - # Include payload of packets related to alerts. Defaults to true, set to - # false if payload is not required. - #payload: yes - - # HTTP X-Forwarded-For support by adding the unified2 extra header or - # overwriting the source or destination IP address (depending on flow - # direction) with the one reported in the X-Forwarded-For HTTP header. - # This is helpful when reviewing alerts for traffic that is being reverse - # or forward proxied. - xff: - enabled: no - # Two operation modes are available, "extra-data" and "overwrite". Note - # that in the "overwrite" mode, if the reported IP address in the HTTP - # X-Forwarded-For header is of a different version of the packet - # received, it will fall-back to "extra-data" mode. - mode: extra-data - # Two proxy deployments are supported, "reverse" and "forward". In - # a "reverse" deployment the IP address used is the last one, in a - # "forward" deployment the first IP address is used. 
- deployment: reverse - # Header name where the actual IP address will be reported, if more - # than one IP address is present, the last IP address will be the - # one taken into consideration. - header: X-Forwarded-For + # for further options see: + # https://suricata.readthedocs.io/en/suricata-5.0.0/configuration/suricata-yaml.html#alert-output-for-use-with-barnyard2-unified2-alert # a line based log of HTTP requests (no alerts) - http-log: @@ -266,7 +319,7 @@ outputs: append: yes #extended: yes # enable this for extended logging information #custom: yes # enabled the custom logging format (defined by customformat) - + #customformat: "%{%D-%H:%M:%S}t.%z %{X-Forwarded-For}i %H %m %h %u %s %B %a:%p -> %A:%P" #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram' # a line based log of TLS handshake parameters (no alerts) @@ -276,6 +329,7 @@ outputs: append: yes #extended: yes # Log extended information like fingerprint #custom: yes # enabled the custom logging format (defined by customformat) + #customformat: "%{%D-%H:%M:%S}t.%z %a:%p -> %A:%P %v %n %d %D" #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram' # output TLS transaction where the session is resumed using a # session id @@ -286,13 +340,6 @@ outputs: enabled: no #certs-log-dir: certs # directory to store the certificates files - # a line based log of DNS requests and/or replies (no alerts) - - dns-log: - enabled: no - filename: dns.log - append: yes - #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram' - # Packet log... log packets in pcap format. 3 modes of operation: "normal" # "multi" and "sguil". # @@ -334,6 +381,17 @@ outputs: # If set to a value will enable ring buffer mode. Will keep Maximum of "max-files" of size "limit" max-files: 2000 + # Compression algorithm for pcap files. Possible values: none, lz4. + # Enabling compression is incompatible with the sguil mode. Note also + # that on Windows, enabling compression will *increase* disk I/O. 
+ compression: none + + # Further options for lz4 compression. The compression level can be set + # to a value between 0 and 16, where higher values result in higher + # compression. + #lz4-checksum: no + #lz4-level: 0 + mode: normal # normal, multi or sguil. # Directory to place pcap files. If not provided the default log @@ -352,7 +410,7 @@ outputs: append: yes #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram' - # alert output to prelude (http://www.prelude-technologies.com/) only + # alert output to prelude (https://www.prelude-siem.org/) only # available if Suricata has been compiled with --enable-prelude - alert-prelude: enabled: no @@ -360,14 +418,14 @@ outputs: log-packet-content: no log-packet-header: yes - # Stats.log contains data from various counters of the suricata engine. + # Stats.log contains data from various counters of the Suricata engine. - stats: enabled: yes filename: stats.log append: yes # append to file (yes) or overwrite it (no) totals: yes # stats for all threads merged together threads: no # per thread stats - #null-values: yes # print counters that have value 0 + null-values: yes # print counters that have value 0 # a line based alerts log similar to fast.log into syslog - syslog: @@ -379,60 +437,89 @@ outputs: #level: Info ## possible levels: Emergency, Alert, Critical, ## Error, Warning, Notice, Info, Debug - # a line based information for dropped packets in IPS mode + # deprecated a line based information for dropped packets in IPS mode - drop: enabled: no - filename: drop.log - append: yes - #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram' + # further options documented at: + # https://suricata.readthedocs.io/en/suricata-5.0.0/configuration/suricata-yaml.html#drop-log-a-line-based-information-for-dropped-packets - # output module to store extracted files to disk + # Output module for storing files on disk. 
Files are stored in a + # directory names consisting of the first 2 characters of the + # SHA256 of the file. Each file is given its SHA256 as a filename. # - # The files are stored to the log-dir in a format "file." where is - # an incrementing number starting at 1. For each file "file." a meta - # file "file..meta" is created. + # When a duplicate file is found, the existing file is touched to + # have its timestamps updated. # - # File extraction depends on a lot of things to be fully done: - # - file-store stream-depth. For optimal results, set this to 0 (unlimited) - # - http request / response body sizes. Again set to 0 for optimal results. - # - rules that contain the "filestore" keyword. + # Unlike the older filestore, metadata is not written out by default + # as each file should already have a "fileinfo" record in the + # eve.log. If write-fileinfo is set to yes, the each file will have + # one more associated .json files that consists of the fileinfo + # record. A fileinfo file will be written for each occurrence of the + # file seen using a filename suffix to ensure uniqueness. + # + # To prune the filestore directory see the "suricatactl filestore + # prune" command which can delete files over a certain age. - file-store: - enabled: no # set to yes to enable - log-dir: files # directory to store the files - force-magic: no # force logging magic on all stored files - # force logging of checksums, available hash functions are md5, - # sha1 and sha256 - #force-hash: [md5] - force-filestore: no # force storing of all files - # override global stream-depth for sessions in which we want to - # perform file extraction. Set to 0 for unlimited. + version: 2 + enabled: no + + # Set the directory for the filestore. If the path is not + # absolute will be be relative to the default-log-dir. + #dir: filestore + + # Write out a fileinfo record for each occurrence of a + # file. 
Disabled by default as each occurrence is already logged + # as a fileinfo record to the main eve-log. + #write-fileinfo: yes + + # Force storing of all files. Default: no. + #force-filestore: yes + + # Override the global stream-depth for sessions in which we want + # to perform file extraction. Set to 0 for unlimited. #stream-depth: 0 - #waldo: file.waldo # waldo file to store the file_id across runs - # uncomment to disable meta file writing - #write-meta: no - # uncomment the following variable to define how many files can + + # Uncomment the following variable to define how many files can # remain open for filestore by Suricata. Default value is 0 which # means files get closed after each write #max-open-files: 1000 - # output module to log files tracked in a easily parsable json format - - file-log: - enabled: no - filename: files-json.log - append: yes - #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram' + # Force logging of checksums, available hash functions are md5, + # sha1 and sha256. Note that SHA256 is automatically forced by + # the use of this output module as it uses the SHA256 as the + # file naming scheme. + #force-hash: [sha1, md5] + # NOTE: X-Forwarded configuration is ignored if write-fileinfo is disabled + # HTTP X-Forwarded-For support by adding an extra field or overwriting + # the source or destination IP address (depending on flow direction) + # with the one reported in the X-Forwarded-For HTTP header. This is + # helpful when reviewing alerts for traffic that is being reverse + # or forward proxied. + xff: + enabled: no + # Two operation modes are available, "extra-data" and "overwrite". + mode: extra-data + # Two proxy deployments are supported, "reverse" and "forward". In + # a "reverse" deployment the IP address used is the last one, in a + # "forward" deployment the first IP address is used. 
+ deployment: reverse + # Header name where the actual IP address will be reported, if more + # than one IP address is present, the last IP address will be the + # one taken into consideration. + header: X-Forwarded-For - force-magic: no # force logging magic on all logged files - # force logging of checksums, available hash functions are md5, - # sha1 and sha256 - #force-hash: [md5] + # deprecated - file-store v1 + - file-store: + enabled: no + # further options documented at: + # https://suricata.readthedocs.io/en/suricata-5.0.0/file-extraction/file-extraction.html#file-store-version-1 # Log TCP data after stream normalization # 2 types: file or dir. File logs into a single logfile. Dir creates # 2 files per TCP session and stores the raw TCP data into them. # Using 'both' will enable both file and dir modes. # - # Note: limited by stream.depth + # Note: limited by stream.reassembly.depth - tcp-data: enabled: no type: file @@ -452,7 +539,7 @@ outputs: # Lua Output Support - execute lua script to generate alert and event # output. # Documented at: - # https://redmine.openinfosecfoundation.org/projects/suricata/wiki/Lua_Output + # https://suricata.readthedocs.io/en/latest/output/lua-output.html - lua: enabled: no #scripts-dir: /etc/suricata/lua-output/ @@ -466,20 +553,20 @@ logging: # Note that debug level logging will only be emitted if Suricata was # compiled with the --enable-debug configure option. # - # This value is overriden by the SC_LOG_LEVEL env var. + # This value is overridden by the SC_LOG_LEVEL env var. default-log-level: notice # The default output format. Optional parameter, should default to - # something reasonable if not provided. Can be overriden in an + # something reasonable if not provided. Can be overridden in an # output section. You can leave this out to get the default. # - # This value is overriden by the SC_LOG_FORMAT env var. + # This value is overridden by the SC_LOG_FORMAT env var. 
#default-log-format: "[%i] %t - (%f:%l) <%d> (%n) -- " # A regex to filter output. Can be overridden in an output section. # Defaults to empty (no filter). # - # This value is overriden by the SC_LOG_OP_FILTER env var. + # This value is overridden by the SC_LOG_OP_FILTER env var. default-output-filter: # Define your logging outputs. If none are defined, or they are all @@ -491,11 +578,23 @@ logging: - file: enabled: yes level: info - filename: /var/log/suricata/suricata.log + filename: suricata.log # type: json - syslog: enabled: no + facility: local5 + format: "[%i] <%d> -- " + # type: json + +## +## Step 4: configure common capture settings +## +## See "Advanced Capture Options" below for more options, including NETMAP +## and PF_RING. +## + +# Linux high speed capture support af-packet: - interface: {{ interface }} # Number of receive threads. "auto" uses the number of cores @@ -505,28 +604,21 @@ af-packet: # Default AF_PACKET cluster type. AF_PACKET can load balance per flow or per hash. # This is only supported for Linux kernel > 3.1 # possible value are: - # * cluster_round_robin: round robin load balancing # * cluster_flow: all packets of a given flow are send to the same socket # * cluster_cpu: all packets treated in kernel by a CPU are send to the same socket # * cluster_qm: all packets linked by network card to a RSS queue are sent to the same # socket. Requires at least Linux 3.14. - # * cluster_random: packets are sent randomly to sockets but with an equipartition. - # Requires at least Linux 3.14. - # * cluster_rollover: kernel rotates between sockets filling each socket before moving - # to the next. Requires at least Linux 3.10. + # * cluster_ebpf: eBPF file load balancing. See doc/userguide/capture-hardware/ebpf-xdp.rst for + # more info. 
# Recommended modes are cluster_flow on most boxes and cluster_cpu or cluster_qm on system # with capture card using RSS (require cpu affinity tuning and system irq tuning) cluster-type: cluster_flow # In some fragmentation case, the hash can not be computed. If "defrag" is set # to yes, the kernel will do the needed defragmentation before sending the packets. defrag: yes - # After Linux kernel 3.10 it is possible to activate the rollover option: if a socket is - # full then kernel will send the packet on the next socket with room available. This option - # can minimize packet drop and increase the treated bandwidth on single intensive flow. - #rollover: yes # To use the ring feature of AF_PACKET, set 'use-mmap' to yes - #use-mmap: yes - # Lock memory map to avoid it goes to swap. Be careful that over suscribing could lock + use-mmap: yes + # Lock memory map to avoid it goes to swap. Be careful that over subscribing could lock # your system #mmap-locked: yes # Use tpacket_v3 capture mode, only active if use-mmap is true @@ -572,13 +664,14 @@ af-packet: # will not be copied. #copy-mode: ips #copy-iface: eth1 + # For eBPF and XDP setup including bypass, filter and load balancing, please + # see doc/userguide/capture-hardware/ebpf-xdp.rst for more info. # Put default values here. These will be used for an interface that is not # in the list above. - interface: default #threads: auto #use-mmap: no - #rollover: yes #tpacket-v3: yes # Cross platform libpcap capture support @@ -595,7 +688,7 @@ pcap: # Possible values are: # - yes: checksum validation is forced # - no: checksum validation is disabled - # - auto: suricata uses a statistical approach to detect when + # - auto: Suricata uses a statistical approach to detect when # checksum off-loading is used. 
(default) # Warning: 'checksum-validation' must be set to yes to have any validation #checksum-checks: auto @@ -618,7 +711,7 @@ pcap-file: # Possible values are: # - yes: checksum validation is forced # - no: checksum validation is disabled - # - auto: suricata uses a statistical approach to detect when + # - auto: Suricata uses a statistical approach to detect when # checksum off-loading is used. (default) # Warning: 'checksum-validation' must be set to yes to have checksum tested checksum-checks: auto @@ -639,42 +732,66 @@ pcap-file: # "detection-only" enables protocol detection only (parser disabled). app-layer: protocols: + krb5: + enabled: yes + snmp: + enabled: yes + ikev2: + enabled: yes tls: - enabled: detection-only + enabled: yes detection-ports: dp: 443 - # Completely stop processing TLS/SSL session after the handshake - # completed. If bypass is enabled this will also trigger flow - # bypass. If disabled (the default), TLS/SSL session is still - # tracked for Heartbleed and other anomalies. - #no-reassemble: yes + # Generate JA3 fingerprint from client hello. If not specified it + # will be disabled by default, but enabled if rules require it. + #ja3-fingerprints: auto + + # What to do when the encrypted communications start: + # - default: keep tracking TLS session, check for protocol anomalies, + # inspect tls_* keywords. Disables inspection of unmodified + # 'content' signatures. + # - bypass: stop processing this flow as much as possible. No further + # TLS parsing and inspection. Offload flow bypass to kernel + # or hardware if possible. + # - full: keep tracking and inspection as normal. Unmodified content + # keyword signatures are inspected as well. + # + # For best performance, select 'bypass'. + # + #encryption-handling: default + dcerpc: - enabled: detection-only + enabled: yes ftp: - enabled: detection-only + enabled: yes + # memcap: 64mb + # RDP, disabled by default. 
+ rdp: + #enabled: no ssh: - enabled: detection-only + enabled: yes smtp: - enabled: detection-only + enabled: yes + raw-extraction: no # Configure SMTP-MIME Decoder mime: # Decode MIME messages from SMTP transactions # (may be resource intensive) # This field supercedes all others because it turns the entire # process on or off - decode-mime: detection-only + decode-mime: yes # Decode MIME entity bodies (ie. base64, quoted-printable, etc.) - decode-base64: detection-only - decode-quoted-printable: detection-only + decode-base64: yes + decode-quoted-printable: yes # Maximum bytes per header data value stored in the data structure # (default is 2000) header-value-depth: 2000 # Extract URLs and save in state data structure - extract-urls: detection-only + extract-urls: yes # Set to yes to compute the md5 of the mail body. You will then # be able to journalize it. body-md5: no @@ -685,19 +802,18 @@ app-layer: content-inspect-window: 4096 imap: enabled: detection-only - msn: - enabled: detection-only smb: - enabled: detection-only + enabled: yes detection-ports: dp: 139, 445 - # smb2 detection is disabled internally inside the engine. - #smb2: - # enabled: yes - # Note: NFS parser depends on Rust support: pass --enable-rust - # to configure. + + # Stream reassembly size for SMB streams. By default track it completely. + #stream-depth: 0 + nfs: - enabled: no + enabled: yes + tftp: + enabled: yes dns: # memcaps. Globally and per flow/state. 
#global-memcap: 16mb @@ -708,16 +824,17 @@ app-layer: #request-flood: 500 tcp: - enabled: detection-only + enabled: yes detection-ports: dp: 53 udp: - enabled: detection-only + enabled: yes detection-ports: dp: 53 http: - enabled: detection-only - # memcap: 64mb + enabled: yes + # memcap: Maximum memory capacity for http + # Default is unlimited, value can be such as 64mb # default-config: Used when no server-config matches # personality: List of personalities used by default @@ -725,37 +842,15 @@ app-layer: # by http_client_body & pcre /P option. # response-body-limit: Limit reassembly of response body for inspection # by file_data, http_server_body & pcre /Q option. - # double-decode-path: Double decode path section of the URI - # double-decode-query: Double decode query section of the URI - # response-body-decompress-layer-limit: - # Limit to how many layers of compression will be - # decompressed. Defaults to 2. # + # For advanced options, see the user guide + + # server-config: List of server configurations to use if address matches - # address: List of ip addresses or networks for this block + # address: List of IP addresses or networks for this block # personalitiy: List of personalities used by this block - # request-body-limit: Limit reassembly of request body for inspection - # by http_client_body & pcre /P option. - # response-body-limit: Limit reassembly of response body for inspection - # by file_data, http_server_body & pcre /Q option. - # double-decode-path: Double decode path section of the URI - # double-decode-query: Double decode query section of the URI # - # uri-include-all: Include all parts of the URI. By default the - # 'scheme', username/password, hostname and port - # are excluded. Setting this option to true adds - # all of them to the normalized uri as inspected - # by http_uri, urilen, pcre with /U and the other - # keywords that inspect the normalized uri. - # Note that this does not affect http_raw_uri. 
- # Also, note that including all was the default in - # 1.4 and 2.0beta1. - # - # meta-field-limit: Hard size limit for request and response size - # limits. Applies to request line and headers, - # response line and headers. Does not apply to - # request or response bodies. Default is 18k. - # If this limit is reached an event is raised. + # Then, all the fields from default-config can be overloaded # # Currently Available Personalities: # Minimal, Generic, IDS (default), IIS_4_0, IIS_5_0, IIS_5_1, IIS_6_0, @@ -781,6 +876,20 @@ app-layer: # auto will use http-body-inline mode in IPS mode, yes or no set it statically http-body-inline: auto + # Decompress SWF files. + # 2 types: 'deflate', 'lzma', 'both' will decompress deflate and lzma + # compress-depth: + # Specifies the maximum amount of data to decompress, + # set 0 for unlimited. + # decompress-depth: + # Specifies the maximum amount of decompressed data to obtain, + # set 0 for unlimited. + swf-decompression: + enabled: yes + type: both + compress-depth: 0 + decompress-depth: 0 + # Take a random value for inspection sizes around the specified value. # This lower the risk of some evasion technics but could lead # detection change between runs. It is set to 'yes' by default. @@ -795,6 +904,15 @@ app-layer: double-decode-path: no double-decode-query: no + # Can disable LZMA decompression + #lzma-enabled: yes + # Memory limit usage for LZMA decompression dictionary + # Data is decompressed until dictionary reaches this size + #lzma-memlimit: 1mb + # Maximum decompressed size with a compression ratio + # above 2048 (only LZMA can reach this ratio, deflate cannot) + #compression-bomb-limit: 1mb + server-config: #- apache: @@ -854,10 +972,15 @@ app-layer: dp: 44818 sp: 44818 - # Note: parser depends on experimental Rust support - # with --enable-rust-experimental passed to configure ntp: - enabled: no + enabled: yes + + dhcp: + enabled: yes + + # SIP, disabled by default. 
+ sip: + #enabled: no # Limit for the maximum number of asn1 frames to decode (default 256) asn1-max-frames: 256 @@ -885,13 +1008,18 @@ run-as: # Default location of the pid file. The pid file is only used in # daemon mode (start Suricata with -D). If not running in daemon mode # the --pidfile command line option must be used to create a pid file. -#pid-file: /usr/local/var/run/suricata.pid +#pid-file: /var/run/suricata.pid # Daemon working directory # Suricata will change directory to this one if provided # Default: "/" #daemon-directory: "/" +# Umask. +# Suricata will use this umask if it is provided. By default it will use the +# umask passed on by the shell. +#umask: 022 + # Suricata core dump configuration. Limits the size of the core dump file to # approximately max-dump. The actual core dump size will be a multiple of the # page size. Core dumps that would be larger than max-dump are truncated. On @@ -904,7 +1032,7 @@ run-as: coredump: max-dump: unlimited -# If suricata box is a router for the sniffed networks, set it to 'router'. If +# If Suricata box is a router for the sniffed networks, set it to 'router'. If # it is a pure sniffing setup, set it to 'sniffer-only'. # If set to auto, the variable is internally switch to 'router' in IPS mode # and 'sniffer-only' in IDS mode. @@ -914,36 +1042,29 @@ host-mode: auto # Number of packets preallocated per thread. The default is 1024. A higher number # will make sure each CPU will be more easily kept busy, but may negatively # impact caching. -# -# If you are using the CUDA pattern matcher (mpm-algo: ac-cuda), different rules -# apply. In that case try something like 60000 or more. This is because the CUDA -# pattern matcher buffers and scans as many packets as possible in parallel. #max-pending-packets: 1024 # Runmode the engine should use. Please check --list-runmodes to get the available -# runmodes for each packet acquisition method. Defaults to "autofp" (auto flow pinned -# load balancing). 
+# runmodes for each packet acquisition method. Default depends on selected capture +# method. 'workers' generally gives best performance. runmode: workers # Specifies the kind of flow load balancer used by the flow pinned autofp mode. # # Supported schedulers are: # -# round-robin - Flows assigned to threads in a round robin fashion. -# active-packets - Flows assigned to threads that have the lowest number of -# unprocessed packets (default). -# hash - Flow alloted usihng the address hash. More of a random -# technique. Was the default in Suricata 1.2.1 and older. +# hash - Flow assigned to threads using the 5-7 tuple hash. +# ippair - Flow assigned to threads using addresses only. # -#autofp-scheduler: active-packets +#autofp-scheduler: hash # Preallocated size for packet. Default is 1514 which is the classical # size for pcap on ethernet. You should adjust this value to the highest # packet size (MTU + hardware header) on your system. default-packet-size: {{ MTU + 15 }} -# Unix command socket can be used to pass commands to suricata. -# An external tool can then connect to get information from suricata +# Unix command socket can be used to pass commands to Suricata. +# An external tool can then connect to get information from Suricata # or trigger some modifications of the engine. Set enabled to yes # to activate the feature. In auto mode, the feature will only be # activated in live capture mode. You can use the filename variable to set @@ -956,6 +1077,10 @@ unix-command: #magic-file: /usr/share/file/magic #magic-file: +# GeoIP2 database file. Specify path and filename of GeoIP2 database +# if using rules with "geoip" rule option. 
+#geoip-database: /usr/local/share/GeoLite2/GeoLite2-Country.mmdb + legacy: uricontent: enabled @@ -963,7 +1088,7 @@ legacy: ## Detection settings ## -# Set the order of alerts bassed on actions +# Set the order of alerts based on actions # The default order is pass, drop, reject, alert # action-order: # - pass @@ -972,8 +1097,8 @@ legacy: # - alert # IP Reputation -#reputation-categories-file: /usr/local/etc/suricata/iprep/categories.txt -#default-reputation-path: /usr/local/etc/suricata/iprep +#reputation-categories-file: /etc/suricata/iprep/categories.txt +#default-reputation-path: /etc/suricata/iprep #reputation-files: # - reputation.list @@ -1051,10 +1176,10 @@ defrag: # emergency-recovery is the percentage of flows that the engine need to # prune before unsetting the emergency state. The emergency state is activated # when the memcap limit is reached, allowing to create new flows, but -# prunning them with the emergency timeouts (they are defined below). +# pruning them with the emergency timeouts (they are defined below). # If the memcap is reached, the engine will try to prune flows -# with the default timeouts. If it doens't find a flow to prune, it will set -# the emergency bit and it will try again with more agressive timeouts. +# with the default timeouts. If it doesn't find a flow to prune, it will set +# the emergency bit and it will try again with more aggressive timeouts. # If that doesn't work, then it will try to kill the last time seen flows # not in use. # The memcap can be specified in kb, mb, gb. Just a number indicates it's @@ -1077,7 +1202,7 @@ vlan: # Specific timeouts for flows. Here you can specify the timeouts that the # active flows will wait to transit from the current state to another, on each -# protocol. The value of "new" determine the seconds to wait after a hanshake or +# protocol. 
The value of "new" determine the seconds to wait after a handshake or # stream startup before the engine free the data of that flow it doesn't # change the state to established (usually if we don't receive more packets # of that flow). The value of "established" is the amount of @@ -1138,7 +1263,7 @@ flow-timeouts: # # packet. If csum validation is specified as # # "yes", then packet with invalid csum will not # # be processed by the engine stream/app layer. -# # Warning: locally generated trafic can be +# # Warning: locally generated traffic can be # # generated without checksum due to hardware offload # # of checksum. You can control the handling of checksum # # on a per-interface basis via the 'checksum-checks' @@ -1149,7 +1274,9 @@ flow-timeouts: # inline: no # stream inline mode # drop-invalid: yes # in inline mode, drop packets that are invalid with regards to streaming engine # max-synack-queued: 5 # Max different SYN/ACKs to queue -# bypass: no # Bypass packets when stream.depth is reached +# bypass: no # Bypass packets when stream.reassembly.depth is reached. +# # Warning: first side to reach this triggers +# # the bypass. # # reassembly: # memcap: 64mb # Can be specified in kb, mb, gb. Just a number @@ -1222,9 +1349,22 @@ host: decoder: # Teredo decoder is known to not be completely accurate - # it will sometimes detect non-teredo as teredo. + # as it will sometimes detect non-teredo as teredo. teredo: enabled: true + # ports to look for Teredo. Max 4 ports. If no ports are given, or + # the value is set to 'any', Teredo detection runs on _all_ UDP packets. + ports: $TEREDO_PORTS # syntax: '[3544, 1234]' or '3533' or 'any'. + + # VXLAN decoder is assigned to up to 4 UDP ports. By default only the + # IANA assigned port 4789 is enabled. 
+ vxlan: + enabled: true + ports: $VXLAN_PORTS # syntax: '8472, 4789' + # ERSPAN Type I decode support + erspan: + typeI: + enabled: false ## @@ -1292,7 +1432,6 @@ detect: # The supported algorithms are: # "ac" - Aho-Corasick, default implementation # "ac-bs" - Aho-Corasick, reduced memory implementation -# "ac-cuda" - Aho-Corasick, CUDA implementation # "ac-ks" - Aho-Corasick, "Ken Steele" variant # "hs" - Hyperscan, available when built with Hyperscan support # @@ -1305,10 +1444,6 @@ detect: # to be set to "single", because of ac's memory requirements, unless the # ruleset is small enough to fit in one's memory, in which case one can # use "full" with "ac". Rest of the mpms can be run in "full" mode. -# -# There is also a CUDA pattern matcher (only available if Suricata was -# compiled with --enable-cuda: b2g_cuda. Make sure to update your -# max-pending-packets setting above as well if you use b2g_cuda. mpm-algo: auto @@ -1338,19 +1473,26 @@ threading: {%- if salt['pillar.get']('sensor:suriprocs') %} cpu-affinity: - management-cpu-set: - cpu: [ all ] # include only these cpus in affinity settings + cpu: [ all ] # include only these CPUs in affinity settings - receive-cpu-set: - cpu: [ all ] # include only these cpus in affinity settings + cpu: [ all ] # include only these CPUs in affinity settings - worker-cpu-set: cpu: [ "all" ] mode: "exclusive" # Use explicitely 3 threads and don't compute number by using # detect-thread-ratio variable: + # threads: 3 threads: {{ salt['pillar.get']('sensor:suriprocs') }} prio: + low: [ 0 ] + medium: [ "1-2" ] + high: [ 3 ] default: "high" - {% endif %} - + #- verdict-cpu-set: + # cpu: [ 0 ] + # prio: + # default: "high" + {%- endif -%} {%- if salt['pillar.get']('sensor:suripins') %} cpu-affinity: - management-cpu-set: @@ -1367,10 +1509,6 @@ threading: default: "high" {% endif %} - #- verdict-cpu-set: - # cpu: [ 0 ] - # prio: - # default: "high" # # By default Suricata creates one "detect" thread per available CPU/CPU core. 
# This setting allows controlling this behaviour. A ratio setting of 2 will @@ -1425,6 +1563,11 @@ profiling: filename: keyword_perf.log append: yes + prefilter: + enabled: yes + filename: prefilter_perf.log + append: yes + # per rulegroup profiling rulegroups: enabled: yes @@ -1466,7 +1609,7 @@ profiling: # When running in NFQ inline mode, it is possible to use a simulated # non-terminal NFQUEUE verdict. -# This permit to do send all needed packet to suricata via this a rule: +# This permit to do send all needed packet to Suricata via this a rule: # iptables -I FORWARD -m mark ! --mark $MARK/$MASK -j NFQUEUE # And below, you can have your standard filtering ruleset. To activate # this mode, you need to set mode to 'repeat' @@ -1475,7 +1618,7 @@ profiling: # On linux >= 3.1, you can set batchcount to a value > 1 to improve performance # by processing several packets before sending a verdict (worker runmode only). # On linux >= 3.6, you can set the fail-open option to yes to have the kernel -# accept the packet if suricata is not able to keep pace. +# accept the packet if Suricata is not able to keep pace. # bypass mark and mask can be used to implement NFQ bypass. If bypass mark is # set then the NFQ bypass is activated. Suricata will set the bypass mark/mask # on packet of a flow that need to be bypassed. The Nefilter ruleset has to @@ -1513,17 +1656,17 @@ nflog: # general settings affecting packet capture capture: - # disable NIC offloading. It's restored when Suricata exists. - # Enabled by default + # disable NIC offloading. It's restored when Suricata exits. + # Enabled by default. #disable-offloading: false # # disable checksum validation. Same as setting '-k none' on the - # commandline + # commandline. 
#checksum-validation: none # Netmap support # -# Netmap operates with NIC directly in driver, so you need FreeBSD wich have +# Netmap operates with NIC directly in driver, so you need FreeBSD 11+ which have # built-in netmap support or compile and install netmap module and appropriate # NIC driver on your Linux system. # To reach maximum throughput disable all receive-, segmentation-, @@ -1535,7 +1678,9 @@ capture: netmap: # To specify OS endpoint add plus sign at the end (e.g. "eth0+") - interface: eth2 - # Number of receive threads. "auto" uses number of RSS queues on interface. + # Number of capture threads. "auto" uses number of RSS queues on interface. + # Warning: unless the RSS hashing is symmetrical, this will lead to + # accuracy issues. #threads: auto # You can use the following variables to activate netmap tap or IPS mode. # If copy-mode is set to ips or tap, the traffic coming to the current @@ -1558,7 +1703,7 @@ netmap: # Possible values are: # - yes: checksum validation is forced # - no: checksum validation is disabled - # - auto: suricata uses a statistical approach to detect when + # - auto: Suricata uses a statistical approach to detect when # checksum off-loading is used. # Warning: 'checksum-validation' must be set to yes to have any validation #checksum-checks: auto @@ -1575,9 +1720,9 @@ netmap: # for more info see http://www.ntop.org/products/pf_ring/ pfring: - interface: eth0 - # Number of receive threads (>1 will enable experimental flow pinned - # runmode) - threads: 1 + # Number of receive threads. If set to 'auto' Suricata will first try + # to use CPU (core) count and otherwise RSS queue count. + threads: auto # Default clusterid. PF_RING will load balance packets based on flow. # All threads/processes that will participate need to have the same @@ -1587,8 +1732,15 @@ pfring: # Default PF_RING cluster type. PF_RING can load balance per flow. # Possible values are cluster_flow or cluster_round_robin. 
cluster-type: cluster_flow + # bpf filter for this interface #bpf-filter: tcp + + # If bypass is set then the PF_RING hw bypass is activated, when supported + # by the interface in use. Suricata will instruct the interface to bypass + # all future packets for a flow that need to be bypassed. + #bypass: yes + # Choose checksum verification mode for the interface. At the moment # of the capture, some packets may be with an invalid checksum due to # offloading to the network card of the checksum computation. @@ -1596,7 +1748,7 @@ pfring: # - rxonly: only compute checksum for packets received by network card. # - yes: checksum validation is forced # - no: checksum validation is disabled - # - auto: suricata uses a statistical approach to detect when + # - auto: Suricata uses a statistical approach to detect when # checksum off-loading is used. (default) # Warning: 'checksum-validation' must be set to yes to have any validation #checksum-checks: auto @@ -1641,80 +1793,83 @@ napatech: # (-1 = OFF, 1 - 100 = percentage of the host buffer that can be held back) # This may be enabled when sharing streams with another application. # Otherwise, it should be turned off. - hba: -1 + #hba: -1 - # use_all_streams set to "yes" will query the Napatech service for all configured - # streams and listen on all of them. When set to "no" the streams config array - # will be used. - use-all-streams: yes + # When use_all_streams is set to "yes" the initialization code will query + # the Napatech service for all configured streams and listen on all of them. + # When set to "no" the streams config array will be used. + # + # This option necessitates running the appropriate NTPL commands to create + # the desired streams prior to running suricata. + #use-all-streams: no - # The streams to listen on. This can be either: - # a list of individual streams (e.g. streams: [0,1,2,3]) + # The streams to listen on when auto-config is disabled or when and threading + # cpu-affinity is disabled. 
This can be either: + # an individual stream (e.g. streams: [0]) # or # a range of streams (e.g. streams: ["0-3"]) + # streams: ["0-3"] -# Tilera mpipe configuration. for use on Tilera TILE-Gx. -mpipe: + # When auto-config is enabled the streams will be created and assigned + # automatically to the NUMA node where the thread resides. If cpu-affinity + # is enabled in the threading section. Then the streams will be created + # according to the number of worker threads specified in the worker cpu set. + # Otherwise, the streams array is used to define the streams. + # + # This option cannot be used simultaneous with "use-all-streams". + # + auto-config: yes - # Load balancing modes: "static", "dynamic", "sticky", or "round-robin". - load-balance: dynamic + # Ports indicates which napatech ports are to be used in auto-config mode. + # these are the port ID's of the ports that will be merged prior to the + # traffic being distributed to the streams. + # + # This can be specified in any of the following ways: + # + # a list of individual ports (e.g. ports: [0,1,2,3]) + # + # a range of ports (e.g. ports: [0-3]) + # + # "all" to indicate that all ports are to be merged together + # (e.g. ports: [all]) + # + # This has no effect if auto-config is disabled. + # + ports: [all] - # Number of Packets in each ingress packet queue. Must be 128, 512, 2028 or 65536 - iqueue-packets: 2048 - - # List of interfaces we will listen on. - inputs: - - interface: xgbe2 - - interface: xgbe3 - - interface: xgbe4 - - - # Relative weight of memory for packets of each mPipe buffer size. - stack: - size128: 0 - size256: 9 - size512: 0 - size1024: 0 - size1664: 7 - size4096: 0 - size10386: 0 - size16384: 0 + # When auto-config is enabled the hashmode specifies the algorithm for + # determining to which stream a given packet is to be delivered. + # This can be any valid Napatech NTPL hashmode command. 
+ # + # The most common hashmode commands are: hash2tuple, hash2tuplesorted, + # hash5tuple, hash5tuplesorted and roundrobin. + # + # See Napatech NTPL documentation other hashmodes and details on their use. + # + # This has no effect if auto-config is disabled. + # + hashmode: hash5tuplesorted ## -## Hardware accelaration +## Configure Suricata to load Suricata-Update managed rules. +## +## If this section is completely commented out move down to the "Advanced rule +## file configuration". ## -# Cuda configuration. -cuda: - # The "mpm" profile. On not specifying any of these parameters, the engine's - # internal default values are used, which are same as the ones specified in - # in the default conf file. - mpm: - # The minimum length required to buffer data to the gpu. - # Anything below this is MPM'ed on the CPU. - # Can be specified in kb, mb, gb. Just a number indicates it's in bytes. - # A value of 0 indicates there's no limit. - data-buffer-size-min-limit: 0 - # The maximum length for data that we would buffer to the gpu. - # Anything over this is MPM'ed on the CPU. - # Can be specified in kb, mb, gb. Just a number indicates it's in bytes. - data-buffer-size-max-limit: 1500 - # The ring buffer size used by the CudaBuffer API to buffer data. - cudabuffer-buffer-size: 500mb - # The max chunk size that can be sent to the gpu in a single go. - gpu-transfer-size: 50mb - # The timeout limit for batching of packets in microseconds. - batching-timeout: 2000 - # The device to use for the mpm. Currently we don't support load balancing - # on multiple gpus. In case you have multiple devices on your system, you - # can specify the device to use, using this conf. By default we hold 0, to - # specify the first device cuda sees. To find out device-id associated with - # the card(s) on the system run "suricata --list-cuda-cards". - device-id: 0 - # No of Cuda streams used for asynchronous processing. All values > 0 are valid. 
- # For this option you need a device with Compute Capability > 1.0. - cuda-streams: 2 +default-rule-path: /etc/suricata/rules + +rule-files: + - all.rules + +## +## Auxiliary configuration files. +## + +classification-file: /etc/suricata/classification.config +reference-config-file: /etc/suricata/reference.config +# threshold-file: /etc/suricata/threshold.config ## ## Include other configs @@ -1723,4 +1878,4 @@ cuda: # Includes. Files included here will be handled as if they were # inlined in this configuration file. #include: include1.yaml -#include: include2.yaml +#include: include2.yaml \ No newline at end of file diff --git a/salt/suricata/files/suricataDEPRICATED.yaml b/salt/suricata/files/suricataDEPRICATED.yaml new file mode 100644 index 000000000..5a0121b63 --- /dev/null +++ b/salt/suricata/files/suricataDEPRICATED.yaml @@ -0,0 +1,1726 @@ +%YAML 1.1 +--- +{%- set interface = salt['pillar.get']('sensor:interface', 'bond0') %} +{%- if grains['role'] == 'so-eval' %} +{%- set MTU = 1500 %} +{%- elif grains['role'] == 'so-helix' %} +{%- set MTU = 9000 %} +{%- else %} +{%- set MTU = salt['pillar.get']('sensor:mtu', '1500') %} +{%- endif %} +{%- if salt['pillar.get']('sensor:homenet') %} + {%- set homenet = salt['pillar.get']('sensor:hnsensor', '') %} +{%- else %} + {%- set homenet = salt['pillar.get']('static:hnmaster', '') %} +{%- endif %} +# Suricata configuration file. 
In addition to the comments describing all +# options in this file, full documentation can be found at: +# https://redmine.openinfosecfoundation.org/projects/suricata/wiki/Suricatayaml + +## +## Step 1: inform Suricata about your network +## + +vars: + # more specifc is better for alert accuracy and performance + address-groups: + HOME_NET: "[{{ homenet }}]" + #HOME_NET: "[192.168.0.0/16]" + #HOME_NET: "[10.0.0.0/8]" + #HOME_NET: "[172.16.0.0/12]" + #HOME_NET: "any" + + EXTERNAL_NET: "!$HOME_NET" + #EXTERNAL_NET: "any" + + HTTP_SERVERS: "$HOME_NET" + SMTP_SERVERS: "$HOME_NET" + SQL_SERVERS: "$HOME_NET" + DNS_SERVERS: "$HOME_NET" + TELNET_SERVERS: "$HOME_NET" + AIM_SERVERS: "$EXTERNAL_NET" + DNP3_SERVER: "$HOME_NET" + DNP3_CLIENT: "$HOME_NET" + MODBUS_CLIENT: "$HOME_NET" + MODBUS_SERVER: "$HOME_NET" + ENIP_CLIENT: "$HOME_NET" + ENIP_SERVER: "$HOME_NET" + + port-groups: + HTTP_PORTS: "80" + SHELLCODE_PORTS: "!80" + ORACLE_PORTS: 1521 + SSH_PORTS: 22 + DNP3_PORTS: 20000 + MODBUS_PORTS: 502 + FILE_DATA_PORTS: "[$HTTP_PORTS,110,143]" + FTP_PORTS: 21 + + +## +## Step 2: select the rules to enable or disable +## + +default-rule-path: /etc/suricata/rules +rule-files: + - all.rules + +classification-file: /etc/suricata/classification.config +reference-config-file: /etc/suricata/reference.config +# threshold-file: /usr/local/etc/suricata/threshold.config + + +## +## Step 3: select outputs to enable +## + +# The default logging directory. Any log or output file will be +# placed here if its not specified with a full path name. This can be +# overridden with the -l command line parameter. +default-log-dir: /var/log/suricata/ + +# global stats configuration +stats: + enabled: yes + # The interval field (in seconds) controls at what interval + # the loggers are invoked. + interval: 30 + +# Configure the type of alert (and other) logging you would like. 
+outputs: + # a line based alerts log similar to Snort's fast.log + - fast: + enabled: no + filename: fast.log + append: yes + #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram' + + # Extensible Event Format (nicknamed EVE) event log in JSON format + - eve-log: + enabled: yes + filetype: regular #regular|syslog|unix_dgram|unix_stream|redis + filename: eve.json + rotate-interval: day + community-id: true + community-id-seed: 0 + #prefix: "@cee: " # prefix to prepend to each log entry + # the following are valid when type: syslog above + #identity: "suricata" + #facility: local5 + #level: Info ## possible levels: Emergency, Alert, Critical, + ## Error, Warning, Notice, Info, Debug + #redis: + # server: 127.0.0.1 + # port: 6379 + # async: true ## if redis replies are read asynchronously + # mode: list ## possible values: list|lpush (default), rpush, channel|publish + # ## lpush and rpush are using a Redis list. "list" is an alias for lpush + # ## publish is using a Redis channel. "channel" is an alias for publish + # key: suricata ## key or channel to use (default to suricata) + # Redis pipelining set up. This will enable to only do a query every + # 'batch-size' events. This should lower the latency induced by network + # connection at the cost of some memory. There is no flushing implemented + # so this setting as to be reserved to high traffic suricata. 
+ # pipelining: + # enabled: yes ## set enable to yes to enable query pipelining + # batch-size: 10 ## number of entry to keep in buffer + types: + - alert: + # payload: yes # enable dumping payload in Base64 + # payload-buffer-size: 4kb # max size of payload buffer to output in eve-log + # payload-printable: yes # enable dumping payload in printable (lossy) format + # packet: yes # enable dumping of packet (without stream segments) + # http-body: yes # enable dumping of http body in Base64 + # http-body-printable: yes # enable dumping of http body in printable format + metadata: + app-layer: false + flow: false + rule: + metadata: true + raw: true + + # Enable the logging of tagged packets for rules using the + # "tag" keyword. + tagged-packets: no + + # HTTP X-Forwarded-For support by adding an extra field or overwriting + # the source or destination IP address (depending on flow direction) + # with the one reported in the X-Forwarded-For HTTP header. This is + # helpful when reviewing alerts for traffic that is being reverse + # or forward proxied. + xff: + enabled: no + # Two operation modes are available, "extra-data" and "overwrite". + mode: extra-data + # Two proxy deployments are supported, "reverse" and "forward". In + # a "reverse" deployment the IP address used is the last one, in a + # "forward" deployment the first IP address is used. + deployment: reverse + # Header name where the actual IP address will be reported, if more + # than one IP address is present, the last IP address will be the + # one taken into consideration. 
+ header: X-Forwarded-For + #- http: + # extended: no # enable this for extended logging information + # custom allows additional http fields to be included in eve-log + # the example below adds three additional fields when uncommented + #custom: [Accept-Encoding, Accept-Language, Authorization] + #- dns: + # control logging of queries and answers + # default yes, no to disable + # query: no # enable logging of DNS queries + # answer: no # enable logging of DNS answers + # control which RR types are logged + # all enabled if custom not specified + #custom: [a, aaaa, cname, mx, ns, ptr, txt] + #- tls: + # extended: no # enable this for extended logging information + # output TLS transaction where the session is resumed using a + # session id + #session-resumption: no + # custom allows to control which tls fields that are included + # in eve-log + #custom: [subject, issuer, session_resumed, serial, fingerprint, sni, version, not_before, not_after, certificate, chain] + #- files: + # force-magic: no # force logging magic on all logged files + # force logging of checksums, available hash functions are md5, + # sha1 and sha256 + #force-hash: [md5] + #- drop: + # alerts: yes # log alerts that caused drops + # flows: all # start or all: 'start' logs only a single drop + # # per flow direction. All logs each dropped pkt. 
+ #- smtp: + #extended: yes # enable this for extended logging information + # this includes: bcc, message-id, subject, x_mailer, user-agent + # custom fields logging from the list: + # reply-to, bcc, message-id, subject, x-mailer, user-agent, received, + # x-originating-ip, in-reply-to, references, importance, priority, + # sensitivity, organization, content-md5, date + #custom: [received, x-mailer, x-originating-ip, relays, reply-to, bcc] + # output md5 of fields: body, subject + # for the body you need to set app-layer.protocols.smtp.mime.body-md5 + # to yes + #md5: [body, subject] + + #- dnp3 + #- nfs + #- ssh: + #- stats: + # totals: yes # stats for all threads merged together + # threads: no # per thread stats + # deltas: no # include delta values + # bi-directional flows + #- flow: + # uni-directional flows + #- netflow + # Vars log flowbits and other packet and flow vars + #- vars + + # alert output for use with Barnyard2 + - unified2-alert: + enabled: no + filename: unified2.alert + + # File size limit. Can be specified in kb, mb, gb. Just a number + # is parsed as bytes. + #limit: 32mb + + # By default unified2 log files have the file creation time (in + # unix epoch format) appended to the filename. Set this to yes to + # disable this behaviour. + #nostamp: no + + # Sensor ID field of unified2 alerts. + #sensor-id: 0 + + # Include payload of packets related to alerts. Defaults to true, set to + # false if payload is not required. + #payload: yes + + # HTTP X-Forwarded-For support by adding the unified2 extra header or + # overwriting the source or destination IP address (depending on flow + # direction) with the one reported in the X-Forwarded-For HTTP header. + # This is helpful when reviewing alerts for traffic that is being reverse + # or forward proxied. + xff: + enabled: no + # Two operation modes are available, "extra-data" and "overwrite". 
Note + # that in the "overwrite" mode, if the reported IP address in the HTTP + # X-Forwarded-For header is of a different version of the packet + # received, it will fall-back to "extra-data" mode. + mode: extra-data + # Two proxy deployments are supported, "reverse" and "forward". In + # a "reverse" deployment the IP address used is the last one, in a + # "forward" deployment the first IP address is used. + deployment: reverse + # Header name where the actual IP address will be reported, if more + # than one IP address is present, the last IP address will be the + # one taken into consideration. + header: X-Forwarded-For + + # a line based log of HTTP requests (no alerts) + - http-log: + enabled: no + filename: http.log + append: yes + #extended: yes # enable this for extended logging information + #custom: yes # enabled the custom logging format (defined by customformat) + + #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram' + + # a line based log of TLS handshake parameters (no alerts) + - tls-log: + enabled: no # Log TLS connections. + filename: tls.log # File to store TLS logs. + append: yes + #extended: yes # Log extended information like fingerprint + #custom: yes # enabled the custom logging format (defined by customformat) + #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram' + # output TLS transaction where the session is resumed using a + # session id + #session-resumption: no + + # output module to store certificates chain to disk + - tls-store: + enabled: no + #certs-log-dir: certs # directory to store the certificates files + + # a line based log of DNS requests and/or replies (no alerts) + - dns-log: + enabled: no + filename: dns.log + append: yes + #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram' + + # Packet log... log packets in pcap format. 3 modes of operation: "normal" + # "multi" and "sguil". + # + # In normal mode a pcap file "filename" is created in the default-log-dir, + # or are as specified by "dir". 
+ # In multi mode, a file is created per thread. This will perform much + # better, but will create multiple files where 'normal' would create one. + # In multi mode the filename takes a few special variables: + # - %n -- thread number + # - %i -- thread id + # - %t -- timestamp (secs or secs.usecs based on 'ts-format' + # E.g. filename: pcap.%n.%t + # + # Note that it's possible to use directories, but the directories are not + # created by Suricata. E.g. filename: pcaps/%n/log.%s will log into the + # per thread directory. + # + # Also note that the limit and max-files settings are enforced per thread. + # So the size limit when using 8 threads with 1000mb files and 2000 files + # is: 8*1000*2000 ~ 16TiB. + # + # In Sguil mode "dir" indicates the base directory. In this base dir the + # pcaps are created in th directory structure Sguil expects: + # + # $sguil-base-dir/YYYY-MM-DD/$filename. + # + # By default all packets are logged except: + # - TCP streams beyond stream.reassembly.depth + # - encrypted streams after the key exchange + # + - pcap-log: + enabled: no + filename: log.pcap + + # File size limit. Can be specified in kb, mb, gb. Just a number + # is parsed as bytes. + limit: 1000mb + + # If set to a value will enable ring buffer mode. Will keep Maximum of "max-files" of size "limit" + max-files: 2000 + + mode: normal # normal, multi or sguil. + + # Directory to place pcap files. If not provided the default log + # directory will be used. Required for "sguil" mode. + #dir: /nsm_data/ + + #ts-format: usec # sec or usec second format (default) is filename.sec usec is filename.sec.usec + use-stream-depth: no #If set to "yes" packets seen after reaching stream inspection depth are ignored. "no" logs all packets + honor-pass-rules: no # If set to "yes", flows in which a pass rule matched will stopped being logged. + + # a full alerts log containing much information for signature writers + # or for investigating suspected false positives. 
+ - alert-debug: + enabled: no + filename: alert-debug.log + append: yes + #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram' + + # alert output to prelude (http://www.prelude-technologies.com/) only + # available if Suricata has been compiled with --enable-prelude + - alert-prelude: + enabled: no + profile: suricata + log-packet-content: no + log-packet-header: yes + + # Stats.log contains data from various counters of the suricata engine. + - stats: + enabled: yes + filename: stats.log + append: yes # append to file (yes) or overwrite it (no) + totals: yes # stats for all threads merged together + threads: no # per thread stats + #null-values: yes # print counters that have value 0 + + # a line based alerts log similar to fast.log into syslog + - syslog: + enabled: no + # reported identity to syslog. If ommited the program name (usually + # suricata) will be used. + #identity: "suricata" + facility: local5 + #level: Info ## possible levels: Emergency, Alert, Critical, + ## Error, Warning, Notice, Info, Debug + + # a line based information for dropped packets in IPS mode + - drop: + enabled: no + filename: drop.log + append: yes + #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram' + + # output module to store extracted files to disk + # + # The files are stored to the log-dir in a format "file." where is + # an incrementing number starting at 1. For each file "file." a meta + # file "file..meta" is created. + # + # File extraction depends on a lot of things to be fully done: + # - file-store stream-depth. For optimal results, set this to 0 (unlimited) + # - http request / response body sizes. Again set to 0 for optimal results. + # - rules that contain the "filestore" keyword. 
+ - file-store: + enabled: no # set to yes to enable + log-dir: files # directory to store the files + force-magic: no # force logging magic on all stored files + # force logging of checksums, available hash functions are md5, + # sha1 and sha256 + #force-hash: [md5] + force-filestore: no # force storing of all files + # override global stream-depth for sessions in which we want to + # perform file extraction. Set to 0 for unlimited. + #stream-depth: 0 + #waldo: file.waldo # waldo file to store the file_id across runs + # uncomment to disable meta file writing + #write-meta: no + # uncomment the following variable to define how many files can + # remain open for filestore by Suricata. Default value is 0 which + # means files get closed after each write + #max-open-files: 1000 + + # output module to log files tracked in a easily parsable json format + - file-log: + enabled: no + filename: files-json.log + append: yes + #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram' + + force-magic: no # force logging magic on all logged files + # force logging of checksums, available hash functions are md5, + # sha1 and sha256 + #force-hash: [md5] + + # Log TCP data after stream normalization + # 2 types: file or dir. File logs into a single logfile. Dir creates + # 2 files per TCP session and stores the raw TCP data into them. + # Using 'both' will enable both file and dir modes. + # + # Note: limited by stream.depth + - tcp-data: + enabled: no + type: file + filename: tcp-data.log + + # Log HTTP body data after normalization, dechunking and unzipping. + # 2 types: file or dir. File logs into a single logfile. Dir creates + # 2 files per HTTP session and stores the normalized data into them. + # Using 'both' will enable both file and dir modes. + # + # Note: limited by the body limit settings + - http-body-data: + enabled: no + type: file + filename: http-data.log + + # Lua Output Support - execute lua script to generate alert and event + # output. 
+ # Documented at: + # https://redmine.openinfosecfoundation.org/projects/suricata/wiki/Lua_Output + - lua: + enabled: no + #scripts-dir: /etc/suricata/lua-output/ + scripts: + # - script1.lua + +# Logging configuration. This is not about logging IDS alerts/events, but +# output about what Suricata is doing, like startup messages, errors, etc. +logging: + # The default log level, can be overridden in an output section. + # Note that debug level logging will only be emitted if Suricata was + # compiled with the --enable-debug configure option. + # + # This value is overriden by the SC_LOG_LEVEL env var. + default-log-level: notice + + # The default output format. Optional parameter, should default to + # something reasonable if not provided. Can be overriden in an + # output section. You can leave this out to get the default. + # + # This value is overriden by the SC_LOG_FORMAT env var. + #default-log-format: "[%i] %t - (%f:%l) <%d> (%n) -- " + + # A regex to filter output. Can be overridden in an output section. + # Defaults to empty (no filter). + # + # This value is overriden by the SC_LOG_OP_FILTER env var. + default-output-filter: + + # Define your logging outputs. If none are defined, or they are all + # disabled you will get the default - console output. + outputs: + - console: + enabled: yes + # type: json + - file: + enabled: yes + level: info + filename: /var/log/suricata/suricata.log + # type: json + - syslog: + enabled: no + +af-packet: + - interface: {{ interface }} + # Number of receive threads. "auto" uses the number of cores + #threads: auto + # Default clusterid. AF_PACKET will load balance packets based on flow. + cluster-id: 59 + # Default AF_PACKET cluster type. AF_PACKET can load balance per flow or per hash. 
+ # This is only supported for Linux kernel > 3.1 + # possible value are: + # * cluster_round_robin: round robin load balancing + # * cluster_flow: all packets of a given flow are send to the same socket + # * cluster_cpu: all packets treated in kernel by a CPU are send to the same socket + # * cluster_qm: all packets linked by network card to a RSS queue are sent to the same + # socket. Requires at least Linux 3.14. + # * cluster_random: packets are sent randomly to sockets but with an equipartition. + # Requires at least Linux 3.14. + # * cluster_rollover: kernel rotates between sockets filling each socket before moving + # to the next. Requires at least Linux 3.10. + # Recommended modes are cluster_flow on most boxes and cluster_cpu or cluster_qm on system + # with capture card using RSS (require cpu affinity tuning and system irq tuning) + cluster-type: cluster_flow + # In some fragmentation case, the hash can not be computed. If "defrag" is set + # to yes, the kernel will do the needed defragmentation before sending the packets. + defrag: yes + # After Linux kernel 3.10 it is possible to activate the rollover option: if a socket is + # full then kernel will send the packet on the next socket with room available. This option + # can minimize packet drop and increase the treated bandwidth on single intensive flow. + #rollover: yes + # To use the ring feature of AF_PACKET, set 'use-mmap' to yes + #use-mmap: yes + # Lock memory map to avoid it goes to swap. Be careful that over suscribing could lock + # your system + #mmap-locked: yes + # Use tpacket_v3 capture mode, only active if use-mmap is true + # Don't use it in IPS or TAP mode as it causes severe latency + #tpacket-v3: yes + # Ring size will be computed with respect to max_pending_packets and number + # of threads. You can set manually the ring size in number of packets by setting + # the following value. 
If you are using flow cluster-type and have really network + # intensive single-flow you could want to set the ring-size independently of the number + # of threads: + #ring-size: 2048 + # Block size is used by tpacket_v3 only. It should set to a value high enough to contain + # a decent number of packets. Size is in bytes so please consider your MTU. It should be + # a power of 2 and it must be multiple of page size (usually 4096). + #block-size: 32768 + # tpacket_v3 block timeout: an open block is passed to userspace if it is not + # filled after block-timeout milliseconds. + #block-timeout: 10 + # On busy system, this could help to set it to yes to recover from a packet drop + # phase. This will result in some packets (at max a ring flush) being non treated. + #use-emergency-flush: yes + # recv buffer size, increase value could improve performance + # buffer-size: 32768 + # Set to yes to disable promiscuous mode + # disable-promisc: no + # Choose checksum verification mode for the interface. At the moment + # of the capture, some packets may be with an invalid checksum due to + # offloading to the network card of the checksum computation. + # Possible values are: + # - kernel: use indication sent by kernel for each packet (default) + # - yes: checksum validation is forced + # - no: checksum validation is disabled + # - auto: suricata uses a statistical approach to detect when + # checksum off-loading is used. + # Warning: 'checksum-validation' must be set to yes to have any validation + #checksum-checks: kernel + # BPF filter to apply to this interface. The pcap filter syntax apply here. + #bpf-filter: port 80 or udp + # You can use the following variables to activate AF_PACKET tap or IPS mode. + # If copy-mode is set to ips or tap, the traffic coming to the current + # interface will be copied to the copy-iface interface. If 'tap' is set, the + # copy is complete. If 'ips' is set, the packet matching a 'drop' action + # will not be copied. 
+ #copy-mode: ips + #copy-iface: eth1 + + # Put default values here. These will be used for an interface that is not + # in the list above. + - interface: default + #threads: auto + #use-mmap: no + #rollover: yes + #tpacket-v3: yes + +# Cross platform libpcap capture support +pcap: + - interface: eth0 + # On Linux, pcap will try to use mmaped capture and will use buffer-size + # as total of memory used by the ring. So set this to something bigger + # than 1% of your bandwidth. + #buffer-size: 16777216 + #bpf-filter: "tcp and port 25" + # Choose checksum verification mode for the interface. At the moment + # of the capture, some packets may be with an invalid checksum due to + # offloading to the network card of the checksum computation. + # Possible values are: + # - yes: checksum validation is forced + # - no: checksum validation is disabled + # - auto: suricata uses a statistical approach to detect when + # checksum off-loading is used. (default) + # Warning: 'checksum-validation' must be set to yes to have any validation + #checksum-checks: auto + # With some accelerator cards using a modified libpcap (like myricom), you + # may want to have the same number of capture threads as the number of capture + # rings. In this case, set up the threads variable to N to start N threads + # listening on the same interface. + #threads: 16 + # set to no to disable promiscuous mode: + #promisc: no + # set snaplen, if not set it defaults to MTU if MTU can be known + # via ioctl call and to full capture if not. + #snaplen: 1518 + # Put default values here + - interface: default + #checksum-checks: auto + +# Settings for reading pcap files +pcap-file: + # Possible values are: + # - yes: checksum validation is forced + # - no: checksum validation is disabled + # - auto: suricata uses a statistical approach to detect when + # checksum off-loading is used. 
(default) + # Warning: 'checksum-validation' must be set to yes to have checksum tested + checksum-checks: auto + +# See "Advanced Capture Options" below for more options, including NETMAP +# and PF_RING. + + +## +## Step 5: App Layer Protocol Configuration +## + +# Configure the app-layer parsers. The protocols section details each +# protocol. +# +# The option "enabled" takes 3 values - "yes", "no", "detection-only". +# "yes" enables both detection and the parser, "no" disables both, and +# "detection-only" enables protocol detection only (parser disabled). +app-layer: + protocols: + tls: + enabled: detection-only + detection-ports: + dp: 443 + + # Completely stop processing TLS/SSL session after the handshake + # completed. If bypass is enabled this will also trigger flow + # bypass. If disabled (the default), TLS/SSL session is still + # tracked for Heartbleed and other anomalies. + #no-reassemble: yes + dcerpc: + enabled: detection-only + ftp: + enabled: detection-only + ssh: + enabled: detection-only + smtp: + enabled: detection-only + # Configure SMTP-MIME Decoder + mime: + # Decode MIME messages from SMTP transactions + # (may be resource intensive) + # This field supercedes all others because it turns the entire + # process on or off + decode-mime: detection-only + + # Decode MIME entity bodies (ie. base64, quoted-printable, etc.) + decode-base64: detection-only + decode-quoted-printable: detection-only + + # Maximum bytes per header data value stored in the data structure + # (default is 2000) + header-value-depth: 2000 + + # Extract URLs and save in state data structure + extract-urls: detection-only + # Set to yes to compute the md5 of the mail body. You will then + # be able to journalize it. 
+ body-md5: no + # Configure inspected-tracker for file_data keyword + inspected-tracker: + content-limit: 100000 + content-inspect-min-size: 32768 + content-inspect-window: 4096 + imap: + enabled: detection-only + msn: + enabled: detection-only + smb: + enabled: detection-only + detection-ports: + dp: 139, 445 + # smb2 detection is disabled internally inside the engine. + #smb2: + # enabled: yes + # Note: NFS parser depends on Rust support: pass --enable-rust + # to configure. + nfs: + enabled: no + dns: + # memcaps. Globally and per flow/state. + #global-memcap: 16mb + #state-memcap: 512kb + + # How many unreplied DNS requests are considered a flood. + # If the limit is reached, app-layer-event:dns.flooded; will match. + #request-flood: 500 + + tcp: + enabled: detection-only + detection-ports: + dp: 53 + udp: + enabled: detection-only + detection-ports: + dp: 53 + http: + enabled: detection-only + # memcap: 64mb + + # default-config: Used when no server-config matches + # personality: List of personalities used by default + # request-body-limit: Limit reassembly of request body for inspection + # by http_client_body & pcre /P option. + # response-body-limit: Limit reassembly of response body for inspection + # by file_data, http_server_body & pcre /Q option. + # double-decode-path: Double decode path section of the URI + # double-decode-query: Double decode query section of the URI + # response-body-decompress-layer-limit: + # Limit to how many layers of compression will be + # decompressed. Defaults to 2. + # + # server-config: List of server configurations to use if address matches + # address: List of ip addresses or networks for this block + # personalitiy: List of personalities used by this block + # request-body-limit: Limit reassembly of request body for inspection + # by http_client_body & pcre /P option. + # response-body-limit: Limit reassembly of response body for inspection + # by file_data, http_server_body & pcre /Q option. 
+ # double-decode-path: Double decode path section of the URI + # double-decode-query: Double decode query section of the URI + # + # uri-include-all: Include all parts of the URI. By default the + # 'scheme', username/password, hostname and port + # are excluded. Setting this option to true adds + # all of them to the normalized uri as inspected + # by http_uri, urilen, pcre with /U and the other + # keywords that inspect the normalized uri. + # Note that this does not affect http_raw_uri. + # Also, note that including all was the default in + # 1.4 and 2.0beta1. + # + # meta-field-limit: Hard size limit for request and response size + # limits. Applies to request line and headers, + # response line and headers. Does not apply to + # request or response bodies. Default is 18k. + # If this limit is reached an event is raised. + # + # Currently Available Personalities: + # Minimal, Generic, IDS (default), IIS_4_0, IIS_5_0, IIS_5_1, IIS_6_0, + # IIS_7_0, IIS_7_5, Apache_2 + libhtp: + default-config: + personality: IDS + + # Can be specified in kb, mb, gb. Just a number indicates + # it's in bytes. + request-body-limit: 100kb + response-body-limit: 100kb + + # inspection limits + request-body-minimal-inspect-size: 32kb + request-body-inspect-window: 4kb + response-body-minimal-inspect-size: 40kb + response-body-inspect-window: 16kb + + # response body decompression (0 disables) + response-body-decompress-layer-limit: 2 + + # auto will use http-body-inline mode in IPS mode, yes or no set it statically + http-body-inline: auto + + # Take a random value for inspection sizes around the specified value. + # This lower the risk of some evasion technics but could lead + # detection change between runs. It is set to 'yes' by default. + #randomize-inspection-sizes: yes + # If randomize-inspection-sizes is active, the value of various + # inspection size will be choosen in the [1 - range%, 1 + range%] + # range + # Default value of randomize-inspection-range is 10. 
+ #randomize-inspection-range: 10 + + # decoding + double-decode-path: no + double-decode-query: no + + server-config: + + #- apache: + # address: [192.168.1.0/24, 127.0.0.0/8, "::1"] + # personality: Apache_2 + # # Can be specified in kb, mb, gb. Just a number indicates + # # it's in bytes. + # request-body-limit: 4096 + # response-body-limit: 4096 + # double-decode-path: no + # double-decode-query: no + + #- iis7: + # address: + # - 192.168.0.0/24 + # - 192.168.10.0/24 + # personality: IIS_7_0 + # # Can be specified in kb, mb, gb. Just a number indicates + # # it's in bytes. + # request-body-limit: 4096 + # response-body-limit: 4096 + # double-decode-path: no + # double-decode-query: no + + # Note: Modbus probe parser is minimalist due to the poor significant field + # Only Modbus message length (greater than Modbus header length) + # And Protocol ID (equal to 0) are checked in probing parser + # It is important to enable detection port and define Modbus port + # to avoid false positive + modbus: + # How many unreplied Modbus requests are considered a flood. + # If the limit is reached, app-layer-event:modbus.flooded; will match. + #request-flood: 500 + + enabled: no + detection-ports: + dp: 502 + # According to MODBUS Messaging on TCP/IP Implementation Guide V1.0b, it + # is recommended to keep the TCP connection opened with a remote device + # and not to open and close it for each MODBUS/TCP transaction. In that + # case, it is important to set the depth of the stream reassembling as + # unlimited (stream.reassembly.depth: 0) + + # Stream reassembly size for modbus. By default track it completely. 
+ stream-depth: 0 + + # DNP3 + dnp3: + enabled: no + detection-ports: + dp: 20000 + + # SCADA EtherNet/IP and CIP protocol support + enip: + enabled: no + detection-ports: + dp: 44818 + sp: 44818 + + # Note: parser depends on experimental Rust support + # with --enable-rust-experimental passed to configure + ntp: + enabled: no + +# Limit for the maximum number of asn1 frames to decode (default 256) +asn1-max-frames: 256 + + +############################################################################## +## +## Advanced settings below +## +############################################################################## + +## +## Run Options +## + +# Run suricata as user and group. +run-as: + user: suricata + group: suricata + +# Some logging module will use that name in event as identifier. The default +# value is the hostname +#sensor-name: suricata + +# Default location of the pid file. The pid file is only used in +# daemon mode (start Suricata with -D). If not running in daemon mode +# the --pidfile command line option must be used to create a pid file. +#pid-file: /usr/local/var/run/suricata.pid + +# Daemon working directory +# Suricata will change directory to this one if provided +# Default: "/" +#daemon-directory: "/" + +# Suricata core dump configuration. Limits the size of the core dump file to +# approximately max-dump. The actual core dump size will be a multiple of the +# page size. Core dumps that would be larger than max-dump are truncated. On +# Linux, the actual core dump size may be a few pages larger than max-dump. +# Setting max-dump to 0 disables core dumping. +# Setting max-dump to 'unlimited' will give the full core dump file. +# On 32-bit Linux, a max-dump value >= ULONG_MAX may cause the core dump size +# to be 'unlimited'. + +coredump: + max-dump: unlimited + +# If suricata box is a router for the sniffed networks, set it to 'router'. If +# it is a pure sniffing setup, set it to 'sniffer-only'. 
+# If set to auto, the variable is internally switch to 'router' in IPS mode +# and 'sniffer-only' in IDS mode. +# This feature is currently only used by the reject* keywords. +host-mode: auto + +# Number of packets preallocated per thread. The default is 1024. A higher number +# will make sure each CPU will be more easily kept busy, but may negatively +# impact caching. +# +# If you are using the CUDA pattern matcher (mpm-algo: ac-cuda), different rules +# apply. In that case try something like 60000 or more. This is because the CUDA +# pattern matcher buffers and scans as many packets as possible in parallel. +#max-pending-packets: 1024 + +# Runmode the engine should use. Please check --list-runmodes to get the available +# runmodes for each packet acquisition method. Defaults to "autofp" (auto flow pinned +# load balancing). +runmode: workers + +# Specifies the kind of flow load balancer used by the flow pinned autofp mode. +# +# Supported schedulers are: +# +# round-robin - Flows assigned to threads in a round robin fashion. +# active-packets - Flows assigned to threads that have the lowest number of +# unprocessed packets (default). +# hash - Flow alloted usihng the address hash. More of a random +# technique. Was the default in Suricata 1.2.1 and older. +# +#autofp-scheduler: active-packets + +# Preallocated size for packet. Default is 1514 which is the classical +# size for pcap on ethernet. You should adjust this value to the highest +# packet size (MTU + hardware header) on your system. +default-packet-size: {{ MTU + 15 }} + +# Unix command socket can be used to pass commands to suricata. +# An external tool can then connect to get information from suricata +# or trigger some modifications of the engine. Set enabled to yes +# to activate the feature. In auto mode, the feature will only be +# activated in live capture mode. You can use the filename variable to set +# the file name of the socket. 
+unix-command: + enabled: auto + #filename: custom.socket + +# Magic file. The extension .mgc is added to the value here. +#magic-file: /usr/share/file/magic +#magic-file: + +legacy: + uricontent: enabled + +## +## Detection settings +## + +# Set the order of alerts bassed on actions +# The default order is pass, drop, reject, alert +# action-order: +# - pass +# - drop +# - reject +# - alert + +# IP Reputation +#reputation-categories-file: /usr/local/etc/suricata/iprep/categories.txt +#default-reputation-path: /usr/local/etc/suricata/iprep +#reputation-files: +# - reputation.list + +# When run with the option --engine-analysis, the engine will read each of +# the parameters below, and print reports for each of the enabled sections +# and exit. The reports are printed to a file in the default log dir +# given by the parameter "default-log-dir", with engine reporting +# subsection below printing reports in its own report file. +engine-analysis: + # enables printing reports for fast-pattern for every rule. + rules-fast-pattern: yes + # enables printing reports for each rule + rules: yes + +#recursion and match limits for PCRE where supported +pcre: + match-limit: 3500 + match-limit-recursion: 1500 + +## +## Advanced Traffic Tracking and Reconstruction Settings +## + +# Host specific policies for defragmentation and TCP stream +# reassembly. The host OS lookup is done using a radix tree, just +# like a routing table so the most specific entry matches. +host-os-policy: + # Make the default policy windows. 
+ windows: [0.0.0.0/0] + bsd: [] + bsd-right: [] + old-linux: [] + linux: [] + old-solaris: [] + solaris: [] + hpux10: [] + hpux11: [] + irix: [] + macos: [] + vista: [] + windows2k3: [] + +# Defrag settings: + +defrag: + memcap: 32mb + hash-size: 65536 + trackers: 65535 # number of defragmented flows to follow + max-frags: 65535 # number of fragments to keep (higher than trackers) + prealloc: yes + timeout: 60 + +# Enable defrag per host settings +# host-config: +# +# - dmz: +# timeout: 30 +# address: [192.168.1.0/24, 127.0.0.0/8, 1.1.1.0/24, 2.2.2.0/24, "1.1.1.1", "2.2.2.2", "::1"] +# +# - lan: +# timeout: 45 +# address: +# - 192.168.0.0/24 +# - 192.168.10.0/24 +# - 172.16.14.0/24 + +# Flow settings: +# By default, the reserved memory (memcap) for flows is 32MB. This is the limit +# for flow allocation inside the engine. You can change this value to allow +# more memory usage for flows. +# The hash-size determine the size of the hash used to identify flows inside +# the engine, and by default the value is 65536. +# At the startup, the engine can preallocate a number of flows, to get a better +# performance. The number of flows preallocated is 10000 by default. +# emergency-recovery is the percentage of flows that the engine need to +# prune before unsetting the emergency state. The emergency state is activated +# when the memcap limit is reached, allowing to create new flows, but +# prunning them with the emergency timeouts (they are defined below). +# If the memcap is reached, the engine will try to prune flows +# with the default timeouts. If it doens't find a flow to prune, it will set +# the emergency bit and it will try again with more agressive timeouts. +# If that doesn't work, then it will try to kill the last time seen flows +# not in use. +# The memcap can be specified in kb, mb, gb. Just a number indicates it's +# in bytes. 
+ +flow: + memcap: 128mb + hash-size: 65536 + prealloc: 10000 + emergency-recovery: 30 + #managers: 1 # default to one flow manager + #recyclers: 1 # default to one flow recycler thread + +# This option controls the use of vlan ids in the flow (and defrag) +# hashing. Normally this should be enabled, but in some (broken) +# setups where both sides of a flow are not tagged with the same vlan +# tag, we can ignore the vlan id's in the flow hashing. +vlan: + use-for-tracking: true + +# Specific timeouts for flows. Here you can specify the timeouts that the +# active flows will wait to transit from the current state to another, on each +# protocol. The value of "new" determine the seconds to wait after a hanshake or +# stream startup before the engine free the data of that flow it doesn't +# change the state to established (usually if we don't receive more packets +# of that flow). The value of "established" is the amount of +# seconds that the engine will wait to free the flow if it spend that amount +# without receiving new packets or closing the connection. "closed" is the +# amount of time to wait after a flow is closed (usually zero). "bypassed" +# timeout controls locally bypassed flows. For these flows we don't do any other +# tracking. If no packets have been seen after this timeout, the flow is discarded. +# +# There's an emergency mode that will become active under attack circumstances, +# making the engine to check flow status faster. This configuration variables +# use the prefix "emergency-" and work similar as the normal ones. +# Some timeouts doesn't apply to all the protocols, like "closed", for udp and +# icmp. 
+ +flow-timeouts: + + default: + new: 30 + established: 300 + closed: 0 + bypassed: 100 + emergency-new: 10 + emergency-established: 100 + emergency-closed: 0 + emergency-bypassed: 50 + tcp: + new: 60 + established: 600 + closed: 60 + bypassed: 100 + emergency-new: 5 + emergency-established: 100 + emergency-closed: 10 + emergency-bypassed: 50 + udp: + new: 30 + established: 300 + bypassed: 100 + emergency-new: 10 + emergency-established: 100 + emergency-bypassed: 50 + icmp: + new: 30 + established: 300 + bypassed: 100 + emergency-new: 10 + emergency-established: 100 + emergency-bypassed: 50 + +# Stream engine settings. Here the TCP stream tracking and reassembly +# engine is configured. +# +# stream: +# memcap: 32mb # Can be specified in kb, mb, gb. Just a +# # number indicates it's in bytes. +# checksum-validation: yes # To validate the checksum of received +# # packet. If csum validation is specified as +# # "yes", then packet with invalid csum will not +# # be processed by the engine stream/app layer. +# # Warning: locally generated trafic can be +# # generated without checksum due to hardware offload +# # of checksum. You can control the handling of checksum +# # on a per-interface basis via the 'checksum-checks' +# # option +# prealloc-sessions: 2k # 2k sessions prealloc'd per stream thread +# midstream: false # don't allow midstream session pickups +# async-oneside: false # don't enable async stream handling +# inline: no # stream inline mode +# drop-invalid: yes # in inline mode, drop packets that are invalid with regards to streaming engine +# max-synack-queued: 5 # Max different SYN/ACKs to queue +# bypass: no # Bypass packets when stream.depth is reached +# +# reassembly: +# memcap: 64mb # Can be specified in kb, mb, gb. Just a number +# # indicates it's in bytes. +# depth: 1mb # Can be specified in kb, mb, gb. Just a number +# # indicates it's in bytes. +# toserver-chunk-size: 2560 # inspect raw stream in chunks of at least +# # this size. 
Can be specified in kb, mb, +# # gb. Just a number indicates it's in bytes. +# toclient-chunk-size: 2560 # inspect raw stream in chunks of at least +# # this size. Can be specified in kb, mb, +# # gb. Just a number indicates it's in bytes. +# randomize-chunk-size: yes # Take a random value for chunk size around the specified value. +# # This lower the risk of some evasion technics but could lead +# # detection change between runs. It is set to 'yes' by default. +# randomize-chunk-range: 10 # If randomize-chunk-size is active, the value of chunk-size is +# # a random value between (1 - randomize-chunk-range/100)*toserver-chunk-size +# # and (1 + randomize-chunk-range/100)*toserver-chunk-size and the same +# # calculation for toclient-chunk-size. +# # Default value of randomize-chunk-range is 10. +# +# raw: yes # 'Raw' reassembly enabled or disabled. +# # raw is for content inspection by detection +# # engine. +# +# segment-prealloc: 2048 # number of segments preallocated per thread +# +# check-overlap-different-data: true|false +# # check if a segment contains different data +# # than what we've already seen for that +# # position in the stream. +# # This is enabled automatically if inline mode +# # is used or when stream-event:reassembly_overlap_different_data; +# # is used in a rule. +# +stream: + memcap: 64mb + checksum-validation: yes # reject wrong csums + inline: auto # auto will use inline mode in IPS mode, yes or no set it statically + reassembly: + memcap: 256mb + depth: 1mb # reassemble 1mb into a stream + toserver-chunk-size: 2560 + toclient-chunk-size: 2560 + randomize-chunk-size: yes + #randomize-chunk-range: 10 + #raw: yes + #segment-prealloc: 2048 + #check-overlap-different-data: true + +# Host table: +# +# Host table is used by tagging and per host thresholding subsystems. +# +host: + hash-size: 4096 + prealloc: 1000 + memcap: 32mb + +# IP Pair table: +# +# Used by xbits 'ippair' tracking. 
+# +#ippair: +# hash-size: 4096 +# prealloc: 1000 +# memcap: 32mb + +# Decoder settings + +decoder: + # Teredo decoder is known to not be completely accurate + # it will sometimes detect non-teredo as teredo. + teredo: + enabled: true + + +## +## Performance tuning and profiling +## + +# The detection engine builds internal groups of signatures. The engine +# allow us to specify the profile to use for them, to manage memory on an +# efficient way keeping a good performance. For the profile keyword you +# can use the words "low", "medium", "high" or "custom". If you use custom +# make sure to define the values at "- custom-values" as your convenience. +# Usually you would prefer medium/high/low. +# +# "sgh mpm-context", indicates how the staging should allot mpm contexts for +# the signature groups. "single" indicates the use of a single context for +# all the signature group heads. "full" indicates a mpm-context for each +# group head. "auto" lets the engine decide the distribution of contexts +# based on the information the engine gathers on the patterns from each +# group head. +# +# The option inspection-recursion-limit is used to limit the recursive calls +# in the content inspection code. For certain payload-sig combinations, we +# might end up taking too much time in the content inspection code. +# If the argument specified is 0, the engine uses an internally defined +# default limit. On not specifying a value, we use no limits on the recursion. +detect: + profile: medium + custom-values: + toclient-groups: 3 + toserver-groups: 25 + sgh-mpm-context: auto + inspection-recursion-limit: 3000 + # If set to yes, the loading of signatures will be made after the capture + # is started. This will limit the downtime in IPS mode. + #delayed-detect: yes + + prefilter: + # default prefiltering setting. "mpm" only creates MPM/fast_pattern + # engines. "auto" also sets up prefilter engines for other keywords. 
+ # Use --list-keywords=all to see which keywords support prefiltering. + default: mpm + + # the grouping values above control how many groups are created per + # direction. Port whitelisting forces that port to get it's own group. + # Very common ports will benefit, as well as ports with many expensive + # rules. + grouping: + #tcp-whitelist: 53, 80, 139, 443, 445, 1433, 3306, 3389, 6666, 6667, 8080 + #udp-whitelist: 53, 135, 5060 + + profiling: + # Log the rules that made it past the prefilter stage, per packet + # default is off. The threshold setting determines how many rules + # must have made it past pre-filter for that rule to trigger the + # logging. + #inspect-logging-threshold: 200 + grouping: + dump-to-disk: false + include-rules: false # very verbose + include-mpm-stats: false + +# Select the multi pattern algorithm you want to run for scan/search the +# in the engine. +# +# The supported algorithms are: +# "ac" - Aho-Corasick, default implementation +# "ac-bs" - Aho-Corasick, reduced memory implementation +# "ac-cuda" - Aho-Corasick, CUDA implementation +# "ac-ks" - Aho-Corasick, "Ken Steele" variant +# "hs" - Hyperscan, available when built with Hyperscan support +# +# The default mpm-algo value of "auto" will use "hs" if Hyperscan is +# available, "ac" otherwise. +# +# The mpm you choose also decides the distribution of mpm contexts for +# signature groups, specified by the conf - "detect.sgh-mpm-context". +# Selecting "ac" as the mpm would require "detect.sgh-mpm-context" +# to be set to "single", because of ac's memory requirements, unless the +# ruleset is small enough to fit in one's memory, in which case one can +# use "full" with "ac". Rest of the mpms can be run in "full" mode. +# +# There is also a CUDA pattern matcher (only available if Suricata was +# compiled with --enable-cuda: b2g_cuda. Make sure to update your +# max-pending-packets setting above as well if you use b2g_cuda. 
+ +mpm-algo: auto + +# Select the matching algorithm you want to use for single-pattern searches. +# +# Supported algorithms are "bm" (Boyer-Moore) and "hs" (Hyperscan, only +# available if Suricata has been built with Hyperscan support). +# +# The default of "auto" will use "hs" if available, otherwise "bm". + +spm-algo: auto + +# Suricata is multi-threaded. Here the threading can be influenced. +threading: + set-cpu-affinity: yes + # Tune cpu affinity of threads. Each family of threads can be bound + # on specific CPUs. + # + # These 2 apply to the all runmodes: + # management-cpu-set is used for flow timeout handling, counters + # worker-cpu-set is used for 'worker' threads + # + # Additionally, for autofp these apply: + # receive-cpu-set is used for capture threads + # verdict-cpu-set is used for IPS verdict threads + # + {%- if salt['pillar.get']('sensor:suriprocs') %} + cpu-affinity: + - management-cpu-set: + cpu: [ all ] # include only these cpus in affinity settings + - receive-cpu-set: + cpu: [ all ] # include only these cpus in affinity settings + - worker-cpu-set: + cpu: [ "all" ] + mode: "exclusive" + # Use explicitely 3 threads and don't compute number by using + # detect-thread-ratio variable: + threads: {{ salt['pillar.get']('sensor:suriprocs') }} + prio: + default: "high" + {% endif %} + + {%- if salt['pillar.get']('sensor:suripins') %} + cpu-affinity: + - management-cpu-set: + cpu: [ {{ salt['pillar.get']('sensor:suripins')|join(",") }} ] # include only these cpus in affinity settings + - receive-cpu-set: + cpu: [ {{ salt['pillar.get']('sensor:suripins')|join(",") }} ] # include only these cpus in affinity settings + - worker-cpu-set: + cpu: [ {{ salt['pillar.get']('sensor:suripins')|join(",") }} ] + mode: "exclusive" + # Use explicitely 3 threads and don't compute number by using + # detect-thread-ratio variable: + threads: {{ salt['pillar.get']('sensor:suripins')|length }} + prio: + default: "high" + {% endif %} + + #- verdict-cpu-set: + # cpu: [ 
0 ] + # prio: + # default: "high" + # + # By default Suricata creates one "detect" thread per available CPU/CPU core. + # This setting allows controlling this behaviour. A ratio setting of 2 will + # create 2 detect threads for each CPU/CPU core. So for a dual core CPU this + # will result in 4 detect threads. If values below 1 are used, less threads + # are created. So on a dual core CPU a setting of 0.5 results in 1 detect + # thread being created. Regardless of the setting at a minimum 1 detect + # thread will always be created. + # + detect-thread-ratio: 1.0 + +# Luajit has a strange memory requirement, it's 'states' need to be in the +# first 2G of the process' memory. +# +# 'luajit.states' is used to control how many states are preallocated. +# State use: per detect script: 1 per detect thread. Per output script: 1 per +# script. +luajit: + states: 128 + +# Profiling settings. Only effective if Suricata has been built with the +# the --enable-profiling configure flag. +# +profiling: + # Run profiling for every xth packet. The default is 1, which means we + # profile every packet. If set to 1000, one packet is profiled for every + # 1000 received. + #sample-rate: 1000 + + # rule profiling + rules: + + # Profiling can be disabled here, but it will still have a + # performance impact if compiled in. + enabled: yes + filename: rule_perf.log + append: yes + + # Sort options: ticks, avgticks, checks, matches, maxticks + # If commented out all the sort options will be used. + #sort: avgticks + + # Limit the number of sids for which stats are shown at exit (per sort). + limit: 10 + + # output to json + json: yes + + # per keyword profiling + keywords: + enabled: yes + filename: keyword_perf.log + append: yes + + # per rulegroup profiling + rulegroups: + enabled: yes + filename: rule_group_perf.log + append: yes + + # packet profiling + packets: + + # Profiling can be disabled here, but it will still have a + # performance impact if compiled in. 
+ enabled: yes + filename: packet_stats.log + append: yes + + # per packet csv output + csv: + + # Output can be disabled here, but it will still have a + # performance impact if compiled in. + enabled: no + filename: packet_stats.csv + + # profiling of locking. Only available when Suricata was built with + # --enable-profiling-locks. + locks: + enabled: no + filename: lock_stats.log + append: yes + + pcap-log: + enabled: no + filename: pcaplog_stats.log + append: yes + +## +## Netfilter integration +## + +# When running in NFQ inline mode, it is possible to use a simulated +# non-terminal NFQUEUE verdict. +# This permit to do send all needed packet to suricata via this a rule: +# iptables -I FORWARD -m mark ! --mark $MARK/$MASK -j NFQUEUE +# And below, you can have your standard filtering ruleset. To activate +# this mode, you need to set mode to 'repeat' +# If you want packet to be sent to another queue after an ACCEPT decision +# set mode to 'route' and set next-queue value. +# On linux >= 3.1, you can set batchcount to a value > 1 to improve performance +# by processing several packets before sending a verdict (worker runmode only). +# On linux >= 3.6, you can set the fail-open option to yes to have the kernel +# accept the packet if suricata is not able to keep pace. +# bypass mark and mask can be used to implement NFQ bypass. If bypass mark is +# set then the NFQ bypass is activated. Suricata will set the bypass mark/mask +# on packet of a flow that need to be bypassed. The Nefilter ruleset has to +# directly accept all packets of a flow once a packet has been marked. 
+nfq: +# mode: accept +# repeat-mark: 1 +# repeat-mask: 1 +# bypass-mark: 1 +# bypass-mask: 1 +# route-queue: 2 +# batchcount: 20 +# fail-open: yes + +#nflog support +nflog: + # netlink multicast group + # (the same as the iptables --nflog-group param) + # Group 0 is used by the kernel, so you can't use it + - group: 2 + # netlink buffer size + buffer-size: 18432 + # put default value here + - group: default + # set number of packet to queue inside kernel + qthreshold: 1 + # set the delay before flushing packet in the queue inside kernel + qtimeout: 100 + # netlink max buffer size + max-size: 20000 + +## +## Advanced Capture Options +## + +# general settings affecting packet capture +capture: + # disable NIC offloading. It's restored when Suricata exists. + # Enabled by default + #disable-offloading: false + # + # disable checksum validation. Same as setting '-k none' on the + # commandline + #checksum-validation: none + +# Netmap support +# +# Netmap operates with NIC directly in driver, so you need FreeBSD wich have +# built-in netmap support or compile and install netmap module and appropriate +# NIC driver on your Linux system. +# To reach maximum throughput disable all receive-, segmentation-, +# checksum- offloadings on NIC. +# Disabling Tx checksum offloading is *required* for connecting OS endpoint +# with NIC endpoint. +# You can find more information at https://github.com/luigirizzo/netmap +# +netmap: + # To specify OS endpoint add plus sign at the end (e.g. "eth0+") + - interface: eth2 + # Number of receive threads. "auto" uses number of RSS queues on interface. + #threads: auto + # You can use the following variables to activate netmap tap or IPS mode. + # If copy-mode is set to ips or tap, the traffic coming to the current + # interface will be copied to the copy-iface interface. If 'tap' is set, the + # copy is complete. If 'ips' is set, the packet matching a 'drop' action + # will not be copied. 
+ # To specify the OS as the copy-iface (so the OS can route packets, or forward + # to a service running on the same machine) add a plus sign at the end + # (e.g. "copy-iface: eth0+"). Don't forget to set up a symmetrical eth0+ -> eth0 + # for return packets. Hardware checksumming must be *off* on the interface if + # using an OS endpoint (e.g. 'ifconfig eth0 -rxcsum -txcsum -rxcsum6 -txcsum6' for FreeBSD + # or 'ethtool -K eth0 tx off rx off' for Linux). + #copy-mode: tap + #copy-iface: eth3 + # Set to yes to disable promiscuous mode + # disable-promisc: no + # Choose checksum verification mode for the interface. At the moment + # of the capture, some packets may be with an invalid checksum due to + # offloading to the network card of the checksum computation. + # Possible values are: + # - yes: checksum validation is forced + # - no: checksum validation is disabled + # - auto: suricata uses a statistical approach to detect when + # checksum off-loading is used. + # Warning: 'checksum-validation' must be set to yes to have any validation + #checksum-checks: auto + # BPF filter to apply to this interface. The pcap filter syntax apply here. + #bpf-filter: port 80 or udp + #- interface: eth3 + #threads: auto + #copy-mode: tap + #copy-iface: eth2 + # Put default values here + - interface: default + +# PF_RING configuration. for use with native PF_RING support +# for more info see http://www.ntop.org/products/pf_ring/ +pfring: + - interface: eth0 + # Number of receive threads (>1 will enable experimental flow pinned + # runmode) + threads: 1 + + # Default clusterid. PF_RING will load balance packets based on flow. + # All threads/processes that will participate need to have the same + # clusterid. + cluster-id: 99 + + # Default PF_RING cluster type. PF_RING can load balance per flow. + # Possible values are cluster_flow or cluster_round_robin. 
+ cluster-type: cluster_flow + # bpf filter for this interface + #bpf-filter: tcp + # Choose checksum verification mode for the interface. At the moment + # of the capture, some packets may be with an invalid checksum due to + # offloading to the network card of the checksum computation. + # Possible values are: + # - rxonly: only compute checksum for packets received by network card. + # - yes: checksum validation is forced + # - no: checksum validation is disabled + # - auto: suricata uses a statistical approach to detect when + # checksum off-loading is used. (default) + # Warning: 'checksum-validation' must be set to yes to have any validation + #checksum-checks: auto + # Second interface + #- interface: eth1 + # threads: 3 + # cluster-id: 93 + # cluster-type: cluster_flow + # Put default values here + - interface: default + #threads: 2 + +# For FreeBSD ipfw(8) divert(4) support. +# Please make sure you have ipfw_load="YES" and ipdivert_load="YES" +# in /etc/loader.conf or kldload'ing the appropriate kernel modules. +# Additionally, you need to have an ipfw rule for the engine to see +# the packets from ipfw. For Example: +# +# ipfw add 100 divert 8000 ip from any to any +# +# The 8000 above should be the same number you passed on the command +# line, i.e. -d 8000 +# +ipfw: + + # Reinject packets at the specified ipfw rule number. This config + # option is the ipfw rule number AT WHICH rule processing continues + # in the ipfw processing system after the engine has finished + # inspecting the packet for acceptance. If no rule number is specified, + # accepted packets are reinjected at the divert rule which they entered + # and IPFW rule processing continues. No check is done to verify + # this rule makes sense so care must be taken to avoid loops in ipfw. 
+ # + ## The following example tells the engine to reinject packets + # back into the ipfw firewall AT rule number 5500: + # + # ipfw-reinjection-rule-number: 5500 + + +napatech: + # The Host Buffer Allowance for all streams + # (-1 = OFF, 1 - 100 = percentage of the host buffer that can be held back) + # This may be enabled when sharing streams with another application. + # Otherwise, it should be turned off. + hba: -1 + + # use-all-streams set to "yes" will query the Napatech service for all configured + # streams and listen on all of them. When set to "no" the streams config array + # will be used. + use-all-streams: yes + + # The streams to listen on. This can be either: + # a list of individual streams (e.g. streams: [0,1,2,3]) + # or + # a range of streams (e.g. streams: ["0-3"]) + streams: ["0-3"] + +# Tilera mpipe configuration. for use on Tilera TILE-Gx. +mpipe: + + # Load balancing modes: "static", "dynamic", "sticky", or "round-robin". + load-balance: dynamic + + # Number of Packets in each ingress packet queue. Must be 128, 512, 2028 or 65536 + iqueue-packets: 2048 + + # List of interfaces we will listen on. + inputs: + - interface: xgbe2 + - interface: xgbe3 + - interface: xgbe4 + + + # Relative weight of memory for packets of each mPipe buffer size. + stack: + size128: 0 + size256: 9 + size512: 0 + size1024: 0 + size1664: 7 + size4096: 0 + size10386: 0 + size16384: 0 + +## +## Hardware acceleration +## + +# Cuda configuration. +cuda: + # The "mpm" profile. On not specifying any of these parameters, the engine's + # internal default values are used, which are the same as the ones specified + # in the default conf file. + mpm: + # The minimum length required to buffer data to the gpu. + # Anything below this is MPM'ed on the CPU. + # Can be specified in kb, mb, gb. Just a number indicates it's in bytes. + # A value of 0 indicates there's no limit. + data-buffer-size-min-limit: 0 + # The maximum length for data that we would buffer to the gpu. 
+ # Anything over this is MPM'ed on the CPU. + # Can be specified in kb, mb, gb. Just a number indicates it's in bytes. + data-buffer-size-max-limit: 1500 + # The ring buffer size used by the CudaBuffer API to buffer data. + cudabuffer-buffer-size: 500mb + # The max chunk size that can be sent to the gpu in a single go. + gpu-transfer-size: 50mb + # The timeout limit for batching of packets in microseconds. + batching-timeout: 2000 + # The device to use for the mpm. Currently we don't support load balancing + # on multiple gpus. In case you have multiple devices on your system, you + # can specify the device to use, using this conf. By default we hold 0, to + # specify the first device cuda sees. To find out device-id associated with + # the card(s) on the system run "suricata --list-cuda-cards". + device-id: 0 + # No of Cuda streams used for asynchronous processing. All values > 0 are valid. + # For this option you need a device with Compute Capability > 1.0. + cuda-streams: 2 + +## +## Include other configs +## + +# Includes. Files included here will be handled as if they were +# inlined in this configuration file. 
+#include: include1.yaml +#include: include2.yaml From 84e3ce508e72ab106c7b7b69f846ae5eabb86947 Mon Sep 17 00:00:00 2001 From: William Wernert Date: Tue, 26 May 2020 15:19:37 -0400 Subject: [PATCH 35/76] [fix] Only check for proxy-user & proxy-pass when needed --- setup/so-functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup/so-functions b/setup/so-functions index fed162596..4b47d65d9 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -989,7 +989,7 @@ parse_options() { export http_proxy="${proxy_protocol}://${proxy_user}:${proxy_password}@${proxy_addr}" - elif [[ (-z $2 || -z $3) && (-n $2 || -n $3) || ($2 != --proxy-user=* || $3 != --proxy-pass=*) ]]; then + elif [[ (-z $2 || -z $3) && (-n $2 || -n $3) || ( -n $2 && -n $3 && ($2 != --proxy-user=* || $3 != --proxy-pass=*) ) ]]; then echo "Invalid options passed for proxy. Order is --proxy-user= --proxy-pass=" echo "Ignoring proxy" return From 9e1ed6983f6d0dd0000fd2ea3efa4e2a1cdb8df6 Mon Sep 17 00:00:00 2001 From: William Wernert Date: Tue, 26 May 2020 15:25:30 -0400 Subject: [PATCH 36/76] [fix] Parse options at beginning of setup --- setup/so-functions | 10 +++------- setup/so-setup | 11 ++++++++--- 2 files changed, 11 insertions(+), 10 deletions(-) diff --git a/setup/so-functions b/setup/so-functions index 4b47d65d9..ae99d190e 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -960,7 +960,6 @@ parse_options() { proxy=$(echo "$1" | tr -d '"' | awk -F'--turbo=' '{print $2}') proxy_url="http://$proxy" TURBO="$proxy_url" - use_turbo_proxy "$TURBO" else echo "turbo is not supported on this install type" >> $setup_log 2>&1 fi @@ -1501,16 +1500,13 @@ update_packages() { } use_turbo_proxy() { - local proxy_url=$1 - #TODO: add options for username + pass - if [[ $OS == 'centos' ]]; then - printf '%s\n' "proxy=${proxy_url}:3142" >> /etc/yum.conf + printf '%s\n' "proxy=${TURBO}:3142" >> /etc/yum.conf else printf '%s\n'\ "Acquire {"\ - " HTTP::proxy \"${proxy_url}:3142\";"\ - " 
HTTPS::proxy \"${proxy_url}:3142\";"\ + " HTTP::proxy \"${TURBO}:3142\";"\ + " HTTPS::proxy \"${TURBO}:3142\";"\ "}" > /etc/apt/apt.conf.d/proxy.conf fi } diff --git a/setup/so-setup b/setup/so-setup index 4b0a4d6f6..6ce0a64ca 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -40,6 +40,12 @@ export PATH=$PATH:../salt/common/tools/sbin date -u > $setup_log 2>&1 got_root + +if [[ $# -gt 1 ]]; then + set -- "${@:2}" + parse_options "$@" >> $setup_log 2>&1 +fi + detect_os if [ "$OS" == ubuntu ]; then @@ -241,9 +247,8 @@ fi whiptail_make_changes -if [[ $# -gt 1 ]]; then - set -- "${@:2}" - parse_options "$@" >> $setup_log 2>&1 +if [[ -n "$TURBO" ]]; then + use_turbo_proxy fi if [[ "$setup_type" == 'iso' ]]; then From 001f7c6694946c478b5ab6fffc82ab2ad9709a15 Mon Sep 17 00:00:00 2001 From: William Wernert Date: Tue, 26 May 2020 16:01:44 -0400 Subject: [PATCH 37/76] [fix] export TURBO var so it can be used from so-setup --- setup/so-functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup/so-functions b/setup/so-functions index ae99d190e..bf3f4d856 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -959,7 +959,7 @@ parse_options() { local proxy proxy=$(echo "$1" | tr -d '"' | awk -F'--turbo=' '{print $2}') proxy_url="http://$proxy" - TURBO="$proxy_url" + export TURBO="$proxy_url" else echo "turbo is not supported on this install type" >> $setup_log 2>&1 fi From 8e1bd32f4d5e9ecdd1b3bb8f3bdf334f1ad08a29 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Tue, 26 May 2020 16:11:31 -0400 Subject: [PATCH 38/76] Improve automated installs and remove sleep during progress updates --- setup/so-functions | 4 +--- setup/so-setup | 28 +++++++++++++++++++++++++--- 2 files changed, 26 insertions(+), 6 deletions(-) diff --git a/setup/so-functions b/setup/so-functions index bf6db26be..f8d41cfc7 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -968,7 +968,7 @@ parse_options() { fi ;; --proxy=*) - echo "Unimplimented" + echo "Unimplemented" 
return if [[ $2 != --proxy-user=* ]] || [[ $3 != --proxy-pass=* ]]; then @@ -1268,8 +1268,6 @@ set_progress_str() { '----'\ "$percentage% - ${progress_bar_text^^}"\ "----" >> "$setup_log" 2>&1 - - sleep 5 } sensor_pillar() { diff --git a/setup/so-setup b/setup/so-setup index 69c5763f9..03889606d 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -25,11 +25,33 @@ setup_type=$1 export setup_type automation=$2 + +automated=no + +function progress() { + if [ $automated == no ]; then + whiptail --title "Security Onion Install" --gauge 'Please wait while installing' 6 60 0 + fi +} + if [[ -f automation/$automation && $(basename $automation) == $automation ]]; then echo "Preselecting variable values based on automated setup: $automation" - exit 1 source automation/$automation - sleep 30 # Re-implement with network availability probe + automated=yes + + attempt=1 + attempts=60 + ip a | grep "$MNIC:" | grep "state UP" + while [ $? -ne 0 ]; do + if [ $attempt -gt $attempts ]; then + echo "Network unavailable - setup cannot continue" + exit 1 + fi + echo "Waiting for network to come up (attempt $attempt of $attempts)" + attempt=$((attempt + 1)) + sleep 10; + ip a | grep "$MNIC:" | grep "state UP" + done fi case "$setup_type" in @@ -498,7 +520,7 @@ fi set_progress_str 95 'Verifying setup' salt-call -l info state.highstate >> $setup_log 2>&1 -} | whiptail --title "Hybrid Hunter Install" --gauge "Please wait while installing" 6 60 0 +} | progress success=$(tail -10 $setup_log | grep Failed | awk '{ print $2}') if [[ "$success" = 0 ]]; then From ec0ec79470fe0bc822ad3e189aba63c79ae492ec Mon Sep 17 00:00:00 2001 From: William Wernert Date: Tue, 26 May 2020 16:23:06 -0400 Subject: [PATCH 39/76] [fix] Test install type after it has been set --- setup/so-functions | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/setup/so-functions b/setup/so-functions index bf3f4d856..1f5df9678 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -955,14 
+955,10 @@ node_pillar() { parse_options() { case "$1" in --turbo=*) - if [[ $is_master || $is_helix ]]; then - local proxy - proxy=$(echo "$1" | tr -d '"' | awk -F'--turbo=' '{print $2}') - proxy_url="http://$proxy" - export TURBO="$proxy_url" - else - echo "turbo is not supported on this install type" >> $setup_log 2>&1 - fi + local proxy + proxy=$(echo "$1" | tr -d '"' | awk -F'--turbo=' '{print $2}') + proxy_url="http://$proxy" + TURBO="$proxy_url" ;; --proxy=*) local proxy @@ -971,7 +967,7 @@ parse_options() { local proxy_protocol proxy_protocol=$(echo "$proxy" |tr -d '"' | awk 'match($0, /http|https/) { print substr($0, RSTART, RLENGTH) }') - if [[ ! $proxy_protocol =~ ^(http|https) ]]; then + if [[ ! $proxy_protocol =~ ^(http|https)$ ]]; then echo "Invalid proxy protocol" echo "Ignoring proxy" return @@ -1500,6 +1496,11 @@ update_packages() { } use_turbo_proxy() { + if [[ ! $install_type =~ ^(MASTER|EVAL|HELIXSENSOR|MASTERSEARCH|STANDALONE)$ ]]; then + echo "turbo is not supported on this install type" >> $setup_log 2>&1 + return + fi + if [[ $OS == 'centos' ]]; then printf '%s\n' "proxy=${TURBO}:3142" >> /etc/yum.conf else From 00681649bd7814144993071450cc89f231bfe7a1 Mon Sep 17 00:00:00 2001 From: William Wernert Date: Tue, 26 May 2020 16:41:41 -0400 Subject: [PATCH 40/76] [fix] Don't run tr on a string again --- setup/so-functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup/so-functions b/setup/so-functions index 4eabd8657..20fa67dfd 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -967,7 +967,7 @@ parse_options() { proxy=$(echo "$1" | tr -d '"' | awk -F'--proxy=' '{print $2}') local proxy_protocol - proxy_protocol=$(echo "$proxy" |tr -d '"' | awk 'match($0, /http|https/) { print substr($0, RSTART, RLENGTH) }') + proxy_protocol=$(echo "$proxy" | awk 'match($0, /http|https/) { print substr($0, RSTART, RLENGTH) }') if [[ ! 
$proxy_protocol =~ ^(http|https)$ ]]; then echo "Invalid proxy protocol" From 377d8e6336c2f30e6f08831a8fe4c8036eeb1d4a Mon Sep 17 00:00:00 2001 From: William Wernert Date: Tue, 26 May 2020 16:54:51 -0400 Subject: [PATCH 41/76] [fix] Don't run tr on a string again --- setup/so-functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup/so-functions b/setup/so-functions index 20fa67dfd..612b0147b 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -982,7 +982,7 @@ parse_options() { proxy_password=$(echo "$3" | tr -d '"' | awk -F'--proxy-pass=' '{print $2}') local proxy_addr - proxy_addr=$(echo "$proxy" | tr -d '"' | awk -F'http\:\/\/|https\:\/\/' '{print $2}') + proxy_addr=$(echo "$proxy" | awk -F'http\:\/\/|https\:\/\/' '{print $2}') export http_proxy="${proxy_protocol}://${proxy_user}:${proxy_password}@${proxy_addr}" From 8e95115a7c6d11965c5df27493dbc94db92e6346 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 26 May 2020 17:43:32 -0400 Subject: [PATCH 42/76] Update Suricata.yml --- salt/suricata/files/suricata.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/suricata/files/suricata.yaml b/salt/suricata/files/suricata.yaml index ebebe0138..ef06c7f97 100644 --- a/salt/suricata/files/suricata.yaml +++ b/salt/suricata/files/suricata.yaml @@ -319,7 +319,7 @@ outputs: append: yes #extended: yes # enable this for extended logging information #custom: yes # enabled the custom logging format (defined by customformat) - #customformat: "%{%D-%H:%M:%S}t.%z %{X-Forwarded-For}i %H %m %h %u %s %B %a:%p -> %A:%P" + #customformat: "" #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram' # a line based log of TLS handshake parameters (no alerts) @@ -329,7 +329,7 @@ outputs: append: yes #extended: yes # Log extended information like fingerprint #custom: yes # enabled the custom logging format (defined by customformat) - #customformat: "%{%D-%H:%M:%S}t.%z %a:%p -> %A:%P %v %n %d %D" + #customformat: "" 
#filetype: regular # 'regular', 'unix_stream' or 'unix_dgram' # output TLS transaction where the session is resumed using a # session id From 1259338e6c2eeb518e4f226a92f94f8cfc2c741c Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 26 May 2020 17:44:19 -0400 Subject: [PATCH 43/76] Remove old Suricata.yml --- salt/suricata/files/suricataDEPRICATED.yaml | 1726 ------------------- 1 file changed, 1726 deletions(-) delete mode 100644 salt/suricata/files/suricataDEPRICATED.yaml diff --git a/salt/suricata/files/suricataDEPRICATED.yaml b/salt/suricata/files/suricataDEPRICATED.yaml deleted file mode 100644 index 5a0121b63..000000000 --- a/salt/suricata/files/suricataDEPRICATED.yaml +++ /dev/null @@ -1,1726 +0,0 @@ -%YAML 1.1 ---- -{%- set interface = salt['pillar.get']('sensor:interface', 'bond0') %} -{%- if grains['role'] == 'so-eval' %} -{%- set MTU = 1500 %} -{%- elif grains['role'] == 'so-helix' %} -{%- set MTU = 9000 %} -{%- else %} -{%- set MTU = salt['pillar.get']('sensor:mtu', '1500') %} -{%- endif %} -{%- if salt['pillar.get']('sensor:homenet') %} - {%- set homenet = salt['pillar.get']('sensor:hnsensor', '') %} -{%- else %} - {%- set homenet = salt['pillar.get']('static:hnmaster', '') %} -{%- endif %} -# Suricata configuration file. 
In addition to the comments describing all -# options in this file, full documentation can be found at: -# https://redmine.openinfosecfoundation.org/projects/suricata/wiki/Suricatayaml - -## -## Step 1: inform Suricata about your network -## - -vars: - # more specifc is better for alert accuracy and performance - address-groups: - HOME_NET: "[{{ homenet }}]" - #HOME_NET: "[192.168.0.0/16]" - #HOME_NET: "[10.0.0.0/8]" - #HOME_NET: "[172.16.0.0/12]" - #HOME_NET: "any" - - EXTERNAL_NET: "!$HOME_NET" - #EXTERNAL_NET: "any" - - HTTP_SERVERS: "$HOME_NET" - SMTP_SERVERS: "$HOME_NET" - SQL_SERVERS: "$HOME_NET" - DNS_SERVERS: "$HOME_NET" - TELNET_SERVERS: "$HOME_NET" - AIM_SERVERS: "$EXTERNAL_NET" - DNP3_SERVER: "$HOME_NET" - DNP3_CLIENT: "$HOME_NET" - MODBUS_CLIENT: "$HOME_NET" - MODBUS_SERVER: "$HOME_NET" - ENIP_CLIENT: "$HOME_NET" - ENIP_SERVER: "$HOME_NET" - - port-groups: - HTTP_PORTS: "80" - SHELLCODE_PORTS: "!80" - ORACLE_PORTS: 1521 - SSH_PORTS: 22 - DNP3_PORTS: 20000 - MODBUS_PORTS: 502 - FILE_DATA_PORTS: "[$HTTP_PORTS,110,143]" - FTP_PORTS: 21 - - -## -## Step 2: select the rules to enable or disable -## - -default-rule-path: /etc/suricata/rules -rule-files: - - all.rules - -classification-file: /etc/suricata/classification.config -reference-config-file: /etc/suricata/reference.config -# threshold-file: /usr/local/etc/suricata/threshold.config - - -## -## Step 3: select outputs to enable -## - -# The default logging directory. Any log or output file will be -# placed here if its not specified with a full path name. This can be -# overridden with the -l command line parameter. -default-log-dir: /var/log/suricata/ - -# global stats configuration -stats: - enabled: yes - # The interval field (in seconds) controls at what interval - # the loggers are invoked. - interval: 30 - -# Configure the type of alert (and other) logging you would like. 
-outputs: - # a line based alerts log similar to Snort's fast.log - - fast: - enabled: no - filename: fast.log - append: yes - #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram' - - # Extensible Event Format (nicknamed EVE) event log in JSON format - - eve-log: - enabled: yes - filetype: regular #regular|syslog|unix_dgram|unix_stream|redis - filename: eve.json - rotate-interval: day - community-id: true - community-id-seed: 0 - #prefix: "@cee: " # prefix to prepend to each log entry - # the following are valid when type: syslog above - #identity: "suricata" - #facility: local5 - #level: Info ## possible levels: Emergency, Alert, Critical, - ## Error, Warning, Notice, Info, Debug - #redis: - # server: 127.0.0.1 - # port: 6379 - # async: true ## if redis replies are read asynchronously - # mode: list ## possible values: list|lpush (default), rpush, channel|publish - # ## lpush and rpush are using a Redis list. "list" is an alias for lpush - # ## publish is using a Redis channel. "channel" is an alias for publish - # key: suricata ## key or channel to use (default to suricata) - # Redis pipelining set up. This will enable to only do a query every - # 'batch-size' events. This should lower the latency induced by network - # connection at the cost of some memory. There is no flushing implemented - # so this setting as to be reserved to high traffic suricata. 
- # pipelining: - # enabled: yes ## set enable to yes to enable query pipelining - # batch-size: 10 ## number of entry to keep in buffer - types: - - alert: - # payload: yes # enable dumping payload in Base64 - # payload-buffer-size: 4kb # max size of payload buffer to output in eve-log - # payload-printable: yes # enable dumping payload in printable (lossy) format - # packet: yes # enable dumping of packet (without stream segments) - # http-body: yes # enable dumping of http body in Base64 - # http-body-printable: yes # enable dumping of http body in printable format - metadata: - app-layer: false - flow: false - rule: - metadata: true - raw: true - - # Enable the logging of tagged packets for rules using the - # "tag" keyword. - tagged-packets: no - - # HTTP X-Forwarded-For support by adding an extra field or overwriting - # the source or destination IP address (depending on flow direction) - # with the one reported in the X-Forwarded-For HTTP header. This is - # helpful when reviewing alerts for traffic that is being reverse - # or forward proxied. - xff: - enabled: no - # Two operation modes are available, "extra-data" and "overwrite". - mode: extra-data - # Two proxy deployments are supported, "reverse" and "forward". In - # a "reverse" deployment the IP address used is the last one, in a - # "forward" deployment the first IP address is used. - deployment: reverse - # Header name where the actual IP address will be reported, if more - # than one IP address is present, the last IP address will be the - # one taken into consideration. 
- header: X-Forwarded-For - #- http: - # extended: no # enable this for extended logging information - # custom allows additional http fields to be included in eve-log - # the example below adds three additional fields when uncommented - #custom: [Accept-Encoding, Accept-Language, Authorization] - #- dns: - # control logging of queries and answers - # default yes, no to disable - # query: no # enable logging of DNS queries - # answer: no # enable logging of DNS answers - # control which RR types are logged - # all enabled if custom not specified - #custom: [a, aaaa, cname, mx, ns, ptr, txt] - #- tls: - # extended: no # enable this for extended logging information - # output TLS transaction where the session is resumed using a - # session id - #session-resumption: no - # custom allows to control which tls fields that are included - # in eve-log - #custom: [subject, issuer, session_resumed, serial, fingerprint, sni, version, not_before, not_after, certificate, chain] - #- files: - # force-magic: no # force logging magic on all logged files - # force logging of checksums, available hash functions are md5, - # sha1 and sha256 - #force-hash: [md5] - #- drop: - # alerts: yes # log alerts that caused drops - # flows: all # start or all: 'start' logs only a single drop - # # per flow direction. All logs each dropped pkt. 
- #- smtp: - #extended: yes # enable this for extended logging information - # this includes: bcc, message-id, subject, x_mailer, user-agent - # custom fields logging from the list: - # reply-to, bcc, message-id, subject, x-mailer, user-agent, received, - # x-originating-ip, in-reply-to, references, importance, priority, - # sensitivity, organization, content-md5, date - #custom: [received, x-mailer, x-originating-ip, relays, reply-to, bcc] - # output md5 of fields: body, subject - # for the body you need to set app-layer.protocols.smtp.mime.body-md5 - # to yes - #md5: [body, subject] - - #- dnp3 - #- nfs - #- ssh: - #- stats: - # totals: yes # stats for all threads merged together - # threads: no # per thread stats - # deltas: no # include delta values - # bi-directional flows - #- flow: - # uni-directional flows - #- netflow - # Vars log flowbits and other packet and flow vars - #- vars - - # alert output for use with Barnyard2 - - unified2-alert: - enabled: no - filename: unified2.alert - - # File size limit. Can be specified in kb, mb, gb. Just a number - # is parsed as bytes. - #limit: 32mb - - # By default unified2 log files have the file creation time (in - # unix epoch format) appended to the filename. Set this to yes to - # disable this behaviour. - #nostamp: no - - # Sensor ID field of unified2 alerts. - #sensor-id: 0 - - # Include payload of packets related to alerts. Defaults to true, set to - # false if payload is not required. - #payload: yes - - # HTTP X-Forwarded-For support by adding the unified2 extra header or - # overwriting the source or destination IP address (depending on flow - # direction) with the one reported in the X-Forwarded-For HTTP header. - # This is helpful when reviewing alerts for traffic that is being reverse - # or forward proxied. - xff: - enabled: no - # Two operation modes are available, "extra-data" and "overwrite". 
Note - # that in the "overwrite" mode, if the reported IP address in the HTTP - # X-Forwarded-For header is of a different version of the packet - # received, it will fall-back to "extra-data" mode. - mode: extra-data - # Two proxy deployments are supported, "reverse" and "forward". In - # a "reverse" deployment the IP address used is the last one, in a - # "forward" deployment the first IP address is used. - deployment: reverse - # Header name where the actual IP address will be reported, if more - # than one IP address is present, the last IP address will be the - # one taken into consideration. - header: X-Forwarded-For - - # a line based log of HTTP requests (no alerts) - - http-log: - enabled: no - filename: http.log - append: yes - #extended: yes # enable this for extended logging information - #custom: yes # enabled the custom logging format (defined by customformat) - - #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram' - - # a line based log of TLS handshake parameters (no alerts) - - tls-log: - enabled: no # Log TLS connections. - filename: tls.log # File to store TLS logs. - append: yes - #extended: yes # Log extended information like fingerprint - #custom: yes # enabled the custom logging format (defined by customformat) - #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram' - # output TLS transaction where the session is resumed using a - # session id - #session-resumption: no - - # output module to store certificates chain to disk - - tls-store: - enabled: no - #certs-log-dir: certs # directory to store the certificates files - - # a line based log of DNS requests and/or replies (no alerts) - - dns-log: - enabled: no - filename: dns.log - append: yes - #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram' - - # Packet log... log packets in pcap format. 3 modes of operation: "normal" - # "multi" and "sguil". - # - # In normal mode a pcap file "filename" is created in the default-log-dir, - # or are as specified by "dir". 
- # In multi mode, a file is created per thread. This will perform much - # better, but will create multiple files where 'normal' would create one. - # In multi mode the filename takes a few special variables: - # - %n -- thread number - # - %i -- thread id - # - %t -- timestamp (secs or secs.usecs based on 'ts-format' - # E.g. filename: pcap.%n.%t - # - # Note that it's possible to use directories, but the directories are not - # created by Suricata. E.g. filename: pcaps/%n/log.%s will log into the - # per thread directory. - # - # Also note that the limit and max-files settings are enforced per thread. - # So the size limit when using 8 threads with 1000mb files and 2000 files - # is: 8*1000*2000 ~ 16TiB. - # - # In Sguil mode "dir" indicates the base directory. In this base dir the - # pcaps are created in th directory structure Sguil expects: - # - # $sguil-base-dir/YYYY-MM-DD/$filename. - # - # By default all packets are logged except: - # - TCP streams beyond stream.reassembly.depth - # - encrypted streams after the key exchange - # - - pcap-log: - enabled: no - filename: log.pcap - - # File size limit. Can be specified in kb, mb, gb. Just a number - # is parsed as bytes. - limit: 1000mb - - # If set to a value will enable ring buffer mode. Will keep Maximum of "max-files" of size "limit" - max-files: 2000 - - mode: normal # normal, multi or sguil. - - # Directory to place pcap files. If not provided the default log - # directory will be used. Required for "sguil" mode. - #dir: /nsm_data/ - - #ts-format: usec # sec or usec second format (default) is filename.sec usec is filename.sec.usec - use-stream-depth: no #If set to "yes" packets seen after reaching stream inspection depth are ignored. "no" logs all packets - honor-pass-rules: no # If set to "yes", flows in which a pass rule matched will stopped being logged. - - # a full alerts log containing much information for signature writers - # or for investigating suspected false positives. 
- - alert-debug: - enabled: no - filename: alert-debug.log - append: yes - #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram' - - # alert output to prelude (http://www.prelude-technologies.com/) only - # available if Suricata has been compiled with --enable-prelude - - alert-prelude: - enabled: no - profile: suricata - log-packet-content: no - log-packet-header: yes - - # Stats.log contains data from various counters of the suricata engine. - - stats: - enabled: yes - filename: stats.log - append: yes # append to file (yes) or overwrite it (no) - totals: yes # stats for all threads merged together - threads: no # per thread stats - #null-values: yes # print counters that have value 0 - - # a line based alerts log similar to fast.log into syslog - - syslog: - enabled: no - # reported identity to syslog. If ommited the program name (usually - # suricata) will be used. - #identity: "suricata" - facility: local5 - #level: Info ## possible levels: Emergency, Alert, Critical, - ## Error, Warning, Notice, Info, Debug - - # a line based information for dropped packets in IPS mode - - drop: - enabled: no - filename: drop.log - append: yes - #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram' - - # output module to store extracted files to disk - # - # The files are stored to the log-dir in a format "file." where is - # an incrementing number starting at 1. For each file "file." a meta - # file "file..meta" is created. - # - # File extraction depends on a lot of things to be fully done: - # - file-store stream-depth. For optimal results, set this to 0 (unlimited) - # - http request / response body sizes. Again set to 0 for optimal results. - # - rules that contain the "filestore" keyword. 
- - file-store: - enabled: no # set to yes to enable - log-dir: files # directory to store the files - force-magic: no # force logging magic on all stored files - # force logging of checksums, available hash functions are md5, - # sha1 and sha256 - #force-hash: [md5] - force-filestore: no # force storing of all files - # override global stream-depth for sessions in which we want to - # perform file extraction. Set to 0 for unlimited. - #stream-depth: 0 - #waldo: file.waldo # waldo file to store the file_id across runs - # uncomment to disable meta file writing - #write-meta: no - # uncomment the following variable to define how many files can - # remain open for filestore by Suricata. Default value is 0 which - # means files get closed after each write - #max-open-files: 1000 - - # output module to log files tracked in a easily parsable json format - - file-log: - enabled: no - filename: files-json.log - append: yes - #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram' - - force-magic: no # force logging magic on all logged files - # force logging of checksums, available hash functions are md5, - # sha1 and sha256 - #force-hash: [md5] - - # Log TCP data after stream normalization - # 2 types: file or dir. File logs into a single logfile. Dir creates - # 2 files per TCP session and stores the raw TCP data into them. - # Using 'both' will enable both file and dir modes. - # - # Note: limited by stream.depth - - tcp-data: - enabled: no - type: file - filename: tcp-data.log - - # Log HTTP body data after normalization, dechunking and unzipping. - # 2 types: file or dir. File logs into a single logfile. Dir creates - # 2 files per HTTP session and stores the normalized data into them. - # Using 'both' will enable both file and dir modes. - # - # Note: limited by the body limit settings - - http-body-data: - enabled: no - type: file - filename: http-data.log - - # Lua Output Support - execute lua script to generate alert and event - # output. 
- # Documented at: - # https://redmine.openinfosecfoundation.org/projects/suricata/wiki/Lua_Output - - lua: - enabled: no - #scripts-dir: /etc/suricata/lua-output/ - scripts: - # - script1.lua - -# Logging configuration. This is not about logging IDS alerts/events, but -# output about what Suricata is doing, like startup messages, errors, etc. -logging: - # The default log level, can be overridden in an output section. - # Note that debug level logging will only be emitted if Suricata was - # compiled with the --enable-debug configure option. - # - # This value is overriden by the SC_LOG_LEVEL env var. - default-log-level: notice - - # The default output format. Optional parameter, should default to - # something reasonable if not provided. Can be overriden in an - # output section. You can leave this out to get the default. - # - # This value is overriden by the SC_LOG_FORMAT env var. - #default-log-format: "[%i] %t - (%f:%l) <%d> (%n) -- " - - # A regex to filter output. Can be overridden in an output section. - # Defaults to empty (no filter). - # - # This value is overriden by the SC_LOG_OP_FILTER env var. - default-output-filter: - - # Define your logging outputs. If none are defined, or they are all - # disabled you will get the default - console output. - outputs: - - console: - enabled: yes - # type: json - - file: - enabled: yes - level: info - filename: /var/log/suricata/suricata.log - # type: json - - syslog: - enabled: no - -af-packet: - - interface: {{ interface }} - # Number of receive threads. "auto" uses the number of cores - #threads: auto - # Default clusterid. AF_PACKET will load balance packets based on flow. - cluster-id: 59 - # Default AF_PACKET cluster type. AF_PACKET can load balance per flow or per hash. 
- # This is only supported for Linux kernel > 3.1 - # possible value are: - # * cluster_round_robin: round robin load balancing - # * cluster_flow: all packets of a given flow are send to the same socket - # * cluster_cpu: all packets treated in kernel by a CPU are send to the same socket - # * cluster_qm: all packets linked by network card to a RSS queue are sent to the same - # socket. Requires at least Linux 3.14. - # * cluster_random: packets are sent randomly to sockets but with an equipartition. - # Requires at least Linux 3.14. - # * cluster_rollover: kernel rotates between sockets filling each socket before moving - # to the next. Requires at least Linux 3.10. - # Recommended modes are cluster_flow on most boxes and cluster_cpu or cluster_qm on system - # with capture card using RSS (require cpu affinity tuning and system irq tuning) - cluster-type: cluster_flow - # In some fragmentation case, the hash can not be computed. If "defrag" is set - # to yes, the kernel will do the needed defragmentation before sending the packets. - defrag: yes - # After Linux kernel 3.10 it is possible to activate the rollover option: if a socket is - # full then kernel will send the packet on the next socket with room available. This option - # can minimize packet drop and increase the treated bandwidth on single intensive flow. - #rollover: yes - # To use the ring feature of AF_PACKET, set 'use-mmap' to yes - #use-mmap: yes - # Lock memory map to avoid it goes to swap. Be careful that over suscribing could lock - # your system - #mmap-locked: yes - # Use tpacket_v3 capture mode, only active if use-mmap is true - # Don't use it in IPS or TAP mode as it causes severe latency - #tpacket-v3: yes - # Ring size will be computed with respect to max_pending_packets and number - # of threads. You can set manually the ring size in number of packets by setting - # the following value. 
If you are using flow cluster-type and have really network - # intensive single-flow you could want to set the ring-size independently of the number - # of threads: - #ring-size: 2048 - # Block size is used by tpacket_v3 only. It should set to a value high enough to contain - # a decent number of packets. Size is in bytes so please consider your MTU. It should be - # a power of 2 and it must be multiple of page size (usually 4096). - #block-size: 32768 - # tpacket_v3 block timeout: an open block is passed to userspace if it is not - # filled after block-timeout milliseconds. - #block-timeout: 10 - # On busy system, this could help to set it to yes to recover from a packet drop - # phase. This will result in some packets (at max a ring flush) being non treated. - #use-emergency-flush: yes - # recv buffer size, increase value could improve performance - # buffer-size: 32768 - # Set to yes to disable promiscuous mode - # disable-promisc: no - # Choose checksum verification mode for the interface. At the moment - # of the capture, some packets may be with an invalid checksum due to - # offloading to the network card of the checksum computation. - # Possible values are: - # - kernel: use indication sent by kernel for each packet (default) - # - yes: checksum validation is forced - # - no: checksum validation is disabled - # - auto: suricata uses a statistical approach to detect when - # checksum off-loading is used. - # Warning: 'checksum-validation' must be set to yes to have any validation - #checksum-checks: kernel - # BPF filter to apply to this interface. The pcap filter syntax apply here. - #bpf-filter: port 80 or udp - # You can use the following variables to activate AF_PACKET tap or IPS mode. - # If copy-mode is set to ips or tap, the traffic coming to the current - # interface will be copied to the copy-iface interface. If 'tap' is set, the - # copy is complete. If 'ips' is set, the packet matching a 'drop' action - # will not be copied. 
- #copy-mode: ips - #copy-iface: eth1 - - # Put default values here. These will be used for an interface that is not - # in the list above. - - interface: default - #threads: auto - #use-mmap: no - #rollover: yes - #tpacket-v3: yes - -# Cross platform libpcap capture support -pcap: - - interface: eth0 - # On Linux, pcap will try to use mmaped capture and will use buffer-size - # as total of memory used by the ring. So set this to something bigger - # than 1% of your bandwidth. - #buffer-size: 16777216 - #bpf-filter: "tcp and port 25" - # Choose checksum verification mode for the interface. At the moment - # of the capture, some packets may be with an invalid checksum due to - # offloading to the network card of the checksum computation. - # Possible values are: - # - yes: checksum validation is forced - # - no: checksum validation is disabled - # - auto: suricata uses a statistical approach to detect when - # checksum off-loading is used. (default) - # Warning: 'checksum-validation' must be set to yes to have any validation - #checksum-checks: auto - # With some accelerator cards using a modified libpcap (like myricom), you - # may want to have the same number of capture threads as the number of capture - # rings. In this case, set up the threads variable to N to start N threads - # listening on the same interface. - #threads: 16 - # set to no to disable promiscuous mode: - #promisc: no - # set snaplen, if not set it defaults to MTU if MTU can be known - # via ioctl call and to full capture if not. - #snaplen: 1518 - # Put default values here - - interface: default - #checksum-checks: auto - -# Settings for reading pcap files -pcap-file: - # Possible values are: - # - yes: checksum validation is forced - # - no: checksum validation is disabled - # - auto: suricata uses a statistical approach to detect when - # checksum off-loading is used. 
(default) - # Warning: 'checksum-validation' must be set to yes to have checksum tested - checksum-checks: auto - -# See "Advanced Capture Options" below for more options, including NETMAP -# and PF_RING. - - -## -## Step 5: App Layer Protocol Configuration -## - -# Configure the app-layer parsers. The protocols section details each -# protocol. -# -# The option "enabled" takes 3 values - "yes", "no", "detection-only". -# "yes" enables both detection and the parser, "no" disables both, and -# "detection-only" enables protocol detection only (parser disabled). -app-layer: - protocols: - tls: - enabled: detection-only - detection-ports: - dp: 443 - - # Completely stop processing TLS/SSL session after the handshake - # completed. If bypass is enabled this will also trigger flow - # bypass. If disabled (the default), TLS/SSL session is still - # tracked for Heartbleed and other anomalies. - #no-reassemble: yes - dcerpc: - enabled: detection-only - ftp: - enabled: detection-only - ssh: - enabled: detection-only - smtp: - enabled: detection-only - # Configure SMTP-MIME Decoder - mime: - # Decode MIME messages from SMTP transactions - # (may be resource intensive) - # This field supercedes all others because it turns the entire - # process on or off - decode-mime: detection-only - - # Decode MIME entity bodies (ie. base64, quoted-printable, etc.) - decode-base64: detection-only - decode-quoted-printable: detection-only - - # Maximum bytes per header data value stored in the data structure - # (default is 2000) - header-value-depth: 2000 - - # Extract URLs and save in state data structure - extract-urls: detection-only - # Set to yes to compute the md5 of the mail body. You will then - # be able to journalize it. 
- body-md5: no - # Configure inspected-tracker for file_data keyword - inspected-tracker: - content-limit: 100000 - content-inspect-min-size: 32768 - content-inspect-window: 4096 - imap: - enabled: detection-only - msn: - enabled: detection-only - smb: - enabled: detection-only - detection-ports: - dp: 139, 445 - # smb2 detection is disabled internally inside the engine. - #smb2: - # enabled: yes - # Note: NFS parser depends on Rust support: pass --enable-rust - # to configure. - nfs: - enabled: no - dns: - # memcaps. Globally and per flow/state. - #global-memcap: 16mb - #state-memcap: 512kb - - # How many unreplied DNS requests are considered a flood. - # If the limit is reached, app-layer-event:dns.flooded; will match. - #request-flood: 500 - - tcp: - enabled: detection-only - detection-ports: - dp: 53 - udp: - enabled: detection-only - detection-ports: - dp: 53 - http: - enabled: detection-only - # memcap: 64mb - - # default-config: Used when no server-config matches - # personality: List of personalities used by default - # request-body-limit: Limit reassembly of request body for inspection - # by http_client_body & pcre /P option. - # response-body-limit: Limit reassembly of response body for inspection - # by file_data, http_server_body & pcre /Q option. - # double-decode-path: Double decode path section of the URI - # double-decode-query: Double decode query section of the URI - # response-body-decompress-layer-limit: - # Limit to how many layers of compression will be - # decompressed. Defaults to 2. - # - # server-config: List of server configurations to use if address matches - # address: List of ip addresses or networks for this block - # personalitiy: List of personalities used by this block - # request-body-limit: Limit reassembly of request body for inspection - # by http_client_body & pcre /P option. - # response-body-limit: Limit reassembly of response body for inspection - # by file_data, http_server_body & pcre /Q option. 
- # double-decode-path: Double decode path section of the URI - # double-decode-query: Double decode query section of the URI - # - # uri-include-all: Include all parts of the URI. By default the - # 'scheme', username/password, hostname and port - # are excluded. Setting this option to true adds - # all of them to the normalized uri as inspected - # by http_uri, urilen, pcre with /U and the other - # keywords that inspect the normalized uri. - # Note that this does not affect http_raw_uri. - # Also, note that including all was the default in - # 1.4 and 2.0beta1. - # - # meta-field-limit: Hard size limit for request and response size - # limits. Applies to request line and headers, - # response line and headers. Does not apply to - # request or response bodies. Default is 18k. - # If this limit is reached an event is raised. - # - # Currently Available Personalities: - # Minimal, Generic, IDS (default), IIS_4_0, IIS_5_0, IIS_5_1, IIS_6_0, - # IIS_7_0, IIS_7_5, Apache_2 - libhtp: - default-config: - personality: IDS - - # Can be specified in kb, mb, gb. Just a number indicates - # it's in bytes. - request-body-limit: 100kb - response-body-limit: 100kb - - # inspection limits - request-body-minimal-inspect-size: 32kb - request-body-inspect-window: 4kb - response-body-minimal-inspect-size: 40kb - response-body-inspect-window: 16kb - - # response body decompression (0 disables) - response-body-decompress-layer-limit: 2 - - # auto will use http-body-inline mode in IPS mode, yes or no set it statically - http-body-inline: auto - - # Take a random value for inspection sizes around the specified value. - # This lower the risk of some evasion technics but could lead - # detection change between runs. It is set to 'yes' by default. - #randomize-inspection-sizes: yes - # If randomize-inspection-sizes is active, the value of various - # inspection size will be choosen in the [1 - range%, 1 + range%] - # range - # Default value of randomize-inspection-range is 10. 
- #randomize-inspection-range: 10 - - # decoding - double-decode-path: no - double-decode-query: no - - server-config: - - #- apache: - # address: [192.168.1.0/24, 127.0.0.0/8, "::1"] - # personality: Apache_2 - # # Can be specified in kb, mb, gb. Just a number indicates - # # it's in bytes. - # request-body-limit: 4096 - # response-body-limit: 4096 - # double-decode-path: no - # double-decode-query: no - - #- iis7: - # address: - # - 192.168.0.0/24 - # - 192.168.10.0/24 - # personality: IIS_7_0 - # # Can be specified in kb, mb, gb. Just a number indicates - # # it's in bytes. - # request-body-limit: 4096 - # response-body-limit: 4096 - # double-decode-path: no - # double-decode-query: no - - # Note: Modbus probe parser is minimalist due to the poor significant field - # Only Modbus message length (greater than Modbus header length) - # And Protocol ID (equal to 0) are checked in probing parser - # It is important to enable detection port and define Modbus port - # to avoid false positive - modbus: - # How many unreplied Modbus requests are considered a flood. - # If the limit is reached, app-layer-event:modbus.flooded; will match. - #request-flood: 500 - - enabled: no - detection-ports: - dp: 502 - # According to MODBUS Messaging on TCP/IP Implementation Guide V1.0b, it - # is recommended to keep the TCP connection opened with a remote device - # and not to open and close it for each MODBUS/TCP transaction. In that - # case, it is important to set the depth of the stream reassembling as - # unlimited (stream.reassembly.depth: 0) - - # Stream reassembly size for modbus. By default track it completely. 
- stream-depth: 0 - - # DNP3 - dnp3: - enabled: no - detection-ports: - dp: 20000 - - # SCADA EtherNet/IP and CIP protocol support - enip: - enabled: no - detection-ports: - dp: 44818 - sp: 44818 - - # Note: parser depends on experimental Rust support - # with --enable-rust-experimental passed to configure - ntp: - enabled: no - -# Limit for the maximum number of asn1 frames to decode (default 256) -asn1-max-frames: 256 - - -############################################################################## -## -## Advanced settings below -## -############################################################################## - -## -## Run Options -## - -# Run suricata as user and group. -run-as: - user: suricata - group: suricata - -# Some logging module will use that name in event as identifier. The default -# value is the hostname -#sensor-name: suricata - -# Default location of the pid file. The pid file is only used in -# daemon mode (start Suricata with -D). If not running in daemon mode -# the --pidfile command line option must be used to create a pid file. -#pid-file: /usr/local/var/run/suricata.pid - -# Daemon working directory -# Suricata will change directory to this one if provided -# Default: "/" -#daemon-directory: "/" - -# Suricata core dump configuration. Limits the size of the core dump file to -# approximately max-dump. The actual core dump size will be a multiple of the -# page size. Core dumps that would be larger than max-dump are truncated. On -# Linux, the actual core dump size may be a few pages larger than max-dump. -# Setting max-dump to 0 disables core dumping. -# Setting max-dump to 'unlimited' will give the full core dump file. -# On 32-bit Linux, a max-dump value >= ULONG_MAX may cause the core dump size -# to be 'unlimited'. - -coredump: - max-dump: unlimited - -# If suricata box is a router for the sniffed networks, set it to 'router'. If -# it is a pure sniffing setup, set it to 'sniffer-only'. 
-# If set to auto, the variable is internally switch to 'router' in IPS mode -# and 'sniffer-only' in IDS mode. -# This feature is currently only used by the reject* keywords. -host-mode: auto - -# Number of packets preallocated per thread. The default is 1024. A higher number -# will make sure each CPU will be more easily kept busy, but may negatively -# impact caching. -# -# If you are using the CUDA pattern matcher (mpm-algo: ac-cuda), different rules -# apply. In that case try something like 60000 or more. This is because the CUDA -# pattern matcher buffers and scans as many packets as possible in parallel. -#max-pending-packets: 1024 - -# Runmode the engine should use. Please check --list-runmodes to get the available -# runmodes for each packet acquisition method. Defaults to "autofp" (auto flow pinned -# load balancing). -runmode: workers - -# Specifies the kind of flow load balancer used by the flow pinned autofp mode. -# -# Supported schedulers are: -# -# round-robin - Flows assigned to threads in a round robin fashion. -# active-packets - Flows assigned to threads that have the lowest number of -# unprocessed packets (default). -# hash - Flow alloted usihng the address hash. More of a random -# technique. Was the default in Suricata 1.2.1 and older. -# -#autofp-scheduler: active-packets - -# Preallocated size for packet. Default is 1514 which is the classical -# size for pcap on ethernet. You should adjust this value to the highest -# packet size (MTU + hardware header) on your system. -default-packet-size: {{ MTU + 15 }} - -# Unix command socket can be used to pass commands to suricata. -# An external tool can then connect to get information from suricata -# or trigger some modifications of the engine. Set enabled to yes -# to activate the feature. In auto mode, the feature will only be -# activated in live capture mode. You can use the filename variable to set -# the file name of the socket. 
-unix-command: - enabled: auto - #filename: custom.socket - -# Magic file. The extension .mgc is added to the value here. -#magic-file: /usr/share/file/magic -#magic-file: - -legacy: - uricontent: enabled - -## -## Detection settings -## - -# Set the order of alerts bassed on actions -# The default order is pass, drop, reject, alert -# action-order: -# - pass -# - drop -# - reject -# - alert - -# IP Reputation -#reputation-categories-file: /usr/local/etc/suricata/iprep/categories.txt -#default-reputation-path: /usr/local/etc/suricata/iprep -#reputation-files: -# - reputation.list - -# When run with the option --engine-analysis, the engine will read each of -# the parameters below, and print reports for each of the enabled sections -# and exit. The reports are printed to a file in the default log dir -# given by the parameter "default-log-dir", with engine reporting -# subsection below printing reports in its own report file. -engine-analysis: - # enables printing reports for fast-pattern for every rule. - rules-fast-pattern: yes - # enables printing reports for each rule - rules: yes - -#recursion and match limits for PCRE where supported -pcre: - match-limit: 3500 - match-limit-recursion: 1500 - -## -## Advanced Traffic Tracking and Reconstruction Settings -## - -# Host specific policies for defragmentation and TCP stream -# reassembly. The host OS lookup is done using a radix tree, just -# like a routing table so the most specific entry matches. -host-os-policy: - # Make the default policy windows. 
- windows: [0.0.0.0/0] - bsd: [] - bsd-right: [] - old-linux: [] - linux: [] - old-solaris: [] - solaris: [] - hpux10: [] - hpux11: [] - irix: [] - macos: [] - vista: [] - windows2k3: [] - -# Defrag settings: - -defrag: - memcap: 32mb - hash-size: 65536 - trackers: 65535 # number of defragmented flows to follow - max-frags: 65535 # number of fragments to keep (higher than trackers) - prealloc: yes - timeout: 60 - -# Enable defrag per host settings -# host-config: -# -# - dmz: -# timeout: 30 -# address: [192.168.1.0/24, 127.0.0.0/8, 1.1.1.0/24, 2.2.2.0/24, "1.1.1.1", "2.2.2.2", "::1"] -# -# - lan: -# timeout: 45 -# address: -# - 192.168.0.0/24 -# - 192.168.10.0/24 -# - 172.16.14.0/24 - -# Flow settings: -# By default, the reserved memory (memcap) for flows is 32MB. This is the limit -# for flow allocation inside the engine. You can change this value to allow -# more memory usage for flows. -# The hash-size determine the size of the hash used to identify flows inside -# the engine, and by default the value is 65536. -# At the startup, the engine can preallocate a number of flows, to get a better -# performance. The number of flows preallocated is 10000 by default. -# emergency-recovery is the percentage of flows that the engine need to -# prune before unsetting the emergency state. The emergency state is activated -# when the memcap limit is reached, allowing to create new flows, but -# prunning them with the emergency timeouts (they are defined below). -# If the memcap is reached, the engine will try to prune flows -# with the default timeouts. If it doens't find a flow to prune, it will set -# the emergency bit and it will try again with more agressive timeouts. -# If that doesn't work, then it will try to kill the last time seen flows -# not in use. -# The memcap can be specified in kb, mb, gb. Just a number indicates it's -# in bytes. 
- -flow: - memcap: 128mb - hash-size: 65536 - prealloc: 10000 - emergency-recovery: 30 - #managers: 1 # default to one flow manager - #recyclers: 1 # default to one flow recycler thread - -# This option controls the use of vlan ids in the flow (and defrag) -# hashing. Normally this should be enabled, but in some (broken) -# setups where both sides of a flow are not tagged with the same vlan -# tag, we can ignore the vlan id's in the flow hashing. -vlan: - use-for-tracking: true - -# Specific timeouts for flows. Here you can specify the timeouts that the -# active flows will wait to transit from the current state to another, on each -# protocol. The value of "new" determine the seconds to wait after a hanshake or -# stream startup before the engine free the data of that flow it doesn't -# change the state to established (usually if we don't receive more packets -# of that flow). The value of "established" is the amount of -# seconds that the engine will wait to free the flow if it spend that amount -# without receiving new packets or closing the connection. "closed" is the -# amount of time to wait after a flow is closed (usually zero). "bypassed" -# timeout controls locally bypassed flows. For these flows we don't do any other -# tracking. If no packets have been seen after this timeout, the flow is discarded. -# -# There's an emergency mode that will become active under attack circumstances, -# making the engine to check flow status faster. This configuration variables -# use the prefix "emergency-" and work similar as the normal ones. -# Some timeouts doesn't apply to all the protocols, like "closed", for udp and -# icmp. 
- -flow-timeouts: - - default: - new: 30 - established: 300 - closed: 0 - bypassed: 100 - emergency-new: 10 - emergency-established: 100 - emergency-closed: 0 - emergency-bypassed: 50 - tcp: - new: 60 - established: 600 - closed: 60 - bypassed: 100 - emergency-new: 5 - emergency-established: 100 - emergency-closed: 10 - emergency-bypassed: 50 - udp: - new: 30 - established: 300 - bypassed: 100 - emergency-new: 10 - emergency-established: 100 - emergency-bypassed: 50 - icmp: - new: 30 - established: 300 - bypassed: 100 - emergency-new: 10 - emergency-established: 100 - emergency-bypassed: 50 - -# Stream engine settings. Here the TCP stream tracking and reassembly -# engine is configured. -# -# stream: -# memcap: 32mb # Can be specified in kb, mb, gb. Just a -# # number indicates it's in bytes. -# checksum-validation: yes # To validate the checksum of received -# # packet. If csum validation is specified as -# # "yes", then packet with invalid csum will not -# # be processed by the engine stream/app layer. -# # Warning: locally generated trafic can be -# # generated without checksum due to hardware offload -# # of checksum. You can control the handling of checksum -# # on a per-interface basis via the 'checksum-checks' -# # option -# prealloc-sessions: 2k # 2k sessions prealloc'd per stream thread -# midstream: false # don't allow midstream session pickups -# async-oneside: false # don't enable async stream handling -# inline: no # stream inline mode -# drop-invalid: yes # in inline mode, drop packets that are invalid with regards to streaming engine -# max-synack-queued: 5 # Max different SYN/ACKs to queue -# bypass: no # Bypass packets when stream.depth is reached -# -# reassembly: -# memcap: 64mb # Can be specified in kb, mb, gb. Just a number -# # indicates it's in bytes. -# depth: 1mb # Can be specified in kb, mb, gb. Just a number -# # indicates it's in bytes. -# toserver-chunk-size: 2560 # inspect raw stream in chunks of at least -# # this size. 
Can be specified in kb, mb, -# # gb. Just a number indicates it's in bytes. -# toclient-chunk-size: 2560 # inspect raw stream in chunks of at least -# # this size. Can be specified in kb, mb, -# # gb. Just a number indicates it's in bytes. -# randomize-chunk-size: yes # Take a random value for chunk size around the specified value. -# # This lower the risk of some evasion technics but could lead -# # detection change between runs. It is set to 'yes' by default. -# randomize-chunk-range: 10 # If randomize-chunk-size is active, the value of chunk-size is -# # a random value between (1 - randomize-chunk-range/100)*toserver-chunk-size -# # and (1 + randomize-chunk-range/100)*toserver-chunk-size and the same -# # calculation for toclient-chunk-size. -# # Default value of randomize-chunk-range is 10. -# -# raw: yes # 'Raw' reassembly enabled or disabled. -# # raw is for content inspection by detection -# # engine. -# -# segment-prealloc: 2048 # number of segments preallocated per thread -# -# check-overlap-different-data: true|false -# # check if a segment contains different data -# # than what we've already seen for that -# # position in the stream. -# # This is enabled automatically if inline mode -# # is used or when stream-event:reassembly_overlap_different_data; -# # is used in a rule. -# -stream: - memcap: 64mb - checksum-validation: yes # reject wrong csums - inline: auto # auto will use inline mode in IPS mode, yes or no set it statically - reassembly: - memcap: 256mb - depth: 1mb # reassemble 1mb into a stream - toserver-chunk-size: 2560 - toclient-chunk-size: 2560 - randomize-chunk-size: yes - #randomize-chunk-range: 10 - #raw: yes - #segment-prealloc: 2048 - #check-overlap-different-data: true - -# Host table: -# -# Host table is used by tagging and per host thresholding subsystems. -# -host: - hash-size: 4096 - prealloc: 1000 - memcap: 32mb - -# IP Pair table: -# -# Used by xbits 'ippair' tracking. 
-# -#ippair: -# hash-size: 4096 -# prealloc: 1000 -# memcap: 32mb - -# Decoder settings - -decoder: - # Teredo decoder is known to not be completely accurate - # it will sometimes detect non-teredo as teredo. - teredo: - enabled: true - - -## -## Performance tuning and profiling -## - -# The detection engine builds internal groups of signatures. The engine -# allow us to specify the profile to use for them, to manage memory on an -# efficient way keeping a good performance. For the profile keyword you -# can use the words "low", "medium", "high" or "custom". If you use custom -# make sure to define the values at "- custom-values" as your convenience. -# Usually you would prefer medium/high/low. -# -# "sgh mpm-context", indicates how the staging should allot mpm contexts for -# the signature groups. "single" indicates the use of a single context for -# all the signature group heads. "full" indicates a mpm-context for each -# group head. "auto" lets the engine decide the distribution of contexts -# based on the information the engine gathers on the patterns from each -# group head. -# -# The option inspection-recursion-limit is used to limit the recursive calls -# in the content inspection code. For certain payload-sig combinations, we -# might end up taking too much time in the content inspection code. -# If the argument specified is 0, the engine uses an internally defined -# default limit. On not specifying a value, we use no limits on the recursion. -detect: - profile: medium - custom-values: - toclient-groups: 3 - toserver-groups: 25 - sgh-mpm-context: auto - inspection-recursion-limit: 3000 - # If set to yes, the loading of signatures will be made after the capture - # is started. This will limit the downtime in IPS mode. - #delayed-detect: yes - - prefilter: - # default prefiltering setting. "mpm" only creates MPM/fast_pattern - # engines. "auto" also sets up prefilter engines for other keywords. 
- # Use --list-keywords=all to see which keywords support prefiltering. - default: mpm - - # the grouping values above control how many groups are created per - # direction. Port whitelisting forces that port to get it's own group. - # Very common ports will benefit, as well as ports with many expensive - # rules. - grouping: - #tcp-whitelist: 53, 80, 139, 443, 445, 1433, 3306, 3389, 6666, 6667, 8080 - #udp-whitelist: 53, 135, 5060 - - profiling: - # Log the rules that made it past the prefilter stage, per packet - # default is off. The threshold setting determines how many rules - # must have made it past pre-filter for that rule to trigger the - # logging. - #inspect-logging-threshold: 200 - grouping: - dump-to-disk: false - include-rules: false # very verbose - include-mpm-stats: false - -# Select the multi pattern algorithm you want to run for scan/search the -# in the engine. -# -# The supported algorithms are: -# "ac" - Aho-Corasick, default implementation -# "ac-bs" - Aho-Corasick, reduced memory implementation -# "ac-cuda" - Aho-Corasick, CUDA implementation -# "ac-ks" - Aho-Corasick, "Ken Steele" variant -# "hs" - Hyperscan, available when built with Hyperscan support -# -# The default mpm-algo value of "auto" will use "hs" if Hyperscan is -# available, "ac" otherwise. -# -# The mpm you choose also decides the distribution of mpm contexts for -# signature groups, specified by the conf - "detect.sgh-mpm-context". -# Selecting "ac" as the mpm would require "detect.sgh-mpm-context" -# to be set to "single", because of ac's memory requirements, unless the -# ruleset is small enough to fit in one's memory, in which case one can -# use "full" with "ac". Rest of the mpms can be run in "full" mode. -# -# There is also a CUDA pattern matcher (only available if Suricata was -# compiled with --enable-cuda: b2g_cuda. Make sure to update your -# max-pending-packets setting above as well if you use b2g_cuda. 
- -mpm-algo: auto - -# Select the matching algorithm you want to use for single-pattern searches. -# -# Supported algorithms are "bm" (Boyer-Moore) and "hs" (Hyperscan, only -# available if Suricata has been built with Hyperscan support). -# -# The default of "auto" will use "hs" if available, otherwise "bm". - -spm-algo: auto - -# Suricata is multi-threaded. Here the threading can be influenced. -threading: - set-cpu-affinity: yes - # Tune cpu affinity of threads. Each family of threads can be bound - # on specific CPUs. - # - # These 2 apply to the all runmodes: - # management-cpu-set is used for flow timeout handling, counters - # worker-cpu-set is used for 'worker' threads - # - # Additionally, for autofp these apply: - # receive-cpu-set is used for capture threads - # verdict-cpu-set is used for IPS verdict threads - # - {%- if salt['pillar.get']('sensor:suriprocs') %} - cpu-affinity: - - management-cpu-set: - cpu: [ all ] # include only these cpus in affinity settings - - receive-cpu-set: - cpu: [ all ] # include only these cpus in affinity settings - - worker-cpu-set: - cpu: [ "all" ] - mode: "exclusive" - # Use explicitely 3 threads and don't compute number by using - # detect-thread-ratio variable: - threads: {{ salt['pillar.get']('sensor:suriprocs') }} - prio: - default: "high" - {% endif %} - - {%- if salt['pillar.get']('sensor:suripins') %} - cpu-affinity: - - management-cpu-set: - cpu: [ {{ salt['pillar.get']('sensor:suripins')|join(",") }} ] # include only these cpus in affinity settings - - receive-cpu-set: - cpu: [ {{ salt['pillar.get']('sensor:suripins')|join(",") }} ] # include only these cpus in affinity settings - - worker-cpu-set: - cpu: [ {{ salt['pillar.get']('sensor:suripins')|join(",") }} ] - mode: "exclusive" - # Use explicitely 3 threads and don't compute number by using - # detect-thread-ratio variable: - threads: {{ salt['pillar.get']('sensor:suripins')|length }} - prio: - default: "high" - {% endif %} - - #- verdict-cpu-set: - # cpu: [ 
0 ] - # prio: - # default: "high" - # - # By default Suricata creates one "detect" thread per available CPU/CPU core. - # This setting allows controlling this behaviour. A ratio setting of 2 will - # create 2 detect threads for each CPU/CPU core. So for a dual core CPU this - # will result in 4 detect threads. If values below 1 are used, less threads - # are created. So on a dual core CPU a setting of 0.5 results in 1 detect - # thread being created. Regardless of the setting at a minimum 1 detect - # thread will always be created. - # - detect-thread-ratio: 1.0 - -# Luajit has a strange memory requirement, it's 'states' need to be in the -# first 2G of the process' memory. -# -# 'luajit.states' is used to control how many states are preallocated. -# State use: per detect script: 1 per detect thread. Per output script: 1 per -# script. -luajit: - states: 128 - -# Profiling settings. Only effective if Suricata has been built with the -# the --enable-profiling configure flag. -# -profiling: - # Run profiling for every xth packet. The default is 1, which means we - # profile every packet. If set to 1000, one packet is profiled for every - # 1000 received. - #sample-rate: 1000 - - # rule profiling - rules: - - # Profiling can be disabled here, but it will still have a - # performance impact if compiled in. - enabled: yes - filename: rule_perf.log - append: yes - - # Sort options: ticks, avgticks, checks, matches, maxticks - # If commented out all the sort options will be used. - #sort: avgticks - - # Limit the number of sids for which stats are shown at exit (per sort). - limit: 10 - - # output to json - json: yes - - # per keyword profiling - keywords: - enabled: yes - filename: keyword_perf.log - append: yes - - # per rulegroup profiling - rulegroups: - enabled: yes - filename: rule_group_perf.log - append: yes - - # packet profiling - packets: - - # Profiling can be disabled here, but it will still have a - # performance impact if compiled in. 
- enabled: yes - filename: packet_stats.log - append: yes - - # per packet csv output - csv: - - # Output can be disabled here, but it will still have a - # performance impact if compiled in. - enabled: no - filename: packet_stats.csv - - # profiling of locking. Only available when Suricata was built with - # --enable-profiling-locks. - locks: - enabled: no - filename: lock_stats.log - append: yes - - pcap-log: - enabled: no - filename: pcaplog_stats.log - append: yes - -## -## Netfilter integration -## - -# When running in NFQ inline mode, it is possible to use a simulated -# non-terminal NFQUEUE verdict. -# This permit to do send all needed packet to suricata via this a rule: -# iptables -I FORWARD -m mark ! --mark $MARK/$MASK -j NFQUEUE -# And below, you can have your standard filtering ruleset. To activate -# this mode, you need to set mode to 'repeat' -# If you want packet to be sent to another queue after an ACCEPT decision -# set mode to 'route' and set next-queue value. -# On linux >= 3.1, you can set batchcount to a value > 1 to improve performance -# by processing several packets before sending a verdict (worker runmode only). -# On linux >= 3.6, you can set the fail-open option to yes to have the kernel -# accept the packet if suricata is not able to keep pace. -# bypass mark and mask can be used to implement NFQ bypass. If bypass mark is -# set then the NFQ bypass is activated. Suricata will set the bypass mark/mask -# on packet of a flow that need to be bypassed. The Nefilter ruleset has to -# directly accept all packets of a flow once a packet has been marked. 
-nfq: -# mode: accept -# repeat-mark: 1 -# repeat-mask: 1 -# bypass-mark: 1 -# bypass-mask: 1 -# route-queue: 2 -# batchcount: 20 -# fail-open: yes - -#nflog support -nflog: - # netlink multicast group - # (the same as the iptables --nflog-group param) - # Group 0 is used by the kernel, so you can't use it - - group: 2 - # netlink buffer size - buffer-size: 18432 - # put default value here - - group: default - # set number of packet to queue inside kernel - qthreshold: 1 - # set the delay before flushing packet in the queue inside kernel - qtimeout: 100 - # netlink max buffer size - max-size: 20000 - -## -## Advanced Capture Options -## - -# general settings affecting packet capture -capture: - # disable NIC offloading. It's restored when Suricata exists. - # Enabled by default - #disable-offloading: false - # - # disable checksum validation. Same as setting '-k none' on the - # commandline - #checksum-validation: none - -# Netmap support -# -# Netmap operates with NIC directly in driver, so you need FreeBSD wich have -# built-in netmap support or compile and install netmap module and appropriate -# NIC driver on your Linux system. -# To reach maximum throughput disable all receive-, segmentation-, -# checksum- offloadings on NIC. -# Disabling Tx checksum offloading is *required* for connecting OS endpoint -# with NIC endpoint. -# You can find more information at https://github.com/luigirizzo/netmap -# -netmap: - # To specify OS endpoint add plus sign at the end (e.g. "eth0+") - - interface: eth2 - # Number of receive threads. "auto" uses number of RSS queues on interface. - #threads: auto - # You can use the following variables to activate netmap tap or IPS mode. - # If copy-mode is set to ips or tap, the traffic coming to the current - # interface will be copied to the copy-iface interface. If 'tap' is set, the - # copy is complete. If 'ips' is set, the packet matching a 'drop' action - # will not be copied. 
- # To specify the OS as the copy-iface (so the OS can route packets, or forward - # to a service running on the same machine) add a plus sign at the end - # (e.g. "copy-iface: eth0+"). Don't forget to set up a symmetrical eth0+ -> eth0 - # for return packets. Hardware checksumming must be *off* on the interface if - # using an OS endpoint (e.g. 'ifconfig eth0 -rxcsum -txcsum -rxcsum6 -txcsum6' for FreeBSD - # or 'ethtool -K eth0 tx off rx off' for Linux). - #copy-mode: tap - #copy-iface: eth3 - # Set to yes to disable promiscuous mode - # disable-promisc: no - # Choose checksum verification mode for the interface. At the moment - # of the capture, some packets may be with an invalid checksum due to - # offloading to the network card of the checksum computation. - # Possible values are: - # - yes: checksum validation is forced - # - no: checksum validation is disabled - # - auto: suricata uses a statistical approach to detect when - # checksum off-loading is used. - # Warning: 'checksum-validation' must be set to yes to have any validation - #checksum-checks: auto - # BPF filter to apply to this interface. The pcap filter syntax apply here. - #bpf-filter: port 80 or udp - #- interface: eth3 - #threads: auto - #copy-mode: tap - #copy-iface: eth2 - # Put default values here - - interface: default - -# PF_RING configuration. for use with native PF_RING support -# for more info see http://www.ntop.org/products/pf_ring/ -pfring: - - interface: eth0 - # Number of receive threads (>1 will enable experimental flow pinned - # runmode) - threads: 1 - - # Default clusterid. PF_RING will load balance packets based on flow. - # All threads/processes that will participate need to have the same - # clusterid. - cluster-id: 99 - - # Default PF_RING cluster type. PF_RING can load balance per flow. - # Possible values are cluster_flow or cluster_round_robin. 
- cluster-type: cluster_flow - # bpf filter for this interface - #bpf-filter: tcp - # Choose checksum verification mode for the interface. At the moment - # of the capture, some packets may be with an invalid checksum due to - # offloading to the network card of the checksum computation. - # Possible values are: - # - rxonly: only compute checksum for packets received by network card. - # - yes: checksum validation is forced - # - no: checksum validation is disabled - # - auto: suricata uses a statistical approach to detect when - # checksum off-loading is used. (default) - # Warning: 'checksum-validation' must be set to yes to have any validation - #checksum-checks: auto - # Second interface - #- interface: eth1 - # threads: 3 - # cluster-id: 93 - # cluster-type: cluster_flow - # Put default values here - - interface: default - #threads: 2 - -# For FreeBSD ipfw(8) divert(4) support. -# Please make sure you have ipfw_load="YES" and ipdivert_load="YES" -# in /etc/loader.conf or kldload'ing the appropriate kernel modules. -# Additionally, you need to have an ipfw rule for the engine to see -# the packets from ipfw. For Example: -# -# ipfw add 100 divert 8000 ip from any to any -# -# The 8000 above should be the same number you passed on the command -# line, i.e. -d 8000 -# -ipfw: - - # Reinject packets at the specified ipfw rule number. This config - # option is the ipfw rule number AT WHICH rule processing continues - # in the ipfw processing system after the engine has finished - # inspecting the packet for acceptance. If no rule number is specified, - # accepted packets are reinjected at the divert rule which they entered - # and IPFW rule processing continues. No check is done to verify - # this will rule makes sense so care must be taken to avoid loops in ipfw. 
- # - ## The following example tells the engine to reinject packets - # back into the ipfw firewall AT rule number 5500: - # - # ipfw-reinjection-rule-number: 5500 - - -napatech: - # The Host Buffer Allowance for all streams - # (-1 = OFF, 1 - 100 = percentage of the host buffer that can be held back) - # This may be enabled when sharing streams with another application. - # Otherwise, it should be turned off. - hba: -1 - - # use_all_streams set to "yes" will query the Napatech service for all configured - # streams and listen on all of them. When set to "no" the streams config array - # will be used. - use-all-streams: yes - - # The streams to listen on. This can be either: - # a list of individual streams (e.g. streams: [0,1,2,3]) - # or - # a range of streams (e.g. streams: ["0-3"]) - streams: ["0-3"] - -# Tilera mpipe configuration. for use on Tilera TILE-Gx. -mpipe: - - # Load balancing modes: "static", "dynamic", "sticky", or "round-robin". - load-balance: dynamic - - # Number of Packets in each ingress packet queue. Must be 128, 512, 2028 or 65536 - iqueue-packets: 2048 - - # List of interfaces we will listen on. - inputs: - - interface: xgbe2 - - interface: xgbe3 - - interface: xgbe4 - - - # Relative weight of memory for packets of each mPipe buffer size. - stack: - size128: 0 - size256: 9 - size512: 0 - size1024: 0 - size1664: 7 - size4096: 0 - size10386: 0 - size16384: 0 - -## -## Hardware accelaration -## - -# Cuda configuration. -cuda: - # The "mpm" profile. On not specifying any of these parameters, the engine's - # internal default values are used, which are same as the ones specified in - # in the default conf file. - mpm: - # The minimum length required to buffer data to the gpu. - # Anything below this is MPM'ed on the CPU. - # Can be specified in kb, mb, gb. Just a number indicates it's in bytes. - # A value of 0 indicates there's no limit. - data-buffer-size-min-limit: 0 - # The maximum length for data that we would buffer to the gpu. 
- # Anything over this is MPM'ed on the CPU. - # Can be specified in kb, mb, gb. Just a number indicates it's in bytes. - data-buffer-size-max-limit: 1500 - # The ring buffer size used by the CudaBuffer API to buffer data. - cudabuffer-buffer-size: 500mb - # The max chunk size that can be sent to the gpu in a single go. - gpu-transfer-size: 50mb - # The timeout limit for batching of packets in microseconds. - batching-timeout: 2000 - # The device to use for the mpm. Currently we don't support load balancing - # on multiple gpus. In case you have multiple devices on your system, you - # can specify the device to use, using this conf. By default we hold 0, to - # specify the first device cuda sees. To find out device-id associated with - # the card(s) on the system run "suricata --list-cuda-cards". - device-id: 0 - # No of Cuda streams used for asynchronous processing. All values > 0 are valid. - # For this option you need a device with Compute Capability > 1.0. - cuda-streams: 2 - -## -## Include other configs -## - -# Includes. Files included here will be handled as if they were -# inlined in this configuration file. -#include: include1.yaml -#include: include2.yaml From 68dd333fbe686f58fdbfaa907898f100c5171605 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 26 May 2020 17:49:11 -0400 Subject: [PATCH 44/76] Remove stats from eve.json --- salt/suricata/files/suricata.yaml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/salt/suricata/files/suricata.yaml b/salt/suricata/files/suricata.yaml index ef06c7f97..8487ec032 100644 --- a/salt/suricata/files/suricata.yaml +++ b/salt/suricata/files/suricata.yaml @@ -261,7 +261,7 @@ outputs: # alerts: yes # log alerts that caused drops # flows: all # start or all: 'start' logs only a single drop # # per flow direction. All logs each dropped pkt. 
- - smtp: + #- smtp: #extended: yes # enable this for extended logging information # this includes: bcc, message-id, subject, x_mailer, user-agent # custom fields logging from the list: @@ -292,10 +292,10 @@ outputs: # to an IP address is logged. # extended: no #- ssh - - stats: - totals: yes # stats for all threads merged together - threads: no # per thread stats - deltas: no # include delta values + #- stats: + # totals: yes # stats for all threads merged together + # threads: no # per thread stats + # deltas: no # include delta values # bi-directional flows #- flow # uni-directional flows From 5d0a7f99e91aee191266d0e122cd158e73bb6e30 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Tue, 26 May 2020 18:45:29 -0400 Subject: [PATCH 45/76] Improve logging of automated setup --- setup/so-setup | 40 ++++++++++++++++++++++++++++++---------- 1 file changed, 30 insertions(+), 10 deletions(-) diff --git a/setup/so-setup b/setup/so-setup index fdc69076b..0d309bfc3 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -28,6 +28,8 @@ automation=$2 automated=no +echo "---- Starting setup at $(date -u) ----" >> $setup_log 2>&1 + function progress() { if [ $automated == no ]; then whiptail --title "Security Onion Install" --gauge 'Please wait while installing' 6 60 0 @@ -35,31 +37,51 @@ function progress() { } if [[ -f automation/$automation && $(basename $automation) == $automation ]]; then - echo "Preselecting variable values based on automated setup: $automation" + echo "Preselecting variable values based on automated setup: $automation" >> $setup_log 2>&1 source automation/$automation automated=yes + echo "Checking network configuration" >> $setup_log 2>&1g + ip a >> $setup_log 2>&1 + attempt=1 attempts=60 - ip a | grep "$MNIC:" | grep "state UP" + ip a | grep "$MNIC:" | grep "state UP" >> $setup_log 2>&1 while [ $? 
-ne 0 ]; do + ip a >> $setup_log 2>&1 if [ $attempt -gt $attempts ]; then - echo "Network unavailable - setup cannot continue" + echo "Network unavailable - setup cannot continue" >> $setup_log 2>&1 exit 1 fi - echo "Waiting for network to come up (attempt $attempt of $attempts)" + echo "Waiting for network to come up (attempt $attempt of $attempts)" >> $setup_log 2>&1 attempt=$((attempt + 1)) sleep 10; - ip a | grep "$MNIC:" | grep "state UP" + ip a | grep "$MNIC:" | grep "state UP" >> $setup_log 2>&1 done + echo "Network is up on $MNIC" >> $setup_log 2>&1 + + attempt=1 + attempts=60 + ping -c google.com >> $setup_log 2>&1 + while [ $? -ne 0 ]; do + if [ $attempt -gt $attempts ]; then + echo "DNS unavailable - setup cannot continue" >> $setup_log 2>&1 + exit 1 + fi + echo "Waiting for DNS to become available (attempt $attempt of $attempts)" >> $setup_log 2>&1 + attempt=$((attempt + 1)) + sleep 10; + ping -c google.com >> $setup_log 2>&1 + done + echo "DNS is available" >> $setup_log 2>&1 fi case "$setup_type" in iso | network) # Accepted values - echo "Beginning Security Onion $setup_type install" + echo "Beginning Security Onion $setup_type install" >> $setup_log 2>&1 ;; *) - echo "Invalid install type, must be 'iso' or 'network'" + echo "Invalid install type, must be 'iso' or 'network'" | tee $setup_log exit 1 ;; esac @@ -67,8 +89,6 @@ esac # Allow execution of SO tools during setup export PATH=$PATH:../salt/common/tools/sbin -date -u > $setup_log 2>&1 - got_root if [[ $# -gt 1 ]]; then @@ -87,7 +107,7 @@ setterm -blank 0 if [ "$setup_type" == 'iso' ] || (whiptail_you_sure); then true else - echo "User cancelled setup." >> $setup_log 2>&1 + echo "User cancelled setup." 
| tee $setup_log whiptail_cancel fi From 65431e0fccc7a82979637ac65cf5f1a353e15c1f Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Tue, 26 May 2020 22:39:18 -0400 Subject: [PATCH 46/76] Do not expect network access to be available prior to the NIC being configured for DHCP/Static --- setup/so-setup | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/setup/so-setup b/setup/so-setup index 0d309bfc3..d18f60089 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -59,21 +59,6 @@ if [[ -f automation/$automation && $(basename $automation) == $automation ]]; th ip a | grep "$MNIC:" | grep "state UP" >> $setup_log 2>&1 done echo "Network is up on $MNIC" >> $setup_log 2>&1 - - attempt=1 - attempts=60 - ping -c google.com >> $setup_log 2>&1 - while [ $? -ne 0 ]; do - if [ $attempt -gt $attempts ]; then - echo "DNS unavailable - setup cannot continue" >> $setup_log 2>&1 - exit 1 - fi - echo "Waiting for DNS to become available (attempt $attempt of $attempts)" >> $setup_log 2>&1 - attempt=$((attempt + 1)) - sleep 10; - ping -c google.com >> $setup_log 2>&1 - done - echo "DNS is available" >> $setup_log 2>&1 fi case "$setup_type" in From f3809cb93d3871eabf722427499ab6d5aeac64e0 Mon Sep 17 00:00:00 2001 From: weslambert Date: Wed, 27 May 2020 08:31:14 -0400 Subject: [PATCH 47/76] Wrap with quotes --- salt/strelka/init.sls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/strelka/init.sls b/salt/strelka/init.sls index a9842924d..5767531f4 100644 --- a/salt/strelka/init.sls +++ b/salt/strelka/init.sls @@ -112,5 +112,5 @@ strelka_filestream: strelka_zeek_extracted_sync: cron.present: - user: root - - name: [ -d /nsm/zeek/extracted/complete/ ] && mv /nsm/zeek/extracted/complete/* /nsm/strelka/ > /dev/null 2>&1 + - name: '[ -d /nsm/zeek/extracted/complete/ ] && mv /nsm/zeek/extracted/complete/* /nsm/strelka/ > /dev/null 2>&1' - minute: '*' From c614e0a8805b87f4e5d00ea0e02607c334eacde2 Mon Sep 17 00:00:00 2001 From: William Wernert Date: Wed, 27 May 
2020 10:11:54 -0400 Subject: [PATCH 48/76] [feat] Add prompt about master needing internet access When package updates go through the master node, that master needs internet access. Therefore, prompt the user about this requirement. Resolves #146 --- setup/so-setup | 3 +++ setup/so-whiptail | 10 ++++++++++ 2 files changed, 13 insertions(+) diff --git a/setup/so-setup b/setup/so-setup index fdc69076b..c07cca70b 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -231,6 +231,9 @@ fi if [[ $is_distmaster || ( $is_sensor || $is_node ) && ! $is_eval ]]; then whiptail_master_updates + if [[ $setup_type == 'network' && $MASTERUPDATES == 1 ]]; then + whiptail_master_updates_warning + fi fi if [[ $is_minion ]]; then diff --git a/setup/so-whiptail b/setup/so-whiptail index 72455fc9e..31a0c05ec 100755 --- a/setup/so-whiptail +++ b/setup/so-whiptail @@ -1027,7 +1027,17 @@ whiptail_master_updates() { ;; esac +} +whiptail_master_updates_warning() { + [ -n "$TESTING" ] && return + + whiptail --title "Security Onion Setup"\ + --msgbox "Updating through the master node requires the master to have internet access, press ENTER to continue"\ + 8 75 + + local exitstatus=$? 
+ whiptail_check_exitstatus $exitstatus } whiptail_node_updates() { From 9cc2614cc8d31ead8b608bfe6d8c87e9b47ec288 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Wed, 27 May 2020 11:47:18 -0400 Subject: [PATCH 49/76] Automated setup will now output progress data to sosetup.log --- setup/so-functions | 4 +++- setup/so-setup | 2 ++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/setup/so-functions b/setup/so-functions index 612b0147b..f6abdb047 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -998,7 +998,9 @@ parse_options() { export {https,ftp,rsync,all}_proxy="$http_proxy" ;; *) - echo "Invalid option" + if [[ $1 = --* ]]; then + echo "Invalid option" + fi esac } diff --git a/setup/so-setup b/setup/so-setup index 4e004b425..9ddb35a0a 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -33,6 +33,8 @@ echo "---- Starting setup at $(date -u) ----" >> $setup_log 2>&1 function progress() { if [ $automated == no ]; then whiptail --title "Security Onion Install" --gauge 'Please wait while installing' 6 60 0 + else + cat >> $setup_log 2>&1 fi } From b7a0f79038c7c5d306ba8c79f1223d9a6f8b67bc Mon Sep 17 00:00:00 2001 From: Wes Lambert Date: Wed, 27 May 2020 16:58:31 +0000 Subject: [PATCH 50/76] Update Suricata init --- salt/suricata/init.sls | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/salt/suricata/init.sls b/salt/suricata/init.sls index 39f419ad0..547eee863 100644 --- a/salt/suricata/init.sls +++ b/salt/suricata/init.sls @@ -55,6 +55,12 @@ surilogdir: - user: 940 - group: 939 +suridatadir: + file.directory: + - name: /nsm/suricata + - user: 940 + - group: 939 + surirulesync: file.recurse: - name: /opt/so/conf/suricata/rules/ @@ -119,6 +125,7 @@ so-suricata: - /opt/so/conf/suricata/threshold.conf:/etc/suricata/threshold.conf:ro - /opt/so/conf/suricata/rules:/etc/suricata/rules:ro - /opt/so/log/suricata/:/var/log/suricata/:rw + - /nsm/suricata/:/nsm/suricata/:rw - /opt/so/conf/suricata/bpf:/etc/suricata/bpf:ro - network_mode: host - watch: 
From e78a3f32780b0c8c6c5093a5161479b881a92621 Mon Sep 17 00:00:00 2001 From: Wes Lambert Date: Wed, 27 May 2020 16:59:26 +0000 Subject: [PATCH 51/76] update Suricata config --- salt/suricata/files/suricata.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/suricata/files/suricata.yaml b/salt/suricata/files/suricata.yaml index 5a0121b63..65465806f 100644 --- a/salt/suricata/files/suricata.yaml +++ b/salt/suricata/files/suricata.yaml @@ -99,7 +99,7 @@ outputs: - eve-log: enabled: yes filetype: regular #regular|syslog|unix_dgram|unix_stream|redis - filename: eve.json + filename: /nsm/eve.json rotate-interval: day community-id: true community-id-seed: 0 @@ -918,7 +918,7 @@ host-mode: auto # If you are using the CUDA pattern matcher (mpm-algo: ac-cuda), different rules # apply. In that case try something like 60000 or more. This is because the CUDA # pattern matcher buffers and scans as many packets as possible in parallel. -#max-pending-packets: 1024 +max-pending-packets: 5000 # Runmode the engine should use. Please check --list-runmodes to get the available # runmodes for each packet acquisition method. 
Defaults to "autofp" (auto flow pinned From 3684cdf1c6e07011daee9e84c2e1d469d1728922 Mon Sep 17 00:00:00 2001 From: Wes Lambert Date: Wed, 27 May 2020 17:00:09 +0000 Subject: [PATCH 52/76] Update FB and LS --- salt/filebeat/init.sls | 2 +- salt/logstash/init.sls | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/filebeat/init.sls b/salt/filebeat/init.sls index 409594b2d..e5dc78d33 100644 --- a/salt/filebeat/init.sls +++ b/salt/filebeat/init.sls @@ -57,7 +57,7 @@ so-filebeat: - /opt/so/conf/filebeat/etc/filebeat.yml:/usr/share/filebeat/filebeat.yml:ro - /nsm/zeek:/nsm/zeek:ro - /nsm/strelka/log:/nsm/strelka/log:ro - - /opt/so/log/suricata:/suricata:ro + - /nsm/suricata:/suricata:ro - /opt/so/wazuh/logs/alerts:/wazuh/alerts:ro - /opt/so/wazuh/logs/archives:/wazuh/archives:ro - /nsm/osquery/fleet/:/nsm/osquery/fleet:ro diff --git a/salt/logstash/init.sls b/salt/logstash/init.sls index ba0e015f4..1118b6807 100644 --- a/salt/logstash/init.sls +++ b/salt/logstash/init.sls @@ -198,7 +198,7 @@ so-logstash: - /etc/pki/ca.crt:/usr/share/filebeat/ca.crt:ro {%- if grains['role'] == 'so-eval' %} - /nsm/zeek:/nsm/zeek:ro - - /opt/so/log/suricata:/suricata:ro + - /nsm/suricata:/suricata:ro - /opt/so/wazuh/logs/alerts:/wazuh/alerts:ro - /opt/so/wazuh/logs/archives:/wazuh/archives:ro - /opt/so/log/fleet/:/osquery/logs:ro From d56bc4c167937b850a3871c1dc79aee83fffd579 Mon Sep 17 00:00:00 2001 From: Wes Lambert Date: Wed, 27 May 2020 17:01:05 +0000 Subject: [PATCH 53/76] fix path --- salt/suricata/init.sls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/suricata/init.sls b/salt/suricata/init.sls index 547eee863..0f3d49bc3 100644 --- a/salt/suricata/init.sls +++ b/salt/suricata/init.sls @@ -125,7 +125,7 @@ so-suricata: - /opt/so/conf/suricata/threshold.conf:/etc/suricata/threshold.conf:ro - /opt/so/conf/suricata/rules:/etc/suricata/rules:ro - /opt/so/log/suricata/:/var/log/suricata/:rw - - /nsm/suricata/:/nsm/suricata/:rw + - 
/nsm/suricata/:/nsm/:rw - /opt/so/conf/suricata/bpf:/etc/suricata/bpf:ro - network_mode: host - watch: From b26f6826dd6a7c56fab1d9dc8a6a11a05d309a31 Mon Sep 17 00:00:00 2001 From: Wes Lambert Date: Wed, 27 May 2020 17:17:55 +0000 Subject: [PATCH 54/76] Rename Hive to TheHive in several places --- salt/hive/thehive/scripts/hive_init | 64 ------------------- salt/{hive => }/thehive/etc/application.conf | 2 +- .../thehive/etc/cortex-application.conf | 2 +- .../thehive/etc/es/elasticsearch.yml | 2 +- .../thehive/etc/es/log4j2.properties | 0 salt/{hive => thehive}/init.sls | 40 ++++++------ salt/{hive => }/thehive/scripts/cortex_init | 0 salt/thehive/scripts/hive_init | 64 +++++++++++++++++++ 8 files changed, 87 insertions(+), 87 deletions(-) delete mode 100755 salt/hive/thehive/scripts/hive_init rename salt/{hive => }/thehive/etc/application.conf (99%) rename salt/{hive => }/thehive/etc/cortex-application.conf (99%) rename salt/{hive => }/thehive/etc/es/elasticsearch.yml (95%) rename salt/{hive => }/thehive/etc/es/log4j2.properties (100%) rename salt/{hive => thehive}/init.sls (70%) rename salt/{hive => }/thehive/scripts/cortex_init (100%) create mode 100755 salt/thehive/scripts/hive_init diff --git a/salt/hive/thehive/scripts/hive_init b/salt/hive/thehive/scripts/hive_init deleted file mode 100755 index b1ef62d68..000000000 --- a/salt/hive/thehive/scripts/hive_init +++ /dev/null @@ -1,64 +0,0 @@ -#!/bin/bash -{% set MASTERIP = salt['pillar.get']('static:masterip', '') %} -{%- set HIVEUSER = salt['pillar.get']('static:hiveuser', '') %} -{%- set HIVEPASSWORD = salt['pillar.get']('static:hivepassword', '') %} -{%- set HIVEKEY = salt['pillar.get']('static:hivekey', '') %} - -hive_init(){ - sleep 120 - HIVE_IP="{{MASTERIP}}" - HIVE_USER="{{HIVEUSER}}" - HIVE_PASSWORD="{{HIVEPASSWORD}}" - HIVE_KEY="{{HIVEKEY}}" - SOCTOPUS_CONFIG="/opt/so/saltstack/salt/soctopus/files/SOCtopus.conf" - - echo -n "Waiting for TheHive..." 
- COUNT=0 - HIVE_CONNECTED="no" - while [[ "$COUNT" -le 240 ]]; do - curl --output /dev/null --silent --head --fail -k "https://$HIVE_IP/thehive" - if [ $? -eq 0 ]; then - HIVE_CONNECTED="yes" - echo "connected!" - break - else - ((COUNT+=1)) - sleep 1 - echo -n "." - fi - done - - if [ "$HIVE_CONNECTED" == "yes" ]; then - - # Migrate DB - curl -v -k -XPOST "https://$HIVE_IP:/thehive/api/maintenance/migrate" - - # Create intial TheHive user - curl -v -k "https://$HIVE_IP/thehive/api/user" -H "Content-Type: application/json" -d "{\"login\" : \"$HIVE_USER\",\"name\" : \"$HIVE_USER\",\"roles\" : [\"read\",\"alert\",\"write\",\"admin\"],\"preferences\" : \"{}\",\"password\" : \"$HIVE_PASSWORD\", \"key\": \"$HIVE_KEY\"}" - - # Pre-load custom fields - # - # reputation - curl -v -k "https://$HIVE_IP/thehive/api/list/custom_fields" -H "Authorization: Bearer $HIVE_KEY" -H "Content-Type: application/json" -d "{\"value\":{\"name\": \"reputation\", \"reference\": \"reputation\", \"description\": \"This field provides an overall reputation status for an address/domain.\", \"type\": \"string\", \"options\": []}}" - - - touch /opt/so/state/thehive.txt - else - echo "We experienced an issue connecting to TheHive!" - fi -} - -if [ -f /opt/so/state/thehive.txt ]; then - exit 0 -else - rm -f garbage_file - while ! wget -O garbage_file {{MASTERIP}}:9400 2>/dev/null - do - echo "Waiting for Elasticsearch..." 
- rm -f garbage_file - sleep 1 - done - rm -f garbage_file - sleep 5 - hive_init -fi diff --git a/salt/hive/thehive/etc/application.conf b/salt/thehive/etc/application.conf similarity index 99% rename from salt/hive/thehive/etc/application.conf rename to salt/thehive/etc/application.conf index 230d87d67..8630cb386 100644 --- a/salt/hive/thehive/etc/application.conf +++ b/salt/thehive/etc/application.conf @@ -12,7 +12,7 @@ search { # Name of the index index = the_hive # Name of the Elasticsearch cluster - cluster = hive + cluster = thehive # Address of the Elasticsearch instance host = ["{{ MASTERIP }}:9500"] #search.uri = "http://{{ MASTERIP }}:9500" diff --git a/salt/hive/thehive/etc/cortex-application.conf b/salt/thehive/etc/cortex-application.conf similarity index 99% rename from salt/hive/thehive/etc/cortex-application.conf rename to salt/thehive/etc/cortex-application.conf index 356bfd7b3..1a887cdb3 100644 --- a/salt/hive/thehive/etc/cortex-application.conf +++ b/salt/thehive/etc/cortex-application.conf @@ -12,7 +12,7 @@ search { # Name of the index index = cortex # Name of the Elasticsearch cluster - cluster = hive + cluster = thehive # Address of the Elasticsearch instance host = ["{{ MASTERIP }}:9500"] # Scroll keepalive diff --git a/salt/hive/thehive/etc/es/elasticsearch.yml b/salt/thehive/etc/es/elasticsearch.yml similarity index 95% rename from salt/hive/thehive/etc/es/elasticsearch.yml rename to salt/thehive/etc/es/elasticsearch.yml index d00c01d5d..7f268a671 100644 --- a/salt/hive/thehive/etc/es/elasticsearch.yml +++ b/salt/thehive/etc/es/elasticsearch.yml @@ -1,4 +1,4 @@ -cluster.name: "hive" +cluster.name: "thehive" network.host: 0.0.0.0 discovery.zen.minimum_master_nodes: 1 # This is a test -- if this is here, then the volume is mounted correctly. 
diff --git a/salt/hive/thehive/etc/es/log4j2.properties b/salt/thehive/etc/es/log4j2.properties similarity index 100% rename from salt/hive/thehive/etc/es/log4j2.properties rename to salt/thehive/etc/es/log4j2.properties diff --git a/salt/hive/init.sls b/salt/thehive/init.sls similarity index 70% rename from salt/hive/init.sls rename to salt/thehive/init.sls index 2be2f7480..732fe4a77 100644 --- a/salt/hive/init.sls +++ b/salt/thehive/init.sls @@ -1,24 +1,24 @@ {% set MASTERIP = salt['pillar.get']('master:mainip', '') %} {% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %} {% set MASTER = salt['grains.get']('master') %} -hiveconfdir: +thehiveconfdir: file.directory: - - name: /opt/so/conf/hive/etc + - name: /opt/so/conf/thehive/etc - makedirs: True - user: 939 - group: 939 -hivelogdir: +thehivelogdir: file.directory: - - name: /opt/so/log/hive + - name: /opt/so/log/thehive - makedirs: True - user: 939 - group: 939 -hiveconf: +thehiveconf: file.recurse: - - name: /opt/so/conf/hive/etc - - source: salt://hive/thehive/etc + - name: /opt/so/conf/thehive/etc + - source: salt://thehive/etc - user: 939 - group: 939 - template: jinja @@ -40,7 +40,7 @@ cortexlogdir: cortexconf: file.recurse: - name: /opt/so/conf/cortex - - source: salt://hive/thehive/etc + - source: salt://thehive/etc - user: 939 - group: 939 - template: jinja @@ -48,9 +48,9 @@ cortexconf: # Install Elasticsearch # Made directory for ES data to live in -hiveesdata: +thehiveesdata: file.directory: - - name: /nsm/hive/esdata + - name: /nsm/thehive/esdata - makedirs: True - user: 939 - group: 939 @@ -64,16 +64,16 @@ so-thehive-es: - interactive: True - tty: True - binds: - - /nsm/hive/esdata:/usr/share/elasticsearch/data:rw - - /opt/so/conf/hive/etc/es/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro - - /opt/so/conf/hive/etc/es/log4j2.properties:/usr/share/elasticsearch/config/log4j2.properties:ro - - /opt/so/log/hive:/var/log/elasticsearch:rw + - 
/nsm/thehive/esdata:/usr/share/elasticsearch/data:rw + - /opt/so/conf/thehive/etc/es/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro + - /opt/so/conf/thehive/etc/es/log4j2.properties:/usr/share/elasticsearch/config/log4j2.properties:ro + - /opt/so/log/thehive:/var/log/elasticsearch:rw - environment: - http.host=0.0.0.0 - http.port=9400 - transport.tcp.port=9500 - transport.host=0.0.0.0 - - cluster.name=hive + - cluster.name=thehive - thread_pool.index.queue_size=100000 - thread_pool.search.queue_size=100000 - thread_pool.bulk.queue_size=100000 @@ -90,13 +90,13 @@ so-cortex: - name: so-cortex - user: 939 - binds: - - /opt/so/conf/hive/etc/cortex-application.conf:/opt/cortex/conf/application.conf:ro + - /opt/so/conf/thehive/etc/cortex-application.conf:/opt/cortex/conf/application.conf:ro - port_bindings: - 0.0.0.0:9001:9001 cortexscript: cmd.script: - - source: salt://hive/thehive/scripts/cortex_init + - source: salt://thehive/scripts/cortex_init - cwd: /opt/so - template: jinja @@ -109,12 +109,12 @@ so-thehive: - name: so-thehive - user: 939 - binds: - - /opt/so/conf/hive/etc/application.conf:/opt/thehive/conf/application.conf:ro + - /opt/so/conf/thehive/etc/application.conf:/opt/thehive/conf/application.conf:ro - port_bindings: - 0.0.0.0:9000:9000 -hivescript: +thehivescript: cmd.script: - - source: salt://hive/thehive/scripts/hive_init + - source: salt://thehive/scripts/hive_init - cwd: /opt/so - template: jinja diff --git a/salt/hive/thehive/scripts/cortex_init b/salt/thehive/scripts/cortex_init similarity index 100% rename from salt/hive/thehive/scripts/cortex_init rename to salt/thehive/scripts/cortex_init diff --git a/salt/thehive/scripts/hive_init b/salt/thehive/scripts/hive_init new file mode 100755 index 000000000..296004e77 --- /dev/null +++ b/salt/thehive/scripts/hive_init @@ -0,0 +1,64 @@ +#!/bin/bash +{% set MASTERIP = salt['pillar.get']('static:masterip', '') %} +{%- set THEHIVEUSER = salt['pillar.get']('static:hiveuser', '') %} 
+{%- set THEHIVEPASSWORD = salt['pillar.get']('static:hivepassword', '') %} +{%- set THEHIVEKEY = salt['pillar.get']('static:hivekey', '') %} + +thehive_init(){ + sleep 120 + THEHIVE_IP="{{MASTERIP}}" + THEHIVE_USER="{{THEHIVEUSER}}" + THEHIVE_PASSWORD="{{THEHIVEPASSWORD}}" + THEHIVE_KEY="{{THEHIVEKEY}}" + SOCTOPUS_CONFIG="/opt/so/saltstack/salt/soctopus/files/SOCtopus.conf" + + echo -n "Waiting for TheHive..." + COUNT=0 + THEHIVE_CONNECTED="no" + while [[ "$COUNT" -le 240 ]]; do + curl --output /dev/null --silent --head --fail -k "https://$THEHIVE_IP/thehive" + if [ $? -eq 0 ]; then + THEHIVE_CONNECTED="yes" + echo "connected!" + break + else + ((COUNT+=1)) + sleep 1 + echo -n "." + fi + done + + if [ "$THEHIVE_CONNECTED" == "yes" ]; then + + # Migrate DB + curl -v -k -XPOST "https://$THEHIVE_IP/thehive/api/maintenance/migrate" + + # Create initial TheHive user + curl -v -k "https://$THEHIVE_IP/thehive/api/user" -H "Content-Type: application/json" -d "{\"login\" : \"$THEHIVE_USER\",\"name\" : \"$THEHIVE_USER\",\"roles\" : [\"read\",\"alert\",\"write\",\"admin\"],\"preferences\" : \"{}\",\"password\" : \"$THEHIVE_PASSWORD\", \"key\": \"$THEHIVE_KEY\"}" + + # Pre-load custom fields + # + # reputation + curl -v -k "https://$THEHIVE_IP/thehive/api/list/custom_fields" -H "Authorization: Bearer $THEHIVE_KEY" -H "Content-Type: application/json" -d "{\"value\":{\"name\": \"reputation\", \"reference\": \"reputation\", \"description\": \"This field provides an overall reputation status for an address/domain.\", \"type\": \"string\", \"options\": []}}" + + + touch /opt/so/state/thehive.txt + else + echo "We experienced an issue connecting to TheHive!" + fi +} + +if [ -f /opt/so/state/thehive.txt ]; then + exit 0 +else + rm -f garbage_file + while ! wget -O garbage_file {{MASTERIP}}:9400 2>/dev/null + do + echo "Waiting for Elasticsearch..."
+ rm -f garbage_file + sleep 1 + done + rm -f garbage_file + sleep 5 + thehive_init +fi From 1952246c85d2cf85dd444d39ff6e7fca1d822ed5 Mon Sep 17 00:00:00 2001 From: William Wernert Date: Wed, 27 May 2020 13:23:37 -0400 Subject: [PATCH 55/76] [feat] Add flag to run so-allow automatically post-setup --- setup/automation/pm_standalone_defaults | 1 + setup/so-functions | 3 +++ setup/so-setup | 1 + 3 files changed, 5 insertions(+) diff --git a/setup/automation/pm_standalone_defaults b/setup/automation/pm_standalone_defaults index b5a6258ff..166d4a5d7 100644 --- a/setup/automation/pm_standalone_defaults +++ b/setup/automation/pm_standalone_defaults @@ -34,6 +34,7 @@ HNMASTER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12 HNSENSOR=inherit HOSTNAME=standalone install_type=STANDALONE +IP=192.168.0.0/16 # LSINPUTBATCHCOUNT= # LSINPUTTHREADS= # LSPIPELINEBATCH= diff --git a/setup/so-functions b/setup/so-functions index f6abdb047..bedb9e204 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -997,6 +997,9 @@ parse_options() { export {https,ftp,rsync,all}_proxy="$http_proxy" ;; + "--allow-analyst"|"--allow=a") + allow='a' + ;; *) if [[ $1 = --* ]]; then echo "Invalid option" diff --git a/setup/so-setup b/setup/so-setup index 9ddb35a0a..2e8d9b85b 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -540,6 +540,7 @@ fi success=$(tail -10 $setup_log | grep Failed | awk '{ print $2}') if [[ "$success" = 0 ]]; then whiptail_setup_complete + if [[ -n $allow ]]; then so-allow -$allow; fi if [[ $THEHIVE == 1 ]]; then check_hive_init_then_reboot else From b80eb5f73b0cb350c7a1f2f2b0adff04bde9323d Mon Sep 17 00:00:00 2001 From: Wes Lambert Date: Wed, 27 May 2020 17:43:17 +0000 Subject: [PATCH 56/76] Add Hive/ES/Cortex scripts --- salt/common/tools/sbin/so-cortex-restart | 5 +++-- salt/common/tools/sbin/so-cortex-start | 2 +- salt/common/tools/sbin/so-cortex-stop | 2 +- salt/common/tools/sbin/so-thehive-es-restart | 21 ++++++++++++++++++++ salt/common/tools/sbin/so-thehive-es-start | 20 
+++++++++++++++++++ salt/common/tools/sbin/so-thehive-es-stop | 20 +++++++++++++++++++ salt/common/tools/sbin/so-thehive-restart | 2 +- salt/common/tools/sbin/so-thehive-stop | 2 +- 8 files changed, 68 insertions(+), 6 deletions(-) create mode 100755 salt/common/tools/sbin/so-thehive-es-restart create mode 100755 salt/common/tools/sbin/so-thehive-es-start create mode 100755 salt/common/tools/sbin/so-thehive-es-stop diff --git a/salt/common/tools/sbin/so-cortex-restart b/salt/common/tools/sbin/so-cortex-restart index ef0e3e4fe..841ca1bb6 100755 --- a/salt/common/tools/sbin/so-cortex-restart +++ b/salt/common/tools/sbin/so-cortex-restart @@ -1,5 +1,5 @@ #!/bin/bash - +# # Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC # # This program is free software: you can redistribute it and/or modify @@ -17,4 +17,5 @@ . /usr/sbin/so-common -/usr/sbin/so-restart cortex $1 +/usr/sbin/so-stop cortex $1 +/usr/sbin/so-start thehive $1 diff --git a/salt/common/tools/sbin/so-cortex-start b/salt/common/tools/sbin/so-cortex-start index a08969cab..92fe88bb5 100755 --- a/salt/common/tools/sbin/so-cortex-start +++ b/salt/common/tools/sbin/so-cortex-start @@ -17,4 +17,4 @@ . 
/usr/sbin/so-common -/usr/sbin/so-start cortex $1 +/usr/sbin/so-start thehive $1 diff --git a/salt/common/tools/sbin/so-cortex-stop b/salt/common/tools/sbin/so-cortex-stop index a13d1e2e3..727b2c7fa 100755 --- a/salt/common/tools/sbin/so-cortex-stop +++ b/salt/common/tools/sbin/so-cortex-stop @@ -1,5 +1,5 @@ #!/bin/bash - +# # Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC # # This program is free software: you can redistribute it and/or modify diff --git a/salt/common/tools/sbin/so-thehive-es-restart b/salt/common/tools/sbin/so-thehive-es-restart new file mode 100755 index 000000000..d58caecdc --- /dev/null +++ b/salt/common/tools/sbin/so-thehive-es-restart @@ -0,0 +1,21 @@ +#!/bin/bash +# +# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +. 
/usr/sbin/so-common + +/usr/sbin/so-stop thehive-es $1 +/usr/sbin/so-start thehive $1 diff --git a/salt/common/tools/sbin/so-thehive-es-start b/salt/common/tools/sbin/so-thehive-es-start new file mode 100755 index 000000000..92fe88bb5 --- /dev/null +++ b/salt/common/tools/sbin/so-thehive-es-start @@ -0,0 +1,20 @@ +#!/bin/bash + +# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +. /usr/sbin/so-common + +/usr/sbin/so-start thehive $1 diff --git a/salt/common/tools/sbin/so-thehive-es-stop b/salt/common/tools/sbin/so-thehive-es-stop new file mode 100755 index 000000000..cf9cc2310 --- /dev/null +++ b/salt/common/tools/sbin/so-thehive-es-stop @@ -0,0 +1,20 @@ +#!/bin/bash +# +# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +. /usr/sbin/so-common + +/usr/sbin/so-stop thehive-es $1 diff --git a/salt/common/tools/sbin/so-thehive-restart b/salt/common/tools/sbin/so-thehive-restart index 08cd8318e..4b28c0030 100755 --- a/salt/common/tools/sbin/so-thehive-restart +++ b/salt/common/tools/sbin/so-thehive-restart @@ -1,5 +1,5 @@ #!/bin/bash - +# # Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC # # This program is free software: you can redistribute it and/or modify diff --git a/salt/common/tools/sbin/so-thehive-stop b/salt/common/tools/sbin/so-thehive-stop index b326f699c..6c56e0473 100755 --- a/salt/common/tools/sbin/so-thehive-stop +++ b/salt/common/tools/sbin/so-thehive-stop @@ -1,5 +1,5 @@ #!/bin/bash - +# # Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC # # This program is free software: you can redistribute it and/or modify From 769f5674ebd86231316d6aca5090e05df2a18b65 Mon Sep 17 00:00:00 2001 From: William Wernert Date: Wed, 27 May 2020 14:08:27 -0400 Subject: [PATCH 57/76] [fix] Always call set_default_log_size --- setup/so-common-functions | 28 ---------------------------- setup/so-functions | 27 +++++++++++++++++++++++++++ setup/so-setup | 7 ++++++- setup/so-whiptail | 1 - 4 files changed, 33 insertions(+), 30 deletions(-) diff --git a/setup/so-common-functions b/setup/so-common-functions index 15cb3e686..fc380f85b 100644 --- a/setup/so-common-functions +++ b/setup/so-common-functions @@ -38,31 +38,3 @@ calculate_useable_cores() { if [ "$lb_procs_round" -lt 1 ]; then lb_procs=1; else lb_procs=$lb_procs_round; fi export lb_procs } - -set_defaul_log_size() { - local percentage - - case $INSTALLTYPE in - EVAL | HEAVYNODE) - percentage=50 - ;; - *) - percentage=80 - ;; - esac - - local disk_dir="/" - if [ -d /nsm ]; then - disk_dir="/nsm" - fi - local disk_size_1k - disk_size_1k=$(df $disk_dir | grep -v "^Filesystem" 
| awk '{print $2}') - - local ratio="1048576" - - local disk_size_gb - disk_size_gb=$( echo "$disk_size_1k" "$ratio" | awk '{print($1/$2)}' ) - - log_size_limit=$( echo "$disk_size_gb" "$percentage" | awk '{printf("%.0f", $1 * ($2/100))}') - export log_size_limit -} diff --git a/setup/so-functions b/setup/so-functions index bedb9e204..61bb256ce 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1336,6 +1336,33 @@ sensor_pillar() { cat "$pillar_file" >> "$setup_log" 2>&1 } +set_default_log_size() { + local percentage + + case $INSTALLTYPE in + EVAL | HEAVYNODE) + percentage=50 + ;; + *) + percentage=80 + ;; + esac + + local disk_dir="/" + if [ -d /nsm ]; then + disk_dir="/nsm" + fi + local disk_size_1k + disk_size_1k=$(df $disk_dir | grep -v "^Filesystem" | awk '{print $2}') + + local ratio="1048576" + + local disk_size_gb + disk_size_gb=$( echo "$disk_size_1k" "$ratio" | awk '{print($1/$2)}' ) + + log_size_limit=$( echo "$disk_size_gb" "$percentage" | awk '{printf("%.0f", $1 * ($2/100))}') +} + set_hostname() { set_hostname_iso diff --git a/setup/so-setup b/setup/so-setup index 2e8d9b85b..f3972b299 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -177,17 +177,21 @@ echo "MINION_ID = $MINION_ID" >> $setup_log 2>&1 minion_type=$(get_minion_type) -# Set any constants needed +# Set any variables needed +set_default_log_size >> $setup_log 2>&1 + if [[ $is_helix ]]; then RULESETUP=ETOPEN NSMSETUP=BASIC HNSENSOR=inherit MASTERUPDATES=0 fi + if [[ $is_helix || ( $is_master && $is_node ) ]]; then RULESETUP=ETOPEN NSMSETUP=BASIC fi + if [[ $is_master && $is_node ]]; then LSPIPELINEWORKERS=1 LSPIPELINEBATCH=125 @@ -196,6 +200,7 @@ if [[ $is_master && $is_node ]]; then NIDS=Suricata BROVERSION=ZEEK fi + if [[ $is_node ]]; then CURCLOSEDAYS=30 fi diff --git a/setup/so-whiptail b/setup/so-whiptail index 31a0c05ec..9ba4ebc20 100755 --- a/setup/so-whiptail +++ b/setup/so-whiptail @@ -413,7 +413,6 @@ whiptail_log_size_limit() { [ -n "$TESTING" ] && return - 
set_defaul_log_size log_size_limit=$(whiptail --title "Security Onion Setup" --inputbox \ "Please specify the amount of disk space (in GB) you would like to allocate for Elasticsearch data storage. \ From 6a935b5452f6fdb8652407f576ee9d59294af9bc Mon Sep 17 00:00:00 2001 From: weslambert Date: Wed, 27 May 2020 15:43:41 -0400 Subject: [PATCH 58/76] Hive to TheHive --- salt/top.sls | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/salt/top.sls b/salt/top.sls index 95acae1fd..4d60b01c0 100644 --- a/salt/top.sls +++ b/salt/top.sls @@ -100,7 +100,7 @@ base: - schedule - soctopus {%- if THEHIVE != 0 %} - - hive + - thehive {%- endif %} {%- if PLAYBOOK != 0 %} - playbook @@ -149,7 +149,7 @@ base: {%- endif %} - soctopus {%- if THEHIVE != 0 %} - - hive + - thehive {%- endif %} {%- if PLAYBOOK != 0 %} - playbook @@ -203,7 +203,7 @@ base: - schedule - soctopus {%- if THEHIVE != 0 %} - - hive + - thehive {%- endif %} {%- if PLAYBOOK != 0 %} - playbook @@ -318,7 +318,7 @@ base: {%- endif %} - soctopus {%- if THEHIVE != 0 %} - - hive + - thehive {%- endif %} {%- if PLAYBOOK != 0 %} - playbook From 3712eb0acb4e3cd786a09a18ff59f9166225860c Mon Sep 17 00:00:00 2001 From: William Wernert Date: Wed, 27 May 2020 15:49:41 -0400 Subject: [PATCH 59/76] [fix] Redirect so-allow output to log --- setup/so-functions | 2 +- setup/so-setup | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/setup/so-functions b/setup/so-functions index 61bb256ce..b86665a1f 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -998,7 +998,7 @@ parse_options() { export {https,ftp,rsync,all}_proxy="$http_proxy" ;; "--allow-analyst"|"--allow=a") - allow='a' + export allow='a' ;; *) if [[ $1 = --* ]]; then diff --git a/setup/so-setup b/setup/so-setup index f3972b299..0d1123d10 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -545,7 +545,9 @@ fi success=$(tail -10 $setup_log | grep Failed | awk '{ print $2}') if [[ "$success" = 0 ]]; then whiptail_setup_complete - 
if [[ -n $allow ]]; then so-allow -$allow; fi + if [[ -n $allow ]]; then + so-allow -$allow >> $setup_log 2>&1 + fi if [[ $THEHIVE == 1 ]]; then check_hive_init_then_reboot else From 1e5d5397a409c9bcd114c7dcec49c055c4ecb2e8 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Wed, 27 May 2020 19:42:48 -0400 Subject: [PATCH 60/76] Support multiple command line options for setup, along with dynamic values per option --- setup/so-functions | 53 ---------------------------------------------- setup/so-setup | 29 +++++++++++++++++-------- 2 files changed, 20 insertions(+), 62 deletions(-) diff --git a/setup/so-functions b/setup/so-functions index b86665a1f..6707e6841 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -954,59 +954,6 @@ node_pillar() { cat "$pillar_file" >> "$setup_log" 2>&1 } -parse_options() { - case "$1" in - --turbo=*) - local proxy - proxy=$(echo "$1" | tr -d '"' | awk -F'--turbo=' '{print $2}') - proxy_url="http://$proxy" - TURBO="$proxy_url" - ;; - --proxy=*) - local proxy - proxy=$(echo "$1" | tr -d '"' | awk -F'--proxy=' '{print $2}') - - local proxy_protocol - proxy_protocol=$(echo "$proxy" | awk 'match($0, /http|https/) { print substr($0, RSTART, RLENGTH) }') - - if [[ ! $proxy_protocol =~ ^(http|https)$ ]]; then - echo "Invalid proxy protocol" - echo "Ignoring proxy" - return - fi - - if [[ $2 == --proxy-user=* && $3 == --proxy-pass=* ]]; then - local proxy_user - local proxy_password - proxy_user=$(echo "$2" | tr -d '"' | awk -F'--proxy-user=' '{print $2}') - proxy_password=$(echo "$3" | tr -d '"' | awk -F'--proxy-pass=' '{print $2}') - - local proxy_addr - proxy_addr=$(echo "$proxy" | awk -F'http\:\/\/|https\:\/\/' '{print $2}') - - export http_proxy="${proxy_protocol}://${proxy_user}:${proxy_password}@${proxy_addr}" - - elif [[ (-z $2 || -z $3) && (-n $2 || -n $3) || ( -n $2 && -n $3 && ($2 != --proxy-user=* || $3 != --proxy-pass=*) ) ]]; then - echo "Invalid options passed for proxy. 
Order is --proxy-user= --proxy-pass=" - echo "Ignoring proxy" - return - - else - export http_proxy="$proxy" - fi - - export {https,ftp,rsync,all}_proxy="$http_proxy" - ;; - "--allow-analyst"|"--allow=a") - export allow='a' - ;; - *) - if [[ $1 = --* ]]; then - echo "Invalid option" - fi - esac -} - patch_pillar() { local pillar_file=$temp_install_dir/pillar/minions/$MINION_ID.sls diff --git a/setup/so-setup b/setup/so-setup index 0d1123d10..6b002a157 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -21,15 +21,31 @@ source ./so-common-functions source ./so-whiptail source ./so-variables +# Parse command line arguments setup_type=$1 -export setup_type - automation=$2 -automated=no +while [[ $# -gt 0 ]]; do + arg="$1" + shift + case "$arg" in + "--turbo="* ) + export TURBO="http://${arg#*=}";; + "--proxy="* ) + export {http,https,ftp,rsync,all}_proxy="${arg#*=}";; + "--allow="* ) + export allow="${arg#*=}";; + * ) + if [[ "$arg" == "--"* ]]; then + echo "Invalid option" + fi + esac +done +# Begin Installation pre-processing echo "---- Starting setup at $(date -u) ----" >> $setup_log 2>&1 +automated=no function progress() { if [ $automated == no ]; then whiptail --title "Security Onion Install" --gauge 'Please wait while installing' 6 60 0 @@ -43,7 +59,7 @@ if [[ -f automation/$automation && $(basename $automation) == $automation ]]; th source automation/$automation automated=yes - echo "Checking network configuration" >> $setup_log 2>&1g + echo "Checking network configuration" >> $setup_log 2>&1 ip a >> $setup_log 2>&1 attempt=1 @@ -78,11 +94,6 @@ export PATH=$PATH:../salt/common/tools/sbin got_root -if [[ $# -gt 1 ]]; then - set -- "${@:2}" - parse_options "$@" >> $setup_log 2>&1 -fi - detect_os if [ "$OS" == ubuntu ]; then From a75301cd0e654c3a7cc97e1e2116d1451335c65e Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Wed, 27 May 2020 22:00:58 -0400 Subject: [PATCH 61/76] Ensure IP is available to child process executing so-allow --- 
setup/automation/pm_standalone_defaults | 3 ++- setup/so-setup | 11 +++++++---- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/setup/automation/pm_standalone_defaults b/setup/automation/pm_standalone_defaults index 166d4a5d7..b47f5bf2d 100644 --- a/setup/automation/pm_standalone_defaults +++ b/setup/automation/pm_standalone_defaults @@ -21,6 +21,8 @@ address_type=DHCP ADMINUSER=onionuser ADMINPASS1=onionuser ADMINPASS2=onionuser +ALLOW_CIDR=192.168.0.0/16 +ALLOW_ROLE=a BASICBRO=7 BASICSURI=7 # BLOGS= @@ -34,7 +36,6 @@ HNMASTER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12 HNSENSOR=inherit HOSTNAME=standalone install_type=STANDALONE -IP=192.168.0.0/16 # LSINPUTBATCHCOUNT= # LSINPUTTHREADS= # LSPIPELINEBATCH= diff --git a/setup/so-setup b/setup/so-setup index 6b002a157..0d0022feb 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -33,8 +33,10 @@ while [[ $# -gt 0 ]]; do export TURBO="http://${arg#*=}";; "--proxy="* ) export {http,https,ftp,rsync,all}_proxy="${arg#*=}";; - "--allow="* ) - export allow="${arg#*=}";; + "--allow-role="* ) + export ALLOW_ROLE="${arg#*=}";; + "--allow-cidr="* ) + export ALLOW_CIDR="${arg#*=}";; * ) if [[ "$arg" == "--"* ]]; then echo "Invalid option" @@ -556,8 +558,9 @@ fi success=$(tail -10 $setup_log | grep Failed | awk '{ print $2}') if [[ "$success" = 0 ]]; then whiptail_setup_complete - if [[ -n $allow ]]; then - so-allow -$allow >> $setup_log 2>&1 + if [[ -n $ALLOW_ROLE && -n $ALLOW_CIDR ]]; then + export IP=$ALLOW_CIDR + so-allow -$ALLOW_ROLE >> $setup_log 2>&1 fi if [[ $THEHIVE == 1 ]]; then check_hive_init_then_reboot From 2a21d7403f28cfd378faa32e1f1ee4f8135d8af1 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Thu, 28 May 2020 00:47:46 -0400 Subject: [PATCH 62/76] Open firewall from all networks for automated testing --- setup/automation/pm_standalone_defaults | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup/automation/pm_standalone_defaults b/setup/automation/pm_standalone_defaults index 
b47f5bf2d..74ba8323f 100644 --- a/setup/automation/pm_standalone_defaults +++ b/setup/automation/pm_standalone_defaults @@ -21,7 +21,7 @@ address_type=DHCP ADMINUSER=onionuser ADMINPASS1=onionuser ADMINPASS2=onionuser -ALLOW_CIDR=192.168.0.0/16 +ALLOW_CIDR=0.0.0.0/0 ALLOW_ROLE=a BASICBRO=7 BASICSURI=7 From 60cc3e96756c1ec3a76e16761c76ba471d1945a0 Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Thu, 28 May 2020 07:50:52 -0400 Subject: [PATCH 63/76] remove address from DHCP leases query --- salt/soc/files/soc/soc.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/soc/files/soc/soc.json b/salt/soc/files/soc/soc.json index 76770e2bd..6a48d026a 100644 --- a/salt/soc/files/soc/soc.json +++ b/salt/soc/files/soc/soc.json @@ -99,7 +99,7 @@ { "name": "Connections", "description": "Connections grouped by destination country", "query": "event.module:zeek AND event.dataset:conn | groupby destination.geo.country_name"}, { "name": "Connections", "description": "Connections grouped by source country", "query": "event.module:zeek AND event.dataset:conn | groupby source.geo.country_name"}, { "name": "DCE_RPC", "description": "DCE_RPC grouped by operation", "query": "event.module:zeek AND event.dataset:dce_rpc | groupby dce_rpc.operation"}, - { "name": "DHCP", "description": "DHCP leases", "query": "event.module:zeek AND event.dataset:dhcp | groupby host.hostname host.domain dhcp.requested_address"}, + { "name": "DHCP", "description": "DHCP leases", "query": "event.module:zeek AND event.dataset:dhcp | groupby host.hostname host.domain"}, { "name": "DHCP", "description": "DHCP grouped by message type", "query": "event.module:zeek AND event.dataset:dhcp | groupby dhcp.message_types"}, { "name": "DNP3", "description": "DNP3 grouped by reply", "query": "event.module:zeek AND event.dataset:dnp3 | groupby dnp3.fc_reply"}, { "name": "DNS", "description": "DNS queries grouped by port ", "query": "event.module:zeek AND event.dataset:dns | groupby dns.query.name 
destination.port"}, From f3efafc9ca05bf7123130ad00ca3b37783c7fb8a Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Thu, 28 May 2020 08:01:33 -0400 Subject: [PATCH 64/76] combine two notice queries into one query with multiple groupby --- salt/soc/files/soc/soc.json | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/salt/soc/files/soc/soc.json b/salt/soc/files/soc/soc.json index 6a48d026a..e98ee7bf7 100644 --- a/salt/soc/files/soc/soc.json +++ b/salt/soc/files/soc/soc.json @@ -122,8 +122,7 @@ { "name": "KERBEROS", "description": "KERBEROS grouped by service", "query": "event.module:zeek AND event.dataset:kerberos | groupby kerberos.service"}, { "name": "MODBUS", "description": "MODBUS grouped by function", "query": "event.module:zeek AND event.dataset:modbus | groupby modbus.function"}, { "name": "MYSQL", "description": "MYSQL grouped by command", "query": "event.module:zeek AND event.dataset:mysql | groupby mysql.command"}, - { "name": "NOTICE", "description": "Zeek notice logs grouped by note", "query": "event.module:zeek AND event.dataset:notice | groupby notice.note"}, - { "name": "NOTICE", "description": "Zeek notice logs grouped by message", "query": "event.module:zeek AND event.dataset:notice | groupby notice.message"}, + { "name": "NOTICE", "description": "Zeek notice logs grouped by note and message", "query": "event.module:zeek AND event.dataset:notice | groupby notice.note notice.message"}, { "name": "NTLM", "description": "NTLM grouped by computer name", "query": "event.module:zeek AND event.dataset:ntlm | groupby ntlm.server.dns.name"}, { "name": "PE", "description": "PE files list", "query": "event.module:zeek AND event.dataset:pe | groupby file.machine file.os file.subsystem"}, { "name": "RADIUS", "description": "RADIUS grouped by username", "query": "event.module:zeek AND event.dataset:radius | groupby user.name.keyword"}, From b9bdca509ebd6085ec3208855afb14276a124599 Mon Sep 17 00:00:00 2001 From: Wes Lambert Date: Thu, 28 May 2020 
12:33:41 +0000 Subject: [PATCH 65/76] update Filebeat config for syslog --- salt/filebeat/etc/filebeat.yml | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/salt/filebeat/etc/filebeat.yml b/salt/filebeat/etc/filebeat.yml index 1c4bee013..be04effb0 100644 --- a/salt/filebeat/etc/filebeat.yml +++ b/salt/filebeat/etc/filebeat.yml @@ -75,6 +75,19 @@ filebeat.modules: filebeat.inputs: #------------------------------ Log prospector -------------------------------- {%- if grains['role'] == 'so-sensor' or grains['role'] == "so-eval" or grains['role'] == "so-helix" or grains['role'] == "so-heavynode" or grains['role'] == "so-standalone" %} + - type: syslog + enabled: true + protocol.udp: + host: "0.0.0.0:514" + fields: + module: syslog + dataset: syslog + pipeline: "syslog" + index: "so-syslog-%{+yyyy.MM.dd}" + processors: + - drop_fields: + fields: ["source", "prospector", "input", "offset", "beat"] + {%- if BROVER != 'SURICATA' %} {%- for LOGNAME in salt['pillar.get']('brologs:enabled', '') %} - type: log From 5afc05feb2ef3c8ac2ba387c7de43fc2b0871ca8 Mon Sep 17 00:00:00 2001 From: Wes Lambert Date: Thu, 28 May 2020 12:35:22 +0000 Subject: [PATCH 66/76] Update FB init for syslog --- salt/filebeat/init.sls | 2 ++ 1 file changed, 2 insertions(+) diff --git a/salt/filebeat/init.sls b/salt/filebeat/init.sls index e5dc78d33..8540faeb6 100644 --- a/salt/filebeat/init.sls +++ b/salt/filebeat/init.sls @@ -64,5 +64,7 @@ so-filebeat: - /opt/so/conf/filebeat/etc/pki/filebeat.crt:/usr/share/filebeat/filebeat.crt:ro - /opt/so/conf/filebeat/etc/pki/filebeat.key:/usr/share/filebeat/filebeat.key:ro - /etc/ssl/certs/intca.crt:/usr/share/filebeat/intraca.crt:ro + - port_bindings: + - 0.0.0.0:514:514/udp - watch: - file: /opt/so/conf/filebeat/etc/filebeat.yml From d2b93d531e0df7a325fa9a8b04f653e9695f1855 Mon Sep 17 00:00:00 2001 From: Wes Lambert Date: Thu, 28 May 2020 12:36:29 +0000 Subject: [PATCH 67/76] Basic syslog config --- salt/elasticsearch/files/ingest/syslog | 13 
+++++++++++++ 1 file changed, 13 insertions(+) create mode 100644 salt/elasticsearch/files/ingest/syslog diff --git a/salt/elasticsearch/files/ingest/syslog b/salt/elasticsearch/files/ingest/syslog new file mode 100644 index 000000000..d34e79d4a --- /dev/null +++ b/salt/elasticsearch/files/ingest/syslog @@ -0,0 +1,13 @@ +{ + "description" : "syslog", + "processors" : [ + { + "dissect": { + "field": "message", + "pattern" : "%{message}", + "on_failure": [ { "drop" : { } } ] + } + }, + { "pipeline": { "name": "common" } } + ] +} From b7d7747f65994d8bef637a7f29008efb2c743fd6 Mon Sep 17 00:00:00 2001 From: Wes Lambert Date: Thu, 28 May 2020 13:56:02 +0000 Subject: [PATCH 68/76] allow syslog --- salt/firewall/init.sls | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/salt/firewall/init.sls b/salt/firewall/init.sls index b6c928eba..c2ddaf5c2 100644 --- a/salt/firewall/init.sls +++ b/salt/firewall/init.sls @@ -136,6 +136,18 @@ enable_wazuh_manager_1514_udp_{{ip}}: - position: 1 - save: True +# Allow syslog (Filebeat syslog input listens on UDP 514) +enable_syslog_514_{{ip}}: + iptables.insert: + - table: filter + - chain: DOCKER-USER + - jump: ACCEPT + - proto: udp + - source: {{ ip }} + - dport: 514 + - position: 1 + - save: True + # Rules if you are a Master {% if grains['role'] in ['so-master', 'so-eval', 'so-helix', 'so-mastersearch', 'so-standalone'] %} #This should be more granular From 6c4946f4e27152b5f65701d651f657a75aeda2e5 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Thu, 28 May 2020 10:20:39 -0400 Subject: [PATCH 69/76] Provide option to skip reboot after setup completes --- setup/automation/pm_standalone_defaults | 1 + setup/so-functions | 3 +-- setup/so-setup | 9 ++++++--- 3 files changed, 8 insertions(+), 5 deletions(-) diff --git a/setup/automation/pm_standalone_defaults b/setup/automation/pm_standalone_defaults index 74ba8323f..ae4554a3f 100644 --- a/setup/automation/pm_standalone_defaults +++ b/setup/automation/pm_standalone_defaults @@ -67,6 +67,7 @@ PLAYBOOK=1 REDIRECTINFO=IP
RULESETUP=ETOPEN # SHARDCOUNT= +SKIP_REBOOT=1 SOREMOTEPASS1=onionuser SOREMOTEPASS2=onionuser STRELKA=1 diff --git a/setup/so-functions b/setup/so-functions index 6707e6841..52aee37d6 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -193,7 +193,7 @@ check_admin_pass() { check_pass_match "$ADMINPASS1" "$ADMINPASS2" "APMATCH" } -check_hive_init_then_reboot() { +check_hive_init() { wait_for_file /opt/so/state/thehive.txt 20 5 local return_val=$? @@ -203,7 +203,6 @@ check_hive_init_then_reboot() { docker stop so-thehive docker rm so-thehive - shutdown -r now } check_network_manager_conf() { diff --git a/setup/so-setup b/setup/so-setup index 0d0022feb..24079bb94 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -37,6 +37,8 @@ while [[ $# -gt 0 ]]; do export ALLOW_ROLE="${arg#*=}";; "--allow-cidr="* ) export ALLOW_CIDR="${arg#*=}";; + "--skip-reboot" ) + export SKIP_REBOOT=1 * ) if [[ "$arg" == "--"* ]]; then echo "Invalid option" @@ -563,11 +565,12 @@ if [[ "$success" = 0 ]]; then so-allow -$ALLOW_ROLE >> $setup_log 2>&1 fi if [[ $THEHIVE == 1 ]]; then - check_hive_init_then_reboot - else - shutdown -r now + check_hive_init fi else whiptail_setup_failed +fi + +if [[ -z $SKIP_REBOOT ]]; then shutdown -r now fi From d2263db0ff2bba2f845bee7477b3c73fa794c5f7 Mon Sep 17 00:00:00 2001 From: weslambert Date: Thu, 28 May 2020 12:11:08 -0400 Subject: [PATCH 70/76] Update init.sls --- salt/filebeat/init.sls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/filebeat/init.sls b/salt/filebeat/init.sls index 8540faeb6..897bb3937 100644 --- a/salt/filebeat/init.sls +++ b/salt/filebeat/init.sls @@ -64,7 +64,7 @@ so-filebeat: - /opt/so/conf/filebeat/etc/pki/filebeat.crt:/usr/share/filebeat/filebeat.crt:ro - /opt/so/conf/filebeat/etc/pki/filebeat.key:/usr/share/filebeat/filebeat.key:ro - /etc/ssl/certs/intca.crt:/usr/share/filebeat/intraca.crt:ro - - port_bindings: + - port_bindings: - 0.0.0.0:514:514/udp - watch: - file: 
/opt/so/conf/filebeat/etc/filebeat.yml From 869bfb947deb0409296809c1975e33628e671456 Mon Sep 17 00:00:00 2001 From: Wes Lambert Date: Thu, 28 May 2020 16:45:48 +0000 Subject: [PATCH 71/76] add master to SOCtopus hosts file --- salt/soctopus/init.sls | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/salt/soctopus/init.sls b/salt/soctopus/init.sls index 330e727f0..ff30c3c1a 100644 --- a/salt/soctopus/init.sls +++ b/salt/soctopus/init.sls @@ -1,5 +1,7 @@ {% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %} {% set MASTER = salt['grains.get']('master') %} +{%- set MASTER_URL = salt['pillar.get']('master:url_base', '') %} +{%- set MASTER_IP = salt['pillar.get']('static:masterip', '') %} soctopusdir: file.directory: @@ -69,3 +71,5 @@ so-soctopus: - /opt/so/conf/navigator/nav_layer_playbook.json:/etc/playbook/nav_layer_playbook.json:rw - port_bindings: - 0.0.0.0:7000:7000 + - extra_hosts: + - {{MASTER_URL}}:{{MASTER_IP}} From 12f426d4f4bb80f58cc82fd7bb3a9766102f24d0 Mon Sep 17 00:00:00 2001 From: weslambert Date: Thu, 28 May 2020 12:59:41 -0400 Subject: [PATCH 72/76] Move eve.json to /nsm --- salt/suricata/files/suricata.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/suricata/files/suricata.yaml b/salt/suricata/files/suricata.yaml index d896167be..c87c75447 100644 --- a/salt/suricata/files/suricata.yaml +++ b/salt/suricata/files/suricata.yaml @@ -95,7 +95,7 @@ outputs: - eve-log: enabled: yes filetype: regular #regular|syslog|unix_dgram|unix_stream|redis - filename: eve.json + filename: /nsm/eve.json rotate-interval: day #prefix: "@cee: " # prefix to prepend to each log entry @@ -1880,4 +1880,4 @@ reference-config-file: /etc/suricata/reference.config # Includes. Files included here will be handled as if they were # inlined in this configuration file. 
#include: include1.yaml -#include: include2.yaml \ No newline at end of file +#include: include2.yaml From b835c2e27ecb076b4d5498295c4b923be7b0e236 Mon Sep 17 00:00:00 2001 From: weslambert Date: Thu, 28 May 2020 13:17:31 -0400 Subject: [PATCH 73/76] Update for exact match (ex. thehive, thehive-es, thehive-cortex) --- salt/common/tools/sbin/so-start | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/common/tools/sbin/so-start b/salt/common/tools/sbin/so-start index a198377a1..690950373 100755 --- a/salt/common/tools/sbin/so-start +++ b/salt/common/tools/sbin/so-start @@ -32,5 +32,5 @@ fi case $1 in "all") salt-call state.highstate queue=True;; "steno") if docker ps | grep -q so-$1; then printf "\n$1 is already running!\n\n"; else docker rm so-$1 >/dev/null 2>&1 ; salt-call state.apply pcap queue=True; fi ;; - *) if docker ps | grep -q so-$1; then printf "\n$1 is already running\n\n"; else docker rm so-$1 >/dev/null 2>&1 ; salt-call state.apply $1 queue=True; fi ;; + *) if docker ps | grep -E -q '^so-$1$'; then printf "\n$1 is already running\n\n"; else docker rm so-$1 >/dev/null 2>&1 ; salt-call state.apply $1 queue=True; fi ;; esac From 7f750506820f0893558b8707d49f9b26b3fec5d6 Mon Sep 17 00:00:00 2001 From: Wes Lambert Date: Thu, 28 May 2020 17:54:15 +0000 Subject: [PATCH 74/76] Add basic Zeek stats script --- salt/common/tools/sbin/so-zeek-stats | 39 ++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) create mode 100644 salt/common/tools/sbin/so-zeek-stats diff --git a/salt/common/tools/sbin/so-zeek-stats b/salt/common/tools/sbin/so-zeek-stats new file mode 100644 index 000000000..656da7f04 --- /dev/null +++ b/salt/common/tools/sbin/so-zeek-stats @@ -0,0 +1,39 @@ +#!/bin/bash + +# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, 
either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +# Show Zeek stats (capstats, netstats) + +show_stats() { + echo '##############' + echo '# Zeek Stats #' + echo '##############' + echo + echo "Average throughput:" + echo + docker exec -it so-zeek /opt/zeek/bin/zeekctl capstats + echo + echo "Average packet loss:" + echo + docker exec -it so-zeek /opt/zeek/bin/zeekctl netstats + echo +} + +if docker ps | grep -q zeek; then + show_stats +else + echo "Zeek is not running! Try starting it with 'so-zeek-start'." && exit 1; +fi From 3952faba85bb091cd8a0e6924b3cbd92f17499a3 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Thu, 28 May 2020 15:27:14 -0400 Subject: [PATCH 75/76] Add missing semi-colons to break out of the case block --- setup/so-setup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup/so-setup b/setup/so-setup index 24079bb94..566767e82 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -38,7 +38,7 @@ while [[ $# -gt 0 ]]; do "--allow-cidr="* ) export ALLOW_CIDR="${arg#*=}";; "--skip-reboot" ) - export SKIP_REBOOT=1 + export SKIP_REBOOT=1;; * ) if [[ "$arg" == "--"* ]]; then echo "Invalid option" From 4059121dd66354ea32f108f5754b906d0aa4391e Mon Sep 17 00:00:00 2001 From: Wes Lambert Date: Fri, 29 May 2020 11:55:18 +0000 Subject: [PATCH 76/76] fix framed_addr field --- salt/elasticsearch/files/ingest/zeek.radius | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/elasticsearch/files/ingest/zeek.radius b/salt/elasticsearch/files/ingest/zeek.radius index c74330690..715f41478 100644 --- a/salt/elasticsearch/files/ingest/zeek.radius +++ 
b/salt/elasticsearch/files/ingest/zeek.radius @@ -5,7 +5,7 @@ { "json": { "field": "message", "target_field": "message2", "ignore_failure": true } }, { "rename": { "field": "message2.username", "target_field": "user.name", "ignore_missing": true } }, { "rename": { "field": "message2.mac", "target_field": "host.mac", "ignore_missing": true } }, - { "rename": { "field": "message2.framed_addr", "target_field": "framed_addr", "ignore_missing": true } }, + { "rename": { "field": "message2.framed_addr", "target_field": "radius.framed_address", "ignore_missing": true } }, { "rename": { "field": "message2.remote_ip", "target_field": "destination.ip", "ignore_missing": true } }, { "rename": { "field": "message2.connect_info", "target_field": "radius.connect_info", "ignore_missing": true } }, { "rename": { "field": "message2.reply_msg", "target_field": "radius.reply_message", "ignore_missing": true } },