diff --git a/salt/common/init.sls b/salt/common/init.sls
index 82ac4a062..09d71114b 100644
--- a/salt/common/init.sls
+++ b/salt/common/init.sls
@@ -28,20 +28,83 @@ salttmp:
- group: 939
- makedirs: True
-# Install packages needed for the sensor
-sensorpkgs:
+# Install epel
+{% if grains['os'] == 'CentOS' %}
+epel:
pkg.installed:
- - skip_suggestions: False
+ - skip_suggestions: True
+ - pkgs:
+ - epel-release
+{% endif %}
+
+# Install common packages
+{% if grains['os'] != 'CentOS' %}
+commonpkgs:
+ pkg.installed:
+ - skip_suggestions: True
+ - pkgs:
+ - apache2-utils
+ - wget
+ - ntpdate
+ - jq
+ - python3-docker
+ - docker-ce
+ - curl
+ - ca-certificates
+ - software-properties-common
+ - apt-transport-https
+ - openssl
+ - netcat
+ - python3-mysqldb
+ - sqlite3
+ - argon2
+ - libssl-dev
+ - python3-dateutil
+ - python3-m2crypto
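+# Pin the Docker packages to the versions tested with this release and hold
+# them so routine apt upgrades cannot change the engine under the running
+# containers.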
+heldpackages:
+ pkg.installed:
+ - pkgs:
+ - containerd.io: 1.2.13-2
+ - docker-ce: 5:19.03.9~3-0~ubuntu-bionic
+ - hold: True
+ - update_holds: True
+
+{% else %}
+commonpkgs:
+ pkg.installed:
+ - skip_suggestions: True
- pkgs:
- wget
+ - ntpdate
+ - bind-utils
- jq
- {% if grains['os'] != 'CentOS' %}
- - apache2-utils
- {% else %}
- - net-tools
- tcpdump
- httpd-tools
- {% endif %}
+ - net-tools
+ - curl
+ - sqlite
+ - argon2
+ - mariadb-devel
+ - nmap-ncat
+ - python3
+ - python36-docker
+ - python36-dateutil
+ - python36-m2crypto
+ - python36-mysql
+ - yum-utils
+ - device-mapper-persistent-data
+ - lvm2
+ - openssl
+
+heldpackages:
+ pkg.installed:
+ - pkgs:
+ - containerd.io: 1.2.13-3.2.el7
+ - docker-ce: 3:19.03.9-3.el7
+ - hold: True
+ - update_holds: True
+{% endif %}
# Always keep these packages up to date
diff --git a/salt/common/tools/sbin/so-cortex-restart b/salt/common/tools/sbin/so-cortex-restart
index ef0e3e4fe..841ca1bb6 100755
--- a/salt/common/tools/sbin/so-cortex-restart
+++ b/salt/common/tools/sbin/so-cortex-restart
@@ -1,5 +1,5 @@
#!/bin/bash
-
+#
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
@@ -17,4 +17,5 @@
. /usr/sbin/so-common
-/usr/sbin/so-restart cortex $1
+/usr/sbin/so-stop cortex $1
+/usr/sbin/so-start thehive $1
diff --git a/salt/common/tools/sbin/so-cortex-start b/salt/common/tools/sbin/so-cortex-start
index a08969cab..92fe88bb5 100755
--- a/salt/common/tools/sbin/so-cortex-start
+++ b/salt/common/tools/sbin/so-cortex-start
@@ -17,4 +17,4 @@
. /usr/sbin/so-common
-/usr/sbin/so-start cortex $1
+/usr/sbin/so-start thehive $1
diff --git a/salt/common/tools/sbin/so-cortex-stop b/salt/common/tools/sbin/so-cortex-stop
index a13d1e2e3..727b2c7fa 100755
--- a/salt/common/tools/sbin/so-cortex-stop
+++ b/salt/common/tools/sbin/so-cortex-stop
@@ -1,5 +1,5 @@
#!/bin/bash
-
+#
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
diff --git a/salt/common/tools/sbin/so-elastalert-create b/salt/common/tools/sbin/so-elastalert-create
index fbe9527a7..0270503bf 100755
--- a/salt/common/tools/sbin/so-elastalert-create
+++ b/salt/common/tools/sbin/so-elastalert-create
@@ -166,8 +166,7 @@ cat << EOF
What elasticsearch index do you want to use?
Below are the default Index Patterns used in Security Onion:
-*:logstash-*
-*:logstash-beats-*
+*:so-ids-*
*:elastalert_status*
EOF
diff --git a/salt/common/tools/sbin/so-start b/salt/common/tools/sbin/so-start
index a198377a1..690950373 100755
--- a/salt/common/tools/sbin/so-start
+++ b/salt/common/tools/sbin/so-start
@@ -32,5 +32,5 @@ fi
case $1 in
"all") salt-call state.highstate queue=True;;
"steno") if docker ps | grep -q so-$1; then printf "\n$1 is already running!\n\n"; else docker rm so-$1 >/dev/null 2>&1 ; salt-call state.apply pcap queue=True; fi ;;
- *) if docker ps | grep -q so-$1; then printf "\n$1 is already running\n\n"; else docker rm so-$1 >/dev/null 2>&1 ; salt-call state.apply $1 queue=True; fi ;;
+  *) if docker ps --format '{{.Names}}' | grep -E -q "^so-$1$"; then printf "\n$1 is already running\n\n"; else docker rm so-$1 >/dev/null 2>&1 ; salt-call state.apply $1 queue=True; fi ;;
esac
diff --git a/salt/common/tools/sbin/so-thehive-es-restart b/salt/common/tools/sbin/so-thehive-es-restart
new file mode 100755
index 000000000..d58caecdc
--- /dev/null
+++ b/salt/common/tools/sbin/so-thehive-es-restart
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+. /usr/sbin/so-common
+
+/usr/sbin/so-stop thehive-es $1
+/usr/sbin/so-start thehive $1
diff --git a/salt/common/tools/sbin/so-thehive-es-start b/salt/common/tools/sbin/so-thehive-es-start
new file mode 100755
index 000000000..92fe88bb5
--- /dev/null
+++ b/salt/common/tools/sbin/so-thehive-es-start
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+. /usr/sbin/so-common
+
+/usr/sbin/so-start thehive $1
diff --git a/salt/common/tools/sbin/so-thehive-es-stop b/salt/common/tools/sbin/so-thehive-es-stop
new file mode 100755
index 000000000..cf9cc2310
--- /dev/null
+++ b/salt/common/tools/sbin/so-thehive-es-stop
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+. /usr/sbin/so-common
+
+/usr/sbin/so-stop thehive-es $1
diff --git a/salt/common/tools/sbin/so-thehive-restart b/salt/common/tools/sbin/so-thehive-restart
index 08cd8318e..4b28c0030 100755
--- a/salt/common/tools/sbin/so-thehive-restart
+++ b/salt/common/tools/sbin/so-thehive-restart
@@ -1,5 +1,5 @@
#!/bin/bash
-
+#
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
diff --git a/salt/common/tools/sbin/so-thehive-stop b/salt/common/tools/sbin/so-thehive-stop
index b326f699c..6c56e0473 100755
--- a/salt/common/tools/sbin/so-thehive-stop
+++ b/salt/common/tools/sbin/so-thehive-stop
@@ -1,5 +1,5 @@
#!/bin/bash
-
+#
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
diff --git a/salt/common/tools/sbin/so-zeek-stats b/salt/common/tools/sbin/so-zeek-stats
new file mode 100644
index 000000000..656da7f04
--- /dev/null
+++ b/salt/common/tools/sbin/so-zeek-stats
@@ -0,0 +1,39 @@
+#!/bin/bash
+
+# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+# Show Zeek stats (capstats, netstats)
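+# 'zeekctl capstats' samples per-worker interface throughput, while
+# 'zeekctl netstats' reports packets received/dropped per worker; those are
+# the two sections printed below.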
+
+show_stats() {
+ echo '##############'
+ echo '# Zeek Stats #'
+ echo '##############'
+ echo
+ echo "Average throughput:"
+ echo
+ docker exec -it so-zeek /opt/zeek/bin/zeekctl capstats
+ echo
+ echo "Average packet loss:"
+ echo
+ docker exec -it so-zeek /opt/zeek/bin/zeekctl netstats
+ echo
+}
+
+if docker ps | grep -q zeek; then
+ show_stats
+else
+ echo "Zeek is not running! Try starting it with 'so-zeek-start'." && exit 1;
+fi
diff --git a/salt/curator/files/action/close.yml b/salt/curator/files/action/close.yml
index dfe5519e8..a65e9af3d 100644
--- a/salt/curator/files/action/close.yml
+++ b/salt/curator/files/action/close.yml
@@ -24,9 +24,8 @@ actions:
disable_action: False
filters:
- filtertype: pattern
- kind: prefix
- value: logstash-
- exclude:
+ kind: regex
+ value: '^(logstash-.*|so-.*)$'
- filtertype: age
source: name
direction: older
diff --git a/salt/curator/files/action/delete.yml b/salt/curator/files/action/delete.yml
index e6f2f3833..030bbbfac 100644
--- a/salt/curator/files/action/delete.yml
+++ b/salt/curator/files/action/delete.yml
@@ -20,8 +20,8 @@ actions:
disable_action: False
filters:
- filtertype: pattern
- kind: prefix
- value: logstash-
+ kind: regex
+ value: '^(logstash-.*|so-.*)$'
- filtertype: space
source: creation_date
use_age: True
diff --git a/salt/curator/files/bin/so-curator-closed-delete-delete b/salt/curator/files/bin/so-curator-closed-delete-delete
index b0ec62424..3d397defc 100755
--- a/salt/curator/files/bin/so-curator-closed-delete-delete
+++ b/salt/curator/files/bin/so-curator-closed-delete-delete
@@ -33,17 +33,17 @@ LOG="/opt/so/log/curator/so-curator-closed-delete.log"
# Check for 2 conditions:
# 1. Are Elasticsearch indices using more disk space than LOG_SIZE_LIMIT?
-# 2. Are there any closed logstash- indices that we can delete?
+# 2. Are there any closed logstash-, or so- indices that we can delete?
# If both conditions are true, keep on looping until one of the conditions is false.
while [[ $(du -hs --block-size=1GB /nsm/elasticsearch/nodes | awk '{print $1}' ) -gt "{{LOG_SIZE_LIMIT}}" ]] &&
-curl -s {{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices | grep "^ close logstash-" > /dev/null; do
+curl -s {{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices | grep -E "^ close (logstash-|so-)" > /dev/null; do
# We need to determine OLDEST_INDEX.
- # First, get the list of closed indices that are prefixed with "logstash-".
+ # First, get the list of closed indices that are prefixed with "logstash-" or "so-".
# For example: logstash-ids-YYYY.MM.DD
# Then, sort by date by telling sort to use hyphen as delimiter and then sort on the third field.
# Finally, select the first entry in that sorted list.
- OLDEST_INDEX=$(curl -s {{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices | grep "^ close logstash-" | awk '{print $2}' | sort -t- -k3 | head -1)
+ OLDEST_INDEX=$(curl -s {{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices | grep -E "^ close (logstash-|so-)" | awk '{print $2}' | sort -t- -k3 | head -1)
# Now that we've determined OLDEST_INDEX, ask Elasticsearch to delete it.
curl -XDELETE {{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/${OLDEST_INDEX}
diff --git a/salt/elastalert/files/rules/so/nids2hive.yaml b/salt/elastalert/files/rules/so/nids2hive.yaml
index 68a173fcd..097511d56 100644
--- a/salt/elastalert/files/rules/so/nids2hive.yaml
+++ b/salt/elastalert/files/rules/so/nids2hive.yaml
@@ -40,7 +40,7 @@ hive_alert_config:
title: '{match[rule][name]}'
type: 'NIDS'
source: 'SecurityOnion'
- description: "`Hunting Pivot:` \n\n \n\n `Kibana Dashboard:` \n\n \n\n `IPs: `{match[source][ip]}:{match[source][port]} --> {match[destination][ip]}:{match[destination][port]} \n\n `Signature:`{match[rule][rule]}"
+ description: "`Hunting Pivot:` \n\n \n\n `Kibana Dashboard - Signature Drilldown:` \n\n \n\n `Kibana Dashboard - Community_ID:` \n\n \n\n `IPs: `{match[source][ip]}:{match[source][port]} --> {match[destination][ip]}:{match[destination][port]} \n\n `Signature:`{match[rule][rule]}"
severity: 2
tags: ['{match[rule][uuid]}','{match[source][ip]}','{match[destination][ip]}']
tlp: 3
diff --git a/salt/elasticsearch/files/elasticsearch.yml b/salt/elasticsearch/files/elasticsearch.yml
index 73f3c9239..271ef40cf 100644
--- a/salt/elasticsearch/files/elasticsearch.yml
+++ b/salt/elasticsearch/files/elasticsearch.yml
@@ -22,3 +22,7 @@ transport.bind_host: 0.0.0.0
transport.publish_host: {{ nodeip }}
transport.publish_port: 9300
{%- endif %}
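+# Disk-based shard allocation thresholds: stop allocating shards to a node
+# above 95% disk usage, relocate shards away above 98%, and apply the
+# read-only index block once the flood stage is reached.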
+cluster.routing.allocation.disk.threshold_enabled: true
+cluster.routing.allocation.disk.watermark.low: 95%
+cluster.routing.allocation.disk.watermark.high: 98%
+cluster.routing.allocation.disk.watermark.flood_stage: 98%
diff --git a/salt/elasticsearch/files/ingest/common b/salt/elasticsearch/files/ingest/common
index e70d5e2d8..a65742f99 100644
--- a/salt/elasticsearch/files/ingest/common
+++ b/salt/elasticsearch/files/ingest/common
@@ -38,7 +38,7 @@
{ "rename": { "field": "module", "target_field": "event.module", "ignore_missing": true } },
{ "rename": { "field": "dataset", "target_field": "event.dataset", "ignore_missing": true } },
{ "rename": { "field": "category", "target_field": "event.category", "ignore_missing": true } },
- { "rename": { "field": "message2.community_id", "target_field": "network.community_id", "ignore_missing": true } },
+ { "rename": { "field": "message2.community_id", "target_field": "network.community_id", "ignore_failure": true, "ignore_missing": true } },
{
"remove": {
"field": [ "index_name_prefix", "message2", "type" ],
diff --git a/salt/elasticsearch/files/ingest/osquery.query_result b/salt/elasticsearch/files/ingest/osquery.query_result
index e9cdbe2d3..5b37655f9 100644
--- a/salt/elasticsearch/files/ingest/osquery.query_result
+++ b/salt/elasticsearch/files/ingest/osquery.query_result
@@ -31,7 +31,7 @@
{ "rename": { "field": "message3.columns.remote_port", "target_field": "remote.port", "ignore_missing": true } },
{ "rename": { "field": "message3.columns.process_name", "target_field": "process.name", "ignore_missing": true } },
{ "rename": { "field": "message3.columns.eventid", "target_field": "event.code", "ignore_missing": true } },
- { "set": { "if": "ctx.message3.columns.?data != null", "field": "dataset", "value": "wel-{{message3.columns.source}}", "override": true } },
+ { "set": { "if": "ctx.message3.columns?.data != null", "field": "dataset", "value": "wel-{{message3.columns.source}}", "override": true } },
{ "rename": { "field": "message3.columns.winlog.EventData.SubjectUserName", "target_field": "user.name", "ignore_missing": true } },
{ "rename": { "field": "message3.columns.winlog.EventData.destinationHostname", "target_field": "destination.hostname", "ignore_missing": true } },
{ "rename": { "field": "message3.columns.winlog.EventData.destinationIp", "target_field": "destination.ip", "ignore_missing": true } },
diff --git a/salt/elasticsearch/files/ingest/syslog b/salt/elasticsearch/files/ingest/syslog
new file mode 100644
index 000000000..d34e79d4a
--- /dev/null
+++ b/salt/elasticsearch/files/ingest/syslog
@@ -0,0 +1,13 @@
+{
+ "description" : "syslog",
+ "processors" : [
+ {
+ "dissect": {
+ "field": "message",
+ "pattern" : "%{message}",
+ "on_failure": [ { "drop" : { } } ]
+ }
+ },
+ { "pipeline": { "name": "common" } }
+ ]
+}
diff --git a/salt/elasticsearch/files/ingest/zeek.common b/salt/elasticsearch/files/ingest/zeek.common
index c31625db6..b0ac0d12a 100644
--- a/salt/elasticsearch/files/ingest/zeek.common
+++ b/salt/elasticsearch/files/ingest/zeek.common
@@ -6,7 +6,8 @@
{ "dot_expander": { "field": "id.orig_h", "path": "message2", "ignore_failure": true } },
{ "dot_expander": { "field": "id.orig_p", "path": "message2", "ignore_failure": true } },
{ "dot_expander": { "field": "id.resp_h", "path": "message2", "ignore_failure": true } },
- { "dot_expander": { "field": "id.resp_p", "path": "message2", "ignore_failure": true } },
+ { "dot_expander": { "field": "id.resp_p", "path": "message2", "ignore_failure": true } },
+ {"community_id": {"if": "ctx.network?.transport != null", "field":["message2.id.orig_h","message2.id.orig_p","message2.id.resp_h","message2.id.resp_p","network.transport"],"target_field":"network.community_id"}},
{ "rename": { "field": "message2.id.orig_h", "target_field": "source.ip", "ignore_missing": true } },
{ "rename": { "field": "message2.id.orig_p", "target_field": "source.port", "ignore_missing": true } },
{ "rename": { "field": "message2.id.resp_h", "target_field": "destination.ip", "ignore_missing": true } },
diff --git a/salt/elasticsearch/files/ingest/zeek.http b/salt/elasticsearch/files/ingest/zeek.http
index a1354044c..3368e45e1 100644
--- a/salt/elasticsearch/files/ingest/zeek.http
+++ b/salt/elasticsearch/files/ingest/zeek.http
@@ -29,6 +29,7 @@
{ "script": { "lang": "painless", "source": "ctx.uri_length = ctx.uri.length()", "ignore_failure": true } },
{ "script": { "lang": "painless", "source": "ctx.useragent_length = ctx.useragent.length()", "ignore_failure": true } },
{ "script": { "lang": "painless", "source": "ctx.virtual_host_length = ctx.virtual_host.length()", "ignore_failure": true } },
+ { "set": { "field": "network.transport", "value": "tcp" } },
{ "pipeline": { "name": "zeek.common" } }
]
}
diff --git a/salt/elasticsearch/files/ingest/zeek.notice b/salt/elasticsearch/files/ingest/zeek.notice
index 4e54f325d..b662393f6 100644
--- a/salt/elasticsearch/files/ingest/zeek.notice
+++ b/salt/elasticsearch/files/ingest/zeek.notice
@@ -6,7 +6,7 @@
{ "rename": { "field": "message2.fuid", "target_field": "log.id.fuid", "ignore_missing": true } },
{ "rename": { "field": "message2.mime", "target_field": "file.mimetype", "ignore_missing": true } },
{ "rename": { "field": "message2.desc", "target_field": "file.description", "ignore_missing": true } },
- { "rename": { "field": "message2.proto", "target_field": "network.protocol", "ignore_missing": true } },
+ { "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_missing": true } },
{ "rename": { "field": "message2.note", "target_field": "notice.note", "ignore_missing": true } },
{ "rename": { "field": "message2.msg", "target_field": "notice.message", "ignore_missing": true } },
{ "rename": { "field": "message2.sub", "target_field": "notice.sub_message", "ignore_missing": true } },
diff --git a/salt/elasticsearch/files/ingest/zeek.radius b/salt/elasticsearch/files/ingest/zeek.radius
index c74330690..715f41478 100644
--- a/salt/elasticsearch/files/ingest/zeek.radius
+++ b/salt/elasticsearch/files/ingest/zeek.radius
@@ -5,7 +5,7 @@
{ "json": { "field": "message", "target_field": "message2", "ignore_failure": true } },
{ "rename": { "field": "message2.username", "target_field": "user.name", "ignore_missing": true } },
{ "rename": { "field": "message2.mac", "target_field": "host.mac", "ignore_missing": true } },
- { "rename": { "field": "message2.framed_addr", "target_field": "framed_addr", "ignore_missing": true } },
+ { "rename": { "field": "message2.framed_addr", "target_field": "radius.framed_address", "ignore_missing": true } },
{ "rename": { "field": "message2.remote_ip", "target_field": "destination.ip", "ignore_missing": true } },
{ "rename": { "field": "message2.connect_info", "target_field": "radius.connect_info", "ignore_missing": true } },
{ "rename": { "field": "message2.reply_msg", "target_field": "radius.reply_message", "ignore_missing": true } },
diff --git a/salt/elasticsearch/files/ingest/zeek.smtp b/salt/elasticsearch/files/ingest/zeek.smtp
index 473b4cce5..9bfb1e3e1 100644
--- a/salt/elasticsearch/files/ingest/zeek.smtp
+++ b/salt/elasticsearch/files/ingest/zeek.smtp
@@ -25,6 +25,7 @@
{ "rename": { "field": "message2.tls", "target_field": "smtp.tls", "ignore_missing": true } },
{ "rename": { "field": "message2.fuids", "target_field": "log.id.fuids", "ignore_missing": true } },
{ "rename": { "field": "message2.is_webmail", "target_field": "smtp.is_webmail", "ignore_missing": true } },
+ { "set": { "field": "network.transport", "value": "tcp" } },
{ "pipeline": { "name": "zeek.common" } }
]
}
diff --git a/salt/elasticsearch/files/so-elasticsearch-pipelines b/salt/elasticsearch/files/so-elasticsearch-pipelines
index b1b6db158..514054359 100755
--- a/salt/elasticsearch/files/so-elasticsearch-pipelines
+++ b/salt/elasticsearch/files/so-elasticsearch-pipelines
@@ -15,6 +15,7 @@
# You should have received a copy of the GNU General Public License
 # along with this program. If not, see <https://www.gnu.org/licenses/>.
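+# Track whether any pipeline fails to load so the script exits non-zero and
+# callers can detect a partial load.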
+RETURN_CODE=0
ELASTICSEARCH_HOST=$1
ELASTICSEARCH_PORT=9200
@@ -46,7 +47,9 @@ fi
cd ${ELASTICSEARCH_INGEST_PIPELINES}
echo "Loading pipelines..."
-for i in *; do echo $i; curl ${ELASTICSEARCH_AUTH} -XPUT http://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}/_ingest/pipeline/$i -H 'Content-Type: application/json' -d@$i 2>/dev/null; echo; done
+for i in *; do echo $i; RESPONSE=$(curl ${ELASTICSEARCH_AUTH} -XPUT http://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}/_ingest/pipeline/$i -H 'Content-Type: application/json' -d@$i 2>/dev/null); echo $RESPONSE; if [[ "$RESPONSE" == *"error"* ]]; then RETURN_CODE=1; fi; done
echo
cd - >/dev/null
+
+exit $RETURN_CODE
\ No newline at end of file
diff --git a/salt/filebeat/etc/filebeat.yml b/salt/filebeat/etc/filebeat.yml
index 1c4bee013..be04effb0 100644
--- a/salt/filebeat/etc/filebeat.yml
+++ b/salt/filebeat/etc/filebeat.yml
@@ -75,6 +75,19 @@ filebeat.modules:
filebeat.inputs:
#------------------------------ Log prospector --------------------------------
{%- if grains['role'] == 'so-sensor' or grains['role'] == "so-eval" or grains['role'] == "so-helix" or grains['role'] == "so-heavynode" or grains['role'] == "so-standalone" %}
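+# Listen for syslog over UDP 514 on sensor-type roles; events are sent through
+# the "syslog" ingest pipeline and stored in daily so-syslog-* indices.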
+ - type: syslog
+ enabled: true
+ protocol.udp:
+ host: "0.0.0.0:514"
+ fields:
+ module: syslog
+ dataset: syslog
+ pipeline: "syslog"
+ index: "so-syslog-%{+yyyy.MM.dd}"
+ processors:
+ - drop_fields:
+ fields: ["source", "prospector", "input", "offset", "beat"]
+
{%- if BROVER != 'SURICATA' %}
{%- for LOGNAME in salt['pillar.get']('brologs:enabled', '') %}
- type: log
diff --git a/salt/filebeat/init.sls b/salt/filebeat/init.sls
index 409594b2d..897bb3937 100644
--- a/salt/filebeat/init.sls
+++ b/salt/filebeat/init.sls
@@ -57,12 +57,14 @@ so-filebeat:
- /opt/so/conf/filebeat/etc/filebeat.yml:/usr/share/filebeat/filebeat.yml:ro
- /nsm/zeek:/nsm/zeek:ro
- /nsm/strelka/log:/nsm/strelka/log:ro
- - /opt/so/log/suricata:/suricata:ro
+ - /nsm/suricata:/suricata:ro
- /opt/so/wazuh/logs/alerts:/wazuh/alerts:ro
- /opt/so/wazuh/logs/archives:/wazuh/archives:ro
- /nsm/osquery/fleet/:/nsm/osquery/fleet:ro
- /opt/so/conf/filebeat/etc/pki/filebeat.crt:/usr/share/filebeat/filebeat.crt:ro
- /opt/so/conf/filebeat/etc/pki/filebeat.key:/usr/share/filebeat/filebeat.key:ro
- /etc/ssl/certs/intca.crt:/usr/share/filebeat/intraca.crt:ro
+ - port_bindings:
+ - 0.0.0.0:514:514/udp
- watch:
- file: /opt/so/conf/filebeat/etc/filebeat.yml
diff --git a/salt/firewall/init.sls b/salt/firewall/init.sls
index b6c928eba..c2ddaf5c2 100644
--- a/salt/firewall/init.sls
+++ b/salt/firewall/init.sls
@@ -136,6 +136,18 @@ enable_wazuh_manager_1514_udp_{{ip}}:
- position: 1
- save: True
+# Allow syslog
+enable_syslog_514_{{ip}}:
+ iptables.insert:
+ - table: filter
+ - chain: DOCKER-USER
+ - jump: ACCEPT
+    - proto: udp
+ - source: {{ ip }}
+ - dport: 514
+ - position: 1
+ - save: True
+
# Rules if you are a Master
{% if grains['role'] in ['so-master', 'so-eval', 'so-helix', 'so-mastersearch', 'so-standalone'] %}
#This should be more granular
diff --git a/salt/hive/thehive/scripts/hive_init b/salt/hive/thehive/scripts/hive_init
deleted file mode 100755
index b1ef62d68..000000000
--- a/salt/hive/thehive/scripts/hive_init
+++ /dev/null
@@ -1,64 +0,0 @@
-#!/bin/bash
-{% set MASTERIP = salt['pillar.get']('static:masterip', '') %}
-{%- set HIVEUSER = salt['pillar.get']('static:hiveuser', '') %}
-{%- set HIVEPASSWORD = salt['pillar.get']('static:hivepassword', '') %}
-{%- set HIVEKEY = salt['pillar.get']('static:hivekey', '') %}
-
-hive_init(){
- sleep 120
- HIVE_IP="{{MASTERIP}}"
- HIVE_USER="{{HIVEUSER}}"
- HIVE_PASSWORD="{{HIVEPASSWORD}}"
- HIVE_KEY="{{HIVEKEY}}"
- SOCTOPUS_CONFIG="/opt/so/saltstack/salt/soctopus/files/SOCtopus.conf"
-
- echo -n "Waiting for TheHive..."
- COUNT=0
- HIVE_CONNECTED="no"
- while [[ "$COUNT" -le 240 ]]; do
- curl --output /dev/null --silent --head --fail -k "https://$HIVE_IP/thehive"
- if [ $? -eq 0 ]; then
- HIVE_CONNECTED="yes"
- echo "connected!"
- break
- else
- ((COUNT+=1))
- sleep 1
- echo -n "."
- fi
- done
-
- if [ "$HIVE_CONNECTED" == "yes" ]; then
-
- # Migrate DB
- curl -v -k -XPOST "https://$HIVE_IP:/thehive/api/maintenance/migrate"
-
- # Create intial TheHive user
- curl -v -k "https://$HIVE_IP/thehive/api/user" -H "Content-Type: application/json" -d "{\"login\" : \"$HIVE_USER\",\"name\" : \"$HIVE_USER\",\"roles\" : [\"read\",\"alert\",\"write\",\"admin\"],\"preferences\" : \"{}\",\"password\" : \"$HIVE_PASSWORD\", \"key\": \"$HIVE_KEY\"}"
-
- # Pre-load custom fields
- #
- # reputation
- curl -v -k "https://$HIVE_IP/thehive/api/list/custom_fields" -H "Authorization: Bearer $HIVE_KEY" -H "Content-Type: application/json" -d "{\"value\":{\"name\": \"reputation\", \"reference\": \"reputation\", \"description\": \"This field provides an overall reputation status for an address/domain.\", \"type\": \"string\", \"options\": []}}"
-
-
- touch /opt/so/state/thehive.txt
- else
- echo "We experienced an issue connecting to TheHive!"
- fi
-}
-
-if [ -f /opt/so/state/thehive.txt ]; then
- exit 0
-else
- rm -f garbage_file
- while ! wget -O garbage_file {{MASTERIP}}:9400 2>/dev/null
- do
- echo "Waiting for Elasticsearch..."
- rm -f garbage_file
- sleep 1
- done
- rm -f garbage_file
- sleep 5
- hive_init
-fi
diff --git a/salt/logstash/init.sls b/salt/logstash/init.sls
index ba0e015f4..1118b6807 100644
--- a/salt/logstash/init.sls
+++ b/salt/logstash/init.sls
@@ -198,7 +198,7 @@ so-logstash:
- /etc/pki/ca.crt:/usr/share/filebeat/ca.crt:ro
{%- if grains['role'] == 'so-eval' %}
- /nsm/zeek:/nsm/zeek:ro
- - /opt/so/log/suricata:/suricata:ro
+ - /nsm/suricata:/suricata:ro
- /opt/so/wazuh/logs/alerts:/wazuh/alerts:ro
- /opt/so/wazuh/logs/archives:/wazuh/archives:ro
- /opt/so/log/fleet/:/osquery/logs:ro
diff --git a/salt/soc/files/soc/soc.json b/salt/soc/files/soc/soc.json
index 6b76e622c..e98ee7bf7 100644
--- a/salt/soc/files/soc/soc.json
+++ b/salt/soc/files/soc/soc.json
@@ -32,7 +32,7 @@
"dateRangeMinutes": 1440,
"mostRecentlyUsedLimit": 5,
"eventFields": {
- "default": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "log.id.uid" ],
+ "default": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "log.id.uid", "network.community_id" ],
"bro_conn": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "protocol", "service", "log.id.uid" ],
"bro_dce_rpc": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "endpoint", "named_pipe", "operation", "log.id.uid" ],
"bro_dhcp": ["soc_timestamp", "source.ip", "destination.ip", "domain_name", "hostname", "message_types", "log.id.uid" ],
@@ -99,7 +99,7 @@
{ "name": "Connections", "description": "Connections grouped by destination country", "query": "event.module:zeek AND event.dataset:conn | groupby destination.geo.country_name"},
{ "name": "Connections", "description": "Connections grouped by source country", "query": "event.module:zeek AND event.dataset:conn | groupby source.geo.country_name"},
{ "name": "DCE_RPC", "description": "DCE_RPC grouped by operation", "query": "event.module:zeek AND event.dataset:dce_rpc | groupby dce_rpc.operation"},
- { "name": "DHCP", "description": "DHCP leases", "query": "event.module:zeek AND event.dataset:dhcp | groupby host.hostname host.domain dhcp.requested_address"},
+ { "name": "DHCP", "description": "DHCP leases", "query": "event.module:zeek AND event.dataset:dhcp | groupby host.hostname host.domain"},
{ "name": "DHCP", "description": "DHCP grouped by message type", "query": "event.module:zeek AND event.dataset:dhcp | groupby dhcp.message_types"},
{ "name": "DNP3", "description": "DNP3 grouped by reply", "query": "event.module:zeek AND event.dataset:dnp3 | groupby dnp3.fc_reply"},
{ "name": "DNS", "description": "DNS queries grouped by port ", "query": "event.module:zeek AND event.dataset:dns | groupby dns.query.name destination.port"},
@@ -122,8 +122,7 @@
{ "name": "KERBEROS", "description": "KERBEROS grouped by service", "query": "event.module:zeek AND event.dataset:kerberos | groupby kerberos.service"},
{ "name": "MODBUS", "description": "MODBUS grouped by function", "query": "event.module:zeek AND event.dataset:modbus | groupby modbus.function"},
{ "name": "MYSQL", "description": "MYSQL grouped by command", "query": "event.module:zeek AND event.dataset:mysql | groupby mysql.command"},
- { "name": "NOTICE", "description": "Zeek notice logs grouped by note", "query": "event.module:zeek AND event.dataset:notice | groupby notice.note"},
- { "name": "NOTICE", "description": "Zeek notice logs grouped by message", "query": "event.module:zeek AND event.dataset:notice | groupby notice.message"},
+ { "name": "NOTICE", "description": "Zeek notice logs grouped by note and message", "query": "event.module:zeek AND event.dataset:notice | groupby notice.note notice.message"},
{ "name": "NTLM", "description": "NTLM grouped by computer name", "query": "event.module:zeek AND event.dataset:ntlm | groupby ntlm.server.dns.name"},
{ "name": "PE", "description": "PE files list", "query": "event.module:zeek AND event.dataset:pe | groupby file.machine file.os file.subsystem"},
{ "name": "RADIUS", "description": "RADIUS grouped by username", "query": "event.module:zeek AND event.dataset:radius | groupby user.name.keyword"},
diff --git a/salt/soctopus/files/SOCtopus.conf b/salt/soctopus/files/SOCtopus.conf
index f2415d010..e5878cb70 100644
--- a/salt/soctopus/files/SOCtopus.conf
+++ b/salt/soctopus/files/SOCtopus.conf
@@ -1,9 +1,9 @@
-{%- set ip = salt['pillar.get']('static:masterip', '') %}
+{%- set MASTER = salt['pillar.get']('master:url_base', '') %}
{%- set HIVEKEY = salt['pillar.get']('static:hivekey', '') %}
{%- set CORTEXKEY = salt['pillar.get']('static:cortexorguserkey', '') %}
[es]
-es_url = http://{{ip}}:9200
+es_url = http://{{MASTER}}:9200
es_user = YOURESUSER
es_pass = YOURESPASS
es_index_pattern = so-*
@@ -11,7 +11,7 @@ es_verifycert = no
[cortex]
auto_analyze_alerts = no
-cortex_url = https://{{ip}}/cortex/
+cortex_url = https://{{MASTER}}/cortex/
cortex_key = {{ CORTEXKEY }}
supported_analyzers = Urlscan_io_Search,CERTatPassiveDNS
@@ -32,7 +32,7 @@ grr_user = YOURGRRUSER
grr_pass = YOURGRRPASS
[hive]
-hive_url = https://{{ip}}/thehive/
+hive_url = https://{{MASTER}}/thehive/
hive_key = {{ HIVEKEY }}
hive_tlp = 3
hive_verifycert = no
@@ -59,7 +59,7 @@ slack_url = YOURSLACKWORKSPACE
slack_webhook = YOURSLACKWEBHOOK
[playbook]
-playbook_url = https://{{ip}}/playbook
+playbook_url = https://{{MASTER}}/playbook
playbook_key = de6639318502476f2fa5aa06f43f51fb389a3d7f
playbook_verifycert = no
diff --git a/salt/soctopus/init.sls b/salt/soctopus/init.sls
index 330e727f0..ff30c3c1a 100644
--- a/salt/soctopus/init.sls
+++ b/salt/soctopus/init.sls
@@ -1,5 +1,7 @@
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
{% set MASTER = salt['grains.get']('master') %}
+{%- set MASTER_URL = salt['pillar.get']('master:url_base', '') %}
+{%- set MASTER_IP = salt['pillar.get']('static:masterip', '') %}
soctopusdir:
file.directory:
@@ -69,3 +71,5 @@ so-soctopus:
- /opt/so/conf/navigator/nav_layer_playbook.json:/etc/playbook/nav_layer_playbook.json:rw
- port_bindings:
- 0.0.0.0:7000:7000
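+    # Add a hosts entry mapping the master's URL base to its IP so SOCtopus
+    # can reach the master by hostname from inside the container.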
+ - extra_hosts:
+ - {{MASTER_URL}}:{{MASTER_IP}}
diff --git a/salt/strelka/init.sls b/salt/strelka/init.sls
index 8bdbd8274..5767531f4 100644
--- a/salt/strelka/init.sls
+++ b/salt/strelka/init.sls
@@ -112,5 +112,5 @@ strelka_filestream:
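+# Only attempt the move when the extracted/complete directory exists, so the
+# cron job stays quiet until Zeek has actually extracted files.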
strelka_zeek_extracted_sync:
cron.present:
- user: root
- - name: mv /nsm/zeek/extracted/complete/* /nsm/strelka
+ - name: '[ -d /nsm/zeek/extracted/complete/ ] && mv /nsm/zeek/extracted/complete/* /nsm/strelka/ > /dev/null 2>&1'
- minute: '*'
diff --git a/salt/suricata/files/suricata.yaml b/salt/suricata/files/suricata.yaml
index 5a0121b63..c87c75447 100644
--- a/salt/suricata/files/suricata.yaml
+++ b/salt/suricata/files/suricata.yaml
@@ -1,28 +1,28 @@
%YAML 1.1
---
-{%- set interface = salt['pillar.get']('sensor:interface', 'bond0') %}
-{%- if grains['role'] == 'so-eval' %}
-{%- set MTU = 1500 %}
-{%- elif grains['role'] == 'so-helix' %}
-{%- set MTU = 9000 %}
-{%- else %}
-{%- set MTU = salt['pillar.get']('sensor:mtu', '1500') %}
-{%- endif %}
-{%- if salt['pillar.get']('sensor:homenet') %}
- {%- set homenet = salt['pillar.get']('sensor:hnsensor', '') %}
-{%- else %}
- {%- set homenet = salt['pillar.get']('static:hnmaster', '') %}
-{%- endif %}
+ {%- set interface = salt['pillar.get']('sensor:interface', 'bond0') %}
+ {%- if grains['role'] == 'so-eval' %}
+ {%- set MTU = 1500 %}
+ {%- elif grains['role'] == 'so-helix' %}
+ {%- set MTU = 9000 %}
+ {%- else %}
+ {%- set MTU = salt['pillar.get']('sensor:mtu', '1500') %}
+ {%- endif %}
+ {%- if salt['pillar.get']('sensor:homenet') %}
+ {%- set homenet = salt['pillar.get']('sensor:hnsensor', '') %}
+ {%- else %}
+ {%- set homenet = salt['pillar.get']('static:hnmaster', '') %}
+ {%- endif %}
# Suricata configuration file. In addition to the comments describing all
# options in this file, full documentation can be found at:
-# https://redmine.openinfosecfoundation.org/projects/suricata/wiki/Suricatayaml
+# https://suricata.readthedocs.io/en/latest/configuration/suricata-yaml.html
##
## Step 1: inform Suricata about your network
##
vars:
- # more specifc is better for alert accuracy and performance
+ # more specific is better for alert accuracy and performance
address-groups:
HOME_NET: "[{{ homenet }}]"
#HOME_NET: "[192.168.0.0/16]"
@@ -39,6 +39,7 @@ vars:
DNS_SERVERS: "$HOME_NET"
TELNET_SERVERS: "$HOME_NET"
AIM_SERVERS: "$EXTERNAL_NET"
+ DC_SERVERS: "$HOME_NET"
DNP3_SERVER: "$HOME_NET"
DNP3_CLIENT: "$HOME_NET"
MODBUS_CLIENT: "$HOME_NET"
@@ -55,23 +56,11 @@ vars:
MODBUS_PORTS: 502
FILE_DATA_PORTS: "[$HTTP_PORTS,110,143]"
FTP_PORTS: 21
-
+ VXLAN_PORTS: 4789
+ TEREDO_PORTS: 3544
##
-## Step 2: select the rules to enable or disable
-##
-
-default-rule-path: /etc/suricata/rules
-rule-files:
- - all.rules
-
-classification-file: /etc/suricata/classification.config
-reference-config-file: /etc/suricata/reference.config
-# threshold-file: /usr/local/etc/suricata/threshold.config
-
-
-##
-## Step 3: select outputs to enable
+## Step 2: select outputs to enable
##
# The default logging directory. Any log or output file will be
@@ -85,6 +74,13 @@ stats:
# The interval field (in seconds) controls at what interval
# the loggers are invoked.
interval: 30
+ # Add decode events as stats.
+ #decoder-events: true
+ # Decoder event prefix in stats. Has been 'decoder' before, but that leads
+ # to missing events in the eve.stats records. See issue #2225.
+ #decoder-events-prefix: "decoder.event"
+ # Add stream events as stats.
+ #stream-events: false
# Configure the type of alert (and other) logging you would like.
outputs:
@@ -99,10 +95,9 @@ outputs:
- eve-log:
enabled: yes
filetype: regular #regular|syslog|unix_dgram|unix_stream|redis
- filename: eve.json
+ filename: /nsm/eve.json
rotate-interval: day
- community-id: true
- community-id-seed: 0
+
#prefix: "@cee: " # prefix to prepend to each log entry
# the following are valid when type: syslog above
#identity: "suricata"
@@ -124,63 +119,141 @@ outputs:
# pipelining:
# enabled: yes ## set enable to yes to enable query pipelining
# batch-size: 10 ## number of entry to keep in buffer
+
+ # Include top level metadata. Default yes.
+ #metadata: no
+
+ # include the name of the input pcap file in pcap file processing mode
+ pcap-file: false
+
+ # Community Flow ID
+ # Adds a 'community_id' field to EVE records. These are meant to give
+ # a records a predictable flow id that can be used to match records to
+      # records a predictable flow id that can be used to match records to
+ #
+ # Takes a 'seed' that needs to be same across sensors and tools
+      # Takes a 'seed' that needs to be the same across sensors and tools
+
+ # enable/disable the community id feature.
+ community-id: true
+ # Seed value for the ID output. Valid values are 0-65535.
+ community-id-seed: 0
+
+ # HTTP X-Forwarded-For support by adding an extra field or overwriting
+ # the source or destination IP address (depending on flow direction)
+ # with the one reported in the X-Forwarded-For HTTP header. This is
+ # helpful when reviewing alerts for traffic that is being reverse
+ # or forward proxied.
+ xff:
+ enabled: no
+ # Two operation modes are available, "extra-data" and "overwrite".
+ mode: extra-data
+ # Two proxy deployments are supported, "reverse" and "forward". In
+ # a "reverse" deployment the IP address used is the last one, in a
+ # "forward" deployment the first IP address is used.
+ deployment: reverse
+ # Header name where the actual IP address will be reported, if more
+ # than one IP address is present, the last IP address will be the
+ # one taken into consideration.
+ header: X-Forwarded-For
+
types:
- alert:
- # payload: yes # enable dumping payload in Base64
- # payload-buffer-size: 4kb # max size of payload buffer to output in eve-log
- # payload-printable: yes # enable dumping payload in printable (lossy) format
- # packet: yes # enable dumping of packet (without stream segments)
- # http-body: yes # enable dumping of http body in Base64
- # http-body-printable: yes # enable dumping of http body in printable format
- metadata:
- app-layer: false
- flow: false
- rule:
- metadata: true
- raw: true
+ payload: no # enable dumping payload in Base64
+ payload-buffer-size: 4kb # max size of payload buffer to output in eve-log
+ payload-printable: yes # enable dumping payload in printable (lossy) format
+ packet: yes # enable dumping of packet (without stream segments)
+ metadata:
+ app-layer: false
+ flow: false
+ rule:
+ metadata: true
+ raw: true
+
+ # http-body: yes # Requires metadata; enable dumping of http body in Base64
+ # http-body-printable: yes # Requires metadata; enable dumping of http body in printable format
# Enable the logging of tagged packets for rules using the
# "tag" keyword.
tagged-packets: no
-
- # HTTP X-Forwarded-For support by adding an extra field or overwriting
- # the source or destination IP address (depending on flow direction)
- # with the one reported in the X-Forwarded-For HTTP header. This is
- # helpful when reviewing alerts for traffic that is being reverse
- # or forward proxied.
- xff:
- enabled: no
- # Two operation modes are available, "extra-data" and "overwrite".
- mode: extra-data
- # Two proxy deployments are supported, "reverse" and "forward". In
- # a "reverse" deployment the IP address used is the last one, in a
- # "forward" deployment the first IP address is used.
- deployment: reverse
- # Header name where the actual IP address will be reported, if more
- # than one IP address is present, the last IP address will be the
- # one taken into consideration.
- header: X-Forwarded-For
+ #- anomaly:
+ # Anomaly log records describe unexpected conditions such
+ # as truncated packets, packets with invalid IP/UDP/TCP
+ # length values, and other events that render the packet
+ # invalid for further processing or describe unexpected
+ # behavior on an established stream. Networks which
+ # experience high occurrences of anomalies may experience
+ # packet processing degradation.
+ #
+ # Anomalies are reported for the following:
+ # 1. Decode: Values and conditions that are detected while
+ # decoding individual packets. This includes invalid or
+ # unexpected values for low-level protocol lengths as well
+ # as stream related events (TCP 3-way handshake issues,
+ # unexpected sequence number, etc).
+ # 2. Stream: This includes stream related events (TCP
+ # 3-way handshake issues, unexpected sequence number,
+ # etc).
+ # 3. Application layer: These denote application layer
+ # specific conditions that are unexpected, invalid or are
+ # unexpected given the application monitoring state.
+ #
+ # By default, anomaly logging is disabled. When anomaly
+ # logging is enabled, applayer anomaly reporting is
+ # enabled.
+ # enabled: no
+ #
+ # Choose one or more types of anomaly logging and whether to enable
+ # logging of the packet header for packet anomalies.
+ # types:
+ # decode: no
+ # stream: no
+ # applayer: yes
+ #packethdr: no
#- http:
- # extended: no # enable this for extended logging information
+ # extended: yes # enable this for extended logging information
# custom allows additional http fields to be included in eve-log
# the example below adds three additional fields when uncommented
#custom: [Accept-Encoding, Accept-Language, Authorization]
+ # set this value to one and only one among {both, request, response}
+ # to dump all http headers for every http request and/or response
+ # dump-all-headers: none
#- dns:
- # control logging of queries and answers
- # default yes, no to disable
- # query: no # enable logging of DNS queries
- # answer: no # enable logging of DNS answers
- # control which RR types are logged
- # all enabled if custom not specified
- #custom: [a, aaaa, cname, mx, ns, ptr, txt]
+ # This configuration uses the new DNS logging format,
+ # the old configuration is still available:
+ # https://suricata.readthedocs.io/en/latest/output/eve/eve-json-output.html#dns-v1-format
+
+ # As of Suricata 5.0, version 2 of the eve dns output
+ # format is the default.
+ #version: 2
+
+ # Enable/disable this logger. Default: enabled.
+ #enabled: yes
+
+ # Control logging of requests and responses:
+ # - requests: enable logging of DNS queries
+ # - responses: enable logging of DNS answers
+ # By default both requests and responses are logged.
+ #requests: no
+ #responses: no
+
+ # Format of answer logging:
+ # - detailed: array item per answer
+ # - grouped: answers aggregated by type
+ # Default: all
+ #formats: [detailed, grouped]
+
+ # Types to log, based on the query type.
+ # Default: all.
+ #types: [a, aaaa, cname, mx, ns, ptr, txt]
#- tls:
- # extended: no # enable this for extended logging information
+ # extended: yes # enable this for extended logging information
# output TLS transaction where the session is resumed using a
# session id
#session-resumption: no
# custom allows to control which tls fields that are included
# in eve-log
- #custom: [subject, issuer, session_resumed, serial, fingerprint, sni, version, not_before, not_after, certificate, chain]
+ #custom: [subject, issuer, session_resumed, serial, fingerprint, sni, version, not_before, not_after, certificate, chain, ja3, ja3s]
#- files:
# force-magic: no # force logging magic on all logged files
# force logging of checksums, available hash functions are md5,
@@ -204,60 +277,42 @@ outputs:
#md5: [body, subject]
#- dnp3
+ #- ftp
+ #- rdp
#- nfs
- #- ssh:
+ #- smb
+ #- tftp
+ #- ikev2
+ #- krb5
+ #- snmp
+ #- sip
+ #- dhcp:
+ # enabled: yes
+ # When extended mode is on, all DHCP messages are logged
+ # with full detail. When extended mode is off (the
+ # default), just enough information to map a MAC address
+ # to an IP address is logged.
+ # extended: no
+ #- ssh
#- stats:
# totals: yes # stats for all threads merged together
# threads: no # per thread stats
# deltas: no # include delta values
# bi-directional flows
- #- flow:
+ #- flow
# uni-directional flows
#- netflow
- # Vars log flowbits and other packet and flow vars
- #- vars
- # alert output for use with Barnyard2
+ # Metadata event type. Triggered whenever a pktvar is saved
+ # and will include the pktvars, flowvars, flowbits and
+ # flowints.
+ #- metadata
+
+ # deprecated - unified2 alert format for use with Barnyard2
- unified2-alert:
enabled: no
- filename: unified2.alert
-
- # File size limit. Can be specified in kb, mb, gb. Just a number
- # is parsed as bytes.
- #limit: 32mb
-
- # By default unified2 log files have the file creation time (in
- # unix epoch format) appended to the filename. Set this to yes to
- # disable this behaviour.
- #nostamp: no
-
- # Sensor ID field of unified2 alerts.
- #sensor-id: 0
-
- # Include payload of packets related to alerts. Defaults to true, set to
- # false if payload is not required.
- #payload: yes
-
- # HTTP X-Forwarded-For support by adding the unified2 extra header or
- # overwriting the source or destination IP address (depending on flow
- # direction) with the one reported in the X-Forwarded-For HTTP header.
- # This is helpful when reviewing alerts for traffic that is being reverse
- # or forward proxied.
- xff:
- enabled: no
- # Two operation modes are available, "extra-data" and "overwrite". Note
- # that in the "overwrite" mode, if the reported IP address in the HTTP
- # X-Forwarded-For header is of a different version of the packet
- # received, it will fall-back to "extra-data" mode.
- mode: extra-data
- # Two proxy deployments are supported, "reverse" and "forward". In
- # a "reverse" deployment the IP address used is the last one, in a
- # "forward" deployment the first IP address is used.
- deployment: reverse
- # Header name where the actual IP address will be reported, if more
- # than one IP address is present, the last IP address will be the
- # one taken into consideration.
- header: X-Forwarded-For
+ # for further options see:
+ # https://suricata.readthedocs.io/en/suricata-5.0.0/configuration/suricata-yaml.html#alert-output-for-use-with-barnyard2-unified2-alert
# a line based log of HTTP requests (no alerts)
- http-log:
@@ -266,7 +321,7 @@ outputs:
append: yes
#extended: yes # enable this for extended logging information
#custom: yes # enabled the custom logging format (defined by customformat)
-
+ #customformat: ""
#filetype: regular # 'regular', 'unix_stream' or 'unix_dgram'
# a line based log of TLS handshake parameters (no alerts)
@@ -276,6 +331,7 @@ outputs:
append: yes
#extended: yes # Log extended information like fingerprint
#custom: yes # enabled the custom logging format (defined by customformat)
+ #customformat: ""
#filetype: regular # 'regular', 'unix_stream' or 'unix_dgram'
# output TLS transaction where the session is resumed using a
# session id
@@ -286,13 +342,6 @@ outputs:
enabled: no
#certs-log-dir: certs # directory to store the certificates files
- # a line based log of DNS requests and/or replies (no alerts)
- - dns-log:
- enabled: no
- filename: dns.log
- append: yes
- #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram'
-
# Packet log... log packets in pcap format. 3 modes of operation: "normal"
# "multi" and "sguil".
#
@@ -334,6 +383,17 @@ outputs:
# If set to a value will enable ring buffer mode. Will keep Maximum of "max-files" of size "limit"
max-files: 2000
+ # Compression algorithm for pcap files. Possible values: none, lz4.
+ # Enabling compression is incompatible with the sguil mode. Note also
+ # that on Windows, enabling compression will *increase* disk I/O.
+ compression: none
+
+ # Further options for lz4 compression. The compression level can be set
+ # to a value between 0 and 16, where higher values result in higher
+ # compression.
+ #lz4-checksum: no
+ #lz4-level: 0
+
mode: normal # normal, multi or sguil.
# Directory to place pcap files. If not provided the default log
@@ -352,7 +412,7 @@ outputs:
append: yes
#filetype: regular # 'regular', 'unix_stream' or 'unix_dgram'
- # alert output to prelude (http://www.prelude-technologies.com/) only
+ # alert output to prelude (https://www.prelude-siem.org/) only
# available if Suricata has been compiled with --enable-prelude
- alert-prelude:
enabled: no
@@ -360,14 +420,14 @@ outputs:
log-packet-content: no
log-packet-header: yes
- # Stats.log contains data from various counters of the suricata engine.
+ # Stats.log contains data from various counters of the Suricata engine.
- stats:
enabled: yes
filename: stats.log
append: yes # append to file (yes) or overwrite it (no)
totals: yes # stats for all threads merged together
threads: no # per thread stats
- #null-values: yes # print counters that have value 0
+ null-values: yes # print counters that have value 0
# a line based alerts log similar to fast.log into syslog
- syslog:
@@ -379,60 +439,89 @@ outputs:
#level: Info ## possible levels: Emergency, Alert, Critical,
## Error, Warning, Notice, Info, Debug
- # a line based information for dropped packets in IPS mode
+ # deprecated a line based information for dropped packets in IPS mode
- drop:
enabled: no
- filename: drop.log
- append: yes
- #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram'
+ # further options documented at:
+ # https://suricata.readthedocs.io/en/suricata-5.0.0/configuration/suricata-yaml.html#drop-log-a-line-based-information-for-dropped-packets
- # output module to store extracted files to disk
+ # Output module for storing files on disk. Files are stored in a
+  # directory whose name consists of the first 2 characters of the
+ # SHA256 of the file. Each file is given its SHA256 as a filename.
#
- # The files are stored to the log-dir in a format "file." where is
- # an incrementing number starting at 1. For each file "file." a meta
- # file "file..meta" is created.
+ # When a duplicate file is found, the existing file is touched to
+ # have its timestamps updated.
#
- # File extraction depends on a lot of things to be fully done:
- # - file-store stream-depth. For optimal results, set this to 0 (unlimited)
- # - http request / response body sizes. Again set to 0 for optimal results.
- # - rules that contain the "filestore" keyword.
+ # Unlike the older filestore, metadata is not written out by default
+ # as each file should already have a "fileinfo" record in the
+  # eve.log. If write-fileinfo is set to yes, each file will have one or
+  # more associated .json files that consist of the fileinfo
+ # record. A fileinfo file will be written for each occurrence of the
+ # file seen using a filename suffix to ensure uniqueness.
+ #
+ # To prune the filestore directory see the "suricatactl filestore
+ # prune" command which can delete files over a certain age.
- file-store:
- enabled: no # set to yes to enable
- log-dir: files # directory to store the files
- force-magic: no # force logging magic on all stored files
- # force logging of checksums, available hash functions are md5,
- # sha1 and sha256
- #force-hash: [md5]
- force-filestore: no # force storing of all files
- # override global stream-depth for sessions in which we want to
- # perform file extraction. Set to 0 for unlimited.
+ version: 2
+ enabled: no
+
+ # Set the directory for the filestore. If the path is not
+    # absolute it will be relative to the default-log-dir.
+ #dir: filestore
+
+ # Write out a fileinfo record for each occurrence of a
+ # file. Disabled by default as each occurrence is already logged
+ # as a fileinfo record to the main eve-log.
+ #write-fileinfo: yes
+
+ # Force storing of all files. Default: no.
+ #force-filestore: yes
+
+ # Override the global stream-depth for sessions in which we want
+ # to perform file extraction. Set to 0 for unlimited.
#stream-depth: 0
- #waldo: file.waldo # waldo file to store the file_id across runs
- # uncomment to disable meta file writing
- #write-meta: no
- # uncomment the following variable to define how many files can
+
+ # Uncomment the following variable to define how many files can
# remain open for filestore by Suricata. Default value is 0 which
# means files get closed after each write
#max-open-files: 1000
- # output module to log files tracked in a easily parsable json format
- - file-log:
- enabled: no
- filename: files-json.log
- append: yes
- #filetype: regular # 'regular', 'unix_stream' or 'unix_dgram'
+ # Force logging of checksums, available hash functions are md5,
+ # sha1 and sha256. Note that SHA256 is automatically forced by
+ # the use of this output module as it uses the SHA256 as the
+ # file naming scheme.
+ #force-hash: [sha1, md5]
+ # NOTE: X-Forwarded configuration is ignored if write-fileinfo is disabled
+ # HTTP X-Forwarded-For support by adding an extra field or overwriting
+ # the source or destination IP address (depending on flow direction)
+ # with the one reported in the X-Forwarded-For HTTP header. This is
+ # helpful when reviewing alerts for traffic that is being reverse
+ # or forward proxied.
+ xff:
+ enabled: no
+ # Two operation modes are available, "extra-data" and "overwrite".
+ mode: extra-data
+ # Two proxy deployments are supported, "reverse" and "forward". In
+ # a "reverse" deployment the IP address used is the last one, in a
+ # "forward" deployment the first IP address is used.
+ deployment: reverse
+ # Header name where the actual IP address will be reported, if more
+ # than one IP address is present, the last IP address will be the
+ # one taken into consideration.
+ header: X-Forwarded-For
- force-magic: no # force logging magic on all logged files
- # force logging of checksums, available hash functions are md5,
- # sha1 and sha256
- #force-hash: [md5]
+ # deprecated - file-store v1
+ - file-store:
+ enabled: no
+ # further options documented at:
+ # https://suricata.readthedocs.io/en/suricata-5.0.0/file-extraction/file-extraction.html#file-store-version-1
# Log TCP data after stream normalization
# 2 types: file or dir. File logs into a single logfile. Dir creates
# 2 files per TCP session and stores the raw TCP data into them.
# Using 'both' will enable both file and dir modes.
#
- # Note: limited by stream.depth
+ # Note: limited by stream.reassembly.depth
- tcp-data:
enabled: no
type: file
@@ -452,7 +541,7 @@ outputs:
# Lua Output Support - execute lua script to generate alert and event
# output.
# Documented at:
- # https://redmine.openinfosecfoundation.org/projects/suricata/wiki/Lua_Output
+ # https://suricata.readthedocs.io/en/latest/output/lua-output.html
- lua:
enabled: no
#scripts-dir: /etc/suricata/lua-output/
@@ -466,20 +555,20 @@ logging:
# Note that debug level logging will only be emitted if Suricata was
# compiled with the --enable-debug configure option.
#
- # This value is overriden by the SC_LOG_LEVEL env var.
+ # This value is overridden by the SC_LOG_LEVEL env var.
default-log-level: notice
# The default output format. Optional parameter, should default to
- # something reasonable if not provided. Can be overriden in an
+ # something reasonable if not provided. Can be overridden in an
# output section. You can leave this out to get the default.
#
- # This value is overriden by the SC_LOG_FORMAT env var.
+ # This value is overridden by the SC_LOG_FORMAT env var.
#default-log-format: "[%i] %t - (%f:%l) <%d> (%n) -- "
# A regex to filter output. Can be overridden in an output section.
# Defaults to empty (no filter).
#
- # This value is overriden by the SC_LOG_OP_FILTER env var.
+ # This value is overridden by the SC_LOG_OP_FILTER env var.
default-output-filter:
# Define your logging outputs. If none are defined, or they are all
@@ -491,11 +580,23 @@ logging:
- file:
enabled: yes
level: info
- filename: /var/log/suricata/suricata.log
+ filename: suricata.log
# type: json
- syslog:
enabled: no
+ facility: local5
+ format: "[%i] <%d> -- "
+ # type: json
+
+##
+## Step 4: configure common capture settings
+##
+## See "Advanced Capture Options" below for more options, including NETMAP
+## and PF_RING.
+##
+
+# Linux high speed capture support
af-packet:
- interface: {{ interface }}
# Number of receive threads. "auto" uses the number of cores
@@ -505,28 +606,21 @@ af-packet:
# Default AF_PACKET cluster type. AF_PACKET can load balance per flow or per hash.
# This is only supported for Linux kernel > 3.1
# possible value are:
- # * cluster_round_robin: round robin load balancing
# * cluster_flow: all packets of a given flow are send to the same socket
# * cluster_cpu: all packets treated in kernel by a CPU are send to the same socket
# * cluster_qm: all packets linked by network card to a RSS queue are sent to the same
# socket. Requires at least Linux 3.14.
- # * cluster_random: packets are sent randomly to sockets but with an equipartition.
- # Requires at least Linux 3.14.
- # * cluster_rollover: kernel rotates between sockets filling each socket before moving
- # to the next. Requires at least Linux 3.10.
+ # * cluster_ebpf: eBPF file load balancing. See doc/userguide/capture-hardware/ebpf-xdp.rst for
+ # more info.
# Recommended modes are cluster_flow on most boxes and cluster_cpu or cluster_qm on system
# with capture card using RSS (require cpu affinity tuning and system irq tuning)
cluster-type: cluster_flow
# In some fragmentation case, the hash can not be computed. If "defrag" is set
# to yes, the kernel will do the needed defragmentation before sending the packets.
defrag: yes
- # After Linux kernel 3.10 it is possible to activate the rollover option: if a socket is
- # full then kernel will send the packet on the next socket with room available. This option
- # can minimize packet drop and increase the treated bandwidth on single intensive flow.
- #rollover: yes
# To use the ring feature of AF_PACKET, set 'use-mmap' to yes
- #use-mmap: yes
- # Lock memory map to avoid it goes to swap. Be careful that over suscribing could lock
+ use-mmap: yes
+ # Lock memory map to avoid it goes to swap. Be careful that over subscribing could lock
# your system
#mmap-locked: yes
# Use tpacket_v3 capture mode, only active if use-mmap is true
@@ -572,13 +666,14 @@ af-packet:
# will not be copied.
#copy-mode: ips
#copy-iface: eth1
+ # For eBPF and XDP setup including bypass, filter and load balancing, please
+ # see doc/userguide/capture-hardware/ebpf-xdp.rst for more info.
# Put default values here. These will be used for an interface that is not
# in the list above.
- interface: default
#threads: auto
#use-mmap: no
- #rollover: yes
#tpacket-v3: yes
# Cross platform libpcap capture support
@@ -595,7 +690,7 @@ pcap:
# Possible values are:
# - yes: checksum validation is forced
# - no: checksum validation is disabled
- # - auto: suricata uses a statistical approach to detect when
+ # - auto: Suricata uses a statistical approach to detect when
# checksum off-loading is used. (default)
# Warning: 'checksum-validation' must be set to yes to have any validation
#checksum-checks: auto
@@ -618,7 +713,7 @@ pcap-file:
# Possible values are:
# - yes: checksum validation is forced
# - no: checksum validation is disabled
- # - auto: suricata uses a statistical approach to detect when
+ # - auto: Suricata uses a statistical approach to detect when
# checksum off-loading is used. (default)
# Warning: 'checksum-validation' must be set to yes to have checksum tested
checksum-checks: auto
@@ -639,42 +734,66 @@ pcap-file:
# "detection-only" enables protocol detection only (parser disabled).
app-layer:
protocols:
+ krb5:
+ enabled: yes
+ snmp:
+ enabled: yes
+ ikev2:
+ enabled: yes
tls:
- enabled: detection-only
+ enabled: yes
detection-ports:
dp: 443
- # Completely stop processing TLS/SSL session after the handshake
- # completed. If bypass is enabled this will also trigger flow
- # bypass. If disabled (the default), TLS/SSL session is still
- # tracked for Heartbleed and other anomalies.
- #no-reassemble: yes
+ # Generate JA3 fingerprint from client hello. If not specified it
+ # will be disabled by default, but enabled if rules require it.
+ #ja3-fingerprints: auto
+
+ # What to do when the encrypted communications start:
+ # - default: keep tracking TLS session, check for protocol anomalies,
+ # inspect tls_* keywords. Disables inspection of unmodified
+ # 'content' signatures.
+ # - bypass: stop processing this flow as much as possible. No further
+ # TLS parsing and inspection. Offload flow bypass to kernel
+ # or hardware if possible.
+ # - full: keep tracking and inspection as normal. Unmodified content
+ # keyword signatures are inspected as well.
+ #
+ # For best performance, select 'bypass'.
+ #
+ #encryption-handling: default
+
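To confirm which of these values is actually in effect after editing the file, the parsed configuration can be dumped rather than re-reading the YAML by eye; a small sketch, assuming the suricata binary is reachable on the host or inside the container:

  # Show the effective TLS protocol settings from the loaded configuration.
  suricata --dump-config -c /etc/suricata/suricata.yaml | grep 'app-layer.protocols.tls'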
dcerpc:
- enabled: detection-only
+ enabled: yes
ftp:
- enabled: detection-only
+ enabled: yes
+ # memcap: 64mb
+ # RDP, disabled by default.
+ rdp:
+ #enabled: no
ssh:
- enabled: detection-only
+ enabled: yes
smtp:
- enabled: detection-only
+ enabled: yes
+ raw-extraction: no
# Configure SMTP-MIME Decoder
mime:
# Decode MIME messages from SMTP transactions
# (may be resource intensive)
# This field supercedes all others because it turns the entire
# process on or off
- decode-mime: detection-only
+ decode-mime: yes
# Decode MIME entity bodies (ie. base64, quoted-printable, etc.)
- decode-base64: detection-only
- decode-quoted-printable: detection-only
+ decode-base64: yes
+ decode-quoted-printable: yes
# Maximum bytes per header data value stored in the data structure
# (default is 2000)
header-value-depth: 2000
# Extract URLs and save in state data structure
- extract-urls: detection-only
+ extract-urls: yes
# Set to yes to compute the md5 of the mail body. You will then
# be able to journalize it.
body-md5: no
@@ -685,19 +804,18 @@ app-layer:
content-inspect-window: 4096
imap:
enabled: detection-only
- msn:
- enabled: detection-only
smb:
- enabled: detection-only
+ enabled: yes
detection-ports:
dp: 139, 445
- # smb2 detection is disabled internally inside the engine.
- #smb2:
- # enabled: yes
- # Note: NFS parser depends on Rust support: pass --enable-rust
- # to configure.
+
+ # Stream reassembly size for SMB streams. By default track it completely.
+ #stream-depth: 0
+
nfs:
- enabled: no
+ enabled: yes
+ tftp:
+ enabled: yes
dns:
# memcaps. Globally and per flow/state.
#global-memcap: 16mb
@@ -708,16 +826,17 @@ app-layer:
#request-flood: 500
tcp:
- enabled: detection-only
+ enabled: yes
detection-ports:
dp: 53
udp:
- enabled: detection-only
+ enabled: yes
detection-ports:
dp: 53
http:
- enabled: detection-only
- # memcap: 64mb
+ enabled: yes
+ # memcap: Maximum memory capacity for HTTP
+ # Default is unlimited; a value such as 64mb can be set
# default-config: Used when no server-config matches
# personality: List of personalities used by default
@@ -725,37 +844,15 @@ app-layer:
# by http_client_body & pcre /P option.
# response-body-limit: Limit reassembly of response body for inspection
# by file_data, http_server_body & pcre /Q option.
- # double-decode-path: Double decode path section of the URI
- # double-decode-query: Double decode query section of the URI
- # response-body-decompress-layer-limit:
- # Limit to how many layers of compression will be
- # decompressed. Defaults to 2.
#
+ # For advanced options, see the user guide
+
+
# server-config: List of server configurations to use if address matches
- # address: List of ip addresses or networks for this block
+ # address: List of IP addresses or networks for this block
# personalitiy: List of personalities used by this block
- # request-body-limit: Limit reassembly of request body for inspection
- # by http_client_body & pcre /P option.
- # response-body-limit: Limit reassembly of response body for inspection
- # by file_data, http_server_body & pcre /Q option.
- # double-decode-path: Double decode path section of the URI
- # double-decode-query: Double decode query section of the URI
#
- # uri-include-all: Include all parts of the URI. By default the
- # 'scheme', username/password, hostname and port
- # are excluded. Setting this option to true adds
- # all of them to the normalized uri as inspected
- # by http_uri, urilen, pcre with /U and the other
- # keywords that inspect the normalized uri.
- # Note that this does not affect http_raw_uri.
- # Also, note that including all was the default in
- # 1.4 and 2.0beta1.
- #
- # meta-field-limit: Hard size limit for request and response size
- # limits. Applies to request line and headers,
- # response line and headers. Does not apply to
- # request or response bodies. Default is 18k.
- # If this limit is reached an event is raised.
+ # Then, all the fields from default-config can be overloaded
#
# Currently Available Personalities:
# Minimal, Generic, IDS (default), IIS_4_0, IIS_5_0, IIS_5_1, IIS_6_0,
@@ -781,6 +878,20 @@ app-layer:
# auto will use http-body-inline mode in IPS mode, yes or no set it statically
http-body-inline: auto
+ # Decompress SWF files.
+ # Types: 'deflate', 'lzma', or 'both'; 'both' will decompress deflate and lzma
+ # compress-depth:
+ # Specifies the maximum amount of data to decompress,
+ # set 0 for unlimited.
+ # decompress-depth:
+ # Specifies the maximum amount of decompressed data to obtain,
+ # set 0 for unlimited.
+ swf-decompression:
+ enabled: yes
+ type: both
+ compress-depth: 0
+ decompress-depth: 0
+
# Take a random value for inspection sizes around the specified value.
# This lower the risk of some evasion technics but could lead
# detection change between runs. It is set to 'yes' by default.
@@ -795,6 +906,15 @@ app-layer:
double-decode-path: no
double-decode-query: no
+ # Can disable LZMA decompression
+ #lzma-enabled: yes
+ # Memory limit usage for LZMA decompression dictionary
+ # Data is decompressed until dictionary reaches this size
+ #lzma-memlimit: 1mb
+ # Maximum decompressed size with a compression ratio
+ # above 2048 (only LZMA can reach this ratio, deflate cannot)
+ #compression-bomb-limit: 1mb
+
server-config:
#- apache:
@@ -854,10 +974,15 @@ app-layer:
dp: 44818
sp: 44818
- # Note: parser depends on experimental Rust support
- # with --enable-rust-experimental passed to configure
ntp:
- enabled: no
+ enabled: yes
+
+ dhcp:
+ enabled: yes
+
+ # SIP, disabled by default.
+ sip:
+ #enabled: no
# Limit for the maximum number of asn1 frames to decode (default 256)
asn1-max-frames: 256
@@ -885,13 +1010,18 @@ run-as:
# Default location of the pid file. The pid file is only used in
# daemon mode (start Suricata with -D). If not running in daemon mode
# the --pidfile command line option must be used to create a pid file.
-#pid-file: /usr/local/var/run/suricata.pid
+#pid-file: /var/run/suricata.pid
# Daemon working directory
# Suricata will change directory to this one if provided
# Default: "/"
#daemon-directory: "/"
+# Umask.
+# Suricata will use this umask if it is provided. By default it will use the
+# umask passed on by the shell.
+#umask: 022
+
# Suricata core dump configuration. Limits the size of the core dump file to
# approximately max-dump. The actual core dump size will be a multiple of the
# page size. Core dumps that would be larger than max-dump are truncated. On
@@ -904,7 +1034,7 @@ run-as:
coredump:
max-dump: unlimited
-# If suricata box is a router for the sniffed networks, set it to 'router'. If
+# If Suricata box is a router for the sniffed networks, set it to 'router'. If
# it is a pure sniffing setup, set it to 'sniffer-only'.
# If set to auto, the variable is internally switch to 'router' in IPS mode
# and 'sniffer-only' in IDS mode.
@@ -914,36 +1044,29 @@ host-mode: auto
# Number of packets preallocated per thread. The default is 1024. A higher number
# will make sure each CPU will be more easily kept busy, but may negatively
# impact caching.
-#
-# If you are using the CUDA pattern matcher (mpm-algo: ac-cuda), different rules
-# apply. In that case try something like 60000 or more. This is because the CUDA
-# pattern matcher buffers and scans as many packets as possible in parallel.
-#max-pending-packets: 1024
+max-pending-packets: 5000
# Runmode the engine should use. Please check --list-runmodes to get the available
-# runmodes for each packet acquisition method. Defaults to "autofp" (auto flow pinned
-# load balancing).
+# runmodes for each packet acquisition method. Default depends on selected capture
+# method. 'workers' generally gives best performance.
runmode: workers
# Specifies the kind of flow load balancer used by the flow pinned autofp mode.
#
# Supported schedulers are:
#
-# round-robin - Flows assigned to threads in a round robin fashion.
-# active-packets - Flows assigned to threads that have the lowest number of
-# unprocessed packets (default).
-# hash - Flow alloted usihng the address hash. More of a random
-# technique. Was the default in Suricata 1.2.1 and older.
+# hash - Flow assigned to threads using the 5-7 tuple hash.
+# ippair - Flow assigned to threads using addresses only.
#
-#autofp-scheduler: active-packets
+#autofp-scheduler: hash
# Preallocated size for packet. Default is 1514 which is the classical
# size for pcap on ethernet. You should adjust this value to the highest
# packet size (MTU + hardware header) on your system.
default-packet-size: {{ MTU + 15 }}
-# Unix command socket can be used to pass commands to suricata.
-# An external tool can then connect to get information from suricata
+# Unix command socket can be used to pass commands to Suricata.
+# An external tool can then connect to get information from Suricata
# or trigger some modifications of the engine. Set enabled to yes
# to activate the feature. In auto mode, the feature will only be
# activated in live capture mode. You can use the filename variable to set
@@ -956,6 +1079,10 @@ unix-command:
#magic-file: /usr/share/file/magic
#magic-file:
+# GeoIP2 database file. Specify path and filename of GeoIP2 database
+# if using rules with "geoip" rule option.
+#geoip-database: /usr/local/share/GeoLite2/GeoLite2-Country.mmdb
+
legacy:
uricontent: enabled
@@ -963,7 +1090,7 @@ legacy:
## Detection settings
##
-# Set the order of alerts bassed on actions
+# Set the order of alerts based on actions
# The default order is pass, drop, reject, alert
# action-order:
# - pass
@@ -972,8 +1099,8 @@ legacy:
# - alert
# IP Reputation
-#reputation-categories-file: /usr/local/etc/suricata/iprep/categories.txt
-#default-reputation-path: /usr/local/etc/suricata/iprep
+#reputation-categories-file: /etc/suricata/iprep/categories.txt
+#default-reputation-path: /etc/suricata/iprep
#reputation-files:
# - reputation.list
@@ -1051,10 +1178,10 @@ defrag:
# emergency-recovery is the percentage of flows that the engine need to
# prune before unsetting the emergency state. The emergency state is activated
# when the memcap limit is reached, allowing to create new flows, but
-# prunning them with the emergency timeouts (they are defined below).
+# pruning them with the emergency timeouts (they are defined below).
# If the memcap is reached, the engine will try to prune flows
-# with the default timeouts. If it doens't find a flow to prune, it will set
-# the emergency bit and it will try again with more agressive timeouts.
+# with the default timeouts. If it doesn't find a flow to prune, it will set
+# the emergency bit and it will try again with more aggressive timeouts.
# If that doesn't work, then it will try to kill the last time seen flows
# not in use.
# The memcap can be specified in kb, mb, gb. Just a number indicates it's
@@ -1077,7 +1204,7 @@ vlan:
# Specific timeouts for flows. Here you can specify the timeouts that the
# active flows will wait to transit from the current state to another, on each
-# protocol. The value of "new" determine the seconds to wait after a hanshake or
+# protocol. The value of "new" determine the seconds to wait after a handshake or
# stream startup before the engine free the data of that flow it doesn't
# change the state to established (usually if we don't receive more packets
# of that flow). The value of "established" is the amount of
@@ -1138,7 +1265,7 @@ flow-timeouts:
# # packet. If csum validation is specified as
# # "yes", then packet with invalid csum will not
# # be processed by the engine stream/app layer.
-# # Warning: locally generated trafic can be
+# # Warning: locally generated traffic can be
# # generated without checksum due to hardware offload
# # of checksum. You can control the handling of checksum
# # on a per-interface basis via the 'checksum-checks'
@@ -1149,7 +1276,9 @@ flow-timeouts:
# inline: no # stream inline mode
# drop-invalid: yes # in inline mode, drop packets that are invalid with regards to streaming engine
# max-synack-queued: 5 # Max different SYN/ACKs to queue
-# bypass: no # Bypass packets when stream.depth is reached
+# bypass: no # Bypass packets when stream.reassembly.depth is reached.
+# # Warning: first side to reach this triggers
+# # the bypass.
#
# reassembly:
# memcap: 64mb # Can be specified in kb, mb, gb. Just a number
@@ -1222,9 +1351,22 @@ host:
decoder:
# Teredo decoder is known to not be completely accurate
- # it will sometimes detect non-teredo as teredo.
+ # as it will sometimes detect non-teredo as teredo.
teredo:
enabled: true
+ # ports to look for Teredo. Max 4 ports. If no ports are given, or
+ # the value is set to 'any', Teredo detection runs on _all_ UDP packets.
+ ports: $TEREDO_PORTS # syntax: '[3544, 1234]' or '3533' or 'any'.
+
+ # VXLAN decoder is assigned to up to 4 UDP ports. By default only the
+ # IANA assigned port 4789 is enabled.
+ vxlan:
+ enabled: true
+ ports: $VXLAN_PORTS # syntax: '8472, 4789'
+ # ERSPAN Type I decode support
+ erspan:
+ typeI:
+ enabled: false
##
@@ -1292,7 +1434,6 @@ detect:
# The supported algorithms are:
# "ac" - Aho-Corasick, default implementation
# "ac-bs" - Aho-Corasick, reduced memory implementation
-# "ac-cuda" - Aho-Corasick, CUDA implementation
# "ac-ks" - Aho-Corasick, "Ken Steele" variant
# "hs" - Hyperscan, available when built with Hyperscan support
#
@@ -1305,10 +1446,6 @@ detect:
# to be set to "single", because of ac's memory requirements, unless the
# ruleset is small enough to fit in one's memory, in which case one can
# use "full" with "ac". Rest of the mpms can be run in "full" mode.
-#
-# There is also a CUDA pattern matcher (only available if Suricata was
-# compiled with --enable-cuda: b2g_cuda. Make sure to update your
-# max-pending-packets setting above as well if you use b2g_cuda.
mpm-algo: auto
@@ -1338,19 +1475,26 @@ threading:
{%- if salt['pillar.get']('sensor:suriprocs') %}
cpu-affinity:
- management-cpu-set:
- cpu: [ all ] # include only these cpus in affinity settings
+ cpu: [ all ] # include only these CPUs in affinity settings
- receive-cpu-set:
- cpu: [ all ] # include only these cpus in affinity settings
+ cpu: [ all ] # include only these CPUs in affinity settings
- worker-cpu-set:
cpu: [ "all" ]
mode: "exclusive"
# Use explicitely 3 threads and don't compute number by using
# detect-thread-ratio variable:
+ # threads: 3
threads: {{ salt['pillar.get']('sensor:suriprocs') }}
prio:
+ low: [ 0 ]
+ medium: [ "1-2" ]
+ high: [ 3 ]
default: "high"
- {% endif %}
-
+ #- verdict-cpu-set:
+ # cpu: [ 0 ]
+ # prio:
+ # default: "high"
+ {%- endif -%}
{%- if salt['pillar.get']('sensor:suripins') %}
cpu-affinity:
- management-cpu-set:
@@ -1367,10 +1511,6 @@ threading:
default: "high"
{% endif %}
- #- verdict-cpu-set:
- # cpu: [ 0 ]
- # prio:
- # default: "high"
#
# By default Suricata creates one "detect" thread per available CPU/CPU core.
# This setting allows controlling this behaviour. A ratio setting of 2 will
@@ -1425,6 +1565,11 @@ profiling:
filename: keyword_perf.log
append: yes
+ prefilter:
+ enabled: yes
+ filename: prefilter_perf.log
+ append: yes
+
# per rulegroup profiling
rulegroups:
enabled: yes
@@ -1466,7 +1611,7 @@ profiling:
# When running in NFQ inline mode, it is possible to use a simulated
# non-terminal NFQUEUE verdict.
-# This permit to do send all needed packet to suricata via this a rule:
+# This permits sending all needed packets to Suricata via a rule such as:
# iptables -I FORWARD -m mark ! --mark $MARK/$MASK -j NFQUEUE
# And below, you can have your standard filtering ruleset. To activate
# this mode, you need to set mode to 'repeat'
@@ -1475,7 +1620,7 @@ profiling:
# On linux >= 3.1, you can set batchcount to a value > 1 to improve performance
# by processing several packets before sending a verdict (worker runmode only).
# On linux >= 3.6, you can set the fail-open option to yes to have the kernel
-# accept the packet if suricata is not able to keep pace.
+# accept the packet if Suricata is not able to keep pace.
# bypass mark and mask can be used to implement NFQ bypass. If bypass mark is
# set then the NFQ bypass is activated. Suricata will set the bypass mark/mask
# on packet of a flow that need to be bypassed. The Nefilter ruleset has to
@@ -1513,17 +1658,17 @@ nflog:
# general settings affecting packet capture
capture:
- # disable NIC offloading. It's restored when Suricata exists.
- # Enabled by default
+ # disable NIC offloading. It's restored when Suricata exits.
+ # Enabled by default.
#disable-offloading: false
#
# disable checksum validation. Same as setting '-k none' on the
- # commandline
+ # commandline.
#checksum-validation: none
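Suricata only restores offloads that it disabled itself, so it can be worth checking (and disabling) NIC offloads on the capture interface by hand; a hedged sketch, with eth1 standing in for the sniffing interface:

  # Inspect current offload settings on the capture interface (eth1 is a placeholder).
  ethtool -k eth1 | grep -E 'offload'
  # Disable the offloads that coalesce packets before Suricata sees them.
  ethtool -K eth1 gro off lro off tso off gso off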
# Netmap support
#
-# Netmap operates with NIC directly in driver, so you need FreeBSD wich have
+# Netmap operates with NIC directly in driver, so you need FreeBSD 11+, which has
# built-in netmap support or compile and install netmap module and appropriate
# NIC driver on your Linux system.
# To reach maximum throughput disable all receive-, segmentation-,
@@ -1535,7 +1680,9 @@ capture:
netmap:
# To specify OS endpoint add plus sign at the end (e.g. "eth0+")
- interface: eth2
- # Number of receive threads. "auto" uses number of RSS queues on interface.
+ # Number of capture threads. "auto" uses number of RSS queues on interface.
+ # Warning: unless the RSS hashing is symmetrical, this will lead to
+ # accuracy issues.
#threads: auto
# You can use the following variables to activate netmap tap or IPS mode.
# If copy-mode is set to ips or tap, the traffic coming to the current
@@ -1558,7 +1705,7 @@ netmap:
# Possible values are:
# - yes: checksum validation is forced
# - no: checksum validation is disabled
- # - auto: suricata uses a statistical approach to detect when
+ # - auto: Suricata uses a statistical approach to detect when
# checksum off-loading is used.
# Warning: 'checksum-validation' must be set to yes to have any validation
#checksum-checks: auto
@@ -1575,9 +1722,9 @@ netmap:
# for more info see http://www.ntop.org/products/pf_ring/
pfring:
- interface: eth0
- # Number of receive threads (>1 will enable experimental flow pinned
- # runmode)
- threads: 1
+ # Number of receive threads. If set to 'auto' Suricata will first try
+ # to use CPU (core) count and otherwise RSS queue count.
+ threads: auto
# Default clusterid. PF_RING will load balance packets based on flow.
# All threads/processes that will participate need to have the same
@@ -1587,8 +1734,15 @@ pfring:
# Default PF_RING cluster type. PF_RING can load balance per flow.
# Possible values are cluster_flow or cluster_round_robin.
cluster-type: cluster_flow
+
# bpf filter for this interface
#bpf-filter: tcp
+
+ # If bypass is set then the PF_RING hw bypass is activated, when supported
+ # by the interface in use. Suricata will instruct the interface to bypass
+ # all future packets for a flow that needs to be bypassed.
+ #bypass: yes
+
# Choose checksum verification mode for the interface. At the moment
# of the capture, some packets may be with an invalid checksum due to
# offloading to the network card of the checksum computation.
@@ -1596,7 +1750,7 @@ pfring:
# - rxonly: only compute checksum for packets received by network card.
# - yes: checksum validation is forced
# - no: checksum validation is disabled
- # - auto: suricata uses a statistical approach to detect when
+ # - auto: Suricata uses a statistical approach to detect when
# checksum off-loading is used. (default)
# Warning: 'checksum-validation' must be set to yes to have any validation
#checksum-checks: auto
@@ -1641,80 +1795,83 @@ napatech:
# (-1 = OFF, 1 - 100 = percentage of the host buffer that can be held back)
# This may be enabled when sharing streams with another application.
# Otherwise, it should be turned off.
- hba: -1
+ #hba: -1
- # use_all_streams set to "yes" will query the Napatech service for all configured
- # streams and listen on all of them. When set to "no" the streams config array
- # will be used.
- use-all-streams: yes
+ # When use_all_streams is set to "yes" the initialization code will query
+ # the Napatech service for all configured streams and listen on all of them.
+ # When set to "no" the streams config array will be used.
+ #
+ # This option necessitates running the appropriate NTPL commands to create
+ # the desired streams prior to running suricata.
+ #use-all-streams: no
- # The streams to listen on. This can be either:
- # a list of individual streams (e.g. streams: [0,1,2,3])
+ # The streams to listen on when auto-config is disabled or when threading
+ # cpu-affinity is disabled. This can be either:
+ # an individual stream (e.g. streams: [0])
# or
# a range of streams (e.g. streams: ["0-3"])
+ #
streams: ["0-3"]
-# Tilera mpipe configuration. for use on Tilera TILE-Gx.
-mpipe:
+ # When auto-config is enabled the streams will be created and assigned
+ # automatically to the NUMA node where the thread resides. If cpu-affinity
+ # is enabled in the threading section, the streams will be created
+ # according to the number of worker threads specified in the worker cpu set.
+ # Otherwise, the streams array is used to define the streams.
+ #
+ # This option cannot be used simultaneously with "use-all-streams".
+ #
+ auto-config: yes
- # Load balancing modes: "static", "dynamic", "sticky", or "round-robin".
- load-balance: dynamic
+ # Ports indicates which Napatech ports are to be used in auto-config mode.
+ # These are the port IDs of the ports that will be merged prior to the
+ # traffic being distributed to the streams.
+ #
+ # This can be specified in any of the following ways:
+ #
+ # a list of individual ports (e.g. ports: [0,1,2,3])
+ #
+ # a range of ports (e.g. ports: [0-3])
+ #
+ # "all" to indicate that all ports are to be merged together
+ # (e.g. ports: [all])
+ #
+ # This has no effect if auto-config is disabled.
+ #
+ ports: [all]
- # Number of Packets in each ingress packet queue. Must be 128, 512, 2028 or 65536
- iqueue-packets: 2048
-
- # List of interfaces we will listen on.
- inputs:
- - interface: xgbe2
- - interface: xgbe3
- - interface: xgbe4
-
-
- # Relative weight of memory for packets of each mPipe buffer size.
- stack:
- size128: 0
- size256: 9
- size512: 0
- size1024: 0
- size1664: 7
- size4096: 0
- size10386: 0
- size16384: 0
+ # When auto-config is enabled the hashmode specifies the algorithm for
+ # determining to which stream a given packet is to be delivered.
+ # This can be any valid Napatech NTPL hashmode command.
+ #
+ # The most common hashmode commands are: hash2tuple, hash2tuplesorted,
+ # hash5tuple, hash5tuplesorted and roundrobin.
+ #
+ # See the Napatech NTPL documentation for other hashmodes and details on their use.
+ #
+ # This has no effect if auto-config is disabled.
+ #
+ hashmode: hash5tuplesorted
##
-## Hardware accelaration
+## Configure Suricata to load Suricata-Update managed rules.
+##
+## If this section is completely commented out move down to the "Advanced rule
+## file configuration".
##
-# Cuda configuration.
-cuda:
- # The "mpm" profile. On not specifying any of these parameters, the engine's
- # internal default values are used, which are same as the ones specified in
- # in the default conf file.
- mpm:
- # The minimum length required to buffer data to the gpu.
- # Anything below this is MPM'ed on the CPU.
- # Can be specified in kb, mb, gb. Just a number indicates it's in bytes.
- # A value of 0 indicates there's no limit.
- data-buffer-size-min-limit: 0
- # The maximum length for data that we would buffer to the gpu.
- # Anything over this is MPM'ed on the CPU.
- # Can be specified in kb, mb, gb. Just a number indicates it's in bytes.
- data-buffer-size-max-limit: 1500
- # The ring buffer size used by the CudaBuffer API to buffer data.
- cudabuffer-buffer-size: 500mb
- # The max chunk size that can be sent to the gpu in a single go.
- gpu-transfer-size: 50mb
- # The timeout limit for batching of packets in microseconds.
- batching-timeout: 2000
- # The device to use for the mpm. Currently we don't support load balancing
- # on multiple gpus. In case you have multiple devices on your system, you
- # can specify the device to use, using this conf. By default we hold 0, to
- # specify the first device cuda sees. To find out device-id associated with
- # the card(s) on the system run "suricata --list-cuda-cards".
- device-id: 0
- # No of Cuda streams used for asynchronous processing. All values > 0 are valid.
- # For this option you need a device with Compute Capability > 1.0.
- cuda-streams: 2
+default-rule-path: /etc/suricata/rules
+
+rule-files:
+ - all.rules
+
+##
+## Auxiliary configuration files.
+##
+
+classification-file: /etc/suricata/classification.config
+reference-config-file: /etc/suricata/reference.config
+# threshold-file: /etc/suricata/threshold.config
##
## Include other configs
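With the rule path and auxiliary files now pointing at /etc/suricata, a config test run confirms that all.rules, classification.config and reference.config actually load; a sketch, assuming these paths are visible where suricata runs:

  # Parse the configuration and all rule/auxiliary files without starting capture.
  suricata -T -c /etc/suricata/suricata.yaml -v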
diff --git a/salt/suricata/init.sls b/salt/suricata/init.sls
index 39f419ad0..0f3d49bc3 100644
--- a/salt/suricata/init.sls
+++ b/salt/suricata/init.sls
@@ -55,6 +55,12 @@ surilogdir:
- user: 940
- group: 939
+suridatadir:
+ file.directory:
+ - name: /nsm/suricata
+ - user: 940
+ - group: 939
+
surirulesync:
file.recurse:
- name: /opt/so/conf/suricata/rules/
@@ -119,6 +125,7 @@ so-suricata:
- /opt/so/conf/suricata/threshold.conf:/etc/suricata/threshold.conf:ro
- /opt/so/conf/suricata/rules:/etc/suricata/rules:ro
- /opt/so/log/suricata/:/var/log/suricata/:rw
+ - /nsm/suricata/:/nsm/:rw
- /opt/so/conf/suricata/bpf:/etc/suricata/bpf:ro
- network_mode: host
- watch:
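A hedged check that the new /nsm bind mount is live once the container restarts (the container name so-suricata is taken from the state id above):

  # List the container's bind mounts and confirm /nsm/suricata -> /nsm appears.
  docker inspect --format '{{ json .Mounts }}' so-suricata | jq .
  docker exec so-suricata ls -ld /nsm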
diff --git a/salt/hive/thehive/etc/application.conf b/salt/thehive/etc/application.conf
similarity index 99%
rename from salt/hive/thehive/etc/application.conf
rename to salt/thehive/etc/application.conf
index 230d87d67..8630cb386 100644
--- a/salt/hive/thehive/etc/application.conf
+++ b/salt/thehive/etc/application.conf
@@ -12,7 +12,7 @@ search {
# Name of the index
index = the_hive
# Name of the Elasticsearch cluster
- cluster = hive
+ cluster = thehive
# Address of the Elasticsearch instance
host = ["{{ MASTERIP }}:9500"]
#search.uri = "http://{{ MASTERIP }}:9500"
diff --git a/salt/hive/thehive/etc/cortex-application.conf b/salt/thehive/etc/cortex-application.conf
similarity index 99%
rename from salt/hive/thehive/etc/cortex-application.conf
rename to salt/thehive/etc/cortex-application.conf
index 356bfd7b3..1a887cdb3 100644
--- a/salt/hive/thehive/etc/cortex-application.conf
+++ b/salt/thehive/etc/cortex-application.conf
@@ -12,7 +12,7 @@ search {
# Name of the index
index = cortex
# Name of the Elasticsearch cluster
- cluster = hive
+ cluster = thehive
# Address of the Elasticsearch instance
host = ["{{ MASTERIP }}:9500"]
# Scroll keepalive
diff --git a/salt/hive/thehive/etc/es/elasticsearch.yml b/salt/thehive/etc/es/elasticsearch.yml
similarity index 95%
rename from salt/hive/thehive/etc/es/elasticsearch.yml
rename to salt/thehive/etc/es/elasticsearch.yml
index d00c01d5d..7f268a671 100644
--- a/salt/hive/thehive/etc/es/elasticsearch.yml
+++ b/salt/thehive/etc/es/elasticsearch.yml
@@ -1,4 +1,4 @@
-cluster.name: "hive"
+cluster.name: "thehive"
network.host: 0.0.0.0
discovery.zen.minimum_master_nodes: 1
# This is a test -- if this is here, then the volume is mounted correctly.
diff --git a/salt/hive/thehive/etc/es/log4j2.properties b/salt/thehive/etc/es/log4j2.properties
similarity index 100%
rename from salt/hive/thehive/etc/es/log4j2.properties
rename to salt/thehive/etc/es/log4j2.properties
diff --git a/salt/hive/init.sls b/salt/thehive/init.sls
similarity index 70%
rename from salt/hive/init.sls
rename to salt/thehive/init.sls
index 2be2f7480..732fe4a77 100644
--- a/salt/hive/init.sls
+++ b/salt/thehive/init.sls
@@ -1,24 +1,24 @@
{% set MASTERIP = salt['pillar.get']('master:mainip', '') %}
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
{% set MASTER = salt['grains.get']('master') %}
-hiveconfdir:
+thehiveconfdir:
file.directory:
- - name: /opt/so/conf/hive/etc
+ - name: /opt/so/conf/thehive/etc
- makedirs: True
- user: 939
- group: 939
-hivelogdir:
+thehivelogdir:
file.directory:
- - name: /opt/so/log/hive
+ - name: /opt/so/log/thehive
- makedirs: True
- user: 939
- group: 939
-hiveconf:
+thehiveconf:
file.recurse:
- - name: /opt/so/conf/hive/etc
- - source: salt://hive/thehive/etc
+ - name: /opt/so/conf/thehive/etc
+ - source: salt://thehive/etc
- user: 939
- group: 939
- template: jinja
@@ -40,7 +40,7 @@ cortexlogdir:
cortexconf:
file.recurse:
- name: /opt/so/conf/cortex
- - source: salt://hive/thehive/etc
+ - source: salt://thehive/etc
- user: 939
- group: 939
- template: jinja
@@ -48,9 +48,9 @@ cortexconf:
# Install Elasticsearch
# Made directory for ES data to live in
-hiveesdata:
+thehiveesdata:
file.directory:
- - name: /nsm/hive/esdata
+ - name: /nsm/thehive/esdata
- makedirs: True
- user: 939
- group: 939
@@ -64,16 +64,16 @@ so-thehive-es:
- interactive: True
- tty: True
- binds:
- - /nsm/hive/esdata:/usr/share/elasticsearch/data:rw
- - /opt/so/conf/hive/etc/es/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro
- - /opt/so/conf/hive/etc/es/log4j2.properties:/usr/share/elasticsearch/config/log4j2.properties:ro
- - /opt/so/log/hive:/var/log/elasticsearch:rw
+ - /nsm/thehive/esdata:/usr/share/elasticsearch/data:rw
+ - /opt/so/conf/thehive/etc/es/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro
+ - /opt/so/conf/thehive/etc/es/log4j2.properties:/usr/share/elasticsearch/config/log4j2.properties:ro
+ - /opt/so/log/thehive:/var/log/elasticsearch:rw
- environment:
- http.host=0.0.0.0
- http.port=9400
- transport.tcp.port=9500
- transport.host=0.0.0.0
- - cluster.name=hive
+ - cluster.name=thehive
- thread_pool.index.queue_size=100000
- thread_pool.search.queue_size=100000
- thread_pool.bulk.queue_size=100000
@@ -90,13 +90,13 @@ so-cortex:
- name: so-cortex
- user: 939
- binds:
- - /opt/so/conf/hive/etc/cortex-application.conf:/opt/cortex/conf/application.conf:ro
+ - /opt/so/conf/thehive/etc/cortex-application.conf:/opt/cortex/conf/application.conf:ro
- port_bindings:
- 0.0.0.0:9001:9001
cortexscript:
cmd.script:
- - source: salt://hive/thehive/scripts/cortex_init
+ - source: salt://thehive/scripts/cortex_init
- cwd: /opt/so
- template: jinja
@@ -109,12 +109,12 @@ so-thehive:
- name: so-thehive
- user: 939
- binds:
- - /opt/so/conf/hive/etc/application.conf:/opt/thehive/conf/application.conf:ro
+ - /opt/so/conf/thehive/etc/application.conf:/opt/thehive/conf/application.conf:ro
- port_bindings:
- 0.0.0.0:9000:9000
-hivescript:
+thehivescript:
cmd.script:
- - source: salt://hive/thehive/scripts/hive_init
+ - source: salt://thehive/scripts/hive_init
- cwd: /opt/so
- template: jinja
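With the state tree renamed from hive to thehive, the new state can be applied directly and the resulting containers checked; a sketch, assuming salt-call is run on the master:

  # Re-render and apply only the renamed state, then confirm the containers are up.
  salt-call state.apply thehive -l info
  docker ps --format '{{ .Names }}' | grep -E 'so-thehive|so-thehive-es|so-cortex'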
diff --git a/salt/hive/thehive/scripts/cortex_init b/salt/thehive/scripts/cortex_init
similarity index 100%
rename from salt/hive/thehive/scripts/cortex_init
rename to salt/thehive/scripts/cortex_init
diff --git a/salt/thehive/scripts/hive_init b/salt/thehive/scripts/hive_init
new file mode 100755
index 000000000..296004e77
--- /dev/null
+++ b/salt/thehive/scripts/hive_init
@@ -0,0 +1,64 @@
+#!/bin/bash
+{% set MASTERIP = salt['pillar.get']('static:masterip', '') %}
+{%- set THEHIVEUSER = salt['pillar.get']('static:hiveuser', '') %}
+{%- set THEHIVEPASSWORD = salt['pillar.get']('static:hivepassword', '') %}
+{%- set THEHIVEKEY = salt['pillar.get']('static:hivekey', '') %}
+
+thehive_init(){
+ sleep 120
+ THEHIVE_IP="{{MASTERIP}}"
+ THEHIVE_USER="{{THEHIVEUSER}}"
+ THEHIVE_PASSWORD="{{THEHIVEPASSWORD}}"
+ THEHIVE_KEY="{{THEHIVEKEY}}"
+ SOCTOPUS_CONFIG="/opt/so/saltstack/salt/soctopus/files/SOCtopus.conf"
+
+ echo -n "Waiting for TheHive..."
+ COUNT=0
+ THEHIVE_CONNECTED="no"
+ while [[ "$COUNT" -le 240 ]]; do
+ curl --output /dev/null --silent --head --fail -k "https://$THEHIVE_IP/thehive"
+ if [ $? -eq 0 ]; then
+ THEHIVE_CONNECTED="yes"
+ echo "connected!"
+ break
+ else
+ ((COUNT+=1))
+ sleep 1
+ echo -n "."
+ fi
+ done
+
+ if [ "$THEHIVE_CONNECTED" == "yes" ]; then
+
+ # Migrate DB
+ curl -v -k -XPOST "https://$THEHIVE_IP/thehive/api/maintenance/migrate"
+
+ # Create initial TheHive user
+ curl -v -k "https://$THEHIVE_IP/thehive/api/user" -H "Content-Type: application/json" -d "{\"login\" : \"$THEHIVE_USER\",\"name\" : \"$THEHIVE_USER\",\"roles\" : [\"read\",\"alert\",\"write\",\"admin\"],\"preferences\" : \"{}\",\"password\" : \"$THEHIVE_PASSWORD\", \"key\": \"$THEHIVE_KEY\"}"
+
+ # Pre-load custom fields
+ #
+ # reputation
+ curl -v -k "https://$THEHIVE_IP/thehive/api/list/custom_fields" -H "Authorization: Bearer $THEHIVE_KEY" -H "Content-Type: application/json" -d "{\"value\":{\"name\": \"reputation\", \"reference\": \"reputation\", \"description\": \"This field provides an overall reputation status for an address/domain.\", \"type\": \"string\", \"options\": []}}"
+
+
+ touch /opt/so/state/thehive.txt
+ else
+ echo "We experienced an issue connecting to TheHive!"
+ fi
+}
+
+if [ -f /opt/so/state/thehive.txt ]; then
+ exit 0
+else
+ rm -f garbage_file
+ while ! wget -O garbage_file {{MASTERIP}}:9400 2>/dev/null
+ do
+ echo "Waiting for Elasticsearch..."
+ rm -f garbage_file
+ sleep 1
+ done
+ rm -f garbage_file
+ sleep 5
+ thehive_init
+fi
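Once the state file exists, the credentials created by thehive_init can be exercised directly; a hedged sketch (the /api/user/current endpoint and bearer-token auth are assumptions about this TheHive release):

  # Verify the API key created above is accepted (endpoint name is an assumption).
  curl -sk -H "Authorization: Bearer $THEHIVE_KEY" "https://$THEHIVE_IP/thehive/api/user/current"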
diff --git a/salt/top.sls b/salt/top.sls
index 95acae1fd..4d60b01c0 100644
--- a/salt/top.sls
+++ b/salt/top.sls
@@ -100,7 +100,7 @@ base:
- schedule
- soctopus
{%- if THEHIVE != 0 %}
- - hive
+ - thehive
{%- endif %}
{%- if PLAYBOOK != 0 %}
- playbook
@@ -149,7 +149,7 @@ base:
{%- endif %}
- soctopus
{%- if THEHIVE != 0 %}
- - hive
+ - thehive
{%- endif %}
{%- if PLAYBOOK != 0 %}
- playbook
@@ -203,7 +203,7 @@ base:
- schedule
- soctopus
{%- if THEHIVE != 0 %}
- - hive
+ - thehive
{%- endif %}
{%- if PLAYBOOK != 0 %}
- playbook
@@ -318,7 +318,7 @@ base:
{%- endif %}
- soctopus
{%- if THEHIVE != 0 %}
- - hive
+ - thehive
{%- endif %}
{%- if PLAYBOOK != 0 %}
- playbook
diff --git a/setup/automation/pm_standalone_defaults b/setup/automation/pm_standalone_defaults
new file mode 100644
index 000000000..ae4554a3f
--- /dev/null
+++ b/setup/automation/pm_standalone_defaults
@@ -0,0 +1,78 @@
+#!/bin/bash
+
+# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+TESTING=true
+
+address_type=DHCP
+ADMINUSER=onionuser
+ADMINPASS1=onionuser
+ADMINPASS2=onionuser
+ALLOW_CIDR=0.0.0.0/0
+ALLOW_ROLE=a
+BASICBRO=7
+BASICSURI=7
+# BLOGS=
+BNICS=eth1
+BROVERSION=ZEEK
+# CURCLOSEDAYS=
+# EVALADVANCED=BASIC
+GRAFANA=1
+# HELIXAPIKEY=
+HNMASTER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
+HNSENSOR=inherit
+HOSTNAME=standalone
+install_type=STANDALONE
+# LSINPUTBATCHCOUNT=
+# LSINPUTTHREADS=
+# LSPIPELINEBATCH=
+# LSPIPELINEWORKERS=
+MASTERADV=BASIC
+MASTERUPDATES=1
+# MDNS=
+# MGATEWAY=
+# MIP=
+# MMASK=
+MNIC=eth0
+# MSEARCH=
+# MSRV=
+# MTU=
+NAVIGATOR=1
+NIDS=Suricata
+# NODE_ES_HEAP_SIZE=
+# NODE_LS_HEAP_SIZE=
+NODESETUP=NODEBASIC
+NSMSETUP=BASIC
+NODEUPDATES=MASTER
+# OINKCODE=
+OSQUERY=1
+# PATCHSCHEDULEDAYS=
+# PATCHSCHEDULEHOURS=
+PATCHSCHEDULENAME=auto
+PLAYBOOK=1
+# REDIRECTHOST=
+REDIRECTINFO=IP
+RULESETUP=ETOPEN
+# SHARDCOUNT=
+SKIP_REBOOT=1
+SOREMOTEPASS1=onionuser
+SOREMOTEPASS2=onionuser
+STRELKA=1
+THEHIVE=1
+WAZUH=1
+WEBUSER=onionuser@somewhere.invalid
+WEBPASSWD1=onionuser
+WEBPASSWD2=onionuser
\ No newline at end of file
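This defaults file is consumed through so-setup's second positional argument (see the automation handling below), so an unattended standalone install could be started roughly like this; a sketch, assuming an ISO-based install launched from the setup directory:

  # Preseed the installer with the answers above; SKIP_REBOOT=1 suppresses the final reboot.
  sudo ./so-setup iso pm_standalone_defaults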
diff --git a/setup/proxies/docker.conf b/setup/proxies/docker.conf
new file mode 100644
index 000000000..9ab2c4b4c
--- /dev/null
+++ b/setup/proxies/docker.conf
@@ -0,0 +1,2 @@
+[Service]
+ExecStart=/usr/bin/dockerd -H fd:// --registry-mirror "$proxy_addr"
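For this override to take effect it would normally be installed as a drop-in for the docker unit and the daemon reloaded; a hedged sketch (the drop-in path is an assumption, and note that redefining ExecStart on a systemd service usually requires clearing it first with an empty ExecStart= line):

  # Install the override, reload unit files and restart Docker (paths are assumptions).
  install -D -m 644 setup/proxies/docker.conf /etc/systemd/system/docker.service.d/docker.conf
  systemctl daemon-reload
  systemctl restart docker
  systemctl cat docker        # confirm which ExecStart is actually in use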
diff --git a/setup/so-common-functions b/setup/so-common-functions
index 15cb3e686..fc380f85b 100644
--- a/setup/so-common-functions
+++ b/setup/so-common-functions
@@ -38,31 +38,3 @@ calculate_useable_cores() {
if [ "$lb_procs_round" -lt 1 ]; then lb_procs=1; else lb_procs=$lb_procs_round; fi
export lb_procs
}
-
-set_defaul_log_size() {
- local percentage
-
- case $INSTALLTYPE in
- EVAL | HEAVYNODE)
- percentage=50
- ;;
- *)
- percentage=80
- ;;
- esac
-
- local disk_dir="/"
- if [ -d /nsm ]; then
- disk_dir="/nsm"
- fi
- local disk_size_1k
- disk_size_1k=$(df $disk_dir | grep -v "^Filesystem" | awk '{print $2}')
-
- local ratio="1048576"
-
- local disk_size_gb
- disk_size_gb=$( echo "$disk_size_1k" "$ratio" | awk '{print($1/$2)}' )
-
- log_size_limit=$( echo "$disk_size_gb" "$percentage" | awk '{printf("%.0f", $1 * ($2/100))}')
- export log_size_limit
-}
diff --git a/setup/so-functions b/setup/so-functions
index a20953035..52aee37d6 100755
--- a/setup/so-functions
+++ b/setup/so-functions
@@ -19,7 +19,7 @@ source ./so-whiptail
source ./so-variables
source ./so-common-functions
-SOVERSION=1.3.0
+SOVERSION=1.4.0
accept_salt_key_remote() {
systemctl restart salt-minion
@@ -193,7 +193,7 @@ check_admin_pass() {
check_pass_match "$ADMINPASS1" "$ADMINPASS2" "APMATCH"
}
-check_hive_init_then_reboot() {
+check_hive_init() {
wait_for_file /opt/so/state/thehive.txt 20 5
local return_val=$?
@@ -203,7 +203,6 @@ check_hive_init_then_reboot() {
docker stop so-thehive
docker rm so-thehive
- shutdown -r now
}
check_network_manager_conf() {
@@ -514,7 +513,7 @@ detect_os() {
# Install bind-utils so the host command exists
if ! command -v host > /dev/null 2>&1; then
echo "Installing required packages to run installer"
- yum -y install bind-utils >> "$setup_log" 2>&1
+ yum -y install bind-utils yum-plugin-versionlock >> "$setup_log" 2>&1
fi
@@ -550,6 +549,9 @@ detect_os() {
disable_onion_user() {
# Disable the default account cause security.
usermod -L onion
+
+ # Remove the automated setup script from crontab, if it exists
+ crontab -u onion -r
}
disable_misc_network_features() {
@@ -580,7 +582,9 @@ docker_install() {
{
yum clean expire-cache;
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo;
- yum -y install docker-ce;
+ yum -y install docker-ce-19.03.9-3.el7 containerd.io-1.2.6-3.el7;
+ yum versionlock docker-ce-19.03.9-3.el7;
+ yum versionlock containerd.io-1.2.6-3.el7
} >> "$setup_log" 2>&1
else
@@ -616,9 +620,10 @@ docker_registry() {
echo "Setting up Docker Registry" >> "$setup_log" 2>&1
mkdir -p /etc/docker >> "$setup_log" 2>&1
# Make the host use the master docker registry
+ if [ -n "$TURBO" ]; then local proxy="$TURBO"; else local proxy="https://$MSRV"; fi
printf '%s\n'\
"{"\
- " \"registry-mirrors\": [\"https://$MSRV:5000\"]"\
+ " \"registry-mirrors\": [ \"$proxy:5000\" ]"\
"}" > /etc/docker/daemon.json
echo "Docker Registry Setup - Complete" >> "$setup_log" 2>&1
@@ -683,7 +688,7 @@ docker_seed_registry() {
# Tag it with the new registry destination
docker tag soshybridhunter/"$i" "$HOSTNAME":5000/soshybridhunter/"$i"
docker push "$HOSTNAME":5000/soshybridhunter/"$i"
- docker rmi soshybridhunter/"$i"
+ #docker rmi soshybridhunter/"$i"
} >> "$setup_log" 2>&1
done
else
@@ -1068,7 +1073,7 @@ saltify() {
yum -y update exclude=salt*;
systemctl enable salt-minion;
} >> "$setup_log" 2>&1
- echo "exclude=salt*" >> /etc/yum.conf
+ yum versionlock salt*
else
DEBIAN_FRONTEND=noninteractive apt-get -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" upgrade >> "$setup_log" 2>&1
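After pinning, the holds can be confirmed on either platform; a small sketch using the package names locked above:

  # CentOS: list packages frozen by yum-plugin-versionlock.
  yum versionlock list
  # Ubuntu: show any packages currently placed on hold.
  apt-mark showhold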
@@ -1227,8 +1232,6 @@ set_progress_str() {
'----'\
"$percentage% - ${progress_bar_text^^}"\
"----" >> "$setup_log" 2>&1
-
- sleep 5
}
sensor_pillar() {
@@ -1279,6 +1282,33 @@ sensor_pillar() {
cat "$pillar_file" >> "$setup_log" 2>&1
}
+set_default_log_size() {
+ local percentage
+
+ case $INSTALLTYPE in
+ EVAL | HEAVYNODE)
+ percentage=50
+ ;;
+ *)
+ percentage=80
+ ;;
+ esac
+
+ local disk_dir="/"
+ if [ -d /nsm ]; then
+ disk_dir="/nsm"
+ fi
+ local disk_size_1k
+ disk_size_1k=$(df $disk_dir | grep -v "^Filesystem" | awk '{print $2}')
+
+ local ratio="1048576"
+
+ local disk_size_gb
+ disk_size_gb=$( echo "$disk_size_1k" "$ratio" | awk '{print($1/$2)}' )
+
+ log_size_limit=$( echo "$disk_size_gb" "$percentage" | awk '{printf("%.0f", $1 * ($2/100))}')
+}
+
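As a worked example of the arithmetic above: on a non-EVAL install where df reports /nsm as 524288000 1K blocks, disk_size_gb is 524288000 / 1048576 = 500 and log_size_limit comes out to 500 * 0.80 = 400; the same numbers can be reproduced by hand:

  # Reproduce the calculation for a 500 GB /nsm at the default 80% (sketch).
  echo "524288000 1048576" | awk '{print($1/$2)}'            # -> 500
  echo "500 80" | awk '{printf("%.0f\n", $1 * ($2/100))}'    # -> 400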
set_hostname() {
set_hostname_iso
@@ -1443,6 +1473,23 @@ update_packages() {
fi
}
+use_turbo_proxy() {
+ if [[ ! $install_type =~ ^(MASTER|EVAL|HELIXSENSOR|MASTERSEARCH|STANDALONE)$ ]]; then
+ echo "turbo is not supported on this install type" >> $setup_log 2>&1
+ return
+ fi
+
+ if [[ $OS == 'centos' ]]; then
+ printf '%s\n' "proxy=${TURBO}:3142" >> /etc/yum.conf
+ else
+ printf '%s\n'\
+ "Acquire {"\
+ " HTTP::proxy \"${TURBO}:3142\";"\
+ " HTTPS::proxy \"${TURBO}:3142\";"\
+ "}" > /etc/apt/apt.conf.d/proxy.conf
+ fi
+}
+
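With --turbo=10.0.0.5 on the command line, TURBO becomes http://10.0.0.5 and this function leaves behind configuration along these lines; a sketch for illustration:

  # CentOS: appended to /etc/yum.conf
  #   proxy=http://10.0.0.5:3142
  # Ubuntu: written to /etc/apt/apt.conf.d/proxy.conf
  #   Acquire {
  #     HTTP::proxy "http://10.0.0.5:3142";
  #     HTTPS::proxy "http://10.0.0.5:3142";
  #   }
  grep '^proxy=' /etc/yum.conf 2>/dev/null || cat /etc/apt/apt.conf.d/proxy.conf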
ls_heapsize() {
if [ "$total_mem" -ge 32000 ]; then
diff --git a/setup/so-setup b/setup/so-setup
index 406d69763..566767e82 100755
--- a/setup/so-setup
+++ b/setup/so-setup
@@ -21,15 +21,74 @@ source ./so-common-functions
source ./so-whiptail
source ./so-variables
+# Parse command line arguments
setup_type=$1
-export setup_type
+automation=$2
+
+while [[ $# -gt 0 ]]; do
+ arg="$1"
+ shift
+ case "$arg" in
+ "--turbo="* )
+ export TURBO="http://${arg#*=}";;
+ "--proxy="* )
+ export {http,https,ftp,rsync,all}_proxy="${arg#*=}";;
+ "--allow-role="* )
+ export ALLOW_ROLE="${arg#*=}";;
+ "--allow-cidr="* )
+ export ALLOW_CIDR="${arg#*=}";;
+ "--skip-reboot" )
+ export SKIP_REBOOT=1;;
+ * )
+ if [[ "$arg" == "--"* ]]; then
+ echo "Invalid option"
+ fi
+ esac
+done
+
+# Begin Installation pre-processing
+echo "---- Starting setup at $(date -u) ----" >> $setup_log 2>&1
+
+automated=no
+function progress() {
+ if [ $automated == no ]; then
+ whiptail --title "Security Onion Install" --gauge 'Please wait while installing' 6 60 0
+ else
+ cat >> $setup_log 2>&1
+ fi
+}
+
+if [[ -f automation/$automation && $(basename $automation) == $automation ]]; then
+ echo "Preselecting variable values based on automated setup: $automation" >> $setup_log 2>&1
+ source automation/$automation
+ automated=yes
+
+ echo "Checking network configuration" >> $setup_log 2>&1
+ ip a >> $setup_log 2>&1
+
+ attempt=1
+ attempts=60
+ ip a | grep "$MNIC:" | grep "state UP" >> $setup_log 2>&1
+ while [ $? -ne 0 ]; do
+ ip a >> $setup_log 2>&1
+ if [ $attempt -gt $attempts ]; then
+ echo "Network unavailable - setup cannot continue" >> $setup_log 2>&1
+ exit 1
+ fi
+ echo "Waiting for network to come up (attempt $attempt of $attempts)" >> $setup_log 2>&1
+ attempt=$((attempt + 1))
+ sleep 10;
+ ip a | grep "$MNIC:" | grep "state UP" >> $setup_log 2>&1
+ done
+ echo "Network is up on $MNIC" >> $setup_log 2>&1
+fi
case "$setup_type" in
iso | network) # Accepted values
- echo "Beginning Security Onion $setup_type install"
+ echo "Beginning Security Onion $setup_type install" >> $setup_log 2>&1
;;
*)
- echo "Invalid install type, must be 'iso' or 'network'"
+ echo "Invalid install type, must be 'iso' or 'network'" | tee $setup_log
exit 1
;;
esac
@@ -37,9 +96,8 @@ esac
# Allow execution of SO tools during setup
export PATH=$PATH:../salt/common/tools/sbin
-date -u > $setup_log 2>&1
-
got_root
+
detect_os
if [ "$OS" == ubuntu ]; then
@@ -48,10 +106,10 @@ fi
setterm -blank 0
-if (whiptail_you_sure); then
+if [ "$setup_type" == 'iso' ] || (whiptail_you_sure); then
true
else
- echo "User cancelled setup." >> $setup_log 2>&1
+ echo "User cancelled setup." | tee $setup_log
whiptail_cancel
fi
@@ -134,17 +192,21 @@ echo "MINION_ID = $MINION_ID" >> $setup_log 2>&1
minion_type=$(get_minion_type)
-# Set any constants needed
+# Set any variables needed
+set_default_log_size >> $setup_log 2>&1
+
if [[ $is_helix ]]; then
RULESETUP=ETOPEN
NSMSETUP=BASIC
HNSENSOR=inherit
MASTERUPDATES=0
fi
+
if [[ $is_helix || ( $is_master && $is_node ) ]]; then
RULESETUP=ETOPEN
NSMSETUP=BASIC
fi
+
if [[ $is_master && $is_node ]]; then
LSPIPELINEWORKERS=1
LSPIPELINEBATCH=125
@@ -153,6 +215,7 @@ if [[ $is_master && $is_node ]]; then
NIDS=Suricata
BROVERSION=ZEEK
fi
+
if [[ $is_node ]]; then
CURCLOSEDAYS=30
fi
@@ -195,6 +258,9 @@ fi
if [[ $is_distmaster || ( $is_sensor || $is_node ) && ! $is_eval ]]; then
whiptail_master_updates
+ if [[ $setup_type == 'network' && $MASTERUPDATES == 1 ]]; then
+ whiptail_master_updates_warning
+ fi
fi
if [[ $is_minion ]]; then
@@ -241,6 +307,10 @@ fi
whiptail_make_changes
+if [[ -n "$TURBO" ]]; then
+ use_turbo_proxy
+fi
+
if [[ "$setup_type" == 'iso' ]]; then
# Init networking so rest of install works
set_hostname_iso
@@ -485,17 +555,22 @@ fi
set_progress_str 95 'Verifying setup'
salt-call -l info state.highstate >> $setup_log 2>&1
-} | whiptail --title "Hybrid Hunter Install" --gauge "Please wait while installing" 6 60 0
+} | progress
success=$(tail -10 $setup_log | grep Failed | awk '{ print $2}')
if [[ "$success" = 0 ]]; then
whiptail_setup_complete
+ if [[ -n $ALLOW_ROLE && -n $ALLOW_CIDR ]]; then
+ export IP=$ALLOW_CIDR
+ so-allow -$ALLOW_ROLE >> $setup_log 2>&1
+ fi
if [[ $THEHIVE == 1 ]]; then
- check_hive_init_then_reboot
- else
- shutdown -r now
+ check_hive_init
fi
else
whiptail_setup_failed
+fi
+
+if [[ -z $SKIP_REBOOT ]]; then
shutdown -r now
fi
diff --git a/setup/so-whiptail b/setup/so-whiptail
index cfe00b67b..9ba4ebc20 100755
--- a/setup/so-whiptail
+++ b/setup/so-whiptail
@@ -413,7 +413,6 @@ whiptail_log_size_limit() {
[ -n "$TESTING" ] && return
- set_defaul_log_size
log_size_limit=$(whiptail --title "Security Onion Setup" --inputbox \
"Please specify the amount of disk space (in GB) you would like to allocate for Elasticsearch data storage. \
@@ -958,7 +957,7 @@ whiptail_setup_complete() {
[ -n "$TESTING" ] && return
- whiptail --title "Security Onion Setup" --msgbox "Finished $install_type install. Press ENTER to reboot." 8 75
+ whiptail --title "Security Onion Setup" --msgbox "Finished $install_type install. Press Ok to reboot." 8 75
install_cleanup >> $setup_log 2>&1
}
@@ -967,7 +966,7 @@ whiptail_setup_failed() {
[ -n "$TESTING" ] && return
- whiptail --title "Security Onion Setup" --msgbox "Install had a problem. Please see $setup_log for details. Press ENTER to reboot." 8 75
+ whiptail --title "Security Onion Setup" --msgbox "Install had a problem. Please see $setup_log for details. Press Ok to reboot." 8 75
install_cleanup >> $setup_log 2>&1
}
@@ -1027,7 +1026,17 @@ whiptail_master_updates() {
;;
esac
+}
+whiptail_master_updates_warning() {
+ [ -n "$TESTING" ] && return
+
+ whiptail --title "Security Onion Setup"\
+ --msgbox "Updating through the master node requires the master to have internet access, press ENTER to continue"\
+ 8 75
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
}
whiptail_node_updates() {
@@ -1048,7 +1057,7 @@ whiptail_you_sure() {
[ -n "$TESTING" ] && return
- whiptail --title "Security Onion Setup" --yesno "Are you sure you want to install Security Onion over the internet?" 8 75
+ whiptail --title "Security Onion Setup" --yesno "Are you sure you want to continue a network install of Security Onion?" 8 75
local exitstatus=$?
return $exitstatus
diff --git a/so-setup-network b/so-setup-network
index ae9af4ffa..2528ff14b 100755
--- a/so-setup-network
+++ b/so-setup-network
@@ -15,6 +15,6 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-cd setup
+cd setup || exit
-./so-setup network
+./so-setup network "$@"
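Because the wrapper now forwards its arguments, the new setup flags can be passed straight through; a usage sketch with placeholder values:

  # Network install via the wrapper, handing flags down to setup/so-setup.
  sudo bash so-setup-network --proxy=http://192.0.2.1:3128 --allow-role=a --allow-cidr=10.0.0.0/8 --skip-reboot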