Merge branch 'dev' into feature/setup

# Conflicts:
#	salt/thehive/scripts/cortex_init
#	salt/thehive/scripts/hive_init
#	setup/so-functions
#	setup/so-whiptail
William Wernert
2020-07-10 17:42:56 -04:00
161 changed files with 1329 additions and 1052 deletions

View File

@@ -57,7 +57,7 @@
- Fixed an issue where geoip was not properly parsed.
- ATT&CK Navigator is now its own state.
- Standalone mode is now supported.
- Mastersearch previously used the same Grafana dashboard as a Search node. It now has its own dashboard that incorporates panels from the Master node and Search node dashboards.
- Managersearch previously used the same Grafana dashboard as a Search node. It now has its own dashboard that incorporates panels from the Manager node and Search node dashboards.
### Known Issues:

View File

@@ -13,8 +13,8 @@ role:
fleet:
heavynode:
helixsensor:
master:
mastersearch:
manager:
managersearch:
standalone:
searchnode:
sensor:

View File

@@ -24,7 +24,7 @@ firewall:
ips:
delete:
insert:
master:
manager:
ips:
delete:
insert:

View File

@@ -1,12 +1,12 @@
{%- set FLEETMASTER = salt['pillar.get']('static:fleet_master', False) -%}
{%- set FLEETMANAGER = salt['pillar.get']('static:fleet_manager', False) -%}
{%- set FLEETNODE = salt['pillar.get']('static:fleet_node', False) -%}
{% set WAZUH = salt['pillar.get']('master:wazuh', '0') %}
{% set THEHIVE = salt['pillar.get']('master:thehive', '0') %}
{% set PLAYBOOK = salt['pillar.get']('master:playbook', '0') %}
{% set FREQSERVER = salt['pillar.get']('master:freq', '0') %}
{% set DOMAINSTATS = salt['pillar.get']('master:domainstats', '0') %}
{% set WAZUH = salt['pillar.get']('manager:wazuh', '0') %}
{% set THEHIVE = salt['pillar.get']('manager:thehive', '0') %}
{% set PLAYBOOK = salt['pillar.get']('manager:playbook', '0') %}
{% set FREQSERVER = salt['pillar.get']('manager:freq', '0') %}
{% set DOMAINSTATS = salt['pillar.get']('manager:domainstats', '0') %}
{% set BROVER = salt['pillar.get']('static:broversion', 'COMMUNITY') %}
{% set GRAFANA = salt['pillar.get']('master:grafana', '0') %}
{% set GRAFANA = salt['pillar.get']('manager:grafana', '0') %}
eval:
containers:
@@ -20,7 +20,7 @@ eval:
- so-soc
- so-kratos
- so-idstools
{% if FLEETMASTER %}
{% if FLEETMANAGER %}
- so-mysql
- so-fleet
- so-redis
@@ -83,7 +83,7 @@ hot_node:
- so-logstash
- so-elasticsearch
- so-curator
master_search:
manager_search:
containers:
- so-nginx
- so-telegraf
@@ -99,7 +99,7 @@ master_search:
- so-elastalert
- so-filebeat
- so-soctopus
{% if FLEETMASTER %}
{% if FLEETMANAGER %}
- so-mysql
- so-fleet
- so-redis
@@ -122,7 +122,7 @@ master_search:
{% if DOMAINSTATS != '0' %}
- so-domainstats
{% endif %}
master:
manager:
containers:
- so-dockerregistry
- so-nginx
@@ -141,7 +141,7 @@ master:
- so-kibana
- so-elastalert
- so-filebeat
{% if FLEETMASTER %}
{% if FLEETMANAGER %}
- so-mysql
- so-fleet
- so-redis
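
For reference, the container toggles in this state come from the renamed manager pillar (formerly master); a minimal pillar sketch, with illustrative values, would be:

manager:
  grafana: 1
  wazuh: 1
  thehive: 0
  playbook: 0
  freq: 0
  domainstats: 0
static:
  fleet_manager: True
  broversion: COMMUNITY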

View File

@@ -17,7 +17,7 @@ firewall:
- 5644
- 9822
udp:
master:
manager:
ports:
tcp:
- 1514

View File

@@ -16,6 +16,14 @@ logstash:
- so/9600_output_ossec.conf.jinja
- so/9700_output_strelka.conf.jinja
templates:
- so/so-beats-template.json
- so/so-beats-template.json.jinja
- so/so-common-template.json
- so/so-zeek-template.json
- so/so-firewall-template.json.jinja
- so/so-flow-template.json.jinja
- so/so-ids-template.json.jinja
- so/so-import-template.json.jinja
- so/so-osquery-template.json.jinja
- so/so-ossec-template.json.jinja
- so/so-strelka-template.json.jinja
- so/so-syslog-template.json.jinja
- so/so-zeek-template.json.jinja

View File

@@ -1,6 +1,6 @@
logstash:
pipelines:
master:
manager:
config:
- so/0009_input_beats.conf
- so/0010_input_hhbeats.conf

View File

@@ -12,5 +12,14 @@ logstash:
- so/9600_output_ossec.conf.jinja
- so/9700_output_strelka.conf.jinja
templates:
- so/so-beats-template.json.jinja
- so/so-common-template.json
- so/so-zeek-template.json
- so/so-firewall-template.json.jinja
- so/so-flow-template.json.jinja
- so/so-ids-template.json.jinja
- so/so-import-template.json.jinja
- so/so-osquery-template.json.jinja
- so/so-ossec-template.json.jinja
- so/so-strelka-template.json.jinja
- so/so-syslog-template.json.jinja
- so/so-zeek-template.json.jinja

View File

@@ -6,10 +6,10 @@ base:
- match: compound
- zeek
'*_mastersearch or *_heavynode':
'*_managersearch or *_heavynode':
- match: compound
- logstash
- logstash.master
- logstash.manager
- logstash.search
'*_sensor':
@@ -18,16 +18,16 @@ base:
- healthcheck.sensor
- minions.{{ grains.id }}
'*_master or *_mastersearch':
'*_manager or *_managersearch':
- match: compound
- static
- data.*
- secrets
- minions.{{ grains.id }}
'*_master':
'*_manager':
- logstash
- logstash.master
- logstash.manager
'*_eval':
- static
@@ -39,7 +39,7 @@ base:
'*_standalone':
- logstash
- logstash.master
- logstash.manager
- logstash.search
- data.*
- brologs
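
As an illustration of the compound matching above, a hypothetical minion id such as so-demo_managersearch would hit both compound targets, so its pillar would be assembled roughly as follows:

# hypothetical minion id: so-demo_managersearch
# '*_managersearch or *_heavynode'  -> logstash, logstash.manager, logstash.search
# '*_manager or *_managersearch'    -> static, data.*, secrets, minions.so-demo_managersearch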

View File

@@ -6,7 +6,7 @@ import socket
def send(data):
mainint = __salt__['pillar.get']('sensor:mainint', __salt__['pillar.get']('master:mainint'))
mainint = __salt__['pillar.get']('sensor:mainint', __salt__['pillar.get']('manager:mainint'))
mainip = __salt__['grains.get']('ip_interfaces').get(mainint)[0]
dstport = 8094

View File

@@ -26,7 +26,7 @@ x509_signing_policies:
- extendedKeyUsage: serverAuth
- days_valid: 820
- copypath: /etc/pki/issued_certs/
masterssl:
managerssl:
- minions: '*'
- signing_private_key: /etc/pki/ca.key
- signing_cert: /etc/pki/ca.crt

View File

@@ -1,4 +1,4 @@
{% set master = salt['grains.get']('master') %}
{% set manager = salt['grains.get']('master') %}
/etc/salt/minion.d/signing_policies.conf:
file.managed:
- source: salt://ca/files/signing_policies.conf
@@ -20,7 +20,7 @@ pki_private_key:
/etc/pki/ca.crt:
x509.certificate_managed:
- signing_private_key: /etc/pki/ca.key
- CN: {{ master }}
- CN: {{ manager }}
- C: US
- ST: Utah
- L: Salt Lake City

View File

@@ -1,3 +1,5 @@
{% set role = grains.id.split('_') | last %}
# Add socore Group
socoregroup:
group.present:
@@ -131,3 +133,15 @@ utilsyncscripts:
- file_mode: 755
- template: jinja
- source: salt://common/tools/sbin
{% if role in ['eval', 'standalone', 'sensor', 'heavynode'] %}
# Add sensor cleanup
/usr/sbin/so-sensor-clean:
cron.present:
- user: root
- minute: '*'
- hour: '*'
- daymonth: '*'
- month: '*'
- dayweek: '*'
{% endif %}

View File

@@ -18,14 +18,14 @@
}
},grain='id', merge=salt['pillar.get']('docker')) %}
{% if role in ['eval', 'mastersearch', 'master', 'standalone'] %}
{{ append_containers('master', 'grafana', 0) }}
{{ append_containers('static', 'fleet_master', 0) }}
{{ append_containers('master', 'wazuh', 0) }}
{{ append_containers('master', 'thehive', 0) }}
{{ append_containers('master', 'playbook', 0) }}
{{ append_containers('master', 'freq', 0) }}
{{ append_containers('master', 'domainstats', 0) }}
{% if role in ['eval', 'managersearch', 'manager', 'standalone'] %}
{{ append_containers('manager', 'grafana', 0) }}
{{ append_containers('static', 'fleet_manager', 0) }}
{{ append_containers('manager', 'wazuh', 0) }}
{{ append_containers('manager', 'thehive', 0) }}
{{ append_containers('manager', 'playbook', 0) }}
{{ append_containers('manager', 'freq', 0) }}
{{ append_containers('manager', 'domainstats', 0) }}
{% endif %}
{% if role in ['eval', 'heavynode', 'sensor', 'standalone'] %}
@@ -37,7 +37,7 @@
{% endif %}
{% if role == 'searchnode' %}
{{ append_containers('master', 'wazuh', 0) }}
{{ append_containers('manager', 'wazuh', 0) }}
{% endif %}
{% if role == 'sensor' %}

View File

@@ -11,7 +11,7 @@ bro_logs_enabled() {
}
whiptail_master_adv_service_brologs() {
whiptail_manager_adv_service_brologs() {
BLOGS=$(whiptail --title "Security Onion Setup" --checklist "Please Select Logs to Send:" 24 78 12 \
"conn" "Connection Logging" ON \
@@ -54,5 +54,5 @@ whiptail_master_adv_service_brologs() {
"x509" "x.509 Logs" ON 3>&1 1>&2 2>&3 )
}
whiptail_master_adv_service_brologs
whiptail_manager_adv_service_brologs
bro_logs_enabled

View File

@@ -21,13 +21,13 @@ got_root(){
fi
}
master_check() {
# Check to see if this is a master
MASTERCHECK=$(cat /etc/salt/grains | grep role | awk '{print $2}')
if [ $MASTERCHECK == 'so-eval' ] || [ $MASTERCHECK == 'so-master' ] || [ $MASTERCHECK == 'so-mastersearch' ] || [ $MASTERCHECK == 'so-standalone' ] || [ $MASTERCHECK == 'so-helix' ]; then
echo "This is a master. We can proceed"
manager_check() {
# Check to see if this is a manager
MANAGERCHECK=$(cat /etc/salt/grains | grep role | awk '{print $2}')
if [ $MANAGERCHECK == 'so-eval' ] || [ $MANAGERCHECK == 'so-manager' ] || [ $MANAGERCHECK == 'so-managersearch' ] || [ $MANAGERCHECK == 'so-standalone' ] || [ $MANAGERCHECK == 'so-helix' ]; then
echo "This is a manager. We can proceed"
else
echo "Please run soup on the master. The master controls all updates."
echo "Please run soup on the manager. The manager controls all updates."
exit 1
fi
}
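
manager_check keys off the role grain in /etc/salt/grains; as a minimal sketch, the grains file on a manager node would contain a line like:

role: so-manager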
@@ -56,13 +56,13 @@ version_check() {
fi
}
got_root
master_check
manager_check
version_check
# Use the hostname
HOSTNAME=$(hostname)
# List all the containers
if [ $MASTERCHECK != 'so-helix' ]; then
if [ $MANAGERCHECK != 'so-helix' ]; then
TRUSTED_CONTAINERS=( \
"so-acng:$VERSION" \
"so-thehive-cortex:$VERSION" \

View File

@@ -198,7 +198,7 @@ EOF
read alertoption
if [ $alertoption = "1" ] ; then
echo "Please enter the email address you want to send the alerts to. Note: Ensure the Master Server is configured for SMTP."
echo "Please enter the email address you want to send the alerts to. Note: Ensure the Manager Server is configured for SMTP."
read emailaddress
cat << EOF >> "$rulename.yaml"
# (Required)

View File

@@ -14,7 +14,7 @@
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
{%- set MASTERIP = salt['pillar.get']('static:masterip', '') -%}
{%- set MANAGERIP = salt['pillar.get']('static:managerip', '') -%}
. /usr/sbin/so-common
SKIP=0
@@ -50,7 +50,7 @@ done
if [ $SKIP -ne 1 ]; then
# List indices
echo
curl {{ MASTERIP }}:9200/_cat/indices?v
curl {{ MANAGERIP }}:9200/_cat/indices?v
echo
# Inform user we are about to delete all data
echo
@@ -89,10 +89,10 @@ fi
# Delete data
echo "Deleting data..."
INDXS=$(curl -s -XGET {{ MASTERIP }}:9200/_cat/indices?v | egrep 'logstash|elastalert|so-' | awk '{ print $3 }')
INDXS=$(curl -s -XGET {{ MANAGERIP }}:9200/_cat/indices?v | egrep 'logstash|elastalert|so-' | awk '{ print $3 }')
for INDX in ${INDXS}
do
curl -XDELETE "{{ MASTERIP }}:9200/${INDX}" > /dev/null 2>&1
curl -XDELETE "{{ MANAGERIP }}:9200/${INDX}" > /dev/null 2>&1
done
#Start Logstash/Filebeat

View File

@@ -1,5 +1,5 @@
#!/bin/bash
MASTER=MASTER
MANAGER=MANAGER
VERSION="HH1.1.4"
TRUSTED_CONTAINERS=( \
"so-nginx:$VERSION" \
@@ -37,7 +37,7 @@ do
echo "Downloading $i"
docker pull --disable-content-trust=false docker.io/soshybridhunter/$i
# Tag it with the new registry destination
docker tag soshybridhunter/$i $MASTER:5000/soshybridhunter/$i
docker push $MASTER:5000/soshybridhunter/$i
docker tag soshybridhunter/$i $MANAGER:5000/soshybridhunter/$i
docker push $MANAGER:5000/soshybridhunter/$i
docker rmi soshybridhunter/$i
done

View File

@@ -15,7 +15,7 @@
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
IP={{ salt['grains.get']('ip_interfaces').get(salt['pillar.get']('sensor:mainint', salt['pillar.get']('master:mainint', salt['pillar.get']('elasticsearch:mainint', salt['pillar.get']('host:mainint')))))[0] }}
IP={{ salt['grains.get']('ip_interfaces').get(salt['pillar.get']('sensor:mainint', salt['pillar.get']('manager:mainint', salt['pillar.get']('elasticsearch:mainint', salt['pillar.get']('host:mainint')))))[0] }}
ESPORT=9200
THEHIVEESPORT=9400

View File

@@ -1,4 +1,4 @@
{% set MASTERIP = salt['pillar.get']('master:mainip', '') %}
{% set MANAGERIP = salt['pillar.get']('manager:mainip', '') %}
#!/bin/bash
# Copyright 2014,2015,2016,2017,2018,2019 Security Onion Solutions, LLC
#
@@ -16,7 +16,7 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
default_salt_dir=/opt/so/saltstack/default
ELASTICSEARCH_HOST="{{ MASTERIP}}"
ELASTICSEARCH_HOST="{{ MANAGERIP}}"
ELASTICSEARCH_PORT=9200
#ELASTICSEARCH_AUTH=""

View File

@@ -15,28 +15,29 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
{% set MASTER = salt['grains.get']('master') %}
{% set MANAGER = salt['grains.get']('master') %}
{% set VERSION = salt['pillar.get']('static:soversion') %}
{%- set MASTERIP = salt['pillar.get']('static:masterip') -%}
{%- set MANAGERIP = salt['pillar.get']('static:managerip') -%}
function usage {
cat << EOF
Usage: $0 <pcap-file-1> [pcap-file-2] [pcap-file-N]
Imports one or more PCAP files for analysis. If available, curator will be automatically stopped.
Imports one or more PCAP files onto a sensor node. The PCAP traffic will be analyzed and
made available for review in the Security Onion toolset.
EOF
}
function pcapinfo() {
PCAP=$1
ARGS=$2
docker run --rm -v $PCAP:/input.pcap --entrypoint capinfos {{ MASTER }}:5000/soshybridhunter/so-pcaptools:{{ VERSION }} /input.pcap $ARGS
docker run --rm -v $PCAP:/input.pcap --entrypoint capinfos {{ MANAGER }}:5000/soshybridhunter/so-pcaptools:{{ VERSION }} /input.pcap $ARGS
}
function pcapfix() {
PCAP=$1
PCAP_OUT=$2
docker run --rm -v $PCAP:/input.pcap -v $PCAP_OUT:$PCAP_OUT --entrypoint pcapfix {{ MASTER }}:5000/soshybridhunter/so-pcaptools:{{ VERSION }} /input.pcap -o $PCAP_OUT > /dev/null 2>&1
docker run --rm -v $PCAP:/input.pcap -v $PCAP_OUT:$PCAP_OUT --entrypoint pcapfix {{ MANAGER }}:5000/soshybridhunter/so-pcaptools:{{ VERSION }} /input.pcap -o $PCAP_OUT > /dev/null 2>&1
}
function suricata() {
@@ -57,7 +58,7 @@ function suricata() {
-v ${NSM_PATH}/:/nsm/:rw \
-v $PCAP:/input.pcap:ro \
-v /opt/so/conf/suricata/bpf:/etc/suricata/bpf:ro \
{{ MASTER }}:5000/soshybridhunter/so-suricata:{{ VERSION }} \
{{ MANAGER }}:5000/soshybridhunter/so-suricata:{{ VERSION }} \
--runmode single -k none -r /input.pcap > $LOG_PATH/console.log 2>&1
}
@@ -85,7 +86,7 @@ function zeek() {
-v /opt/so/conf/zeek/bpf:/opt/zeek/etc/bpf:ro \
--entrypoint /opt/zeek/bin/zeek \
-w /nsm/zeek/logs \
{{ MASTER }}:5000/soshybridhunter/so-zeek:{{ VERSION }} \
{{ MANAGER }}:5000/soshybridhunter/so-zeek:{{ VERSION }} \
-C -r /input.pcap local > $NSM_PATH/logs/console.log 2>&1
}
@@ -110,14 +111,6 @@ for i in "$@"; do
fi
done
if ! [ -d /opt/so/conf/curator ]; then
echo "Curator is not installed on this node and cannot be stopped automatically."
else
echo -n "Stopping curator..."
so-curator-stop > /dev/null 2>&1
echo "Done"
fi
# track if we have any valid or invalid pcaps
INVALID_PCAPS="no"
VALID_PCAPS="no"
@@ -206,17 +199,20 @@ if [ "$INVALID_PCAPS" = "yes" ]; then
echo "Please note! One or more pcaps was invalid! You can scroll up to see which ones were invalid."
fi
START_OLDEST_SLASH=$(echo $START_OLDEST | sed -e 's/-/%2F/g')
END_NEWEST_SLASH=$(echo $END_NEWEST | sed -e 's/-/%2F/g')
if [ "$VALID_PCAPS" = "yes" ]; then
cat << EOF
Import complete!
You can use the following hyperlink to view data in the time range of your import. Triple-click to quickly highlight the entire hyperlink, then copy it into your browser:
https://{{ MASTERIP }}/kibana/app/kibana#/dashboard/a8411b30-6d03-11ea-b301-3d6c35840645?_g=(refreshInterval:(display:Off,pause:!f,value:0),time:(from:'${START_OLDEST}T00:00:00.000Z',mode:absolute,to:'${END_NEWEST}T00:00:00.000Z'))
https://{{ MANAGERIP }}/kibana/app/kibana#/dashboard/a8411b30-6d03-11ea-b301-3d6c35840645?_g=(refreshInterval:(display:Off,pause:!f,value:0),time:(from:'${START_OLDEST}T00:00:00.000Z',mode:absolute,to:'${END_NEWEST}T00:00:00.000Z'))
or you can manually set your Time Range to be:
From: $START_OLDEST To: $END_NEWEST
Please note that it may take 30 seconds or more for events to appear in Kibana.
Please note that it may take 30 seconds or more for events to appear in Onion Hunt.
EOF
fi

View File

@@ -1,9 +1,9 @@
#!/bin/bash
#
# {%- set FLEET_MASTER = salt['pillar.get']('static:fleet_master', False) -%}
# {%- set FLEET_MANAGER = salt['pillar.get']('static:fleet_manager', False) -%}
# {%- set FLEET_NODE = salt['pillar.get']('static:fleet_node', False) -%}
# {%- set FLEET_IP = salt['pillar.get']('static:fleet_ip', '') %}
# {%- set MASTER = salt['pillar.get']('master:url_base', '') %}
# {%- set MANAGER = salt['pillar.get']('manager:url_base', '') %}
#
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
@@ -20,7 +20,7 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
KIBANA_HOST={{ MASTER }}
KIBANA_HOST={{ MANAGER }}
KSO_PORT=5601
OUTFILE="saved_objects.ndjson"
curl -s -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -XPOST $KIBANA_HOST:$KSO_PORT/api/saved_objects/_export -d '{ "type": [ "index-pattern", "config", "visualization", "dashboard", "search" ], "excludeExportDetails": false }' > $OUTFILE
@@ -29,7 +29,7 @@ curl -s -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -XPOST $KIBANA_H
sed -i "s/$KIBANA_HOST/PLACEHOLDER/g" $OUTFILE
# Clean up for Fleet, if applicable
# {% if FLEET_NODE or FLEET_MASTER %}
# {% if FLEET_NODE or FLEET_MANAGER %}
# Fleet IP
sed -i "s/{{ MASTER }}/FLEETPLACEHOLDER/g" $OUTFILE
sed -i "s/{{ MANAGER }}/FLEETPLACEHOLDER/g" $OUTFILE
# {% endif %}

View File

@@ -0,0 +1,121 @@
#!/bin/bash
# Delete Zeek Logs based on defined CRIT_DISK_USAGE value
# Copyright 2014,2015,2016,2017,2018, 2019 Security Onion Solutions, LLC
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
SENSOR_DIR='/nsm'
CRIT_DISK_USAGE=90
CUR_USAGE=$(df -P $SENSOR_DIR | tail -1 | awk '{print $5}' | tr -d %)
LOG="/opt/so/log/sensor_clean.log"
TODAY=$(date -u "+%Y-%m-%d")
clean () {
## find the oldest Zeek logs directory
OLDEST_DIR=$(ls /nsm/zeek/logs/ | grep -v "current" | grep -v "stats" | grep -v "packetloss" | grep -v "zeek_clean" | sort | head -n 1)
if [ -z "$OLDEST_DIR" -o "$OLDEST_DIR" == ".." -o "$OLDEST_DIR" == "." ]
then
echo "$(date) - No old Zeek logs available to clean up in /nsm/zeek/logs/" >> $LOG
#exit 0
else
echo "$(date) - Removing directory: /nsm/zeek/logs/$OLDEST_DIR" >> $LOG
rm -rf /nsm/zeek/logs/"$OLDEST_DIR"
fi
## Remarking for now, as we are moving extracted files to /nsm/strelka/processed
## find oldest files in extracted directory and exclude today
#OLDEST_EXTRACT=$(find /nsm/zeek/extracted/complete -type f -printf '%T+ %p\n' 2>/dev/null | sort | grep -v $TODAY | head -n 1)
#if [ -z "$OLDEST_EXTRACT" -o "$OLDEST_EXTRACT" == ".." -o "$OLDEST_EXTRACT" == "." ]
#then
# echo "$(date) - No old extracted files available to clean up in /nsm/zeek/extracted/complete" >> $LOG
#else
# OLDEST_EXTRACT_DATE=`echo $OLDEST_EXTRACT | awk '{print $1}' | cut -d+ -f1`
# OLDEST_EXTRACT_FILE=`echo $OLDEST_EXTRACT | awk '{print $2}'`
# echo "$(date) - Removing extracted files for $OLDEST_EXTRACT_DATE" >> $LOG
# find /nsm/zeek/extracted/complete -type f -printf '%T+ %p\n' | grep $OLDEST_EXTRACT_DATE | awk '{print $2}' |while read FILE
# do
# echo "$(date) - Removing extracted file: $FILE" >> $LOG
# rm -f "$FILE"
# done
#fi
## Clean up Zeek extracted files processed by Strelka
STRELKA_FILES='/nsm/strelka/processed'
OLDEST_STRELKA=$(find $STRELKA_FILES -type f -printf '%T+ %p\n' | sort -n | head -n 1 )
if [ -z "$OLDEST_STRELKA" -o "$OLDEST_STRELKA" == ".." -o "$OLDEST_STRELKA" == "." ]
then
echo "$(date) - No old files available to clean up in $STRELKA_FILES" >> $LOG
else
OLDEST_STRELKA_DATE=`echo $OLDEST_STRELKA | awk '{print $1}' | cut -d+ -f1`
OLDEST_STRELKA_FILE=`echo $OLDEST_STRELKA | awk '{print $2}'`
echo "$(date) - Removing extracted files for $OLDEST_STRELKA_DATE" >> $LOG
find $STRELKA_FILES -type f -printf '%T+ %p\n' | grep $OLDEST_STRELKA_DATE | awk '{print $2}' |while read FILE
do
echo "$(date) - Removing file: $FILE" >> $LOG
rm -f "$FILE"
done
fi
## Clean up Suricata log files
SURICATA_LOGS='/nsm/suricata'
OLDEST_SURICATA=$(find $SURICATA_LOGS -type f -printf '%T+ %p\n' | sort -n | head -n 1)
if [ -z "$OLDEST_SURICATA" -o "$OLDEST_SURICATA" == ".." -o "$OLDEST_SURICATA" == "." ]
then
echo "$(date) - No old files available to clean up in $SURICATA_LOGS" >> $LOG
else
OLDEST_SURICATA_DATE=`echo $OLDEST_SURICATA | awk '{print $1}' | cut -d+ -f1`
OLDEST_SURICATA_FILE=`echo $OLDEST_SURICATA | awk '{print $2}'`
echo "$(date) - Removing logs for $OLDEST_SURICATA_DATE" >> $LOG
find $SURICATA_LOGS -type f -printf '%T+ %p\n' | grep $OLDEST_SURICATA_DATE | awk '{print $2}' |while read FILE
do
echo "$(date) - Removing file: $FILE" >> $LOG
rm -f "$FILE"
done
fi
## Clean up extracted pcaps from Steno
PCAPS='/nsm/pcapout'
OLDEST_PCAP=$(find $PCAPS -type f -printf '%T+ %p\n' | sort -n | head -n 1 )
if [ -z "$OLDEST_PCAP" -o "$OLDEST_PCAP" == ".." -o "$OLDEST_PCAP" == "." ]
then
echo "$(date) - No old files available to clean up in $PCAPS" >> $LOG
else
OLDEST_PCAP_DATE=`echo $OLDEST_PCAP | awk '{print $1}' | cut -d+ -f1`
OLDEST_PCAP_FILE=`echo $OLDEST_PCAP | awk '{print $2}'`
echo "$(date) - Removing extracted files for $OLDEST_PCAP_DATE" >> $LOG
find $PCAPS -type f -printf '%T+ %p\n' | grep $OLDEST_PCAP_DATE | awk '{print $2}' |while read FILE
do
echo "$(date) - Removing file: $FILE" >> $LOG
rm -f "$FILE"
done
fi
}
# Check to see if we are already running
IS_RUNNING=$(ps aux | grep "sensor_clean" | grep -v grep | wc -l)
[ "$IS_RUNNING" -gt 2 ] && echo "$(date) - $IS_RUNNING sensor clean script processes running...exiting." >> $LOG && exit 0
if [ "$CUR_USAGE" -gt "$CRIT_DISK_USAGE" ]; then
while [ "$CUR_USAGE" -gt "$CRIT_DISK_USAGE" ];
do
clean
CUR_USAGE=$(df -P $SENSOR_DIR | tail -1 | awk '{print $5}' | tr -d %)
done
else
echo "$(date) - Current usage value of $CUR_USAGE not greater than CRIT_DISK_USAGE value of $CRIT_DISK_USAGE..." >> $LOG
fi

View File

@@ -1,8 +1,4 @@
{%- if grains['role'] in ['so-node', 'so-searchnode', 'so-heavynode'] %}
{%- set log_size_limit = salt['pillar.get']('elasticsearch:log_size_limit', '') -%}
{%- elif grains['role'] in ['so-eval', 'so-mastersearch', 'so-standalone'] %}
{%- set log_size_limit = salt['pillar.get']('master:log_size_limit', '') -%}
{%- endif %}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"

View File

@@ -0,0 +1,29 @@
{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settins:so-beats:close', 30) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
1:
action: close
description: >-
Close Beats indices older than {{cur_close_days}} days.
options:
delete_aliases: False
timeout_override:
continue_if_exception: False
disable_action: False
filters:
- filtertype: pattern
kind: regex
value: '^(logstash-beats.*|so-beats.*)$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{cur_close_days}}
exclude:
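
The close window for this Beats action is read from the pillar path in the Jinja header above (the source spells the key index_settins); an illustrative pillar override would be:

elasticsearch:
  index_settins:
    so-beats:
      close: 45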

View File

@@ -1,9 +1,4 @@
{%- if grains['role'] in ['so-node', 'so-searchnode', 'so-heavynode'] %}
{%- set cur_close_days = salt['pillar.get']('elasticsearch:cur_close_days', '') -%}
{%- elif grains['role'] in ['so-eval', 'so-mastersearch', 'so-standalone'] %}
{%- set cur_close_days = salt['pillar.get']('master:cur_close_days', '') -%}
{%- endif -%}
{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settins:so-firewall:close', 30) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
@@ -15,8 +10,7 @@ actions:
1:
action: close
description: >-
Close indices older than {{cur_close_days}} days (based on index name), for logstash-
prefixed indices.
Close Firewall indices older than {{cur_close_days}} days.
options:
delete_aliases: False
timeout_override:
@@ -25,7 +19,7 @@ actions:
filters:
- filtertype: pattern
kind: regex
value: '^(logstash-.*|so-.*)$'
value: '^(logstash-firewall.*|so-firewall.*)$'
- filtertype: age
source: name
direction: older

View File

@@ -0,0 +1,29 @@
{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settins:so-ids:close', 30) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
1:
action: close
description: >-
Close IDS indices older than {{cur_close_days}} days.
options:
delete_aliases: False
timeout_override:
continue_if_exception: False
disable_action: False
filters:
- filtertype: pattern
kind: regex
value: '^(logstash-ids.*|so-ids.*)$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{cur_close_days}}
exclude:

View File

@@ -0,0 +1,29 @@
{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settins:so-import:close', 30) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
1:
action: close
description: >-
Close Import indices older than {{cur_close_days}} days.
options:
delete_aliases: False
timeout_override:
continue_if_exception: False
disable_action: False
filters:
- filtertype: pattern
kind: regex
value: '^(logstash-import.*|so-import.*)$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{cur_close_days}}
exclude:

View File

@@ -0,0 +1,29 @@
{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settins:so-osquery:close', 30) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
1:
action: close
description: >-
Close osquery indices older than {{cur_close_days}} days.
options:
delete_aliases: False
timeout_override:
continue_if_exception: False
disable_action: False
filters:
- filtertype: pattern
kind: regex
value: '^(logstash-osquery.*|so-osquery.*)$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{cur_close_days}}
exclude:

View File

@@ -0,0 +1,29 @@
{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settins:so-ossec:close', 30) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
1:
action: close
description: >-
Close ossec indices older than {{cur_close_days}} days.
options:
delete_aliases: False
timeout_override:
continue_if_exception: False
disable_action: False
filters:
- filtertype: pattern
kind: regex
value: '^(logstash-ossec.*|so-ossec.*)$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{cur_close_days}}
exclude:

View File

@@ -0,0 +1,29 @@
{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settins:so-strelka:close', 30) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
1:
action: close
description: >-
Close Strelka indices older than {{cur_close_days}} days.
options:
delete_aliases: False
timeout_override:
continue_if_exception: False
disable_action: False
filters:
- filtertype: pattern
kind: regex
value: '^(logstash-strelka.*|so-strelka.*)$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{cur_close_days}}
exclude:

View File

@@ -0,0 +1,29 @@
{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settins:so-syslog:close', 30) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
1:
action: close
description: >-
Close syslog indices older than {{cur_close_days}} days.
options:
delete_aliases: False
timeout_override:
continue_if_exception: False
disable_action: False
filters:
- filtertype: pattern
kind: regex
value: '^(logstash-syslog.*|so-syslog.*)$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{cur_close_days}}
exclude:

View File

@@ -0,0 +1,29 @@
{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settins:so-zeek:close', 30) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
1:
action: close
description: >-
Close Zeek indices older than {{cur_close_days}} days.
options:
delete_aliases: False
timeout_override:
continue_if_exception: False
disable_action: False
filters:
- filtertype: pattern
kind: regex
value: '^(logstash-zeek.*|so-zeek.*)$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{cur_close_days}}
exclude:

View File

@@ -5,10 +5,10 @@
{%- set ELASTICSEARCH_HOST = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- set ELASTICSEARCH_PORT = salt['pillar.get']('elasticsearch:es_port', '') -%}
{%- set LOG_SIZE_LIMIT = salt['pillar.get']('elasticsearch:log_size_limit', '') -%}
{%- elif grains['role'] in ['so-eval', 'so-mastersearch', 'so-standalone'] %}
{%- set ELASTICSEARCH_HOST = salt['pillar.get']('master:mainip', '') -%}
{%- set ELASTICSEARCH_PORT = salt['pillar.get']('master:es_port', '') -%}
{%- set LOG_SIZE_LIMIT = salt['pillar.get']('master:log_size_limit', '') -%}
{%- elif grains['role'] in ['so-eval', 'so-managersearch', 'so-standalone'] %}
{%- set ELASTICSEARCH_HOST = salt['pillar.get']('manager:mainip', '') -%}
{%- set ELASTICSEARCH_PORT = salt['pillar.get']('manager:es_port', '') -%}
{%- set LOG_SIZE_LIMIT = salt['pillar.get']('manager:log_size_limit', '') -%}
{%- endif -%}
# Copyright 2014,2015,2016,2017,2018 Security Onion Solutions, LLC

View File

@@ -1,7 +1,7 @@
{% if grains['role'] in ['so-node', 'so-heavynode'] %}
{%- set elasticsearch = salt['pillar.get']('elasticsearch:mainip', '') -%}
{% elif grains['role'] in ['so-eval', 'so-mastersearch', 'so-standalone'] %}
{%- set elasticsearch = salt['pillar.get']('master:mainip', '') -%}
{% elif grains['role'] in ['so-eval', 'so-managersearch', 'so-standalone'] %}
{%- set elasticsearch = salt['pillar.get']('manager:mainip', '') -%}
{%- endif %}
---

View File

@@ -1,6 +1,6 @@
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
{% set MASTER = salt['grains.get']('master') %}
{% if grains['role'] in ['so-eval', 'so-node', 'so-mastersearch', 'so-heavynode', 'so-standalone'] %}
{% set MANAGER = salt['grains.get']('master') %}
{% if grains['role'] in ['so-eval', 'so-node', 'so-managersearch', 'so-heavynode', 'so-standalone'] %}
# Curator
# Create the group
curatorgroup:
@@ -30,18 +30,10 @@ curlogdir:
- user: 934
- group: 939
curcloseconf:
file.managed:
- name: /opt/so/conf/curator/action/close.yml
- source: salt://curator/files/action/close.yml
- user: 934
- group: 939
- template: jinja
curdelconf:
file.managed:
- name: /opt/so/conf/curator/action/delete.yml
- source: salt://curator/files/action/delete.yml
actionconfs:
file.recurse:
- name: /opt/so/conf/curator/action
- source: salt://curator/files/action
- user: 934
- group: 939
- template: jinja
@@ -119,7 +111,7 @@ so-curatordeletecron:
so-curator:
docker_container.running:
- image: {{ MASTER }}:5000/soshybridhunter/so-curator:{{ VERSION }}
- image: {{ MANAGER }}:5000/soshybridhunter/so-curator:{{ VERSION }}
- hostname: curator
- name: so-curator
- user: curator

View File

@@ -127,11 +127,11 @@
@load policy/hassh
# You can load your own intel into:
# /opt/so/saltstack/bro/policy/intel/ on the master
# /opt/so/saltstack/bro/policy/intel/ on the manager
@load intel
# Load a custom Bro policy
# /opt/so/saltstack/bro/policy/custom/ on the master
# /opt/so/saltstack/bro/policy/custom/ on the manager
#@load custom/somebropolicy.bro
# Write logs in JSON

View File

@@ -121,11 +121,11 @@
@load policy/ja3
# You can load your own intel into:
# /opt/so/saltstack/bro/policy/intel/ on the master
# /opt/so/saltstack/bro/policy/intel/ on the manager
@load intel
# Load a custom Bro policy
# /opt/so/saltstack/bro/policy/custom/ on the master
# /opt/so/saltstack/bro/policy/custom/ on the manager
#@load custom/somebropolicy.bro
# Use JSON

View File

@@ -1,5 +1,5 @@
{% set esip = salt['pillar.get']('master:mainip', '') %}
{% set esport = salt['pillar.get']('master:es_port', '') %}
{% set esip = salt['pillar.get']('manager:mainip', '') %}
{% set esport = salt['pillar.get']('manager:es_port', '') %}
# This is the folder that contains the rule yaml files
# Any .yaml file will be loaded as a rule
rules_folder: /opt/elastalert/rules/
@@ -86,3 +86,25 @@ alert_time_limit:
index_settings:
shards: 1
replicas: 0
logging:
version: 1
incremental: false
disable_existing_loggers: false
formatters:
logline:
format: '%(asctime)s %(levelname)+8s %(name)+20s %(message)s'
handlers:
file:
class : logging.FileHandler
formatter: logline
level: INFO
filename: /var/log/elastalert/elastalert.log
loggers:
'':
level: INFO
handlers:
- file
propagate: false

View File

@@ -1,7 +1,7 @@
{% set es = salt['pillar.get']('static:masterip', '') %}
{% set hivehost = salt['pillar.get']('static:masterip', '') %}
{% set es = salt['pillar.get']('static:managerip', '') %}
{% set hivehost = salt['pillar.get']('static:managerip', '') %}
{% set hivekey = salt['pillar.get']('static:hivekey', '') %}
{% set MASTER = salt['pillar.get']('master:url_base', '') %}
{% set MANAGER = salt['pillar.get']('manager:url_base', '') %}
# Elastalert rule to forward Suricata alerts from Security Onion to a specified TheHive instance.
#
@@ -39,7 +39,7 @@ hive_alert_config:
title: '{match[rule][name]}'
type: 'NIDS'
source: 'SecurityOnion'
description: "`SOC Hunt Pivot:` \n\n <https://{{MASTER}}/#/hunt?q=network.community_id%3A%20%20%22{match[network][community_id]}%22%20%7C%20groupby%20source.ip%20destination.ip,event.module,%20event.dataset> \n\n `Kibana Dashboard Pivot:` \n\n <https://{{MASTER}}/kibana/app/kibana#/dashboard/30d0ac90-729f-11ea-8dd2-9d8795a1200b?_g=(filters:!(('$state':(store:globalState),meta:(alias:!n,disabled:!f,index:'*:so-*',key:network.community_id,negate:!f,params:(query:'{match[network][community_id]}'),type:phrase),query:(match_phrase:(network.community_id:'{match[network][community_id]}')))),refreshInterval:(pause:!t,value:0),time:(from:now-7d,to:now))> \n\n `IPs: `{match[source][ip]}:{match[source][port]} --> {match[destination][ip]}:{match[destination][port]} \n\n `Signature:`{match[rule][rule]}"
description: "`SOC Hunt Pivot:` \n\n <https://{{MANAGER}}/#/hunt?q=network.community_id%3A%20%20%22{match[network][community_id]}%22%20%7C%20groupby%20source.ip%20destination.ip,event.module,%20event.dataset> \n\n `Kibana Dashboard Pivot:` \n\n <https://{{MANAGER}}/kibana/app/kibana#/dashboard/30d0ac90-729f-11ea-8dd2-9d8795a1200b?_g=(filters:!(('$state':(store:globalState),meta:(alias:!n,disabled:!f,index:'*:so-*',key:network.community_id,negate:!f,params:(query:'{match[network][community_id]}'),type:phrase),query:(match_phrase:(network.community_id:'{match[network][community_id]}')))),refreshInterval:(pause:!t,value:0),time:(from:now-7d,to:now))> \n\n `IPs: `{match[source][ip]}:{match[source][port]} --> {match[destination][ip]}:{match[destination][port]} \n\n `Signature:`{match[rule][rule]}"
severity: 2
tags: ['{match[rule][uuid]}','{match[source][ip]}','{match[destination][ip]}']
tlp: 3

View File

@@ -1,7 +1,7 @@
{% set es = salt['pillar.get']('static:masterip', '') %}
{% set hivehost = salt['pillar.get']('static:masterip', '') %}
{% set es = salt['pillar.get']('static:managerip', '') %}
{% set hivehost = salt['pillar.get']('static:managerip', '') %}
{% set hivekey = salt['pillar.get']('static:hivekey', '') %}
{% set MASTER = salt['pillar.get']('master:url_base', '') %}
{% set MANAGER = salt['pillar.get']('manager:url_base', '') %}
# Elastalert rule to forward high level Wazuh alerts from Security Onion to a specified TheHive instance.
#
@@ -38,7 +38,7 @@ hive_alert_config:
title: '{match[rule][name]}'
type: 'wazuh'
source: 'SecurityOnion'
description: "`SOC Hunt Pivot:` \n\n <https://{{MASTER}}/#/hunt?q=event.module%3A%20ossec%20AND%20rule.id%3A{match[rule][id]}%20%7C%20groupby%20host.name%20rule.name> \n\n `Kibana Dashboard Pivot:` \n\n <https://{{MASTER}}/kibana/app/kibana#/dashboard/ed6f7e20-e060-11e9-8f0c-2ddbf5ed9290?_g=(refreshInterval:(display:Off,pause:!f,value:0),time:(from:now-24h,mode:quick,to:now))&_a=(columns:!(_source),index:'*:logstash-*',interval:auto,query:(query_string:(analyze_wildcard:!t,query:'sid:')),sort:!('@timestamp',desc))>"
description: "`SOC Hunt Pivot:` \n\n <https://{{MANAGER}}/#/hunt?q=event.module%3A%20ossec%20AND%20rule.id%3A{match[rule][id]}%20%7C%20groupby%20host.name%20rule.name> \n\n `Kibana Dashboard Pivot:` \n\n <https://{{MANAGER}}/kibana/app/kibana#/dashboard/ed6f7e20-e060-11e9-8f0c-2ddbf5ed9290?_g=(refreshInterval:(display:Off,pause:!f,value:0),time:(from:now-24h,mode:quick,to:now))&_a=(columns:!(_source),index:'*:logstash-*',interval:auto,query:(query_string:(analyze_wildcard:!t,query:'sid:')),sort:!('@timestamp',desc))>"
severity: 2
tags: ['{match[rule][id]}','{match[host][name]}']
tlp: 3

View File

@@ -13,12 +13,12 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
{% set MASTER = salt['grains.get']('master') %}
{% set MANAGER = salt['grains.get']('master') %}
{% if grains['role'] in ['so-eval','so-mastersearch', 'so-master', 'so-standalone'] %}
{% set esalert = salt['pillar.get']('master:elastalert', '1') %}
{% set esip = salt['pillar.get']('master:mainip', '') %}
{% set esport = salt['pillar.get']('master:es_port', '') %}
{% if grains['role'] in ['so-eval','so-managersearch', 'so-manager', 'so-standalone'] %}
{% set esalert = salt['pillar.get']('manager:elastalert', '1') %}
{% set esip = salt['pillar.get']('manager:mainip', '') %}
{% set esport = salt['pillar.get']('manager:es_port', '') %}
{% elif grains['role'] == 'so-node' %}
{% set esalert = salt['pillar.get']('elasticsearch:elastalert', '0') %}
{% endif %}
@@ -101,7 +101,7 @@ elastaconf:
so-elastalert:
docker_container.running:
- image: {{ MASTER }}:5000/soshybridhunter/so-elastalert:{{ VERSION }}
- image: {{ MANAGER }}:5000/soshybridhunter/so-elastalert:{{ VERSION }}
- hostname: elastalert
- name: so-elastalert
- user: elastalert

View File

@@ -1,6 +1,11 @@
{% if grains['role'] == 'so-master' or grains['role'] == 'so-eval' %}
{%- set esclustername = salt['pillar.get']('master:esclustername', '') %}
cluster.name: "{{ esclustername }}"
{%- set NODE_ROUTE_TYPE = salt['pillar.get']('elasticsearch:node_route_type', 'hot') %}
{%- if salt['pillar.get']('elasticsearch:hot_warm_enabled') or salt['pillar.get']('elasticsearch:true_cluster') %}
{%- set ESCLUSTERNAME = salt['pillar.get']('elasticsearch:true_cluster_name', '') %}
{%- else %}
{%- set ESCLUSTERNAME = salt['pillar.get']('elasticsearch:esclustername', '') %}
{%- endif %}
{%- set NODEIP = salt['pillar.get']('elasticsearch:mainip', '') -%}
cluster.name: "{{ ESCLUSTERNAME }}"
network.host: 0.0.0.0
# minimum_master_nodes need to be explicitly set when bound on a public IP
@@ -10,20 +15,13 @@ discovery.zen.minimum_master_nodes: 1
# This is a test -- if this is here, then the volume is mounted correctly.
path.logs: /var/log/elasticsearch
action.destructive_requires_name: true
{%- else %}
{%- set esclustername = salt['grains.get']('host', '') %}
{%- set nodeip = salt['pillar.get']('elasticsearch:mainip', '') -%}
cluster.name: "{{ esclustername }}"
network.host: 0.0.0.0
discovery.zen.minimum_master_nodes: 1
path.logs: /var/log/elasticsearch
action.destructive_requires_name: true
transport.bind_host: 0.0.0.0
transport.publish_host: {{ nodeip }}
transport.publish_host: {{ NODEIP }}
transport.publish_port: 9300
{%- endif %}
cluster.routing.allocation.disk.threshold_enabled: true
cluster.routing.allocation.disk.watermark.low: 95%
cluster.routing.allocation.disk.watermark.high: 98%
cluster.routing.allocation.disk.watermark.flood_stage: 98%
node.attr.box_type: {{ NODE_ROUTE_TYPE }}
node.name: {{ ESCLUSTERNAME }}
script.max_compilations_rate: 1000/1m
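
The cluster name logic in this template prefers the true-cluster name whenever hot/warm or true clustering is enabled; a minimal sketch of the elasticsearch pillar for that case, with illustrative values, would be:

elasticsearch:
  true_cluster: True
  true_cluster_name: so-cluster
  node_route_type: hot
  mainip: 192.0.2.10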

View File

@@ -13,7 +13,7 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
{% set MASTER = salt['grains.get']('master') %}
{% set MANAGER = salt['grains.get']('master') %}
{% set FEATURES = salt['pillar.get']('elastic:features', False) %}
{% if FEATURES %}
@@ -22,9 +22,9 @@
{% set FEATURES = '' %}
{% endif %}
{% if grains['role'] in ['so-eval','so-mastersearch', 'so-master', 'so-standalone'] %}
{% set esclustername = salt['pillar.get']('master:esclustername', '') %}
{% set esheap = salt['pillar.get']('master:esheap', '') %}
{% if grains['role'] in ['so-eval','so-managersearch', 'so-manager', 'so-standalone'] %}
{% set esclustername = salt['pillar.get']('manager:esclustername', '') %}
{% set esheap = salt['pillar.get']('manager:esheap', '') %}
{% elif grains['role'] in ['so-node','so-heavynode'] %}
{% set esclustername = salt['pillar.get']('elasticsearch:esclustername', '') %}
{% set esheap = salt['pillar.get']('elasticsearch:esheap', '') %}
@@ -101,7 +101,7 @@ eslogdir:
so-elasticsearch:
docker_container.running:
- image: {{ MASTER }}:5000/soshybridhunter/so-elasticsearch:{{ VERSION }}{{ FEATURES }}
- image: {{ MANAGER }}:5000/soshybridhunter/so-elasticsearch:{{ VERSION }}{{ FEATURES }}
- hostname: elasticsearch
- name: so-elasticsearch
- user: elasticsearch
@@ -141,7 +141,7 @@ so-elasticsearch-pipelines:
- file: esyml
- file: so-elasticsearch-pipelines-file
{% if grains['role'] in ['so-master', 'so-eval', 'so-mastersearch', 'so-standalone'] %}
{% if grains['role'] in ['so-manager', 'so-eval', 'so-managersearch', 'so-standalone'] %}
so-elasticsearch-templates:
cmd.run:
- name: /usr/sbin/so-elasticsearch-templates

View File

@@ -1,7 +1,7 @@
{%- if grains.role == 'so-heavynode' %}
{%- set MASTER = salt['pillar.get']('sensor:mainip', '') %}
{%- set MANAGER = salt['pillar.get']('sensor:mainip', '') %}
{%- else %}
{%- set MASTER = grains['master'] %}
{%- set MANAGER = salt['grains.get']('master') %}
{%- endif %}
@@ -9,7 +9,7 @@
{%- set BROVER = salt['pillar.get']('static:broversion', 'COMMUNITY') %}
{%- set WAZUHENABLED = salt['pillar.get']('static:wazuh', '0') %}
{%- set STRELKAENABLED = salt['pillar.get']('strelka:enabled', '0') %}
{%- set FLEETMASTER = salt['pillar.get']('static:fleet_master', False) -%}
{%- set FLEETMANAGER = salt['pillar.get']('static:fleet_manager', False) -%}
{%- set FLEETNODE = salt['pillar.get']('static:fleet_node', False) -%}
name: {{ HOSTNAME }}
@@ -126,6 +126,8 @@ filebeat.inputs:
category: network
imported: true
processors:
- add_tags:
tags: [import]
- dissect:
tokenizer: "/nsm/import/%{import.id}/zeek/logs/%{import.file}"
field: "log.file.path"
@@ -164,6 +166,8 @@ filebeat.inputs:
category: network
imported: true
processors:
- add_tags:
tags: [import]
- dissect:
tokenizer: "/nsm/import/%{import.id}/suricata/%{import.file}"
field: "log.file.path"
@@ -214,7 +218,7 @@ filebeat.inputs:
{%- endif %}
{%- if FLEETMASTER or FLEETNODE %}
{%- if FLEETMANAGER or FLEETNODE %}
- type: log
paths:
@@ -252,7 +256,7 @@ output.{{ type }}:
{%- if grains['role'] == "so-eval" %}
output.elasticsearch:
enabled: true
hosts: ["{{ MASTER }}:9200"]
hosts: ["{{ MANAGER }}:9200"]
pipelines:
- pipeline: "%{[module]}.%{[dataset]}"
indices:
@@ -280,7 +284,7 @@ output.logstash:
enabled: true
# The Logstash hosts
hosts: ["{{ MASTER }}:5644"]
hosts: ["{{ MANAGER }}:5644"]
# Number of workers per Logstash host.
#worker: 1

View File

@@ -12,8 +12,8 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
{% set MASTER = salt['grains.get']('master') %}
{% set MASTERIP = salt['pillar.get']('static:masterip', '') %}
{% set MANAGER = salt['grains.get']('master') %}
{% set MANAGERIP = salt['pillar.get']('static:managerip', '') %}
{% set FEATURES = salt['pillar.get']('elastic:features', False) %}
{% if FEATURES %}
{% set FEATURES = "-features" %}
@@ -51,10 +51,10 @@ filebeatconfsync:
OUTPUT: {{ salt['pillar.get']('filebeat:config:output', {}) }}
so-filebeat:
docker_container.running:
- image: {{ MASTER }}:5000/soshybridhunter/so-filebeat:{{ VERSION }}{{ FEATURES }}
- image: {{ MANAGER }}:5000/soshybridhunter/so-filebeat:{{ VERSION }}{{ FEATURES }}
- hostname: so-filebeat
- user: root
- extra_hosts: {{ MASTER }}:{{ MASTERIP }}
- extra_hosts: {{ MANAGER }}:{{ MANAGERIP }}
- binds:
- /nsm:/nsm:ro
- /opt/so/log/filebeat:/usr/share/filebeat/logs:rw

View File

@@ -6,7 +6,7 @@ role:
chain:
DOCKER-USER:
hostgroups:
master:
manager:
portgroups:
- {{ portgroups.wazuh_agent }}
- {{ portgroups.wazuh_api }}
@@ -85,12 +85,12 @@ role:
- {{ portgroups.all }}
minion:
portgroups:
- {{ portgroups.salt_master }}
master:
- {{ portgroups.salt_manager }}
manager:
chain:
DOCKER-USER:
hostgroups:
master:
manager:
portgroups:
- {{ portgroups.wazuh_agent }}
- {{ portgroups.wazuh_api }}
@@ -166,12 +166,12 @@ role:
- {{ portgroups.all }}
minion:
portgroups:
- {{ portgroups.salt_master }}
mastersearch:
- {{ portgroups.salt_manager }}
managersearch:
chain:
DOCKER-USER:
hostgroups:
master:
manager:
portgroups:
- {{ portgroups.wazuh_agent }}
- {{ portgroups.wazuh_api }}
@@ -247,12 +247,12 @@ role:
- {{ portgroups.all }}
minion:
portgroups:
- {{ portgroups.salt_master }}
- {{ portgroups.salt_manager }}
standalone:
chain:
DOCKER-USER:
hostgroups:
master:
manager:
portgroups:
- {{ portgroups.wazuh_agent }}
- {{ portgroups.wazuh_api }}
@@ -328,12 +328,12 @@ role:
- {{ portgroups.all }}
minion:
portgroups:
- {{ portgroups.salt_master }}
- {{ portgroups.salt_manager }}
helixsensor:
chain:
DOCKER-USER:
hostgroups:
master:
manager:
portgroups:
- {{ portgroups.wazuh_agent }}
- {{ portgroups.playbook }}
@@ -391,12 +391,12 @@ role:
- {{ portgroups.all }}
minion:
portgroups:
- {{ portgroups.salt_master }}
- {{ portgroups.salt_manager }}
searchnode:
chain:
DOCKER-USER:
hostgroups:
master:
manager:
portgroups:
- {{ portgroups.elasticsearch_node }}
dockernet:

View File

@@ -19,4 +19,4 @@ firewall:
ips:
delete:
insert:
- {{ salt['grains.get']('ip_interfaces').get(salt['pillar.get']('sensor:mainint', salt['pillar.get']('master:mainint', salt['pillar.get']('elasticsearch:mainint', salt['pillar.get']('host:mainint')))))[0] }}
- {{ salt['grains.get']('ip_interfaces').get(salt['pillar.get']('sensor:mainint', salt['pillar.get']('manager:mainint', salt['pillar.get']('elasticsearch:mainint', salt['pillar.get']('host:mainint')))))[0] }}

View File

@@ -61,7 +61,7 @@ firewall:
redis:
tcp:
- 6379
salt_master:
salt_manager:
tcp:
- 4505
- 4506

View File

@@ -1,4 +1,4 @@
{% set MASTER = salt['grains.get']('master') %}
{% set MANAGER = salt['grains.get']('master') %}
{% set ENROLLSECRET = salt['pillar.get']('secrets:fleet_enroll-secret') %}
{% set CURRENTPACKAGEVERSION = salt['pillar.get']('static:fleet_packages-version') %}
{% set VERSION = salt['pillar.get']('static:soversion') %}
@@ -19,6 +19,6 @@ so/fleet:
mainip: {{ grains.host }}
enroll-secret: {{ ENROLLSECRET }}
current-package-version: {{ CURRENTPACKAGEVERSION }}
master: {{ MASTER }}
manager: {{ MANAGER }}
version: {{ VERSION }}

View File

@@ -2,14 +2,14 @@
{%- set FLEETPASS = salt['pillar.get']('secrets:fleet', None) -%}
{%- set FLEETJWT = salt['pillar.get']('secrets:fleet_jwt', None) -%}
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
{% set MASTER = salt['grains.get']('master') %}
{% set MANAGER = salt['grains.get']('master') %}
{% set FLEETARCH = salt['grains.get']('role') %}
{% if FLEETARCH == "so-fleet" %}
{% set MAININT = salt['pillar.get']('host:mainint') %}
{% set MAINIP = salt['grains.get']('ip_interfaces').get(MAININT)[0] %}
{% else %}
{% set MAINIP = salt['pillar.get']('static:masterip') %}
{% set MAINIP = salt['pillar.get']('static:managerip') %}
{% endif %}
include:
@@ -105,7 +105,7 @@ fleet_password_none:
so-fleet:
docker_container.running:
- image: {{ MASTER }}:5000/soshybridhunter/so-fleet:{{ VERSION }}
- image: {{ MANAGER }}:5000/soshybridhunter/so-fleet:{{ VERSION }}
- hostname: so-fleet
- port_bindings:
- 0.0.0.0:8080:8080

View File

@@ -1,4 +1,4 @@
{%- set FLEETMASTER = salt['pillar.get']('static:fleet_master', False) -%}
{%- set FLEETMANAGER = salt['pillar.get']('static:fleet_manager', False) -%}
{%- set FLEETNODE = salt['pillar.get']('static:fleet_node', False) -%}
{%- set FLEETHOSTNAME = salt['pillar.get']('static:fleet_hostname', False) -%}
{%- set FLEETIP = salt['pillar.get']('static:fleet_ip', False) -%}

View File

@@ -12,7 +12,7 @@
}
]
},
"description": "This Dashboard provides a general overview of the Master",
"description": "This Dashboard provides a general overview of the Manager",
"editable": true,
"gnetId": 2381,
"graphTooltip": 0,
@@ -4162,7 +4162,7 @@
]
},
"timezone": "browser",
"title": "Master Node - {{ SERVERNAME }} Overview",
"title": "Manager Node - {{ SERVERNAME }} Overview",
"uid": "{{ UID }}",
"version": 3
}

View File

@@ -13,7 +13,7 @@
}
]
},
"description": "This Dashboard provides a general overview of a MasterSearch Node",
"description": "This Dashboard provides a general overview of a ManagerSearch Node",
"editable": true,
"gnetId": 2381,
"graphTooltip": 0,

View File

@@ -3,20 +3,20 @@ apiVersion: 1
providers:
{%- if grains['role'] != 'so-eval' %}
- name: 'Master'
folder: 'Master'
- name: 'Manager'
folder: 'Manager'
type: file
disableDeletion: false
editable: true
options:
path: /etc/grafana/grafana_dashboards/master
- name: 'Master Search'
folder: 'Master Search'
path: /etc/grafana/grafana_dashboards/manager
- name: 'Manager Search'
folder: 'Manager Search'
type: file
disableDeletion: false
editable: true
options:
path: /etc/grafana/grafana_dashboards/mastersearch
path: /etc/grafana/grafana_dashboards/managersearch
- name: 'Sensor Nodes'
folder: 'Sensor Nodes'
type: file

View File

@@ -1,4 +1,4 @@
{%- set MASTER = salt['pillar.get']('static:masterip', '') %}
{%- set MANAGER = salt['pillar.get']('static:managerip', '') %}
apiVersion: 1
deleteDatasources:
@@ -10,7 +10,7 @@ datasources:
type: influxdb
access: proxy
database: telegraf
url: https://{{ MASTER }}:8086
url: https://{{ MANAGER }}:8086
jsonData:
tlsAuth: false
tlsAuthWithCACert: false

View File

@@ -1,8 +1,8 @@
{% set GRAFANA = salt['pillar.get']('master:grafana', '0') %}
{% set MASTER = salt['grains.get']('master') %}
{% set GRAFANA = salt['pillar.get']('manager:grafana', '0') %}
{% set MANAGER = salt['grains.get']('master') %}
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
{% if grains['role'] in ['so-master', 'so-mastersearch', 'so-eval', 'so-standalone'] and GRAFANA == 1 %}
{% if grains['role'] in ['so-manager', 'so-managersearch', 'so-eval', 'so-standalone'] and GRAFANA == 1 %}
# Grafana all the things
grafanadir:
@@ -28,14 +28,14 @@ grafanadashdir:
grafanadashmdir:
file.directory:
- name: /opt/so/conf/grafana/grafana_dashboards/master
- name: /opt/so/conf/grafana/grafana_dashboards/manager
- user: 939
- group: 939
- makedirs: True
grafanadashmsdir:
file.directory:
- name: /opt/so/conf/grafana/grafana_dashboards/mastersearch
- name: /opt/so/conf/grafana/grafana_dashboards/managersearch
- user: 939
- group: 939
- makedirs: True
@@ -76,17 +76,17 @@ grafanaconf:
- template: jinja
- source: salt://grafana/etc
{% if salt['pillar.get']('mastertab', False) %}
{% for SN, SNDATA in salt['pillar.get']('mastertab', {}).items() %}
{% if salt['pillar.get']('managertab', False) %}
{% for SN, SNDATA in salt['pillar.get']('managertab', {}).items() %}
{% set NODETYPE = SN.split('_')|last %}
{% set SN = SN | regex_replace('_' ~ NODETYPE, '') %}
dashboard-master:
dashboard-manager:
file.managed:
- name: /opt/so/conf/grafana/grafana_dashboards/master/{{ SN }}-Master.json
- name: /opt/so/conf/grafana/grafana_dashboards/manager/{{ SN }}-Manager.json
- user: 939
- group: 939
- template: jinja
- source: salt://grafana/dashboards/master/master.json
- source: salt://grafana/dashboards/manager/manager.json
- defaults:
SERVERNAME: {{ SN }}
MANINT: {{ SNDATA.manint }}
@@ -99,17 +99,17 @@ dashboard-master:
{% endfor %}
{% endif %}
{% if salt['pillar.get']('mastersearchtab', False) %}
{% for SN, SNDATA in salt['pillar.get']('mastersearchtab', {}).items() %}
{% if salt['pillar.get']('managersearchtab', False) %}
{% for SN, SNDATA in salt['pillar.get']('managersearchtab', {}).items() %}
{% set NODETYPE = SN.split('_')|last %}
{% set SN = SN | regex_replace('_' ~ NODETYPE, '') %}
dashboard-mastersearch:
dashboard-managersearch:
file.managed:
- name: /opt/so/conf/grafana/grafana_dashboards/mastersearch/{{ SN }}-MasterSearch.json
- name: /opt/so/conf/grafana/grafana_dashboards/managersearch/{{ SN }}-ManagerSearch.json
- user: 939
- group: 939
- template: jinja
- source: salt://grafana/dashboards/mastersearch/mastersearch.json
- source: salt://grafana/dashboards/managersearch/managersearch.json
- defaults:
SERVERNAME: {{ SN }}
MANINT: {{ SNDATA.manint }}
@@ -216,7 +216,7 @@ dashboard-{{ SN }}:
so-grafana:
docker_container.running:
- image: {{ MASTER }}:5000/soshybridhunter/so-grafana:{{ VERSION }}
- image: {{ MANAGER }}:5000/soshybridhunter/so-grafana:{{ VERSION }}
- hostname: grafana
- user: socore
- binds:

View File

@@ -13,7 +13,7 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
{% set MASTER = salt['grains.get']('master') %}
{% set MANAGER = salt['grains.get']('master') %}
# IDSTools Setup
idstoolsdir:
file.directory:
@@ -60,7 +60,7 @@ synclocalnidsrules:
so-idstools:
docker_container.running:
- image: {{ MASTER }}:5000/soshybridhunter/so-idstools:{{ VERSION }}
- image: {{ MANAGER }}:5000/soshybridhunter/so-idstools:{{ VERSION }}
- hostname: so-idstools
- user: socore
- binds:

View File

@@ -1,9 +1,9 @@
{% set GRAFANA = salt['pillar.get']('master:grafana', '0') %}
{% set MASTER = salt['grains.get']('master') %}
{% set GRAFANA = salt['pillar.get']('manager:grafana', '0') %}
{% set MANAGER = salt['grains.get']('master') %}
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
{% if grains['role'] in ['so-master', 'so-mastersearch', 'so-eval', 'so-standalone'] and GRAFANA == 1 %}
{% if grains['role'] in ['so-manager', 'so-managersearch', 'so-eval', 'so-standalone'] and GRAFANA == 1 %}
# Influx DB
influxconfdir:
@@ -26,7 +26,7 @@ influxdbconf:
so-influxdb:
docker_container.running:
- image: {{ MASTER }}:5000/soshybridhunter/so-influxdb:{{ VERSION }}
- image: {{ MANAGER }}:5000/soshybridhunter/so-influxdb:{{ VERSION }}
- hostname: influxdb
- environment:
- INFLUXDB_HTTP_LOG_ENABLED=false

View File

@@ -1,4 +1,4 @@
{%- set ES = salt['pillar.get']('master:mainip', '') -%}
{%- set ES = salt['pillar.get']('manager:mainip', '') -%}
# Wait for ElasticSearch to come up, so that we can query for version information
echo -n "Waiting for ElasticSearch..."
COUNT=0

View File

@@ -1,20 +1,20 @@
#!/bin/bash
# {%- set FLEET_MASTER = salt['pillar.get']('static:fleet_master', False) -%}
# {%- set FLEET_MANAGER = salt['pillar.get']('static:fleet_manager', False) -%}
# {%- set FLEET_NODE = salt['pillar.get']('static:fleet_node', False) -%}
# {%- set MASTER = salt['pillar.get']('master:url_base', '') %}
# {%- set MANAGER = salt['pillar.get']('manager:url_base', '') %}
KIBANA_VERSION="7.6.1"
# Copy template file
cp /opt/so/conf/kibana/saved_objects.ndjson.template /opt/so/conf/kibana/saved_objects.ndjson
# {% if FLEET_NODE or FLEET_MASTER %}
# {% if FLEET_NODE or FLEET_MANAGER %}
# Fleet IP
sed -i "s/FLEETPLACEHOLDER/{{ MASTER }}/g" /opt/so/conf/kibana/saved_objects.ndjson
sed -i "s/FLEETPLACEHOLDER/{{ MANAGER }}/g" /opt/so/conf/kibana/saved_objects.ndjson
# {% endif %}
# SOCtopus and Master
sed -i "s/PLACEHOLDER/{{ MASTER }}/g" /opt/so/conf/kibana/saved_objects.ndjson
# SOCtopus and Manager
sed -i "s/PLACEHOLDER/{{ MANAGER }}/g" /opt/so/conf/kibana/saved_objects.ndjson
# Load saved objects
curl -X POST "localhost:5601/api/saved_objects/_import" -H "kbn-xsrf: true" --form file=@/opt/so/conf/kibana/saved_objects.ndjson > /dev/null 2>&1
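
The import above discards the API response. To confirm that the saved objects actually loaded, one hedged option (assuming jq is available on the manager) is to capture and inspect the response instead of silencing it:

# Optional verification: re-run the import and check the success flag (requires jq)
curl -s -X POST "localhost:5601/api/saved_objects/_import?overwrite=true" \
  -H "kbn-xsrf: true" \
  --form file=@/opt/so/conf/kibana/saved_objects.ndjson | jq '.success, .successCount'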

View File

@@ -1,6 +1,6 @@
---
# Default Kibana configuration from kibana-docker.
{%- set ES = salt['pillar.get']('master:mainip', '') -%}
{%- set ES = salt['pillar.get']('manager:mainip', '') -%}
server.name: kibana
server.host: "0"
server.basePath: /kibana

View File

@@ -1,5 +1,5 @@
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
{% set MASTER = salt['grains.get']('master') %}
{% set MANAGER = salt['grains.get']('master') %}
{% set FEATURES = salt['pillar.get']('elastic:features', False) %}
{% if FEATURES %}
{% set FEATURES = "-features" %}
@@ -69,13 +69,13 @@ kibanabin:
# Start the kibana docker
so-kibana:
docker_container.running:
- image: {{ MASTER }}:5000/soshybridhunter/so-kibana:{{ VERSION }}{{ FEATURES }}
- image: {{ MANAGER }}:5000/soshybridhunter/so-kibana:{{ VERSION }}{{ FEATURES }}
- hostname: kibana
- user: kibana
- environment:
- ELASTICSEARCH_HOST={{ MASTER }}
- ELASTICSEARCH_HOST={{ MANAGER }}
- ELASTICSEARCH_PORT=9200
- MASTER={{ MASTER }}
- MANAGER={{ MANAGER }}
- binds:
- /opt/so/conf/kibana/etc:/usr/share/kibana/config:rw
- /opt/so/log/kibana:/var/log/kibana:rw
@@ -94,7 +94,7 @@ kibanadashtemplate:
wait_for_kibana:
module.run:
- http.wait_for_successful_query:
- url: "http://{{MASTER}}:5601/api/saved_objects/_find?type=config"
- url: "http://{{MANAGER}}:5601/api/saved_objects/_find?type=config"
- wait_for: 180
- onchanges:
- file: kibanadashtemplate

View File

@@ -13,7 +13,7 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
{% set MASTER = salt['grains.get']('master') %}
{% set MANAGER = salt['grains.get']('master') %}
{% set FEATURES = salt['pillar.get']('elastic:features', False) %}
{% if FEATURES %}
@@ -24,13 +24,13 @@
# Logstash Section - Decide which pillar to use
{% set lsheap = salt['pillar.get']('logstash_settings:lsheap', '') %}
{% if grains['role'] in ['so-eval','so-mastersearch', 'so-master', 'so-standalone'] %}
{% set freq = salt['pillar.get']('master:freq', '0') %}
{% set dstats = salt['pillar.get']('master:domainstats', '0') %}
{% if grains['role'] in ['so-eval','so-managersearch', 'so-manager', 'so-standalone'] %}
{% set freq = salt['pillar.get']('manager:freq', '0') %}
{% set dstats = salt['pillar.get']('manager:domainstats', '0') %}
{% set nodetype = salt['grains.get']('role', '') %}
{% elif grains['role'] == 'so-helix' %}
{% set freq = salt['pillar.get']('master:freq', '0') %}
{% set dstats = salt['pillar.get']('master:domainstats', '0') %}
{% set freq = salt['pillar.get']('manager:freq', '0') %}
{% set dstats = salt['pillar.get']('manager:domainstats', '0') %}
{% set nodetype = salt['grains.get']('role', '') %}
{% endif %}
@@ -159,7 +159,7 @@ lslogdir:
so-logstash:
docker_container.running:
- image: {{ MASTER }}:5000/soshybridhunter/so-logstash:{{ VERSION }}{{ FEATURES }}
- image: {{ MANAGER }}:5000/soshybridhunter/so-logstash:{{ VERSION }}{{ FEATURES }}
- hostname: so-logstash
- name: so-logstash
- user: logstash

View File

@@ -1,13 +1,13 @@
{%- if grains.role == 'so-heavynode' %}
{%- set MASTER = salt['pillar.get']('elasticsearch:mainip', '') %}
{%- set MANAGER = salt['pillar.get']('elasticsearch:mainip', '') %}
{%- else %}
{%- set MASTER = salt['pillar.get']('static:masterip', '') %}
{%- set MANAGER = salt['pillar.get']('static:managerip', '') %}
{% endif -%}
{%- set THREADS = salt['pillar.get']('logstash_settings:ls_input_threads', '') %}
input {
redis {
host => '{{ MASTER }}'
host => '{{ MANAGER }}'
data_type => 'list'
key => 'logstash:unparsed'
type => 'redis-input'

View File

@@ -1,23 +1,10 @@
{%- if grains['role'] == 'so-eval' -%}
{%- set ES = salt['pillar.get']('master:mainip', '') -%}
{%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %}
# Author: Justin Henderson
# SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics
# Email: justin@hasecuritysolution.com
# Last Update: 12/9/2016
filter {
if [module] =~ "zeek" {
mutate {
##add_tag => [ "conf_file_9000"]
}
}
}
output {
if [module] =~ "zeek" {
if [module] =~ "zeek" and "import" not in [tags] {
elasticsearch {
pipeline => "%{module}.%{dataset}"
hosts => "{{ ES }}"

View File

@@ -1,27 +0,0 @@
{%- if grains['role'] == 'so-eval' -%}
{%- set ES = salt['pillar.get']('master:mainip', '') -%}
{%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %}
# Author: Justin Henderson
# SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics
# Email: justin@hasecuritysolution.com
# Last Update: 12/9/2016
filter {
if "switch" in [tags] and "test_data" not in [tags] {
mutate {
##add_tag => [ "conf_file_9001"]
}
}
}
output {
if "switch" in [tags] and "test_data" not in [tags] {
#stdout { codec => rubydebug }
elasticsearch {
hosts => "{{ ES }}"
index => "so-switch-%{+YYYY.MM.dd}"
template => "/so-common-template.json"
}
}
}

View File

@@ -1,26 +1,16 @@
{%- if grains['role'] == 'so-eval' -%}
{%- set ES = salt['pillar.get']('master:mainip', '') -%}
{%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %}
# Updated by: Doug Burks
# Last Update: 5/16/2017
filter {
if "import" in [tags] and "test_data" not in [tags] {
mutate {
##add_tag => [ "conf_file_9002"]
}
}
}
output {
if "import" in [tags] and "test_data" not in [tags] {
# stdout { codec => rubydebug }
if "import" in [tags] {
elasticsearch {
pipeline => "%{module}.%{dataset}"
hosts => "{{ ES }}"
index => "so-import-%{+YYYY.MM.dd}"
template_name => "so-common"
template => "/so-common-template.json"
template_name => "so-import"
template => "/so-import-template.json"
template_overwrite => true
}
}

View File

@@ -1,27 +1,16 @@
{%- if grains['role'] == 'so-eval' -%}
{%- set ES = salt['pillar.get']('master:mainip', '') -%}
{%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %}
# Author: Justin Henderson
# SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics
# Email: justin@hasecuritysolution.com
# Last Update: 12/9/2016
filter {
if [event_type] == "sflow" and "test_data" not in [tags] {
mutate {
##add_tag => [ "conf_file_9004"]
}
}
}
output {
if [event_type] == "sflow" and "test_data" not in [tags] {
#stdout { codec => rubydebug }
if [event_type] == "sflow" {
elasticsearch {
hosts => "{{ ES }}"
index => "so-flow-%{+YYYY.MM.dd}"
template => "/so-common-template.json"
template_name => "so-flow"
template => "/so-flow-template.json"
template_overwrite => true
}
}
}

View File

@@ -1,26 +0,0 @@
{%- if grains['role'] == 'so-eval' -%}
{%- set ES = salt['pillar.get']('master:mainip', '') -%}
{%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %}
# Author: Justin Henderson
# SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics
# Email: justin@hasecuritysolution.com
# Last Update: 12/9/2016
filter {
if [event_type] == "dhcp" and "test_data" not in [tags] {
mutate {
##add_tag => [ "conf_file_9026"]
}
}
}
output {
if [event_type] == "dhcp" and "test_data" not in [tags] {
#stdout { codec => rubydebug }
elasticsearch {
hosts => "{{ ES }}"
template => "/so-common-template.json"
}
}
}

View File

@@ -1,25 +0,0 @@
{%- if grains['role'] == 'so-eval' -%}
{%- set ES = salt['pillar.get']('master:mainip', '') -%}
{%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %}
# Author: Justin Henderson
# SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics
# Email: justin@hasecuritysolution.com
# Last Update: 12/9/2016
filter {
if [event_type] == "esxi" and "test_data" not in [tags] {
mutate {
##add_tag => [ "conf_file_9029"]
}
}
}
output {
if [event_type] == "esxi" and "test_data" not in [tags] {
elasticsearch {
hosts => "{{ ES }}"
template => "/so-common-template.json"
}
}
}

View File

@@ -1,25 +0,0 @@
{%- if grains['role'] == 'so-eval' -%}
{%- set ES = salt['pillar.get']('master:mainip', '') -%}
{%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %}
# Author: Justin Henderson
# SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics
# Email: justin@hasecuritysolution.com
# Last Update: 12/9/2016
filter {
if [event_type] == "greensql" and "test_data" not in [tags] {
mutate {
##add_tag => [ "conf_file_9030"]
}
}
}
output {
if [event_type] == "greensql" and "test_data" not in [tags] {
elasticsearch {
hosts => "{{ ES }}"
template => "/so-common-template.json"
}
}
}

View File

@@ -1,26 +0,0 @@
{%- if grains['role'] == 'so-eval' -%}
{%- set ES = salt['pillar.get']('master:mainip', '') -%}
{%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %}
# Author: Justin Henderson
# SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics
# Email: justin@hasecuritysolution.com
# Last Update: 12/9/2016
filter {
if [event_type] == "iis" and "test_data" not in [tags] {
mutate {
##add_tag => [ "conf_file_9031"]
}
}
}
output {
if [event_type] == "iis" and "test_data" not in [tags] {
#stdout { codec => rubydebug }
elasticsearch {
hosts => "{{ ES }}"
template => "/so-common-template.json"
}
}
}

View File

@@ -1,26 +0,0 @@
{%- if grains['role'] == 'so-eval' -%}
{%- set ES = salt['pillar.get']('master:mainip', '') -%}
{%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %}
# Author: Justin Henderson
# SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics
# Email: justin@hasecuritysolution.com
# Last Update: 12/9/2016
filter {
if [event_type] == "mcafee" and "test_data" not in [tags] {
mutate {
##add_tag => [ "conf_file_9032"]
}
}
}
output {
if [event_type] == "mcafee" and "test_data" not in [tags] {
#stdout { codec => rubydebug }
elasticsearch {
hosts => "{{ ES }}"
template => "/so-common-template.json"
}
}
}

View File

@@ -1,28 +1,15 @@
{%- if grains['role'] == 'so-eval' -%}
{%- set ES = salt['pillar.get']('master:mainip', '') -%}
{%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %}
# Author: Justin Henderson
# SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics
# Email: justin@hasecuritysolution.com
# Last Update: 12/9/2016
filter {
if [event_type] == "ids" and "test_data" not in [tags] {
mutate {
##add_tag => [ "conf_file_9033"]
}
}
}
output {
if [event_type] == "ids" and "test_data" not in [tags] {
#stdout { codec => rubydebug }
if [event_type] == "ids" and "import" not in [tags] {
elasticsearch {
hosts => "{{ ES }}"
index => "so-ids-%{+YYYY.MM.dd}"
template_name => "so-common"
template => "/so-common-template.json"
template_name => "so-ids"
template => "/so-ids-template.json"
template_overwrite => true
}
}

View File

@@ -1,24 +1,16 @@
{%- if grains['role'] == 'so-eval' -%}
{%- set ES = salt['pillar.get']('master:mainip', '') -%}
{%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %}
filter {
if [module] =~ "syslog" {
mutate {
##add_tag => [ "conf_file_9000"]
}
}
}
output {
if [module] =~ "syslog" {
elasticsearch {
pipeline => "%{module}"
hosts => "{{ ES }}"
index => "so-syslog-%{+YYYY.MM.dd}"
template_name => "so-common"
template => "/so-common-template.json"
template_name => "so-syslog"
template => "/so-syslog-template.json"
template_overwrite => true
}
}

View File

@@ -1,20 +1,17 @@
{%- if grains['role'] == 'so-eval' -%}
{%- set ES = salt['pillar.get']('master:mainip', '') -%}
{%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %}
# Author: Josh Brower
# Last Update: 12/29/2018
# Output to ES for osquery tagged logs
output {
if [module] =~ "osquery" {
elasticsearch {
pipeline => "%{module}.%{dataset}"
hosts => "{{ ES }}"
index => "so-osquery-%{+YYYY.MM.dd}"
template => "/so-common-template.json"
template_name => "so-osquery"
template => "/so-osquery-template.json"
template_overwrite => true
}
}
}

View File

@@ -1,28 +1,15 @@
{%- if grains['role'] == 'so-eval' -%}
{%- set ES = salt['pillar.get']('master:mainip', '') -%}
{%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %}
# Author: Justin Henderson
# SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics
# Email: justin@hasecuritysolution.com
# Last Update: 12/9/2016
filter {
if "firewall" in [tags] and "test_data" not in [tags] {
mutate {
##add_tag => [ "conf_file_9200"]
}
}
}
output {
if "firewall" in [tags] and "test_data" not in [tags] {
# stdout { codec => rubydebug }
if "firewall" in [tags] {
elasticsearch {
hosts => "{{ ES }}"
index => "so-firewall-%{+YYYY.MM.dd}"
template_name => "so-common"
template => "/so-common-template.json"
template_name => "so-firewall"
template => "/so-firewall-template.json"
template_overwrite => true
}
}

View File

@@ -1,27 +0,0 @@
{%- if grains['role'] == 'so-eval' -%}
{%- set ES = salt['pillar.get']('master:mainip', '') -%}
{%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %}
# Author: Justin Henderson
# SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics
# Email: justin@hasecuritysolution.com
# Last Update: 12/9/2016
filter {
if [event_type] == "windows" and "test_data" not in [tags] {
mutate {
##add_tag => [ "conf_file_9300"]
}
}
}
output {
if [event_type] == "windows" and "test_data" not in [tags] {
#stdout { codec => rubydebug }
elasticsearch {
hosts => "{{ ES }}"
index => "so-windows-%{+YYYY.MM.dd}"
template => "/so-common-template.json"
}
}
}

View File

@@ -1,27 +0,0 @@
{%- if grains['role'] == 'so-eval' -%}
{%- set ES = salt['pillar.get']('master:mainip', '') -%}
{%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %}
# Author: Justin Henderson
# SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics
# Email: justin@hasecuritysolution.com
# Last Update: 12/9/2016
filter {
if [event_type] == "dns" and "test_data" not in [tags] {
mutate {
##add_tag => [ "conf_file_9301"]
}
}
}
output {
if [event_type] == "dns" and "test_data" not in [tags] {
#stdout { codec => rubydebug }
elasticsearch {
hosts => "{{ ES }}"
index => "so-%{+YYYY.MM.dd}"
template => "/so-common-template.json"
}
}
}

View File

@@ -1,28 +1,16 @@
{%- if grains['role'] == 'so-eval' -%}
{%- set ES = salt['pillar.get']('master:mainip', '') -%}
{%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %}
# Author: Justin Henderson
# SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics
# Email: justin@hasecuritysolution.com
# Last Update: 12/9/2016
filter {
if [module] == "suricata" {
mutate {
##add_tag => [ "conf_file_9400"]
}
}
}
output {
if [module] =~ "suricata" {
if [module] =~ "suricata" and "import" not in [tags] {
elasticsearch {
pipeline => "%{module}.%{dataset}"
hosts => "{{ ES }}"
index => "so-ids-%{+YYYY.MM.dd}"
template_name => "so-common"
template => "/so-common-template.json"
template_name => "so-ids"
template => "/so-ids-template.json"
}
}
}

View File

@@ -1,17 +1,16 @@
{%- if grains['role'] == 'so-eval' -%}
{%- set ES = salt['pillar.get']('master:mainip', '') -%}
{%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %}
output {
if "beat-ext" in [tags] {
if "beat-ext" in [tags] and "import" not in [tags] {
elasticsearch {
pipeline => "beats.common"
hosts => "{{ ES }}"
index => "so-beats-%{+YYYY.MM.dd}"
template_name => "so-common"
template => "/so-common-template.json"
template_name => "so-beats"
template => "/so-beats-template.json"
template_overwrite => true
}
}

View File

@@ -1,29 +1,16 @@
{%- if grains['role'] == 'so-eval' -%}
{%- set ES = salt['pillar.get']('master:mainip', '') -%}
{%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %}
# Author: Justin Henderson
# SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics
# Updated by: Doug Burks
# Last Update: 9/19/2018
filter {
if [module] =~ "ossec" {
mutate {
##add_tag => [ "conf_file_9600"]
}
}
}
output {
if [module] =~ "ossec" {
elasticsearch {
pipeline => "%{module}.%{dataset}"
hosts => "{{ ES }}"
index => "so-ossec-%{+YYYY.MM.dd}"
template_name => "so-common"
template => "/so-common-template.json"
template_name => "so-ossec"
template => "/so-ossec-template.json"
template_overwrite => true
}
}

View File

@@ -1,29 +1,16 @@
{%- if grains['role'] == 'so-eval' -%}
{%- set ES = salt['pillar.get']('master:mainip', '') -%}
{%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %}
# Author: Justin Henderson
# SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics
# Email: justin@hasecuritysolution.com
# Last Update: 12/9/2016
filter {
if [module] =~ "strelka" {
mutate {
##add_tag => [ "conf_file_9000"]
}
}
}
output {
if [event_type] =~ "strelka" {
elasticsearch {
pipeline => "%{module}.%{dataset}"
hosts => "{{ ES }}"
index => "so-strelka-%{+YYYY.MM.dd}"
template_name => "so-common"
template => "/so-common-template.json"
template_name => "so-strelka"
template => "/so-strelka-template.json"
template_overwrite => true
}
}

View File

@@ -1,9 +1,8 @@
{% set MASTER = salt['pillar.get']('static:masterip', '') %}
{% set MANAGER = salt['pillar.get']('static:managerip', '') %}
{% set BATCH = salt['pillar.get']('logstash_settings:ls_pipeline_batch_size', 125) %}
output {
redis {
host => '{{ MASTER }}'
host => '{{ MANAGER }}'
data_type => 'list'
key => 'logstash:unparsed'
congestion_interval => 1

View File

@@ -0,0 +1,13 @@
{%- set SHARDS = salt['pillar.get']('elasticsearch:index_settings:so-beats:shards', 1) %}
{%- set REPLICAS = salt['pillar.get']('elasticsearch:replicas', 0) %}
{%- set REFRESH = salt['pillar.get']('elasticsearch:index_settings:so-beats:refresh', '30s') %}
{
"index_patterns": ["so-beats-*"],
"version": 50001,
"order": 11,
"settings":{
"number_of_replicas":{{ REPLICAS }},
"number_of_shards":{{ SHARDS }},
"index.refresh_interval":"{{ REFRESH }}"
}
}
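
Each of the new per-index templates that follow uses the same three pillar-driven settings (shards, replicas, refresh interval). Once rendered and pushed by the template/template_name options seen in the Logstash outputs above, a quick way to confirm a template was actually registered in Elasticsearch (the template name and host here are examples) is the legacy template API:

# Check that the rendered template was installed (legacy template API, ES 7.x)
curl -s "localhost:9200/_template/so-beats?pretty"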

View File

@@ -5,7 +5,8 @@
"settings":{
"number_of_replicas":0,
"number_of_shards":1,
"index.refresh_interval":"30s"
"index.refresh_interval":"30s",
"index.routing.allocation.require.box_type":"hot"
},
"mappings":{
"dynamic":false,

View File

@@ -0,0 +1,13 @@
{%- set SHARDS = salt['pillar.get']('elasticsearch:index_settings:so-firewall:shards', 1) %}
{%- set REPLICAS = salt['pillar.get']('elasticsearch:replicas', 0) %}
{%- set REFRESH = salt['pillar.get']('elasticsearch:index_settings:so-firewall:refresh', '30s') %}
{
"index_patterns": ["so-firewall-*"],
"version":50001,
"order":11,
"settings":{
"number_of_replicas":{{ REPLICAS }},
"number_of_shards":{{ SHARDS }},
"index.refresh_interval":"{{ REFRESH }}"
}
}

View File

@@ -0,0 +1,13 @@
{%- set SHARDS = salt['pillar.get']('elasticsearch:index_settings:so-flow:shards', 1) %}
{%- set REPLICAS = salt['pillar.get']('elasticsearch:replicas', 0) %}
{%- set REFRESH = salt['pillar.get']('elasticsearch:index_settings:so-flow:refresh', '30s') %}
{
"index_patterns": ["so-flow-*"],
"version": 50001,
"order": 11,
"settings":{
"number_of_replicas":{{ REPLICAS }},
"number_of_shards":{{ SHARDS }},
"index.refresh_interval":"{{ REFRESH }}"
}
}

View File

@@ -0,0 +1,13 @@
{%- set SHARDS = salt['pillar.get']('elasticsearch:index_settings:so-ids:shards', 1) %}
{%- set REPLICAS = salt['pillar.get']('elasticsearch:replicas', 0) %}
{%- set REFRESH = salt['pillar.get']('elasticsearch:index_settings:so-ids:refresh', '30s') %}
{
"index_patterns": ["so-ids-*"],
"version":50001,
"order":11,
"settings":{
"number_of_replicas":{{ REPLICAS }},
"number_of_shards":{{ SHARDS }},
"index.refresh_interval":"{{ REFRESH }}"
}
}

View File

@@ -0,0 +1,13 @@
{%- set SHARDS = salt['pillar.get']('elasticsearch:index_settings:so-import:shards', 1) %}
{%- set REPLICAS = salt['pillar.get']('elasticsearch:replicas', 0) %}
{%- set REFRESH = salt['pillar.get']('elasticsearch:index_settings:so-import:refresh', '30s') %}
{
"index_patterns": ["so-import-*"],
"version":50001,
"order": 11,
"settings":{
"number_of_replicas":{{ REPLICAS }},
"number_of_shards":{{ SHARDS }},
"index.refresh_interval":"{{ REFRESH }}"
}
}

View File

@@ -0,0 +1,13 @@
{%- set SHARDS = salt['pillar.get']('elasticsearch:index_settings:so-osquery:shards', 1) %}
{%- set REPLICAS = salt['pillar.get']('elasticsearch:replicas', 0) %}
{%- set REFRESH = salt['pillar.get']('elasticsearch:index_settings:so-osquery:refresh', '30s') %}
{
"index_patterns": ["so-osquery-*"],
"version":50001,
"order":11,
"settings":{
"number_of_replicas":{{ REPLICAS }},
"number_of_shards":{{ SHARDS }},
"index.refresh_interval":"{{ REFRESH }}"
}
}

View File

@@ -0,0 +1,13 @@
{%- set SHARDS = salt['pillar.get']('elasticsearch:index_settings:so-ossec:shards', 1) %}
{%- set REPLICAS = salt['pillar.get']('elasticsearch:replicas', 0) %}
{%- set REFRESH = salt['pillar.get']('elasticsearch:index_settings:so-ossec:refresh', '30s') %}
{
"index_patterns": ["so-ossec-*"],
"version":50001,
"order":11,
"settings":{
"number_of_replicas":{{ REPLICAS }},
"number_of_shards":{{ SHARDS }},
"index.refresh_interval":"{{ REFRESH }}"
}
}

View File

@@ -0,0 +1,13 @@
{%- set SHARDS = salt['pillar.get']('elasticsearch:index_settings:so-strelka:shards', 1) %}
{%- set REPLICAS = salt['pillar.get']('elasticsearch:replicas', 0) %}
{%- set REFRESH = salt['pillar.get']('elasticsearch:index_settings:so-strelka:refresh', '30s') %}
{
"index_patterns": ["so-strelka-*"],
"version":50001,
"order":11,
"settings":{
"number_of_replicas":{{ REPLICAS }},
"number_of_shards":{{ SHARDS }},
"index.refresh_interval":"{{ REFRESH }}"
}
}

View File

@@ -0,0 +1,14 @@
{%- set SHARDS = salt['pillar.get']('elasticsearch:index_settings:so-syslog:shards', 1) %}
{%- set REPLICAS = salt['pillar.get']('elasticsearch:replicas', 0) %}
{%- set REFRESH = salt['pillar.get']('elasticsearch:index_settings:so-syslog:refresh', '30s') %}
{
"index_patterns": ["so-syslog-*"],
"version":50001,
"order":11,
"settings":{
"number_of_replicas":{{ REPLICAS }},
"number_of_shards":{{ SHARDS }},
"index.refresh_interval":"{{ REFRESH }}"
}
}

View File

@@ -1,10 +0,0 @@
{
"index_patterns": ["so-zeek-*"],
"version":50001,
"order" : 11,
"settings":{
"number_of_replicas":0,
"number_of_shards":1,
"index.refresh_interval":"30s"
}
}

Some files were not shown because too many files have changed in this diff.