This commit is contained in:
Mike Reeves
2018-12-05 12:43:20 -05:00
11 changed files with 325 additions and 84 deletions

View File

@@ -1,3 +1,13 @@
{% if grains['role'] == 'so-node' %}
{%- set cur_close_days = salt['pillar.get']('node:cur_close_days', '') -%}
{% elif grains['role'] == 'so-eval' %}
{%- set cur_close_days = salt['pillar.get']('master:cur_close_days', '') -%}
{%- endif %}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
@@ -9,7 +19,7 @@ actions:
1:
action: close
description: >-
Close indices older than 2 days (based on index name), for logstash-
Close indices older than {{cur_close_days}} days (based on index name), for logstash-
prefixed indices.
options:
delete_aliases: False
@@ -26,5 +36,5 @@ actions:
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: 2
unit_count: {{cur_close_days}}
exclude:

View File

@@ -1,3 +1,12 @@
{% if grains['role'] == 'so-node' %}
{%- set log_size_limit = salt['pillar.get']('node:log_size_limit', '') -%}
{% elif grains['role'] == 'so-eval' %}
{%- set log_size_limit = salt['pillar.get']('master:log_size_limit', '') -%}
{%- endif %}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
@@ -9,7 +18,7 @@ actions:
1:
action: delete_indices
description: >-
Delete indices when $disk_space value (in GB) is exceeded.
Delete indices when {{log_size_limit}} GB is exceeded.
options:
ignore_empty_list: True
disable_action: False
@@ -20,4 +29,4 @@ actions:
- filtertype: space
source: creation_date
use_age: True
disk_space: 43
disk_space: {{log_size_limit}}

View File

@@ -0,0 +1,2 @@
#!/bin/bash
# First purge closed indices if disk usage is over the limit, then have the
# curator container run the "close" action file. All output is discarded.
/usr/sbin/so-curator-closed-delete > /dev/null 2>&1
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/close.yml > /dev/null 2>&1

View File

@@ -0,0 +1,41 @@
#!/bin/bash
#
# Copyright 2014,2015,2016,2017,2018 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#. /usr/sbin/so-elastic-common
#. /etc/nsm/securityonion.conf
# If logrotate script doesn't already exist, create it
#FILE="/etc/logrotate.d/so-curator-closed-delete"
#if ! [ -f ${FILE} ]; then
# cat << EOF > ${FILE}
#/var/log/nsm/so-curator-closed-delete.log {
# daily
# rotate 7
# copytruncate
# compress
# missingok
# notifempty
#}
#EOF
#fi
# Avoid starting multiple instances: only launch the delete worker when no
# copy of it is already running (cron fires this wrapper every minute).
if ! pgrep -f "so-curator-closed-delete-delete" >/dev/null; then
  /usr/sbin/so-curator-closed-delete-delete
else
  echo "Script is already running."
fi

View File

@@ -0,0 +1,58 @@
{% if grains['role'] == 'so-node' %}
{%- set ELASTICSEARCH_HOST = salt['pillar.get']('node:mainip', '') -%}
{%- set ELASTICSEARCH_PORT = salt['pillar.get']('node:es_port', '') -%}
{%- set LOG_SIZE_LIMIT = salt['pillar.get']('node:log_size_limit', '') -%}
{% elif grains['role'] == 'so-eval' %}
{%- set ELASTICSEARCH_HOST = salt['pillar.get']('master:mainip', '') -%}
{%- set ELASTICSEARCH_PORT = salt['pillar.get']('master:es_port', '') -%}
{%- set LOG_SIZE_LIMIT = salt['pillar.get']('master:log_size_limit', '') -%}
{%- endif %}
#!/bin/bash
#
# Copyright 2014,2015,2016,2017,2018 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#. /usr/sbin/so-elastic-common
#. /etc/nsm/securityonion.conf
# Deletes the oldest closed logstash- index, one at a time, while disk usage
# exceeds the configured limit. Host/port/limit are rendered in by Salt above.
LOG="/opt/so/log/curator/so-curator-closed-delete.log"
# Check for 2 conditions:
# 1. Are Elasticsearch indices using more disk space than LOG_SIZE_LIMIT?
# 2. Are there any closed logstash- indices that we can delete?
# If both conditions are true, keep on looping until one of the conditions is false.
# NOTE(review): "-hs --block-size=1GB" combines -h with --block-size; with GNU
# coreutils the later --block-size takes precedence so the output is a plain
# number of GB, which is required for the -gt comparison — confirm on target OS.
# NOTE(review): the grep pattern relies on _cat/indices printing closed indices
# with an empty health column (line starts with " close") — verify this holds
# for the deployed Elasticsearch version.
while [[ $(du -hs --block-size=1GB /nsm/elasticsearch/nodes | awk '{print $1}' ) -gt "{{LOG_SIZE_LIMIT}}" ]] &&
curl -s {{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices | grep "^ close logstash-" > /dev/null; do
# We need to determine OLDEST_INDEX.
# First, get the list of closed indices that are prefixed with "logstash-".
# For example: logstash-ids-YYYY.MM.DD
# Then, sort by date by telling sort to use hyphen as delimiter and then sort on the third field.
# Finally, select the first entry in that sorted list.
OLDEST_INDEX=$(curl -s {{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices | grep "^ close logstash-" | awk '{print $2}' | sort -t- -k3 | head -1)
# Now that we've determined OLDEST_INDEX, ask Elasticsearch to delete it.
# NOTE(review): the DELETE response is not suppressed or checked; a failed
# delete would still be logged below as if it succeeded — confirm intended.
curl -XDELETE {{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/${OLDEST_INDEX}
# Finally, write a log entry that says we deleted it.
echo "$(date) - Used disk space exceeds LOG_SIZE_LIMIT ({{LOG_SIZE_LIMIT}} GB) - Index ${OLDEST_INDEX} deleted ..." >> ${LOG}
done

View File

@@ -0,0 +1,2 @@
#!/bin/bash
# Run the curator "delete" action file inside the so-curator container,
# discarding all output (scheduled via cron by the salt curator state).
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/delete.yml > /dev/null 2>&1

View File

@@ -1,9 +1,19 @@
{% if grains['role'] == 'so-node' %}
{%- set elasticsearch = salt['pillar.get']('node:mainip', '') -%}
{% elif grains['role'] == 'so-eval' %}
{%- set elasticsearch = salt['pillar.get']('master:mainip', '') -%}
{%- endif %}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
client:
hosts:
- elasticsearch
- {{elasticsearch}}
port: 9200
url_prefix:
use_ssl: False

136
salt/curator/init.sls Normal file
View File

@@ -0,0 +1,136 @@
{% if grains['role'] == 'so-node' or grains['role'] == 'so-eval' %}
# Curator
# Salt state: install curator config/action files and helper scripts, schedule
# them via cron, and keep the so-curator container running.
# Create the group
curatorgroup:
group.present:
- name: curator
- gid: 934
# Add user
curator:
user.present:
- uid: 934
- gid: 934
- home: /opt/so/conf/curator
- createhome: False
# Create the log directory
# NOTE(review): files below are owned by group 939, but the curator group
# created above has gid 934 — confirm 939 is a group defined elsewhere.
curactiondir:
file.directory:
- name: /opt/so/conf/curator/action
- user: 934
- group: 939
- makedirs: True
curlogdir:
file.directory:
- name: /opt/so/log/curator
- user: 934
- group: 939
# Action files and curator.yml are Jinja templates (pillar-driven values).
curcloseconf:
file.managed:
- name: /opt/so/conf/curator/action/close.yml
- source: salt://curator/files/action/close.yml
- user: 934
- group: 939
- template: jinja
curdelconf:
file.managed:
- name: /opt/so/conf/curator/action/delete.yml
- source: salt://curator/files/action/delete.yml
- user: 934
- group: 939
- template: jinja
curconf:
file.managed:
- name: /opt/so/conf/curator/curator.yml
- source: salt://curator/files/curator.yml
- user: 934
- group: 939
- template: jinja
# Helper scripts installed to /usr/sbin; only the delete-delete worker is a
# Jinja template (it embeds host/port/limit from pillar).
curcloseddel:
file.managed:
- name: /usr/sbin/so-curator-closed-delete
- source: salt://curator/files/bin/so-curator-closed-delete
- user: 934
- group: 939
- mode: 755
curcloseddeldel:
file.managed:
- name: /usr/sbin/so-curator-closed-delete-delete
- source: salt://curator/files/bin/so-curator-closed-delete-delete
- user: 934
- group: 939
- mode: 755
- template: jinja
curclose:
file.managed:
- name: /usr/sbin/so-curator-close
- source: salt://curator/files/bin/so-curator-close
- user: 934
- group: 939
- mode: 755
curdel:
file.managed:
- name: /usr/sbin/so-curator-delete
- source: salt://curator/files/bin/so-curator-delete
- user: 934
- group: 939
- mode: 755
# Cron jobs: all three scripts run every minute ('*' in every field).
# NOTE(review): confirm a one-minute cadence is intended for close/delete.
/usr/sbin/so-curator-closed-delete:
cron.present:
- user: root
- minute: '*'
- hour: '*'
- daymonth: '*'
- month: '*'
- dayweek: '*'
/usr/sbin/so-curator-close:
cron.present:
- user: root
- minute: '*'
- hour: '*'
- daymonth: '*'
- month: '*'
- dayweek: '*'
/usr/sbin/so-curator-delete:
cron.present:
- user: root
- minute: '*'
- hour: '*'
- daymonth: '*'
- month: '*'
- dayweek: '*'
# Keep the curator container running; config and action files are bind-mounted
# read-only, the log directory read-write.
so-curator:
docker_container.running:
- image: soshybridhunter/so-curator:HH1.0.3
- hostname: curator
- name: so-curator
- user: curator
- interactive: True
- tty: True
- binds:
- /opt/so/conf/curator/curator.yml:/etc/curator/config/curator.yml:ro
- /opt/so/conf/curator/action/:/etc/curator/action:ro
- /opt/so/log/curator:/var/log/curator:rw
# Begin Curator Cron Jobs
# Close
# Delete
# Hot Warm
# Segment Merge
# End Curator Cron Jobs
{% endif %}

View File

@@ -150,6 +150,7 @@ so-freq:
docker_container.running:
- image: soshybridhunter/so-freqserver:HH1.0.3
- hostname: freqserver
- name: so-freqserver
- user: freqserver
- binds:
- /opt/so/log/freq_server:/var/log/freq_server:rw
@@ -185,7 +186,7 @@ so-domainstats:
docker_container.running:
- image: soshybridhunter/so-domainstats:HH1.0.3
- hostname: domainstats
- name: domainstats
- name: so-domainstats
- user: domainstats
- binds:
- /opt/so/log/domainstats:/var/log/domain_stats
@@ -193,82 +194,6 @@ so-domainstats:
{% endif %}
# Curator
# Create the group
curatorgroup:
group.present:
- name: curator
- gid: 934
# Add user
curator:
user.present:
- uid: 934
- gid: 934
- home: /opt/so/conf/curator
- createhome: False
# Create the log directory
curactiondir:
file.directory:
- name: /opt/so/conf/curator/action
- user: 934
- group: 939
- makedirs: True
curlogdir:
file.directory:
- name: /opt/so/log/curator
- user: 934
- group: 939
curclose:
file.managed:
- name: /opt/so/conf/curator/action/close.yml
- source: salt://elasticsearch/files/curator/action/close.yml
- user: 934
- group: 939
- template: jinja
curdel:
file.managed:
- name: /opt/so/conf/curator/action/delete.yml
- source: salt://elasticsearch/files/curator/action/delete.yml
- user: 934
- group: 939
- template: jinja
curconf:
file.managed:
- name: /opt/so/conf/curator/curator.yml
- source: salt://elasticsearch/files/curator/curator.yml
- user: 934
- group: 939
- template: jinja
so-curator:
docker_container.running:
- image: soshybridhunter/so-curator:HH1.0.3
- hostname: curator
- name: curator
- user: curator
- interactive: True
- tty: True
- binds:
- /opt/so/conf/curator/curator.yml:/etc/curator/config/curator.yml:ro
- /opt/so/conf/curator/action/:/etc/curator/action:ro
- /opt/so/log/curator:/var/log/curator:rw
# Begin Curator Cron Jobs
# Close
# Delete
# Hot Warm
# Segment Merge
# End Curator Cron Jobs
# Elastalert
{% if esalert == 1 %}
@@ -311,7 +236,7 @@ so-elastalert:
docker_container.running:
- image: soshybridhunter/so-elastalert:HH1.0.3
- hostname: elastalert
- name: elastalert
- name: so-elastalert
- user: elastalert
- detach: True
- binds:

View File

@@ -27,6 +27,7 @@ base:
- pcap
- suricata
- bro
- curator
- utility
- schedule
@@ -60,6 +61,7 @@ base:
- firewall
- logstash
- elasticsearch
- curator
- schedule
'G@role:so-node and I@node:node_type:warm':
@@ -77,6 +79,7 @@ base:
- firewall
- logstash
- elasticsearch
- curator
- schedule
'G@role:mastersensor':

View File

@@ -22,6 +22,7 @@ NICS=$(ip link | awk -F: '$0 !~ "lo|vir|veth|br|docker|wl|^[^0-9]"{print $2 " \"
CPUCORES=$(cat /proc/cpuinfo | grep processor | wc -l)
LISTCORES=$(cat /proc/cpuinfo | grep processor | awk '{print $3 " \"" "core" "\""}')
RANDOMUID=$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 16 | head -n 1)
NODE_ES_PORT="9200"
# End Global Variable Section
@@ -397,6 +398,16 @@ get_filesystem_nsm(){
FSNSM=$(df /nsm | awk '$3 ~ /[0-9]+/ { print $2 * 1000 }')
}
get_log_size_limit() {
  # Compute LOG_SIZE_LIMIT: 85% of the /nsm filesystem size, expressed in
  # whole GB (decimal units). Sets the globals DISK_SIZE_K, PERCENTAGE,
  # DISK_SIZE, PERCENTAGE_DISK_SPACE and LOG_SIZE_LIMIT used elsewhere.
  # Size of /nsm in 1K blocks (df default output, second column).
  DISK_SIZE_K=$(df /nsm | grep -v "^Filesystem" | awk '{print $2}')
  PERCENTAGE=85
  # Bug fix: the previous "DISK_SIZE=DISK_SIZE_K*1000" stored a literal
  # string and only worked because shell arithmetic expands variable names
  # recursively; compute the byte count explicitly instead.
  DISK_SIZE=$((DISK_SIZE_K * 1000))
  PERCENTAGE_DISK_SPACE=$((DISK_SIZE * PERCENTAGE / 100))
  # Final limit in whole GB (10^9 bytes).
  LOG_SIZE_LIMIT=$((PERCENTAGE_DISK_SPACE / 1000000000))
}
get_filesystem_root(){
# Set FSROOT to the size of the root filesystem in bytes: df reports 1K
# blocks in column 2; multiply by 1000 (decimal units). The awk guard on
# column 3 skips the df header line.
FSROOT=$(df / | awk '$3 ~ /[0-9]+/ { print $2 * 1000 }')
}
@@ -493,6 +504,9 @@ master_pillar() {
echo " oinkcode: $OINKCODE" >> /opt/so/saltstack/pillar/masters/$HOSTNAME.sls
#echo " access_key: $ACCESS_KEY" >> /opt/so/saltstack/pillar/masters/$HOSTNAME.sls
#echo " access_secret: $ACCESS_SECRET" >> /opt/so/saltstack/pillar/masters/$HOSTNAME.sls
echo " es_port: $NODE_ES_PORT" >> /opt/so/saltstack/pillar/masters/$HOSTNAME.sls
echo " log_size_limit: $LOG_SIZE_LIMIT" >> /opt/so/saltstack/pillar/masters/$HOSTNAME.sls
echo " cur_close_days: $CURCLOSEDAYS" >> /opt/so/saltstack/pillar/masters/$HOSTNAME.sls
}
@@ -540,6 +554,9 @@ node_pillar() {
echo " ls_batch_count: $LSINPUTBATCHCOUNT" >> $TMP/$HOSTNAME.sls
echo " es_shard_count: $SHARDCOUNT" >> $TMP/$HOSTNAME.sls
echo " node_type: $NODETYPE" >> $TMP/$HOSTNAME.sls
echo " es_port: $NODE_ES_PORT" >> $TMP/$HOSTNAME.sls
echo " log_size_limit: $LOG_SIZE_LIMIT" >> $TMP/$HOSTNAME.sls
echo " cur_close_days: $CURCLOSEDAYS" >> $TMP/$HOSTNAME.sls
}
@@ -932,6 +949,16 @@ whiptail_check_exitstatus() {
}
whiptail_cur_close_days() {
# Prompt for the index-close threshold in days; result stored in the global
# CURCLOSEDAYS (pre-populated with its current value as the default).
# The 3>&1 1>&2 2>&3 swap captures whiptail's stderr output into $( ).
CURCLOSEDAYS=$(whiptail --title "Security Onion Setup" --inputbox \
"Please specify the threshold (in days) at which Elasticsearch indices will be closed" 10 60 $CURCLOSEDAYS 3>&1 1>&2 2>&3)
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
}
whiptail_homenet_master() {
# Ask for the HOME_NET on the master
@@ -979,6 +1006,18 @@ whiptail_install_type() {
}
whiptail_log_size_limit() {
# Prompt for the Elasticsearch storage allocation in GB; result stored in
# the global LOG_SIZE_LIMIT (pre-populated with the computed 85% default).
# The 3>&1 1>&2 2>&3 swap captures whiptail's stderr output into $( ).
LOG_SIZE_LIMIT=$(whiptail --title "Security Onion Setup" --inputbox \
"Please specify the amount of disk space (in GB) you would like to allocate for Elasticsearch data storage. \
By default, this is set to 85% of the disk space allotted for /nsm." 10 60 $LOG_SIZE_LIMIT 3>&1 1>&2 2>&3)
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
}
whiptail_management_nic() {
MNIC=$(whiptail --title "NIC Setup" --radiolist "Please select your management NIC" 20 78 12 ${NICS[@]} 3>&1 1>&2 2>&3 )
@@ -1508,11 +1547,13 @@ if (whiptail_you_sure); then
NSMSETUP=BASIC
NIDS=Suricata
BROVERSION=ZEEK
CURCLOSEDAYS=30
whiptail_make_changes
clear_master
mkdir -p /nsm
get_filesystem_root
get_filesystem_nsm
get_log_size_limit
get_main_ip
# Add the user so we can sit back and relax
echo ""
@@ -1553,6 +1594,10 @@ if (whiptail_you_sure); then
whiptail_management_server
whiptail_master_updates
set_updates
get_log_size_limit
whiptail_log_size_limit
CURCLOSEDAYS=30
whiptail_cur_close_days
es_heapsize
ls_heapsize
whiptail_node_advanced