mirror of https://github.com/Security-Onion-Solutions/securityonion.git (synced 2025-12-19 07:23:06 +01:00)
Remove Curator
@@ -18,6 +18,10 @@ so-elasticsearch_so-status.disabled:
    - name: /opt/so/conf/so-status/so-status.conf
    - regex: ^so-elasticsearch$

so-elasticsearch-indices-delete:
  cron.absent:
    - identifier: so-elasticsearch-indices-delete

{% else %}

{{sls}}_state_not_allowed:

@@ -195,6 +195,26 @@ so-elasticsearch-roles-load:
    - require:
      - docker_container: so-elasticsearch
      - file: elasticsearch_sbin_jinja
{% if grains.role in ['so-eval', 'so-standalone', 'so-managersearch', 'so-heavynode', 'so-manager'] %}
so-curator-cluster-close:
  cron.absent:
    - identifier: so-curator-cluster-close

so-curator-cluster-delete:
  cron.absent:
    - identifier: so-curator-cluster-delete

so-elasticsearch-indices-delete:
  cron.present:
    - name: /usr/sbin/so-elasticsearch-indices-delete > /opt/so/log/curator/cron-elasticsearch-indices-delete.log 2>&1
    - identifier: so-elasticsearch-indices-delete
    - user: root
    - minute: '*/5'
    - hour: '*'
    - daymonth: '*'
    - month: '*'
    - dayweek: '*'
{% endif %}
{% endif %}

{% else %}

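For reference, Salt's cron.present manages an entry in the given user's crontab and tags it with a SALT_CRON_IDENTIFIER comment so later runs (and the cron.absent states above) can locate it by identifier. On a manager node the state above should render roughly the following entry in root's crontab; this is a sketch of the expected result, not taken from the commit:

    # SALT_CRON_IDENTIFIER:so-elasticsearch-indices-delete
    */5 * * * * /usr/sbin/so-elasticsearch-indices-delete > /opt/so/log/curator/cron-elasticsearch-indices-delete.log 2>&1
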
salt/elasticsearch/tools/sbin/so-elasticsearch-indices-delete (new executable file, 17 lines)
@@ -0,0 +1,17 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

# Avoid starting multiple instances
APP=clusterdelete
lf=/tmp/$APP-pidLockFile
# create empty lock file if none exists
cat /dev/null >> $lf
read lastPID < $lf
# if lastPID is not null and a process with that pid exists, exit
[ ! -z "$lastPID" -a -d /proc/$lastPID ] && exit
echo $$ > $lf

/usr/sbin/so-elasticsearch-indices-delete-delete

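The wrapper above uses a plain PID-file guard: it records its own PID and exits if a previously recorded PID is still alive under /proc. For comparison, a minimal sketch of an equivalent guard using flock(1) is shown below; it is not part of this commit, and the lock path is hypothetical:

    #!/bin/bash
    # Sketch only: single-instance guard via flock(1) instead of a PID file.
    # /tmp/clusterdelete.lock is a hypothetical path, not used by the commit.
    exec 200>/tmp/clusterdelete.lock   # open (or create) the lock file on fd 200
    flock -n 200 || exit 0             # another instance holds the lock: exit quietly
    /usr/sbin/so-elasticsearch-indices-delete-delete

flock also side-steps the stale-PID case, where an unrelated process has since reused the recorded PID.
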
salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-indices-delete-delete (new executable file, 67 lines)
@@ -0,0 +1,67 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% from 'vars/globals.map.jinja' import GLOBALS %}
{% import_yaml 'elasticsearch/defaults.yaml' as ELASTICDEFAULTS %}
{%- set ELASTICSEARCH_HOST = GLOBALS.node_ip -%}
{%- set RETENTION = salt['pillar.get']('elasticsearch:retention', ELASTICDEFAULTS.elasticsearch.retention, merge=true) -%}

LOG="/opt/so/log/elasticsearch/so-elasticsearch-indices-delete.log"
ALERT_LOG="/opt/so/log/elasticsearch/indices-delete-alert.log"
LOG_SIZE_LIMIT_GB=$(/usr/sbin/so-elasticsearch-cluster-space-total {{ RETENTION.retention_pct }})
LOG_SIZE_LIMIT=$(( "$LOG_SIZE_LIMIT_GB" * 1000 * 1000 * 1000 ))
ITERATION=0
MAX_ITERATIONS=10

overlimit() {
  [[ $(/usr/sbin/so-elasticsearch-cluster-space-used) -gt ${LOG_SIZE_LIMIT} ]]
}

###########################
# Check for 2 conditions: #
###########################
# 1. Check if Elasticsearch indices are using more disk space than LOG_SIZE_LIMIT.
# 2. Check if the maximum number of iterations - MAX_ITERATIONS - has been exceeded. If so, exit.
# Closed indices will be deleted first. If we are able to bring disk space under LOG_SIZE_LIMIT, or the number of iterations has exceeded the maximum allowed number of iterations, we will break out of the loop.

while overlimit && [[ $ITERATION -lt $MAX_ITERATIONS ]]; do

  # If we can't query Elasticsearch, log the failure and exit immediately.
  /usr/sbin/so-elasticsearch-query _cat/indices?h=index,status > /dev/null 2>&1
  [ $? -eq 1 ] && echo "$(date) - Could not query Elasticsearch." >> ${LOG} && exit

  # We iterate through the closed and open indices
  CLOSED_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep 'close$' | awk '{print $1}' | grep -vE "playbook|so-case" | grep -E "(logstash-|so-|.ds-logs-)" | sort -t- -k3)
  OPEN_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep 'open$' | awk '{print $1}' | grep -vE "playbook|so-case" | grep -E "(logstash-|so-|.ds-logs-)" | sort -t- -k3)

  for INDEX in ${CLOSED_INDICES} ${OPEN_INDICES}; do
    # Now that we've sorted the indices from oldest to newest, we need to check each index to see if it is assigned as the current write index for a data stream.
    # To do so, we need to identify to which data stream this index is associated.
    # We extract the data stream name using the pattern below.
    DATASTREAM_PATTERN="logs-[a-zA-Z_.]+-[a-zA-Z_.]+"
    DATASTREAM=$(echo "${INDEX}" | grep -oE "$DATASTREAM_PATTERN")
    # We look up the data stream and determine the write index. If there is only one backing index, we delete the entire data stream.
    BACKING_INDICES=$(/usr/sbin/so-elasticsearch-query _data_stream/${DATASTREAM} | jq -r '.data_streams[0].indices | length')
    if [ "$BACKING_INDICES" -gt 1 ]; then
      CURRENT_WRITE_INDEX=$(/usr/sbin/so-elasticsearch-query _data_stream/$DATASTREAM | jq -r .data_streams[0].indices[-1].index_name)
      # We make sure we are not trying to delete a write index
      if [ "${INDEX}" != "${CURRENT_WRITE_INDEX}" ]; then
        # This should not be a write index, so we should be allowed to delete it
        printf "\n$(date) - Used disk space exceeds LOG_SIZE_LIMIT (${LOG_SIZE_LIMIT_GB} GB) - Deleting ${INDEX} index...\n" >> ${LOG}
        /usr/sbin/so-elasticsearch-query ${INDEX} -XDELETE >> ${LOG} 2>&1
      fi
    fi
    if ! overlimit ; then
      exit
    fi
    ((ITERATION++))
  done
  if [[ $ITERATION -ge $MAX_ITERATIONS ]]; then
    alert_id=$(uuidgen)
    printf "\n$(date) -> Maximum iteration limit reached ($MAX_ITERATIONS). Unable to bring disk below threshold. Writing alert ($alert_id) to ${ALERT_LOG}\n" >> ${LOG}
    printf "\n$(date),$alert_id,Maximum iteration limit reached ($MAX_ITERATIONS). Unable to bring disk below threshold.\n" >> ${ALERT_LOG}
  fi
done

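The write-index check depends on the shape of the Elasticsearch _data_stream API response: data_streams[0].indices lists the backing indices in generation order, and the last entry is the data stream's current write index, which must not be deleted directly. A quick illustration of the jq selection used above (the data stream and index names are made up):

    # Sketch only: a hypothetical _data_stream response piped through the same jq filter.
    echo '{"data_streams":[{"name":"logs-example-default","indices":[{"index_name":".ds-logs-example-default-000001"},{"index_name":".ds-logs-example-default-000002"}]}]}' \
      | jq -r '.data_streams[0].indices[-1].index_name'
    # prints .ds-logs-example-default-000002, the current write index

The sort -t- -k3 in the index listings orders index names starting from their third dash-separated field, so older indices are considered for deletion first.
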