Mirror of https://github.com/Security-Onion-Solutions/securityonion.git (synced 2025-12-08 18:22:47 +01:00)
Remove Curator closed index deletion scripts
@@ -1,36 +0,0 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

#. /usr/sbin/so-elastic-common
#. /etc/nsm/securityonion.conf

# If logrotate script doesn't already exist, create it
#FILE="/etc/logrotate.d/so-curator-closed-delete"
#if ! [ -f ${FILE} ]; then
# cat << EOF > ${FILE}
#/var/log/nsm/so-curator-closed-delete.log {
# daily
# rotate 7
# copytruncate
# compress
# missingok
# notifempty
#}
#EOF
#fi

# Avoid starting multiple instances
APP=closeddelete
lf=/tmp/$APP-pidLockFile
# create empty lock file if none exists
cat /dev/null >> $lf
read lastPID < $lf
# if lastPID is not null and a process with that pid exists, exit
[ ! -z "$lastPID" -a -d /proc/$lastPID ] && exit
echo $$ > $lf

/usr/sbin/so-curator-closed-delete-delete
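The wrapper above avoids concurrent runs with a hand-rolled PID lock file: it writes $$ to a file in /tmp and exits if the previously recorded PID is still alive. A minimal sketch of an equivalent single-instance guard using flock(1) is shown here for comparison; the lock file path is illustrative, not something the original scripts use.

#!/bin/bash
# Sketch only: single-instance guard via flock(1); the lock path is an assumption for illustration.
LOCKFILE=/var/run/so-curator-closed-delete.lock
exec 200>"${LOCKFILE}"        # open (or create) the lock file on file descriptor 200
flock -n 200 || exit 0        # another instance already holds the lock; exit quietly
/usr/sbin/so-curator-closed-delete-delete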
@@ -1,61 +0,0 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% from 'vars/globals.map.jinja' import GLOBALS %}
{%- if grains['role'] in ['so-searchnode', 'so-heavynode'] %}
{%- set ELASTICSEARCH_HOST = GLOBALS.node_ip -%}
{%- set ELASTICSEARCH_PORT = salt['pillar.get']('elasticsearch:es_port') -%}
{%- elif grains['role'] in ['so-eval', 'so-managersearch', 'so-standalone', 'so-manager'] %}
{%- set ELASTICSEARCH_HOST = GLOBALS.manager_ip -%}
{%- set ELASTICSEARCH_PORT = salt['pillar.get']('manager:es_port') -%}
{%- endif -%}
{%- set LOG_SIZE_LIMIT = salt['pillar.get']('elasticsearch:log_size_limit') -%}

# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

LOG="/opt/so/log/curator/so-curator-closed-delete.log"

overlimit() {
  [[ $(du -hs --block-size=1GB /nsm/elasticsearch/nodes | awk '{print $1}' ) -gt "{{LOG_SIZE_LIMIT}}" ]]
}

closedindices() {
  # If we can't query Elasticsearch, then immediately return false.
  curl -K /opt/so/conf/elasticsearch/curl.config -s -k https://{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices?h=index\&expand_wildcards=closed >/dev/null 2>&1
  [ $? -eq 1 ] && return false
  # First, get the list of closed indices using _cat/indices?h=index\&expand_wildcards=closed.
  # Next, filter out any so-case indices.
  # Finally, use grep's -q option to return true if there are any remaining logstash- or so- indices.
  curl -K /opt/so/conf/elasticsearch/curl.config -s -k https://{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices?h=index\&expand_wildcards=closed | grep -v "so-case" | grep -q -E "(logstash-|so-)"
}

# Check for 2 conditions:
# 1. Are Elasticsearch indices using more disk space than LOG_SIZE_LIMIT?
# 2. Are there any closed indices that we can delete?
# If both conditions are true, keep on looping until one of the conditions is false.
while overlimit && closedindices; do

  # We need to determine OLDEST_INDEX:
  # First, get the list of closed indices using _cat/indices?h=index\&expand_wildcards=closed.
  # Next, filter out any so-case indices and only select the remaining logstash- or so- indices.
  # Then, sort by date by telling sort to use hyphen as delimiter and sort on the third field.
  # Finally, select the first entry in that sorted list.
  OLDEST_INDEX=$(curl -K /opt/so/conf/elasticsearch/curl.config -s -k https://{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices?h=index\&expand_wildcards=closed | grep -v "so-case" | grep -E "(logstash-|so-)" | sort -t- -k3 | head -1)

  # Now that we've determined OLDEST_INDEX, ask Elasticsearch to delete it.
  curl -K /opt/so/conf/elasticsearch/curl.config -XDELETE -k https://{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/${OLDEST_INDEX}

  # Finally, write a log entry that says we deleted it.
  echo "$(date) - Used disk space exceeds LOG_SIZE_LIMIT ({{LOG_SIZE_LIMIT}} GB) - Index ${OLDEST_INDEX} deleted ..." >> ${LOG}

done
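Both the removed script above and its replacement below pick deletion candidates by sorting index names with sort -t- -k3, i.e. treating the hyphen as the field delimiter and sorting from the third field onward, which holds the date portion of names such as so-zeek-2023.01.12; the oldest index therefore sorts first. A quick illustration with made-up index names:

# Example only (index names are made up): sort -t- -k3 orders indices oldest-first by the date field.
printf '%s\n' so-zeek-2023.01.12 so-ids-2023.01.10 logstash-syslog-2023.01.11 | sort -t- -k3
# so-ids-2023.01.10
# logstash-syslog-2023.01.11
# so-zeek-2023.01.12
# head -1 on this output therefore yields the oldest index, so-ids-2023.01.10.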
salt/curator/files/bin/so-curator-cluster-delete-delete (new executable file, 86 lines)
@@ -0,0 +1,86 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% from 'vars/globals.map.jinja' import GLOBALS %}
{%- if grains['role'] in ['so-searchnode', 'so-heavynode'] %}
{%- set ELASTICSEARCH_HOST = GLOBALS.node_ip -%}
{%- set ELASTICSEARCH_PORT = salt['pillar.get']('elasticsearch:es_port') -%}
{%- elif grains['role'] in ['so-eval', 'so-managersearch', 'so-standalone', 'so-manager'] %}
{%- set ELASTICSEARCH_HOST = GLOBALS.manager_ip -%}
{%- set ELASTICSEARCH_PORT = salt['pillar.get']('manager:es_port') -%}
{%- endif -%}
{%- set LOG_SIZE_LIMIT = salt['pillar.get']('elasticsearch:retention:log_size_limit') -%}

# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

LOG="/opt/so/log/curator/so-curator-cluster-delete.log"

overlimit() {
  [[ $(/usr/sbin/so-elasticsearch-cluster-space-used) -gt "{{LOG_SIZE_LIMIT}}" ]]
}

closedindices() {
  # If we can't query Elasticsearch, then immediately return false.
  /usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep close > /dev/null 2>&1
  [ $? -eq 1 ] && return 1
  # First, get the list of closed indices using _cat/indices?h=index,status | grep close | awk '{print $1}'.
  # Next, filter out any so-case indices.
  # Finally, keep only the remaining logstash-, so-, or .ds-logs- indices, sorted oldest first.
  CLOSED_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep close | awk '{print $1}' | grep -v "so-case" | grep -E "(logstash-|so-|.ds-logs-)" | sort -t- -k3)
  for CLOSED_INDEX in ${CLOSED_INDICES}; do
    # Now that we have a candidate CLOSED_INDEX, ask Elasticsearch to delete it.
    # First, we need to check if the index is assigned as the current write index for a data stream.
    # To do so, we need to identify to which data stream this index is associated.
    DATASTREAM_PATTERN="logs-[a-zA-Z_.]+-[a-zA-Z_.]+"
    DATASTREAM=$(echo "${CLOSED_INDEX}" | grep -oE "$DATASTREAM_PATTERN")
    CURRENT_WRITE_INDEX=$(/usr/sbin/so-elasticsearch-query _data_stream/$DATASTREAM | jq -r .data_streams[0].indices[-1].index_name)
    if [ "${CLOSED_INDEX}" != "${CURRENT_WRITE_INDEX}" ]; then
      # This should not be a write index, so we should be allowed to delete it
      /usr/sbin/so-elasticsearch-query ${CLOSED_INDEX} -XDELETE
      # Finally, write a log entry that says we deleted it.
      echo "$(date) - Used disk space exceeds LOG_SIZE_LIMIT ({{LOG_SIZE_LIMIT}} GB) - Index ${CLOSED_INDEX} deleted ..." >> ${LOG}
    fi
    if ! overlimit; then
      exit
    fi
  done
}

while overlimit; do

  # We need to determine OLDEST_OPEN_INDEX:
  # First, get the list of open indices using _cat/indices?h=index,status | grep open | awk '{print $1}'.
  # Next, filter out any so-case indices and only select the remaining logstash-, so-, or .ds-logs- indices.
  # Then, sort by date by telling sort to use hyphen as delimiter and sort on the third field.
  OPEN_INDICES=$(so-elasticsearch-query _cat/indices?h=index,status | grep open | awk '{print $1}' | grep -v "so-case" | grep -E "(logstash-|so-|.ds-logs-)" | sort -t- -k3)
  #OLDEST_OPEN_INDEX=$(so-elasticsearch-query _cat/indices?h=index,status | grep open | awk '{print $1}' | grep -v "so-case" | grep -E "(logstash-|so-|.ds-logs-)" | sort -t- -k3 | head -1)

  for OPEN_INDEX in ${OPEN_INDICES}; do
    # The list is sorted oldest first, so OPEN_INDEX is the oldest remaining open index; ask Elasticsearch to delete it.
    # First, we need to check if the index is assigned as the current write index for a data stream.
    # To do so, we need to identify to which data stream this index is associated.
    DATASTREAM_PATTERN="logs-[a-zA-Z_.]+-[a-zA-Z_.]+"
    DATASTREAM=$(echo "${OPEN_INDEX}" | grep -oE "$DATASTREAM_PATTERN")
    CURRENT_WRITE_INDEX=$(/usr/sbin/so-elasticsearch-query _data_stream/$DATASTREAM | jq -r .data_streams[0].indices[-1].index_name)
    if [ "${OPEN_INDEX}" != "${CURRENT_WRITE_INDEX}" ]; then
      # This should not be a write index, so we should be allowed to delete it
      /usr/sbin/so-elasticsearch-query ${OPEN_INDEX} -XDELETE
      # Finally, write a log entry that says we deleted it.
      echo "$(date) - Used disk space exceeds LOG_SIZE_LIMIT ({{LOG_SIZE_LIMIT}} GB) - Index ${OPEN_INDEX} deleted ..." >> ${LOG}
    fi
    if ! overlimit; then
      exit
    fi
  done

done
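The added script only deletes an index after confirming it is not the current write index of its data stream. The check leans on the Elasticsearch get data stream API, which lists a stream's backing indices in generation order with the write index last; the shape of that response and the jq expression are sketched here with illustrative names:

# Illustrative _data_stream response (stream and index names are examples, not from a live cluster):
# GET _data_stream/logs-suricata.alerts-so
# {
#   "data_streams": [
#     {
#       "name": "logs-suricata.alerts-so",
#       "indices": [
#         { "index_name": ".ds-logs-suricata.alerts-so-2023.01.10-000001" },
#         { "index_name": ".ds-logs-suricata.alerts-so-2023.01.17-000002" }
#       ]
#     }
#   ]
# }
# jq -r '.data_streams[0].indices[-1].index_name' selects the last backing index,
# i.e. the current write index: .ds-logs-suricata.alerts-so-2023.01.17-000002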