Simplify deletion logic and add stderr and stdout logging within script

Wes
2023-04-13 15:04:16 +00:00
parent a8d3363a6f
commit d6421ee7cc

@@ -16,67 +16,35 @@ overlimit() {
 	[[ $(/usr/sbin/so-elasticsearch-cluster-space-used) -gt "${LOG_SIZE_LIMIT}" ]]
 }
-closedindices() {
-	# If we can't query Elasticsearch, then immediately return false.
-	/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep close > /dev/null 2>&1
-	[ $? -eq 1 ] && return false
-	# First, get the list of closed indices using _cat/indices?h=index,status | grep close | awk '{print $1}'.
-	# Next, filter out any so-case indices.
-	# Finally, use grep's -q option to return true if there are any remaining logstash-, so-, or .ds-logs- indices.
-	/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep close | awk '{print $1}' | grep -v "so-case" | grep -q -E "(logstash-|so-|.ds-logs-)"
-}
-# Check for 2 conditions:
-# 1. Are Elasticsearch indices using more disk space than LOG_SIZE_LIMIT?
-# 2. Are there any closed indices that we can delete?
-# If both conditions are true, keep on looping until one of the conditions is false.
-while overlimit && closedindices; do
-	CLOSED_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep close | awk '{print $1}' | grep -v "so-case" | grep -E "(logstash-|so-|.ds-logs-)" | sort -t- -k3)
-	# We iterate through the closed indices
-	for CLOSED_INDEX in ${CLOSED_INDICES}; do
-		# Now that we've sorted the indices from oldest to newest, we need to check each index to see if it is assigned as the current write index for a data stream
-		# To do so, we need to identify to which data stream this index is associated
-		# We extract the data stream name using the pattern below
-		DATASTREAM_PATTERN="logs-[a-zA-Z_.]+-[a-zA-Z_.]+"
-		DATASTREAM=$(echo "${CLOSED_INDEX}" | grep -oE "$DATASTREAM_PATTERN")
-		# We look up the data stream, and determine the write index
-		CURRENT_WRITE_INDEX=$(/usr/sbin/so-elasticsearch-query _data_stream/$DATASTREAM | jq -r .data_streams[0].indices[-1].index_name)
-		# We make sure we are not trying to delete a write index
-		if [ "${CLOSED_INDEX}" != "${CURRENT_WRITE_INDEX}" ]; then
-			# This should not be a write index, so we should be allowed to delete it
-			/usr/sbin/so-elasticsearch-query ${CLOSED_INDEX} -XDELETE
-			# Finally, write a log entry that says we deleted it.
-			echo "$(date) - Used disk space exceeds LOG_SIZE_LIMIT (${LOG_SIZE_LIMIT} GB) - Index ${CLOSED_INDEX} deleted ..." >> ${LOG}
-		fi
-		if ! overlimit; then
-			exit
-		fi
-	done
-done
+# Check to see if Elasticsearch indices using more disk space than LOG_SIZE_LIMIT
+# Closed indices will be deleted first. If we are able to bring disk space under LOG_SIZE_LIMIT, we will break out of the loop.
 while overlimit; do
-	# We need to determine the oldest open index.
-	# First, get the list of open indices using _cat/indices?h=index,status | grep open | awk '{print $1}'.
-	# Next, filter out any so-case indices and only select the remaining logstash-, so-, or .ds-logs- indices.
-	# Then, sort by date by telling sort to use hyphen as delimiter and sort on the third field.
-	OPEN_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep open | awk '{print $1}' | grep -v "so-case" | grep -E "(logstash-|so-|.ds-logs-)" | sort -t- -k3)
-	# We iterate through the open indices
-	for OPEN_INDEX in ${OPEN_INDICES}; do
+	# If we can't query Elasticsearch, then immediately return false.
+	/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status > /dev/null 2>&1
+	[ $? -eq 1 ] && echo "$(date) - Could not query Elasticsearch." >> ${LOG} && exit
+	# We iterate through the closed and open indices
+	CLOSED_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep 'close$' | awk '{print $1}' | grep -v "so-case" | grep -E "(logstash-|so-|.ds-logs-)" | sort -t- -k3)
+	OPEN_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep 'open$' | awk '{print $1}' | grep -v "so-case" | grep -E "(logstash-|so-|.ds-logs-)" | sort -t- -k3)
+	for INDEX in ${CLOSED_INDICES} ${OPEN_INDICES}; do
 		# Now that we've sorted the indices from oldest to newest, we need to check each index to see if it is assigned as the current write index for a data stream
 		# To do so, we need to identify to which data stream this index is associated
 		# We extract the data stream name using the pattern below
 		DATASTREAM_PATTERN="logs-[a-zA-Z_.]+-[a-zA-Z_.]+"
-		DATASTREAM=$(echo "${OPEN_INDEX}" | grep -oE "$DATASTREAM_PATTERN")
-		# We look up the data stream, and determine the write index
-		CURRENT_WRITE_INDEX=$(/usr/sbin/so-elasticsearch-query _data_stream/$DATASTREAM | jq -r .data_streams[0].indices[-1].index_name)
-		# We make sure we are not trying to delete a write index
-		if [ "${OPEN_INDEX}" != "${CURRENT_WRITE_INDEX}" ]; then
-			# This should not be a write index, so we should be allowed to delete it
-			/usr/sbin/so-elasticsearch-query ${OPEN_INDEX} -XDELETE
-			# Finally, write a log entry that says we deleted it.
-			echo "$(date) - Used disk space exceeds LOG_SIZE_LIMIT (${LOG_SIZE_LIMIT} GB) - Index ${OPEN_INDEX} deleted ..." >> ${LOG}
+		DATASTREAM=$(echo "${INDEX}" | grep -oE "$DATASTREAM_PATTERN")
+		# We look up the data stream, and determine the write index. If there is only one backing index, we delete the entire data stream
+		BACKING_INDICES=$(/usr/sbin/so-elasticsearch-query _data_stream/${DATASTREAM} | jq -r '.data_streams[0].indices | length')
+		if [ "$BACKING_INDICES" -gt 1 ]; then
+			CURRENT_WRITE_INDEX=$(/usr/sbin/so-elasticsearch-query _data_stream/$DATASTREAM | jq -r .data_streams[0].indices[-1].index_name)
+			# We make sure we are not trying to delete a write index
+			if [ "${INDEX}" != "${CURRENT_WRITE_INDEX}" ]; then
+				# This should not be a write index, so we should be allowed to delete it
+				printf "\n$(date) - Used disk space exceeds LOG_SIZE_LIMIT (${LOG_SIZE_LIMIT} GB) - Deleting ${INDEX} index...\n" >> ${LOG}
+				/usr/sbin/so-elasticsearch-query ${INDEX} -XDELETE >> ${LOG} 2>&1
+			fi
+		else
+			# We delete the entire data stream, since there is only one backing index
+			printf "\n$(date) - Used disk space exceeds LOG_SIZE_LIMIT (${LOG_SIZE_LIMIT} GB) - Deleting ${DATASTREAM} data stream...\n" >> ${LOG}
+			/usr/sbin/so-elasticsearch-query _data_stream/${DATASTREAM} -XDELETE >> ${LOG} 2>&1
 		fi
 		if ! overlimit; then
 			exit
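
To make the index selection concrete, here is a minimal sketch of the script's filter-and-sort pipeline run against made-up _cat/indices?h=index,status output; the index names below are hypothetical and exist only for illustration:

    # Hypothetical output from: /usr/sbin/so-elasticsearch-query _cat/indices?h=index,status
    cat <<'EOF' > /tmp/indices.txt
    .ds-logs-system.syslog-default-2023.03.01-000001 close
    .ds-logs-system.syslog-default-2023.04.13-000002 open
    so-case-2023.04.01 open
    logstash-ossec-2023.03.05 close
    EOF

    # Same filters as the script: keep only closed indices, drop so-case indices,
    # keep the remaining logstash-/so-/.ds-logs- indices, and sort on the third
    # hyphen-delimited field so date-stamped names come out oldest first.
    grep 'close$' /tmp/indices.txt | awk '{print $1}' \
        | grep -v "so-case" | grep -E "(logstash-|so-|.ds-logs-)" \
        | sort -t- -k3

The open-index list is built the same way with grep 'open$', and the loop walks closed indices before open ones.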
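The write-index guard can also be exercised in isolation. A hedged sketch, assuming a hypothetical data stream name: GET _data_stream/<name> lists the backing indices in order, and the script treats the last entry as the current write index.

    DATASTREAM="logs-system.syslog-default"   # hypothetical name for illustration
    # Count the backing indices; a data stream with a single backing index cannot
    # have that index deleted, which is why the script deletes the whole stream.
    BACKING_INDICES=$(/usr/sbin/so-elasticsearch-query _data_stream/${DATASTREAM} | jq -r '.data_streams[0].indices | length')
    # The last backing index is the data stream's current write index.
    CURRENT_WRITE_INDEX=$(/usr/sbin/so-elasticsearch-query _data_stream/${DATASTREAM} | jq -r '.data_streams[0].indices[-1].index_name')
    echo "${DATASTREAM}: ${BACKING_INDICES} backing indices, write index ${CURRENT_WRITE_INDEX}"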