From 2cb6f0f1e6795e492bd93e55a746a5222b382e90 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Mon, 27 Mar 2023 12:30:39 -0400 Subject: [PATCH 01/27] Add curator settings --- salt/elasticsearch/defaults.yaml | 2 ++ salt/elasticsearch/soc_elasticsearch.yaml | 7 ++++++- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/salt/elasticsearch/defaults.yaml b/salt/elasticsearch/defaults.yaml index 3649d015e..37f55ddc7 100644 --- a/salt/elasticsearch/defaults.yaml +++ b/salt/elasticsearch/defaults.yaml @@ -1,4 +1,6 @@ elasticsearch: + retention: + retention_pct: 50 config: node: {} cluster: diff --git a/salt/elasticsearch/soc_elasticsearch.yaml b/salt/elasticsearch/soc_elasticsearch.yaml index d7c310687..ded8e5be6 100644 --- a/salt/elasticsearch/soc_elasticsearch.yaml +++ b/salt/elasticsearch/soc_elasticsearch.yaml @@ -1,7 +1,12 @@ elasticsearch: esheap: description: Specify the memory heap size in (m)egabytes for Elasticsearch. - helpLink: elasticsearch.html + helpLink: elasticsearch.html + retention: + retention_pct: + decription: Total percentage of space used by Elasticsearch for multi node clusters + helpLink: elasticsearch.yaml + global: True config: cluster: name: From 6c3c5730c5550f01d71097d62cfc338186bfd9e7 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Mon, 27 Mar 2023 12:33:34 -0400 Subject: [PATCH 02/27] Add curator settings --- salt/curator/files/action/delete.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/salt/curator/files/action/delete.yml b/salt/curator/files/action/delete.yml index fb8ae30cb..c81a9e548 100644 --- a/salt/curator/files/action/delete.yml +++ b/salt/curator/files/action/delete.yml @@ -3,6 +3,11 @@ # https://securityonion.net/license; you may not use this file except in compliance with the # Elastic License 2.0. 
+{% import_yaml 'elasticsearch/defaults.yaml' as ELASTICDEFAULTS %} +{% set ELASTICMERGED = salt['pillar.get']('elasticsearch:retention', ELASTICDEFAULTS.elasticsearch.retention, merge=true) %} + +{{ ELASTICMERGED.retention_pct }} + {%- set log_size_limit = salt['pillar.get']('elasticsearch:log_size_limit') %} actions: 1: From 934b8894e2de0751a4008ea1b0c2a87ba73d0e64 Mon Sep 17 00:00:00 2001 From: Wes Date: Tue, 28 Mar 2023 00:54:04 +0000 Subject: [PATCH 03/27] Update Curator scripts --- .../files/bin/so-curator-cluster-close | 0 .../files/bin/so-curator-cluster-delete | 38 ++++++++------ salt/curator/init.sls | 50 ++++++++++++++----- 3 files changed, 60 insertions(+), 28 deletions(-) mode change 100644 => 100755 salt/curator/files/bin/so-curator-cluster-close mode change 100644 => 100755 salt/curator/files/bin/so-curator-cluster-delete diff --git a/salt/curator/files/bin/so-curator-cluster-close b/salt/curator/files/bin/so-curator-cluster-close old mode 100644 new mode 100755 diff --git a/salt/curator/files/bin/so-curator-cluster-delete b/salt/curator/files/bin/so-curator-cluster-delete old mode 100644 new mode 100755 index 34c3c10cf..e99e88659 --- a/salt/curator/files/bin/so-curator-cluster-delete +++ b/salt/curator/files/bin/so-curator-cluster-delete @@ -4,7 +4,27 @@ # https://securityonion.net/license; you may not use this file except in compliance with the # Elastic License 2.0. -APP=delete + +#. /usr/sbin/so-elastic-common +#. /etc/nsm/securityonion.conf + +# If logrotate script doesn't already exist, create it +#FILE="/etc/logrotate.d/so-curator-cluster-delete" +#if ! [ -f ${FILE} ]; then +# cat << EOF > ${FILE} +#/var/log/nsm/so-curator-cluster-delete.log { +# daily +# rotate 7 +# copytruncate +# compress +# missingok +# notifempty +#} +#EOF +#fi + +# Avoid starting multiple instances +APP=clusterdelete lf=/tmp/$APP-pidLockFile # create empty lock file if none exists cat /dev/null >> $lf @@ -13,18 +33,4 @@ read lastPID < $lf [ ! 
-z "$lastPID" -a -d /proc/$lastPID ] && exit echo $$ > $lf -docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-zeek-delete.yml > /dev/null 2>&1; -docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-beats-delete.yml > /dev/null 2>&1; -docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-firewall-delete.yml > /dev/null 2>&1; -docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-ids-delete.yml > /dev/null 2>&1; -docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-import-delete.yml > /dev/null 2>&1; -docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-kratos-delete.yml > /dev/null 2>&1; -docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-osquery-delete.yml > /dev/null 2>&1; -docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-ossec-delete.yml > /dev/null 2>&1; -docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-strelka-delete.yml > /dev/null 2>&1; -docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-syslog-delete.yml > /dev/null 2>&1; -docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/logs-import-so-delete.yml > /dev/null 2>&1; -docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/logs-strelka-delete.yml > /dev/null 2>&1; -docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/logs-suricata-delete.yml > /dev/null 2>&1; -docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/logs-syslog-delete.yml > /dev/null 2>&1; -docker exec so-curator curator --config /etc/curator/config/curator.yml 
/etc/curator/action/logs-zeek-delete.yml > /dev/null 2>&1; +/usr/sbin/so-curator-cluster-delete-delete diff --git a/salt/curator/init.sls b/salt/curator/init.sls index 94a666f53..7ef48a382 100644 --- a/salt/curator/init.sls +++ b/salt/curator/init.sls @@ -5,12 +5,6 @@ {% from 'allowed_states.map.jinja' import allowed_states %} {% if sls in allowed_states %} -{% from 'vars/globals.map.jinja' import GLOBALS %} -{% from 'docker/docker.map.jinja' import DOCKER %} -{% from "curator/map.jinja" import CURATOROPTIONS %} -{% from "curator/map.jinja" import CURATORMERGED %} -{% set REMOVECURATORCRON = False %} - # Curator # Create the group curatorgroup: @@ -27,6 +21,17 @@ curator: - createhome: False # Create the log directory +curlogdir: + file.directory: + - name: /opt/so/log/curator + - user: 934 + - group: 939 +{% from 'vars/globals.map.jinja' import GLOBALS %} +{% if GLOBALS.role in ['so-eval', 'so-standalone', 'so-managersearch', 'so-heavynode', 'so-manager']%} +{% from 'docker/docker.map.jinja' import DOCKER %} +{% from "curator/map.jinja" import CURATOROPTIONS %} +{% from "curator/map.jinja" import CURATORMERGED %} +{% set REMOVECURATORCRON = False %} curactiondir: file.directory: - name: /opt/so/conf/curator/action @@ -34,12 +39,6 @@ curactiondir: - group: 939 - makedirs: True -curlogdir: - file.directory: - - name: /opt/so/log/curator - - user: 934 - - group: 939 - actionconfs: file.recurse: - name: /opt/so/conf/curator/action @@ -172,7 +171,34 @@ so-curatorclusterdelete: - daymonth: '*' - month: '*' - dayweek: '*' +{% else %} +curnodedel: + file.managed: + - name: /usr/sbin/so-curator-node-delete + - source: salt://curator/files/bin/so-curator-node-delete + - user: 934 + - group: 939 + - mode: 755 +curnodedeldel: + file.managed: + - name: /usr/sbin/so-curator-node-delete-delete + - source: salt://curator/files/bin/so-curator-node-delete-delete + - user: 934 + - group: 939 + - mode: 755 + - template: jinja + +so-curatornodedeletecron: + cron.present: + - name: 
/usr/sbin/so-curator-node-delete > /opt/so/log/curator/cron-node-delete.log 2>&1 + - user: root + - minute: '*/5' + - hour: '*' + - daymonth: '*' + - month: '*' + - dayweek: '*' +{% endif %} {% else %} {{sls}}_state_not_allowed: From 7030f3556180fcfb5e76abddcc5f47e47621b290 Mon Sep 17 00:00:00 2001 From: Wes Date: Tue, 28 Mar 2023 00:54:36 +0000 Subject: [PATCH 04/27] Update Curator state --- salt/curator/init.sls | 93 ++++++++++--------------------------------- 1 file changed, 20 insertions(+), 73 deletions(-) diff --git a/salt/curator/init.sls b/salt/curator/init.sls index 7ef48a382..d6267881e 100644 --- a/salt/curator/init.sls +++ b/salt/curator/init.sls @@ -5,6 +5,12 @@ {% from 'allowed_states.map.jinja' import allowed_states %} {% if sls in allowed_states %} +{% from 'vars/globals.map.jinja' import GLOBALS %} +{% from 'docker/docker.map.jinja' import DOCKER %} +{% from "curator/map.jinja" import CURATOROPTIONS %} +{% from "curator/map.jinja" import CURATORMERGED %} +{% set REMOVECURATORCRON = False %} + # Curator # Create the group curatorgroup: @@ -26,12 +32,7 @@ curlogdir: - name: /opt/so/log/curator - user: 934 - group: 939 -{% from 'vars/globals.map.jinja' import GLOBALS %} -{% if GLOBALS.role in ['so-eval', 'so-standalone', 'so-managersearch', 'so-heavynode', 'so-manager']%} -{% from 'docker/docker.map.jinja' import DOCKER %} -{% from "curator/map.jinja" import CURATOROPTIONS %} -{% from "curator/map.jinja" import CURATORMERGED %} -{% set REMOVECURATORCRON = False %} + curactiondir: file.directory: - name: /opt/so/conf/curator/action @@ -49,7 +50,6 @@ actionconfs: - defaults: CURATORMERGED: {{ CURATORMERGED }} - curconf: file.managed: - name: /opt/so/conf/curator/curator.yml @@ -60,40 +60,6 @@ curconf: - template: jinja - show_changes: False -curcloseddel: - file.managed: - - name: /usr/sbin/so-curator-closed-delete - - source: salt://curator/files/bin/so-curator-closed-delete - - user: 934 - - group: 939 - - mode: 755 - -curcloseddeldel: - file.managed: 
- - name: /usr/sbin/so-curator-closed-delete-delete - - source: salt://curator/files/bin/so-curator-closed-delete-delete - - user: 934 - - group: 939 - - mode: 755 - - template: jinja - -curclose: - file.managed: - - name: /usr/sbin/so-curator-close - - source: salt://curator/files/bin/so-curator-close - - user: 934 - - group: 939 - - mode: 755 - - template: jinja - -curdel: - file.managed: - - name: /usr/sbin/so-curator-delete - - source: salt://curator/files/bin/so-curator-delete - - user: 934 - - group: 939 - - mode: 755 - curclusterclose: file.managed: - name: /usr/sbin/so-curator-cluster-close @@ -103,13 +69,21 @@ curclusterclose: - mode: 755 - template: jinja -curclusterdelete: +curclusterdelete: file.managed: - - name: /usr/sbin/so-curator-cluster-delete + - name: /usr/sbin/so-curator-delete-delete - source: salt://curator/files/bin/so-curator-cluster-delete - user: 934 - group: 939 - mode: 755 + +curclusterdeletedelete: + file.managed: + - name: /usr/sbin/so-curator-cluster-delete-delete + - source: salt://curator/files/bin/so-curator-cluster-delete-delete + - user: 934 + - group: 939 + - mode: 755 - template: jinja so-curator: @@ -162,43 +136,16 @@ so-curatorclusterclose: - month: '*' - dayweek: '*' -so-curatorclusterdelete: +so-curatorclusterdeletecron: cron.present: - - name: /usr/sbin/so-curator-cluster-delete > /opt/so/log/curator/cron-delete.log 2>&1 - - user: root - - minute: '2' - - hour: '*/1' - - daymonth: '*' - - month: '*' - - dayweek: '*' -{% else %} -curnodedel: - file.managed: - - name: /usr/sbin/so-curator-node-delete - - source: salt://curator/files/bin/so-curator-node-delete - - user: 934 - - group: 939 - - mode: 755 - -curnodedeldel: - file.managed: - - name: /usr/sbin/so-curator-node-delete-delete - - source: salt://curator/files/bin/so-curator-node-delete-delete - - user: 934 - - group: 939 - - mode: 755 - - template: jinja - -so-curatornodedeletecron: - cron.present: - - name: /usr/sbin/so-curator-node-delete > 
/opt/so/log/curator/cron-node-delete.log 2>&1 + - name: /usr/sbin/so-curator-cluster-delete-delete > /opt/so/log/curator/cron-cluster-delete.log 2>&1 - user: root - minute: '*/5' - hour: '*' - daymonth: '*' - month: '*' - dayweek: '*' -{% endif %} + {% else %} {{sls}}_state_not_allowed: From 32e92d10ad0718ef87e25ecf8b58ca6d6e319cd1 Mon Sep 17 00:00:00 2001 From: Wes Date: Tue, 28 Mar 2023 00:55:56 +0000 Subject: [PATCH 05/27] Add new cluster space management scripts --- .../so-elasticsearch-cluster-space-configure | 41 +++++++++++++++ .../sbin/so-elasticsearch-cluster-space-total | 51 +++++++++++++++++++ .../sbin/so-elasticsearch-cluster-space-used | 23 +++++++++ 3 files changed, 115 insertions(+) create mode 100755 salt/common/tools/sbin/so-elasticsearch-cluster-space-configure create mode 100755 salt/common/tools/sbin/so-elasticsearch-cluster-space-total create mode 100755 salt/common/tools/sbin/so-elasticsearch-cluster-space-used diff --git a/salt/common/tools/sbin/so-elasticsearch-cluster-space-configure b/salt/common/tools/sbin/so-elasticsearch-cluster-space-configure new file mode 100755 index 000000000..70fb37e3e --- /dev/null +++ b/salt/common/tools/sbin/so-elasticsearch-cluster-space-configure @@ -0,0 +1,41 @@ +#!/bin/bash +# +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. + +. 
/usr/sbin/so-common + +# Determine available disk space +{% import_yaml 'elasticsearch/defaults.yaml' as ELASTICDEFAULTS %} +{% set ELASTICMERGED = salt['pillar.get']('elasticsearch:retention', ELASTICDEFAULTS.elasticsearch.retention, merge=true) %} + +# Wait for ElasticSearch to initialize +#COUNT=0 +ELASTICSEARCH_CONNECTED="no" +while [[ "$COUNT" -le 240 ]]; do + so-elasticsearch-query / -k --output /dev/null --silent --head --fail + if [ $? -eq 0 ]; then + ELASTICSEARCH_CONNECTED="yes" + break + else + ((COUNT+=1)) + sleep 1 + fi +done +if [ "$ELASTICSEARCH_CONNECTED" == "no" ]; then + echo + echo -e "Connection attempt timed out. Unable to connect to ElasticSearch. \nPlease try: \n -checking log(s) in /var/log/elasticsearch/\n -running 'sudo docker ps' \n -running 'sudo so-elastic-restart'" + echo + exit 1 +fi + +AVAILABLE_SPACE=$(/usr/sbin/so-elasticsearch-cluster-space-total {{ ELASTICMERGED.retention_pct }}) +ELASTICSEARCH_PILLAR="/opt/so/saltstack/local/pillar/elasticsearch/soc_elasticsearch.sls" +if grep -q log_size_limit $ELASTICSEARCH_PILLAR ; then + sed -i s"/log_size_limit:.*/log_size_limit: $AVAILABLE_SPACE/" $ELASTICSEARCH_PILLAR +else + echo " retention:" >> $ELASTICSEARCH_PILLAR + echo " log_size_limit: $AVAILABLE_SPACE" >> $ELASTICSEARCH_PILLAR +fi diff --git a/salt/common/tools/sbin/so-elasticsearch-cluster-space-total b/salt/common/tools/sbin/so-elasticsearch-cluster-space-total new file mode 100755 index 000000000..962d515e2 --- /dev/null +++ b/salt/common/tools/sbin/so-elasticsearch-cluster-space-total @@ -0,0 +1,51 @@ +#!/bin/bash +# +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. + +. 
/usr/sbin/so-common + +TOTAL_AVAILABLE_SPACE=0 + +# Wait for ElasticSearch to initialize +COUNT=0 +ELASTICSEARCH_CONNECTED="no" +while [[ "$COUNT" -le 240 ]]; do + so-elasticsearch-query / -k --output /dev/null --silent --head --fail + if [ $? -eq 0 ]; then + ELASTICSEARCH_CONNECTED="yes" + break + else + ((COUNT+=1)) + sleep 1 + fi +done +if [ "$ELASTICSEARCH_CONNECTED" == "no" ]; then + echo + echo -e "Connection attempt timed out. Unable to connect to ElasticSearch. \nPlease try: \n -checking log(s) in /var/log/elasticsearch/\n -running 'sudo docker ps' \n -running 'sudo so-elastic-restart'" + echo + exit 1 +fi + +# Set percentage of space to desired value, otherwise use a default value of 80 percent +if [[ "$1" != "" ]]; then + PERCENTAGE=$1 +else + PERCENTAGE=80 +fi + +# Iterate through the output of _cat/allocation for each node in the cluster to determine the total available space +for i in $(so-elasticsearch-query _cat/allocation | awk '{print $5}'); do + size=$(echo $i | grep -oE '[0-9]+') + unit=$(echo $i | grep -oE '[A-Za-z]+') + if [ $unit = "tb" ]; then + size=$(( size * 1024 )) + fi + TOTAL_AVAILABLE_SPACE=$(( TOTAL_AVAILABLE_SPACE + size )) +done + +# Calculate the percentage of available space based on our previously defined value +PERCENTAGE_AVAILABLE_SPACE=$(( TOTAL_AVAILABLE_SPACE*PERCENTAGE/100 )) +echo "$PERCENTAGE_AVAILABLE_SPACE" diff --git a/salt/common/tools/sbin/so-elasticsearch-cluster-space-used b/salt/common/tools/sbin/so-elasticsearch-cluster-space-used new file mode 100755 index 000000000..3e8832ba0 --- /dev/null +++ b/salt/common/tools/sbin/so-elasticsearch-cluster-space-used @@ -0,0 +1,23 @@ +#!/bin/bash +# +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. + +. 
/usr/sbin/so-common + +TOTAL_AVAILABLE_SPACE=0 + +# Iterate through the output of _cat/allocation for each node in the cluster to determine the total available space +for i in $(so-elasticsearch-query _cat/allocation | awk '{print $3}'); do + size=$(echo $i | grep -oE '[0-9].*' | awk '{print int($1+0.5)}') + unit=$(echo $i | grep -oE '[A-Za-z]+') + if [ $unit = "tb" ]; then + size=$(( size * 1024 )) + fi + TOTAL_AVAILABLE_SPACE=$(( TOTAL_AVAILABLE_SPACE + size )) +done + +# Calculate the percentage of available space based on our previously defined value +echo "$TOTAL_AVAILABLE_SPACE" From fc0b9fa47cb598faeaf43caec4028a507801563c Mon Sep 17 00:00:00 2001 From: Wes Date: Tue, 28 Mar 2023 00:57:45 +0000 Subject: [PATCH 06/27] Remove Curator closed index deletion scripts --- .../files/bin/so-curator-closed-delete | 36 -------- .../files/bin/so-curator-closed-delete-delete | 61 ------------- .../bin/so-curator-cluster-delete-delete | 86 +++++++++++++++++++ 3 files changed, 86 insertions(+), 97 deletions(-) delete mode 100755 salt/curator/files/bin/so-curator-closed-delete delete mode 100755 salt/curator/files/bin/so-curator-closed-delete-delete create mode 100755 salt/curator/files/bin/so-curator-cluster-delete-delete diff --git a/salt/curator/files/bin/so-curator-closed-delete b/salt/curator/files/bin/so-curator-closed-delete deleted file mode 100755 index e585df406..000000000 --- a/salt/curator/files/bin/so-curator-closed-delete +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/bash -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - - -#. /usr/sbin/so-elastic-common -#. /etc/nsm/securityonion.conf - -# If logrotate script doesn't already exist, create it -#FILE="/etc/logrotate.d/so-curator-closed-delete" -#if ! 
[ -f ${FILE} ]; then -# cat << EOF > ${FILE} -#/var/log/nsm/so-curator-closed-delete.log { -# daily -# rotate 7 -# copytruncate -# compress -# missingok -# notifempty -#} -#EOF -#fi - -# Avoid starting multiple instances -APP=closeddelete -lf=/tmp/$APP-pidLockFile -# create empty lock file if none exists -cat /dev/null >> $lf -read lastPID < $lf -# if lastPID is not null and a process with that pid exists , exit -[ ! -z "$lastPID" -a -d /proc/$lastPID ] && exit -echo $$ > $lf - -/usr/sbin/so-curator-closed-delete-delete diff --git a/salt/curator/files/bin/so-curator-closed-delete-delete b/salt/curator/files/bin/so-curator-closed-delete-delete deleted file mode 100755 index fe62d3bf5..000000000 --- a/salt/curator/files/bin/so-curator-closed-delete-delete +++ /dev/null @@ -1,61 +0,0 @@ -#!/bin/bash -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - -{% from 'vars/globals.map.jinja' import GLOBALS %} -{%- if grains['role'] in ['so-searchnode', 'so-heavynode'] %} - {%- set ELASTICSEARCH_HOST = GLOBALS.node_ip -%} - {%- set ELASTICSEARCH_PORT = salt['pillar.get']('elasticsearch:es_port') -%} -{%- elif grains['role'] in ['so-eval', 'so-managersearch', 'so-standalone', 'so-manager'] %} - {%- set ELASTICSEARCH_HOST = GLOBALS.manager_ip -%} - {%- set ELASTICSEARCH_PORT = salt['pillar.get']('manager:es_port') -%} -{%- endif -%} -{%- set LOG_SIZE_LIMIT = salt['pillar.get']('elasticsearch:log_size_limit') -%} - -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. 
- - - -LOG="/opt/so/log/curator/so-curator-closed-delete.log" - -overlimit() { - - [[ $(du -hs --block-size=1GB /nsm/elasticsearch/nodes | awk '{print $1}' ) -gt "{{LOG_SIZE_LIMIT}}" ]] -} - -closedindices() { - - # If we can't query Elasticsearch, then immediately return false. - curl -K /opt/so/conf/elasticsearch/curl.config -s -k https://{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices?h=index\&expand_wildcards=closed >/dev/null 2>&1 - [ $? -eq 1 ] && return false - # First, get the list of closed indices using _cat/indices?h=index\&expand_wildcards=closed. - # Next, filter out any so-case indices. - # Finally, use grep's -q option to return true if there are any remaining logstash- or so- indices. - curl -K /opt/so/conf/elasticsearch/curl.config -s -k https://{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices?h=index\&expand_wildcards=closed | grep -v "so-case" | grep -q -E "(logstash-|so-)" -} - -# Check for 2 conditions: -# 1. Are Elasticsearch indices using more disk space than LOG_SIZE_LIMIT? -# 2. Are there any closed indices that we can delete? -# If both conditions are true, keep on looping until one of the conditions is false. -while overlimit && closedindices; do - - # We need to determine OLDEST_INDEX: - # First, get the list of closed indices using _cat/indices?h=index\&expand_wildcards=closed. - # Next, filter out any so-case indices and only select the remaining logstash- or so- indices. - # Then, sort by date by telling sort to use hyphen as delimiter and sort on the third field. - # Finally, select the first entry in that sorted list. - OLDEST_INDEX=$(curl -K /opt/so/conf/elasticsearch/curl.config -s -k https://{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices?h=index\&expand_wildcards=closed | grep -v "so-case" | grep -E "(logstash-|so-)" | sort -t- -k3 | head -1) - - # Now that we've determined OLDEST_INDEX, ask Elasticsearch to delete it. 
- curl -K /opt/so/conf/elasticsearch/curl.config-XDELETE -k https://{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/${OLDEST_INDEX} - - # Finally, write a log entry that says we deleted it. - echo "$(date) - Used disk space exceeds LOG_SIZE_LIMIT ({{LOG_SIZE_LIMIT}} GB) - Index ${OLDEST_INDEX} deleted ..." >> ${LOG} - -done diff --git a/salt/curator/files/bin/so-curator-cluster-delete-delete b/salt/curator/files/bin/so-curator-cluster-delete-delete new file mode 100755 index 000000000..4a1dec241 --- /dev/null +++ b/salt/curator/files/bin/so-curator-cluster-delete-delete @@ -0,0 +1,86 @@ +#!/bin/bash +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. + +{% from 'vars/globals.map.jinja' import GLOBALS %} +{%- if grains['role'] in ['so-searchnode', 'so-heavynode'] %} + {%- set ELASTICSEARCH_HOST = GLOBALS.node_ip -%} + {%- set ELASTICSEARCH_PORT = salt['pillar.get']('elasticsearch:es_port') -%} +{%- elif grains['role'] in ['so-eval', 'so-managersearch', 'so-standalone', 'so-manager'] %} + {%- set ELASTICSEARCH_HOST = GLOBALS.manager_ip -%} + {%- set ELASTICSEARCH_PORT = salt['pillar.get']('manager:es_port') -%} +{%- endif -%} +{%- set LOG_SIZE_LIMIT = salt['pillar.get']('elasticsearch:retention:log_size_limit') -%} + +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. 
+ + + +LOG="/opt/so/log/curator/so-curator-cluster-delete.log" + +overlimit() { + + [[ $(/usr/sbin/so-elasticsearch-cluster-space-used) -gt "{{LOG_SIZE_LIMIT}}" ]] +} + +closedindices() { + + # If we can't query Elasticsearch, then immediately return false. + /usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep close > /dev/null 2>&1 + [ $? -eq 1 ] && return false + # First, get the list of closed indices using _cat/indices?h=index,status | grep close | awk '{print $1}'. + # Next, filter out any so-case indices. + # Finally, use grep's -q option to return true if there are any remaining logstash-, so-, or .ds-logs- indices. + CLOSED_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep close | awk '{print $1}' | grep -v "so-case" | grep -q -E "(logstash-|so-|.ds-logs-)" | sort -t- -k3) + for CLOSED_INDEX in ${CLOSED_INDICES}; do + # Now that we've determined OLDEST_OPEN_INDEX, ask Elasticsearch to delete it. + # First, we need to check if the index is assigned as the current write index for a data stream + # To do so, we need to identify to which data stream this index is associated + DATASTREAM_PATTERN="logs-[a-zA-Z_.]+-[a-zA-Z_.]+" + DATASTREAM=$(echo "${CLOSED_INDEX}" | grep -oE "$DATASTREAM_PATTERN") + CURRENT_WRITE_INDEX=$(/usr/sbin/so-elasticsearch-query _data_stream/$DATASTREAM | jq -r .data_streams[0].indices[-1].index_name) + if [ "${CLOSED_INDEX}" != "${CURRENT_WRITE_INDEX}" ]; then + # This should not be a write index, so we should be allowed to delete it + /usr/sbin/so-elasticsearch-query ${CLOSED_INDEX} -XDELETE + # Finally, write a log entry that says we deleted it. + echo "$(date) - Used disk space exceeds LOG_SIZE_LIMIT ({{LOG_SIZE_LIMIT}} GB) - Index ${CLOSED_INDEX} deleted ..." >> ${LOG} + fi + if ! overlimit; then + exit + fi + done +} + +while overlimit; do + + # We need to determine OLDEST_OPEN_INDEX: + # First, get the list of open indices using _cat/indices?h=index,status | grep open | awk '{print $1}'. 
+ # Next, filter out any so-case indices and only select the remaining logstash-, so-, or .ds-logs- indices. + # Then, sort by date by telling sort to use hyphen as delimiter and sort on the third field. + OPEN_INDICES=$(so-elasticsearch-query _cat/indices?h=index,status | grep open | awk '{print $1}' | grep -v "so-case" | grep -E "(logstash-|so-|.ds-logs-)" | sort -t- -k3) + #OLDEST_OPEN_INDEX=$(so-elasticsearch-query _cat/indices?h=index,status | grep open | awk '{print $1}' | grep -v "so-case" | grep -E "(logstash-|so-|.ds-logs-)" | sort -t- -k3 | head -1) + + for OPEN_INDEX in ${OPEN_INDICES}; do + # Now that we've determined OLDEST_OPEN_INDEX, ask Elasticsearch to delete it. + # First, we need to check if the index is assigned as the current write index for a data stream + # To do so, we need to identify to which data stream this index is associated + DATASTREAM_PATTERN="logs-[a-zA-Z_.]+-[a-zA-Z_.]+" + DATASTREAM=$(echo "${OPEN_INDEX}" | grep -oE "$DATASTREAM_PATTERN") + CURRENT_WRITE_INDEX=$(/usr/sbin/so-elasticsearch-query _data_stream/$DATASTREAM | jq -r .data_streams[0].indices[-1].index_name) + if [ "${OPEN_INDEX}" != "${CURRENT_WRITE_INDEX}" ]; then + # This should not be a write index, so we should be allowed to delete it + /usr/sbin/so-elasticsearch-query ${OPEN_INDEX} -XDELETE + # Finally, write a log entry that says we deleted it. + echo "$(date) - Used disk space exceeds LOG_SIZE_LIMIT ({{LOG_SIZE_LIMIT}} GB) - Index ${OPEN_INDEX} deleted ..." >> ${LOG} + fi + if ! 
overlimit; then + exit + fi + done +done + From a38aa903ac5626740b8035bb742ada608ff0c2bb Mon Sep 17 00:00:00 2001 From: Wes Date: Tue, 28 Mar 2023 01:36:52 +0000 Subject: [PATCH 07/27] Configure cluster space settings --- salt/elasticsearch/init.sls | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/salt/elasticsearch/init.sls b/salt/elasticsearch/init.sls index b9caa2e89..e1d2b041b 100644 --- a/salt/elasticsearch/init.sls +++ b/salt/elasticsearch/init.sls @@ -372,6 +372,13 @@ so-es-cluster-settings: - docker_container: so-elasticsearch - file: es_sync_scripts +so-es-cluster-space-configure: + cmd.run: + - name: /usr/sbin/so-elasticsearch-cluster-space-configure + - cwd: /opt/so + - require: + - docker_container: so-elasticsearch + so-elasticsearch-ilm-policy-load: cmd.run: - name: /usr/sbin/so-elasticsearch-ilm-policy-load @@ -407,7 +414,6 @@ so-elasticsearch-roles-load: - docker_container: so-elasticsearch - file: es_sync_scripts - {% else %} {{sls}}_state_not_allowed: From c68235c169c7c3a383955c0e77f896f94ee6a6bf Mon Sep 17 00:00:00 2001 From: Wes Date: Tue, 28 Mar 2023 02:27:27 +0000 Subject: [PATCH 08/27] Fix Curator script name --- salt/curator/init.sls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/curator/init.sls b/salt/curator/init.sls index d6267881e..f60bc9e28 100644 --- a/salt/curator/init.sls +++ b/salt/curator/init.sls @@ -71,7 +71,7 @@ curclusterclose: curclusterdelete: file.managed: - - name: /usr/sbin/so-curator-delete-delete + - name: /usr/sbin/so-curator-delete - source: salt://curator/files/bin/so-curator-cluster-delete - user: 934 - group: 939 From e2290d8a8e0606884940c53b8405e31c3c8bc619 Mon Sep 17 00:00:00 2001 From: Wes Date: Tue, 28 Mar 2023 12:19:36 +0000 Subject: [PATCH 09/27] Remove unncessary Salt logic for Elasticsearch --- .../curator/files/bin/so-curator-cluster-delete-delete | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git 
a/salt/curator/files/bin/so-curator-cluster-delete-delete b/salt/curator/files/bin/so-curator-cluster-delete-delete index 4a1dec241..73076851c 100755 --- a/salt/curator/files/bin/so-curator-cluster-delete-delete +++ b/salt/curator/files/bin/so-curator-cluster-delete-delete @@ -5,13 +5,7 @@ # Elastic License 2.0. {% from 'vars/globals.map.jinja' import GLOBALS %} -{%- if grains['role'] in ['so-searchnode', 'so-heavynode'] %} - {%- set ELASTICSEARCH_HOST = GLOBALS.node_ip -%} - {%- set ELASTICSEARCH_PORT = salt['pillar.get']('elasticsearch:es_port') -%} -{%- elif grains['role'] in ['so-eval', 'so-managersearch', 'so-standalone', 'so-manager'] %} - {%- set ELASTICSEARCH_HOST = GLOBALS.manager_ip -%} - {%- set ELASTICSEARCH_PORT = salt['pillar.get']('manager:es_port') -%} -{%- endif -%} +{%- set ELASTICSEARCH_HOST = GLOBALS.node_ip -%} {%- set LOG_SIZE_LIMIT = salt['pillar.get']('elasticsearch:retention:log_size_limit') -%} # Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one @@ -19,8 +13,6 @@ # https://securityonion.net/license; you may not use this file except in compliance with the # Elastic License 2.0. - - LOG="/opt/so/log/curator/so-curator-cluster-delete.log" overlimit() { From 4352825cebf47897dc4277d17af2f7e66cdf806d Mon Sep 17 00:00:00 2001 From: Wes Date: Tue, 28 Mar 2023 12:25:49 +0000 Subject: [PATCH 10/27] Calculate log size limit every time so-curator-cluster-delete-delete runs --- salt/curator/files/bin/so-curator-cluster-delete-delete | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/salt/curator/files/bin/so-curator-cluster-delete-delete b/salt/curator/files/bin/so-curator-cluster-delete-delete index 73076851c..e2468b22e 100755 --- a/salt/curator/files/bin/so-curator-cluster-delete-delete +++ b/salt/curator/files/bin/so-curator-cluster-delete-delete @@ -5,8 +5,9 @@ # Elastic License 2.0. 
{% from 'vars/globals.map.jinja' import GLOBALS %} -{%- set ELASTICSEARCH_HOST = GLOBALS.node_ip -%} -{%- set LOG_SIZE_LIMIT = salt['pillar.get']('elasticsearch:retention:log_size_limit') -%} +{% import_yaml 'elasticsearch/defaults.yaml' as ELASTICDEFAULTS %} +{%- set ELASTICSEARCH_HOST = GLOBALS.node_ip -%} +{%- set RETENTION = salt['pillar.get']('elasticsearch:retention', ELASTICDEFAULTS.elasticsearch.retention, merge=true) -%} # Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one # or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at @@ -17,7 +18,7 @@ LOG="/opt/so/log/curator/so-curator-cluster-delete.log" overlimit() { - [[ $(/usr/sbin/so-elasticsearch-cluster-space-used) -gt "{{LOG_SIZE_LIMIT}}" ]] + [[ $(/usr/sbin/so-elasticsearch-cluster-space-used) -gt "/usr/sbin/so-elasticsearch-cluster-space-total{{ RETENTION.retention_pct}}" ]] } closedindices() { From 22e8e3be28d18c64c888acf9c2a78b256d288cf1 Mon Sep 17 00:00:00 2001 From: Wes Date: Tue, 28 Mar 2023 12:27:12 +0000 Subject: [PATCH 11/27] Remove the cluster space configuration script --- .../so-elasticsearch-cluster-space-configure | 41 ------------------- 1 file changed, 41 deletions(-) delete mode 100755 salt/common/tools/sbin/so-elasticsearch-cluster-space-configure diff --git a/salt/common/tools/sbin/so-elasticsearch-cluster-space-configure b/salt/common/tools/sbin/so-elasticsearch-cluster-space-configure deleted file mode 100755 index 70fb37e3e..000000000 --- a/salt/common/tools/sbin/so-elasticsearch-cluster-space-configure +++ /dev/null @@ -1,41 +0,0 @@ -#!/bin/bash -# -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - -. 
/usr/sbin/so-common - -# Determine available disk space -{% import_yaml 'elasticsearch/defaults.yaml' as ELASTICDEFAULTS %} -{% set ELASTICMERGED = salt['pillar.get']('elasticsearch:retention', ELASTICDEFAULTS.elasticsearch.retention, merge=true) %} - -# Wait for ElasticSearch to initialize -#COUNT=0 -ELASTICSEARCH_CONNECTED="no" -while [[ "$COUNT" -le 240 ]]; do - so-elasticsearch-query / -k --output /dev/null --silent --head --fail - if [ $? -eq 0 ]; then - ELASTICSEARCH_CONNECTED="yes" - break - else - ((COUNT+=1)) - sleep 1 - fi -done -if [ "$ELASTICSEARCH_CONNECTED" == "no" ]; then - echo - echo -e "Connection attempt timed out. Unable to connect to ElasticSearch. \nPlease try: \n -checking log(s) in /var/log/elasticsearch/\n -running 'sudo docker ps' \n -running 'sudo so-elastic-restart'" - echo - exit 1 -fi - -AVAILABLE_SPACE=$(/usr/sbin/so-elasticsearch-cluster-space-total {{ ELASTICMERGED.retention_pct }}) -ELASTICSEARCH_PILLAR="/opt/so/saltstack/local/pillar/elasticsearch/soc_elasticsearch.sls" -if grep -q log_size_limit $ELASTICSEARCH_PILLAR ; then - sed -i s"/log_size_limit:.*/log_size_limit: $AVAILABLE_SPACE/" $ELASTICSEARCH_PILLAR -else - echo " retention:" >> $ELASTICSEARCH_PILLAR - echo " log_size_limit: $AVAILABLE_SPACE" >> $ELASTICSEARCH_PILLAR -fi From f854d92cab671cea27c338659ba0a3182366f871 Mon Sep 17 00:00:00 2001 From: Wes Date: Tue, 28 Mar 2023 12:27:45 +0000 Subject: [PATCH 12/27] Remove the cluster space configuration script reference from the Elasticsearch state --- salt/elasticsearch/init.sls | 7 ------- 1 file changed, 7 deletions(-) diff --git a/salt/elasticsearch/init.sls b/salt/elasticsearch/init.sls index e1d2b041b..e128c9dae 100644 --- a/salt/elasticsearch/init.sls +++ b/salt/elasticsearch/init.sls @@ -372,13 +372,6 @@ so-es-cluster-settings: - docker_container: so-elasticsearch - file: es_sync_scripts -so-es-cluster-space-configure: - cmd.run: - - name: /usr/sbin/so-elasticsearch-cluster-space-configure - - cwd: /opt/so - - 
require: - - docker_container: so-elasticsearch - so-elasticsearch-ilm-policy-load: cmd.run: - name: /usr/sbin/so-elasticsearch-ilm-policy-load From adbc9df2223bd231944bfabca1db7f01a94a0686 Mon Sep 17 00:00:00 2001 From: Wes Date: Tue, 28 Mar 2023 12:54:32 +0000 Subject: [PATCH 13/27] Changes for LOG_SIZE_LIMIT --- salt/curator/files/bin/so-curator-cluster-delete-delete | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/salt/curator/files/bin/so-curator-cluster-delete-delete b/salt/curator/files/bin/so-curator-cluster-delete-delete index e2468b22e..109c3380b 100755 --- a/salt/curator/files/bin/so-curator-cluster-delete-delete +++ b/salt/curator/files/bin/so-curator-cluster-delete-delete @@ -15,10 +15,10 @@ # Elastic License 2.0. LOG="/opt/so/log/curator/so-curator-cluster-delete.log" +LOG_SIZE_LIMIT=$(/usr/sbin/so-elasticsearch-cluster-space-total {{ RETENTION.retention_pct}}) overlimit() { - - [[ $(/usr/sbin/so-elasticsearch-cluster-space-used) -gt "/usr/sbin/so-elasticsearch-cluster-space-total{{ RETENTION.retention_pct}}" ]] + [[ $(/usr/sbin/so-elasticsearch-cluster-space-used) -gt "${LOG_SIZE_LIMIT}" ]] } closedindices() { @@ -41,7 +41,7 @@ closedindices() { # This should not be a write index, so we should be allowed to delete it /usr/sbin/so-elasticsearch-query ${CLOSED_INDEX} -XDELETE # Finally, write a log entry that says we deleted it. - echo "$(date) - Used disk space exceeds LOG_SIZE_LIMIT ({{LOG_SIZE_LIMIT}} GB) - Index ${CLOSED_INDEX} deleted ..." >> ${LOG} + echo "$(date) - Used disk space exceeds LOG_SIZE_LIMIT (${LOG_SIZE_LIMIT} GB) - Index ${CLOSED_INDEX} deleted ..." >> ${LOG} fi if ! overlimit; then exit @@ -69,7 +69,7 @@ while overlimit; do # This should not be a write index, so we should be allowed to delete it /usr/sbin/so-elasticsearch-query ${OPEN_INDEX} -XDELETE # Finally, write a log entry that says we deleted it. 
- echo "$(date) - Used disk space exceeds LOG_SIZE_LIMIT ({{LOG_SIZE_LIMIT}} GB) - Index ${OPEN_INDEX} deleted ..." >> ${LOG} + echo "$(date) - Used disk space exceeds LOG_SIZE_LIMIT (${LOG_SIZE_LIMIT} GB) - Index ${OPEN_INDEX} deleted ..." >> ${LOG} fi if ! overlimit; then exit From b3b030958cbff6206871f3a35a9c115441c1ac88 Mon Sep 17 00:00:00 2001 From: Wes Date: Tue, 28 Mar 2023 13:17:23 +0000 Subject: [PATCH 14/27] Use explicit path to so-elasticsearch-query --- salt/curator/files/bin/so-curator-cluster-delete-delete | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/curator/files/bin/so-curator-cluster-delete-delete b/salt/curator/files/bin/so-curator-cluster-delete-delete index 109c3380b..ff77b9a39 100755 --- a/salt/curator/files/bin/so-curator-cluster-delete-delete +++ b/salt/curator/files/bin/so-curator-cluster-delete-delete @@ -55,7 +55,7 @@ while overlimit; do # First, get the list of open indices using _cat/indices?h=index,status | grep open | awk '{print $1}'. # Next, filter out any so-case indices and only select the remaining logstash-, so-, or .ds-logs- indices. # Then, sort by date by telling sort to use hyphen as delimiter and sort on the third field. 
- OPEN_INDICES=$(so-elasticsearch-query _cat/indices?h=index,status | grep open | awk '{print $1}' | grep -v "so-case" | grep -E "(logstash-|so-|.ds-logs-)" | sort -t- -k3) + OPEN_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep open | awk '{print $1}' | grep -v "so-case" | grep -E "(logstash-|so-|.ds-logs-)" | sort -t- -k3) #OLDEST_OPEN_INDEX=$(so-elasticsearch-query _cat/indices?h=index,status | grep open | awk '{print $1}' | grep -v "so-case" | grep -E "(logstash-|so-|.ds-logs-)" | sort -t- -k3 | head -1) for OPEN_INDEX in ${OPEN_INDICES}; do From a1394b9102461bbf7431c1e972dd929076ef2150 Mon Sep 17 00:00:00 2001 From: Wes Date: Tue, 28 Mar 2023 13:18:00 +0000 Subject: [PATCH 15/27] Use explicit path to so-elasticsearch-query --- salt/common/tools/sbin/so-elasticsearch-cluster-space-total | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/common/tools/sbin/so-elasticsearch-cluster-space-total b/salt/common/tools/sbin/so-elasticsearch-cluster-space-total index 962d515e2..f7bd151ad 100755 --- a/salt/common/tools/sbin/so-elasticsearch-cluster-space-total +++ b/salt/common/tools/sbin/so-elasticsearch-cluster-space-total @@ -13,7 +13,7 @@ TOTAL_AVAILABLE_SPACE=0 COUNT=0 ELASTICSEARCH_CONNECTED="no" while [[ "$COUNT" -le 240 ]]; do - so-elasticsearch-query / -k --output /dev/null --silent --head --fail + /usr/sbin/so-elasticsearch-query / -k --output /dev/null --silent --head --fail if [ $? 
-eq 0 ]; then ELASTICSEARCH_CONNECTED="yes" break @@ -37,7 +37,7 @@ else fi # Iterate through the output of _cat/allocation for each node in the cluster to determine the total available space -for i in $(so-elasticsearch-query _cat/allocation | awk '{print $5}'); do +for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | awk '{print $5}'); do size=$(echo $i | grep -oE '[0-9]+') unit=$(echo $i | grep -oE '[A-Za-z]+') if [ $unit = "tb" ]; then From 1ab253b8c38e989388f5d0d5d78cbafbb7d73655 Mon Sep 17 00:00:00 2001 From: Wes Date: Tue, 28 Mar 2023 13:18:14 +0000 Subject: [PATCH 16/27] Use explicit path to so-elasticsearch-query --- salt/common/tools/sbin/so-elasticsearch-cluster-space-used | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/common/tools/sbin/so-elasticsearch-cluster-space-used b/salt/common/tools/sbin/so-elasticsearch-cluster-space-used index 3e8832ba0..dbc35440d 100755 --- a/salt/common/tools/sbin/so-elasticsearch-cluster-space-used +++ b/salt/common/tools/sbin/so-elasticsearch-cluster-space-used @@ -10,7 +10,7 @@ TOTAL_AVAILABLE_SPACE=0 # Iterate through the output of _cat/allocation for each node in the cluster to determine the total available space -for i in $(so-elasticsearch-query _cat/allocation | awk '{print $3}'); do +for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | awk '{print $3}'); do size=$(echo $i | grep -oE '[0-9].*' | awk '{print int($1+0.5)}') unit=$(echo $i | grep -oE '[A-Za-z]+') if [ $unit = "tb" ]; then From 82efce0b310d84df16cac53a880bd45abaa221b8 Mon Sep 17 00:00:00 2001 From: Wes Date: Tue, 28 Mar 2023 13:23:23 +0000 Subject: [PATCH 17/27] Ensure so-curator-cluster-delete is run to manage so-curator-cluster-delete-delete --- salt/curator/init.sls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/curator/init.sls b/salt/curator/init.sls index f60bc9e28..26a8edc0e 100644 --- a/salt/curator/init.sls +++ b/salt/curator/init.sls @@ -138,7 +138,7 @@ so-curatorclusterclose: 
so-curatorclusterdeletecron: cron.present: - - name: /usr/sbin/so-curator-cluster-delete-delete > /opt/so/log/curator/cron-cluster-delete.log 2>&1 + - name: /usr/sbin/so-curator-cluster-delete > /opt/so/log/curator/cron-cluster-delete.log 2>&1 - user: root - minute: '*/5' - hour: '*' From 138b31270547873c81da3fced2da7a9ec058139a Mon Sep 17 00:00:00 2001 From: Wes Date: Tue, 28 Mar 2023 13:52:59 +0000 Subject: [PATCH 18/27] Fix script name --- salt/curator/init.sls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/curator/init.sls b/salt/curator/init.sls index 26a8edc0e..36f1261fe 100644 --- a/salt/curator/init.sls +++ b/salt/curator/init.sls @@ -71,7 +71,7 @@ curclusterclose: curclusterdelete: file.managed: - - name: /usr/sbin/so-curator-delete + - name: /usr/sbin/so-curator-cluster-delete - source: salt://curator/files/bin/so-curator-cluster-delete - user: 934 - group: 939 From e1bda5acfd80b88d9e5b74f99af833156b91e5b1 Mon Sep 17 00:00:00 2001 From: Wes Date: Tue, 28 Mar 2023 14:18:27 +0000 Subject: [PATCH 19/27] Update verbiage for so-curator-cluster-delete-delete --- .../bin/so-curator-cluster-delete-delete | 52 +++++++++---------- 1 file changed, 26 insertions(+), 26 deletions(-) diff --git a/salt/curator/files/bin/so-curator-cluster-delete-delete b/salt/curator/files/bin/so-curator-cluster-delete-delete index ff77b9a39..2818dd506 100755 --- a/salt/curator/files/bin/so-curator-cluster-delete-delete +++ b/salt/curator/files/bin/so-curator-cluster-delete-delete @@ -9,11 +9,6 @@ {%- set ELASTICSEARCH_HOST = GLOBALS.node_ip -%} {%- set RETENTION = salt['pillar.get']('elasticsearch:retention', ELASTICDEFAULTS.elasticsearch.retention, merge=true) -%} -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. 
Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - LOG="/opt/so/log/curator/so-curator-cluster-delete.log" LOG_SIZE_LIMIT=$(/usr/sbin/so-elasticsearch-cluster-space-total {{ RETENTION.retention_pct}}) @@ -26,22 +21,26 @@ closedindices() { # If we can't query Elasticsearch, then immediately return false. /usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep close > /dev/null 2>&1 [ $? -eq 1 ] && return false - # First, get the list of closed indices using _cat/indices?h=index,status | grep close | awk '{print $1}'. + # We need to determine the oldest closed index. + # First, get the list of closed indices using _cat/indices?h=index,status | grep close | awk '{print $1}'. # Next, filter out any so-case indices. # Finally, use grep's -q option to return true if there are any remaining logstash-, so-, or .ds-logs- indices. CLOSED_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep close | awk '{print $1}' | grep -v "so-case" | grep -q -E "(logstash-|so-|.ds-logs-)" | sort -t- -k3) - for CLOSED_INDEX in ${CLOSED_INDICES}; do - # Now that we've determined OLDEST_OPEN_INDEX, ask Elasticsearch to delete it. 
- # First, we need to check if the index is assigned as the current write index for a data stream + # We iterate through the closed indices + for CLOSED_INDEX in ${CLOSED_INDICES}; do + # Now that we've sorted the indices from oldest to newest, we need to check each index to see if it is assigned as the current write index for a data stream # To do so, we need to identify to which data stream this index is associated + # We extract the data stream name using the pattern below DATASTREAM_PATTERN="logs-[a-zA-Z_.]+-[a-zA-Z_.]+" DATASTREAM=$(echo "${CLOSED_INDEX}" | grep -oE "$DATASTREAM_PATTERN") + # We look up the data stream, and determine the write index CURRENT_WRITE_INDEX=$(/usr/sbin/so-elasticsearch-query _data_stream/$DATASTREAM | jq -r .data_streams[0].indices[-1].index_name) - if [ "${CLOSED_INDEX}" != "${CURRENT_WRITE_INDEX}" ]; then - # This should not be a write index, so we should be allowed to delete it - /usr/sbin/so-elasticsearch-query ${CLOSED_INDEX} -XDELETE - # Finally, write a log entry that says we deleted it. - echo "$(date) - Used disk space exceeds LOG_SIZE_LIMIT (${LOG_SIZE_LIMIT} GB) - Index ${CLOSED_INDEX} deleted ..." >> ${LOG} + # We make sure we are not trying to delete a write index + if [ "${CLOSED_INDEX}" != "${CURRENT_WRITE_INDEX}" ]; then + # This should not be a write index, so we should be allowed to delete it + /usr/sbin/so-elasticsearch-query ${CLOSED_INDEX} -XDELETE + # Finally, write a log entry that says we deleted it. + echo "$(date) - Used disk space exceeds LOG_SIZE_LIMIT (${LOG_SIZE_LIMIT} GB) - Index ${CLOSED_INDEX} deleted ..." >> ${LOG} fi if ! overlimit; then exit @@ -51,25 +50,26 @@ closedindices() { while overlimit; do - # We need to determine OLDEST_OPEN_INDEX: + # We need to determine the oldest open index. # First, get the list of open indices using _cat/indices?h=index,status | grep open | awk '{print $1}'. # Next, filter out any so-case indices and only select the remaining logstash-, so-, or .ds-logs- indices. 
# Then, sort by date by telling sort to use hyphen as delimiter and sort on the third field. OPEN_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep open | awk '{print $1}' | grep -v "so-case" | grep -E "(logstash-|so-|.ds-logs-)" | sort -t- -k3) - #OLDEST_OPEN_INDEX=$(so-elasticsearch-query _cat/indices?h=index,status | grep open | awk '{print $1}' | grep -v "so-case" | grep -E "(logstash-|so-|.ds-logs-)" | sort -t- -k3 | head -1) - - for OPEN_INDEX in ${OPEN_INDICES}; do - # Now that we've determined OLDEST_OPEN_INDEX, ask Elasticsearch to delete it. - # First, we need to check if the index is assigned as the current write index for a data stream + # We iterate through the open indices + for OPEN_INDEX in ${OPEN_INDICES}; do + # Now that we've sorted the indices from oldest to newest, we need to check each index to see if it is assigned as the current write index for a data stream # To do so, we need to identify to which data stream this index is associated + # We extract the data stream name using the pattern below DATASTREAM_PATTERN="logs-[a-zA-Z_.]+-[a-zA-Z_.]+" DATASTREAM=$(echo "${OPEN_INDEX}" | grep -oE "$DATASTREAM_PATTERN") - CURRENT_WRITE_INDEX=$(/usr/sbin/so-elasticsearch-query _data_stream/$DATASTREAM | jq -r .data_streams[0].indices[-1].index_name) - if [ "${OPEN_INDEX}" != "${CURRENT_WRITE_INDEX}" ]; then - # This should not be a write index, so we should be allowed to delete it - /usr/sbin/so-elasticsearch-query ${OPEN_INDEX} -XDELETE - # Finally, write a log entry that says we deleted it. - echo "$(date) - Used disk space exceeds LOG_SIZE_LIMIT (${LOG_SIZE_LIMIT} GB) - Index ${OPEN_INDEX} deleted ..." 
>> ${LOG} + # We look up the data stream, and determine the write index + CURRENT_WRITE_INDEX=$(/usr/sbin/so-elasticsearch-query _data_stream/$DATASTREAM | jq -r .data_streams[0].indices[-1].index_name) + # We make sure we are not trying to delete a write index + if [ "${OPEN_INDEX}" != "${CURRENT_WRITE_INDEX}" ]; then + # This should not be a write index, so we should be allowed to delete it + /usr/sbin/so-elasticsearch-query ${OPEN_INDEX} -XDELETE + # Finally, write a log entry that says we deleted it. + echo "$(date) - Used disk space exceeds LOG_SIZE_LIMIT (${LOG_SIZE_LIMIT} GB) - Index ${OPEN_INDEX} deleted ..." >> ${LOG} fi if ! overlimit; then exit From d494381e9d99273f5cd1149c3271f2818cac3ec6 Mon Sep 17 00:00:00 2001 From: Wes Date: Tue, 28 Mar 2023 14:18:49 +0000 Subject: [PATCH 20/27] Update verbiage for so-curator-cluster-delete --- .../files/bin/so-curator-cluster-delete | 19 ------------------- 1 file changed, 19 deletions(-) diff --git a/salt/curator/files/bin/so-curator-cluster-delete b/salt/curator/files/bin/so-curator-cluster-delete index e99e88659..0f7945b78 100755 --- a/salt/curator/files/bin/so-curator-cluster-delete +++ b/salt/curator/files/bin/so-curator-cluster-delete @@ -4,25 +4,6 @@ # https://securityonion.net/license; you may not use this file except in compliance with the # Elastic License 2.0. - -#. /usr/sbin/so-elastic-common -#. /etc/nsm/securityonion.conf - -# If logrotate script doesn't already exist, create it -#FILE="/etc/logrotate.d/so-curator-cluster-delete" -#if ! 
[ -f ${FILE} ]; then -# cat << EOF > ${FILE} -#/var/log/nsm/so-curator-cluster-delete.log { -# daily -# rotate 7 -# copytruncate -# compress -# missingok -# notifempty -#} -#EOF -#fi - # Avoid starting multiple instances APP=clusterdelete lf=/tmp/$APP-pidLockFile From 9411f5ca79f760f77aa43eb320eb68505477bb99 Mon Sep 17 00:00:00 2001 From: weslambert Date: Tue, 28 Mar 2023 10:54:21 -0400 Subject: [PATCH 21/27] Fix closed index function and check --- .../bin/so-curator-cluster-delete-delete | 95 ++++++++++--------- 1 file changed, 51 insertions(+), 44 deletions(-) diff --git a/salt/curator/files/bin/so-curator-cluster-delete-delete b/salt/curator/files/bin/so-curator-cluster-delete-delete index 2818dd506..4f136274e 100755 --- a/salt/curator/files/bin/so-curator-cluster-delete-delete +++ b/salt/curator/files/bin/so-curator-cluster-delete-delete @@ -9,67 +9,47 @@ {%- set ELASTICSEARCH_HOST = GLOBALS.node_ip -%} {%- set RETENTION = salt['pillar.get']('elasticsearch:retention', ELASTICDEFAULTS.elasticsearch.retention, merge=true) -%} +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. + LOG="/opt/so/log/curator/so-curator-cluster-delete.log" LOG_SIZE_LIMIT=$(/usr/sbin/so-elasticsearch-cluster-space-total {{ RETENTION.retention_pct}}) overlimit() { - [[ $(/usr/sbin/so-elasticsearch-cluster-space-used) -gt "${LOG_SIZE_LIMIT}" ]] + [[ $(/usr/sbin/so-elasticsearch-cluster-space-used) -gt "${LOG_SIZE_LIMIT}" ]] } closedindices() { - # If we can't query Elasticsearch, then immediately return false. /usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep close > /dev/null 2>&1 [ $? -eq 1 ] && return false - # We need to determine the oldest closed index. 
- # First, get the list of closed indices using _cat/indices?h=index,status | grep close | awk '{print $1}'. + # First, get the list of closed indices using _cat/indices?h=index,status | grep close | awk '{print $1}'. # Next, filter out any so-case indices. # Finally, use grep's -q option to return true if there are any remaining logstash-, so-, or .ds-logs- indices. - CLOSED_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep close | awk '{print $1}' | grep -v "so-case" | grep -q -E "(logstash-|so-|.ds-logs-)" | sort -t- -k3) - # We iterate through the closed indices - for CLOSED_INDEX in ${CLOSED_INDICES}; do - # Now that we've sorted the indices from oldest to newest, we need to check each index to see if it is assigned as the current write index for a data stream - # To do so, we need to identify to which data stream this index is associated - # We extract the data stream name using the pattern below - DATASTREAM_PATTERN="logs-[a-zA-Z_.]+-[a-zA-Z_.]+" - DATASTREAM=$(echo "${CLOSED_INDEX}" | grep -oE "$DATASTREAM_PATTERN") - # We look up the data stream, and determine the write index - CURRENT_WRITE_INDEX=$(/usr/sbin/so-elasticsearch-query _data_stream/$DATASTREAM | jq -r .data_streams[0].indices[-1].index_name) - # We make sure we are not trying to delete a write index - if [ "${CLOSED_INDEX}" != "${CURRENT_WRITE_INDEX}" ]; then - # This should not be a write index, so we should be allowed to delete it - /usr/sbin/so-elasticsearch-query ${CLOSED_INDEX} -XDELETE - # Finally, write a log entry that says we deleted it. - echo "$(date) - Used disk space exceeds LOG_SIZE_LIMIT (${LOG_SIZE_LIMIT} GB) - Index ${CLOSED_INDEX} deleted ..." >> ${LOG} - fi - if ! overlimit; then - exit - fi - done + /usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep close | awk '{print $1}' | grep -v "so-case" | grep -q -E "(logstash-|so-|.ds-logs-)" } -while overlimit; do +# Check for 2 conditions: +# 1. 
Are Elasticsearch indices using more disk space than LOG_SIZE_LIMIT? +# 2. Are there any closed indices that we can delete? +# If both conditions are true, keep on looping until one of the conditions is false. - # We need to determine the oldest open index. - # First, get the list of open indices using _cat/indices?h=index,status | grep open | awk '{print $1}'. - # Next, filter out any so-case indices and only select the remaining logstash-, so-, or .ds-logs- indices. - # Then, sort by date by telling sort to use hyphen as delimiter and sort on the third field. - OPEN_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep open | awk '{print $1}' | grep -v "so-case" | grep -E "(logstash-|so-|.ds-logs-)" | sort -t- -k3) - # We iterate through the open indices - for OPEN_INDEX in ${OPEN_INDICES}; do - # Now that we've sorted the indices from oldest to newest, we need to check each index to see if it is assigned as the current write index for a data stream +while overlimit && closedindices; do + CLOSED_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep close | awk '{print $1}' | grep -v "so-case" | grep -E "(logstash-|so-|.ds-logs-)" | sort -t- -k3) + for CLOSED_INDEX in ${CLOSED_INDICES}; do + # Now that we've determined OLDEST_OPEN_INDEX, ask Elasticsearch to delete it. 
+ # First, we need to check if the index is assigned as the current write index for a data stream # To do so, we need to identify to which data stream this index is associated - # We extract the data stream name using the pattern below DATASTREAM_PATTERN="logs-[a-zA-Z_.]+-[a-zA-Z_.]+" - DATASTREAM=$(echo "${OPEN_INDEX}" | grep -oE "$DATASTREAM_PATTERN") - # We look up the data stream, and determine the write index - CURRENT_WRITE_INDEX=$(/usr/sbin/so-elasticsearch-query _data_stream/$DATASTREAM | jq -r .data_streams[0].indices[-1].index_name) - # We make sure we are not trying to delete a write index - if [ "${OPEN_INDEX}" != "${CURRENT_WRITE_INDEX}" ]; then - # This should not be a write index, so we should be allowed to delete it - /usr/sbin/so-elasticsearch-query ${OPEN_INDEX} -XDELETE - # Finally, write a log entry that says we deleted it. - echo "$(date) - Used disk space exceeds LOG_SIZE_LIMIT (${LOG_SIZE_LIMIT} GB) - Index ${OPEN_INDEX} deleted ..." >> ${LOG} + DATASTREAM=$(echo "${CLOSED_INDEX}" | grep -oE "$DATASTREAM_PATTERN") + CURRENT_WRITE_INDEX=$(/usr/sbin/so-elasticsearch-query _data_stream/$DATASTREAM | jq -r .data_streams[0].indices[-1].index_name) + if [ "${CLOSED_INDEX}" != "${CURRENT_WRITE_INDEX}" ]; then + # This should not be a write index, so we should be allowed to delete it + /usr/sbin/so-elasticsearch-query ${CLOSED_INDEX} -XDELETE + # Finally, write a log entry that says we deleted it. + echo "$(date) - Used disk space exceeds LOG_SIZE_LIMIT (${LOG_SIZE_LIMIT} GB) - Index ${CLOSED_INDEX} deleted ..." >> ${LOG} fi if ! overlimit; then exit @@ -77,3 +57,30 @@ while overlimit; do done done +while overlimit; do + + # We need to determine OLDEST_OPEN_INDEX: + # First, get the list of open indices using _cat/indices?h=index,status | grep open | awk '{print $1}'. + # Next, filter out any so-case indices and only select the remaining logstash-, so-, or .ds-logs- indices. 
+ # Then, sort by date by telling sort to use hyphen as delimiter and sort on the third field. + OPEN_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep open | awk '{print $1}' | grep -v "so-case" | grep -E "(logstash-|so-|.ds-logs-)" | sort -t- -k3) + #OLDEST_OPEN_INDEX=$(so-elasticsearch-query _cat/indices?h=index,status | grep open | awk '{print $1}' | grep -v "so-case" | grep -E "(logstash-|so-|.ds-logs-)" | sort -t- -k3 | head -1) + + for OPEN_INDEX in ${OPEN_INDICES}; do + # Now that we've determined OLDEST_OPEN_INDEX, ask Elasticsearch to delete it. + # First, we need to check if the index is assigned as the current write index for a data stream + # To do so, we need to identify to which data stream this index is associated + DATASTREAM_PATTERN="logs-[a-zA-Z_.]+-[a-zA-Z_.]+" + DATASTREAM=$(echo "${OPEN_INDEX}" | grep -oE "$DATASTREAM_PATTERN") + CURRENT_WRITE_INDEX=$(/usr/sbin/so-elasticsearch-query _data_stream/$DATASTREAM | jq -r .data_streams[0].indices[-1].index_name) + if [ "${OPEN_INDEX}" != "${CURRENT_WRITE_INDEX}" ]; then + # This should not be a write index, so we should be allowed to delete it + /usr/sbin/so-elasticsearch-query ${OPEN_INDEX} -XDELETE + # Finally, write a log entry that says we deleted it. + echo "$(date) - Used disk space exceeds LOG_SIZE_LIMIT (${LOG_SIZE_LIMIT} GB) - Index ${OPEN_INDEX} deleted ..." >> ${LOG} + fi + if ! 
overlimit; then + exit + fi + done +done From 303fec6302b26370e8bb5c29d645f740e26e3dbb Mon Sep 17 00:00:00 2001 From: weslambert Date: Tue, 28 Mar 2023 10:59:39 -0400 Subject: [PATCH 22/27] Fix verbiage for so-curator-cluster-delete-delete --- .../bin/so-curator-cluster-delete-delete | 40 ++++++++++--------- 1 file changed, 22 insertions(+), 18 deletions(-) diff --git a/salt/curator/files/bin/so-curator-cluster-delete-delete b/salt/curator/files/bin/so-curator-cluster-delete-delete index 4f136274e..7d67a76ff 100755 --- a/salt/curator/files/bin/so-curator-cluster-delete-delete +++ b/salt/curator/files/bin/so-curator-cluster-delete-delete @@ -38,18 +38,21 @@ closedindices() { while overlimit && closedindices; do CLOSED_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep close | awk '{print $1}' | grep -v "so-case" | grep -E "(logstash-|so-|.ds-logs-)" | sort -t- -k3) - for CLOSED_INDEX in ${CLOSED_INDICES}; do - # Now that we've determined OLDEST_OPEN_INDEX, ask Elasticsearch to delete it. 
- # First, we need to check if the index is assigned as the current write index for a data stream + # We iterate through the closed indices + for CLOSED_INDEX in ${CLOSED_INDICES}; do + # Now that we've sorted the indices from oldest to newest, we need to check each index to see if it is assigned as the current write index for a data stream # To do so, we need to identify to which data stream this index is associated + # We extract the data stream name using the pattern below DATASTREAM_PATTERN="logs-[a-zA-Z_.]+-[a-zA-Z_.]+" DATASTREAM=$(echo "${CLOSED_INDEX}" | grep -oE "$DATASTREAM_PATTERN") + # We look up the data stream, and determine the write index CURRENT_WRITE_INDEX=$(/usr/sbin/so-elasticsearch-query _data_stream/$DATASTREAM | jq -r .data_streams[0].indices[-1].index_name) + # We make sure we are not trying to delete a write index if [ "${CLOSED_INDEX}" != "${CURRENT_WRITE_INDEX}" ]; then - # This should not be a write index, so we should be allowed to delete it - /usr/sbin/so-elasticsearch-query ${CLOSED_INDEX} -XDELETE - # Finally, write a log entry that says we deleted it. - echo "$(date) - Used disk space exceeds LOG_SIZE_LIMIT (${LOG_SIZE_LIMIT} GB) - Index ${CLOSED_INDEX} deleted ..." >> ${LOG} + # This should not be a write index, so we should be allowed to delete it + /usr/sbin/so-elasticsearch-query ${CLOSED_INDEX} -XDELETE + # Finally, write a log entry that says we deleted it. + echo "$(date) - Used disk space exceeds LOG_SIZE_LIMIT (${LOG_SIZE_LIMIT} GB) - Index ${CLOSED_INDEX} deleted ..." >> ${LOG} fi if ! overlimit; then exit @@ -59,25 +62,26 @@ done while overlimit; do - # We need to determine OLDEST_OPEN_INDEX: + # We need to determine the oldest open index. # First, get the list of open indices using _cat/indices?h=index,status | grep open | awk '{print $1}'. - # Next, filter out any so-case indices and only select the remaining logstash-, so-, or .ds-logs- indices. 
- # Then, sort by date by telling sort to use hyphen as delimiter and sort on the third field. + # Next, filter out any so-case indices and only select the remaining logstash-, so-, or .ds-logs- indices. + # Then, sort by date by telling sort to use hyphen as delimiter and sort on the third field. OPEN_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep open | awk '{print $1}' | grep -v "so-case" | grep -E "(logstash-|so-|.ds-logs-)" | sort -t- -k3) - #OLDEST_OPEN_INDEX=$(so-elasticsearch-query _cat/indices?h=index,status | grep open | awk '{print $1}' | grep -v "so-case" | grep -E "(logstash-|so-|.ds-logs-)" | sort -t- -k3 | head -1) - + # We iterate through the open indices for OPEN_INDEX in ${OPEN_INDICES}; do - # Now that we've determined OLDEST_OPEN_INDEX, ask Elasticsearch to delete it. - # First, we need to check if the index is assigned as the current write index for a data stream + # Now that we've sorted the indices from oldest to newest, we need to check each index to see if it is assigned as the current write index for a data stream # To do so, we need to identify to which data stream this index is associated + # We extract the data stream name using the pattern below DATASTREAM_PATTERN="logs-[a-zA-Z_.]+-[a-zA-Z_.]+" DATASTREAM=$(echo "${OPEN_INDEX}" | grep -oE "$DATASTREAM_PATTERN") + # We look up the data stream, and determine the write index CURRENT_WRITE_INDEX=$(/usr/sbin/so-elasticsearch-query _data_stream/$DATASTREAM | jq -r .data_streams[0].indices[-1].index_name) + # We make sure we are not trying to delete a write index if [ "${OPEN_INDEX}" != "${CURRENT_WRITE_INDEX}" ]; then - # This should not be a write index, so we should be allowed to delete it - /usr/sbin/so-elasticsearch-query ${OPEN_INDEX} -XDELETE - # Finally, write a log entry that says we deleted it. - echo "$(date) - Used disk space exceeds LOG_SIZE_LIMIT (${LOG_SIZE_LIMIT} GB) - Index ${OPEN_INDEX} deleted ..." 
>> ${LOG} + # This should not be a write index, so we should be allowed to delete it + /usr/sbin/so-elasticsearch-query ${OPEN_INDEX} -XDELETE + # Finally, write a log entry that says we deleted it. + echo "$(date) - Used disk space exceeds LOG_SIZE_LIMIT (${LOG_SIZE_LIMIT} GB) - Index ${OPEN_INDEX} deleted ..." >> ${LOG} fi if ! overlimit; then exit From 942182e826f7d14ecb003fd3a864388b4e7429dc Mon Sep 17 00:00:00 2001 From: weslambert Date: Tue, 28 Mar 2023 11:00:14 -0400 Subject: [PATCH 23/27] Remove additional copyright in so-curator-cluster-delete-delete --- salt/curator/files/bin/so-curator-cluster-delete-delete | 5 ----- 1 file changed, 5 deletions(-) diff --git a/salt/curator/files/bin/so-curator-cluster-delete-delete b/salt/curator/files/bin/so-curator-cluster-delete-delete index 7d67a76ff..fe41ca1f0 100755 --- a/salt/curator/files/bin/so-curator-cluster-delete-delete +++ b/salt/curator/files/bin/so-curator-cluster-delete-delete @@ -9,11 +9,6 @@ {%- set ELASTICSEARCH_HOST = GLOBALS.node_ip -%} {%- set RETENTION = salt['pillar.get']('elasticsearch:retention', ELASTICDEFAULTS.elasticsearch.retention, merge=true) -%} -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. 
- LOG="/opt/so/log/curator/so-curator-cluster-delete.log" LOG_SIZE_LIMIT=$(/usr/sbin/so-elasticsearch-cluster-space-total {{ RETENTION.retention_pct}}) From e77e645a361f3f15cb9a9e2e504a565fbbd994fd Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 28 Mar 2023 11:45:57 -0400 Subject: [PATCH 24/27] Update so-elasticsearch-cluster-space-total --- salt/common/tools/sbin/so-elasticsearch-cluster-space-total | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/salt/common/tools/sbin/so-elasticsearch-cluster-space-total b/salt/common/tools/sbin/so-elasticsearch-cluster-space-total index f7bd151ad..0e10b026c 100755 --- a/salt/common/tools/sbin/so-elasticsearch-cluster-space-total +++ b/salt/common/tools/sbin/so-elasticsearch-cluster-space-total @@ -7,6 +7,8 @@ . /usr/sbin/so-common +{% from 'vars/globals.map.jinja' import GLOBALS %} + TOTAL_AVAILABLE_SPACE=0 # Wait for ElasticSearch to initialize @@ -37,7 +39,11 @@ else fi # Iterate through the output of _cat/allocation for each node in the cluster to determine the total available space +{% if GLOBALS.role == 'so-manager' %} +for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | grep -v {{ GLOBLAS.manager }} | awk '{print $5}'); do +{% else %} for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | awk '{print $5}'); do +{% endif %} size=$(echo $i | grep -oE '[0-9]+') unit=$(echo $i | grep -oE '[A-Za-z]+') if [ $unit = "tb" ]; then From 74840264d74929307c65fe3372c94c8cb54cf989 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 28 Mar 2023 11:49:05 -0400 Subject: [PATCH 25/27] Update so-elasticsearch-cluster-space-used --- salt/common/tools/sbin/so-elasticsearch-cluster-space-used | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/salt/common/tools/sbin/so-elasticsearch-cluster-space-used b/salt/common/tools/sbin/so-elasticsearch-cluster-space-used index dbc35440d..b8ac4f6e6 100755 --- a/salt/common/tools/sbin/so-elasticsearch-cluster-space-used +++ 
b/salt/common/tools/sbin/so-elasticsearch-cluster-space-used @@ -6,11 +6,16 @@ # Elastic License 2.0. . /usr/sbin/so-common +{% from 'vars/globals.map.jinja' import GLOBALS %} TOTAL_AVAILABLE_SPACE=0 # Iterate through the output of _cat/allocation for each node in the cluster to determine the total available space +{% if GLOBALS.role == 'so-manager' %} +for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | grep -v {{ GLOBALS.manager }} | awk '{print $3}'); do +{% else %} for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | awk '{print $3}'); do +{% endif %} size=$(echo $i | grep -oE '[0-9].*' | awk '{print int($1+0.5)}') unit=$(echo $i | grep -oE '[A-Za-z]+') if [ $unit = "tb" ]; then From ed8f94463818ce99acf57710bcef16b7191085e7 Mon Sep 17 00:00:00 2001 From: Wes Date: Tue, 28 Mar 2023 15:55:33 +0000 Subject: [PATCH 26/27] Fix typo in GLOBALS reference --- salt/common/tools/sbin/so-elasticsearch-cluster-space-total | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/common/tools/sbin/so-elasticsearch-cluster-space-total b/salt/common/tools/sbin/so-elasticsearch-cluster-space-total index 0e10b026c..57360d505 100755 --- a/salt/common/tools/sbin/so-elasticsearch-cluster-space-total +++ b/salt/common/tools/sbin/so-elasticsearch-cluster-space-total @@ -40,7 +40,7 @@ fi # Iterate through the output of _cat/allocation for each node in the cluster to determine the total available space {% if GLOBALS.role == 'so-manager' %} -for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | grep -v {{ GLOBLAS.manager }} | awk '{print $5}'); do +for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | grep -v {{ GLOBALS.manager }} | awk '{print $5}'); do {% else %} for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | awk '{print $5}'); do {% endif %} From 6099a04e414d521fba6134c012ed9d7e7a8e6e28 Mon Sep 17 00:00:00 2001 From: Wes Date: Tue, 28 Mar 2023 16:04:54 +0000 Subject: [PATCH 27/27] Change how the size is determined, in 
case a decimal value is provided --- salt/common/tools/sbin/so-elasticsearch-cluster-space-total | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/common/tools/sbin/so-elasticsearch-cluster-space-total b/salt/common/tools/sbin/so-elasticsearch-cluster-space-total index 57360d505..3faa2a7a9 100755 --- a/salt/common/tools/sbin/so-elasticsearch-cluster-space-total +++ b/salt/common/tools/sbin/so-elasticsearch-cluster-space-total @@ -44,7 +44,7 @@ for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | grep -v {{ GLOBALS {% else %} for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | awk '{print $5}'); do {% endif %} - size=$(echo $i | grep -oE '[0-9]+') + size=$(echo $i | grep -oE '[0-9].*' | awk '{print int($1+0.5)}') unit=$(echo $i | grep -oE '[A-Za-z]+') if [ $unit = "tb" ]; then size=$(( size * 1024 ))