diff --git a/HOTFIX b/HOTFIX
index e87aa6dbd..e69de29bb 100644
--- a/HOTFIX
+++ b/HOTFIX
@@ -1 +0,0 @@
-CURATOR GRAFANA_DASH_ALLOW WAZUH
diff --git a/README.md b/README.md
index b64c72a34..580590829 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,6 @@
-## Security Onion 2.3.70
+## Security Onion 2.3.80
 
-Security Onion 2.3.70 is here!
+Security Onion 2.3.80 is here!
 
 ## Screenshots
 
diff --git a/VERIFY_ISO.md b/VERIFY_ISO.md
index f6cc31508..1a8c028f3 100644
--- a/VERIFY_ISO.md
+++ b/VERIFY_ISO.md
@@ -1,18 +1,18 @@
-### 2.3.70-WAZUH ISO image built on 2021/08/30
+### 2.3.80 ISO image built on 2021/09/27
 
 ### Download and Verify
 
-2.3.70-WAZUH ISO image:
-https://download.securityonion.net/file/securityonion/securityonion-2.3.70-WAZUH.iso
+2.3.80 ISO image:
+https://download.securityonion.net/file/securityonion/securityonion-2.3.80.iso
 
-MD5: CEDEF3C38089896C252F9E3C75F7CB15
-SHA1: FB420115C72DABDEB87C8B27F26E862C94628057
-SHA256: CC3E75A97163E9CD255DA0D9C3EB11922FA045651827F291025398943C1BC230
+MD5: 24F38563860416F4A8ABE18746913E14
+SHA1: F923C005F54EA2A17AB225ADA0DA46042707AAD9
+SHA256: 8E95D10AF664D9A406C168EC421D943CB23F0D0C1813C6C2DBA9B4E131984018
 
 Signature for ISO image:
-https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.70-WAZUH.iso.sig
+https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.80.iso.sig
 
 Signing key:
 https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/master/KEYS
@@ -26,22 +26,22 @@ wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/ma
 
 Download the signature file for the ISO:
 ```
-wget https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.70-WAZUH.iso.sig
+wget https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.80.iso.sig
 ```
 
 Download the ISO image:
 ```
-wget https://download.securityonion.net/file/securityonion/securityonion-2.3.70-WAZUH.iso
+wget https://download.securityonion.net/file/securityonion/securityonion-2.3.80.iso
 ```
 
 Verify the downloaded ISO image using the signature file:
 ```
-gpg --verify securityonion-2.3.70-WAZUH.iso.sig securityonion-2.3.70-WAZUH.iso
+gpg --verify securityonion-2.3.80.iso.sig securityonion-2.3.80.iso
 ```
 
 The output should show "Good signature" and the Primary key fingerprint should match what's shown below:
 ```
-gpg: Signature made Mon 30 Aug 2021 06:13:14 PM EDT using RSA key ID FE507013
+gpg: Signature made Mon 27 Sep 2021 08:55:01 AM EDT using RSA key ID FE507013
 gpg: Good signature from "Security Onion Solutions, LLC "
 gpg: WARNING: This key is not certified with a trusted signature!
 gpg: There is no indication that the signature belongs to the owner.
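As a quick supplementary check (not part of the upstream VERIFY_ISO.md, which relies on the GPG signature), the published SHA256 value above can also be compared against the downloaded ISO with sha256sum. This sketch assumes the ISO was saved with its default filename in the current directory:

```
echo "8E95D10AF664D9A406C168EC421D943CB23F0D0C1813C6C2DBA9B4E131984018  securityonion-2.3.80.iso" | sha256sum -c -
# Expected output: securityonion-2.3.80.iso: OK
```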
diff --git a/VERSION b/VERSION index e183d6a6c..d5c0d128d 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.3.70 +2.3.80 diff --git a/pillar/elasticsearch/eval.sls b/pillar/elasticsearch/eval.sls index 2dbb08f59..84ff89a23 100644 --- a/pillar/elasticsearch/eval.sls +++ b/pillar/elasticsearch/eval.sls @@ -1,7 +1,7 @@ elasticsearch: templates: - so/so-beats-template.json.jinja - - so/so-common-template.json + - so/so-common-template.json.jinja - so/so-firewall-template.json.jinja - so/so-flow-template.json.jinja - so/so-ids-template.json.jinja @@ -10,4 +10,4 @@ elasticsearch: - so/so-ossec-template.json.jinja - so/so-strelka-template.json.jinja - so/so-syslog-template.json.jinja - - so/so-zeek-template.json.jinja \ No newline at end of file + - so/so-zeek-template.json.jinja diff --git a/pillar/elasticsearch/manager.sls b/pillar/elasticsearch/manager.sls index 9ff97de5b..84ff89a23 100644 --- a/pillar/elasticsearch/manager.sls +++ b/pillar/elasticsearch/manager.sls @@ -1,7 +1,7 @@ elasticsearch: templates: - so/so-beats-template.json.jinja - - so/so-common-template.json + - so/so-common-template.json.jinja - so/so-firewall-template.json.jinja - so/so-flow-template.json.jinja - so/so-ids-template.json.jinja diff --git a/pillar/elasticsearch/search.sls b/pillar/elasticsearch/search.sls index 9ff97de5b..84ff89a23 100644 --- a/pillar/elasticsearch/search.sls +++ b/pillar/elasticsearch/search.sls @@ -1,7 +1,7 @@ elasticsearch: templates: - so/so-beats-template.json.jinja - - so/so-common-template.json + - so/so-common-template.json.jinja - so/so-firewall-template.json.jinja - so/so-flow-template.json.jinja - so/so-ids-template.json.jinja diff --git a/salt/common/tools/sbin/so-checkin b/salt/common/tools/sbin/so-checkin index c70701b71..4f0583906 100755 --- a/salt/common/tools/sbin/so-checkin +++ b/salt/common/tools/sbin/so-checkin @@ -17,4 +17,4 @@ . /usr/sbin/so-common -salt-call state.highstate -linfo +salt-call state.highstate -l info diff --git a/salt/common/tools/sbin/so-common b/salt/common/tools/sbin/so-common index 7ad74ad49..fe97c9b27 100755 --- a/salt/common/tools/sbin/so-common +++ b/salt/common/tools/sbin/so-common @@ -99,6 +99,15 @@ check_password() { return $? } +check_password_and_exit() { + local password=$1 + if ! check_password "$password"; then + echo "Password is invalid. Do not include single quotes, double quotes, dollar signs, and backslashes in the password." + exit 2 + fi + return 0 +} + check_elastic_license() { [ -n "$TESTING" ] && return diff --git a/salt/common/tools/sbin/so-elasticsearch-roles-load b/salt/common/tools/sbin/so-elasticsearch-roles-load new file mode 100644 index 000000000..d094163ee --- /dev/null +++ b/salt/common/tools/sbin/so-elasticsearch-roles-load @@ -0,0 +1,57 @@ +#!/bin/bash +# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+ +{%- set mainint = salt['pillar.get']('host:mainint') %} +{%- set MYIP = salt['grains.get']('ip_interfaces:' ~ mainint)[0] %} + +default_conf_dir=/opt/so/conf +ELASTICSEARCH_HOST="{{ MYIP }}" +ELASTICSEARCH_PORT=9200 + +# Define a default directory to load roles from +ELASTICSEARCH_ROLES="$default_conf_dir/elasticsearch/roles/" + +# Wait for ElasticSearch to initialize +echo -n "Waiting for ElasticSearch..." +COUNT=0 +ELASTICSEARCH_CONNECTED="no" +while [[ "$COUNT" -le 240 ]]; do + {{ ELASTICCURL }} -k --output /dev/null --silent --head --fail -L https://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT" + if [ $? -eq 0 ]; then + ELASTICSEARCH_CONNECTED="yes" + echo "connected!" + break + else + ((COUNT+=1)) + sleep 1 + echo -n "." + fi +done +if [ "$ELASTICSEARCH_CONNECTED" == "no" ]; then + echo + echo -e "Connection attempt timed out. Unable to connect to ElasticSearch. \nPlease try: \n -checking log(s) in /var/log/elasticsearch/\n -running 'sudo docker ps' \n -running 'sudo so-elastic-restart'" + echo +fi + +cd ${ELASTICSEARCH_ROLES} + +echo "Loading templates..." +for role in *; do + name=$(echo "$role" | cut -d. -f1) + so-elasticsearch-query _security/role/$name -XPUT -d @"$role" +done + +cd - >/dev/null diff --git a/salt/common/tools/sbin/so-fleet-user-add b/salt/common/tools/sbin/so-fleet-user-add index e905424a9..8ce7325c3 100755 --- a/salt/common/tools/sbin/so-fleet-user-add +++ b/salt/common/tools/sbin/so-fleet-user-add @@ -41,10 +41,7 @@ if [[ $? == 0 ]]; then fi read -rs FLEET_PASS -if ! check_password "$FLEET_PASS"; then - echo "Password is invalid. Please exclude single quotes, double quotes and backslashes from the password." - exit 2 -fi +check_password_and_exit "$FLEET_PASS" FLEET_HASH=$(docker exec so-soctopus python -c "import bcrypt; print(bcrypt.hashpw('$FLEET_PASS'.encode('utf-8'), bcrypt.gensalt()).decode('utf-8'));" 2>&1) if [[ $? -ne 0 ]]; then diff --git a/salt/common/tools/sbin/so-fleet-user-update b/salt/common/tools/sbin/so-fleet-user-update index e6a142d1d..793f7b622 100755 --- a/salt/common/tools/sbin/so-fleet-user-update +++ b/salt/common/tools/sbin/so-fleet-user-update @@ -52,7 +52,7 @@ fi read -rs FLEET_PASS if ! check_password "$FLEET_PASS"; then - echo "Password is invalid. Please exclude single quotes, double quotes and backslashes from the password." + echo "Password is invalid. Please exclude single quotes, double quotes, dollar signs, and backslashes from the password." 
exit 2 fi diff --git a/salt/common/tools/sbin/so-image-common b/salt/common/tools/sbin/so-image-common index 9b6e2174a..0b0f89698 100755 --- a/salt/common/tools/sbin/so-image-common +++ b/salt/common/tools/sbin/so-image-common @@ -17,6 +17,7 @@ # NOTE: This script depends on so-common IMAGEREPO=security-onion-solutions +STATUS_CONF='/opt/so/conf/so-status/so-status.conf' # shellcheck disable=SC2120 container_list() { @@ -137,6 +138,11 @@ update_docker_containers() { if [[ $result -eq 0 ]]; then cat $SIGNPATH/KEYS | gpg --import - >> "$LOG_FILE" 2>&1 fi + + # If downloading for soup, check if any optional images need to be pulled + if [[ $CURLTYPE == 'soup' ]]; then + grep -q "so-logscan" "$STATUS_CONF" && TRUSTED_CONTAINERS+=("so-logscan") + fi # Download the containers from the interwebs for i in "${TRUSTED_CONTAINERS[@]}" diff --git a/salt/common/tools/sbin/so-import-evtx b/salt/common/tools/sbin/so-import-evtx new file mode 100644 index 000000000..9e640beaa --- /dev/null +++ b/salt/common/tools/sbin/so-import-evtx @@ -0,0 +1,172 @@ +#!/bin/bash +# +# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +{%- set MANAGER = salt['grains.get']('master') %} +{%- set VERSION = salt['pillar.get']('global:soversion') %} +{%- set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} +{%- set MANAGERIP = salt['pillar.get']('global:managerip') -%} +{%- set URLBASE = salt['pillar.get']('global:url_base') %} +{% set ES_USER = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:user', '') %} +{% set ES_PW = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:pass', '') %} + +INDEX_DATE=$(date +'%Y.%m.%d') +RUNID=$(cat /dev/urandom | tr -dc 'a-z0-9' | fold -w 8 | head -n 1) + +. /usr/sbin/so-common + +function usage { + cat << EOF +Usage: $0 [evtx-file-2] [evtx-file-*] + +Imports one or more evtx files into Security Onion. The evtx files will be analyzed and made available for review in the Security Onion toolset. +EOF +} + + +function evtx2es() { + EVTX=$1 + HASH=$2 + + docker run --rm \ + -v "$EVTX:/tmp/$RUNID.evtx" \ + --entrypoint evtx2es \ + {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-pcaptools:{{ VERSION }} \ + --host {{ MANAGERIP }} --scheme https \ + --index so-beats-$INDEX_DATE --pipeline import.wel \ + --login {{ES_USER}} --pwd {{ES_PW}} \ + "/tmp/$RUNID.evtx" 1>/dev/null 2>/dev/null + + docker run --rm \ + -v "$EVTX:/tmp/import.evtx" \ + -v "/nsm/import/evtx-end_newest:/tmp/newest" \ + -v "/nsm/import/evtx-start_oldest:/tmp/oldest" \ + --entrypoint '/evtx_calc_timestamps.sh' \ + {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-pcaptools:{{ VERSION }} +} + +# if no parameters supplied, display usage +if [ $# -eq 0 ]; then + usage + exit 1 +fi + +# ensure this is a Manager node +require_manager + +# verify that all parameters are files +for i in "$@"; do + if ! [ -f "$i" ]; then + usage + echo "\"$i\" is not a valid file!" 
+ exit 2 + fi +done + +# track if we have any valid or invalid evtx +INVALID_EVTXS="no" +VALID_EVTXS="no" + +# track oldest start and newest end so that we can generate the Kibana search hyperlink at the end +START_OLDEST="2050-12-31" +END_NEWEST="1971-01-01" + +touch /nsm/import/evtx-start_oldest +touch /nsm/import/evtx-end_newest + +echo $START_OLDEST > /nsm/import/evtx-start_oldest +echo $END_NEWEST > /nsm/import/evtx-end_newest + +# paths must be quoted in case they include spaces +for EVTX in "$@"; do + EVTX=$(/usr/bin/realpath "$EVTX") + echo "Processing Import: ${EVTX}" + + # generate a unique hash to assist with dedupe checks + HASH=$(md5sum "${EVTX}" | awk '{ print $1 }') + HASH_DIR=/nsm/import/${HASH} + echo "- assigning unique identifier to import: $HASH" + + if [ -d $HASH_DIR ]; then + echo "- this EVTX has already been imported; skipping" + INVALID_EVTXS="yes" + else + VALID_EVTXS="yes" + + EVTX_DIR=$HASH_DIR/evtx + mkdir -p $EVTX_DIR + + # import evtx and write them to import ingest pipeline + echo "- importing logs to Elasticsearch..." + evtx2es "${EVTX}" $HASH + + # compare $START to $START_OLDEST + START=$(cat /nsm/import/evtx-start_oldest) + START_COMPARE=$(date -d $START +%s) + START_OLDEST_COMPARE=$(date -d $START_OLDEST +%s) + if [ $START_COMPARE -lt $START_OLDEST_COMPARE ]; then + START_OLDEST=$START + fi + + # compare $ENDNEXT to $END_NEWEST + END=$(cat /nsm/import/evtx-end_newest) + ENDNEXT=`date +%Y-%m-%d --date="$END 1 day"` + ENDNEXT_COMPARE=$(date -d $ENDNEXT +%s) + END_NEWEST_COMPARE=$(date -d $END_NEWEST +%s) + if [ $ENDNEXT_COMPARE -gt $END_NEWEST_COMPARE ]; then + END_NEWEST=$ENDNEXT + fi + + cp -f "${EVTX}" "${EVTX_DIR}"/data.evtx + chmod 644 "${EVTX_DIR}"/data.evtx + + fi # end of valid evtx + + echo + +done # end of for-loop processing evtx files + +# remove temp files +echo "Cleaning up:" +for TEMP_EVTX in ${TEMP_EVTXS[@]}; do + echo "- removing temporary evtx $TEMP_EVTX" + rm -f $TEMP_EVTX +done + +# output final messages +if [ "$INVALID_EVTXS" = "yes" ]; then + echo + echo "Please note! One or more evtx was invalid! You can scroll up to see which ones were invalid." +fi + +START_OLDEST_FORMATTED=`date +%Y-%m-%d --date="$START_OLDEST"` +START_OLDEST_SLASH=$(echo $START_OLDEST_FORMATTED | sed -e 's/-/%2F/g') +END_NEWEST_SLASH=$(echo $END_NEWEST | sed -e 's/-/%2F/g') + +if [ "$VALID_EVTXS" = "yes" ]; then +cat << EOF + +Import complete! + +You can use the following hyperlink to view data in the time range of your import. You can triple-click to quickly highlight the entire hyperlink and you can then copy it into your browser: +https://{{ URLBASE }}/#/hunt?q=import.id:${RUNID}%20%7C%20groupby%20event.module%20event.dataset&t=${START_OLDEST_SLASH}%2000%3A00%3A00%20AM%20-%20${END_NEWEST_SLASH}%2000%3A00%3A00%20AM&z=UTC + +or you can manually set your Time Range to be (in UTC): +From: $START_OLDEST_FORMATTED To: $END_NEWEST + +Please note that it may take 30 seconds or more for events to appear in Hunt. 
+EOF +fi diff --git a/salt/common/tools/sbin/so-playbook-import b/salt/common/tools/sbin/so-playbook-import new file mode 100644 index 000000000..107851278 --- /dev/null +++ b/salt/common/tools/sbin/so-playbook-import @@ -0,0 +1,22 @@ +#!/bin/bash + +# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +. /usr/sbin/so-common + +ENABLEPLAY=${1:-False} + +docker exec so-soctopus /usr/local/bin/python -c "import playbook; print(playbook.play_import($ENABLEPLAY))" diff --git a/salt/common/tools/sbin/so-raid-status b/salt/common/tools/sbin/so-raid-status index e817554d3..7ba89b1c8 100755 --- a/salt/common/tools/sbin/so-raid-status +++ b/salt/common/tools/sbin/so-raid-status @@ -20,6 +20,9 @@ appliance_check() { {%- if salt['grains.get']('sosmodel', '') %} APPLIANCE=1 + {%- if grains['sosmodel'] in ['SO2AMI01', 'SO2GCI01', 'SO2AZI01'] %} + exit 0 + {%- endif %} DUDEYOUGOTADELL=$(dmidecode |grep Dell) if [[ -n $DUDEYOUGOTADELL ]]; then APPTYPE=dell @@ -90,7 +93,7 @@ check_software_raid {%- endif %} if [[ -n $SWRAID ]]; then - if [[ $SWRAID == '0' && BOSSRAID == '0' ]]; then + if [[ $SWRAID == '0' && $BOSSRAID == '0' ]]; then RAIDSTATUS=0 else RAIDSTATUS=1 diff --git a/salt/common/tools/sbin/so-rule-update b/salt/common/tools/sbin/so-rule-update index 397719d61..a3c2616a4 100755 --- a/salt/common/tools/sbin/so-rule-update +++ b/salt/common/tools/sbin/so-rule-update @@ -1,13 +1,10 @@ #!/bin/bash -got_root() { - # Make sure you are root - if [ "$(id -u)" -ne 0 ]; then - echo "This script must be run using sudo!" - exit 1 - fi +. /usr/sbin/so-common -} +argstr="" +for arg in "$@"; do + argstr="${argstr} \"${arg}\"" +done -got_root -docker exec so-idstools /bin/bash -c "cd /opt/so/idstools/etc && idstools-rulecat $1" +docker exec so-idstools /bin/bash -c "cd /opt/so/idstools/etc && idstools-rulecat --force ${argstr}" diff --git a/salt/common/tools/sbin/so-thehive-user-add b/salt/common/tools/sbin/so-thehive-user-add index 5d174fdf1..9cbe0cd56 100755 --- a/salt/common/tools/sbin/so-thehive-user-add +++ b/salt/common/tools/sbin/so-thehive-user-add @@ -41,10 +41,7 @@ if [[ $? == 0 ]]; then fi read -rs THEHIVE_PASS -if ! check_password "$THEHIVE_PASS"; then - echo "Password is invalid. Please exclude single quotes, double quotes and backslashes from the password." 
- exit 2 -fi +check_password_and_exit "$THEHIVE_PASS" # Create new user in TheHive resp=$(curl -sk -XPOST -H "Authorization: Bearer $THEHIVE_KEY" -H "Content-Type: application/json" -L "https://$THEHVIE_API_URL/user" -d "{\"login\" : \"$THEHIVE_USER\",\"name\" : \"$THEHIVE_USER\",\"roles\" : [\"read\",\"alert\",\"write\",\"admin\"],\"preferences\" : \"{}\",\"password\" : \"$THEHIVE_PASS\"}") diff --git a/salt/common/tools/sbin/so-thehive-user-update b/salt/common/tools/sbin/so-thehive-user-update index 6df199f6a..fdda5eaa7 100755 --- a/salt/common/tools/sbin/so-thehive-user-update +++ b/salt/common/tools/sbin/so-thehive-user-update @@ -42,7 +42,7 @@ fi read -rs THEHIVE_PASS if ! check_password "$THEHIVE_PASS"; then - echo "Password is invalid. Please exclude single quotes, double quotes and backslashes from the password." + echo "Password is invalid. Please exclude single quotes, double quotes, dollar signs, and backslashes from the password." exit 2 fi diff --git a/salt/common/tools/sbin/so-user b/salt/common/tools/sbin/so-user index 742c3ca5d..f7604d298 100755 --- a/salt/common/tools/sbin/so-user +++ b/salt/common/tools/sbin/so-user @@ -18,11 +18,17 @@ source $(dirname $0)/so-common -if [[ $# -lt 1 || $# -gt 2 ]]; then - echo "Usage: $0 [email]" +DEFAULT_ROLE=analyst + +if [[ $# -lt 1 || $# -gt 3 ]]; then + echo "Usage: $0 [email] [role]" + echo "" + echo " where is one of the following:" echo "" echo " list: Lists all user email addresses currently defined in the identity system" - echo " add: Adds a new user to the identity system; requires 'email' parameter" + echo " add: Adds a new user to the identity system; requires 'email' parameter, while 'role' parameter is optional and defaults to $DEFAULT_ROLE" + echo " addrole: Grants a role to an existing user; requires 'email' and 'role' parameters" + echo " delrole: Removes a role from an existing user; requires 'email' and 'role' parameters" echo " update: Updates a user's password; requires 'email' parameter" echo " enable: Enables a user; requires 'email' parameter" echo " disable: Disables a user; requires 'email' parameter" @@ -36,14 +42,18 @@ fi operation=$1 email=$2 +role=$3 kratosUrl=${KRATOS_URL:-http://127.0.0.1:4434} databasePath=${KRATOS_DB_PATH:-/opt/so/conf/kratos/db/db.sqlite} bcryptRounds=${BCRYPT_ROUNDS:-12} elasticUsersFile=${ELASTIC_USERS_FILE:-/opt/so/saltstack/local/salt/elasticsearch/files/users} elasticRolesFile=${ELASTIC_ROLES_FILE:-/opt/so/saltstack/local/salt/elasticsearch/files/users_roles} +socRolesFile=${SOC_ROLES_FILE:-/opt/so/conf/soc/soc_users_roles} esUID=${ELASTIC_UID:-930} esGID=${ELASTIC_GID:-930} +soUID=${SOCORE_UID:-939} +soGID=${SOCORE_GID:-939} function lock() { # Obtain file descriptor lock @@ -80,7 +90,7 @@ function findIdByEmail() { email=$1 response=$(curl -Ss -L ${kratosUrl}/identities) - identityId=$(echo "${response}" | jq ".[] | select(.verifiable_addresses[0].value == \"$email\") | .id") + identityId=$(echo "${response}" | jq -r ".[] | select(.verifiable_addresses[0].value == \"$email\") | .id") echo $identityId } @@ -89,17 +99,20 @@ function validatePassword() { len=$(expr length "$password") if [[ $len -lt 6 ]]; then - echo "Password does not meet the minimum requirements" - exit 2 + fail "Password does not meet the minimum requirements" fi + check_password_and_exit "$password" } function validateEmail() { email=$1 # 
(?:[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*|"(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21\x23-\x5b\x5d-\x7f]|\\[\x01-\x09\x0b\x0c\x0e-\x7f])*")@(?:(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?|\[(?:(?:(2(5[0-5]|[0-4][0-9])|1[0-9][0-9]|[1-9]?[0-9]))\.){3}(?:(2(5[0-5]|[0-4][0-9])|1[0-9][0-9]|[1-9]?[0-9])|[a-z0-9-]*[a-z0-9]:(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21-\x5a\x53-\x7f]|\\[\x01-\x09\x0b\x0c\x0e-\x7f])+)\]) if [[ ! "$email" =~ ^[[:alnum:]._%+-]+@[[:alnum:].-]+\.[[:alpha:]]{2,}$ ]]; then - echo "Email address is invalid" - exit 3 + fail "Email address is invalid" + fi + + if [[ "$email" =~ [A-Z] ]]; then + fail "Email addresses cannot contain uppercase letters" fi } @@ -127,21 +140,47 @@ function updatePassword() { validatePassword "$password" fi - if [[ -n $identityId ]]; then + if [[ -n "$identityId" ]]; then # Generate password hash passwordHash=$(hashPassword "$password") # Update DB with new hash - echo "update identity_credentials set config=CAST('{\"hashed_password\":\"$passwordHash\"}' as BLOB) where identity_id=${identityId};" | sqlite3 "$databasePath" + echo "update identity_credentials set config=CAST('{\"hashed_password\":\"$passwordHash\"}' as BLOB) where identity_id='${identityId}';" | sqlite3 "$databasePath" [[ $? != 0 ]] && fail "Unable to update password" fi } -function createElasticFile() { +function createFile() { filename=$1 - tmpFile=${filename} - truncate -s 0 "$tmpFile" - chmod 600 "$tmpFile" - chown "${esUID}:${esGID}" "$tmpFile" + uid=$2 + gid=$3 + + mkdir -p $(dirname "$filename") + truncate -s 0 "$filename" + chmod 600 "$filename" + chown "${uid}:${gid}" "$filename" +} + +function ensureRoleFileExists() { + if [[ ! -f "$socRolesFile" || ! -s "$socRolesFile" ]]; then + # Generate the new users file + rolesTmpFile="${socRolesFile}.tmp" + createFile "$rolesTmpFile" "$soUID" "$soGID" + + if [[ -f "$databasePath" ]]; then + echo "Migrating roles to new file: $socRolesFile" + + echo "select 'superuser:' || id from identities;" | sqlite3 "$databasePath" \ + >> "$rolesTmpFile" + [[ $? != 0 ]] && fail "Unable to read identities from database" + + echo "The following users have all been migrated with the super user role:" + cat "${rolesTmpFile}" + else + echo "Database file does not exist yet, installation is likely not yet complete." + fi + + mv "${rolesTmpFile}" "${socRolesFile}" + fi } function syncElasticSystemUser() { @@ -172,33 +211,31 @@ function syncElasticSystemRole() { } function syncElastic() { - echo "Syncing users between SOC and Elastic..." + echo "Syncing users and roles between SOC and Elastic..." 
+ usersTmpFile="${elasticUsersFile}.tmp" + createFile "${usersTmpFile}" "$esUID" "$esGID" rolesTmpFile="${elasticRolesFile}.tmp" - createElasticFile "${usersTmpFile}" - createElasticFile "${rolesTmpFile}" + createFile "${rolesTmpFile}" "$esUID" "$esGID" authPillarJson=$(lookup_salt_value "auth" "elasticsearch" "pillar" "json") syncElasticSystemUser "$authPillarJson" "so_elastic_user" "$usersTmpFile" - syncElasticSystemRole "$authPillarJson" "so_elastic_user" "superuser" "$rolesTmpFile" - syncElasticSystemUser "$authPillarJson" "so_kibana_user" "$usersTmpFile" - syncElasticSystemRole "$authPillarJson" "so_kibana_user" "superuser" "$rolesTmpFile" - syncElasticSystemUser "$authPillarJson" "so_logstash_user" "$usersTmpFile" - syncElasticSystemRole "$authPillarJson" "so_logstash_user" "superuser" "$rolesTmpFile" - syncElasticSystemUser "$authPillarJson" "so_beats_user" "$usersTmpFile" - syncElasticSystemRole "$authPillarJson" "so_beats_user" "superuser" "$rolesTmpFile" - syncElasticSystemUser "$authPillarJson" "so_monitor_user" "$usersTmpFile" + + syncElasticSystemRole "$authPillarJson" "so_elastic_user" "superuser" "$rolesTmpFile" + syncElasticSystemRole "$authPillarJson" "so_kibana_user" "superuser" "$rolesTmpFile" + syncElasticSystemRole "$authPillarJson" "so_logstash_user" "superuser" "$rolesTmpFile" + syncElasticSystemRole "$authPillarJson" "so_beats_user" "superuser" "$rolesTmpFile" syncElasticSystemRole "$authPillarJson" "so_monitor_user" "remote_monitoring_collector" "$rolesTmpFile" syncElasticSystemRole "$authPillarJson" "so_monitor_user" "remote_monitoring_agent" "$rolesTmpFile" syncElasticSystemRole "$authPillarJson" "so_monitor_user" "monitoring_user" "$rolesTmpFile" - if [[ -f "$databasePath" ]]; then - # Generate the new users file + if [[ -f "$databasePath" && -f "$socRolesFile" ]]; then + # Append the SOC users echo "select '{\"user\":\"' || ici.identifier || '\", \"data\":' || ic.config || '}'" \ "from identity_credential_identifiers ici, identity_credentials ic " \ "where ici.identity_credential_id=ic.id and instr(ic.config, 'hashed_password') " \ @@ -208,17 +245,18 @@ function syncElastic() { >> "$usersTmpFile" [[ $? != 0 ]] && fail "Unable to read credential hashes from database" - # Generate the new users_roles file - - echo "select 'superuser:' || ici.identifier " \ - "from identity_credential_identifiers ici, identity_credentials ic " \ - "where ici.identity_credential_id=ic.id and instr(ic.config, 'hashed_password') " \ - "order by ici.identifier;" | \ - sqlite3 "$databasePath" \ - >> "$rolesTmpFile" - [[ $? != 0 ]] && fail "Unable to read credential IDs from database" + # Append the user roles + while IFS="" read -r rolePair || [ -n "$rolePair" ]; do + userId=$(echo "$rolePair" | cut -d: -f2) + role=$(echo "$rolePair" | cut -d: -f1) + echo "select '$role:' || ici.identifier " \ + "from identity_credential_identifiers ici, identity_credentials ic " \ + "where ici.identity_credential_id=ic.id and ic.identity_id = '$userId';" | \ + sqlite3 "$databasePath" >> "$rolesTmpFile" + done < "$socRolesFile" + else - echo "Database file does not exist yet, skipping users export" + echo "Database file or soc roles file does not exist yet, skipping users export" fi if [[ -s "${usersTmpFile}" ]]; then @@ -236,15 +274,22 @@ function syncElastic() { } function syncAll() { + ensureRoleFileExists + + # Check if a sync is needed. 
Sync is not needed if the following are true: + # - user database entries are all older than the elastic users file + # - soc roles file last modify date is older than the elastic roles file if [[ -z "$FORCE_SYNC" && -f "$databasePath" && -f "$elasticUsersFile" ]]; then usersFileAgeSecs=$(echo $(($(date +%s) - $(date +%s -r "$elasticUsersFile")))) staleCount=$(echo "select count(*) from identity_credentials where updated_at >= Datetime('now', '-${usersFileAgeSecs} seconds');" \ | sqlite3 "$databasePath") - if [[ "$staleCount" == "0" ]]; then + if [[ "$staleCount" == "0" && "$elasticRolesFile" -nt "$socRolesFile" ]]; then return 1 fi fi + syncElastic + return 0 } @@ -252,11 +297,64 @@ function listUsers() { response=$(curl -Ss -L ${kratosUrl}/identities) [[ $? != 0 ]] && fail "Unable to communicate with Kratos" - echo "${response}" | jq -r ".[] | .verifiable_addresses[0].value" | sort + users=$(echo "${response}" | jq -r ".[] | .verifiable_addresses[0].value" | sort) + for user in $users; do + roles=$(grep "$user" "$elasticRolesFile" | cut -d: -f1 | tr '\n' ' ') + echo "$user: $roles" + done +} + +function addUserRole() { + email=$1 + role=$2 + + adjustUserRole "$email" "$role" "add" +} + +function deleteUserRole() { + email=$1 + role=$2 + + adjustUserRole "$email" "$role" "del" +} + +function adjustUserRole() { + email=$1 + role=$2 + op=$3 + + identityId=$(findIdByEmail "$email") + [[ ${identityId} == "" ]] && fail "User not found" + + ensureRoleFileExists + + filename="$socRolesFile" + hasRole=0 + grep "$role:" "$socRolesFile" | grep -q "$identityId" && hasRole=1 + if [[ "$op" == "add" ]]; then + if [[ "$hasRole" == "1" ]]; then + echo "User '$email' already has the role: $role" + return 1 + else + echo "$role:$identityId" >> "$filename" + fi + elif [[ "$op" == "del" ]]; then + if [[ "$hasRole" -ne 1 ]]; then + fail "User '$email' does not have the role: $role" + else + sed "/^$role:$identityId\$/d" "$filename" > "$filename.tmp" + cat "$filename".tmp > "$filename" + rm -f "$filename".tmp + fi + else + fail "Unsupported role adjustment operation: $op" + fi + return 0 } function createUser() { email=$1 + role=$2 now=$(date -u +%FT%TZ) addUserJson=$(cat < "$rolesTmpFile" + mv "$rolesTmpFile" "$socRolesFile" } case "${operation}" in @@ -339,7 +443,7 @@ case "${operation}" in lock validateEmail "$email" updatePassword - createUser "$email" + createUser "$email" "${role:-$DEFAULT_ROLE}" syncAll echo "Successfully added new user to SOC" check_container thehive && echo "$password" | so-thehive-user-add "$email" @@ -351,6 +455,31 @@ case "${operation}" in listUsers ;; + "addrole") + verifyEnvironment + [[ "$email" == "" ]] && fail "Email address must be provided" + [[ "$role" == "" ]] && fail "Role must be provided" + + lock + validateEmail "$email" + if addUserRole "$email" "$role"; then + syncElastic + echo "Successfully added role to user" + fi + ;; + + "delrole") + verifyEnvironment + [[ "$email" == "" ]] && fail "Email address must be provided" + [[ "$role" == "" ]] && fail "Role must be provided" + + lock + validateEmail "$email" + deleteUserRole "$email" "$role" + syncElastic + echo "Successfully removed role from user" + ;; + "update") verifyEnvironment [[ "$email" == "" ]] && fail "Email address must be provided" diff --git a/salt/common/tools/sbin/so-yara-update b/salt/common/tools/sbin/so-yara-update index 9f749727f..025cf8c70 100755 --- a/salt/common/tools/sbin/so-yara-update +++ b/salt/common/tools/sbin/so-yara-update @@ -1,5 +1,4 @@ #!/bin/bash - # Copyright 
2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC # # This program is free software: you can redistribute it and/or modify @@ -20,13 +19,8 @@ echo "Starting to check for yara rule updates at $(date)..." output_dir="/opt/so/saltstack/default/salt/strelka/rules" mkdir -p $output_dir - repos="$output_dir/repos.txt" -ignorefile="$output_dir/ignore.txt" - -deletecounter=0 newcounter=0 -updatecounter=0 {% if ISAIRGAP is sameas true %} @@ -35,58 +29,21 @@ echo "Airgap mode enabled." clone_dir="/nsm/repo/rules/strelka" repo_name="signature-base" mkdir -p /opt/so/saltstack/default/salt/strelka/rules/signature-base - +# Ensure a copy of the license is available for the rules [ -f $clone_dir/LICENSE ] && cp $clone_dir/$repo_name/LICENSE $output_dir/$repo_name # Copy over rules for i in $(find $clone_dir/yara -name "*.yar*"); do rule_name=$(echo $i | awk -F '/' '{print $NF}') - repo_sum=$(sha256sum $i | awk '{print $1}') - - # Check rules against those in ignore list -- don't copy if ignored. - if ! grep -iq $rule_name $ignorefile; then - existing_rules=$(find $output_dir/$repo_name/ -name $rule_name | wc -l) - - # For existing rules, check to see if they need to be updated, by comparing checksums - if [ $existing_rules -gt 0 ];then - local_sum=$(sha256sum $output_dir/$repo_name/$rule_name | awk '{print $1}') - if [ "$repo_sum" != "$local_sum" ]; then - echo "Checksums do not match!" - echo "Updating $rule_name..." - cp $i $output_dir/$repo_name; - ((updatecounter++)) - fi - else - # If rule doesn't exist already, we'll add it - echo "Adding new rule: $rule_name..." - cp $i $output_dir/$repo_name - ((newcounter++)) - fi - fi; -done - -# Check to see if we have any old rules that need to be removed -for i in $(find $output_dir/$repo_name -name "*.yar*" | awk -F '/' '{print $NF}'); do - is_repo_rule=$(find $clone_dir -name "$i" | wc -l) - if [ $is_repo_rule -eq 0 ]; then - echo "Could not find $i in source $repo_name repo...removing from $output_dir/$repo_name..." - rm $output_dir/$repo_name/$i - ((deletecounter++)) - fi + echo "Adding rule: $rule_name..." + cp $i $output_dir/$repo_name + ((newcounter++)) done echo "Done!" if [ "$newcounter" -gt 0 ];then - echo "$newcounter new rules added." -fi - -if [ "$updatecounter" -gt 0 ];then - echo "$updatecounter rules updated." -fi - -if [ "$deletecounter" -gt 0 ];then - echo "$deletecounter rules removed because they were deprecated or don't exist in the source repo." + echo "$newcounter rules added." fi {% else %} @@ -99,69 +56,32 @@ if [ "$gh_status" == "200" ] || [ "$gh_status" == "301" ]; then if ! $(echo "$repo" | grep -qE '^#'); then # Remove old repo if existing bc of previous error condition or unexpected disruption repo_name=`echo $repo | awk -F '/' '{print $NF}'` - [ -d $repo_name ] && rm -rf $repo_name + [ -d $output_dir/$repo_name ] && rm -rf $output_dir/$repo_name # Clone repo and make appropriate directories for rules - git clone $repo $clone_dir/$repo_name echo "Analyzing rules from $clone_dir/$repo_name..." mkdir -p $output_dir/$repo_name + # Ensure a copy of the license is available for the rules [ -f $clone_dir/$repo_name/LICENSE ] && cp $clone_dir/$repo_name/LICENSE $output_dir/$repo_name # Copy over rules for i in $(find $clone_dir/$repo_name -name "*.yar*"); do rule_name=$(echo $i | awk -F '/' '{print $NF}') - repo_sum=$(sha256sum $i | awk '{print $1}') - - # Check rules against those in ignore list -- don't copy if ignored. - if ! 
grep -iq $rule_name $ignorefile; then - existing_rules=$(find $output_dir/$repo_name/ -name $rule_name | wc -l) - - # For existing rules, check to see if they need to be updated, by comparing checksums - if [ $existing_rules -gt 0 ];then - local_sum=$(sha256sum $output_dir/$repo_name/$rule_name | awk '{print $1}') - if [ "$repo_sum" != "$local_sum" ]; then - echo "Checksums do not match!" - echo "Updating $rule_name..." - cp $i $output_dir/$repo_name; - ((updatecounter++)) - fi - else - # If rule doesn't exist already, we'll add it - echo "Adding new rule: $rule_name..." - cp $i $output_dir/$repo_name - ((newcounter++)) - fi - fi; - done - - # Check to see if we have any old rules that need to be removed - for i in $(find $output_dir/$repo_name -name "*.yar*" | awk -F '/' '{print $NF}'); do - is_repo_rule=$(find $clone_dir/$repo_name -name "$i" | wc -l) - if [ $is_repo_rule -eq 0 ]; then - echo "Could not find $i in source $repo_name repo...removing from $output_dir/$repo_name..." - rm $output_dir/$repo_name/$i - ((deletecounter++)) - fi - done - rm -rf $clone_dir/$repo_name - fi - done < $repos + echo "Adding rule: $rule_name..." + cp $i $output_dir/$repo_name + ((newcounter++)) + done + rm -rf $clone_dir/$repo_name + fi + done < $repos echo "Done!" - + if [ "$newcounter" -gt 0 ];then - echo "$newcounter new rules added." + echo "$newcounter rules added." fi - - if [ "$updatecounter" -gt 0 ];then - echo "$updatecounter rules updated." - fi - - if [ "$deletecounter" -gt 0 ];then - echo "$deletecounter rules removed because they were deprecated or don't exist in the source repo." - fi - + else echo "Server returned $gh_status status code." echo "No connectivity to Github...exiting..." diff --git a/salt/common/tools/sbin/soup b/salt/common/tools/sbin/soup index ce8923e90..caea21866 100755 --- a/salt/common/tools/sbin/soup +++ b/salt/common/tools/sbin/soup @@ -27,6 +27,7 @@ SOUP_LOG=/root/soup.log INFLUXDB_MIGRATION_LOG=/opt/so/log/influxdb/soup_migration.log WHATWOULDYOUSAYYAHDOHERE=soup whiptail_title='Security Onion UPdater' +NOTIFYCUSTOMELASTICCONFIG=false check_err() { local exit_code=$1 @@ -105,17 +106,20 @@ add_common() { airgap_mounted() { # Let's see if the ISO is already mounted. - if [ -f /tmp/soagupdate/SecurityOnion/VERSION ]; then + if [[ -f /tmp/soagupdate/SecurityOnion/VERSION ]]; then echo "The ISO is already mounted" else - echo "" - cat << EOF + if [[ -z $ISOLOC ]]; then + echo "This is airgap. Ask for a location." + echo "" + cat << EOF In order for soup to proceed, the path to the downloaded Security Onion ISO file, or the path to the CD-ROM or equivalent device containing the ISO media must be provided. For example, if you have copied the new Security Onion ISO file to your home directory, then the path might look like /home/myuser/securityonion-2.x.y.iso. Or, if you have burned the new ISO onto an optical disk then the path might look like /dev/cdrom. EOF - read -rp 'Enter the path to the new Security Onion ISO content: ' ISOLOC + read -rp 'Enter the path to the new Security Onion ISO content: ' ISOLOC + fi if [[ -f $ISOLOC ]]; then # Mounting the ISO image mkdir -p /tmp/soagupdate @@ -131,7 +135,7 @@ EOF elif [[ -f $ISOLOC/SecurityOnion/VERSION ]]; then ln -s $ISOLOC /tmp/soagupdate echo "Found the update content" - else + elif [[ -b $ISOLOC ]]; then mkdir -p /tmp/soagupdate mount $ISOLOC /tmp/soagupdate if [ ! -f /tmp/soagupdate/SecurityOnion/VERSION ]; then @@ -140,7 +144,11 @@ EOF exit 0 else echo "Device has been mounted!" 
- fi + fi + else + echo "Could not find Security Onion ISO content at ${ISOLOC}" + echo "Ensure the path you entered is correct, and that you verify the ISO that you downloaded." + exit 0 fi fi } @@ -150,7 +158,7 @@ airgap_update_dockers() { # Let's copy the tarball if [[ ! -f $AGDOCKER/registry.tar ]]; then echo "Unable to locate registry. Exiting" - exit 1 + exit 0 else echo "Stopping the registry docker" docker stop so-dockerregistry @@ -282,25 +290,31 @@ check_os_updates() { OSUPDATES=$(yum -q list updates | wc -l) fi if [[ "$OSUPDATES" -gt 0 ]]; then - echo $NEEDUPDATES - echo "" - read -p "Press U to update OS packages (recommended), C to continue without updates, or E to exit: " confirm - - if [[ "$confirm" == [cC] ]]; then + if [[ -z $UNATTENDED ]]; then + echo "$NEEDUPDATES" + echo "" + read -rp "Press U to update OS packages (recommended), C to continue without updates, or E to exit: " confirm + if [[ "$confirm" == [cC] ]]; then echo "Continuing without updating packages" - elif [[ "$confirm" == [uU] ]]; then + elif [[ "$confirm" == [uU] ]]; then echo "Applying Grid Updates" - set +e - run_check_net_err "salt '*' -b 5 state.apply patch.os queue=True" 'Could not apply OS updates, please check your network connection.' - set -e - else + update_flag=true + else echo "Exiting soup" exit 0 + fi + else + update_flag=true fi else - echo "Looks like you have an updated OS" + echo "Looks like you have an updated OS" + fi + + if [[ $update_flag == true ]]; then + set +e + run_check_net_err "salt '*' -b 5 state.apply patch.os queue=True" 'Could not apply OS updates, please check your network connection.' + set -e fi - } clean_dockers() { @@ -372,6 +386,7 @@ preupgrade_changes() { [[ "$INSTALLEDVERSION" == 2.3.0 || "$INSTALLEDVERSION" == 2.3.1 || "$INSTALLEDVERSION" == 2.3.2 || "$INSTALLEDVERSION" == 2.3.10 ]] && up_2.3.0_to_2.3.20 [[ "$INSTALLEDVERSION" == 2.3.20 || "$INSTALLEDVERSION" == 2.3.21 ]] && up_2.3.2X_to_2.3.30 [[ "$INSTALLEDVERSION" == 2.3.30 || "$INSTALLEDVERSION" == 2.3.40 ]] && up_2.3.3X_to_2.3.50 + [[ "$INSTALLEDVERSION" == 2.3.50 || "$INSTALLEDVERSION" == 2.3.51 || "$INSTALLEDVERSION" == 2.3.52 || "$INSTALLEDVERSION" == 2.3.60 || "$INSTALLEDVERSION" == 2.3.61 || "$INSTALLEDVERSION" == 2.3.70 ]] && up_2.3.5X_to_2.3.80 true } @@ -609,6 +624,46 @@ EOF INSTALLEDVERSION=2.3.50 } +up_2.3.5X_to_2.3.80() { + + # Remove watermark settings from global.sls + sed -i '/ cluster_routing_allocation_disk/d' /opt/so/saltstack/local/pillar/global.sls + + # Add new indices to the global + sed -i '/ index_settings:/a \\ so-elasticsearch: \n shards: 1 \n warm: 7 \n close: 30 \n delete: 365' /opt/so/saltstack/local/pillar/global.sls + sed -i '/ index_settings:/a \\ so-logstash: \n shards: 1 \n warm: 7 \n close: 30 \n delete: 365' /opt/so/saltstack/local/pillar/global.sls + sed -i '/ index_settings:/a \\ so-kibana: \n shards: 1 \n warm: 7 \n close: 30 \n delete: 365' /opt/so/saltstack/local/pillar/global.sls + sed -i '/ index_settings:/a \\ so-redis: \n shards: 1 \n warm: 7 \n close: 30 \n delete: 365' /opt/so/saltstack/local/pillar/global.sls + + # Do some pillar formatting + tc=$(grep -w true_cluster /opt/so/saltstack/local/pillar/global.sls | awk -F: {'print tolower($2)'}| xargs) + + if [[ "$tc" == "true" ]]; then + tcname=$(grep -w true_cluster_name /opt/so/saltstack/local/pillar/global.sls | awk -F: {'print $2'}) + sed -i "/^elasticsearch:/a \\ config: \n cluster: \n name: $tcname" /opt/so/saltstack/local/pillar/global.sls + sed -i '/ true_cluster_name/d' 
/opt/so/saltstack/local/pillar/global.sls + sed -i '/ esclustername/d' /opt/so/saltstack/local/pillar/global.sls + + for file in /opt/so/saltstack/local/pillar/minions/*.sls; do + if [[ ${file} != *"manager.sls"* ]]; then + noderoutetype=$(grep -w node_route_type $file | awk -F: {'print $2'}) + if [ -n "$noderoutetype" ]; then + sed -i "/^elasticsearch:/a \\ config: \n node: \n attr: \n box_type: $noderoutetype" $file + sed -i '/ node_route_type/d' $file + noderoutetype='' + fi + fi + done + fi + + # check for local es config to inform user that the config in local is now ignored and those options need to be placed in the pillar + if [ -f "/opt/so/saltstack/local/salt/elasticsearch/files/elasticsearch.yml" ]; then + NOTIFYCUSTOMELASTICCONFIG=true + fi + + INSTALLEDVERSION=2.3.80 +} + verify_upgradespace() { CURRENTSPACE=$(df -BG / | grep -v Avail | awk '{print $4}' | sed 's/.$//') if [ "$CURRENTSPACE" -lt "10" ]; then @@ -624,7 +679,7 @@ upgrade_space() { clean_dockers if ! verify_upgradespace; then echo "There is not enough space to perform the upgrade. Please free up space and try again" - exit 1 + exit 0 fi else echo "You have enough space for upgrade. Proceeding with soup." @@ -649,8 +704,8 @@ thehive_maint() { done if [ "$THEHIVE_CONNECTED" == "yes" ]; then echo "Migrating thehive databases if needed." - curl -v -k -XPOST -L "https://localhost/thehive/api/maintenance/migrate" - curl -v -k -XPOST -L "https://localhost/cortex/api/maintenance/migrate" + curl -v -k -XPOST -L "https://localhost/thehive/api/maintenance/migrate" >> "$SOUP_LOG" 2>&1 + curl -v -k -XPOST -L "https://localhost/cortex/api/maintenance/migrate" >> "$SOUP_LOG" 2>&1 fi } @@ -774,39 +829,23 @@ verify_latest_update_script() { } main() { - set -e - set +e trap 'check_err $?' EXIT - echo "### Preparing soup at $(date) ###" - while getopts ":b" opt; do - case "$opt" in - b ) # process option b - shift - BATCHSIZE=$1 - if ! [[ "$BATCHSIZE" =~ ^[0-9]+$ ]]; then - echo "Batch size must be a number greater than 0." - exit 1 - fi - ;; - \? ) - echo "Usage: cmd [-b]" - ;; - esac - done - + echo "Checking to see if this is an airgap install." + echo "" + check_airgap + if [[ $is_airgap -eq 0 && $UNATTENDED == true && -z $ISOLOC ]]; then + echo "Missing file argument (-f ) for unattended airgap upgrade." + exit 0 + fi echo "Checking to see if this is a manager." echo "" require_manager set_minionid - echo "Checking to see if this is an airgap install." - echo "" - check_airgap echo "Found that Security Onion $INSTALLEDVERSION is currently installed." echo "" if [[ $is_airgap -eq 0 ]]; then # Let's mount the ISO since this is airgap - echo "This is airgap. Ask for a location." airgap_mounted else echo "Cloning Security Onion github repo into $UPDATE_DIR." @@ -894,7 +933,7 @@ main() { echo "Once the issue is resolved, run soup again." echo "Exiting." echo "" - exit 1 + exit 0 else echo "Salt upgrade success." echo "" @@ -953,8 +992,6 @@ main() { set +e salt-call state.highstate -l info queue=True set -e - echo "" - echo "Upgrade from $INSTALLEDVERSION to $NEWVERSION complete." echo "" echo "Stopping Salt Master to remove ACL" @@ -977,6 +1014,13 @@ main() { [[ $is_airgap -eq 0 ]] && unmount_update thehive_maint + echo "" + echo "Upgrade to $NEWVERSION complete." + + # Everything beyond this is post-upgrade checking, don't fail past this point if something here causes an error + set +e + + echo "Checking the number of minions." 
NUM_MINIONS=$(ls /opt/so/saltstack/local/pillar/minions/*_*.sls | wc -l) if [[ $UPGRADESALT -eq 1 ]] && [[ $NUM_MINIONS -gt 1 ]]; then if [[ $is_airgap -eq 0 ]]; then @@ -987,8 +1031,10 @@ main() { fi fi + echo "Checking for local modifications." check_local_mods + echo "Checking sudoers file." check_sudoers if [[ -n $lsl_msg ]]; then @@ -1026,10 +1072,56 @@ EOF fi fi + if [ "$NOTIFYCUSTOMELASTICCONFIG" = true ] ; then + + cat << EOF + + +A custom Elasticsearch configuration has been found at /opt/so/saltstack/local/elasticsearch/files/elasticsearch.yml. This file is no longer referenced in Security Onion versions >= 2.3.80. + +If you still need those customizations, you'll need to manually migrate them to the new Elasticsearch config as shown at https://docs.securityonion.net/en/2.3/elasticsearch.html. + +EOF + + fi + echo "### soup has been served at $(date) ###" } -cat << EOF +while getopts ":b:f:y" opt; do + case ${opt} in + b ) + BATCHSIZE="$OPTARG" + if ! [[ "$BATCHSIZE" =~ ^[1-9][0-9]*$ ]]; then + echo "Batch size must be a number greater than 0." + exit 1 + fi + ;; + y ) + if [[ ! -f /opt/so/state/yeselastic.txt ]]; then + echo "Cannot run soup in unattended mode. You must run soup manually to accept the Elastic License." + exit 1 + else + UNATTENDED=true + fi + ;; + f ) + ISOLOC="$OPTARG" + ;; + \? ) + echo "Usage: soup [-b] [-y] [-f ]" + exit 1 + ;; + : ) + echo "Invalid option: $OPTARG requires an argument" + exit 1 + ;; + esac +done +shift $((OPTIND - 1)) + +if [[ -z $UNATTENDED ]]; then + cat << EOF SOUP - Security Onion UPdater @@ -1041,7 +1133,9 @@ Press Enter to continue or Ctrl-C to cancel. EOF -read -r input + read -r input +fi +echo "### Preparing soup at $(date) ###" main "$@" | tee -a $SOUP_LOG diff --git a/salt/curator/files/action/so-aws-close.yml b/salt/curator/files/action/so-aws-close.yml new file mode 100644 index 000000000..44f0bfa97 --- /dev/null +++ b/salt/curator/files/action/so-aws-close.yml @@ -0,0 +1,29 @@ +{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-aws:close', 30) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: close + description: >- + Close aws indices older than {{cur_close_days}} days. + options: + delete_aliases: False + timeout_override: + continue_if_exception: False + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-aws.*|so-aws.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{cur_close_days}} + exclude: diff --git a/salt/curator/files/action/so-aws-delete.yml b/salt/curator/files/action/so-aws-delete.yml new file mode 100644 index 000000000..a67ee88b8 --- /dev/null +++ b/salt/curator/files/action/so-aws-delete.yml @@ -0,0 +1,29 @@ +{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-aws:delete', 365) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: delete_indices + description: >- + Delete aws indices when older than {{ DELETE_DAYS }} days. 
+ options: + ignore_empty_list: True + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-aws.*|so-aws.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ DELETE_DAYS }} + exclude: + + \ No newline at end of file diff --git a/salt/curator/files/action/so-aws-warm.yml b/salt/curator/files/action/so-aws-warm.yml new file mode 100644 index 000000000..5369ed9a9 --- /dev/null +++ b/salt/curator/files/action/so-aws-warm.yml @@ -0,0 +1,24 @@ +{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-aws:warm', 7) -%} +actions: + 1: + action: allocation + description: "Apply shard allocation filtering rules to the specified indices" + options: + key: box_type + value: warm + allocation_type: require + wait_for_completion: true + timeout_override: + continue_if_exception: false + disable_action: false + filters: + - filtertype: pattern + kind: prefix + value: so-aws + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ WARM_DAYS }} + diff --git a/salt/curator/files/action/so-azure-close.yml b/salt/curator/files/action/so-azure-close.yml new file mode 100644 index 000000000..901b2c0ba --- /dev/null +++ b/salt/curator/files/action/so-azure-close.yml @@ -0,0 +1,29 @@ +{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-azure:close', 30) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: close + description: >- + Close azure indices older than {{cur_close_days}} days. + options: + delete_aliases: False + timeout_override: + continue_if_exception: False + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-azure.*|so-azure.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{cur_close_days}} + exclude: diff --git a/salt/curator/files/action/so-azure-delete.yml b/salt/curator/files/action/so-azure-delete.yml new file mode 100644 index 000000000..102a69d3d --- /dev/null +++ b/salt/curator/files/action/so-azure-delete.yml @@ -0,0 +1,29 @@ +{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-azure:delete', 365) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: delete_indices + description: >- + Delete azure indices when older than {{ DELETE_DAYS }} days. 
+ options: + ignore_empty_list: True + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-azure.*|so-azure.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ DELETE_DAYS }} + exclude: + + \ No newline at end of file diff --git a/salt/curator/files/action/so-azure-warm.yml b/salt/curator/files/action/so-azure-warm.yml new file mode 100644 index 000000000..d6f606125 --- /dev/null +++ b/salt/curator/files/action/so-azure-warm.yml @@ -0,0 +1,24 @@ +{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-azure:warm', 7) -%} +actions: + 1: + action: allocation + description: "Apply shard allocation filtering rules to the specified indices" + options: + key: box_type + value: warm + allocation_type: require + wait_for_completion: true + timeout_override: + continue_if_exception: false + disable_action: false + filters: + - filtertype: pattern + kind: prefix + value: so-azure + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ WARM_DAYS }} + diff --git a/salt/curator/files/action/so-barracuda-close.yml b/salt/curator/files/action/so-barracuda-close.yml new file mode 100644 index 000000000..496832db7 --- /dev/null +++ b/salt/curator/files/action/so-barracuda-close.yml @@ -0,0 +1,29 @@ +{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-barracuda:close', 30) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: close + description: >- + Close barracuda indices older than {{cur_close_days}} days. + options: + delete_aliases: False + timeout_override: + continue_if_exception: False + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-barracuda.*|so-barracuda.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{cur_close_days}} + exclude: diff --git a/salt/curator/files/action/so-barracuda-delete.yml b/salt/curator/files/action/so-barracuda-delete.yml new file mode 100644 index 000000000..49d472618 --- /dev/null +++ b/salt/curator/files/action/so-barracuda-delete.yml @@ -0,0 +1,29 @@ +{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-barracuda:delete', 365) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: delete_indices + description: >- + Delete barracuda indices when older than {{ DELETE_DAYS }} days. 
+ options: + ignore_empty_list: True + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-barracuda.*|so-barracuda.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ DELETE_DAYS }} + exclude: + + \ No newline at end of file diff --git a/salt/curator/files/action/so-barracuda-warm.yml b/salt/curator/files/action/so-barracuda-warm.yml new file mode 100644 index 000000000..334a4114a --- /dev/null +++ b/salt/curator/files/action/so-barracuda-warm.yml @@ -0,0 +1,24 @@ +{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-barracuda:warm', 7) -%} +actions: + 1: + action: allocation + description: "Apply shard allocation filtering rules to the specified indices" + options: + key: box_type + value: warm + allocation_type: require + wait_for_completion: true + timeout_override: + continue_if_exception: false + disable_action: false + filters: + - filtertype: pattern + kind: prefix + value: so-barracuda + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ WARM_DAYS }} + diff --git a/salt/curator/files/action/so-beats-delete.yml b/salt/curator/files/action/so-beats-delete.yml new file mode 100644 index 000000000..77931d661 --- /dev/null +++ b/salt/curator/files/action/so-beats-delete.yml @@ -0,0 +1,29 @@ +{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-beats:delete', 365) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: delete_indices + description: >- + Delete beats indices when older than {{ DELETE_DAYS }} days. + options: + ignore_empty_list: True + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-beats.*|so-beats.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ DELETE_DAYS }} + exclude: + + \ No newline at end of file diff --git a/salt/curator/files/action/so-beats-warm.yml b/salt/curator/files/action/so-beats-warm.yml new file mode 100644 index 000000000..da9f76656 --- /dev/null +++ b/salt/curator/files/action/so-beats-warm.yml @@ -0,0 +1,24 @@ +{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-beats:warm', 7) -%} +actions: + 1: + action: allocation + description: "Apply shard allocation filtering rules to the specified indices" + options: + key: box_type + value: warm + allocation_type: require + wait_for_completion: true + timeout_override: + continue_if_exception: false + disable_action: false + filters: + - filtertype: pattern + kind: prefix + value: so-beats + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ WARM_DAYS }} + diff --git a/salt/curator/files/action/so-bluecoat-close.yml b/salt/curator/files/action/so-bluecoat-close.yml new file mode 100644 index 000000000..86d9277eb --- /dev/null +++ b/salt/curator/files/action/so-bluecoat-close.yml @@ -0,0 +1,29 @@ +{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-bluecoat:close', 30) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. 
If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: close + description: >- + Close bluecoat indices older than {{cur_close_days}} days. + options: + delete_aliases: False + timeout_override: + continue_if_exception: False + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-bluecoat.*|so-bluecoat.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{cur_close_days}} + exclude: diff --git a/salt/curator/files/action/so-bluecoat-delete.yml b/salt/curator/files/action/so-bluecoat-delete.yml new file mode 100644 index 000000000..318624416 --- /dev/null +++ b/salt/curator/files/action/so-bluecoat-delete.yml @@ -0,0 +1,29 @@ +{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-bluecoat:delete', 365) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: delete_indices + description: >- + Delete bluecoat indices when older than {{ DELETE_DAYS }} days. + options: + ignore_empty_list: True + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-bluecoat.*|so-bluecoat.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ DELETE_DAYS }} + exclude: + + \ No newline at end of file diff --git a/salt/curator/files/action/so-bluecoat-warm.yml b/salt/curator/files/action/so-bluecoat-warm.yml new file mode 100644 index 000000000..47a8d712f --- /dev/null +++ b/salt/curator/files/action/so-bluecoat-warm.yml @@ -0,0 +1,24 @@ +{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-bluecoat:warm', 7) -%} +actions: + 1: + action: allocation + description: "Apply shard allocation filtering rules to the specified indices" + options: + key: box_type + value: warm + allocation_type: require + wait_for_completion: true + timeout_override: + continue_if_exception: false + disable_action: false + filters: + - filtertype: pattern + kind: prefix + value: so-bluecoat + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ WARM_DAYS }} + diff --git a/salt/curator/files/action/so-cef-close.yml b/salt/curator/files/action/so-cef-close.yml new file mode 100644 index 000000000..49e07f764 --- /dev/null +++ b/salt/curator/files/action/so-cef-close.yml @@ -0,0 +1,29 @@ +{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-cef:close', 30) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: close + description: >- + Close cef indices older than {{cur_close_days}} days. 
+ options: + delete_aliases: False + timeout_override: + continue_if_exception: False + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-cef.*|so-cef.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{cur_close_days}} + exclude: diff --git a/salt/curator/files/action/so-cef-delete.yml b/salt/curator/files/action/so-cef-delete.yml new file mode 100644 index 000000000..0ee7d6501 --- /dev/null +++ b/salt/curator/files/action/so-cef-delete.yml @@ -0,0 +1,29 @@ +{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-cef:delete', 365) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: delete_indices + description: >- + Delete cef indices when older than {{ DELETE_DAYS }} days. + options: + ignore_empty_list: True + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-cef.*|so-cef.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ DELETE_DAYS }} + exclude: + + \ No newline at end of file diff --git a/salt/curator/files/action/so-cef-warm.yml b/salt/curator/files/action/so-cef-warm.yml new file mode 100644 index 000000000..0a79fd2ba --- /dev/null +++ b/salt/curator/files/action/so-cef-warm.yml @@ -0,0 +1,24 @@ +{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-cef:warm', 7) -%} +actions: + 1: + action: allocation + description: "Apply shard allocation filtering rules to the specified indices" + options: + key: box_type + value: warm + allocation_type: require + wait_for_completion: true + timeout_override: + continue_if_exception: false + disable_action: false + filters: + - filtertype: pattern + kind: prefix + value: so-cef + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ WARM_DAYS }} + diff --git a/salt/curator/files/action/so-checkpoint-close.yml b/salt/curator/files/action/so-checkpoint-close.yml new file mode 100644 index 000000000..cffdf6473 --- /dev/null +++ b/salt/curator/files/action/so-checkpoint-close.yml @@ -0,0 +1,29 @@ +{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-checkpoint:close', 30) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: close + description: >- + Close checkpoint indices older than {{cur_close_days}} days. 
+ options: + delete_aliases: False + timeout_override: + continue_if_exception: False + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-checkpoint.*|so-checkpoint.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{cur_close_days}} + exclude: diff --git a/salt/curator/files/action/so-checkpoint-delete.yml b/salt/curator/files/action/so-checkpoint-delete.yml new file mode 100644 index 000000000..d1ac13efe --- /dev/null +++ b/salt/curator/files/action/so-checkpoint-delete.yml @@ -0,0 +1,29 @@ +{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-checkpoint:delete', 365) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: delete_indices + description: >- + Delete checkpoint indices when older than {{ DELETE_DAYS }} days. + options: + ignore_empty_list: True + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-checkpoint.*|so-checkpoint.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ DELETE_DAYS }} + exclude: + + \ No newline at end of file diff --git a/salt/curator/files/action/so-checkpoint-warm.yml b/salt/curator/files/action/so-checkpoint-warm.yml new file mode 100644 index 000000000..0aaec1e19 --- /dev/null +++ b/salt/curator/files/action/so-checkpoint-warm.yml @@ -0,0 +1,24 @@ +{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-checkpoint:warm', 7) -%} +actions: + 1: + action: allocation + description: "Apply shard allocation filtering rules to the specified indices" + options: + key: box_type + value: warm + allocation_type: require + wait_for_completion: true + timeout_override: + continue_if_exception: false + disable_action: false + filters: + - filtertype: pattern + kind: prefix + value: so-checkpoint + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ WARM_DAYS }} + diff --git a/salt/curator/files/action/so-cisco-close.yml b/salt/curator/files/action/so-cisco-close.yml new file mode 100644 index 000000000..cd1faade1 --- /dev/null +++ b/salt/curator/files/action/so-cisco-close.yml @@ -0,0 +1,29 @@ +{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-cisco:close', 30) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: close + description: >- + Close cisco indices older than {{cur_close_days}} days. 
+ options: + delete_aliases: False + timeout_override: + continue_if_exception: False + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-cisco.*|so-cisco.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{cur_close_days}} + exclude: diff --git a/salt/curator/files/action/so-cisco-delete.yml b/salt/curator/files/action/so-cisco-delete.yml new file mode 100644 index 000000000..bb5e06f7f --- /dev/null +++ b/salt/curator/files/action/so-cisco-delete.yml @@ -0,0 +1,29 @@ +{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-cisco:delete', 365) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: delete_indices + description: >- + Delete cisco indices when older than {{ DELETE_DAYS }} days. + options: + ignore_empty_list: True + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-cisco.*|so-cisco.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ DELETE_DAYS }} + exclude: + + \ No newline at end of file diff --git a/salt/curator/files/action/so-cisco-warm.yml b/salt/curator/files/action/so-cisco-warm.yml new file mode 100644 index 000000000..a143a95c2 --- /dev/null +++ b/salt/curator/files/action/so-cisco-warm.yml @@ -0,0 +1,24 @@ +{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-cisco:warm', 7) -%} +actions: + 1: + action: allocation + description: "Apply shard allocation filtering rules to the specified indices" + options: + key: box_type + value: warm + allocation_type: require + wait_for_completion: true + timeout_override: + continue_if_exception: false + disable_action: false + filters: + - filtertype: pattern + kind: prefix + value: so-cisco + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ WARM_DAYS }} + diff --git a/salt/curator/files/action/so-cyberark-close.yml b/salt/curator/files/action/so-cyberark-close.yml new file mode 100644 index 000000000..e352e8355 --- /dev/null +++ b/salt/curator/files/action/so-cyberark-close.yml @@ -0,0 +1,29 @@ +{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-cyberark:close', 30) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: close + description: >- + Close cyberark indices older than {{cur_close_days}} days. 
+ options: + delete_aliases: False + timeout_override: + continue_if_exception: False + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-cyberark.*|so-cyberark.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{cur_close_days}} + exclude: diff --git a/salt/curator/files/action/so-cyberark-delete.yml b/salt/curator/files/action/so-cyberark-delete.yml new file mode 100644 index 000000000..784f6881e --- /dev/null +++ b/salt/curator/files/action/so-cyberark-delete.yml @@ -0,0 +1,29 @@ +{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-cyberark:delete', 365) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: delete_indices + description: >- + Delete cyberark indices when older than {{ DELETE_DAYS }} days. + options: + ignore_empty_list: True + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-cyberark.*|so-cyberark.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ DELETE_DAYS }} + exclude: + + \ No newline at end of file diff --git a/salt/curator/files/action/so-cyberark-warm.yml b/salt/curator/files/action/so-cyberark-warm.yml new file mode 100644 index 000000000..8eae0b542 --- /dev/null +++ b/salt/curator/files/action/so-cyberark-warm.yml @@ -0,0 +1,24 @@ +{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-cyberark:warm', 7) -%} +actions: + 1: + action: allocation + description: "Apply shard allocation filtering rules to the specified indices" + options: + key: box_type + value: warm + allocation_type: require + wait_for_completion: true + timeout_override: + continue_if_exception: false + disable_action: false + filters: + - filtertype: pattern + kind: prefix + value: so-cyberark + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ WARM_DAYS }} + diff --git a/salt/curator/files/action/so-cylance-close.yml b/salt/curator/files/action/so-cylance-close.yml new file mode 100644 index 000000000..d808569fb --- /dev/null +++ b/salt/curator/files/action/so-cylance-close.yml @@ -0,0 +1,29 @@ +{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-cylance:close', 30) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: close + description: >- + Close cylance indices older than {{cur_close_days}} days. 
+ options: + delete_aliases: False + timeout_override: + continue_if_exception: False + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-cylance.*|so-cylance.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{cur_close_days}} + exclude: diff --git a/salt/curator/files/action/so-cylance-delete.yml b/salt/curator/files/action/so-cylance-delete.yml new file mode 100644 index 000000000..54cf3938b --- /dev/null +++ b/salt/curator/files/action/so-cylance-delete.yml @@ -0,0 +1,29 @@ +{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-cylance:delete', 365) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: delete_indices + description: >- + Delete cylance indices when older than {{ DELETE_DAYS }} days. + options: + ignore_empty_list: True + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-cylance.*|so-cylance.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ DELETE_DAYS }} + exclude: + + \ No newline at end of file diff --git a/salt/curator/files/action/so-cylance-warm.yml b/salt/curator/files/action/so-cylance-warm.yml new file mode 100644 index 000000000..c9da7e68a --- /dev/null +++ b/salt/curator/files/action/so-cylance-warm.yml @@ -0,0 +1,24 @@ +{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-cylance:warm', 7) -%} +actions: + 1: + action: allocation + description: "Apply shard allocation filtering rules to the specified indices" + options: + key: box_type + value: warm + allocation_type: require + wait_for_completion: true + timeout_override: + continue_if_exception: false + disable_action: false + filters: + - filtertype: pattern + kind: prefix + value: so-cylance + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ WARM_DAYS }} + diff --git a/salt/curator/files/action/so-elasticsearch-close.yml b/salt/curator/files/action/so-elasticsearch-close.yml new file mode 100644 index 000000000..3c4ff0dac --- /dev/null +++ b/salt/curator/files/action/so-elasticsearch-close.yml @@ -0,0 +1,29 @@ +{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-elasticsearch:close', 30) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: close + description: >- + Close elasticsearch indices older than {{cur_close_days}} days. 
+ options: + delete_aliases: False + timeout_override: + continue_if_exception: False + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-elasticsearch.*|so-elasticsearch.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{cur_close_days}} + exclude: diff --git a/salt/curator/files/action/so-elasticsearch-delete.yml b/salt/curator/files/action/so-elasticsearch-delete.yml new file mode 100644 index 000000000..05cc68abe --- /dev/null +++ b/salt/curator/files/action/so-elasticsearch-delete.yml @@ -0,0 +1,29 @@ +{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-elasticsearch:delete', 365) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: delete_indices + description: >- + Delete elasticsearch indices when older than {{ DELETE_DAYS }} days. + options: + ignore_empty_list: True + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-elasticsearch.*|so-elasticsearch.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ DELETE_DAYS }} + exclude: + + \ No newline at end of file diff --git a/salt/curator/files/action/so-elasticsearch-warm.yml b/salt/curator/files/action/so-elasticsearch-warm.yml new file mode 100644 index 000000000..9d82fc27b --- /dev/null +++ b/salt/curator/files/action/so-elasticsearch-warm.yml @@ -0,0 +1,24 @@ +{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-elasticsearch:warm', 7) -%} +actions: + 1: + action: allocation + description: "Apply shard allocation filtering rules to the specified indices" + options: + key: box_type + value: warm + allocation_type: require + wait_for_completion: true + timeout_override: + continue_if_exception: false + disable_action: false + filters: + - filtertype: pattern + kind: prefix + value: so-elasticsearch + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ WARM_DAYS }} + diff --git a/salt/curator/files/action/so-f5-close.yml b/salt/curator/files/action/so-f5-close.yml new file mode 100644 index 000000000..e1cdb48a1 --- /dev/null +++ b/salt/curator/files/action/so-f5-close.yml @@ -0,0 +1,29 @@ +{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-f5:close', 30) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: close + description: >- + Close f5 indices older than {{cur_close_days}} days. 
+ options: + delete_aliases: False + timeout_override: + continue_if_exception: False + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-f5.*|so-f5.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{cur_close_days}} + exclude: diff --git a/salt/curator/files/action/so-f5-delete.yml b/salt/curator/files/action/so-f5-delete.yml new file mode 100644 index 000000000..06704010a --- /dev/null +++ b/salt/curator/files/action/so-f5-delete.yml @@ -0,0 +1,29 @@ +{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-f5:delete', 365) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: delete_indices + description: >- + Delete f5 indices when older than {{ DELETE_DAYS }} days. + options: + ignore_empty_list: True + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-f5.*|so-f5.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ DELETE_DAYS }} + exclude: + + \ No newline at end of file diff --git a/salt/curator/files/action/so-f5-warm.yml b/salt/curator/files/action/so-f5-warm.yml new file mode 100644 index 000000000..12fbbe7ad --- /dev/null +++ b/salt/curator/files/action/so-f5-warm.yml @@ -0,0 +1,24 @@ +{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-f5:warm', 7) -%} +actions: + 1: + action: allocation + description: "Apply shard allocation filtering rules to the specified indices" + options: + key: box_type + value: warm + allocation_type: require + wait_for_completion: true + timeout_override: + continue_if_exception: false + disable_action: false + filters: + - filtertype: pattern + kind: prefix + value: so-f5 + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ WARM_DAYS }} + diff --git a/salt/curator/files/action/so-firewall-delete.yml b/salt/curator/files/action/so-firewall-delete.yml new file mode 100644 index 000000000..7588de437 --- /dev/null +++ b/salt/curator/files/action/so-firewall-delete.yml @@ -0,0 +1,29 @@ +{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-firewall:delete', 365) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: delete_indices + description: >- + Delete firewall indices when older than {{ DELETE_DAYS }} days. 
+ options: + ignore_empty_list: True + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-firewall.*|so-firewall.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ DELETE_DAYS }} + exclude: + + \ No newline at end of file diff --git a/salt/curator/files/action/so-firewall-warm.yml b/salt/curator/files/action/so-firewall-warm.yml new file mode 100644 index 000000000..2e9643dc3 --- /dev/null +++ b/salt/curator/files/action/so-firewall-warm.yml @@ -0,0 +1,24 @@ +{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-firewall:warm', 7) -%} +actions: + 1: + action: allocation + description: "Apply shard allocation filtering rules to the specified indices" + options: + key: box_type + value: warm + allocation_type: require + wait_for_completion: true + timeout_override: + continue_if_exception: false + disable_action: false + filters: + - filtertype: pattern + kind: prefix + value: so-firewall + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ WARM_DAYS }} + diff --git a/salt/curator/files/action/so-fortinet-close.yml b/salt/curator/files/action/so-fortinet-close.yml new file mode 100644 index 000000000..e11fb86c6 --- /dev/null +++ b/salt/curator/files/action/so-fortinet-close.yml @@ -0,0 +1,29 @@ +{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-fortinet:close', 30) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: close + description: >- + Close fortinet indices older than {{cur_close_days}} days. + options: + delete_aliases: False + timeout_override: + continue_if_exception: False + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-fortinet.*|so-fortinet.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{cur_close_days}} + exclude: diff --git a/salt/curator/files/action/so-fortinet-delete.yml b/salt/curator/files/action/so-fortinet-delete.yml new file mode 100644 index 000000000..9379e47c2 --- /dev/null +++ b/salt/curator/files/action/so-fortinet-delete.yml @@ -0,0 +1,29 @@ +{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-fortinet:delete', 365) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: delete_indices + description: >- + Delete fortinet indices when older than {{ DELETE_DAYS }} days. 
+ options: + ignore_empty_list: True + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-fortinet.*|so-fortinet.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ DELETE_DAYS }} + exclude: + + \ No newline at end of file diff --git a/salt/curator/files/action/so-fortinet-warm.yml b/salt/curator/files/action/so-fortinet-warm.yml new file mode 100644 index 000000000..db9a6f2db --- /dev/null +++ b/salt/curator/files/action/so-fortinet-warm.yml @@ -0,0 +1,24 @@ +{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-fortinet:warm', 7) -%} +actions: + 1: + action: allocation + description: "Apply shard allocation filtering rules to the specified indices" + options: + key: box_type + value: warm + allocation_type: require + wait_for_completion: true + timeout_override: + continue_if_exception: false + disable_action: false + filters: + - filtertype: pattern + kind: prefix + value: so-fortinet + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ WARM_DAYS }} + diff --git a/salt/curator/files/action/so-gcp-close.yml b/salt/curator/files/action/so-gcp-close.yml new file mode 100644 index 000000000..f9dd0af24 --- /dev/null +++ b/salt/curator/files/action/so-gcp-close.yml @@ -0,0 +1,29 @@ +{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-gcp:close', 30) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: close + description: >- + Close gcp indices older than {{cur_close_days}} days. + options: + delete_aliases: False + timeout_override: + continue_if_exception: False + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-gcp.*|so-gcp.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{cur_close_days}} + exclude: diff --git a/salt/curator/files/action/so-gcp-delete.yml b/salt/curator/files/action/so-gcp-delete.yml new file mode 100644 index 000000000..5c8ab33d8 --- /dev/null +++ b/salt/curator/files/action/so-gcp-delete.yml @@ -0,0 +1,29 @@ +{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-gcp:delete', 365) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: delete_indices + description: >- + Delete gcp indices when older than {{ DELETE_DAYS }} days. 
+ options: + ignore_empty_list: True + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-gcp.*|so-gcp.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ DELETE_DAYS }} + exclude: + + \ No newline at end of file diff --git a/salt/curator/files/action/so-gcp-warm.yml b/salt/curator/files/action/so-gcp-warm.yml new file mode 100644 index 000000000..3bb9eee80 --- /dev/null +++ b/salt/curator/files/action/so-gcp-warm.yml @@ -0,0 +1,24 @@ +{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-gcp:warm', 7) -%} +actions: + 1: + action: allocation + description: "Apply shard allocation filtering rules to the specified indices" + options: + key: box_type + value: warm + allocation_type: require + wait_for_completion: true + timeout_override: + continue_if_exception: false + disable_action: false + filters: + - filtertype: pattern + kind: prefix + value: so-gcp + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ WARM_DAYS }} + diff --git a/salt/curator/files/action/so-google_workspace-close.yml b/salt/curator/files/action/so-google_workspace-close.yml new file mode 100644 index 000000000..1ecda5893 --- /dev/null +++ b/salt/curator/files/action/so-google_workspace-close.yml @@ -0,0 +1,29 @@ +{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-google_workspace:close', 30) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: close + description: >- + Close google_workspace indices older than {{cur_close_days}} days. + options: + delete_aliases: False + timeout_override: + continue_if_exception: False + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-google_workspace.*|so-google_workspace.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{cur_close_days}} + exclude: diff --git a/salt/curator/files/action/so-google_workspace-delete.yml b/salt/curator/files/action/so-google_workspace-delete.yml new file mode 100644 index 000000000..923feda8f --- /dev/null +++ b/salt/curator/files/action/so-google_workspace-delete.yml @@ -0,0 +1,29 @@ +{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-google_workspace:delete', 365) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: delete_indices + description: >- + Delete google_workspace indices when older than {{ DELETE_DAYS }} days. 
+ options: + ignore_empty_list: True + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-google_workspace.*|so-google_workspace.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ DELETE_DAYS }} + exclude: + + \ No newline at end of file diff --git a/salt/curator/files/action/so-google_workspace-warm.yml b/salt/curator/files/action/so-google_workspace-warm.yml new file mode 100644 index 000000000..7eb2d883f --- /dev/null +++ b/salt/curator/files/action/so-google_workspace-warm.yml @@ -0,0 +1,24 @@ +{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-google_workspace:warm', 7) -%} +actions: + 1: + action: allocation + description: "Apply shard allocation filtering rules to the specified indices" + options: + key: box_type + value: warm + allocation_type: require + wait_for_completion: true + timeout_override: + continue_if_exception: false + disable_action: false + filters: + - filtertype: pattern + kind: prefix + value: so-google_workspace + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ WARM_DAYS }} + diff --git a/salt/curator/files/action/so-ids-delete.yml b/salt/curator/files/action/so-ids-delete.yml new file mode 100644 index 000000000..e5bda4e34 --- /dev/null +++ b/salt/curator/files/action/so-ids-delete.yml @@ -0,0 +1,29 @@ +{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-ids:delete', 365) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: delete_indices + description: >- + Delete IDS indices when older than {{ DELETE_DAYS }} days. + options: + ignore_empty_list: True + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-ids.*|so-ids.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ DELETE_DAYS }} + exclude: + + \ No newline at end of file diff --git a/salt/curator/files/action/so-ids-warm.yml b/salt/curator/files/action/so-ids-warm.yml new file mode 100644 index 000000000..0edad5b5b --- /dev/null +++ b/salt/curator/files/action/so-ids-warm.yml @@ -0,0 +1,24 @@ +{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-ids:warm', 7) -%} +actions: + 1: + action: allocation + description: "Apply shard allocation filtering rules to the specified indices" + options: + key: box_type + value: warm + allocation_type: require + wait_for_completion: true + timeout_override: + continue_if_exception: false + disable_action: false + filters: + - filtertype: pattern + kind: prefix + value: so-ids + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ WARM_DAYS }} + diff --git a/salt/curator/files/action/so-imperva-close.yml b/salt/curator/files/action/so-imperva-close.yml new file mode 100644 index 000000000..55ec2e472 --- /dev/null +++ b/salt/curator/files/action/so-imperva-close.yml @@ -0,0 +1,29 @@ +{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-imperva:close', 30) -%} +--- +# Remember, leave a key empty if there is no value. 
None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: close + description: >- + Close imperva indices older than {{cur_close_days}} days. + options: + delete_aliases: False + timeout_override: + continue_if_exception: False + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-imperva.*|so-imperva.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{cur_close_days}} + exclude: diff --git a/salt/curator/files/action/so-imperva-delete.yml b/salt/curator/files/action/so-imperva-delete.yml new file mode 100644 index 000000000..b5526e2fb --- /dev/null +++ b/salt/curator/files/action/so-imperva-delete.yml @@ -0,0 +1,29 @@ +{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-imperva:delete', 365) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: delete_indices + description: >- + Delete imperva indices when older than {{ DELETE_DAYS }} days. + options: + ignore_empty_list: True + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-imperva.*|so-imperva.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ DELETE_DAYS }} + exclude: + + \ No newline at end of file diff --git a/salt/curator/files/action/so-imperva-warm.yml b/salt/curator/files/action/so-imperva-warm.yml new file mode 100644 index 000000000..0297d5cd6 --- /dev/null +++ b/salt/curator/files/action/so-imperva-warm.yml @@ -0,0 +1,24 @@ +{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-imperva:warm', 7) -%} +actions: + 1: + action: allocation + description: "Apply shard allocation filtering rules to the specified indices" + options: + key: box_type + value: warm + allocation_type: require + wait_for_completion: true + timeout_override: + continue_if_exception: false + disable_action: false + filters: + - filtertype: pattern + kind: prefix + value: so-imperva + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ WARM_DAYS }} + diff --git a/salt/curator/files/action/so-import-delete.yml b/salt/curator/files/action/so-import-delete.yml new file mode 100644 index 000000000..aa9808c5f --- /dev/null +++ b/salt/curator/files/action/so-import-delete.yml @@ -0,0 +1,29 @@ +{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-import:delete', 365) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: delete_indices + description: >- + Delete import indices when older than {{ DELETE_DAYS }} days. 
+ options: + ignore_empty_list: True + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-import.*|so-import.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ DELETE_DAYS }} + exclude: + + \ No newline at end of file diff --git a/salt/curator/files/action/so-import-warm.yml b/salt/curator/files/action/so-import-warm.yml new file mode 100644 index 000000000..3a6fa3d3d --- /dev/null +++ b/salt/curator/files/action/so-import-warm.yml @@ -0,0 +1,24 @@ +{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-import:warm', 7) -%} +actions: + 1: + action: allocation + description: "Apply shard allocation filtering rules to the specified indices" + options: + key: box_type + value: warm + allocation_type: require + wait_for_completion: true + timeout_override: + continue_if_exception: false + disable_action: false + filters: + - filtertype: pattern + kind: prefix + value: so-import + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ WARM_DAYS }} + diff --git a/salt/curator/files/action/so-infoblox-close.yml b/salt/curator/files/action/so-infoblox-close.yml new file mode 100644 index 000000000..9fd4c5070 --- /dev/null +++ b/salt/curator/files/action/so-infoblox-close.yml @@ -0,0 +1,29 @@ +{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-infoblox:close', 30) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: close + description: >- + Close infoblox indices older than {{cur_close_days}} days. + options: + delete_aliases: False + timeout_override: + continue_if_exception: False + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-infoblox.*|so-infoblox.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{cur_close_days}} + exclude: diff --git a/salt/curator/files/action/so-infoblox-delete.yml b/salt/curator/files/action/so-infoblox-delete.yml new file mode 100644 index 000000000..0a7fdafbe --- /dev/null +++ b/salt/curator/files/action/so-infoblox-delete.yml @@ -0,0 +1,29 @@ +{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-infoblox:delete', 365) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: delete_indices + description: >- + Delete infoblox indices when older than {{ DELETE_DAYS }} days. 
+ options: + ignore_empty_list: True + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-infoblox.*|so-infoblox.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ DELETE_DAYS }} + exclude: + + \ No newline at end of file diff --git a/salt/curator/files/action/so-infoblox-warm.yml b/salt/curator/files/action/so-infoblox-warm.yml new file mode 100644 index 000000000..a2f571b7a --- /dev/null +++ b/salt/curator/files/action/so-infoblox-warm.yml @@ -0,0 +1,24 @@ +{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-infoblox:warm', 7) -%} +actions: + 1: + action: allocation + description: "Apply shard allocation filtering rules to the specified indices" + options: + key: box_type + value: warm + allocation_type: require + wait_for_completion: true + timeout_override: + continue_if_exception: false + disable_action: false + filters: + - filtertype: pattern + kind: prefix + value: so-infoblox + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ WARM_DAYS }} + diff --git a/salt/curator/files/action/so-juniper-close.yml b/salt/curator/files/action/so-juniper-close.yml new file mode 100644 index 000000000..466a51eca --- /dev/null +++ b/salt/curator/files/action/so-juniper-close.yml @@ -0,0 +1,29 @@ +{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-juniper:close', 30) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: close + description: >- + Close juniper indices older than {{cur_close_days}} days. + options: + delete_aliases: False + timeout_override: + continue_if_exception: False + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-juniper.*|so-juniper.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{cur_close_days}} + exclude: diff --git a/salt/curator/files/action/so-juniper-delete.yml b/salt/curator/files/action/so-juniper-delete.yml new file mode 100644 index 000000000..18abc86ac --- /dev/null +++ b/salt/curator/files/action/so-juniper-delete.yml @@ -0,0 +1,29 @@ +{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-juniper:delete', 365) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: delete_indices + description: >- + Delete juniper indices when older than {{ DELETE_DAYS }} days. 
+ options: + ignore_empty_list: True + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-juniper.*|so-juniper.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ DELETE_DAYS }} + exclude: + + \ No newline at end of file diff --git a/salt/curator/files/action/so-juniper-warm.yml b/salt/curator/files/action/so-juniper-warm.yml new file mode 100644 index 000000000..5369ed9a9 --- /dev/null +++ b/salt/curator/files/action/so-juniper-warm.yml @@ -0,0 +1,24 @@ +{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-juniper:warm', 7) -%} +actions: + 1: + action: allocation + description: "Apply shard allocation filtering rules to the specified indices" + options: + key: box_type + value: warm + allocation_type: require + wait_for_completion: true + timeout_override: + continue_if_exception: false + disable_action: false + filters: + - filtertype: pattern + kind: prefix + value: so-juniper + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ WARM_DAYS }} + diff --git a/salt/curator/files/action/so-kibana-close.yml b/salt/curator/files/action/so-kibana-close.yml new file mode 100644 index 000000000..7347fb01c --- /dev/null +++ b/salt/curator/files/action/so-kibana-close.yml @@ -0,0 +1,29 @@ +{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-kibana:close', 30) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: close + description: >- + Close kibana indices older than {{cur_close_days}} days. + options: + delete_aliases: False + timeout_override: + continue_if_exception: False + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-kibana.*|so-kibana.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{cur_close_days}} + exclude: diff --git a/salt/curator/files/action/so-kibana-delete.yml b/salt/curator/files/action/so-kibana-delete.yml new file mode 100644 index 000000000..5a775b8de --- /dev/null +++ b/salt/curator/files/action/so-kibana-delete.yml @@ -0,0 +1,29 @@ +{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-kibana:delete', 365) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: delete_indices + description: >- + Delete kibana indices when older than {{ DELETE_DAYS }} days.
+ options: + ignore_empty_list: True + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-kibana.*|so-kibana.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ DELETE_DAYS }} + exclude: + + \ No newline at end of file diff --git a/salt/curator/files/action/so-kibana-warm.yml b/salt/curator/files/action/so-kibana-warm.yml new file mode 100644 index 000000000..b5674c8c3 --- /dev/null +++ b/salt/curator/files/action/so-kibana-warm.yml @@ -0,0 +1,24 @@ +{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-kibana:warm', 7) -%} +actions: + 1: + action: allocation + description: "Apply shard allocation filtering rules to the specified indices" + options: + key: box_type + value: warm + allocation_type: require + wait_for_completion: true + timeout_override: + continue_if_exception: false + disable_action: false + filters: + - filtertype: pattern + kind: prefix + value: so-kibana + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ WARM_DAYS }} + diff --git a/salt/curator/files/action/so-logstash-close.yml b/salt/curator/files/action/so-logstash-close.yml new file mode 100644 index 000000000..23787e237 --- /dev/null +++ b/salt/curator/files/action/so-logstash-close.yml @@ -0,0 +1,29 @@ +{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-logstash:close', 30) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: close + description: >- + Close logstash indices older than {{cur_close_days}} days. + options: + delete_aliases: False + timeout_override: + continue_if_exception: False + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-logstash.*|so-logstash.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{cur_close_days}} + exclude: diff --git a/salt/curator/files/action/so-logstash-delete.yml b/salt/curator/files/action/so-logstash-delete.yml new file mode 100644 index 000000000..d9ff848da --- /dev/null +++ b/salt/curator/files/action/so-logstash-delete.yml @@ -0,0 +1,29 @@ +{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-logstash:delete', 365) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: delete_indices + description: >- + Delete logstash indices when older than {{ DELETE_DAYS }} days. 
+ options: + ignore_empty_list: True + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-logstash.*|so-logstash.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ DELETE_DAYS }} + exclude: + + \ No newline at end of file diff --git a/salt/curator/files/action/so-logstash-warm.yml b/salt/curator/files/action/so-logstash-warm.yml new file mode 100644 index 000000000..826bf2975 --- /dev/null +++ b/salt/curator/files/action/so-logstash-warm.yml @@ -0,0 +1,24 @@ +{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-logstash:warm', 7) -%} +actions: + 1: + action: allocation + description: "Apply shard allocation filtering rules to the specified indices" + options: + key: box_type + value: warm + allocation_type: require + wait_for_completion: true + timeout_override: + continue_if_exception: false + disable_action: false + filters: + - filtertype: pattern + kind: prefix + value: so-logstash + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ WARM_DAYS }} + diff --git a/salt/curator/files/action/so-microsoft-close.yml b/salt/curator/files/action/so-microsoft-close.yml new file mode 100644 index 000000000..f4eaf738f --- /dev/null +++ b/salt/curator/files/action/so-microsoft-close.yml @@ -0,0 +1,29 @@ +{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-microsoft:close', 30) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: close + description: >- + Close microsoft indices older than {{cur_close_days}} days. + options: + delete_aliases: False + timeout_override: + continue_if_exception: False + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-microsoft.*|so-microsoft.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{cur_close_days}} + exclude: diff --git a/salt/curator/files/action/so-microsoft-delete.yml b/salt/curator/files/action/so-microsoft-delete.yml new file mode 100644 index 000000000..f1a854c83 --- /dev/null +++ b/salt/curator/files/action/so-microsoft-delete.yml @@ -0,0 +1,29 @@ +{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-microsoft:delete', 365) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: delete_indices + description: >- + Delete microsoft indices when older than {{ DELETE_DAYS }} days. 
+ options: + ignore_empty_list: True + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-microsoft.*|so-microsoft.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ DELETE_DAYS }} + exclude: + + \ No newline at end of file diff --git a/salt/curator/files/action/so-microsoft-warm.yml b/salt/curator/files/action/so-microsoft-warm.yml new file mode 100644 index 000000000..551d0cb56 --- /dev/null +++ b/salt/curator/files/action/so-microsoft-warm.yml @@ -0,0 +1,24 @@ +{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-microsoft:warm', 7) -%} +actions: + 1: + action: allocation + description: "Apply shard allocation filtering rules to the specified indices" + options: + key: box_type + value: warm + allocation_type: require + wait_for_completion: true + timeout_override: + continue_if_exception: false + disable_action: false + filters: + - filtertype: pattern + kind: prefix + value: so-microsoft + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ WARM_DAYS }} + diff --git a/salt/curator/files/action/so-misp-close.yml b/salt/curator/files/action/so-misp-close.yml new file mode 100644 index 000000000..e39781353 --- /dev/null +++ b/salt/curator/files/action/so-misp-close.yml @@ -0,0 +1,29 @@ +{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-misp:close', 30) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: close + description: >- + Close misp indices older than {{cur_close_days}} days. + options: + delete_aliases: False + timeout_override: + continue_if_exception: False + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-misp.*|so-misp.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{cur_close_days}} + exclude: diff --git a/salt/curator/files/action/so-misp-delete.yml b/salt/curator/files/action/so-misp-delete.yml new file mode 100644 index 000000000..ceaa9c73d --- /dev/null +++ b/salt/curator/files/action/so-misp-delete.yml @@ -0,0 +1,29 @@ +{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-misp:delete', 365) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: delete_indices + description: >- + Delete misp indices when older than {{ DELETE_DAYS }} days. 
+ options: + ignore_empty_list: True + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-misp.*|so-misp.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ DELETE_DAYS }} + exclude: + + \ No newline at end of file diff --git a/salt/curator/files/action/so-misp-warm.yml b/salt/curator/files/action/so-misp-warm.yml new file mode 100644 index 000000000..af29975b0 --- /dev/null +++ b/salt/curator/files/action/so-misp-warm.yml @@ -0,0 +1,24 @@ +{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-misp:warm', 7) -%} +actions: + 1: + action: allocation + description: "Apply shard allocation filtering rules to the specified indices" + options: + key: box_type + value: warm + allocation_type: require + wait_for_completion: true + timeout_override: + continue_if_exception: false + disable_action: false + filters: + - filtertype: pattern + kind: prefix + value: so-misp + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ WARM_DAYS }} + diff --git a/salt/curator/files/action/so-netflow-close.yml b/salt/curator/files/action/so-netflow-close.yml new file mode 100644 index 000000000..cc9ade28d --- /dev/null +++ b/salt/curator/files/action/so-netflow-close.yml @@ -0,0 +1,29 @@ +{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-netflow:close', 30) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: close + description: >- + Close netflow indices older than {{cur_close_days}} days. + options: + delete_aliases: False + timeout_override: + continue_if_exception: False + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-netflow.*|so-netflow.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{cur_close_days}} + exclude: diff --git a/salt/curator/files/action/so-netflow-delete.yml b/salt/curator/files/action/so-netflow-delete.yml new file mode 100644 index 000000000..5bc76ad15 --- /dev/null +++ b/salt/curator/files/action/so-netflow-delete.yml @@ -0,0 +1,29 @@ +{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-netflow:delete', 365) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: delete_indices + description: >- + Delete netflow indices when older than {{ DELETE_DAYS }} days. 
+ options: + ignore_empty_list: True + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-netflow.*|so-netflow.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ DELETE_DAYS }} + exclude: + + \ No newline at end of file diff --git a/salt/curator/files/action/so-netflow-warm.yml b/salt/curator/files/action/so-netflow-warm.yml new file mode 100644 index 000000000..ea57bb72b --- /dev/null +++ b/salt/curator/files/action/so-netflow-warm.yml @@ -0,0 +1,24 @@ +{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-netflow:warm', 7) -%} +actions: + 1: + action: allocation + description: "Apply shard allocation filtering rules to the specified indices" + options: + key: box_type + value: warm + allocation_type: require + wait_for_completion: true + timeout_override: + continue_if_exception: false + disable_action: false + filters: + - filtertype: pattern + kind: prefix + value: so-netflow + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ WARM_DAYS }} + diff --git a/salt/curator/files/action/so-netscout-close.yml b/salt/curator/files/action/so-netscout-close.yml new file mode 100644 index 000000000..d99374d2f --- /dev/null +++ b/salt/curator/files/action/so-netscout-close.yml @@ -0,0 +1,29 @@ +{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-netscout:close', 30) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: close + description: >- + Close netscout indices older than {{cur_close_days}} days. + options: + delete_aliases: False + timeout_override: + continue_if_exception: False + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-netscout.*|so-netscout.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{cur_close_days}} + exclude: diff --git a/salt/curator/files/action/so-netscout-delete.yml b/salt/curator/files/action/so-netscout-delete.yml new file mode 100644 index 000000000..3c0e249b5 --- /dev/null +++ b/salt/curator/files/action/so-netscout-delete.yml @@ -0,0 +1,29 @@ +{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-netscout:delete', 365) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: delete_indices + description: >- + Delete netscout indices when older than {{ DELETE_DAYS }} days. 
+ options: + ignore_empty_list: True + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-netscout.*|so-netscout.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ DELETE_DAYS }} + exclude: + + \ No newline at end of file diff --git a/salt/curator/files/action/so-netscout-warm.yml b/salt/curator/files/action/so-netscout-warm.yml new file mode 100644 index 000000000..1b93c3118 --- /dev/null +++ b/salt/curator/files/action/so-netscout-warm.yml @@ -0,0 +1,24 @@ +{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-netscout:warm', 7) -%} +actions: + 1: + action: allocation + description: "Apply shard allocation filtering rules to the specified indices" + options: + key: box_type + value: warm + allocation_type: require + wait_for_completion: true + timeout_override: + continue_if_exception: false + disable_action: false + filters: + - filtertype: pattern + kind: prefix + value: so-netscout + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ WARM_DAYS }} + diff --git a/salt/curator/files/action/so-o365-close.yml b/salt/curator/files/action/so-o365-close.yml new file mode 100644 index 000000000..4dece060f --- /dev/null +++ b/salt/curator/files/action/so-o365-close.yml @@ -0,0 +1,29 @@ +{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-o365:close', 30) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: close + description: >- + Close o365 indices older than {{cur_close_days}} days. + options: + delete_aliases: False + timeout_override: + continue_if_exception: False + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-o365.*|so-o365.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{cur_close_days}} + exclude: diff --git a/salt/curator/files/action/so-o365-delete.yml b/salt/curator/files/action/so-o365-delete.yml new file mode 100644 index 000000000..13c7c1344 --- /dev/null +++ b/salt/curator/files/action/so-o365-delete.yml @@ -0,0 +1,29 @@ +{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-o365:delete', 365) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: delete_indices + description: >- + Delete o365 indices when older than {{ DELETE_DAYS }} days. 
+ options: + ignore_empty_list: True + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-o365.*|so-o365.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ DELETE_DAYS }} + exclude: + + \ No newline at end of file diff --git a/salt/curator/files/action/so-o365-warm.yml b/salt/curator/files/action/so-o365-warm.yml new file mode 100644 index 000000000..cbb7bc24e --- /dev/null +++ b/salt/curator/files/action/so-o365-warm.yml @@ -0,0 +1,24 @@ +{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-o365:warm', 7) -%} +actions: + 1: + action: allocation + description: "Apply shard allocation filtering rules to the specified indices" + options: + key: box_type + value: warm + allocation_type: require + wait_for_completion: true + timeout_override: + continue_if_exception: false + disable_action: false + filters: + - filtertype: pattern + kind: prefix + value: so-o365 + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ WARM_DAYS }} + diff --git a/salt/curator/files/action/so-okta-close.yml b/salt/curator/files/action/so-okta-close.yml new file mode 100644 index 000000000..10f7e4b60 --- /dev/null +++ b/salt/curator/files/action/so-okta-close.yml @@ -0,0 +1,29 @@ +{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-okta:close', 30) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: close + description: >- + Close okta indices older than {{cur_close_days}} days. + options: + delete_aliases: False + timeout_override: + continue_if_exception: False + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-okta.*|so-okta.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{cur_close_days}} + exclude: diff --git a/salt/curator/files/action/so-okta-warm.yml b/salt/curator/files/action/so-okta-warm.yml new file mode 100644 index 000000000..75764860d --- /dev/null +++ b/salt/curator/files/action/so-okta-warm.yml @@ -0,0 +1,24 @@ +{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-okta:warm', 7) -%} +actions: + 1: + action: allocation + description: "Apply shard allocation filtering rules to the specified indices" + options: + key: box_type + value: warm + allocation_type: require + wait_for_completion: true + timeout_override: + continue_if_exception: false + disable_action: false + filters: + - filtertype: pattern + kind: prefix + value: so-okta + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ WARM_DAYS }} + diff --git a/salt/curator/files/action/so-okta.delete.yml b/salt/curator/files/action/so-okta.delete.yml new file mode 100644 index 000000000..1beeb0fc0 --- /dev/null +++ b/salt/curator/files/action/so-okta.delete.yml @@ -0,0 +1,29 @@ +{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-okta:delete', 365) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. 
If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: delete_indices + description: >- + Delete okta indices when older than {{ DELETE_DAYS }} days. + options: + ignore_empty_list: True + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-okta.*|so-okta.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ DELETE_DAYS }} + exclude: + + \ No newline at end of file diff --git a/salt/curator/files/action/so-osquery-delete.yml b/salt/curator/files/action/so-osquery-delete.yml new file mode 100644 index 000000000..d77b1b3d1 --- /dev/null +++ b/salt/curator/files/action/so-osquery-delete.yml @@ -0,0 +1,29 @@ +{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-osquery:delete', 365) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: delete_indices + description: >- + Delete import indices when older than {{ DELETE_DAYS }} days. + options: + ignore_empty_list: True + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-osquery.*|so-osquery.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ DELETE_DAYS }} + exclude: + + \ No newline at end of file diff --git a/salt/curator/files/action/so-osquery-warm.yml b/salt/curator/files/action/so-osquery-warm.yml new file mode 100644 index 000000000..156a83c7a --- /dev/null +++ b/salt/curator/files/action/so-osquery-warm.yml @@ -0,0 +1,24 @@ +{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-osquery:warm', 7) -%} +actions: + 1: + action: allocation + description: "Apply shard allocation filtering rules to the specified indices" + options: + key: box_type + value: warm + allocation_type: require + wait_for_completion: true + timeout_override: + continue_if_exception: false + disable_action: false + filters: + - filtertype: pattern + kind: prefix + value: so-osquery + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ WARM_DAYS }} + diff --git a/salt/curator/files/action/so-ossec-delete.yml b/salt/curator/files/action/so-ossec-delete.yml new file mode 100644 index 000000000..7aea13e41 --- /dev/null +++ b/salt/curator/files/action/so-ossec-delete.yml @@ -0,0 +1,29 @@ +{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-ossec:delete', 365) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: delete_indices + description: >- + Delete ossec indices when older than {{ DELETE_DAYS }} days. 
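Every one of these actions resolves its retention window from a pillar key of the form `elasticsearch:index_settings:so-<module>:<stage>`, falling back to the default hard-coded in the template (365 for delete, 30 for close, 7 for warm) when the key is unset. To inspect the effective value and redeploy after changing it, a sketch along these lines should work on the manager (the key below uses the ossec delete setting from the template above; adjust module and stage as needed):

```bash
# Show the pillar override for the ossec delete cutoff.
# Empty output means the key is unset and the template default (365 days) applies.
salt-call pillar.get elasticsearch:index_settings:so-ossec:delete

# Re-render and redeploy the curator action files after editing pillar data.
salt-call state.apply curator -l info
```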
+ options: + ignore_empty_list: True + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-ossec.*|so-ossec.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ DELETE_DAYS }} + exclude: + + \ No newline at end of file diff --git a/salt/curator/files/action/so-ossec-warm.yml b/salt/curator/files/action/so-ossec-warm.yml new file mode 100644 index 000000000..769d6cbea --- /dev/null +++ b/salt/curator/files/action/so-ossec-warm.yml @@ -0,0 +1,24 @@ +{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-ossec:warm', 7) -%} +actions: + 1: + action: allocation + description: "Apply shard allocation filtering rules to the specified indices" + options: + key: box_type + value: warm + allocation_type: require + wait_for_completion: true + timeout_override: + continue_if_exception: false + disable_action: false + filters: + - filtertype: pattern + kind: prefix + value: so-ossec + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ WARM_DAYS }} + diff --git a/salt/curator/files/action/so-proofpoint-close.yml b/salt/curator/files/action/so-proofpoint-close.yml new file mode 100644 index 000000000..888c9fc64 --- /dev/null +++ b/salt/curator/files/action/so-proofpoint-close.yml @@ -0,0 +1,29 @@ +{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-proofpoint:close', 30) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: close + description: >- + Close proofpoint indices older than {{cur_close_days}} days. + options: + delete_aliases: False + timeout_override: + continue_if_exception: False + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-proofpoint.*|so-proofpoint.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{cur_close_days}} + exclude: diff --git a/salt/curator/files/action/so-proofpoint-delete.yml b/salt/curator/files/action/so-proofpoint-delete.yml new file mode 100644 index 000000000..903dde204 --- /dev/null +++ b/salt/curator/files/action/so-proofpoint-delete.yml @@ -0,0 +1,29 @@ +{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-proofpoint:delete', 365) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: delete_indices + description: >- + Delete proofpoint indices when older than {{ DELETE_DAYS }} days. 
+ options: + ignore_empty_list: True + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-proofpoint.*|so-proofpoint.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ DELETE_DAYS }} + exclude: + + \ No newline at end of file diff --git a/salt/curator/files/action/so-proofpoint-warm.yml b/salt/curator/files/action/so-proofpoint-warm.yml new file mode 100644 index 000000000..8304ae41a --- /dev/null +++ b/salt/curator/files/action/so-proofpoint-warm.yml @@ -0,0 +1,24 @@ +{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-proofpoint:warm', 7) -%} +actions: + 1: + action: allocation + description: "Apply shard allocation filtering rules to the specified indices" + options: + key: box_type + value: warm + allocation_type: require + wait_for_completion: true + timeout_override: + continue_if_exception: false + disable_action: false + filters: + - filtertype: pattern + kind: prefix + value: so-proofpoint + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ WARM_DAYS }} + diff --git a/salt/curator/files/action/so-radware-close.yml b/salt/curator/files/action/so-radware-close.yml new file mode 100644 index 000000000..59a7bbafd --- /dev/null +++ b/salt/curator/files/action/so-radware-close.yml @@ -0,0 +1,29 @@ +{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-radware:close', 30) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: close + description: >- + Close radware indices older than {{cur_close_days}} days. + options: + delete_aliases: False + timeout_override: + continue_if_exception: False + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-radware.*|so-radware.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{cur_close_days}} + exclude: diff --git a/salt/curator/files/action/so-radware-delete.yml b/salt/curator/files/action/so-radware-delete.yml new file mode 100644 index 000000000..1fe09cded --- /dev/null +++ b/salt/curator/files/action/so-radware-delete.yml @@ -0,0 +1,29 @@ +{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-radware:delete', 365) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: delete_indices + description: >- + Delete radware indices when older than {{ DELETE_DAYS }} days. 
+ options: + ignore_empty_list: True + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-radware.*|so-radware.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ DELETE_DAYS }} + exclude: + + \ No newline at end of file diff --git a/salt/curator/files/action/so-radware-warm.yml b/salt/curator/files/action/so-radware-warm.yml new file mode 100644 index 000000000..8d4337aaf --- /dev/null +++ b/salt/curator/files/action/so-radware-warm.yml @@ -0,0 +1,24 @@ +{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-radware:warm', 7) -%} +actions: + 1: + action: allocation + description: "Apply shard allocation filtering rules to the specified indices" + options: + key: box_type + value: warm + allocation_type: require + wait_for_completion: true + timeout_override: + continue_if_exception: false + disable_action: false + filters: + - filtertype: pattern + kind: prefix + value: so-radware + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ WARM_DAYS }} + diff --git a/salt/curator/files/action/so-redis-close.yml b/salt/curator/files/action/so-redis-close.yml new file mode 100644 index 000000000..b69935f21 --- /dev/null +++ b/salt/curator/files/action/so-redis-close.yml @@ -0,0 +1,29 @@ +{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-redis:close', 30) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: close + description: >- + Close redis indices older than {{cur_close_days}} days. + options: + delete_aliases: False + timeout_override: + continue_if_exception: False + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-redis.*|so-redis.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{cur_close_days}} + exclude: diff --git a/salt/curator/files/action/so-redis-delete.yml b/salt/curator/files/action/so-redis-delete.yml new file mode 100644 index 000000000..f6e73dce8 --- /dev/null +++ b/salt/curator/files/action/so-redis-delete.yml @@ -0,0 +1,29 @@ +{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-redis:delete', 365) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: delete_indices + description: >- + Delete redis indices when older than {{ DELETE_DAYS }} days. 
+ options: + ignore_empty_list: True + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-redis.*|so-redis.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ DELETE_DAYS }} + exclude: + + \ No newline at end of file diff --git a/salt/curator/files/action/so-redis-warm.yml b/salt/curator/files/action/so-redis-warm.yml new file mode 100644 index 000000000..a5b1055c3 --- /dev/null +++ b/salt/curator/files/action/so-redis-warm.yml @@ -0,0 +1,24 @@ +{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-redis:warm', 7) -%} +actions: + 1: + action: allocation + description: "Apply shard allocation filtering rules to the specified indices" + options: + key: box_type + value: warm + allocation_type: require + wait_for_completion: true + timeout_override: + continue_if_exception: false + disable_action: false + filters: + - filtertype: pattern + kind: prefix + value: so-redis + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ WARM_DAYS }} + diff --git a/salt/curator/files/action/so-snort-close.yml b/salt/curator/files/action/so-snort-close.yml new file mode 100644 index 000000000..8f6209255 --- /dev/null +++ b/salt/curator/files/action/so-snort-close.yml @@ -0,0 +1,29 @@ +{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-snort:close', 30) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: close + description: >- + Close snort indices older than {{cur_close_days}} days. + options: + delete_aliases: False + timeout_override: + continue_if_exception: False + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-snort.*|so-snort.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{cur_close_days}} + exclude: diff --git a/salt/curator/files/action/so-snort-delete.yml b/salt/curator/files/action/so-snort-delete.yml new file mode 100644 index 000000000..50f68988b --- /dev/null +++ b/salt/curator/files/action/so-snort-delete.yml @@ -0,0 +1,29 @@ +{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-snort:delete', 365) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: delete_indices + description: >- + Delete snort indices when older than {{ DELETE_DAYS }} days. 
+ options: + ignore_empty_list: True + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-snort.*|so-snort.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ DELETE_DAYS }} + exclude: + + \ No newline at end of file diff --git a/salt/curator/files/action/so-snort-warm.yml b/salt/curator/files/action/so-snort-warm.yml new file mode 100644 index 000000000..3bbc977e2 --- /dev/null +++ b/salt/curator/files/action/so-snort-warm.yml @@ -0,0 +1,24 @@ +{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-snort:warm', 7) -%} +actions: + 1: + action: allocation + description: "Apply shard allocation filtering rules to the specified indices" + options: + key: box_type + value: warm + allocation_type: require + wait_for_completion: true + timeout_override: + continue_if_exception: false + disable_action: false + filters: + - filtertype: pattern + kind: prefix + value: so-snort + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ WARM_DAYS }} + diff --git a/salt/curator/files/action/so-snyk-close.yml b/salt/curator/files/action/so-snyk-close.yml new file mode 100644 index 000000000..e13d8f98d --- /dev/null +++ b/salt/curator/files/action/so-snyk-close.yml @@ -0,0 +1,29 @@ +{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-snyk:close', 30) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: close + description: >- + Close snyk indices older than {{cur_close_days}} days. + options: + delete_aliases: False + timeout_override: + continue_if_exception: False + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-snyk.*|so-snyk.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{cur_close_days}} + exclude: diff --git a/salt/curator/files/action/so-snyk-delete.yml b/salt/curator/files/action/so-snyk-delete.yml new file mode 100644 index 000000000..cec0b942f --- /dev/null +++ b/salt/curator/files/action/so-snyk-delete.yml @@ -0,0 +1,29 @@ +{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-snyk:delete', 365) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: delete_indices + description: >- + Delete snyk indices when older than {{ DELETE_DAYS }} days. 
+ options: + ignore_empty_list: True + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-snyk.*|so-snyk.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ DELETE_DAYS }} + exclude: + + \ No newline at end of file diff --git a/salt/curator/files/action/so-snyk-warm.yml b/salt/curator/files/action/so-snyk-warm.yml new file mode 100644 index 000000000..f9b10bbdd --- /dev/null +++ b/salt/curator/files/action/so-snyk-warm.yml @@ -0,0 +1,24 @@ +{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-snyk:warm', 7) -%} +actions: + 1: + action: allocation + description: "Apply shard allocation filtering rules to the specified indices" + options: + key: box_type + value: warm + allocation_type: require + wait_for_completion: true + timeout_override: + continue_if_exception: false + disable_action: false + filters: + - filtertype: pattern + kind: prefix + value: so-snyk + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ WARM_DAYS }} + diff --git a/salt/curator/files/action/so-sonicwall-close.yml b/salt/curator/files/action/so-sonicwall-close.yml new file mode 100644 index 000000000..9cc23d3af --- /dev/null +++ b/salt/curator/files/action/so-sonicwall-close.yml @@ -0,0 +1,29 @@ +{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-sonicwall:close', 30) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: close + description: >- + Close sonicwall indices older than {{cur_close_days}} days. + options: + delete_aliases: False + timeout_override: + continue_if_exception: False + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-sonicwall.*|so-sonicwall.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{cur_close_days}} + exclude: diff --git a/salt/curator/files/action/so-sonicwall-delete.yml b/salt/curator/files/action/so-sonicwall-delete.yml new file mode 100644 index 000000000..c7d38361f --- /dev/null +++ b/salt/curator/files/action/so-sonicwall-delete.yml @@ -0,0 +1,29 @@ +{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-sonicwall:delete', 365) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: delete_indices + description: >- + Delete sonicwall indices when older than {{ DELETE_DAYS }} days. 
+ options: + ignore_empty_list: True + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-sonicwall.*|so-sonicwall.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ DELETE_DAYS }} + exclude: + + \ No newline at end of file diff --git a/salt/curator/files/action/so-sonicwall-warm.yml b/salt/curator/files/action/so-sonicwall-warm.yml new file mode 100644 index 000000000..fa8ceb3e4 --- /dev/null +++ b/salt/curator/files/action/so-sonicwall-warm.yml @@ -0,0 +1,24 @@ +{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-sonicwall:warm', 7) -%} +actions: + 1: + action: allocation + description: "Apply shard allocation filtering rules to the specified indices" + options: + key: box_type + value: warm + allocation_type: require + wait_for_completion: true + timeout_override: + continue_if_exception: false + disable_action: false + filters: + - filtertype: pattern + kind: prefix + value: so-sonicwall + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ WARM_DAYS }} + diff --git a/salt/curator/files/action/so-sophos-close.yml b/salt/curator/files/action/so-sophos-close.yml new file mode 100644 index 000000000..b7574b996 --- /dev/null +++ b/salt/curator/files/action/so-sophos-close.yml @@ -0,0 +1,29 @@ +{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-sophos:close', 30) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: close + description: >- + Close sophos indices older than {{cur_close_days}} days. + options: + delete_aliases: False + timeout_override: + continue_if_exception: False + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-sophos.*|so-sophos.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{cur_close_days}} + exclude: diff --git a/salt/curator/files/action/so-sophos-delete.yml b/salt/curator/files/action/so-sophos-delete.yml new file mode 100644 index 000000000..433df908a --- /dev/null +++ b/salt/curator/files/action/so-sophos-delete.yml @@ -0,0 +1,29 @@ +{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-sophos:delete', 365) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: delete_indices + description: >- + Delete sophos indices when older than {{ DELETE_DAYS }} days. 
+ options: + ignore_empty_list: True + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-sophos.*|so-sophos.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ DELETE_DAYS }} + exclude: + + \ No newline at end of file diff --git a/salt/curator/files/action/so-sophos-warm.yml b/salt/curator/files/action/so-sophos-warm.yml new file mode 100644 index 000000000..40cc60084 --- /dev/null +++ b/salt/curator/files/action/so-sophos-warm.yml @@ -0,0 +1,24 @@ +{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-sophos:warm', 7) -%} +actions: + 1: + action: allocation + description: "Apply shard allocation filtering rules to the specified indices" + options: + key: box_type + value: warm + allocation_type: require + wait_for_completion: true + timeout_override: + continue_if_exception: false + disable_action: false + filters: + - filtertype: pattern + kind: prefix + value: so-sophos + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ WARM_DAYS }} + diff --git a/salt/curator/files/action/so-strelka-delete.yml b/salt/curator/files/action/so-strelka-delete.yml new file mode 100644 index 000000000..3487aeb6d --- /dev/null +++ b/salt/curator/files/action/so-strelka-delete.yml @@ -0,0 +1,29 @@ +{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-strelka:delete', 365) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: delete_indices + description: >- + Delete Strelka indices when older than {{ DELETE_DAYS }} days. + options: + ignore_empty_list: True + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-strelka.*|so-strelka.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ DELETE_DAYS }} + exclude: + + \ No newline at end of file diff --git a/salt/curator/files/action/so-strelka-warm.yml b/salt/curator/files/action/so-strelka-warm.yml new file mode 100644 index 000000000..cfa88b0c1 --- /dev/null +++ b/salt/curator/files/action/so-strelka-warm.yml @@ -0,0 +1,24 @@ +{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-strelka:warm', 7) -%} +actions: + 1: + action: allocation + description: "Apply shard allocation filtering rules to the specified indices" + options: + key: box_type + value: warm + allocation_type: require + wait_for_completion: true + timeout_override: + continue_if_exception: false + disable_action: false + filters: + - filtertype: pattern + kind: prefix + value: so-strelka + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ WARM_DAYS }} + diff --git a/salt/curator/files/action/so-syslog-delete.yml b/salt/curator/files/action/so-syslog-delete.yml new file mode 100644 index 000000000..5fe7417ad --- /dev/null +++ b/salt/curator/files/action/so-syslog-delete.yml @@ -0,0 +1,29 @@ +{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-syslog:delete', 365) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. 
If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: delete_indices + description: >- + Delete syslog indices when older than {{ DELETE_DAYS }} days. + options: + ignore_empty_list: True + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-syslog.*|so-syslog.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ DELETE_DAYS }} + exclude: + + \ No newline at end of file diff --git a/salt/curator/files/action/so-syslog-warm.yml b/salt/curator/files/action/so-syslog-warm.yml new file mode 100644 index 000000000..e5ebb2fa6 --- /dev/null +++ b/salt/curator/files/action/so-syslog-warm.yml @@ -0,0 +1,24 @@ +{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-syslog:warm', 7) -%} +actions: + 1: + action: allocation + description: "Apply shard allocation filtering rules to the specified indices" + options: + key: box_type + value: warm + allocation_type: require + wait_for_completion: true + timeout_override: + continue_if_exception: false + disable_action: false + filters: + - filtertype: pattern + kind: prefix + value: so-syslog + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ WARM_DAYS }} + diff --git a/salt/curator/files/action/so-tomcat-close.yml b/salt/curator/files/action/so-tomcat-close.yml new file mode 100644 index 000000000..ea0d95b0a --- /dev/null +++ b/salt/curator/files/action/so-tomcat-close.yml @@ -0,0 +1,29 @@ +{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-tomcat:close', 30) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: close + description: >- + Close tomcat indices older than {{cur_close_days}} days. + options: + delete_aliases: False + timeout_override: + continue_if_exception: False + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-tomcat.*|so-tomcat.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{cur_close_days}} + exclude: diff --git a/salt/curator/files/action/so-tomcat-delete.yml b/salt/curator/files/action/so-tomcat-delete.yml new file mode 100644 index 000000000..77035613f --- /dev/null +++ b/salt/curator/files/action/so-tomcat-delete.yml @@ -0,0 +1,29 @@ +{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-tomcat:delete', 365) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: delete_indices + description: >- + Delete tomcat indices when older than {{ DELETE_DAYS }} days. 
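The *-warm.yml actions above all use Curator's allocation action to require `box_type: warm` on indices older than the cutoff, which causes Elasticsearch to relocate their shards to warm nodes. One way to spot-check that the setting was applied is to query the index settings directly (a sketch only; ES_USER and ES_PASS are placeholders, and the host, port, and TLS options assume the local Elasticsearch endpoint configured elsewhere in this change):

```bash
# Show which so-* indices have been pinned to warm storage.
# -k skips certificate verification; substitute real credentials for ES_USER/ES_PASS.
curl -sk -u "ES_USER:ES_PASS" \
  "https://localhost:9200/so-*/_settings/index.routing.allocation.require.box_type?flat_settings=true"
```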
+ options: + ignore_empty_list: True + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-tomcat.*|so-tomcat.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ DELETE_DAYS }} + exclude: + + \ No newline at end of file diff --git a/salt/curator/files/action/so-tomcat-warm.yml b/salt/curator/files/action/so-tomcat-warm.yml new file mode 100644 index 000000000..8fb7884c0 --- /dev/null +++ b/salt/curator/files/action/so-tomcat-warm.yml @@ -0,0 +1,24 @@ +{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-tomcat:warm', 7) -%} +actions: + 1: + action: allocation + description: "Apply shard allocation filtering rules to the specified indices" + options: + key: box_type + value: warm + allocation_type: require + wait_for_completion: true + timeout_override: + continue_if_exception: false + disable_action: false + filters: + - filtertype: pattern + kind: prefix + value: so-tomcat + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ WARM_DAYS }} + diff --git a/salt/curator/files/action/so-zeek-delete.yml b/salt/curator/files/action/so-zeek-delete.yml new file mode 100644 index 000000000..0694c2aed --- /dev/null +++ b/salt/curator/files/action/so-zeek-delete.yml @@ -0,0 +1,29 @@ +{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-zeek:delete', 365) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: delete_indices + description: >- + Delete Zeek indices when older than {{ DELETE_DAYS }} days. + options: + ignore_empty_list: True + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-zeek.*|so-zeek.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ DELETE_DAYS }} + exclude: + + \ No newline at end of file diff --git a/salt/curator/files/action/so-zeek-warm.yml b/salt/curator/files/action/so-zeek-warm.yml new file mode 100644 index 000000000..2b4b6a729 --- /dev/null +++ b/salt/curator/files/action/so-zeek-warm.yml @@ -0,0 +1,24 @@ +{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-zeek:warm', 7) -%} +actions: + 1: + action: allocation + description: "Apply shard allocation filtering rules to the specified indices" + options: + key: box_type + value: warm + allocation_type: require + wait_for_completion: true + timeout_override: + continue_if_exception: false + disable_action: false + filters: + - filtertype: pattern + kind: prefix + value: so-zeek + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ WARM_DAYS }} + diff --git a/salt/curator/files/action/so-zscaler-close.yml b/salt/curator/files/action/so-zscaler-close.yml new file mode 100644 index 000000000..5a008a27d --- /dev/null +++ b/salt/curator/files/action/so-zscaler-close.yml @@ -0,0 +1,29 @@ +{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-zscaler:close', 30) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. 
If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: close + description: >- + Close zscaler indices older than {{cur_close_days}} days. + options: + delete_aliases: False + timeout_override: + continue_if_exception: False + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-zscaler.*|so-zscaler.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{cur_close_days}} + exclude: diff --git a/salt/curator/files/action/so-zscaler-delete.yml b/salt/curator/files/action/so-zscaler-delete.yml new file mode 100644 index 000000000..238fea083 --- /dev/null +++ b/salt/curator/files/action/so-zscaler-delete.yml @@ -0,0 +1,29 @@ +{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-zscaler:delete', 365) -%} +--- +# Remember, leave a key empty if there is no value. None will be a string, +# not a Python "NoneType" +# +# Also remember that all examples have 'disable_action' set to True. If you +# want to use this action as a template, be sure to set this to False after +# copying it. +actions: + 1: + action: delete_indices + description: >- + Delete zscaler indices when older than {{ DELETE_DAYS }} days. + options: + ignore_empty_list: True + disable_action: False + filters: + - filtertype: pattern + kind: regex + value: '^(logstash-zscaler.*|so-zscaler.*)$' + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ DELETE_DAYS }} + exclude: + + \ No newline at end of file diff --git a/salt/curator/files/action/so-zscaler-warm.yml b/salt/curator/files/action/so-zscaler-warm.yml new file mode 100644 index 000000000..8a7d8187a --- /dev/null +++ b/salt/curator/files/action/so-zscaler-warm.yml @@ -0,0 +1,24 @@ +{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-zscaler:warm', 7) -%} +actions: + 1: + action: allocation + description: "Apply shard allocation filtering rules to the specified indices" + options: + key: box_type + value: warm + allocation_type: require + wait_for_completion: true + timeout_override: + continue_if_exception: false + disable_action: false + filters: + - filtertype: pattern + kind: prefix + value: so-zscaler + - filtertype: age + source: name + direction: older + timestring: '%Y.%m.%d' + unit: days + unit_count: {{ WARM_DAYS }} + diff --git a/salt/curator/files/action/warm.yml b/salt/curator/files/action/warm.yml deleted file mode 100644 index efa2196b2..000000000 --- a/salt/curator/files/action/warm.yml +++ /dev/null @@ -1,23 +0,0 @@ -#actions: -# 1: -# action: allocation -# description: "Apply shard allocation filtering rules to the specified indices" -# options: -# key: box_type -# value: warm -# allocation_type: require -# wait_for_completion: true -# timeout_override: -# continue_if_exception: false -# disable_action: false -# filters: -# - filtertype: pattern -# kind: prefix -# value: so- -# - filtertype: age -# source: name -# direction: older -# timestring: '%Y.%m.%d' -# unit: days -# unit_count: 3 - diff --git a/salt/curator/files/bin/so-curator-close b/salt/curator/files/bin/so-curator-close index 6f7b96790..c1e940b4d 100644 --- a/salt/curator/files/bin/so-curator-close +++ b/salt/curator/files/bin/so-curator-close @@ -23,5 +23,22 @@ read lastPID < $lf # if lastPID is not null and a process with that pid exists , exit [ ! 
-z "$lastPID" -a -d /proc/$lastPID ] && exit echo $$ > $lf +{% from 'filebeat/map.jinja' import THIRDPARTY with context %} +{% from 'filebeat/map.jinja' import SO with context %} -/usr/sbin/so-curator-closed-delete > /dev/null 2>&1; docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-zeek-close.yml > /dev/null 2>&1; docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-beats-close.yml > /dev/null 2>&1; docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-firewall-close.yml > /dev/null 2>&1; docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-ids-close.yml > /dev/null 2>&1; docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-import-close.yml > /dev/null 2>&1; docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-osquery-close.yml > /dev/null 2>&1; docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-ossec-close.yml > /dev/null 2>&1; docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-strelka-close.yml > /dev/null 2>&1; docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-syslog-close.yml > /dev/null 2>&1 +/usr/sbin/so-curator-closed-delete > /dev/null 2>&1; +docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-zeek-close.yml > /dev/null 2>&1; +docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-beats-close.yml > /dev/null 2>&1; +docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-firewall-close.yml > /dev/null 2>&1; +docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-ids-close.yml > /dev/null 2>&1; +docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-import-close.yml > /dev/null 2>&1; +docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-osquery-close.yml > /dev/null 2>&1; +docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-ossec-close.yml > /dev/null 2>&1; +docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-strelka-close.yml > /dev/null 2>&1; +docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-syslog-close.yml > /dev/null 2>&1; +{% for INDEX in THIRDPARTY.modules.keys() -%} +docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-{{ INDEX }}-close.yml > /dev/null 2>&1; +{% endfor -%} +{% for INDEX in SO.modules.keys() -%} +docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-{{ INDEX }}-close.yml > /dev/null 2>&1{% if not loop.last %};{% endif %} +{% endfor -%} diff --git a/salt/curator/files/bin/so-curator-cluster-close b/salt/curator/files/bin/so-curator-cluster-close new file mode 100644 index 000000000..7b8c41c9e --- /dev/null +++ b/salt/curator/files/bin/so-curator-cluster-close @@ -0,0 +1,44 @@ +#!/bin/bash +# +# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free 
Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +APP=close +lf=/tmp/$APP-pidLockFile +# create empty lock file if none exists +cat /dev/null >> $lf +read lastPID < $lf +# if lastPID is not null and a process with that pid exists , exit +[ ! -z "$lastPID" -a -d /proc/$lastPID ] && exit +echo $$ > $lf + +{% from 'filebeat/map.jinja' import THIRDPARTY with context %} +{% from 'filebeat/map.jinja' import SO with context %} + +docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-zeek-close.yml > /dev/null 2>&1; +docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-beats-close.yml > /dev/null 2>&1; +docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-firewall-close.yml > /dev/null 2>&1; +docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-ids-close.yml > /dev/null 2>&1; +docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-import-close.yml > /dev/null 2>&1; +docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-osquery-close.yml > /dev/null 2>&1; +docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-ossec-close.yml > /dev/null 2>&1; +docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-strelka-close.yml > /dev/null 2>&1; +docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-syslog-close.yml > /dev/null 2>&1; +{% for INDEX in THIRDPARTY.modules.keys() -%} +docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-{{ INDEX }}-close.yml > /dev/null 2>&1; +{% endfor -%} +{% for INDEX in SO.modules.keys() -%} +docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-{{ INDEX }}-close.yml > /dev/null 2>&1{% if not loop.last %};{% endif %} +{% endfor -%} \ No newline at end of file diff --git a/salt/curator/files/bin/so-curator-cluster-delete b/salt/curator/files/bin/so-curator-cluster-delete new file mode 100644 index 000000000..21195da5d --- /dev/null +++ b/salt/curator/files/bin/so-curator-cluster-delete @@ -0,0 +1,44 @@ +#!/bin/bash +# +# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
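Rather than hard-coding every third-party index, the new so-curator-cluster-* scripts loop over the Filebeat module maps (THIRDPARTY and SO from filebeat/map.jinja) and emit one curator invocation per module. Rendered, the tail of so-curator-cluster-close would look roughly like the lines below (a sketch; the real module list comes from the map, and netflow/sonicwall are simply examples drawn from the action files added in this change):

```bash
# Hypothetical rendering of the Jinja loop for two modules (names are illustrative).
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-netflow-close.yml > /dev/null 2>&1;
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-sonicwall-close.yml > /dev/null 2>&1;
```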
+ +APP=delete +lf=/tmp/$APP-pidLockFile +# create empty lock file if none exists +cat /dev/null >> $lf +read lastPID < $lf +# if lastPID is not null and a process with that pid exists , exit +[ ! -z "$lastPID" -a -d /proc/$lastPID ] && exit +echo $$ > $lf + +{% from 'filebeat/map.jinja' import THIRDPARTY with context %} +{% from 'filebeat/map.jinja' import SO with context %} + +docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-zeek-delete.yml > /dev/null 2>&1; +docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-beats-delete.yml > /dev/null 2>&1; +docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-firewall-delete.yml > /dev/null 2>&1; +docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-ids-delete.yml > /dev/null 2>&1; +docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-import-delete.yml > /dev/null 2>&1; +docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-osquery-delete.yml > /dev/null 2>&1; +docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-ossec-delete.yml > /dev/null 2>&1; +docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-strelka-delete.yml > /dev/null 2>&1; +docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-syslog-delete.yml > /dev/null 2>&1; +{% for INDEX in THIRDPARTY.modules.keys() -%} +docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-{{ INDEX }}-delete.yml > /dev/null 2>&1; +{% endfor -%} +{% for INDEX in SO.modules.keys() -%} +docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-{{ INDEX }}-delete.yml > /dev/null 2>&1{% if not loop.last %};{% endif %} +{% endfor -%} \ No newline at end of file diff --git a/salt/curator/files/bin/so-curator-cluster-warm b/salt/curator/files/bin/so-curator-cluster-warm new file mode 100644 index 000000000..4348ba2e3 --- /dev/null +++ b/salt/curator/files/bin/so-curator-cluster-warm @@ -0,0 +1,44 @@ +#!/bin/bash +# +# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +APP=warm +lf=/tmp/$APP-pidLockFile +# create empty lock file if none exists +cat /dev/null >> $lf +read lastPID < $lf +# if lastPID is not null and a process with that pid exists , exit +[ ! 
-z "$lastPID" -a -d /proc/$lastPID ] && exit +echo $$ > $lf + +{% from 'filebeat/map.jinja' import THIRDPARTY with context %} +{% from 'filebeat/map.jinja' import SO with context %} + +docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-zeek-warm.yml > /dev/null 2>&1; +docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-beats-warm.yml > /dev/null 2>&1; +docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-firewall-warm.yml > /dev/null 2>&1; +docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-ids-warm.yml > /dev/null 2>&1; +docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-import-warm.yml > /dev/null 2>&1; +docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-osquery-warm.yml > /dev/null 2>&1; +docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-ossec-warm.yml > /dev/null 2>&1; +docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-strelka-warm.yml > /dev/null 2>&1; +docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-syslog-warm.yml > /dev/null 2>&1; +{% for INDEX in THIRDPARTY.modules.keys() -%} +docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-{{ INDEX }}-warm.yml > /dev/null 2>&1; +{% endfor -%} +{% for INDEX in SO.modules.keys() -%} +docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-{{ INDEX }}-warm.yml > /dev/null 2>&1{% if not loop.last %};{% endif %} +{% endfor -%} \ No newline at end of file diff --git a/salt/curator/files/curator.yml b/salt/curator/files/curator.yml index 15ecb8cb1..5ec4bdc5f 100644 --- a/salt/curator/files/curator.yml +++ b/salt/curator/files/curator.yml @@ -19,7 +19,8 @@ client: - {{elasticsearch}} port: 9200 {%- if salt['pillar.get']('elasticsearch:auth:enabled') is sameas true %} - http_auth: {{ ES_USER }}:{{ ES_PASS }} + username: {{ ES_USER }} + password: {{ ES_PASS }} {%- endif %} url_prefix: use_ssl: True diff --git a/salt/curator/init.sls b/salt/curator/init.sls index 1acea5d25..c604571ba 100644 --- a/salt/curator/init.sls +++ b/salt/curator/init.sls @@ -5,6 +5,9 @@ {% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} {% set MANAGER = salt['grains.get']('master') %} {% set REMOVECURATORCRON = False %} +{% set TRUECLUSTER = salt['pillar.get']('elasticsearch:true_cluster', False) %} +{% set HOTWARM = salt['pillar.get']('elasticsearch:hot_warm_enabled', False) %} + {% if grains['role'] in ['so-eval', 'so-node', 'so-managersearch', 'so-heavynode', 'so-standalone', 'so-manager'] %} {% from 'elasticsearch/auth.map.jinja' import ELASTICAUTH with context %} {% from "curator/map.jinja" import CURATOROPTIONS with context %} @@ -80,6 +83,7 @@ curclose: - user: 934 - group: 939 - mode: 755 + - template: jinja curdel: file.managed: @@ -89,6 +93,33 @@ curdel: - group: 939 - mode: 755 +curclusterclose: + file.managed: + - name: /usr/sbin/so-curator-cluster-close + - source: salt://curator/files/bin/so-curator-cluster-close + - user: 934 + - group: 939 + - mode: 755 + - template: jinja + +curclusterdelete: + file.managed: + - name: /usr/sbin/so-curator-cluster-delete + - source: salt://curator/files/bin/so-curator-cluster-delete + - user: 934 + - group: 939 + - mode: 755 + - template: jinja + 
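The TRUECLUSTER and HOTWARM flags set at the top of this state are read from the elasticsearch:true_cluster and elasticsearch:hot_warm_enabled pillar keys and decide, further down in this state, whether the new cluster-wide curator cron jobs are scheduled. A quick way to confirm the values a node sees, and to exercise one of the new wrappers by hand, might look like the sketch below (assumes a normal Salt minion and the script/log paths managed by this state; not part of the diff itself).

```
# Sketch, not part of this diff: check the pillar flags referenced above, then run the
# cluster-wide close job once manually, writing to the same log used by the cron entry.
salt-call pillar.get elasticsearch:true_cluster
salt-call pillar.get elasticsearch:hot_warm_enabled
sudo /usr/sbin/so-curator-cluster-close > /opt/so/log/curator/cron-close.log 2>&1
```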
+curclustercwarm: + file.managed: + - name: /usr/sbin/so-curator-cluster-warm + - source: salt://curator/files/bin/so-curator-cluster-warm + - user: 934 + - group: 939 + - mode: 755 + - template: jinja + so-curator: docker_container.{{ CURATOROPTIONS.status }}: {% if CURATOROPTIONS.status == 'running' %} @@ -148,62 +179,87 @@ delete_so-curator_so-status: {% if REMOVECURATORCRON %} so-curatorcloseddeletecron: - cron.absent: - - name: /usr/sbin/so-curator-closed-delete > /opt/so/log/curator/cron-closed-delete.log 2>&1 - - user: root + cron.absent: + - name: /usr/sbin/so-curator-closed-delete > /opt/so/log/curator/cron-closed-delete.log 2>&1 + - user: root so-curatorclosecron: - cron.absent: - - name: /usr/sbin/so-curator-close > /opt/so/log/curator/cron-close.log 2>&1 - - user: root + cron.absent: + - name: /usr/sbin/so-curator-close > /opt/so/log/curator/cron-close.log 2>&1 + - user: root so-curatordeletecron: - cron.absent: - - name: /usr/sbin/so-curator-delete > /opt/so/log/curator/cron-delete.log 2>&1 - - user: root + cron.absent: + - name: /usr/sbin/so-curator-delete > /opt/so/log/curator/cron-delete.log 2>&1 + - user: root {% else %} + {% if TRUECLUSTER is sameas true %} +so-curatorclusterclose: + cron.present: + - name: /usr/sbin/so-curator-cluster-close > /opt/so/log/curator/cron-close.log 2>&1 + - user: root + - minute: '2' + - hour: '*/1' + - daymonth: '*' + - month: '*' + - dayweek: '*' + +so-curatorclusterdelete: + cron.present: + - name: /usr/sbin/so-curator-cluster-delete > /opt/so/log/curator/cron-delete.log 2>&1 + - user: root + - minute: '2' + - hour: '*/1' + - daymonth: '*' + - month: '*' + - dayweek: '*' + {% if HOTWARM is sameas true %} +so-curatorclusterwarm: + cron.present: + - name: /usr/sbin/so-curator-cluster-warm > /opt/so/log/curator/cron-warm.log 2>&1 + - user: root + - minute: '2' + - hour: '*/1' + - daymonth: '*' + - month: '*' + - dayweek: '*' + {% endif %} + + {% else %} so-curatorcloseddeletecron: - cron.present: - - name: /usr/sbin/so-curator-closed-delete > /opt/so/log/curator/cron-closed-delete.log 2>&1 - - user: root - - minute: '*' - - hour: '*' - - daymonth: '*' - - month: '*' - - dayweek: '*' + cron.present: + - name: /usr/sbin/so-curator-closed-delete > /opt/so/log/curator/cron-closed-delete.log 2>&1 + - user: root + - minute: '*/5' + - hour: '*' + - daymonth: '*' + - month: '*' + - dayweek: '*' so-curatorclosecron: - cron.present: - - name: /usr/sbin/so-curator-close > /opt/so/log/curator/cron-close.log 2>&1 - - user: root - - minute: '*' - - hour: '*' - - daymonth: '*' - - month: '*' - - dayweek: '*' + cron.present: + - name: /usr/sbin/so-curator-close > /opt/so/log/curator/cron-close.log 2>&1 + - user: root + - minute: '*/5' + - hour: '*' + - daymonth: '*' + - month: '*' + - dayweek: '*' so-curatordeletecron: - cron.present: - - name: /usr/sbin/so-curator-delete > /opt/so/log/curator/cron-delete.log 2>&1 - - user: root - - minute: '*' - - hour: '*' - - daymonth: '*' - - month: '*' - - dayweek: '*' - + cron.present: + - name: /usr/sbin/so-curator-delete > /opt/so/log/curator/cron-delete.log 2>&1 + - user: root + - minute: '*/5' + - hour: '*' + - daymonth: '*' + - month: '*' + - dayweek: '*' + + {% endif %} {% endif %} - -# Begin Curator Cron Jobs - -# Close -# Delete -# Hot Warm -# Segment Merge - -# End Curator Cron Jobs {% endif %} {% else %} diff --git a/salt/curator/map.jinja b/salt/curator/map.jinja index 470143a4f..bbcff2a9c 100644 --- a/salt/curator/map.jinja +++ b/salt/curator/map.jinja @@ -4,10 +4,10 @@ {% do 
CURATOROPTIONS.update({'manage_sostatus': True}) %} # don't start the docker container if curator is disabled via pillar -{% if not ENABLED or grains.id.split('_')|last == 'manager'%} +{% if not ENABLED or (TRUECLUSTER and grains.id.split('_')|last == 'searchnode') or (not TRUECLUSTER and grains.id.split('_')|last == 'manager') %} {% do CURATOROPTIONS.update({'start': False}) %} {% do CURATOROPTIONS.update({'status': 'absent'}) %} - {% if grains.id.split('_')|last == 'manager' %} + {% if (TRUECLUSTER and grains.id.split('_')|last == 'searchnode') or (not TRUECLUSTER and grains.id.split('_')|last == 'manager') %} {% do CURATOROPTIONS.update({'manage_sostatus': False}) %} {% endif %} {% else %} diff --git a/salt/elasticsearch/config.map.jinja b/salt/elasticsearch/config.map.jinja new file mode 100644 index 000000000..1ca729143 --- /dev/null +++ b/salt/elasticsearch/config.map.jinja @@ -0,0 +1,26 @@ +{% import_yaml 'elasticsearch/defaults.yaml' as ESCONFIG with context %} + +{% if not salt['pillar.get']('elasticsearch:auth:enabled', False) %} + {% do ESCONFIG.elasticsearch.config.xpack.security.authc.anonymous.update({'username': 'anonymous_user', 'roles': 'superuser', 'authz_exception': 'true'}) %} +{% endif %} + +{% if salt['pillar.get']('elasticsearch:true_cluster', False) %} + {% if grains.id.split('_') | last in ['manager','managersearch'] %} + {% if salt['pillar.get']('nodestab', {}) %} + {% do ESCONFIG.elasticsearch.config.node.update({'roles': ['master', 'data', 'remote_cluster_client']}) %} + {% do ESCONFIG.elasticsearch.config.update({'discovery': {'seed_hosts': [grains.master]}}) %} + {% for SN, SNDATA in salt['pillar.get']('nodestab', {}).items() %} + {% do ESCONFIG.elasticsearch.config.discovery.seed_hosts.append(SN.split('_')|first) %} + {% endfor %} + {% endif %} + {% if grains.id.split('_') | last == 'manager' %} + {% do ESCONFIG.elasticsearch.config.node.attr.update({'box_type': ''}) %} + {% endif %} + {% else %} + {% do ESCONFIG.elasticsearch.config.node.update({'roles': ['data', 'ingest']}) %} + {% do ESCONFIG.elasticsearch.config.node.attr.update({'box_type': 'hot'}) %} + {% do ESCONFIG.elasticsearch.config.update({'discovery': {'seed_hosts': [grains.master]}}) %} + {% endif %} +{% endif %} + +{% set ESCONFIG = salt['pillar.get']('elasticsearch:config', default=ESCONFIG.elasticsearch.config, merge=True) %} diff --git a/salt/elasticsearch/defaults.yaml b/salt/elasticsearch/defaults.yaml new file mode 100644 index 000000000..e119fb62d --- /dev/null +++ b/salt/elasticsearch/defaults.yaml @@ -0,0 +1,62 @@ +elasticsearch: + config: + node: + name: {{ grains.host }} + attr: + box_type: hot + cluster: + name: {{ grains.host }} + routing: + allocation: + disk: + threshold_enabled: true + watermark: + low: 95% + high: 98% + flood_stage: 98% + network: + host: 0.0.0.0 + path: + logs: /var/log/elasticsearch + action: + destructive_requires_name: true + transport: + bind_host: 0.0.0.0 + publish_host: {{ grains.host }} + publish_port: 9300 + xpack: + ml: + enabled: false + security: + enabled: true + authc: + anonymous: + authz_exception: true + roles: [] + username: _anonymous + transport: + ssl: + enabled: true + verification_mode: none + key: /usr/share/elasticsearch/config/elasticsearch.key + certificate: /usr/share/elasticsearch/config/elasticsearch.crt + certificate_authorities: + - /usr/share/elasticsearch/config/ca.crt + http: + ssl: + enabled: true + client_authentication: none + key: /usr/share/elasticsearch/config/elasticsearch.key + certificate: 
/usr/share/elasticsearch/config/elasticsearch.crt + certificate_authorities: + - /usr/share/elasticsearch/config/ca.crt + script: + max_compilations_rate: 20000/1m + indices: + query: + bool: + max_clause_count: 1500 + + + + diff --git a/salt/elasticsearch/files/elasticsearch.yaml.jinja b/salt/elasticsearch/files/elasticsearch.yaml.jinja new file mode 100644 index 000000000..caf13173f --- /dev/null +++ b/salt/elasticsearch/files/elasticsearch.yaml.jinja @@ -0,0 +1 @@ +{{ ESCONFIG | yaml(False) }} diff --git a/salt/elasticsearch/files/elasticsearch.yml b/salt/elasticsearch/files/elasticsearch.yml deleted file mode 100644 index 518cd74e9..000000000 --- a/salt/elasticsearch/files/elasticsearch.yml +++ /dev/null @@ -1,72 +0,0 @@ -{%- set NODE_ROUTE_TYPE = salt['pillar.get']('elasticsearch:node_route_type', 'hot') %} -{%- set NODEIP = salt['pillar.get']('elasticsearch:mainip') %} -{%- set TRUECLUSTER = salt['pillar.get']('elasticsearch:true_cluster', False) %} -{%- if TRUECLUSTER is sameas true %} - {%- set ESCLUSTERNAME = salt['pillar.get']('elasticsearch:true_cluster_name') %} -{%- else %} - {%- set ESCLUSTERNAME = salt['pillar.get']('elasticsearch:esclustername') %} -{%- endif %} -{%- set NODE_ROLES = salt['pillar.get']('elasticsearch:node_roles', ['data', 'ingest']) %} -cluster.name: "{{ ESCLUSTERNAME }}" -network.host: 0.0.0.0 -path.logs: /var/log/elasticsearch -action.destructive_requires_name: true -transport.bind_host: 0.0.0.0 -transport.publish_host: {{ grains.host }} -transport.publish_port: 9300 -cluster.routing.allocation.disk.threshold_enabled: true -cluster.routing.allocation.disk.watermark.low: 95% -cluster.routing.allocation.disk.watermark.high: 98% -cluster.routing.allocation.disk.watermark.flood_stage: 98% -xpack.ml.enabled: false -xpack.security.enabled: true -xpack.security.transport.ssl.enabled: true -xpack.security.transport.ssl.verification_mode: none -xpack.security.transport.ssl.key: /usr/share/elasticsearch/config/elasticsearch.key -xpack.security.transport.ssl.certificate: /usr/share/elasticsearch/config/elasticsearch.crt -xpack.security.transport.ssl.certificate_authorities: [ "/usr/share/elasticsearch/config/ca.crt" ] -xpack.security.http.ssl.enabled: true -xpack.security.http.ssl.client_authentication: none -xpack.security.http.ssl.key: /usr/share/elasticsearch/config/elasticsearch.key -xpack.security.http.ssl.certificate: /usr/share/elasticsearch/config/elasticsearch.crt -xpack.security.http.ssl.certificate_authorities: /usr/share/elasticsearch/config/ca.crt -{% if not salt['pillar.get']('elasticsearch:auth:enabled', False) %} -xpack.security.authc: - anonymous: - username: anonymous_user - roles: superuser - authz_exception: true -{% endif %} -node.name: {{ grains.host }} -script.max_compilations_rate: 20000/1m -{%- if TRUECLUSTER is sameas true %} - {%- if grains.role == 'so-manager' %} - {%- if salt['pillar.get']('nodestab', {}) %} -node.roles: [ master, data, remote_cluster_client ] -discovery.seed_hosts: - - {{ grains.master }} - {%- for SN, SNDATA in salt['pillar.get']('nodestab', {}).items() %} - - {{ SN.split('_')|first }} - {%- endfor %} - {%- endif %} - {%- elif grains.role == 'so-managersearch' %} - {%- if salt['pillar.get']('nodestab', {}) %} -node.roles: [ master, data, remote_cluster_client ] -discovery.seed_hosts: - - {{ grains.master }} - {%- for SN, SNDATA in salt['pillar.get']('nodestab', {}).items() %} - - {{ SN.split('_')|first }} - {%- endfor %} - {%- endif %} -node.attr.box_type: {{ NODE_ROUTE_TYPE }} - {%- else %} -node.roles: {{ NODE_ROLES 
}} -node.attr.box_type: {{ NODE_ROUTE_TYPE }} -discovery.seed_hosts: - - {{ grains.master }} - {%- endif %} -{%- endif %} -{%- if TRUECLUSTER is sameas false %} -node.attr.box_type: {{ NODE_ROUTE_TYPE }} -{%- endif %} -indices.query.bool.max_clause_count: 1500 diff --git a/salt/elasticsearch/files/ingest/import.wel b/salt/elasticsearch/files/ingest/import.wel index 5a04324b7..e75098f8f 100644 --- a/salt/elasticsearch/files/ingest/import.wel +++ b/salt/elasticsearch/files/ingest/import.wel @@ -4,6 +4,7 @@ { "remove": { "field": ["event.created","timestamp", "winlog.event_data.UtcTime", "event_record_id"], "ignore_failure": true } }, { "pipeline": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational'", "name": "sysmon" } }, { "pipeline": { "if": "ctx.winlog?.channel != 'Microsoft-Windows-Sysmon/Operational'", "name":"win.eventlogs" } }, + { "dissect": { "field": "log.file.name", "pattern" : "/tmp/%{import.id}.evtx" } }, { "pipeline": { "name": "common" } } ] } \ No newline at end of file diff --git a/salt/elasticsearch/init.sls b/salt/elasticsearch/init.sls index 4045fa10f..9f475c2c3 100644 --- a/salt/elasticsearch/init.sls +++ b/salt/elasticsearch/init.sls @@ -15,6 +15,8 @@ {% from 'allowed_states.map.jinja' import allowed_states %} {% if sls in allowed_states %} + + {% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %} {% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} {% set MANAGER = salt['grains.get']('master') %} @@ -35,7 +37,9 @@ {% endif %} {% set TEMPLATES = salt['pillar.get']('elasticsearch:templates', {}) %} +{% set ROLES = salt['pillar.get']('elasticsearch:roles', {}) %} {% from 'elasticsearch/auth.map.jinja' import ELASTICAUTH with context %} +{% from 'elasticsearch/config.map.jinja' import ESCONFIG with context %} vm.max_map_count: @@ -119,6 +123,13 @@ estemplatedir: - group: 939 - makedirs: True +esrolesdir: + file.directory: + - name: /opt/so/conf/elasticsearch/roles + - user: 930 + - group: 939 + - makedirs: True + esingestconf: file.recurse: - name: /opt/so/conf/elasticsearch/ingest @@ -137,9 +148,11 @@ eslog4jfile: esyml: file.managed: - name: /opt/so/conf/elasticsearch/elasticsearch.yml - - source: salt://elasticsearch/files/elasticsearch.yml + - source: salt://elasticsearch/files/elasticsearch.yaml.jinja - user: 930 - group: 939 + - defaults: + ESCONFIG: {{ ESCONFIG }} - template: jinja #sync templates to /opt/so/conf/elasticsearch/templates @@ -157,6 +170,15 @@ es_template_{{TEMPLATE.split('.')[0] | replace("/","_") }}: - group: 939 {% endfor %} +esroles: + file.recurse: + - source: salt://elasticsearch/roles/ + - name: /opt/so/conf/elasticsearch/roles/ + - clean: True + - template: jinja + - user: 930 + - group: 939 + nsmesdir: file.directory: - name: /nsm/elasticsearch @@ -193,7 +215,7 @@ auth_users_inode: require: - file: auth_users cmd.run: - - name: cat /opt/so/conf/elasticsearch/users.tmp > /opt/so/conf/elasticsearch/users && chown 930:930 /opt/so/conf/elasticsearch/users && chmod 600 /opt/so/conf/elasticsearch/users + - name: cat /opt/so/conf/elasticsearch/users.tmp > /opt/so/conf/elasticsearch/users && chown 930:939 /opt/so/conf/elasticsearch/users && chmod 660 /opt/so/conf/elasticsearch/users - onchanges: - file: /opt/so/conf/elasticsearch/users.tmp @@ -201,7 +223,7 @@ auth_users_roles_inode: require: - file: auth_users_roles cmd.run: - - name: cat /opt/so/conf/elasticsearch/users_roles.tmp > /opt/so/conf/elasticsearch/users_roles && chown 930:930 /opt/so/conf/elasticsearch/users_roles && chmod 600 
/opt/so/conf/elasticsearch/users_roles + - name: cat /opt/so/conf/elasticsearch/users_roles.tmp > /opt/so/conf/elasticsearch/users_roles && chown 930:939 /opt/so/conf/elasticsearch/users_roles && chmod 660 /opt/so/conf/elasticsearch/users_roles - onchanges: - file: /opt/so/conf/elasticsearch/users_roles.tmp @@ -283,7 +305,7 @@ so-elasticsearch-pipelines: - file: esyml - file: so-elasticsearch-pipelines-file -{% if grains['role'] in ['so-manager', 'so-eval', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-node', 'so-import'] and TEMPLATES %} +{% if TEMPLATES %} so-elasticsearch-templates: cmd.run: - name: /usr/sbin/so-elasticsearch-templates-load @@ -291,6 +313,12 @@ so-elasticsearch-templates: - template: jinja {% endif %} +so-elasticsearch-roles-load: + cmd.run: + - name: /usr/sbin/so-elasticsearch-roles-load + - cwd: /opt/so + - template: jinja + {% endif %} {# if grains['role'] != 'so-helix' #} {% else %} diff --git a/salt/elasticsearch/roles/analyst.json b/salt/elasticsearch/roles/analyst.json new file mode 100644 index 000000000..fc788e2f1 --- /dev/null +++ b/salt/elasticsearch/roles/analyst.json @@ -0,0 +1,63 @@ +{ + "cluster": [ + "cancel_task", + "create_snapshot", + "monitor", + "monitor_data_frame_transforms", + "monitor_ml", + "monitor_rollup", + "monitor_snapshot", + "monitor_text_structure", + "monitor_transform", + "monitor_watcher", + "read_ccr", + "read_ilm", + "read_pipeline", + "read_slm" + ], + "indices": [ + { + "names": [ + "so-*" + ], + "privileges": [ + "index", + "maintenance", + "monitor", + "read", + "read_cross_cluster", + "view_index_metadata" + ] + } + ], + "applications": [ + { + "application": "kibana-.kibana", + "privileges": [ + "feature_discover.all", + "feature_dashboard.all", + "feature_canvas.all", + "feature_maps.all", + "feature_ml.all", + "feature_logs.read", + "feature_visualize.all", + "feature_infrastructure.read", + "feature_apm.read", + "feature_uptime.read", + "feature_siem.read", + "feature_dev_tools.read", + "feature_advancedSettings.read", + "feature_indexPatterns.read", + "feature_savedObjectsManagement.read", + "feature_savedObjectsTagging.read", + "feature_fleet.all", + "feature_actions.read", + "feature_stackAlerts.read" + ], + "resources": [ + "*" + ] + } + ], + "run_as": [] +} \ No newline at end of file diff --git a/salt/elasticsearch/roles/auditor.json b/salt/elasticsearch/roles/auditor.json new file mode 100644 index 000000000..26df1207c --- /dev/null +++ b/salt/elasticsearch/roles/auditor.json @@ -0,0 +1,59 @@ +{ + "cluster": [ + "monitor", + "monitor_data_frame_transforms", + "monitor_ml", + "monitor_rollup", + "monitor_snapshot", + "monitor_text_structure", + "monitor_transform", + "monitor_watcher", + "read_ccr", + "read_ilm", + "read_pipeline", + "read_slm" + ], + "indices": [ + { + "names": [ + "so-*" + ], + "privileges": [ + "read", + "read_cross_cluster", + "monitor", + "view_index_metadata" + ] + } + ], + "applications": [ + { + "application": "kibana-.kibana", + "privileges": [ + "feature_discover.read", + "feature_dashboard.read", + "feature_canvas.read", + "feature_maps.read", + "feature_ml.read", + "feature_logs.read", + "feature_visualize.read", + "feature_infrastructure.read", + "feature_apm.read", + "feature_uptime.read", + "feature_siem.read", + "feature_dev_tools.read", + "feature_advancedSettings.read", + "feature_indexPatterns.read", + "feature_savedObjectsManagement.read", + "feature_savedObjectsTagging.read", + "feature_fleet.read", + "feature_actions.read", + "feature_stackAlerts.read" + ], + 
"resources": [ + "*" + ] + } + ], + "run_as": [] +} \ No newline at end of file diff --git a/salt/elasticsearch/roles/limited-analyst.json b/salt/elasticsearch/roles/limited-analyst.json new file mode 100644 index 000000000..2b3797dbc --- /dev/null +++ b/salt/elasticsearch/roles/limited-analyst.json @@ -0,0 +1,49 @@ +{ + "cluster": [ + ], + "indices": [ + { + "names": [ + "so-*" + ], + "privileges": [ + "index", + "maintenance", + "monitor", + "read", + "read_cross_cluster", + "view_index_metadata" + ] + } + ], + "applications": [ + { + "application": "kibana-.kibana", + "privileges": [ + "feature_discover.read", + "feature_dashboard.read", + "feature_canvas.read", + "feature_maps.read", + "feature_ml.read", + "feature_logs.read", + "feature_visualize.read", + "feature_infrastructure.read", + "feature_apm.read", + "feature_uptime.read", + "feature_siem.read", + "feature_dev_tools.read", + "feature_advancedSettings.read", + "feature_indexPatterns.read", + "feature_savedObjectsManagement.read", + "feature_savedObjectsTagging.read", + "feature_fleet.read", + "feature_actions.read", + "feature_stackAlerts.read" + ], + "resources": [ + "*" + ] + } + ], + "run_as": [] +} \ No newline at end of file diff --git a/salt/elasticsearch/roles/limited-auditor.json b/salt/elasticsearch/roles/limited-auditor.json new file mode 100644 index 000000000..ecab5016a --- /dev/null +++ b/salt/elasticsearch/roles/limited-auditor.json @@ -0,0 +1,47 @@ +{ + "cluster": [ + ], + "indices": [ + { + "names": [ + "so-*" + ], + "privileges": [ + "read", + "read_cross_cluster", + "monitor", + "view_index_metadata" + ] + } + ], + "applications": [ + { + "application": "kibana-.kibana", + "privileges": [ + "feature_discover.read", + "feature_dashboard.read", + "feature_canvas.read", + "feature_maps.read", + "feature_ml.read", + "feature_logs.read", + "feature_visualize.read", + "feature_infrastructure.read", + "feature_apm.read", + "feature_uptime.read", + "feature_siem.read", + "feature_dev_tools.read", + "feature_advancedSettings.read", + "feature_indexPatterns.read", + "feature_savedObjectsManagement.read", + "feature_savedObjectsTagging.read", + "feature_fleet.read", + "feature_actions.read", + "feature_stackAlerts.read" + ], + "resources": [ + "*" + ] + } + ], + "run_as": [] +} \ No newline at end of file diff --git a/salt/elasticsearch/templates/so/so-aws-template.json.jinja b/salt/elasticsearch/templates/so/so-aws-template.json.jinja new file mode 100644 index 000000000..19b23dfba --- /dev/null +++ b/salt/elasticsearch/templates/so/so-aws-template.json.jinja @@ -0,0 +1,13 @@ +{%- set SHARDS = salt['pillar.get']('elasticsearch:index_settings:so-aws:shards', 1) %} +{%- set REPLICAS = salt['pillar.get']('elasticsearch:replicas', 0) %} +{%- set REFRESH = salt['pillar.get']('elasticsearch:index_settings:so-aws:refresh', '30s') %} +{ + "index_patterns": ["so-aws-*"], + "version":50001, + "order":11, + "settings":{ + "number_of_replicas":{{ REPLICAS }}, + "number_of_shards":{{ SHARDS }}, + "index.refresh_interval":"{{ REFRESH }}" + } +} diff --git a/salt/elasticsearch/templates/so/so-azure-template.json.jinja b/salt/elasticsearch/templates/so/so-azure-template.json.jinja new file mode 100644 index 000000000..51a266479 --- /dev/null +++ b/salt/elasticsearch/templates/so/so-azure-template.json.jinja @@ -0,0 +1,13 @@ +{%- set SHARDS = salt['pillar.get']('elasticsearch:index_settings:so-azure:shards', 1) %} +{%- set REPLICAS = salt['pillar.get']('elasticsearch:replicas', 0) %} +{%- set REFRESH = 
salt['pillar.get']('elasticsearch:index_settings:so-azure:refresh', '30s') %} +{ + "index_patterns": ["so-azure-*"], + "version":50001, + "order":11, + "settings":{ + "number_of_replicas":{{ REPLICAS }}, + "number_of_shards":{{ SHARDS }}, + "index.refresh_interval":"{{ REFRESH }}" + } +} diff --git a/salt/elasticsearch/templates/so/so-barracuda-template.json.jinja b/salt/elasticsearch/templates/so/so-barracuda-template.json.jinja new file mode 100644 index 000000000..66967d6d1 --- /dev/null +++ b/salt/elasticsearch/templates/so/so-barracuda-template.json.jinja @@ -0,0 +1,13 @@ +{%- set SHARDS = salt['pillar.get']('elasticsearch:index_settings:so-barracuda:shards', 1) %} +{%- set REPLICAS = salt['pillar.get']('elasticsearch:replicas', 0) %} +{%- set REFRESH = salt['pillar.get']('elasticsearch:index_settings:so-barracuda:refresh', '30s') %} +{ + "index_patterns": ["so-barracuda-*"], + "version":50001, + "order":11, + "settings":{ + "number_of_replicas":{{ REPLICAS }}, + "number_of_shards":{{ SHARDS }}, + "index.refresh_interval":"{{ REFRESH }}" + } +} diff --git a/salt/elasticsearch/templates/so/so-bluecoat-template.json.jinja b/salt/elasticsearch/templates/so/so-bluecoat-template.json.jinja new file mode 100644 index 000000000..b1714183e --- /dev/null +++ b/salt/elasticsearch/templates/so/so-bluecoat-template.json.jinja @@ -0,0 +1,13 @@ +{%- set SHARDS = salt['pillar.get']('elasticsearch:index_settings:so-bluecoat:shards', 1) %} +{%- set REPLICAS = salt['pillar.get']('elasticsearch:replicas', 0) %} +{%- set REFRESH = salt['pillar.get']('elasticsearch:index_settings:so-bluecoat:refresh', '30s') %} +{ + "index_patterns": ["so-bluecoat-*"], + "version":50001, + "order":11, + "settings":{ + "number_of_replicas":{{ REPLICAS }}, + "number_of_shards":{{ SHARDS }}, + "index.refresh_interval":"{{ REFRESH }}" + } +} diff --git a/salt/elasticsearch/templates/so/so-cef-template.json.jinja b/salt/elasticsearch/templates/so/so-cef-template.json.jinja new file mode 100644 index 000000000..0081d42e1 --- /dev/null +++ b/salt/elasticsearch/templates/so/so-cef-template.json.jinja @@ -0,0 +1,13 @@ +{%- set SHARDS = salt['pillar.get']('elasticsearch:index_settings:so-cef:shards', 1) %} +{%- set REPLICAS = salt['pillar.get']('elasticsearch:replicas', 0) %} +{%- set REFRESH = salt['pillar.get']('elasticsearch:index_settings:so-cef:refresh', '30s') %} +{ + "index_patterns": ["so-cef-*"], + "version":50001, + "order":11, + "settings":{ + "number_of_replicas":{{ REPLICAS }}, + "number_of_shards":{{ SHARDS }}, + "index.refresh_interval":"{{ REFRESH }}" + } +} diff --git a/salt/elasticsearch/templates/so/so-checkpoint-template.json.jinja b/salt/elasticsearch/templates/so/so-checkpoint-template.json.jinja new file mode 100644 index 000000000..5d41946cf --- /dev/null +++ b/salt/elasticsearch/templates/so/so-checkpoint-template.json.jinja @@ -0,0 +1,13 @@ +{%- set SHARDS = salt['pillar.get']('elasticsearch:index_settings:so-checkpoint:shards', 1) %} +{%- set REPLICAS = salt['pillar.get']('elasticsearch:replicas', 0) %} +{%- set REFRESH = salt['pillar.get']('elasticsearch:index_settings:so-checkpoint:refresh', '30s') %} +{ + "index_patterns": ["so-checkpoint-*"], + "version":50001, + "order":11, + "settings":{ + "number_of_replicas":{{ REPLICAS }}, + "number_of_shards":{{ SHARDS }}, + "index.refresh_interval":"{{ REFRESH }}" + } +} diff --git a/salt/elasticsearch/templates/so/so-cisco-template.json.jinja b/salt/elasticsearch/templates/so/so-cisco-template.json.jinja new file mode 100644 index 000000000..e6e6d14d0 --- 
/dev/null +++ b/salt/elasticsearch/templates/so/so-cisco-template.json.jinja @@ -0,0 +1,13 @@ +{%- set SHARDS = salt['pillar.get']('elasticsearch:index_settings:so-cisco:shards', 1) %} +{%- set REPLICAS = salt['pillar.get']('elasticsearch:replicas', 0) %} +{%- set REFRESH = salt['pillar.get']('elasticsearch:index_settings:so-cisco:refresh', '30s') %} +{ + "index_patterns": ["so-cisco-*"], + "version":50001, + "order":11, + "settings":{ + "number_of_replicas":{{ REPLICAS }}, + "number_of_shards":{{ SHARDS }}, + "index.refresh_interval":"{{ REFRESH }}" + } +} diff --git a/salt/elasticsearch/templates/so/so-common-template.json b/salt/elasticsearch/templates/so/so-common-template.json.jinja similarity index 98% rename from salt/elasticsearch/templates/so/so-common-template.json rename to salt/elasticsearch/templates/so/so-common-template.json.jinja index 777bf3f53..4394ebb65 100644 --- a/salt/elasticsearch/templates/so/so-common-template.json +++ b/salt/elasticsearch/templates/so/so-common-template.json.jinja @@ -1,3 +1,4 @@ +{%- set INDEX_SORTING = salt['pillar.get']('elasticsearch:index_sorting', True) %} { "index_patterns": ["so-*"], "version":50001, @@ -8,6 +9,10 @@ "index.refresh_interval":"30s", "index.routing.allocation.require.box_type":"hot", "index.mapping.total_fields.limit": "1500", +{%- if INDEX_SORTING is sameas true %} + "index.sort.field": "@timestamp", + "index.sort.order": "desc", +{%- endif %} "analysis": { "analyzer": { "es_security_analyzer": { diff --git a/salt/elasticsearch/templates/so/so-cyberark-template.json.jinja b/salt/elasticsearch/templates/so/so-cyberark-template.json.jinja new file mode 100644 index 000000000..1647d600f --- /dev/null +++ b/salt/elasticsearch/templates/so/so-cyberark-template.json.jinja @@ -0,0 +1,13 @@ +{%- set SHARDS = salt['pillar.get']('elasticsearch:index_settings:so-cyberark:shards', 1) %} +{%- set REPLICAS = salt['pillar.get']('elasticsearch:replicas', 0) %} +{%- set REFRESH = salt['pillar.get']('elasticsearch:index_settings:so-cyberark:refresh', '30s') %} +{ + "index_patterns": ["so-cyberark-*"], + "version":50001, + "order":11, + "settings":{ + "number_of_replicas":{{ REPLICAS }}, + "number_of_shards":{{ SHARDS }}, + "index.refresh_interval":"{{ REFRESH }}" + } +} diff --git a/salt/elasticsearch/templates/so/so-cylance-template.json.jinja b/salt/elasticsearch/templates/so/so-cylance-template.json.jinja new file mode 100644 index 000000000..4ba7d0316 --- /dev/null +++ b/salt/elasticsearch/templates/so/so-cylance-template.json.jinja @@ -0,0 +1,13 @@ +{%- set SHARDS = salt['pillar.get']('elasticsearch:index_settings:so-cylance:shards', 1) %} +{%- set REPLICAS = salt['pillar.get']('elasticsearch:replicas', 0) %} +{%- set REFRESH = salt['pillar.get']('elasticsearch:index_settings:so-cylance:refresh', '30s') %} +{ + "index_patterns": ["so-cylance-*"], + "version":50001, + "order":11, + "settings":{ + "number_of_replicas":{{ REPLICAS }}, + "number_of_shards":{{ SHARDS }}, + "index.refresh_interval":"{{ REFRESH }}" + } +} diff --git a/salt/elasticsearch/templates/so/so-elasticsearch-template.json.jinja b/salt/elasticsearch/templates/so/so-elasticsearch-template.json.jinja new file mode 100644 index 000000000..16aaaec13 --- /dev/null +++ b/salt/elasticsearch/templates/so/so-elasticsearch-template.json.jinja @@ -0,0 +1,13 @@ +{%- set SHARDS = salt['pillar.get']('elasticsearch:index_settings:so-elasticsearch:shards', 1) %} +{%- set REPLICAS = salt['pillar.get']('elasticsearch:replicas', 0) %} +{%- set REFRESH = 
salt['pillar.get']('elasticsearch:index_settings:so-elasticsearch:refresh', '30s') %} +{ + "index_patterns": ["so-elasticsearch-*"], + "version":50001, + "order":11, + "settings":{ + "number_of_replicas":{{ REPLICAS }}, + "number_of_shards":{{ SHARDS }}, + "index.refresh_interval":"{{ REFRESH }}" + } +} diff --git a/salt/elasticsearch/templates/so/so-f5-template.json.jinja b/salt/elasticsearch/templates/so/so-f5-template.json.jinja new file mode 100644 index 000000000..682a37c59 --- /dev/null +++ b/salt/elasticsearch/templates/so/so-f5-template.json.jinja @@ -0,0 +1,13 @@ +{%- set SHARDS = salt['pillar.get']('elasticsearch:index_settings:so-f5:shards', 1) %} +{%- set REPLICAS = salt['pillar.get']('elasticsearch:replicas', 0) %} +{%- set REFRESH = salt['pillar.get']('elasticsearch:index_settings:so-f5:refresh', '30s') %} +{ + "index_patterns": ["so-f5-*"], + "version":50001, + "order":11, + "settings":{ + "number_of_replicas":{{ REPLICAS }}, + "number_of_shards":{{ SHARDS }}, + "index.refresh_interval":"{{ REFRESH }}" + } +} diff --git a/salt/elasticsearch/templates/so/so-fortinet-template.json.jinja b/salt/elasticsearch/templates/so/so-fortinet-template.json.jinja new file mode 100644 index 000000000..616607f52 --- /dev/null +++ b/salt/elasticsearch/templates/so/so-fortinet-template.json.jinja @@ -0,0 +1,13 @@ +{%- set SHARDS = salt['pillar.get']('elasticsearch:index_settings:so-fortinet:shards', 1) %} +{%- set REPLICAS = salt['pillar.get']('elasticsearch:replicas', 0) %} +{%- set REFRESH = salt['pillar.get']('elasticsearch:index_settings:so-fortinet:refresh', '30s') %} +{ + "index_patterns": ["so-fortinet-*"], + "version":50001, + "order":11, + "settings":{ + "number_of_replicas":{{ REPLICAS }}, + "number_of_shards":{{ SHARDS }}, + "index.refresh_interval":"{{ REFRESH }}" + } +} diff --git a/salt/elasticsearch/templates/so/so-gcp-template.json.jinja b/salt/elasticsearch/templates/so/so-gcp-template.json.jinja new file mode 100644 index 000000000..4f1db4f20 --- /dev/null +++ b/salt/elasticsearch/templates/so/so-gcp-template.json.jinja @@ -0,0 +1,13 @@ +{%- set SHARDS = salt['pillar.get']('elasticsearch:index_settings:so-gcp:shards', 1) %} +{%- set REPLICAS = salt['pillar.get']('elasticsearch:replicas', 0) %} +{%- set REFRESH = salt['pillar.get']('elasticsearch:index_settings:so-gcp:refresh', '30s') %} +{ + "index_patterns": ["so-gcp-*"], + "version":50001, + "order":11, + "settings":{ + "number_of_replicas":{{ REPLICAS }}, + "number_of_shards":{{ SHARDS }}, + "index.refresh_interval":"{{ REFRESH }}" + } +} diff --git a/salt/elasticsearch/templates/so/so-google_workspace-template.json.jinja b/salt/elasticsearch/templates/so/so-google_workspace-template.json.jinja new file mode 100644 index 000000000..5ae26780a --- /dev/null +++ b/salt/elasticsearch/templates/so/so-google_workspace-template.json.jinja @@ -0,0 +1,13 @@ +{%- set SHARDS = salt['pillar.get']('elasticsearch:index_settings:so-google_workspace:shards', 1) %} +{%- set REPLICAS = salt['pillar.get']('elasticsearch:replicas', 0) %} +{%- set REFRESH = salt['pillar.get']('elasticsearch:index_settings:so-google_workspace:refresh', '30s') %} +{ + "index_patterns": ["so-google_workspace-*"], + "version":50001, + "order":11, + "settings":{ + "number_of_replicas":{{ REPLICAS }}, + "number_of_shards":{{ SHARDS }}, + "index.refresh_interval":"{{ REFRESH }}" + } +} diff --git a/salt/elasticsearch/templates/so/so-imperva-template.json.jinja b/salt/elasticsearch/templates/so/so-imperva-template.json.jinja new file mode 100644 index 000000000..1f574f33a --- 
/dev/null +++ b/salt/elasticsearch/templates/so/so-imperva-template.json.jinja @@ -0,0 +1,13 @@ +{%- set SHARDS = salt['pillar.get']('elasticsearch:index_settings:so-imperva:shards', 1) %} +{%- set REPLICAS = salt['pillar.get']('elasticsearch:replicas', 0) %} +{%- set REFRESH = salt['pillar.get']('elasticsearch:index_settings:so-imperva:refresh', '30s') %} +{ + "index_patterns": ["so-imperva-*"], + "version":50001, + "order":11, + "settings":{ + "number_of_replicas":{{ REPLICAS }}, + "number_of_shards":{{ SHARDS }}, + "index.refresh_interval":"{{ REFRESH }}" + } +} diff --git a/salt/elasticsearch/templates/so/so-infoblox-template.json.jinja b/salt/elasticsearch/templates/so/so-infoblox-template.json.jinja new file mode 100644 index 000000000..de613de7f --- /dev/null +++ b/salt/elasticsearch/templates/so/so-infoblox-template.json.jinja @@ -0,0 +1,13 @@ +{%- set SHARDS = salt['pillar.get']('elasticsearch:index_settings:so-infoblox:shards', 1) %} +{%- set REPLICAS = salt['pillar.get']('elasticsearch:replicas', 0) %} +{%- set REFRESH = salt['pillar.get']('elasticsearch:index_settings:so-infoblox:refresh', '30s') %} +{ + "index_patterns": ["so-infoblox-*"], + "version":50001, + "order":11, + "settings":{ + "number_of_replicas":{{ REPLICAS }}, + "number_of_shards":{{ SHARDS }}, + "index.refresh_interval":"{{ REFRESH }}" + } +} diff --git a/salt/elasticsearch/templates/so/so-juniper-template.json.jinja b/salt/elasticsearch/templates/so/so-juniper-template.json.jinja new file mode 100644 index 000000000..f637271a9 --- /dev/null +++ b/salt/elasticsearch/templates/so/so-juniper-template.json.jinja @@ -0,0 +1,13 @@ +{%- set SHARDS = salt['pillar.get']('elasticsearch:index_settings:so-juniper:shards', 1) %} +{%- set REPLICAS = salt['pillar.get']('elasticsearch:replicas', 0) %} +{%- set REFRESH = salt['pillar.get']('elasticsearch:index_settings:so-juniper:refresh', '30s') %} +{ + "index_patterns": ["so-juniper-*"], + "version":50001, + "order":11, + "settings":{ + "number_of_replicas":{{ REPLICAS }}, + "number_of_shards":{{ SHARDS }}, + "index.refresh_interval":"{{ REFRESH }}" + } +} diff --git a/salt/elasticsearch/templates/so/so-kibana-template.json.jinja b/salt/elasticsearch/templates/so/so-kibana-template.json.jinja new file mode 100644 index 000000000..fe2004b0e --- /dev/null +++ b/salt/elasticsearch/templates/so/so-kibana-template.json.jinja @@ -0,0 +1,13 @@ +{%- set SHARDS = salt['pillar.get']('elasticsearch:index_settings:so-kibana:shards', 1) %} +{%- set REPLICAS = salt['pillar.get']('elasticsearch:replicas', 0) %} +{%- set REFRESH = salt['pillar.get']('elasticsearch:index_settings:so-kibana:refresh', '30s') %} +{ + "index_patterns": ["so-kibana-*"], + "version":50001, + "order":11, + "settings":{ + "number_of_replicas":{{ REPLICAS }}, + "number_of_shards":{{ SHARDS }}, + "index.refresh_interval":"{{ REFRESH }}" + } +} diff --git a/salt/elasticsearch/templates/so/so-logstash-template.json.jinja b/salt/elasticsearch/templates/so/so-logstash-template.json.jinja new file mode 100644 index 000000000..2cf0aba42 --- /dev/null +++ b/salt/elasticsearch/templates/so/so-logstash-template.json.jinja @@ -0,0 +1,13 @@ +{%- set SHARDS = salt['pillar.get']('elasticsearch:index_settings:so-logstash:shards', 1) %} +{%- set REPLICAS = salt['pillar.get']('elasticsearch:replicas', 0) %} +{%- set REFRESH = salt['pillar.get']('elasticsearch:index_settings:so-logstash:refresh', '30s') %} +{ + "index_patterns": ["so-logstash-*"], + "version":50001, + "order":11, + "settings":{ + "number_of_replicas":{{ REPLICAS }}, + 
"number_of_shards":{{ SHARDS }}, + "index.refresh_interval":"{{ REFRESH }}" + } +} diff --git a/salt/elasticsearch/templates/so/so-microsoft-template.json.jinja b/salt/elasticsearch/templates/so/so-microsoft-template.json.jinja new file mode 100644 index 000000000..3493ccbb2 --- /dev/null +++ b/salt/elasticsearch/templates/so/so-microsoft-template.json.jinja @@ -0,0 +1,13 @@ +{%- set SHARDS = salt['pillar.get']('elasticsearch:index_settings:so-microsoft:shards', 1) %} +{%- set REPLICAS = salt['pillar.get']('elasticsearch:replicas', 0) %} +{%- set REFRESH = salt['pillar.get']('elasticsearch:index_settings:so-microsoft:refresh', '30s') %} +{ + "index_patterns": ["so-microsoft-*"], + "version":50001, + "order":11, + "settings":{ + "number_of_replicas":{{ REPLICAS }}, + "number_of_shards":{{ SHARDS }}, + "index.refresh_interval":"{{ REFRESH }}" + } +} diff --git a/salt/elasticsearch/templates/so/so-misp-template.json.jinja b/salt/elasticsearch/templates/so/so-misp-template.json.jinja new file mode 100644 index 000000000..67af1efde --- /dev/null +++ b/salt/elasticsearch/templates/so/so-misp-template.json.jinja @@ -0,0 +1,13 @@ +{%- set SHARDS = salt['pillar.get']('elasticsearch:index_settings:so-misp:shards', 1) %} +{%- set REPLICAS = salt['pillar.get']('elasticsearch:replicas', 0) %} +{%- set REFRESH = salt['pillar.get']('elasticsearch:index_settings:so-misp:refresh', '30s') %} +{ + "index_patterns": ["so-misp-*"], + "version":50001, + "order":11, + "settings":{ + "number_of_replicas":{{ REPLICAS }}, + "number_of_shards":{{ SHARDS }}, + "index.refresh_interval":"{{ REFRESH }}" + } +} diff --git a/salt/elasticsearch/templates/so/so-netflow-template.json.jinja b/salt/elasticsearch/templates/so/so-netflow-template.json.jinja new file mode 100644 index 000000000..62c0972bf --- /dev/null +++ b/salt/elasticsearch/templates/so/so-netflow-template.json.jinja @@ -0,0 +1,13 @@ +{%- set SHARDS = salt['pillar.get']('elasticsearch:index_settings:so-netflow:shards', 1) %} +{%- set REPLICAS = salt['pillar.get']('elasticsearch:replicas', 0) %} +{%- set REFRESH = salt['pillar.get']('elasticsearch:index_settings:so-netflow:refresh', '30s') %} +{ + "index_patterns": ["so-netflow-*"], + "version":50001, + "order":11, + "settings":{ + "number_of_replicas":{{ REPLICAS }}, + "number_of_shards":{{ SHARDS }}, + "index.refresh_interval":"{{ REFRESH }}" + } +} diff --git a/salt/elasticsearch/templates/so/so-netscout-template.json.jinja b/salt/elasticsearch/templates/so/so-netscout-template.json.jinja new file mode 100644 index 000000000..1dfe336d9 --- /dev/null +++ b/salt/elasticsearch/templates/so/so-netscout-template.json.jinja @@ -0,0 +1,13 @@ +{%- set SHARDS = salt['pillar.get']('elasticsearch:index_settings:so-netscout:shards', 1) %} +{%- set REPLICAS = salt['pillar.get']('elasticsearch:replicas', 0) %} +{%- set REFRESH = salt['pillar.get']('elasticsearch:index_settings:so-netscout:refresh', '30s') %} +{ + "index_patterns": ["so-netscout-*"], + "version":50001, + "order":11, + "settings":{ + "number_of_replicas":{{ REPLICAS }}, + "number_of_shards":{{ SHARDS }}, + "index.refresh_interval":"{{ REFRESH }}" + } +} diff --git a/salt/elasticsearch/templates/so/so-o365-template.json.jinja b/salt/elasticsearch/templates/so/so-o365-template.json.jinja new file mode 100644 index 000000000..c1f4826f4 --- /dev/null +++ b/salt/elasticsearch/templates/so/so-o365-template.json.jinja @@ -0,0 +1,13 @@ +{%- set SHARDS = salt['pillar.get']('elasticsearch:index_settings:so-o365:shards', 1) %} +{%- set REPLICAS = 
salt['pillar.get']('elasticsearch:replicas', 0) %} +{%- set REFRESH = salt['pillar.get']('elasticsearch:index_settings:so-o365:refresh', '30s') %} +{ + "index_patterns": ["so-o365-*"], + "version":50001, + "order":11, + "settings":{ + "number_of_replicas":{{ REPLICAS }}, + "number_of_shards":{{ SHARDS }}, + "index.refresh_interval":"{{ REFRESH }}" + } +} diff --git a/salt/elasticsearch/templates/so/so-okta-template.json.jinja b/salt/elasticsearch/templates/so/so-okta-template.json.jinja new file mode 100644 index 000000000..a4f2df44e --- /dev/null +++ b/salt/elasticsearch/templates/so/so-okta-template.json.jinja @@ -0,0 +1,13 @@ +{%- set SHARDS = salt['pillar.get']('elasticsearch:index_settings:so-okta:shards', 1) %} +{%- set REPLICAS = salt['pillar.get']('elasticsearch:replicas', 0) %} +{%- set REFRESH = salt['pillar.get']('elasticsearch:index_settings:so-okta:refresh', '30s') %} +{ + "index_patterns": ["so-okta-*"], + "version":50001, + "order":11, + "settings":{ + "number_of_replicas":{{ REPLICAS }}, + "number_of_shards":{{ SHARDS }}, + "index.refresh_interval":"{{ REFRESH }}" + } +} diff --git a/salt/elasticsearch/templates/so/so-proofpoint-template.json.jinja b/salt/elasticsearch/templates/so/so-proofpoint-template.json.jinja new file mode 100644 index 000000000..d7b0ecbe2 --- /dev/null +++ b/salt/elasticsearch/templates/so/so-proofpoint-template.json.jinja @@ -0,0 +1,13 @@ +{%- set SHARDS = salt['pillar.get']('elasticsearch:index_settings:so-proofpoint:shards', 1) %} +{%- set REPLICAS = salt['pillar.get']('elasticsearch:replicas', 0) %} +{%- set REFRESH = salt['pillar.get']('elasticsearch:index_settings:so-proofpoint:refresh', '30s') %} +{ + "index_patterns": ["so-proofpoint-*"], + "version":50001, + "order":11, + "settings":{ + "number_of_replicas":{{ REPLICAS }}, + "number_of_shards":{{ SHARDS }}, + "index.refresh_interval":"{{ REFRESH }}" + } +} diff --git a/salt/elasticsearch/templates/so/so-radware-template.json.jinja b/salt/elasticsearch/templates/so/so-radware-template.json.jinja new file mode 100644 index 000000000..4efef6a4f --- /dev/null +++ b/salt/elasticsearch/templates/so/so-radware-template.json.jinja @@ -0,0 +1,13 @@ +{%- set SHARDS = salt['pillar.get']('elasticsearch:index_settings:so-radware:shards', 1) %} +{%- set REPLICAS = salt['pillar.get']('elasticsearch:replicas', 0) %} +{%- set REFRESH = salt['pillar.get']('elasticsearch:index_settings:so-radware:refresh', '30s') %} +{ + "index_patterns": ["so-radware-*"], + "version":50001, + "order":11, + "settings":{ + "number_of_replicas":{{ REPLICAS }}, + "number_of_shards":{{ SHARDS }}, + "index.refresh_interval":"{{ REFRESH }}" + } +} diff --git a/salt/elasticsearch/templates/so/so-redis-template.json.jinja b/salt/elasticsearch/templates/so/so-redis-template.json.jinja new file mode 100644 index 000000000..616607f52 --- /dev/null +++ b/salt/elasticsearch/templates/so/so-redis-template.json.jinja @@ -0,0 +1,13 @@ +{%- set SHARDS = salt['pillar.get']('elasticsearch:index_settings:so-redis:shards', 1) %} +{%- set REPLICAS = salt['pillar.get']('elasticsearch:replicas', 0) %} +{%- set REFRESH = salt['pillar.get']('elasticsearch:index_settings:so-redis:refresh', '30s') %} +{ + "index_patterns": ["so-redis-*"], + "version":50001, + "order":11, + "settings":{ + "number_of_replicas":{{ REPLICAS }}, + "number_of_shards":{{ SHARDS }}, + "index.refresh_interval":"{{ REFRESH }}" + } +} diff --git a/salt/elasticsearch/templates/so/so-snort-template.json.jinja b/salt/elasticsearch/templates/so/so-snort-template.json.jinja new file mode 
100644 index 000000000..325f86bde --- /dev/null +++ b/salt/elasticsearch/templates/so/so-snort-template.json.jinja @@ -0,0 +1,13 @@ +{%- set SHARDS = salt['pillar.get']('elasticsearch:index_settings:so-snort:shards', 1) %} +{%- set REPLICAS = salt['pillar.get']('elasticsearch:replicas', 0) %} +{%- set REFRESH = salt['pillar.get']('elasticsearch:index_settings:so-snort:refresh', '30s') %} +{ + "index_patterns": ["so-snort-*"], + "version":50001, + "order":11, + "settings":{ + "number_of_replicas":{{ REPLICAS }}, + "number_of_shards":{{ SHARDS }}, + "index.refresh_interval":"{{ REFRESH }}" + } +} diff --git a/salt/elasticsearch/templates/so/so-snyk-template.json.jinja b/salt/elasticsearch/templates/so/so-snyk-template.json.jinja new file mode 100644 index 000000000..0c2d291cc --- /dev/null +++ b/salt/elasticsearch/templates/so/so-snyk-template.json.jinja @@ -0,0 +1,13 @@ +{%- set SHARDS = salt['pillar.get']('elasticsearch:index_settings:so-snyk:shards', 1) %} +{%- set REPLICAS = salt['pillar.get']('elasticsearch:replicas', 0) %} +{%- set REFRESH = salt['pillar.get']('elasticsearch:index_settings:so-snyk:refresh', '30s') %} +{ + "index_patterns": ["so-snyk-*"], + "version":50001, + "order":11, + "settings":{ + "number_of_replicas":{{ REPLICAS }}, + "number_of_shards":{{ SHARDS }}, + "index.refresh_interval":"{{ REFRESH }}" + } +} diff --git a/salt/elasticsearch/templates/so/so-sonicwall-template.json.jinja b/salt/elasticsearch/templates/so/so-sonicwall-template.json.jinja new file mode 100644 index 000000000..b912e4d66 --- /dev/null +++ b/salt/elasticsearch/templates/so/so-sonicwall-template.json.jinja @@ -0,0 +1,13 @@ +{%- set SHARDS = salt['pillar.get']('elasticsearch:index_settings:so-sonicwall:shards', 1) %} +{%- set REPLICAS = salt['pillar.get']('elasticsearch:replicas', 0) %} +{%- set REFRESH = salt['pillar.get']('elasticsearch:index_settings:so-sonicwall:refresh', '30s') %} +{ + "index_patterns": ["so-sonicwall-*"], + "version":50001, + "order":11, + "settings":{ + "number_of_replicas":{{ REPLICAS }}, + "number_of_shards":{{ SHARDS }}, + "index.refresh_interval":"{{ REFRESH }}" + } +} diff --git a/salt/elasticsearch/templates/so/so-sophos-template.json.jinja b/salt/elasticsearch/templates/so/so-sophos-template.json.jinja new file mode 100644 index 000000000..689e19999 --- /dev/null +++ b/salt/elasticsearch/templates/so/so-sophos-template.json.jinja @@ -0,0 +1,13 @@ +{%- set SHARDS = salt['pillar.get']('elasticsearch:index_settings:so-sophos:shards', 1) %} +{%- set REPLICAS = salt['pillar.get']('elasticsearch:replicas', 0) %} +{%- set REFRESH = salt['pillar.get']('elasticsearch:index_settings:so-sophos:refresh', '30s') %} +{ + "index_patterns": ["so-sophos-*"], + "version":50001, + "order":11, + "settings":{ + "number_of_replicas":{{ REPLICAS }}, + "number_of_shards":{{ SHARDS }}, + "index.refresh_interval":"{{ REFRESH }}" + } +} diff --git a/salt/elasticsearch/templates/so/so-squid-template.json.jinja b/salt/elasticsearch/templates/so/so-squid-template.json.jinja new file mode 100644 index 000000000..9398b8a99 --- /dev/null +++ b/salt/elasticsearch/templates/so/so-squid-template.json.jinja @@ -0,0 +1,13 @@ +{%- set SHARDS = salt['pillar.get']('elasticsearch:index_settings:so-squid:shards', 1) %} +{%- set REPLICAS = salt['pillar.get']('elasticsearch:replicas', 0) %} +{%- set REFRESH = salt['pillar.get']('elasticsearch:index_settings:so-squid:refresh', '30s') %} +{ + "index_patterns": ["so-squid-*"], + "version":50001, + "order":11, + "settings":{ + "number_of_replicas":{{ REPLICAS }}, + 
"number_of_shards":{{ SHARDS }}, + "index.refresh_interval":"{{ REFRESH }}" + } +} diff --git a/salt/elasticsearch/templates/so/so-tomcat-template.json.jinja b/salt/elasticsearch/templates/so/so-tomcat-template.json.jinja new file mode 100644 index 000000000..797e71bcf --- /dev/null +++ b/salt/elasticsearch/templates/so/so-tomcat-template.json.jinja @@ -0,0 +1,13 @@ +{%- set SHARDS = salt['pillar.get']('elasticsearch:index_settings:so-tomcat:shards', 1) %} +{%- set REPLICAS = salt['pillar.get']('elasticsearch:replicas', 0) %} +{%- set REFRESH = salt['pillar.get']('elasticsearch:index_settings:so-tomcat:refresh', '30s') %} +{ + "index_patterns": ["so-tomcat-*"], + "version":50001, + "order":11, + "settings":{ + "number_of_replicas":{{ REPLICAS }}, + "number_of_shards":{{ SHARDS }}, + "index.refresh_interval":"{{ REFRESH }}" + } +} diff --git a/salt/elasticsearch/templates/so/so-zscaler-template.json.jinja b/salt/elasticsearch/templates/so/so-zscaler-template.json.jinja new file mode 100644 index 000000000..01d1cab2d --- /dev/null +++ b/salt/elasticsearch/templates/so/so-zscaler-template.json.jinja @@ -0,0 +1,13 @@ +{%- set SHARDS = salt['pillar.get']('elasticsearch:index_settings:so-zscaler:shards', 1) %} +{%- set REPLICAS = salt['pillar.get']('elasticsearch:replicas', 0) %} +{%- set REFRESH = salt['pillar.get']('elasticsearch:index_settings:so-zscaler:refresh', '30s') %} +{ + "index_patterns": ["so-zscaler-*"], + "version":50001, + "order":11, + "settings":{ + "number_of_replicas":{{ REPLICAS }}, + "number_of_shards":{{ SHARDS }}, + "index.refresh_interval":"{{ REFRESH }}" + } +} diff --git a/salt/kibana/etc/kibana.yml b/salt/kibana/etc/kibana.yml index 6fcafe68f..2819f301d 100644 --- a/salt/kibana/etc/kibana.yml +++ b/salt/kibana/etc/kibana.yml @@ -3,9 +3,11 @@ {%- set ES = salt['pillar.get']('manager:mainip', '') -%} {%- set ES_USER = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:user', '') %} {%- set ES_PASS = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:pass', '') %} +{%- set URLBASE = salt['pillar.get']('global:url_base') %} server.name: kibana server.host: "0" server.basePath: /kibana +server.publicBaseUrl: https://{{ URLBASE }}/kibana elasticsearch.hosts: [ "https://{{ ES }}:9200" ] elasticsearch.ssl.verificationMode: none #kibana.index: ".kibana" diff --git a/salt/manager/init.sls b/salt/manager/init.sls index 17b1ad9e0..1d6577e5f 100644 --- a/salt/manager/init.sls +++ b/salt/manager/init.sls @@ -124,6 +124,7 @@ syncesusers: - creates: - /opt/so/saltstack/local/salt/elasticsearch/files/users - /opt/so/saltstack/local/salt/elasticsearch/files/users_roles + - /opt/so/conf/soc/soc_users_roles - show_changes: False {% else %} diff --git a/salt/nginx/etc/nginx.conf b/salt/nginx/etc/nginx.conf index b85488b7f..f0308b868 100644 --- a/salt/nginx/etc/nginx.conf +++ b/salt/nginx/etc/nginx.conf @@ -167,6 +167,7 @@ http { proxy_pass http://{{ manager_ip }}:9822; proxy_read_timeout 90; proxy_connect_timeout 90; + proxy_set_header x-user-id ""; proxy_set_header Host $host; proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; @@ -378,6 +379,7 @@ http { proxy_pass http://{{ manager_ip }}:9822/; proxy_read_timeout 90; proxy_connect_timeout 90; + proxy_set_header x-user-id ""; proxy_set_header Host $host; proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; diff --git a/salt/pcap/files/config b/salt/pcap/files/config index 048775ef7..900234bc1 100644 --- 
a/salt/pcap/files/config +++ b/salt/pcap/files/config @@ -1,11 +1,13 @@ {%- set interface = salt['pillar.get']('sensor:interface', 'bond0') %} {%- set diskfreepercentage = salt['pillar.get']('steno:diskfreepercentage', 10) %} +{%- set maxfiles = salt['pillar.get']('steno:maxfiles', 30000) %} + { "Threads": [ { "PacketsDirectory": "/nsm/pcap" , "IndexDirectory": "/nsm/pcapindex" - , "MaxDirectoryFiles": 30000 + , "MaxDirectoryFiles": {{ maxfiles }} , "DiskFreePercentage": {{ diskfreepercentage }} } ] @@ -15,4 +17,4 @@ , "Host": "127.0.0.1" , "Flags": ["-v", "--uid=stenographer", "--gid=stenographer"{{ BPF_COMPILED }}] , "CertPath": "/etc/stenographer/certs" -} \ No newline at end of file +} diff --git a/salt/pcap/init.sls b/salt/pcap/init.sls index a64b9d90a..641300fdf 100644 --- a/salt/pcap/init.sls +++ b/salt/pcap/init.sls @@ -15,13 +15,14 @@ {% from 'allowed_states.map.jinja' import allowed_states %} {% if sls in allowed_states %} +{% from "pcap/map.jinja" import STENOOPTIONS with context %} + {% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %} {% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} {% set MANAGER = salt['grains.get']('master') %} {% set INTERFACE = salt['pillar.get']('sensor:interface', 'bond0') %} {% set BPF_STENO = salt['pillar.get']('steno:bpf', None) %} {% set BPF_COMPILED = "" %} -{% from "pcap/map.jinja" import STENOOPTIONS with context %} # PCAP Section diff --git a/salt/soc/files/kratos/schema.json b/salt/soc/files/kratos/schema.json index 986086936..19ee2197c 100644 --- a/salt/soc/files/kratos/schema.json +++ b/salt/soc/files/kratos/schema.json @@ -31,10 +31,6 @@ "type": "string", "title": "Last Name" }, - "role": { - "type": "string", - "title": "Role" - }, "status": { "type": "string", "title": "Status" diff --git a/salt/soc/files/soc/custom_roles b/salt/soc/files/soc/custom_roles new file mode 100644 index 000000000..b95b94da4 --- /dev/null +++ b/salt/soc/files/soc/custom_roles @@ -0,0 +1,23 @@ +# Define custom business role mappings, or remove mappings that come with +# the default SOC deployment. +# +# IMPORTANT: This file should be copied from the salt/default tree into +# the salt/local tree (preserving the same directory structure). +# Failure to do this will result in the customizations being +# overwritten on future upgrades. +# +# Syntax => prebuiltRoleX: customRoleY: op +# Explanation => customRoleY carries the adjusted permissions of prebuiltRoleX, op is: +# + add the new permissions/role mappings (default) +# - remove existing "explicit" prebuilt permissions. This +# does not work with implicitly inherited permissions. +# +# In the example below, we will define two new roles for segregating +# analysts into two regions. Then we will remove the ability for all +# analysts to see the roles of other analysts. (Separately we will need to +# define these two new roles in Elasticsearch so that each analyst region +# can only see data from their specific region's indices, but that is out +# of scope from this file.) 
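The mapping lines at the end of the comment block below cover the SOC side of that example. The Elasticsearch side is indeed out of scope for this file, but for illustration it could be created through the security API using the same structure as the roles/*.json files added earlier in this changeset. This is a sketch only; the role name, index pattern, and ES_USER/ES_PASS credentials are placeholders, not part of this diff.

```
# Hypothetical sketch: create a "westcoast_analyst" role in Elasticsearch, limited to an
# illustrative index pattern. ES_USER/ES_PASS and the pattern are placeholders.
curl -sk -u "$ES_USER:$ES_PASS" -X PUT "https://localhost:9200/_security/role/westcoast_analyst" \
  -H 'Content-Type: application/json' \
  -d '{ "indices": [ { "names": [ "so-*-westcoast*" ], "privileges": [ "read", "view_index_metadata" ] } ] }'
```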
+#
+# analyst: westcoast_analyst, eastcoast_analyst
+#   roles/read: user-monitor:-
\ No newline at end of file
diff --git a/salt/soc/files/soc/hunt.eventfields.json b/salt/soc/files/soc/hunt.eventfields.json
index e8af03a5c..7964a360b 100644
--- a/salt/soc/files/soc/hunt.eventfields.json
+++ b/salt/soc/files/soc/hunt.eventfields.json
@@ -30,7 +30,7 @@
   "::software": ["soc_timestamp", "source.ip", "software.name", "software.type" ],
   "::ssh": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "ssh.version", "ssh.hassh_version", "ssh.direction", "ssh.client", "ssh.server", "log.id.uid" ],
   "::ssl": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "ssl.server_name", "ssl.certificate.subject", "ssl.validation_status", "ssl.version", "log.id.uid" ],
-  "::syslog": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "syslog.facility", "network.protocol", "syslog.severity", "log.id.uid" ],
+  ":zeek:syslog": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "syslog.facility", "network.protocol", "syslog.severity", "log.id.uid" ],
   "::tunnels": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "tunnel_type", "action", "log.id.uid" ],
   "::weird": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "weird.name", "log.id.uid" ],
   "::x509": ["soc_timestamp", "x509.certificate.subject", "x509.certificate.key.type", "x509.certificate.key.length", "x509.certificate.issuer", "log.id.fuid" ],
@@ -40,5 +40,13 @@
   ":strelka:file": ["soc_timestamp", "file.name", "file.size", "hash.md5", "file.source", "file.mime_type", "log.id.fuid" ],
   ":suricata:": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "rule.name", "rule.category", "event.severity_label", "log.id.uid", "network.community_id" ],
   ":sysmon:": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "source.hostname", "event.dataset", "process.executable", "user.name" ],
-  ":windows_eventlog:": ["soc_timestamp", "user.name" ]
+  ":windows_eventlog:": ["soc_timestamp", "user.name" ],
+  ":elasticsearch:": ["soc_timestamp", "agent.name", "message", "log.level", "metadata.version", "metadata.pipeline", "event.dataset" ],
+  ":kibana:": ["soc_timestamp", "host.name", "message", "kibana.log.meta.req.headers.x-real-ip", "event.dataset" ],
+  "::rootcheck": ["soc_timestamp", "host.name", "metadata.ip_address", "log.full", "event.dataset", "event.module" ],
+  "::ossec": ["soc_timestamp", "host.name", "metadata.ip_address", "log.full", "event.dataset", "event.module" ],
+  "::syscollector": ["soc_timestamp", "host.name", "metadata.ip_address", "wazuh.data.type", "log.full", "event.dataset", "event.module" ],
+  ":syslog:syslog": ["soc_timestamp", "host.name", "metadata.ip_address", "real_message", "syslog.priority", "syslog.application" ],
+  ":aws:": ["soc_timestamp", "aws.cloudtrail.event_category", "aws.cloudtrail.event_type", "event.provider", "event.action", "event.outcome", "cloud.region", "user.name", "source.ip", "source.geo.region_iso_code" ],
+  ":squid:": ["soc_timestamp", "url.original", "destination.ip", "destination.geo.country_iso_code", "user.name", "source.ip" ]
 }
diff --git a/salt/soc/files/soc/soc.json b/salt/soc/files/soc/soc.json
index fc6d5f28d..e33ea406b 100644
--- a/salt/soc/files/soc/soc.json
+++ b/salt/soc/files/soc/soc.json
@@ -3,7 +3,8 @@
 {%- set THEHIVEKEY = salt['pillar.get']('global:hivekey', '') %}
 {%- set PLAYBOOK = salt['pillar.get']('manager:playbook', '0') %}
 {%- set THEHIVE = salt['pillar.get']('manager:thehive', '0') %}
-{%- set OSQUERY = salt['pillar.get']('manager:osquery', '0') %}
+{% set FLEETMANAGER = salt['pillar.get']('global:fleet_manager', False) %}
+{% set FLEETNODE = salt['pillar.get']('global:fleet_node', False) %}
 {%- set GRAFANA = salt['pillar.get']('manager:grafana', '0') %}
 {%- set ISAIRGAP = salt['pillar.get']('global:airgap', 'False') %}
 {%- set API_TIMEOUT = salt['pillar.get']('sensoroni:api_timeout_ms', 0) %}
@@ -85,6 +86,16 @@
     "statickeyauth": {
       "anonymousCidr": "{{ DNET }}/24",
       "apiKey": "{{ SENSORONIKEY }}"
+    },
+    "staticrbac": {
+      "roleFiles": [
+        "rbac/permissions",
+        "rbac/roles",
+        "rbac/custom_roles"
+      ],
+      "userFiles": [
+        "rbac/users_roles"
+      ]
     }
   },
   "client": {
@@ -108,7 +119,7 @@
 {%- if THEHIVE == 0 %}
       "toolTheHive",
 {%- endif %}
-{%- if OSQUERY == 0 %}
+{%- if not FLEETMANAGER and not FLEETNODE %}
       "toolFleet",
 {%- endif %}
 {%- if GRAFANA == 0 %}
diff --git a/salt/soc/init.sls b/salt/soc/init.sls
index b8cdb09ba..69cc54c82 100644
--- a/salt/soc/init.sls
+++ b/salt/soc/init.sls
@@ -62,6 +62,15 @@ soccustom:
   file.managed:
     - mode: 600
     - template: jinja
+soccustomroles:
+  file.managed:
+    - name: /opt/so/conf/soc/custom_roles
+    - source: salt://soc/files/soc/custom_roles
+    - user: 939
+    - group: 939
+    - mode: 600
+    - template: jinja
+
 # we dont want this added too early in setup, so we add the onlyif to verify 'startup_states: highstate'
 # is in the minion config. That line is added before the final highstate during setup
 sosyncusers:
@@ -81,6 +90,8 @@ so-soc:
       - /opt/so/conf/soc/motd.md:/opt/sensoroni/html/motd.md:ro
       - /opt/so/conf/soc/banner.md:/opt/sensoroni/html/login/banner.md:ro
       - /opt/so/conf/soc/custom.js:/opt/sensoroni/html/js/custom.js:ro
+      - /opt/so/conf/soc/custom_roles:/opt/sensoroni/rbac/custom_roles:ro
+      - /opt/so/conf/soc/soc_users_roles:/opt/sensoroni/rbac/users_roles:rw
       - /opt/so/log/soc/:/opt/sensoroni/logs/:rw
 {%- if salt['pillar.get']('nodestab', {}) %}
     - extra_hosts:
diff --git a/salt/soctopus/init.sls b/salt/soctopus/init.sls
index c2c8dc1ac..724e5a617 100644
--- a/salt/soctopus/init.sls
+++ b/salt/soctopus/init.sls
@@ -10,7 +10,7 @@
 soctopusdir:
   file.directory:
-    - name: /opt/so/conf/soctopus
+    - name: /opt/so/conf/soctopus/sigma-import
     - user: 939
     - group: 939
     - makedirs: True
@@ -63,6 +63,7 @@ so-soctopus:
       - /opt/so/log/soctopus/:/var/log/SOCtopus/:rw
       - /opt/so/rules/elastalert/playbook:/etc/playbook-rules:rw
       - /opt/so/conf/navigator/nav_layer_playbook.json:/etc/playbook/nav_layer_playbook.json:rw
+      - /opt/so/conf/soctopus/sigma-import/:/SOCtopus/sigma-import/:rw
 {% if ISAIRGAP is sameas true %}
       - /nsm/repo/rules/sigma:/soctopus/sigma
 {% endif %}
diff --git a/salt/strelka/defaults.yaml b/salt/strelka/defaults.yaml
new file mode 100644
index 000000000..2a3805283
--- /dev/null
+++ b/salt/strelka/defaults.yaml
@@ -0,0 +1,9 @@
+strelka:
+  ignore:
+    - generic_anomalies.yar
+    - general_cloaking.yar
+    - thor_inverse_matches.yar
+    - yara_mixed_ext_vars.yar
+    - gen_susp_js_obfuscatorio.yar
+    - apt_flame2_orchestrator.yar
+    - apt_tetris.yar
diff --git a/salt/strelka/init.sls b/salt/strelka/init.sls
index d90484fed..d66b2bef2 100644
--- a/salt/strelka/init.sls
+++ b/salt/strelka/init.sls
@@ -21,6 +21,8 @@
 {% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
 {% set STRELKA_RULES = salt['pillar.get']('strelka:rules', '1') %}
 {% set ENGINE = salt['pillar.get']('global:mdengine', '') %}
+{% import_yaml 'strelka/defaults.yaml' as strelka_config with context %}
+{% set IGNORELIST = salt['pillar.get']('strelka:ignore', strelka_config.strelka.ignore, merge=True, merge_nested_lists=True) %}
 
 # Strelka config
 strelkaconfdir:
@@ -54,6 +56,17 @@ strelkarules:
     - source: salt://strelka/rules
     - user: 939
     - group: 939
+    - clean: True
+    - exclude_pat:
+    {% for IGNOREDRULE in IGNORELIST %}
+      - {{ IGNOREDRULE }}
+    {% endfor %}
+
+{% for IGNOREDRULE in IGNORELIST %}
+remove_rule_{{ IGNOREDRULE }}:
+  file.absent:
+    - name: /opt/so/conf/strelka/rules/signature-base/{{ IGNOREDRULE }}
+{% endfor %}
 
 {% if grains['role'] in ['so-eval','so-managersearch', 'so-manager', 'so-standalone', 'so-import'] %}
 strelkarepos:
diff --git a/salt/suricata/init.sls b/salt/suricata/init.sls
index 4cbda4bb4..73c4d2395 100644
--- a/salt/suricata/init.sls
+++ b/salt/suricata/init.sls
@@ -15,6 +15,8 @@
 {% from 'allowed_states.map.jinja' import allowed_states %}
 {% if sls in allowed_states and grains.role not in ['so-manager', 'so-managersearch'] %}
 
+{% from "suricata/map.jinja" import SURICATAOPTIONS with context %}
+
 {% set interface = salt['pillar.get']('sensor:interface', 'bond0') %}
 {% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
 {% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
@@ -136,9 +138,10 @@ suribpf:
 {% endif %}
 
 so-suricata:
-  docker_container.running:
+  docker_container.{{ SURICATAOPTIONS.status }}:
+    {% if SURICATAOPTIONS.status == 'running' %}
     - image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-suricata:{{ VERSION }}
-    - start: {{ START }}
+    - start: {{ SURICATAOPTIONS.start }}
     - privileged: True
     - environment:
       - INTERFACE={{ interface }}
@@ -157,18 +160,27 @@ so-suricata:
       - file: /opt/so/conf/suricata/rules/
      - file: /opt/so/conf/suricata/bpf
+    {% else %} {# if Suricata isn't enabled, then stop and remove the container #}
+    - force: True
+    {% endif %}
+
 append_so-suricata_so-status.conf:
   file.append:
     - name: /opt/so/conf/so-status/so-status.conf
     - text: so-suricata
     - unless: grep -q so-suricata /opt/so/conf/so-status/so-status.conf
 
-{% if grains.role == 'so-import' %}
-disable_so-suricata_so-status.conf:
+  {% if not SURICATAOPTIONS.start %}
+so-suricata_so-status.disabled:
   file.comment:
     - name: /opt/so/conf/so-status/so-status.conf
     - regex: ^so-suricata$
-{% endif %}
+  {% else %}
+delete_so-suricata_so-status.disabled:
+  file.uncomment:
+    - name: /opt/so/conf/so-status/so-status.conf
+    - regex: ^so-suricata$
+  {% endif %}
 
 /usr/local/bin/surirotate:
   cron.absent:
diff --git a/salt/suricata/map.jinja b/salt/suricata/map.jinja
index ad4d70e80..2ffd5ad08 100644
--- a/salt/suricata/map.jinja
+++ b/salt/suricata/map.jinja
@@ -1,6 +1,15 @@
-# don't start the docker container if it is an import node
-{% if grains.id.split('_')|last == 'import' %}
-  {% set START = False %}
+{% set SURICATAOPTIONS = {} %}
+{% set ENABLED = salt['pillar.get']('suricata:enabled', 'True') %}
+
+# don't start the docker container if it is an import node or disabled via pillar
+{% if grains.id.split('_')|last == 'import' or ENABLED is sameas false %}
+  {% do SURICATAOPTIONS.update({'start': False}) %}
 {% else %}
-  {% set START = True %}
+  {% do SURICATAOPTIONS.update({'start': True}) %}
+{% endif %}
+
+{% if ENABLED is sameas false %}
+  {% do SURICATAOPTIONS.update({'status': 'absent'}) %}
+{% else %}
+  {% do SURICATAOPTIONS.update({'status': 'running'}) %}
 {% endif %}
\ No newline at end of file
diff --git a/salt/top.sls b/salt/top.sls
index b38d315d8..e2c980e1d 100644
--- a/salt/top.sls
+++ b/salt/top.sls
@@ -189,6 +189,7 @@ base:
 {%- if KIBANA %}
     - kibana
 {%- endif %}
+    - curator
 {%- if ELASTALERT %}
     - elastalert
 {%- endif %}
diff --git a/salt/zeek/init.sls b/salt/zeek/init.sls
index ce0d6dccd..5f5adfaa6 100644
--- a/salt/zeek/init.sls
+++ b/salt/zeek/init.sls
@@ -1,7 +1,21 @@
+# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
 {% from 'allowed_states.map.jinja' import allowed_states %}
 {% if sls in allowed_states %}
 
-{% from "zeek/map.jinja" import START with context %}
+{% from "zeek/map.jinja" import ZEEKOPTIONS with context %}
 
 {% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
 {% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
@@ -179,9 +193,10 @@ localzeeksync:
     LOCAL: {{ ZEEK.local | tojson }}
 
 so-zeek:
-  docker_container.running:
+  docker_container.{{ ZEEKOPTIONS.status }}:
+    {% if ZEEKOPTIONS.status == 'running' %}
     - image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-zeek:{{ VERSION }}
-    - start: {{ START }}
+    - start: {{ ZEEKOPTIONS.start }}
     - privileged: True
     - ulimits:
       - core=0
@@ -204,6 +219,9 @@ so-zeek:
       - file: /opt/so/conf/zeek/zeekctl.cfg
      - file: /opt/so/conf/zeek/policy
      - file: /opt/so/conf/zeek/bpf
+    {% else %} {# if Zeek isn't enabled, then stop and remove the container #}
+    - force: True
+    {% endif %}
 
 append_so-zeek_so-status.conf:
   file.append:
@@ -211,12 +229,17 @@ append_so-zeek_so-status.conf:
     - text: so-zeek
     - unless: grep -q so-zeek /opt/so/conf/so-status/so-status.conf
 
-{% if grains.role == 'so-import' %}
-disable_so-zeek_so-status.conf:
+  {% if not ZEEKOPTIONS.start %}
+so-zeek_so-status.disabled:
   file.comment:
     - name: /opt/so/conf/so-status/so-status.conf
     - regex: ^so-zeek$
-{% endif %}
+  {% else %}
+delete_so-zeek_so-status.disabled:
+  file.uncomment:
+    - name: /opt/so/conf/so-status/so-status.conf
+    - regex: ^so-zeek$
+  {% endif %}
 
 {% else %}
diff --git a/salt/zeek/map.jinja b/salt/zeek/map.jinja
index ad4d70e80..b5713c6d5 100644
--- a/salt/zeek/map.jinja
+++ b/salt/zeek/map.jinja
@@ -1,6 +1,15 @@
-# don't start the docker container if it is an import node
-{% if grains.id.split('_')|last == 'import' %}
-  {% set START = False %}
+{% set ZEEKOPTIONS = {} %}
+{% set ENABLED = salt['pillar.get']('zeek:enabled', 'True') %}
+
+# don't start the docker container if it is an import node or disabled via pillar
+{% if grains.id.split('_')|last == 'import' or ENABLED is sameas false %}
+  {% do ZEEKOPTIONS.update({'start': False}) %}
 {% else %}
-  {% set START = True %}
+  {% do ZEEKOPTIONS.update({'start': True}) %}
+{% endif %}
+
+{% if ENABLED is sameas false %}
+  {% do ZEEKOPTIONS.update({'status': 'absent'}) %}
+{% else %}
+  {% do ZEEKOPTIONS.update({'status': 'running'}) %}
 {% endif %}
\ No newline at end of file
diff --git a/setup/so-functions b/setup/so-functions
index 9a64a561e..06ec7238a 100755
--- a/setup/so-functions
+++ b/setup/so-functions
@@ -121,7 +121,7 @@ add_web_user() {
   {
     echo "Attempting to add administrator user for web interface...";
     export SKIP_STATE_APPLY=true
-    echo "$WEBPASSWD1" | /usr/sbin/so-user add "$WEBUSER";
+    echo "$WEBPASSWD1" | /usr/sbin/so-user add "$WEBUSER" "superuser";
     unset SKIP_STATE_APPLY
     echo "Add user result: $?";
   } >> "/root/so-user-add.log" 2>&1
@@ -1724,18 +1724,20 @@ manager_global() {
     "  hot_warm: False"\
     "elastic:"\
     "  features: False"\
-    "elasticsearch:"\
-    "  replicas: 0" >> "$global_pillar"
+    "elasticsearch:"\ >> "$global_pillar"
 
   if [ -n "$ESCLUSTERNAME" ]; then
     printf '%s\n'\
     "  true_cluster: True"\
-    "  true_cluster_name: '$ESCLUSTERNAME'" >> "$global_pillar"
+    "  config:"\
+    "    cluster:"\
+    "      name: '$ESCLUSTERNAME'" >> "$global_pillar"
   else
     printf '%s\n'\
-    "  true_cluster: False"\
-    "  true_cluster_name: 'so'" >> "$global_pillar"
+    "  true_cluster: False" >> "$global_pillar"
   fi
+  printf '%s\n'\
+    "  replicas: 0"\
     "  discovery_nodes: 1"\
     "  hot_warm_enabled: False"\
     "  cluster_routing_allocation_disk.threshold_enabled: true"\
diff --git a/setup/so-setup b/setup/so-setup
index 958d8aea1..7d5ae0a0e 100755
--- a/setup/so-setup
+++ b/setup/so-setup
@@ -473,7 +473,6 @@ if [[ $is_manager && ! $is_eval ]]; then
 fi
 
 if [[ $is_manager ]]; then
-  whiptail_components_adv_warning
   whiptail_enable_components
 
   if [[ "$STRELKA" = 1 ]]; then
diff --git a/setup/so-whiptail b/setup/so-whiptail
index 780411841..ed4067da1 100755
--- a/setup/so-whiptail
+++ b/setup/so-whiptail
@@ -110,13 +110,6 @@ whiptail_check_exitstatus() {
   esac
 }
 
-whiptail_components_adv_warning() {
-
-  [ -n "$TESTING" ] && return
-
-  whiptail --title "$whiptail_title" --msgbox "Please keep in mind the more services that you enable the more RAM that is required." 8 75
-}
-
 whiptail_create_admin_user() {
 
   [ -n "$TESTING" ] && return
@@ -408,9 +401,10 @@ whiptail_enable_components() {
 PLAYBOOK=0
 STRELKA=0
+description="Choose optional services to be enabled for this installation. Be aware that the more services you enable, the more RAM is required."
 
 if [[ $is_eval ]]; then
   COMPONENTS=$(whiptail --title "$whiptail_title" --checklist \
-  "Select Components to install:" 20 75 8 \
+  "$description" 20 75 8 \
   GRAFANA "Enable Grafana for system monitoring" ON \
   OSQUERY "Enable Fleet with osquery" ON \
   WAZUH "Enable Wazuh" ON \
@@ -419,7 +413,7 @@ if [[ $is_eval ]]; then
   STRELKA "Enable Strelka" ON 3>&1 1>&2 2>&3)
 else
   COMPONENTS=$(whiptail --title "$whiptail_title" --checklist \
-  "Select Components to install:" 20 75 7 \
+  "$description" 20 75 7 \
   OSQUERY "Enable Fleet with osquery" ON \
   WAZUH "Enable Wazuh" ON \
   THEHIVE "Enable TheHive" ON \
@@ -843,7 +837,7 @@ whiptail_invalid_pass_characters_warning() {
 
   [ -n "$TESTING" ] && return
 
-  whiptail --title "$whiptail_title" --msgbox "Password is invalid. Please exclude single quotes, double quotes and backslashes from the password." 8 75
+  whiptail --title "$whiptail_title" --msgbox "Password is invalid. Please exclude single quotes, double quotes, dollar signs, and backslashes from the password." 8 75
 }
 
 whiptail_invalid_pass_warning() {
@@ -1873,8 +1867,9 @@ whiptail_you_sure() {
   read -r -d '' you_sure_text <<- EOM
   Welcome to Security Onion Setup!
 
-  You can use Setup for lots of different use cases from a small standalone installation to a large distributed deployment for your enterprise.
-
+  You can use Setup for lots of different use cases from a small standalone installation to a large distributed deployment for your enterprise. Don't forget to review the documentation at:
+  https://docs.securityonion.net
+
   Setup uses keyboard navigation and you can use arrow keys to move around. Certain screens may provide a list and ask you to select one or more items from that list. You can use [SPACE] to select items and [ENTER] to proceed to the next screen.
 
   Would you like to continue?
diff --git a/sigs/securityonion-2.3.80.iso.sig b/sigs/securityonion-2.3.80.iso.sig
new file mode 100644
index 000000000..4fa76de2e
Binary files /dev/null and b/sigs/securityonion-2.3.80.iso.sig differ
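Note: the changes above introduce several new pillar lookups (`steno:maxfiles`, `strelka:ignore`, and the `suricata:enabled` / `zeek:enabled` flags read by the new map.jinja files). Below is a minimal sketch of how these could be overridden from a local pillar file instead of editing the Salt states; the keys mirror the `salt['pillar.get']()` calls in this diff, but the values and the exact pillar file location (for example a minion pillar under the local salt tree) are illustrative assumptions, not part of this change.

```yaml
# Hypothetical local pillar override (values are examples only)
steno:
  maxfiles: 50000        # default is 30000 in salt/pcap/files/config
suricata:
  enabled: false         # map.jinja then stops and removes the so-suricata container
zeek:
  enabled: false         # map.jinja then stops and removes the so-zeek container
strelka:
  ignore:
    - my_extra_rule.yar  # merged with the defaults in salt/strelka/defaults.yaml
```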