Merge pull request #5739 from Security-Onion-Solutions/dev

2.3.80
Mike Reeves
2021-10-01 15:15:54 -04:00
committed by GitHub
218 changed files with 5137 additions and 433 deletions

HOTFIX
View File

@@ -1 +0,0 @@
CURATOR GRAFANA_DASH_ALLOW WAZUH

View File

@@ -1,6 +1,6 @@
## Security Onion 2.3.70
## Security Onion 2.3.80
Security Onion 2.3.70 is here!
Security Onion 2.3.80 is here!
## Screenshots

View File

@@ -1,18 +1,18 @@
### 2.3.70-WAZUH ISO image built on 2021/08/30
### 2.3.80 ISO image built on 2021/09/27
### Download and Verify
2.3.70-WAZUH ISO image:
https://download.securityonion.net/file/securityonion/securityonion-2.3.70-WAZUH.iso
2.3.80 ISO image:
https://download.securityonion.net/file/securityonion/securityonion-2.3.80.iso
MD5: CEDEF3C38089896C252F9E3C75F7CB15
SHA1: FB420115C72DABDEB87C8B27F26E862C94628057
SHA256: CC3E75A97163E9CD255DA0D9C3EB11922FA045651827F291025398943C1BC230
MD5: 24F38563860416F4A8ABE18746913E14
SHA1: F923C005F54EA2A17AB225ADA0DA46042707AAD9
SHA256: 8E95D10AF664D9A406C168EC421D943CB23F0D0C1813C6C2DBA9B4E131984018
Signature for ISO image:
https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.70-WAZUH.iso.sig
https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.80.iso.sig
Signing key:
https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/master/KEYS
@@ -26,22 +26,22 @@ wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/ma
Download the signature file for the ISO:
```
wget https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.70-WAZUH.iso.sig
wget https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.80.iso.sig
```
Download the ISO image:
```
wget https://download.securityonion.net/file/securityonion/securityonion-2.3.70-WAZUH.iso
wget https://download.securityonion.net/file/securityonion/securityonion-2.3.80.iso
```
Verify the downloaded ISO image using the signature file:
```
gpg --verify securityonion-2.3.70-WAZUH.iso.sig securityonion-2.3.70-WAZUH.iso
gpg --verify securityonion-2.3.80.iso.sig securityonion-2.3.80.iso
```
The output should show "Good signature" and the Primary key fingerprint should match what's shown below:
```
gpg: Signature made Mon 30 Aug 2021 06:13:14 PM EDT using RSA key ID FE507013
gpg: Signature made Mon 27 Sep 2021 08:55:01 AM EDT using RSA key ID FE507013
gpg: Good signature from "Security Onion Solutions, LLC <info@securityonionsolutions.com>"
gpg: WARNING: This key is not certified with a trusted signature!
gpg: There is no indication that the signature belongs to the owner.
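If gpg reports that the public key is not found, import the signing key first (assuming the KEYS file was downloaded with the wget command shown in the hunk header above):
```
gpg --import KEYS
```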

View File

@@ -1 +1 @@
2.3.70
2.3.80

View File

@@ -1,7 +1,7 @@
elasticsearch:
templates:
- so/so-beats-template.json.jinja
- so/so-common-template.json
- so/so-common-template.json.jinja
- so/so-firewall-template.json.jinja
- so/so-flow-template.json.jinja
- so/so-ids-template.json.jinja
@@ -10,4 +10,4 @@ elasticsearch:
- so/so-ossec-template.json.jinja
- so/so-strelka-template.json.jinja
- so/so-syslog-template.json.jinja
- so/so-zeek-template.json.jinja
- so/so-zeek-template.json.jinja

View File

@@ -1,7 +1,7 @@
elasticsearch:
templates:
- so/so-beats-template.json.jinja
- so/so-common-template.json
- so/so-common-template.json.jinja
- so/so-firewall-template.json.jinja
- so/so-flow-template.json.jinja
- so/so-ids-template.json.jinja

View File

@@ -1,7 +1,7 @@
elasticsearch:
templates:
- so/so-beats-template.json.jinja
- so/so-common-template.json
- so/so-common-template.json.jinja
- so/so-firewall-template.json.jinja
- so/so-flow-template.json.jinja
- so/so-ids-template.json.jinja

View File

@@ -17,4 +17,4 @@
. /usr/sbin/so-common
salt-call state.highstate -linfo
salt-call state.highstate -l info

View File

@@ -99,6 +99,15 @@ check_password() {
return $?
}
check_password_and_exit() {
local password=$1
if ! check_password "$password"; then
echo "Password is invalid. Do not include single quotes, double quotes, dollar signs, and backslashes in the password."
exit 2
fi
return 0
}
check_elastic_license() {
[ -n "$TESTING" ] && return

View File

@@ -0,0 +1,57 @@
#!/bin/bash
# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
{%- set mainint = salt['pillar.get']('host:mainint') %}
{%- set MYIP = salt['grains.get']('ip_interfaces:' ~ mainint)[0] %}
default_conf_dir=/opt/so/conf
ELASTICSEARCH_HOST="{{ MYIP }}"
ELASTICSEARCH_PORT=9200
# Define a default directory to load roles from
ELASTICSEARCH_ROLES="$default_conf_dir/elasticsearch/roles/"
# Wait for Elasticsearch to initialize
echo -n "Waiting for Elasticsearch..."
COUNT=0
ELASTICSEARCH_CONNECTED="no"
while [[ "$COUNT" -le 240 ]]; do
{{ ELASTICCURL }} -k --output /dev/null --silent --head --fail -L https://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT"
if [ $? -eq 0 ]; then
ELASTICSEARCH_CONNECTED="yes"
echo "connected!"
break
else
((COUNT+=1))
sleep 1
echo -n "."
fi
done
if [ "$ELASTICSEARCH_CONNECTED" == "no" ]; then
echo
echo -e "Connection attempt timed out. Unable to connect to ElasticSearch. \nPlease try: \n -checking log(s) in /var/log/elasticsearch/\n -running 'sudo docker ps' \n -running 'sudo so-elastic-restart'"
echo
fi
cd ${ELASTICSEARCH_ROLES}
echo "Loading templates..."
for role in *; do
name=$(echo "$role" | cut -d. -f1)
so-elasticsearch-query _security/role/$name -XPUT -d @"$role"
done
cd - >/dev/null

View File

@@ -41,10 +41,7 @@ if [[ $? == 0 ]]; then
fi
read -rs FLEET_PASS
if ! check_password "$FLEET_PASS"; then
echo "Password is invalid. Please exclude single quotes, double quotes and backslashes from the password."
exit 2
fi
check_password_and_exit "$FLEET_PASS"
FLEET_HASH=$(docker exec so-soctopus python -c "import bcrypt; print(bcrypt.hashpw('$FLEET_PASS'.encode('utf-8'), bcrypt.gensalt()).decode('utf-8'));" 2>&1)
if [[ $? -ne 0 ]]; then

View File

@@ -52,7 +52,7 @@ fi
read -rs FLEET_PASS
if ! check_password "$FLEET_PASS"; then
echo "Password is invalid. Please exclude single quotes, double quotes and backslashes from the password."
echo "Password is invalid. Please exclude single quotes, double quotes, dollar signs, and backslashes from the password."
exit 2
fi

View File

@@ -17,6 +17,7 @@
# NOTE: This script depends on so-common
IMAGEREPO=security-onion-solutions
STATUS_CONF='/opt/so/conf/so-status/so-status.conf'
# shellcheck disable=SC2120
container_list() {
@@ -137,6 +138,11 @@ update_docker_containers() {
if [[ $result -eq 0 ]]; then
cat $SIGNPATH/KEYS | gpg --import - >> "$LOG_FILE" 2>&1
fi
# If downloading for soup, check if any optional images need to be pulled
if [[ $CURLTYPE == 'soup' ]]; then
grep -q "so-logscan" "$STATUS_CONF" && TRUSTED_CONTAINERS+=("so-logscan")
fi
# Download the containers from the interwebs
for i in "${TRUSTED_CONTAINERS[@]}"

View File

@@ -0,0 +1,172 @@
#!/bin/bash
#
# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
{%- set MANAGER = salt['grains.get']('master') %}
{%- set VERSION = salt['pillar.get']('global:soversion') %}
{%- set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{%- set MANAGERIP = salt['pillar.get']('global:managerip') -%}
{%- set URLBASE = salt['pillar.get']('global:url_base') %}
{% set ES_USER = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:user', '') %}
{% set ES_PW = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:pass', '') %}
INDEX_DATE=$(date +'%Y.%m.%d')
RUNID=$(tr -dc 'a-z0-9' < /dev/urandom | fold -w 8 | head -n 1)
. /usr/sbin/so-common
function usage {
cat << EOF
Usage: $0 <evtx-file-1> [evtx-file-2] [evtx-file-*]
Imports one or more evtx files into Security Onion. The evtx files will be analyzed and made available for review in the Security Onion toolset.
EOF
}
function evtx2es() {
EVTX=$1
HASH=$2
docker run --rm \
-v "$EVTX:/tmp/$RUNID.evtx" \
--entrypoint evtx2es \
{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-pcaptools:{{ VERSION }} \
--host {{ MANAGERIP }} --scheme https \
--index so-beats-$INDEX_DATE --pipeline import.wel \
--login {{ES_USER}} --pwd {{ES_PW}} \
"/tmp/$RUNID.evtx" 1>/dev/null 2>/dev/null
docker run --rm \
-v "$EVTX:/tmp/import.evtx" \
-v "/nsm/import/evtx-end_newest:/tmp/newest" \
-v "/nsm/import/evtx-start_oldest:/tmp/oldest" \
--entrypoint '/evtx_calc_timestamps.sh' \
{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-pcaptools:{{ VERSION }}
}
# if no parameters supplied, display usage
if [ $# -eq 0 ]; then
usage
exit 1
fi
# ensure this is a Manager node
require_manager
# verify that all parameters are files
for i in "$@"; do
if ! [ -f "$i" ]; then
usage
echo "\"$i\" is not a valid file!"
exit 2
fi
done
# track if we have any valid or invalid evtx
INVALID_EVTXS="no"
VALID_EVTXS="no"
# track oldest start and newest end so that we can generate the Kibana search hyperlink at the end
START_OLDEST="2050-12-31"
END_NEWEST="1971-01-01"
touch /nsm/import/evtx-start_oldest
touch /nsm/import/evtx-end_newest
echo $START_OLDEST > /nsm/import/evtx-start_oldest
echo $END_NEWEST > /nsm/import/evtx-end_newest
# paths must be quoted in case they include spaces
for EVTX in "$@"; do
EVTX=$(/usr/bin/realpath "$EVTX")
echo "Processing Import: ${EVTX}"
# generate a unique hash to assist with dedupe checks
HASH=$(md5sum "${EVTX}" | awk '{ print $1 }')
HASH_DIR=/nsm/import/${HASH}
echo "- assigning unique identifier to import: $HASH"
if [ -d $HASH_DIR ]; then
echo "- this EVTX has already been imported; skipping"
INVALID_EVTXS="yes"
else
VALID_EVTXS="yes"
EVTX_DIR=$HASH_DIR/evtx
mkdir -p $EVTX_DIR
# import evtx and write them to import ingest pipeline
echo "- importing logs to Elasticsearch..."
evtx2es "${EVTX}" $HASH
# compare $START to $START_OLDEST
START=$(cat /nsm/import/evtx-start_oldest)
START_COMPARE=$(date -d $START +%s)
START_OLDEST_COMPARE=$(date -d $START_OLDEST +%s)
if [ $START_COMPARE -lt $START_OLDEST_COMPARE ]; then
START_OLDEST=$START
fi
# compare $ENDNEXT to $END_NEWEST
END=$(cat /nsm/import/evtx-end_newest)
ENDNEXT=`date +%Y-%m-%d --date="$END 1 day"`
ENDNEXT_COMPARE=$(date -d $ENDNEXT +%s)
END_NEWEST_COMPARE=$(date -d $END_NEWEST +%s)
if [ $ENDNEXT_COMPARE -gt $END_NEWEST_COMPARE ]; then
END_NEWEST=$ENDNEXT
fi
cp -f "${EVTX}" "${EVTX_DIR}"/data.evtx
chmod 644 "${EVTX_DIR}"/data.evtx
fi # end of valid evtx
echo
done # end of for-loop processing evtx files
# remove temp files
echo "Cleaning up:"
for TEMP_EVTX in ${TEMP_EVTXS[@]}; do
echo "- removing temporary evtx $TEMP_EVTX"
rm -f $TEMP_EVTX
done
# output final messages
if [ "$INVALID_EVTXS" = "yes" ]; then
echo
echo "Please note! One or more evtx was invalid! You can scroll up to see which ones were invalid."
fi
START_OLDEST_FORMATTED=`date +%Y-%m-%d --date="$START_OLDEST"`
START_OLDEST_SLASH=$(echo $START_OLDEST_FORMATTED | sed -e 's/-/%2F/g')
END_NEWEST_SLASH=$(echo $END_NEWEST | sed -e 's/-/%2F/g')
if [ "$VALID_EVTXS" = "yes" ]; then
cat << EOF
Import complete!
You can use the following hyperlink to view data in the time range of your import. You can triple-click to quickly highlight the entire hyperlink and you can then copy it into your browser:
https://{{ URLBASE }}/#/hunt?q=import.id:${RUNID}%20%7C%20groupby%20event.module%20event.dataset&t=${START_OLDEST_SLASH}%2000%3A00%3A00%20AM%20-%20${END_NEWEST_SLASH}%2000%3A00%3A00%20AM&z=UTC
or you can manually set your Time Range to be (in UTC):
From: $START_OLDEST_FORMATTED To: $END_NEWEST
Please note that it may take 30 seconds or more for events to appear in Hunt.
EOF
fi
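A usage sketch for the new script (paths hypothetical; require_manager means it must run on the manager node):
```
sudo so-import-evtx /cases/1234/Security.evtx /cases/1234/System.evtx
```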

View File

@@ -0,0 +1,22 @@
#!/bin/bash
# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
. /usr/sbin/so-common
ENABLEPLAY=${1:-False}
docker exec so-soctopus /usr/local/bin/python -c "import playbook; print(playbook.play_import($ENABLEPLAY))"
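Usage sketch: the optional first argument is passed through to playbook.play_import() and defaults to False, so imported plays presumably stay disabled unless True is given:
```
so-playbook-import        # import plays, leave them disabled
so-playbook-import True   # import plays and enable them
```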

View File

@@ -20,6 +20,9 @@
appliance_check() {
{%- if salt['grains.get']('sosmodel', '') %}
APPLIANCE=1
{%- if grains['sosmodel'] in ['SO2AMI01', 'SO2GCI01', 'SO2AZI01'] %}
exit 0
{%- endif %}
DUDEYOUGOTADELL=$(dmidecode |grep Dell)
if [[ -n $DUDEYOUGOTADELL ]]; then
APPTYPE=dell
@@ -90,7 +93,7 @@ check_software_raid
{%- endif %}
if [[ -n $SWRAID ]]; then
if [[ $SWRAID == '0' && BOSSRAID == '0' ]]; then
if [[ $SWRAID == '0' && $BOSSRAID == '0' ]]; then
RAIDSTATUS=0
else
RAIDSTATUS=1

View File

@@ -1,13 +1,10 @@
#!/bin/bash
got_root() {
# Make sure you are root
if [ "$(id -u)" -ne 0 ]; then
echo "This script must be run using sudo!"
exit 1
fi
. /usr/sbin/so-common
}
argstr=""
for arg in "$@"; do
argstr="${argstr} \"${arg}\""
done
got_root
docker exec so-idstools /bin/bash -c "cd /opt/so/idstools/etc && idstools-rulecat $1"
docker exec so-idstools /bin/bash -c "cd /opt/so/idstools/etc && idstools-rulecat --force ${argstr}"

View File

@@ -41,10 +41,7 @@ if [[ $? == 0 ]]; then
fi
read -rs THEHIVE_PASS
if ! check_password "$THEHIVE_PASS"; then
echo "Password is invalid. Please exclude single quotes, double quotes and backslashes from the password."
exit 2
fi
check_password_and_exit "$THEHIVE_PASS"
# Create new user in TheHive
resp=$(curl -sk -XPOST -H "Authorization: Bearer $THEHIVE_KEY" -H "Content-Type: application/json" -L "https://$THEHVIE_API_URL/user" -d "{\"login\" : \"$THEHIVE_USER\",\"name\" : \"$THEHIVE_USER\",\"roles\" : [\"read\",\"alert\",\"write\",\"admin\"],\"preferences\" : \"{}\",\"password\" : \"$THEHIVE_PASS\"}")

View File

@@ -42,7 +42,7 @@ fi
read -rs THEHIVE_PASS
if ! check_password "$THEHIVE_PASS"; then
echo "Password is invalid. Please exclude single quotes, double quotes and backslashes from the password."
echo "Password is invalid. Please exclude single quotes, double quotes, dollar signs, and backslashes from the password."
exit 2
fi

View File

@@ -18,11 +18,17 @@
source $(dirname $0)/so-common
if [[ $# -lt 1 || $# -gt 2 ]]; then
echo "Usage: $0 <list|add|update|enable|disable|validate|valemail|valpass> [email]"
DEFAULT_ROLE=analyst
if [[ $# -lt 1 || $# -gt 3 ]]; then
echo "Usage: $0 <operation> [email] [role]"
echo ""
echo " where <operation> is one of the following:"
echo ""
echo " list: Lists all user email addresses currently defined in the identity system"
echo " add: Adds a new user to the identity system; requires 'email' parameter"
echo " add: Adds a new user to the identity system; requires 'email' parameter, while 'role' parameter is optional and defaults to $DEFAULT_ROLE"
echo " addrole: Grants a role to an existing user; requires 'email' and 'role' parameters"
echo " delrole: Removes a role from an existing user; requires 'email' and 'role' parameters"
echo " update: Updates a user's password; requires 'email' parameter"
echo " enable: Enables a user; requires 'email' parameter"
echo " disable: Disables a user; requires 'email' parameter"
@@ -36,14 +42,18 @@ fi
operation=$1
email=$2
role=$3
kratosUrl=${KRATOS_URL:-http://127.0.0.1:4434}
databasePath=${KRATOS_DB_PATH:-/opt/so/conf/kratos/db/db.sqlite}
bcryptRounds=${BCRYPT_ROUNDS:-12}
elasticUsersFile=${ELASTIC_USERS_FILE:-/opt/so/saltstack/local/salt/elasticsearch/files/users}
elasticRolesFile=${ELASTIC_ROLES_FILE:-/opt/so/saltstack/local/salt/elasticsearch/files/users_roles}
socRolesFile=${SOC_ROLES_FILE:-/opt/so/conf/soc/soc_users_roles}
esUID=${ELASTIC_UID:-930}
esGID=${ELASTIC_GID:-930}
soUID=${SOCORE_UID:-939}
soGID=${SOCORE_GID:-939}
function lock() {
# Obtain file descriptor lock
@@ -80,7 +90,7 @@ function findIdByEmail() {
email=$1
response=$(curl -Ss -L ${kratosUrl}/identities)
identityId=$(echo "${response}" | jq ".[] | select(.verifiable_addresses[0].value == \"$email\") | .id")
identityId=$(echo "${response}" | jq -r ".[] | select(.verifiable_addresses[0].value == \"$email\") | .id")
echo $identityId
}
@@ -89,17 +99,20 @@ function validatePassword() {
len=$(expr length "$password")
if [[ $len -lt 6 ]]; then
echo "Password does not meet the minimum requirements"
exit 2
fail "Password does not meet the minimum requirements"
fi
check_password_and_exit "$password"
}
function validateEmail() {
email=$1
# (?:[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*|"(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21\x23-\x5b\x5d-\x7f]|\\[\x01-\x09\x0b\x0c\x0e-\x7f])*")@(?:(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?|\[(?:(?:(2(5[0-5]|[0-4][0-9])|1[0-9][0-9]|[1-9]?[0-9]))\.){3}(?:(2(5[0-5]|[0-4][0-9])|1[0-9][0-9]|[1-9]?[0-9])|[a-z0-9-]*[a-z0-9]:(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21-\x5a\x53-\x7f]|\\[\x01-\x09\x0b\x0c\x0e-\x7f])+)\])
if [[ ! "$email" =~ ^[[:alnum:]._%+-]+@[[:alnum:].-]+\.[[:alpha:]]{2,}$ ]]; then
echo "Email address is invalid"
exit 3
fail "Email address is invalid"
fi
if [[ "$email" =~ [A-Z] ]]; then
fail "Email addresses cannot contain uppercase letters"
fi
}
@@ -127,21 +140,47 @@ function updatePassword() {
validatePassword "$password"
fi
if [[ -n $identityId ]]; then
if [[ -n "$identityId" ]]; then
# Generate password hash
passwordHash=$(hashPassword "$password")
# Update DB with new hash
echo "update identity_credentials set config=CAST('{\"hashed_password\":\"$passwordHash\"}' as BLOB) where identity_id=${identityId};" | sqlite3 "$databasePath"
echo "update identity_credentials set config=CAST('{\"hashed_password\":\"$passwordHash\"}' as BLOB) where identity_id='${identityId}';" | sqlite3 "$databasePath"
[[ $? != 0 ]] && fail "Unable to update password"
fi
}
function createElasticFile() {
function createFile() {
filename=$1
tmpFile=${filename}
truncate -s 0 "$tmpFile"
chmod 600 "$tmpFile"
chown "${esUID}:${esGID}" "$tmpFile"
uid=$2
gid=$3
mkdir -p $(dirname "$filename")
truncate -s 0 "$filename"
chmod 600 "$filename"
chown "${uid}:${gid}" "$filename"
}
function ensureRoleFileExists() {
if [[ ! -f "$socRolesFile" || ! -s "$socRolesFile" ]]; then
# Generate the new users file
rolesTmpFile="${socRolesFile}.tmp"
createFile "$rolesTmpFile" "$soUID" "$soGID"
if [[ -f "$databasePath" ]]; then
echo "Migrating roles to new file: $socRolesFile"
echo "select 'superuser:' || id from identities;" | sqlite3 "$databasePath" \
>> "$rolesTmpFile"
[[ $? != 0 ]] && fail "Unable to read identities from database"
echo "The following users have all been migrated with the super user role:"
cat "${rolesTmpFile}"
else
echo "Database file does not exist yet, installation is likely not yet complete."
fi
mv "${rolesTmpFile}" "${socRolesFile}"
fi
}
function syncElasticSystemUser() {
@@ -172,33 +211,31 @@ function syncElasticSystemRole() {
}
function syncElastic() {
echo "Syncing users between SOC and Elastic..."
echo "Syncing users and roles between SOC and Elastic..."
usersTmpFile="${elasticUsersFile}.tmp"
createFile "${usersTmpFile}" "$esUID" "$esGID"
rolesTmpFile="${elasticRolesFile}.tmp"
createElasticFile "${usersTmpFile}"
createElasticFile "${rolesTmpFile}"
createFile "${rolesTmpFile}" "$esUID" "$esGID"
authPillarJson=$(lookup_salt_value "auth" "elasticsearch" "pillar" "json")
syncElasticSystemUser "$authPillarJson" "so_elastic_user" "$usersTmpFile"
syncElasticSystemRole "$authPillarJson" "so_elastic_user" "superuser" "$rolesTmpFile"
syncElasticSystemUser "$authPillarJson" "so_kibana_user" "$usersTmpFile"
syncElasticSystemRole "$authPillarJson" "so_kibana_user" "superuser" "$rolesTmpFile"
syncElasticSystemUser "$authPillarJson" "so_logstash_user" "$usersTmpFile"
syncElasticSystemRole "$authPillarJson" "so_logstash_user" "superuser" "$rolesTmpFile"
syncElasticSystemUser "$authPillarJson" "so_beats_user" "$usersTmpFile"
syncElasticSystemRole "$authPillarJson" "so_beats_user" "superuser" "$rolesTmpFile"
syncElasticSystemUser "$authPillarJson" "so_monitor_user" "$usersTmpFile"
syncElasticSystemRole "$authPillarJson" "so_elastic_user" "superuser" "$rolesTmpFile"
syncElasticSystemRole "$authPillarJson" "so_kibana_user" "superuser" "$rolesTmpFile"
syncElasticSystemRole "$authPillarJson" "so_logstash_user" "superuser" "$rolesTmpFile"
syncElasticSystemRole "$authPillarJson" "so_beats_user" "superuser" "$rolesTmpFile"
syncElasticSystemRole "$authPillarJson" "so_monitor_user" "remote_monitoring_collector" "$rolesTmpFile"
syncElasticSystemRole "$authPillarJson" "so_monitor_user" "remote_monitoring_agent" "$rolesTmpFile"
syncElasticSystemRole "$authPillarJson" "so_monitor_user" "monitoring_user" "$rolesTmpFile"
if [[ -f "$databasePath" ]]; then
# Generate the new users file
if [[ -f "$databasePath" && -f "$socRolesFile" ]]; then
# Append the SOC users
echo "select '{\"user\":\"' || ici.identifier || '\", \"data\":' || ic.config || '}'" \
"from identity_credential_identifiers ici, identity_credentials ic " \
"where ici.identity_credential_id=ic.id and instr(ic.config, 'hashed_password') " \
@@ -208,17 +245,18 @@ function syncElastic() {
>> "$usersTmpFile"
[[ $? != 0 ]] && fail "Unable to read credential hashes from database"
# Generate the new users_roles file
echo "select 'superuser:' || ici.identifier " \
"from identity_credential_identifiers ici, identity_credentials ic " \
"where ici.identity_credential_id=ic.id and instr(ic.config, 'hashed_password') " \
"order by ici.identifier;" | \
sqlite3 "$databasePath" \
>> "$rolesTmpFile"
[[ $? != 0 ]] && fail "Unable to read credential IDs from database"
# Append the user roles
while IFS="" read -r rolePair || [ -n "$rolePair" ]; do
userId=$(echo "$rolePair" | cut -d: -f2)
role=$(echo "$rolePair" | cut -d: -f1)
echo "select '$role:' || ici.identifier " \
"from identity_credential_identifiers ici, identity_credentials ic " \
"where ici.identity_credential_id=ic.id and ic.identity_id = '$userId';" | \
sqlite3 "$databasePath" >> "$rolesTmpFile"
done < "$socRolesFile"
else
echo "Database file does not exist yet, skipping users export"
echo "Database file or soc roles file does not exist yet, skipping users export"
fi
if [[ -s "${usersTmpFile}" ]]; then
@@ -236,15 +274,22 @@ function syncElastic() {
}
function syncAll() {
ensureRoleFileExists
# Check if a sync is needed. Sync is not needed if the following are true:
# - user database entries are all older than the elastic users file
# - soc roles file last modify date is older than the elastic roles file
if [[ -z "$FORCE_SYNC" && -f "$databasePath" && -f "$elasticUsersFile" ]]; then
usersFileAgeSecs=$(echo $(($(date +%s) - $(date +%s -r "$elasticUsersFile"))))
staleCount=$(echo "select count(*) from identity_credentials where updated_at >= Datetime('now', '-${usersFileAgeSecs} seconds');" \
| sqlite3 "$databasePath")
if [[ "$staleCount" == "0" ]]; then
if [[ "$staleCount" == "0" && "$elasticRolesFile" -nt "$socRolesFile" ]]; then
return 1
fi
fi
syncElastic
return 0
}
@@ -252,11 +297,64 @@ function listUsers() {
response=$(curl -Ss -L ${kratosUrl}/identities)
[[ $? != 0 ]] && fail "Unable to communicate with Kratos"
echo "${response}" | jq -r ".[] | .verifiable_addresses[0].value" | sort
users=$(echo "${response}" | jq -r ".[] | .verifiable_addresses[0].value" | sort)
for user in $users; do
roles=$(grep "$user" "$elasticRolesFile" | cut -d: -f1 | tr '\n' ' ')
echo "$user: $roles"
done
}
function addUserRole() {
email=$1
role=$2
adjustUserRole "$email" "$role" "add"
}
function deleteUserRole() {
email=$1
role=$2
adjustUserRole "$email" "$role" "del"
}
function adjustUserRole() {
email=$1
role=$2
op=$3
identityId=$(findIdByEmail "$email")
[[ ${identityId} == "" ]] && fail "User not found"
ensureRoleFileExists
filename="$socRolesFile"
hasRole=0
grep "$role:" "$socRolesFile" | grep -q "$identityId" && hasRole=1
if [[ "$op" == "add" ]]; then
if [[ "$hasRole" == "1" ]]; then
echo "User '$email' already has the role: $role"
return 1
else
echo "$role:$identityId" >> "$filename"
fi
elif [[ "$op" == "del" ]]; then
if [[ "$hasRole" -ne 1 ]]; then
fail "User '$email' does not have the role: $role"
else
sed "/^$role:$identityId\$/d" "$filename" > "$filename.tmp"
cat "$filename".tmp > "$filename"
rm -f "$filename".tmp
fi
else
fail "Unsupported role adjustment operation: $op"
fi
return 0
}
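For reference, each line the functions above write to and read from the soc roles file pairs a role name with a Kratos identity ID, along these lines (IDs hypothetical):
```
analyst:6b0f2a7e-1c44-4e53-9d0a-111111111111
superuser:6b0f2a7e-1c44-4e53-9d0a-111111111111
```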
function createUser() {
email=$1
role=$2
now=$(date -u +%FT%TZ)
addUserJson=$(cat <<EOF
@@ -270,16 +368,17 @@ EOF
response=$(curl -Ss -L ${kratosUrl}/identities -d "$addUserJson")
[[ $? != 0 ]] && fail "Unable to communicate with Kratos"
identityId=$(echo "${response}" | jq ".id")
if [[ ${identityId} == "null" ]]; then
identityId=$(echo "${response}" | jq -r ".id")
if [[ "${identityId}" == "null" ]]; then
code=$(echo "${response}" | jq ".error.code")
[[ "${code}" == "409" ]] && fail "User already exists"
reason=$(echo "${response}" | jq ".error.message")
[[ $? == 0 ]] && fail "Unable to add user: ${reason}"
else
updatePassword "$identityId"
addUserRole "$email" "$role"
fi
updatePassword $identityId
}
function updateStatus() {
@@ -292,17 +391,17 @@ function updateStatus() {
response=$(curl -Ss -L "${kratosUrl}/identities/$identityId")
[[ $? != 0 ]] && fail "Unable to communicate with Kratos"
oldConfig=$(echo "select config from identity_credentials where identity_id=${identityId};" | sqlite3 "$databasePath")
oldConfig=$(echo "select config from identity_credentials where identity_id='${identityId}';" | sqlite3 "$databasePath")
if [[ "$status" == "locked" ]]; then
config=$(echo $oldConfig | sed -e 's/hashed/locked/')
echo "update identity_credentials set config=CAST('${config}' as BLOB) where identity_id=${identityId};" | sqlite3 "$databasePath"
echo "update identity_credentials set config=CAST('${config}' as BLOB) where identity_id='${identityId}';" | sqlite3 "$databasePath"
[[ $? != 0 ]] && fail "Unable to lock credential record"
echo "delete from sessions where identity_id=${identityId};" | sqlite3 "$databasePath"
echo "delete from sessions where identity_id='${identityId}';" | sqlite3 "$databasePath"
[[ $? != 0 ]] && fail "Unable to invalidate sessions"
else
config=$(echo $oldConfig | sed -e 's/locked/hashed/')
echo "update identity_credentials set config=CAST('${config}' as BLOB) where identity_id=${identityId};" | sqlite3 "$databasePath"
echo "update identity_credentials set config=CAST('${config}' as BLOB) where identity_id='${identityId}';" | sqlite3 "$databasePath"
[[ $? != 0 ]] && fail "Unable to unlock credential record"
fi
@@ -318,7 +417,7 @@ function updateUser() {
identityId=$(findIdByEmail "$email")
[[ ${identityId} == "" ]] && fail "User not found"
updatePassword $identityId
updatePassword "$identityId"
}
function deleteUser() {
@@ -329,6 +428,11 @@ function deleteUser() {
response=$(curl -Ss -XDELETE -L "${kratosUrl}/identities/$identityId")
[[ $? != 0 ]] && fail "Unable to communicate with Kratos"
rolesTmpFile="${socRolesFile}.tmp"
createFile "$rolesTmpFile" "$soUID" "$soGID"
grep -v "$id" "$socRolesFile" > "$rolesTmpFile"
mv "$rolesTmpFile" "$socRolesFile"
}
case "${operation}" in
@@ -339,7 +443,7 @@ case "${operation}" in
lock
validateEmail "$email"
updatePassword
createUser "$email"
createUser "$email" "${role:-$DEFAULT_ROLE}"
syncAll
echo "Successfully added new user to SOC"
check_container thehive && echo "$password" | so-thehive-user-add "$email"
@@ -351,6 +455,31 @@ case "${operation}" in
listUsers
;;
"addrole")
verifyEnvironment
[[ "$email" == "" ]] && fail "Email address must be provided"
[[ "$role" == "" ]] && fail "Role must be provided"
lock
validateEmail "$email"
if addUserRole "$email" "$role"; then
syncElastic
echo "Successfully added role to user"
fi
;;
"delrole")
verifyEnvironment
[[ "$email" == "" ]] && fail "Email address must be provided"
[[ "$role" == "" ]] && fail "Role must be provided"
lock
validateEmail "$email"
deleteUserRole "$email" "$role"
syncElastic
echo "Successfully removed role from user"
;;
"update")
verifyEnvironment
[[ "$email" == "" ]] && fail "Email address must be provided"

View File

@@ -1,5 +1,4 @@
#!/bin/bash
# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
@@ -20,13 +19,8 @@ echo "Starting to check for yara rule updates at $(date)..."
output_dir="/opt/so/saltstack/default/salt/strelka/rules"
mkdir -p $output_dir
repos="$output_dir/repos.txt"
ignorefile="$output_dir/ignore.txt"
deletecounter=0
newcounter=0
updatecounter=0
{% if ISAIRGAP is sameas true %}
@@ -35,58 +29,21 @@ echo "Airgap mode enabled."
clone_dir="/nsm/repo/rules/strelka"
repo_name="signature-base"
mkdir -p /opt/so/saltstack/default/salt/strelka/rules/signature-base
# Ensure a copy of the license is available for the rules
[ -f $clone_dir/LICENSE ] && cp $clone_dir/$repo_name/LICENSE $output_dir/$repo_name
# Copy over rules
for i in $(find $clone_dir/yara -name "*.yar*"); do
rule_name=$(echo $i | awk -F '/' '{print $NF}')
repo_sum=$(sha256sum $i | awk '{print $1}')
# Check rules against those in ignore list -- don't copy if ignored.
if ! grep -iq $rule_name $ignorefile; then
existing_rules=$(find $output_dir/$repo_name/ -name $rule_name | wc -l)
# For existing rules, check to see if they need to be updated, by comparing checksums
if [ $existing_rules -gt 0 ];then
local_sum=$(sha256sum $output_dir/$repo_name/$rule_name | awk '{print $1}')
if [ "$repo_sum" != "$local_sum" ]; then
echo "Checksums do not match!"
echo "Updating $rule_name..."
cp $i $output_dir/$repo_name;
((updatecounter++))
fi
else
# If rule doesn't exist already, we'll add it
echo "Adding new rule: $rule_name..."
cp $i $output_dir/$repo_name
((newcounter++))
fi
fi;
done
# Check to see if we have any old rules that need to be removed
for i in $(find $output_dir/$repo_name -name "*.yar*" | awk -F '/' '{print $NF}'); do
is_repo_rule=$(find $clone_dir -name "$i" | wc -l)
if [ $is_repo_rule -eq 0 ]; then
echo "Could not find $i in source $repo_name repo...removing from $output_dir/$repo_name..."
rm $output_dir/$repo_name/$i
((deletecounter++))
fi
echo "Adding rule: $rule_name..."
cp $i $output_dir/$repo_name
((newcounter++))
done
echo "Done!"
if [ "$newcounter" -gt 0 ];then
echo "$newcounter new rules added."
fi
if [ "$updatecounter" -gt 0 ];then
echo "$updatecounter rules updated."
fi
if [ "$deletecounter" -gt 0 ];then
echo "$deletecounter rules removed because they were deprecated or don't exist in the source repo."
echo "$newcounter rules added."
fi
{% else %}
@@ -99,69 +56,32 @@ if [ "$gh_status" == "200" ] || [ "$gh_status" == "301" ]; then
if ! $(echo "$repo" | grep -qE '^#'); then
# Remove old repo if existing bc of previous error condition or unexpected disruption
repo_name=`echo $repo | awk -F '/' '{print $NF}'`
[ -d $repo_name ] && rm -rf $repo_name
[ -d $output_dir/$repo_name ] && rm -rf $output_dir/$repo_name
# Clone repo and make appropriate directories for rules
git clone $repo $clone_dir/$repo_name
echo "Analyzing rules from $clone_dir/$repo_name..."
mkdir -p $output_dir/$repo_name
# Ensure a copy of the license is available for the rules
[ -f $clone_dir/$repo_name/LICENSE ] && cp $clone_dir/$repo_name/LICENSE $output_dir/$repo_name
# Copy over rules
for i in $(find $clone_dir/$repo_name -name "*.yar*"); do
rule_name=$(echo $i | awk -F '/' '{print $NF}')
repo_sum=$(sha256sum $i | awk '{print $1}')
# Check rules against those in ignore list -- don't copy if ignored.
if ! grep -iq $rule_name $ignorefile; then
existing_rules=$(find $output_dir/$repo_name/ -name $rule_name | wc -l)
# For existing rules, check to see if they need to be updated, by comparing checksums
if [ $existing_rules -gt 0 ];then
local_sum=$(sha256sum $output_dir/$repo_name/$rule_name | awk '{print $1}')
if [ "$repo_sum" != "$local_sum" ]; then
echo "Checksums do not match!"
echo "Updating $rule_name..."
cp $i $output_dir/$repo_name;
((updatecounter++))
fi
else
# If rule doesn't exist already, we'll add it
echo "Adding new rule: $rule_name..."
cp $i $output_dir/$repo_name
((newcounter++))
fi
fi;
done
# Check to see if we have any old rules that need to be removed
for i in $(find $output_dir/$repo_name -name "*.yar*" | awk -F '/' '{print $NF}'); do
is_repo_rule=$(find $clone_dir/$repo_name -name "$i" | wc -l)
if [ $is_repo_rule -eq 0 ]; then
echo "Could not find $i in source $repo_name repo...removing from $output_dir/$repo_name..."
rm $output_dir/$repo_name/$i
((deletecounter++))
fi
done
rm -rf $clone_dir/$repo_name
fi
done < $repos
echo "Adding rule: $rule_name..."
cp $i $output_dir/$repo_name
((newcounter++))
done
rm -rf $clone_dir/$repo_name
fi
done < $repos
echo "Done!"
if [ "$newcounter" -gt 0 ];then
echo "$newcounter new rules added."
echo "$newcounter rules added."
fi
if [ "$updatecounter" -gt 0 ];then
echo "$updatecounter rules updated."
fi
if [ "$deletecounter" -gt 0 ];then
echo "$deletecounter rules removed because they were deprecated or don't exist in the source repo."
fi
else
echo "Server returned $gh_status status code."
echo "No connectivity to Github...exiting..."

View File

@@ -27,6 +27,7 @@ SOUP_LOG=/root/soup.log
INFLUXDB_MIGRATION_LOG=/opt/so/log/influxdb/soup_migration.log
WHATWOULDYOUSAYYAHDOHERE=soup
whiptail_title='Security Onion UPdater'
NOTIFYCUSTOMELASTICCONFIG=false
check_err() {
local exit_code=$1
@@ -105,17 +106,20 @@ add_common() {
airgap_mounted() {
# Let's see if the ISO is already mounted.
if [ -f /tmp/soagupdate/SecurityOnion/VERSION ]; then
if [[ -f /tmp/soagupdate/SecurityOnion/VERSION ]]; then
echo "The ISO is already mounted"
else
echo ""
cat << EOF
if [[ -z $ISOLOC ]]; then
echo "This is airgap. Ask for a location."
echo ""
cat << EOF
In order for soup to proceed, the path to the downloaded Security Onion ISO file, or the path to the CD-ROM or equivalent device containing the ISO media must be provided.
For example, if you have copied the new Security Onion ISO file to your home directory, then the path might look like /home/myuser/securityonion-2.x.y.iso.
Or, if you have burned the new ISO onto an optical disk then the path might look like /dev/cdrom.
EOF
read -rp 'Enter the path to the new Security Onion ISO content: ' ISOLOC
read -rp 'Enter the path to the new Security Onion ISO content: ' ISOLOC
fi
if [[ -f $ISOLOC ]]; then
# Mounting the ISO image
mkdir -p /tmp/soagupdate
@@ -131,7 +135,7 @@ EOF
elif [[ -f $ISOLOC/SecurityOnion/VERSION ]]; then
ln -s $ISOLOC /tmp/soagupdate
echo "Found the update content"
else
elif [[ -b $ISOLOC ]]; then
mkdir -p /tmp/soagupdate
mount $ISOLOC /tmp/soagupdate
if [ ! -f /tmp/soagupdate/SecurityOnion/VERSION ]; then
@@ -140,7 +144,11 @@ EOF
exit 0
else
echo "Device has been mounted!"
fi
fi
else
echo "Could not find Security Onion ISO content at ${ISOLOC}"
echo "Ensure the path you entered is correct, and that you verify the ISO that you downloaded."
exit 0
fi
fi
}
@@ -150,7 +158,7 @@ airgap_update_dockers() {
# Let's copy the tarball
if [[ ! -f $AGDOCKER/registry.tar ]]; then
echo "Unable to locate registry. Exiting"
exit 1
exit 0
else
echo "Stopping the registry docker"
docker stop so-dockerregistry
@@ -282,25 +290,31 @@ check_os_updates() {
OSUPDATES=$(yum -q list updates | wc -l)
fi
if [[ "$OSUPDATES" -gt 0 ]]; then
echo $NEEDUPDATES
echo ""
read -p "Press U to update OS packages (recommended), C to continue without updates, or E to exit: " confirm
if [[ "$confirm" == [cC] ]]; then
if [[ -z $UNATTENDED ]]; then
echo "$NEEDUPDATES"
echo ""
read -rp "Press U to update OS packages (recommended), C to continue without updates, or E to exit: " confirm
if [[ "$confirm" == [cC] ]]; then
echo "Continuing without updating packages"
elif [[ "$confirm" == [uU] ]]; then
elif [[ "$confirm" == [uU] ]]; then
echo "Applying Grid Updates"
set +e
run_check_net_err "salt '*' -b 5 state.apply patch.os queue=True" 'Could not apply OS updates, please check your network connection.'
set -e
else
update_flag=true
else
echo "Exiting soup"
exit 0
fi
else
update_flag=true
fi
else
echo "Looks like you have an updated OS"
echo "Looks like you have an updated OS"
fi
if [[ $update_flag == true ]]; then
set +e
run_check_net_err "salt '*' -b 5 state.apply patch.os queue=True" 'Could not apply OS updates, please check your network connection.'
set -e
fi
}
clean_dockers() {
@@ -372,6 +386,7 @@ preupgrade_changes() {
[[ "$INSTALLEDVERSION" == 2.3.0 || "$INSTALLEDVERSION" == 2.3.1 || "$INSTALLEDVERSION" == 2.3.2 || "$INSTALLEDVERSION" == 2.3.10 ]] && up_2.3.0_to_2.3.20
[[ "$INSTALLEDVERSION" == 2.3.20 || "$INSTALLEDVERSION" == 2.3.21 ]] && up_2.3.2X_to_2.3.30
[[ "$INSTALLEDVERSION" == 2.3.30 || "$INSTALLEDVERSION" == 2.3.40 ]] && up_2.3.3X_to_2.3.50
[[ "$INSTALLEDVERSION" == 2.3.50 || "$INSTALLEDVERSION" == 2.3.51 || "$INSTALLEDVERSION" == 2.3.52 || "$INSTALLEDVERSION" == 2.3.60 || "$INSTALLEDVERSION" == 2.3.61 || "$INSTALLEDVERSION" == 2.3.70 ]] && up_2.3.5X_to_2.3.80
true
}
@@ -609,6 +624,46 @@ EOF
INSTALLEDVERSION=2.3.50
}
up_2.3.5X_to_2.3.80() {
# Remove watermark settings from global.sls
sed -i '/ cluster_routing_allocation_disk/d' /opt/so/saltstack/local/pillar/global.sls
# Add new indices to the global
sed -i '/ index_settings:/a \\ so-elasticsearch: \n shards: 1 \n warm: 7 \n close: 30 \n delete: 365' /opt/so/saltstack/local/pillar/global.sls
sed -i '/ index_settings:/a \\ so-logstash: \n shards: 1 \n warm: 7 \n close: 30 \n delete: 365' /opt/so/saltstack/local/pillar/global.sls
sed -i '/ index_settings:/a \\ so-kibana: \n shards: 1 \n warm: 7 \n close: 30 \n delete: 365' /opt/so/saltstack/local/pillar/global.sls
sed -i '/ index_settings:/a \\ so-redis: \n shards: 1 \n warm: 7 \n close: 30 \n delete: 365' /opt/so/saltstack/local/pillar/global.sls
# Do some pillar formatting
tc=$(grep -w true_cluster /opt/so/saltstack/local/pillar/global.sls | awk -F: {'print tolower($2)'}| xargs)
if [[ "$tc" == "true" ]]; then
tcname=$(grep -w true_cluster_name /opt/so/saltstack/local/pillar/global.sls | awk -F: {'print $2'})
sed -i "/^elasticsearch:/a \\ config: \n cluster: \n name: $tcname" /opt/so/saltstack/local/pillar/global.sls
sed -i '/ true_cluster_name/d' /opt/so/saltstack/local/pillar/global.sls
sed -i '/ esclustername/d' /opt/so/saltstack/local/pillar/global.sls
for file in /opt/so/saltstack/local/pillar/minions/*.sls; do
if [[ ${file} != *"manager.sls"* ]]; then
noderoutetype=$(grep -w node_route_type $file | awk -F: {'print $2'})
if [ -n "$noderoutetype" ]; then
sed -i "/^elasticsearch:/a \\ config: \n node: \n attr: \n box_type: $noderoutetype" $file
sed -i '/ node_route_type/d' $file
noderoutetype=''
fi
fi
done
fi
# check for local es config to inform user that the config in local is now ignored and those options need to be placed in the pillar
if [ -f "/opt/so/saltstack/local/salt/elasticsearch/files/elasticsearch.yml" ]; then
NOTIFYCUSTOMELASTICCONFIG=true
fi
INSTALLEDVERSION=2.3.80
}
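Assuming defaults, the sed edits above leave each new index in global.sls with a block along these lines (a sketch, not verbatim soup output):
```
elasticsearch:
  index_settings:
    so-elasticsearch:
      shards: 1
      warm: 7
      close: 30
      delete: 365
```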
verify_upgradespace() {
CURRENTSPACE=$(df -BG / | grep -v Avail | awk '{print $4}' | sed 's/.$//')
if [ "$CURRENTSPACE" -lt "10" ]; then
@@ -624,7 +679,7 @@ upgrade_space() {
clean_dockers
if ! verify_upgradespace; then
echo "There is not enough space to perform the upgrade. Please free up space and try again"
exit 1
exit 0
fi
else
echo "You have enough space for upgrade. Proceeding with soup."
@@ -649,8 +704,8 @@ thehive_maint() {
done
if [ "$THEHIVE_CONNECTED" == "yes" ]; then
echo "Migrating thehive databases if needed."
curl -v -k -XPOST -L "https://localhost/thehive/api/maintenance/migrate"
curl -v -k -XPOST -L "https://localhost/cortex/api/maintenance/migrate"
curl -v -k -XPOST -L "https://localhost/thehive/api/maintenance/migrate" >> "$SOUP_LOG" 2>&1
curl -v -k -XPOST -L "https://localhost/cortex/api/maintenance/migrate" >> "$SOUP_LOG" 2>&1
fi
}
@@ -774,39 +829,23 @@ verify_latest_update_script() {
}
main() {
set -e
set +e
trap 'check_err $?' EXIT
echo "### Preparing soup at $(date) ###"
while getopts ":b" opt; do
case "$opt" in
b ) # process option b
shift
BATCHSIZE=$1
if ! [[ "$BATCHSIZE" =~ ^[0-9]+$ ]]; then
echo "Batch size must be a number greater than 0."
exit 1
fi
;;
\? )
echo "Usage: cmd [-b]"
;;
esac
done
echo "Checking to see if this is an airgap install."
echo ""
check_airgap
if [[ $is_airgap -eq 0 && $UNATTENDED == true && -z $ISOLOC ]]; then
echo "Missing file argument (-f <FILENAME>) for unattended airgap upgrade."
exit 0
fi
echo "Checking to see if this is a manager."
echo ""
require_manager
set_minionid
echo "Checking to see if this is an airgap install."
echo ""
check_airgap
echo "Found that Security Onion $INSTALLEDVERSION is currently installed."
echo ""
if [[ $is_airgap -eq 0 ]]; then
# Let's mount the ISO since this is airgap
echo "This is airgap. Ask for a location."
airgap_mounted
else
echo "Cloning Security Onion github repo into $UPDATE_DIR."
@@ -894,7 +933,7 @@ main() {
echo "Once the issue is resolved, run soup again."
echo "Exiting."
echo ""
exit 1
exit 0
else
echo "Salt upgrade success."
echo ""
@@ -953,8 +992,6 @@ main() {
set +e
salt-call state.highstate -l info queue=True
set -e
echo ""
echo "Upgrade from $INSTALLEDVERSION to $NEWVERSION complete."
echo ""
echo "Stopping Salt Master to remove ACL"
@@ -977,6 +1014,13 @@ main() {
[[ $is_airgap -eq 0 ]] && unmount_update
thehive_maint
echo ""
echo "Upgrade to $NEWVERSION complete."
# Everything beyond this is post-upgrade checking, don't fail past this point if something here causes an error
set +e
echo "Checking the number of minions."
NUM_MINIONS=$(ls /opt/so/saltstack/local/pillar/minions/*_*.sls | wc -l)
if [[ $UPGRADESALT -eq 1 ]] && [[ $NUM_MINIONS -gt 1 ]]; then
if [[ $is_airgap -eq 0 ]]; then
@@ -987,8 +1031,10 @@ main() {
fi
fi
echo "Checking for local modifications."
check_local_mods
echo "Checking sudoers file."
check_sudoers
if [[ -n $lsl_msg ]]; then
@@ -1026,10 +1072,56 @@ EOF
fi
fi
if [ "$NOTIFYCUSTOMELASTICCONFIG" = true ] ; then
cat << EOF
A custom Elasticsearch configuration has been found at /opt/so/saltstack/local/salt/elasticsearch/files/elasticsearch.yml. This file is no longer referenced in Security Onion versions >= 2.3.80.
If you still need those customizations, you'll need to manually migrate them to the new Elasticsearch config as shown at https://docs.securityonion.net/en/2.3/elasticsearch.html.
EOF
fi
echo "### soup has been served at $(date) ###"
}
cat << EOF
while getopts ":b:f:y" opt; do
case ${opt} in
b )
BATCHSIZE="$OPTARG"
if ! [[ "$BATCHSIZE" =~ ^[1-9][0-9]*$ ]]; then
echo "Batch size must be a number greater than 0."
exit 1
fi
;;
y )
if [[ ! -f /opt/so/state/yeselastic.txt ]]; then
echo "Cannot run soup in unattended mode. You must run soup manually to accept the Elastic License."
exit 1
else
UNATTENDED=true
fi
;;
f )
ISOLOC="$OPTARG"
;;
\? )
echo "Usage: soup [-b] [-y] [-f <iso location>]"
exit 1
;;
: )
echo "Invalid option: $OPTARG requires an argument"
exit 1
;;
esac
done
shift $((OPTIND - 1))
if [[ -z $UNATTENDED ]]; then
cat << EOF
SOUP - Security Onion UPdater
@@ -1041,7 +1133,9 @@ Press Enter to continue or Ctrl-C to cancel.
EOF
read -r input
read -r input
fi
echo "### Preparing soup at $(date) ###"
main "$@" | tee -a $SOUP_LOG

View File

@@ -0,0 +1,29 @@
{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-aws:close', 30) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
1:
action: close
description: >-
Close aws indices older than {{cur_close_days}} days.
options:
delete_aliases: False
timeout_override:
continue_if_exception: False
disable_action: False
filters:
- filtertype: pattern
kind: regex
value: '^(logstash-aws.*|so-aws.*)$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{cur_close_days}}
exclude:
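The close threshold above is read from the pillar on the first line of this file, falling back to 30 days; overriding it in the local pillar might look like this (value hypothetical):
```
elasticsearch:
  index_settings:
    so-aws:
      close: 45
```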

View File

@@ -0,0 +1,29 @@
{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-aws:delete', 365) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
1:
action: delete_indices
description: >-
Delete aws indices when older than {{ DELETE_DAYS }} days.
options:
ignore_empty_list: True
disable_action: False
filters:
- filtertype: pattern
kind: regex
value: '^(logstash-aws.*|so-aws.*)$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{ DELETE_DAYS }}
exclude:

View File

@@ -0,0 +1,24 @@
{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-aws:warm', 7) -%}
actions:
1:
action: allocation
description: "Apply shard allocation filtering rules to the specified indices"
options:
key: box_type
value: warm
allocation_type: require
wait_for_completion: true
timeout_override:
continue_if_exception: false
disable_action: false
filters:
- filtertype: pattern
kind: prefix
value: so-aws
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{ WARM_DAYS }}

View File

@@ -0,0 +1,29 @@
{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-azure:close', 30) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
1:
action: close
description: >-
Close azure indices older than {{cur_close_days}} days.
options:
delete_aliases: False
timeout_override:
continue_if_exception: False
disable_action: False
filters:
- filtertype: pattern
kind: regex
value: '^(logstash-azure.*|so-azure.*)$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{cur_close_days}}
exclude:

View File

@@ -0,0 +1,29 @@
{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-azure:delete', 365) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
1:
action: delete_indices
description: >-
Delete azure indices when older than {{ DELETE_DAYS }} days.
options:
ignore_empty_list: True
disable_action: False
filters:
- filtertype: pattern
kind: regex
value: '^(logstash-azure.*|so-azure.*)$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{ DELETE_DAYS }}
exclude:

View File

@@ -0,0 +1,24 @@
{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-azure:warm', 7) -%}
actions:
1:
action: allocation
description: "Apply shard allocation filtering rules to the specified indices"
options:
key: box_type
value: warm
allocation_type: require
wait_for_completion: true
timeout_override:
continue_if_exception: false
disable_action: false
filters:
- filtertype: pattern
kind: prefix
value: so-azure
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{ WARM_DAYS }}

View File

@@ -0,0 +1,29 @@
{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-barracuda:close', 30) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
1:
action: close
description: >-
Close barracuda indices older than {{cur_close_days}} days.
options:
delete_aliases: False
timeout_override:
continue_if_exception: False
disable_action: False
filters:
- filtertype: pattern
kind: regex
value: '^(logstash-barracuda.*|so-barracuda.*)$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{cur_close_days}}
exclude:

View File

@@ -0,0 +1,29 @@
{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-barracuda:delete', 365) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
1:
action: delete_indices
description: >-
Delete barracuda indices when older than {{ DELETE_DAYS }} days.
options:
ignore_empty_list: True
disable_action: False
filters:
- filtertype: pattern
kind: regex
value: '^(logstash-barracuda.*|so-barracuda.*)$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{ DELETE_DAYS }}
exclude:

View File

@@ -0,0 +1,24 @@
{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-barracuda:warm', 7) -%}
actions:
1:
action: allocation
description: "Apply shard allocation filtering rules to the specified indices"
options:
key: box_type
value: warm
allocation_type: require
wait_for_completion: true
timeout_override:
continue_if_exception: false
disable_action: false
filters:
- filtertype: pattern
kind: prefix
value: so-barracuda
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{ WARM_DAYS }}

View File

@@ -0,0 +1,29 @@
{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-beats:delete', 365) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
1:
action: delete_indices
description: >-
Delete beats indices when older than {{ DELETE_DAYS }} days.
options:
ignore_empty_list: True
disable_action: False
filters:
- filtertype: pattern
kind: regex
value: '^(logstash-beats.*|so-beats.*)$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{ DELETE_DAYS }}
exclude:

View File

@@ -0,0 +1,24 @@
{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-beats:warm', 7) -%}
actions:
1:
action: allocation
description: "Apply shard allocation filtering rules to the specified indices"
options:
key: box_type
value: warm
allocation_type: require
wait_for_completion: true
timeout_override:
continue_if_exception: false
disable_action: false
filters:
- filtertype: pattern
kind: prefix
value: so-beats
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{ WARM_DAYS }}

View File

@@ -0,0 +1,29 @@
{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-bluecoat:close', 30) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
1:
action: close
description: >-
Close bluecoat indices older than {{cur_close_days}} days.
options:
delete_aliases: False
timeout_override:
continue_if_exception: False
disable_action: False
filters:
- filtertype: pattern
kind: regex
value: '^(logstash-bluecoat.*|so-bluecoat.*)$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{cur_close_days}}
exclude:

View File

@@ -0,0 +1,29 @@
{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-bluecoat:delete', 365) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
1:
action: delete_indices
description: >-
Delete bluecoat indices when older than {{ DELETE_DAYS }} days.
options:
ignore_empty_list: True
disable_action: False
filters:
- filtertype: pattern
kind: regex
value: '^(logstash-bluecoat.*|so-bluecoat.*)$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{ DELETE_DAYS }}
exclude:

View File

@@ -0,0 +1,24 @@
{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-bluecoat:warm', 7) -%}
actions:
1:
action: allocation
description: "Apply shard allocation filtering rules to the specified indices"
options:
key: box_type
value: warm
allocation_type: require
wait_for_completion: true
timeout_override:
continue_if_exception: false
disable_action: false
filters:
- filtertype: pattern
kind: prefix
value: so-bluecoat
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{ WARM_DAYS }}

View File

@@ -0,0 +1,29 @@
{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-cef:close', 30) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
1:
action: close
description: >-
Close cef indices older than {{cur_close_days}} days.
options:
delete_aliases: False
timeout_override:
continue_if_exception: False
disable_action: False
filters:
- filtertype: pattern
kind: regex
value: '^(logstash-cef.*|so-cef.*)$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{cur_close_days}}
exclude:

View File

@@ -0,0 +1,29 @@
{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-cef:delete', 365) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
1:
action: delete_indices
description: >-
Delete cef indices when older than {{ DELETE_DAYS }} days.
options:
ignore_empty_list: True
disable_action: False
filters:
- filtertype: pattern
kind: regex
value: '^(logstash-cef.*|so-cef.*)$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{ DELETE_DAYS }}
exclude:

View File

@@ -0,0 +1,24 @@
{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-cef:warm', 7) -%}
actions:
1:
action: allocation
description: "Apply shard allocation filtering rules to the specified indices"
options:
key: box_type
value: warm
allocation_type: require
wait_for_completion: true
timeout_override:
continue_if_exception: false
disable_action: false
filters:
- filtertype: pattern
kind: prefix
value: so-cef
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{ WARM_DAYS }}

View File

@@ -0,0 +1,29 @@
{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-checkpoint:close', 30) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
1:
action: close
description: >-
Close checkpoint indices older than {{cur_close_days}} days.
options:
delete_aliases: False
timeout_override:
continue_if_exception: False
disable_action: False
filters:
- filtertype: pattern
kind: regex
value: '^(logstash-checkpoint.*|so-checkpoint.*)$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{cur_close_days}}
exclude:


@@ -0,0 +1,29 @@
{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-checkpoint:delete', 365) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
1:
action: delete_indices
description: >-
Delete checkpoint indices when older than {{ DELETE_DAYS }} days.
options:
ignore_empty_list: True
disable_action: False
filters:
- filtertype: pattern
kind: regex
value: '^(logstash-checkpoint.*|so-checkpoint.*)$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{ DELETE_DAYS }}
exclude:


@@ -0,0 +1,24 @@
{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-checkpoint:warm', 7) -%}
actions:
1:
action: allocation
description: "Apply shard allocation filtering rules to the specified indices"
options:
key: box_type
value: warm
allocation_type: require
wait_for_completion: true
timeout_override:
continue_if_exception: false
disable_action: false
filters:
- filtertype: pattern
kind: prefix
value: so-checkpoint
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{ WARM_DAYS }}


@@ -0,0 +1,29 @@
{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-cisco:close', 30) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
1:
action: close
description: >-
Close cisco indices older than {{cur_close_days}} days.
options:
delete_aliases: False
timeout_override:
continue_if_exception: False
disable_action: False
filters:
- filtertype: pattern
kind: regex
value: '^(logstash-cisco.*|so-cisco.*)$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{cur_close_days}}
exclude:


@@ -0,0 +1,29 @@
{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-cisco:delete', 365) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
1:
action: delete_indices
description: >-
Delete cisco indices when older than {{ DELETE_DAYS }} days.
options:
ignore_empty_list: True
disable_action: False
filters:
- filtertype: pattern
kind: regex
value: '^(logstash-cisco.*|so-cisco.*)$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{ DELETE_DAYS }}
exclude:


@@ -0,0 +1,24 @@
{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-cisco:warm', 7) -%}
actions:
1:
action: allocation
description: "Apply shard allocation filtering rules to the specified indices"
options:
key: box_type
value: warm
allocation_type: require
wait_for_completion: true
timeout_override:
continue_if_exception: false
disable_action: false
filters:
- filtertype: pattern
kind: prefix
value: so-cisco
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{ WARM_DAYS }}


@@ -0,0 +1,29 @@
{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-cyberark:close', 30) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
1:
action: close
description: >-
Close cyberark indices older than {{cur_close_days}} days.
options:
delete_aliases: False
timeout_override:
continue_if_exception: False
disable_action: False
filters:
- filtertype: pattern
kind: regex
value: '^(logstash-cyberark.*|so-cyberark.*)$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{cur_close_days}}
exclude:


@@ -0,0 +1,29 @@
{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-cyberark:delete', 365) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
1:
action: delete_indices
description: >-
Delete cyberark indices when older than {{ DELETE_DAYS }} days.
options:
ignore_empty_list: True
disable_action: False
filters:
- filtertype: pattern
kind: regex
value: '^(logstash-cyberark.*|so-cyberark.*)$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{ DELETE_DAYS }}
exclude:


@@ -0,0 +1,24 @@
{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-cyberark:warm', 7) -%}
actions:
1:
action: allocation
description: "Apply shard allocation filtering rules to the specified indices"
options:
key: box_type
value: warm
allocation_type: require
wait_for_completion: true
timeout_override:
continue_if_exception: false
disable_action: false
filters:
- filtertype: pattern
kind: prefix
value: so-cyberark
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{ WARM_DAYS }}


@@ -0,0 +1,29 @@
{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-cylance:close', 30) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
1:
action: close
description: >-
Close cylance indices older than {{cur_close_days}} days.
options:
delete_aliases: False
timeout_override:
continue_if_exception: False
disable_action: False
filters:
- filtertype: pattern
kind: regex
value: '^(logstash-cylance.*|so-cylance.*)$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{cur_close_days}}
exclude:


@@ -0,0 +1,29 @@
{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-cylance:delete', 365) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
1:
action: delete_indices
description: >-
Delete cylance indices when older than {{ DELETE_DAYS }} days.
options:
ignore_empty_list: True
disable_action: False
filters:
- filtertype: pattern
kind: regex
value: '^(logstash-cylance.*|so-cylance.*)$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{ DELETE_DAYS }}
exclude:


@@ -0,0 +1,24 @@
{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-cylance:warm', 7) -%}
actions:
1:
action: allocation
description: "Apply shard allocation filtering rules to the specified indices"
options:
key: box_type
value: warm
allocation_type: require
wait_for_completion: true
timeout_override:
continue_if_exception: false
disable_action: false
filters:
- filtertype: pattern
kind: prefix
value: so-cylance
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{ WARM_DAYS }}


@@ -0,0 +1,29 @@
{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-elasticsearch:close', 30) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
1:
action: close
description: >-
Close elasticsearch indices older than {{cur_close_days}} days.
options:
delete_aliases: False
timeout_override:
continue_if_exception: False
disable_action: False
filters:
- filtertype: pattern
kind: regex
value: '^(logstash-elasticsearch.*|so-elasticsearch.*)$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{cur_close_days}}
exclude:


@@ -0,0 +1,29 @@
{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-elasticsearch:delete', 365) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
1:
action: delete_indices
description: >-
Delete elasticsearch indices when older than {{ DELETE_DAYS }} days.
options:
ignore_empty_list: True
disable_action: False
filters:
- filtertype: pattern
kind: regex
value: '^(logstash-elasticsearch.*|so-elasticsearch.*)$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{ DELETE_DAYS }}
exclude:


@@ -0,0 +1,24 @@
{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-elasticsearch:warm', 7) -%}
actions:
1:
action: allocation
description: "Apply shard allocation filtering rules to the specified indices"
options:
key: box_type
value: warm
allocation_type: require
wait_for_completion: true
timeout_override:
continue_if_exception: false
disable_action: false
filters:
- filtertype: pattern
kind: prefix
value: so-elasticsearch
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{ WARM_DAYS }}
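
For context on what the allocation action does: with key box_type, value warm, and allocation_type require, curator applies an index-level routing setting of the form sketched below to each matched index. In a hot/warm layout, hot nodes carry a different box_type, so requiring warm relocates shards onto warm-tagged nodes.

```yaml
# per-index setting effectively applied by the allocation action above
index.routing.allocation.require.box_type: warm
```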


@@ -0,0 +1,29 @@
{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-f5:close', 30) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
1:
action: close
description: >-
Close f5 indices older than {{cur_close_days}} days.
options:
delete_aliases: False
timeout_override:
continue_if_exception: False
disable_action: False
filters:
- filtertype: pattern
kind: regex
value: '^(logstash-f5.*|so-f5.*)$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{cur_close_days}}
exclude:


@@ -0,0 +1,29 @@
{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-f5:delete', 365) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
1:
action: delete_indices
description: >-
Delete f5 indices when older than {{ DELETE_DAYS }} days.
options:
ignore_empty_list: True
disable_action: False
filters:
- filtertype: pattern
kind: regex
value: '^(logstash-f5.*|so-f5.*)$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{ DELETE_DAYS }}
exclude:


@@ -0,0 +1,24 @@
{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-f5:warm', 7) -%}
actions:
1:
action: allocation
description: "Apply shard allocation filtering rules to the specified indices"
options:
key: box_type
value: warm
allocation_type: require
wait_for_completion: true
timeout_override:
continue_if_exception: false
disable_action: false
filters:
- filtertype: pattern
kind: prefix
value: so-f5
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{ WARM_DAYS }}


@@ -0,0 +1,29 @@
{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-firewall:delete', 365) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
1:
action: delete_indices
description: >-
Delete firewall indices when older than {{ DELETE_DAYS }} days.
options:
ignore_empty_list: True
disable_action: False
filters:
- filtertype: pattern
kind: regex
value: '^(logstash-firewall.*|so-firewall.*)$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{ DELETE_DAYS }}
exclude:


@@ -0,0 +1,24 @@
{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-firewall:warm', 7) -%}
actions:
1:
action: allocation
description: "Apply shard allocation filtering rules to the specified indices"
options:
key: box_type
value: warm
allocation_type: require
wait_for_completion: true
timeout_override:
continue_if_exception: false
disable_action: false
filters:
- filtertype: pattern
kind: prefix
value: so-firewall
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{ WARM_DAYS }}


@@ -0,0 +1,29 @@
{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-fortinet:close', 30) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
1:
action: close
description: >-
Close fortinet indices older than {{cur_close_days}} days.
options:
delete_aliases: False
timeout_override:
continue_if_exception: False
disable_action: False
filters:
- filtertype: pattern
kind: regex
value: '^(logstash-fortinet.*|so-fortinet.*)$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{cur_close_days}}
exclude:


@@ -0,0 +1,29 @@
{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-fortinet:delete', 365) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
1:
action: delete_indices
description: >-
Delete fortinet indices when older than {{ DELETE_DAYS }} days.
options:
ignore_empty_list: True
disable_action: False
filters:
- filtertype: pattern
kind: regex
value: '^(logstash-fortinet.*|so-fortinet.*)$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{ DELETE_DAYS }}
exclude:


@@ -0,0 +1,24 @@
{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-fortinet:warm', 7) -%}
actions:
1:
action: allocation
description: "Apply shard allocation filtering rules to the specified indices"
options:
key: box_type
value: warm
allocation_type: require
wait_for_completion: true
timeout_override:
continue_if_exception: false
disable_action: false
filters:
- filtertype: pattern
kind: prefix
value: so-fortinet
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{ WARM_DAYS }}


@@ -0,0 +1,29 @@
{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-gcp:close', 30) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
1:
action: close
description: >-
Close gcp indices older than {{cur_close_days}} days.
options:
delete_aliases: False
timeout_override:
continue_if_exception: False
disable_action: False
filters:
- filtertype: pattern
kind: regex
value: '^(logstash-gcp.*|so-gcp.*)$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{cur_close_days}}
exclude:


@@ -0,0 +1,29 @@
{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-gcp:delete', 365) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
1:
action: delete_indices
description: >-
Delete gcp indices when older than {{ DELETE_DAYS }} days.
options:
ignore_empty_list: True
disable_action: False
filters:
- filtertype: pattern
kind: regex
value: '^(logstash-gcp.*|so-gcp.*)$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{ DELETE_DAYS }}
exclude:


@@ -0,0 +1,24 @@
{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-gcp:warm', 7) -%}
actions:
1:
action: allocation
description: "Apply shard allocation filtering rules to the specified indices"
options:
key: box_type
value: warm
allocation_type: require
wait_for_completion: true
timeout_override:
continue_if_exception: false
disable_action: false
filters:
- filtertype: pattern
kind: prefix
value: so-gcp
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{ WARM_DAYS }}


@@ -0,0 +1,29 @@
{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-google_workspace:close', 30) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
1:
action: close
description: >-
Close google_workspace indices older than {{cur_close_days}} days.
options:
delete_aliases: False
timeout_override:
continue_if_exception: False
disable_action: False
filters:
- filtertype: pattern
kind: regex
value: '^(logstash-google_workspace.*|so-google_workspace.*)$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{cur_close_days}}
exclude:


@@ -0,0 +1,29 @@
{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-google_workspace:delete', 365) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
1:
action: delete_indices
description: >-
Delete google_workspace indices when older than {{ DELETE_DAYS }} days.
options:
ignore_empty_list: True
disable_action: False
filters:
- filtertype: pattern
kind: regex
value: '^(logstash-google_workspace.*|so-google_workspace.*)$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{ DELETE_DAYS }}
exclude:


@@ -0,0 +1,24 @@
{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-google_workspace:warm', 7) -%}
actions:
1:
action: allocation
description: "Apply shard allocation filtering rules to the specified indices"
options:
key: box_type
value: warm
allocation_type: require
wait_for_completion: true
timeout_override:
continue_if_exception: false
disable_action: false
filters:
- filtertype: pattern
kind: prefix
value: so-google_workspace
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{ WARM_DAYS }}


@@ -0,0 +1,29 @@
{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-ids:delete', 365) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
1:
action: delete_indices
description: >-
Delete IDS indices when older than {{ DELETE_DAYS }} days.
options:
ignore_empty_list: True
disable_action: False
filters:
- filtertype: pattern
kind: regex
value: '^(logstash-ids.*|so-ids.*)$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{ DELETE_DAYS }}
exclude:


@@ -0,0 +1,24 @@
{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-ids:warm', 7) -%}
actions:
1:
action: allocation
description: "Apply shard allocation filtering rules to the specified indices"
options:
key: box_type
value: warm
allocation_type: require
wait_for_completion: true
timeout_override:
continue_if_exception: false
disable_action: false
filters:
- filtertype: pattern
kind: prefix
value: so-ids
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{ WARM_DAYS }}


@@ -0,0 +1,29 @@
{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-imperva:close', 30) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
1:
action: close
description: >-
Close imperva indices older than {{cur_close_days}} days.
options:
delete_aliases: False
timeout_override:
continue_if_exception: False
disable_action: False
filters:
- filtertype: pattern
kind: regex
value: '^(logstash-imperva.*|so-imperva.*)$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{cur_close_days}}
exclude:


@@ -0,0 +1,29 @@
{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-imperva:delete', 365) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
1:
action: delete_indices
description: >-
Delete imperva indices when older than {{ DELETE_DAYS }} days.
options:
ignore_empty_list: True
disable_action: False
filters:
- filtertype: pattern
kind: regex
value: '^(logstash-imperva.*|so-imperva.*)$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{ DELETE_DAYS }}
exclude:


@@ -0,0 +1,24 @@
{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-imperva:warm', 7) -%}
actions:
1:
action: allocation
description: "Apply shard allocation filtering rules to the specified indices"
options:
key: box_type
value: warm
allocation_type: require
wait_for_completion: true
timeout_override:
continue_if_exception: false
disable_action: false
filters:
- filtertype: pattern
kind: prefix
value: so-imperva
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{ WARM_DAYS }}


@@ -0,0 +1,29 @@
{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-import:delete', 365) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
1:
action: delete_indices
description: >-
Delete import indices when older than {{ DELETE_DAYS }} days.
options:
ignore_empty_list: True
disable_action: False
filters:
- filtertype: pattern
kind: regex
value: '^(logstash-import.*|so-import.*)$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{ DELETE_DAYS }}
exclude:


@@ -0,0 +1,24 @@
{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-import:warm', 7) -%}
actions:
1:
action: allocation
description: "Apply shard allocation filtering rules to the specified indices"
options:
key: box_type
value: warm
allocation_type: require
wait_for_completion: true
timeout_override:
continue_if_exception: false
disable_action: false
filters:
- filtertype: pattern
kind: prefix
value: so-import
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{ WARM_DAYS }}


@@ -0,0 +1,29 @@
{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-infoblox:close', 30) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
1:
action: close
description: >-
Close infoblox indices older than {{cur_close_days}} days.
options:
delete_aliases: False
timeout_override:
continue_if_exception: False
disable_action: False
filters:
- filtertype: pattern
kind: regex
value: '^(logstash-infoblox.*|so-infoblox.*)$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{cur_close_days}}
exclude:


@@ -0,0 +1,29 @@
{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-infoblox:delete', 365) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
1:
action: delete_indices
description: >-
Delete infoblox indices when older than {{ DELETE_DAYS }} days.
options:
ignore_empty_list: True
disable_action: False
filters:
- filtertype: pattern
kind: regex
value: '^(logstash-infoblox.*|so-infoblox.*)$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{ DELETE_DAYS }}
exclude:


@@ -0,0 +1,24 @@
{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-infoblox:warm', 7) -%}
actions:
1:
action: allocation
description: "Apply shard allocation filtering rules to the specified indices"
options:
key: box_type
value: warm
allocation_type: require
wait_for_completion: true
timeout_override:
continue_if_exception: false
disable_action: false
filters:
- filtertype: pattern
kind: prefix
value: so-infoblox
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{ WARM_DAYS }}


@@ -0,0 +1,29 @@
{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-juniper:close', 30) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
1:
action: close
description: >-
Close juniper indices older than {{cur_close_days}} days.
options:
delete_aliases: False
timeout_override:
continue_if_exception: False
disable_action: False
filters:
- filtertype: pattern
kind: regex
value: '^(logstash-juniper.*|so-juniper.*)$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{cur_close_days}}
exclude:


@@ -0,0 +1,29 @@
{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-juniper:delete', 365) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
1:
action: delete_indices
description: >-
Delete juniper indices when older than {{ DELETE_DAYS }} days.
options:
ignore_empty_list: True
disable_action: False
filters:
- filtertype: pattern
kind: regex
value: '^(logstash-juniper.*|so-juniper.*)$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{ DELETE_DAYS }}
exclude:


@@ -0,0 +1,24 @@
{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-juniper:warm', 7) -%}
actions:
1:
action: allocation
description: "Apply shard allocation filtering rules to the specified indices"
options:
key: box_type
value: warm
allocation_type: require
wait_for_completion: true
timeout_override:
continue_if_exception: false
disable_action: false
filters:
- filtertype: pattern
kind: prefix
      value: so-juniper
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{ WARM_DAYS }}


@@ -0,0 +1,29 @@
{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-kibana:close', 30) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
1:
action: close
description: >-
Close kibana indices older than {{cur_close_days}} days.
options:
delete_aliases: False
timeout_override:
continue_if_exception: False
disable_action: False
filters:
- filtertype: pattern
kind: regex
value: '^(logstash-kibana.*|so-kibana.*)$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{cur_close_days}}
exclude:


@@ -0,0 +1,29 @@
{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-kibana:delete', 365) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
1:
action: delete_indices
description: >-
Delete kibana indices when older than {{ DELETE_DAYS }} days.
options:
ignore_empty_list: True
disable_action: False
filters:
- filtertype: pattern
kind: regex
value: '^(logstash-kibana.*|so-kibana.*)$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{ DELETE_DAYS }}
exclude:


@@ -0,0 +1,24 @@
{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-kibana:warm', 7) -%}
actions:
1:
action: allocation
description: "Apply shard allocation filtering rules to the specified indices"
options:
key: box_type
value: warm
allocation_type: require
wait_for_completion: true
timeout_override:
continue_if_exception: false
disable_action: false
filters:
- filtertype: pattern
kind: prefix
value: so-kibana
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{ WARM_DAYS }}


@@ -0,0 +1,29 @@
{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-logstash:close', 30) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
1:
action: close
description: >-
Close logstash indices older than {{cur_close_days}} days.
options:
delete_aliases: False
timeout_override:
continue_if_exception: False
disable_action: False
filters:
- filtertype: pattern
kind: regex
value: '^(logstash-logstash.*|so-logstash.*)$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{cur_close_days}}
exclude:


@@ -0,0 +1,29 @@
{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-logstash:delete', 365) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
1:
action: delete_indices
description: >-
Delete logstash indices when older than {{ DELETE_DAYS }} days.
options:
ignore_empty_list: True
disable_action: False
filters:
- filtertype: pattern
kind: regex
value: '^(logstash-logstash.*|so-logstash.*)$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{ DELETE_DAYS }}
exclude:


@@ -0,0 +1,24 @@
{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-logstash:warm', 7) -%}
actions:
1:
action: allocation
description: "Apply shard allocation filtering rules to the specified indices"
options:
key: box_type
value: warm
allocation_type: require
wait_for_completion: true
timeout_override:
continue_if_exception: false
disable_action: false
filters:
- filtertype: pattern
kind: prefix
value: so-logstash
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{ WARM_DAYS }}


@@ -0,0 +1,29 @@
{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-microsoft:close', 30) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
1:
action: close
description: >-
Close microsoft indices older than {{cur_close_days}} days.
options:
delete_aliases: False
timeout_override:
continue_if_exception: False
disable_action: False
filters:
- filtertype: pattern
kind: regex
value: '^(logstash-microsoft.*|so-microsoft.*)$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{cur_close_days}}
exclude:


@@ -0,0 +1,29 @@
{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-microsoft:delete', 365) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
1:
action: delete_indices
description: >-
Delete microsoft indices when older than {{ DELETE_DAYS }} days.
options:
ignore_empty_list: True
disable_action: False
filters:
- filtertype: pattern
kind: regex
value: '^(logstash-microsoft.*|so-microsoft.*)$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{ DELETE_DAYS }}
exclude:


@@ -0,0 +1,24 @@
{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-microsoft:warm', 7) -%}
actions:
1:
action: allocation
description: "Apply shard allocation filtering rules to the specified indices"
options:
key: box_type
value: warm
allocation_type: require
wait_for_completion: true
timeout_override:
continue_if_exception: false
disable_action: false
filters:
- filtertype: pattern
kind: prefix
value: so-microsoft
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{ WARM_DAYS }}


@@ -0,0 +1,29 @@
{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-misp:close', 30) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
1:
action: close
description: >-
Close misp indices older than {{cur_close_days}} days.
options:
delete_aliases: False
timeout_override:
continue_if_exception: False
disable_action: False
filters:
- filtertype: pattern
kind: regex
value: '^(logstash-misp.*|so-misp.*)$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{cur_close_days}}
exclude:


@@ -0,0 +1,29 @@
{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-misp:delete', 365) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
1:
action: delete_indices
description: >-
Delete misp indices when older than {{ DELETE_DAYS }} days.
options:
ignore_empty_list: True
disable_action: False
filters:
- filtertype: pattern
kind: regex
value: '^(logstash-misp.*|so-misp.*)$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{ DELETE_DAYS }}
exclude:


@@ -0,0 +1,24 @@
{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-misp:warm', 7) -%}
actions:
1:
action: allocation
description: "Apply shard allocation filtering rules to the specified indices"
options:
key: box_type
value: warm
allocation_type: require
wait_for_completion: true
timeout_override:
continue_if_exception: false
disable_action: false
filters:
- filtertype: pattern
kind: prefix
value: so-misp
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{ WARM_DAYS }}


@@ -0,0 +1,29 @@
{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-netflow:close', 30) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
1:
action: close
description: >-
Close netflow indices older than {{cur_close_days}} days.
options:
delete_aliases: False
timeout_override:
continue_if_exception: False
disable_action: False
filters:
- filtertype: pattern
kind: regex
value: '^(logstash-netflow.*|so-netflow.*)$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{cur_close_days}}
exclude:


@@ -0,0 +1,29 @@
{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-netflow:delete', 365) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
1:
action: delete_indices
description: >-
Delete netflow indices when older than {{ DELETE_DAYS }} days.
options:
ignore_empty_list: True
disable_action: False
filters:
- filtertype: pattern
kind: regex
value: '^(logstash-netflow.*|so-netflow.*)$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{ DELETE_DAYS }}
exclude:


@@ -0,0 +1,24 @@
{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-netflow:warm', 7) -%}
actions:
1:
action: allocation
description: "Apply shard allocation filtering rules to the specified indices"
options:
key: box_type
value: warm
allocation_type: require
wait_for_completion: true
timeout_override:
continue_if_exception: false
disable_action: false
filters:
- filtertype: pattern
kind: prefix
value: so-netflow
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{ WARM_DAYS }}


@@ -0,0 +1,29 @@
{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-netscout:close', 30) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
1:
action: close
description: >-
Close netscout indices older than {{cur_close_days}} days.
options:
delete_aliases: False
timeout_override:
continue_if_exception: False
disable_action: False
filters:
- filtertype: pattern
kind: regex
value: '^(logstash-netscout.*|so-netscout.*)$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{cur_close_days}}
exclude:


@@ -0,0 +1,29 @@
{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-netscout:delete', 365) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
1:
action: delete_indices
description: >-
Delete netscout indices when older than {{ DELETE_DAYS }} days.
options:
ignore_empty_list: True
disable_action: False
filters:
- filtertype: pattern
kind: regex
value: '^(logstash-netscout.*|so-netscout.*)$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{ DELETE_DAYS }}
exclude:


@@ -0,0 +1,24 @@
{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-netscout:warm', 7) -%}
actions:
1:
action: allocation
description: "Apply shard allocation filtering rules to the specified indices"
options:
key: box_type
value: warm
allocation_type: require
wait_for_completion: true
timeout_override:
continue_if_exception: false
disable_action: false
filters:
- filtertype: pattern
kind: prefix
value: so-netscout
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{ WARM_DAYS }}


@@ -0,0 +1,29 @@
{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-o365:close', 30) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
1:
action: close
description: >-
Close o365 indices older than {{cur_close_days}} days.
options:
delete_aliases: False
timeout_override:
continue_if_exception: False
disable_action: False
filters:
- filtertype: pattern
kind: regex
value: '^(logstash-o365.*|so-o365.*)$'
- filtertype: age
source: name
direction: older
timestring: '%Y.%m.%d'
unit: days
unit_count: {{cur_close_days}}
exclude:

Some files were not shown because too many files have changed in this diff.