Mirror of https://github.com/Security-Onion-Solutions/securityonion.git
use elastic map file
@@ -2,8 +2,7 @@
 {% if sls in allowed_states %}

 {% set role = grains.id.split('_') | last %}
-{% set ELASTICUSER = salt['pillar.get']('elasticsearch:auth:user', '' ) %}
-{% set ELASTICPASS = salt['pillar.get']('elasticsearch:auth:pass', '' ) %}
+{% from 'elasticsearch/auth.map.jinja' import ELASTICAUTH with context %}

 # Remove variables.txt from /tmp - This is temp
 rmvariablesfile:
@@ -181,11 +180,7 @@ utilsyncscripts:
     - template: jinja
     - source: salt://common/tools/sbin
     - defaults:
-        ELASTICCURL: "curl"
-{% if salt['pillar.get']('elasticsearch:auth_enabled', False) %}
-    - context:
-        ELASTICCURL: "curl --user {{ELASTICUSER}}:{{ELASTICPASS}}"
-{% endif %}
+        ELASTICCURL: {{ ELASTICAUTH.elasticcurl }}


 {% if role in ['eval', 'standalone', 'sensor', 'heavynode'] %}

@@ -30,7 +30,7 @@ echo -n "Waiting for ElasticSearch..."
 COUNT=0
 ELASTICSEARCH_CONNECTED="no"
 while [[ "$COUNT" -le 240 ]]; do
-  curl -k --output /dev/null --silent --head --fail -L https://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT"
+  {{ ELASTICCURL }} -k --output /dev/null --silent --head --fail -L https://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT"
   if [ $? -eq 0 ]; then
     ELASTICSEARCH_CONNECTED="yes"
     echo "connected!"
@@ -51,7 +51,7 @@ cd ${ELASTICSEARCH_TEMPLATES}


 echo "Loading templates..."
-for i in *; do TEMPLATE=$(echo $i | cut -d '-' -f2); echo "so-$TEMPLATE"; curl -k ${ELASTICSEARCH_AUTH} -s -XPUT -L https://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}/_template/so-$TEMPLATE -H 'Content-Type: application/json' -d@$i 2>/dev/null; echo; done
+for i in *; do TEMPLATE=$(echo $i | cut -d '-' -f2); echo "so-$TEMPLATE"; {{ ELASTICCURL }} -k ${ELASTICSEARCH_AUTH} -s -XPUT -L https://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}/_template/so-$TEMPLATE -H 'Content-Type: application/json' -d@$i 2>/dev/null; echo; done
 echo

 cd - >/dev/null

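In the loop above, each template file name is split on hyphens and the second field, prefixed with "so-", becomes the template name pushed to the _template API. A quick sketch with a made-up file name (real names come from the templates directory on disk); only the cut/echo logic is taken from the loop above:

# Hypothetical template file name
i="so-zeek-template.json"
TEMPLATE=$(echo $i | cut -d '-' -f2)
echo "so-$TEMPLATE"    # prints: so-zeek
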
@@ -34,7 +34,7 @@ overlimit() {

 closedindices() {

-  INDICES=$(curl -s -k https://{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices?h=index\&expand_wildcards=closed 2> /dev/null)
+  INDICES=$({{ ELASTICCURL }} -s -k https://{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices?h=index\&expand_wildcards=closed 2> /dev/null)
   [ $? -eq 1 ] && return false
   echo ${INDICES} | grep -q -E "(logstash-|so-)"
 }
@@ -49,10 +49,10 @@ while overlimit && closedindices; do
   # First, get the list of closed indices using _cat/indices?h=index\&expand_wildcards=closed.
   # Then, sort by date by telling sort to use hyphen as delimiter and then sort on the third field.
   # Finally, select the first entry in that sorted list.
-  OLDEST_INDEX=$(curl -s -k https://{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices?h=index\&expand_wildcards=closed | grep -E "(logstash-|so-)" | sort -t- -k3 | head -1)
+  OLDEST_INDEX=$({{ ELASTICCURL }} -s -k https://{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices?h=index\&expand_wildcards=closed | grep -E "(logstash-|so-)" | sort -t- -k3 | head -1)

   # Now that we've determined OLDEST_INDEX, ask Elasticsearch to delete it.
-  curl -XDELETE -k https://{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/${OLDEST_INDEX}
+  {{ ELASTICCURL }} -XDELETE -k https://{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/${OLDEST_INDEX}

   # Finally, write a log entry that says we deleted it.
   echo "$(date) - Used disk space exceeds LOG_SIZE_LIMIT ({{LOG_SIZE_LIMIT}} GB) - Index ${OLDEST_INDEX} deleted ..." >> ${LOG}

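The comments in the hunk above describe how the oldest closed index is picked: sort on the third hyphen-delimited field (the date portion of the index name) and take the first entry. A small sketch with made-up index names (real names come from _cat/indices), using the same sort/head logic as the script:

# Hypothetical closed index names
printf '%s\n' "so-zeek-2021.03.02" "so-suricata-2021.03.03" "so-zeek-2021.03.01" \
  | sort -t- -k3 | head -1
# prints: so-zeek-2021.03.01 (the oldest by date field)
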
@@ -5,6 +5,7 @@
 {% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
 {% set MANAGER = salt['grains.get']('master') %}
 {% if grains['role'] in ['so-eval', 'so-node', 'so-managersearch', 'so-heavynode', 'so-standalone'] %}
+{% from 'elasticsearch/auth.map.jinja' import ELASTICAUTH with context %}
 # Curator
 # Create the group
 curatorgroup:
@@ -66,6 +67,8 @@ curcloseddeldel:
     - group: 939
     - mode: 755
     - template: jinja
+    - defaults:
+        ELASTICCURL: {{ ELASTICAUTH.elasticcurl }}

 curclose:
   file.managed:
@@ -147,4 +150,4 @@ append_so-curator_so-status.conf:
   test.fail_without_changes:
     - name: {{sls}}_state_not_allowed

-{% endif %}
+{% endif %}

salt/elasticsearch/auth.map.jinja (new file, +7)
@@ -0,0 +1,7 @@
+{% set ELASTICAUTH = salt['pillar.filter_by']({
+    True: {
+        'user': salt['pillar.get']('elasticsearch:auth:user'),
+        'pass': salt['pillar.get']('elasticsearch:auth:pass'),
+        'elasticcurl':'curl --user {{ELASTICAUTH.user}}:{{ELASTICAUTH.pass}}'},
+    False: {'elasticcurl': 'curl'},
+}, pillar='elasticsearch:auth:enabled') %}

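The new map keys off the elasticsearch:auth:enabled pillar value and hands back either a plain curl or a curl meant to carry the pillar user/pass, which the states above render into scripts as {{ ELASTICCURL }}. A rough sketch (not from the commit) of the two resulting invocations, reusing the wait-for-Elasticsearch check from the scripts above; host, port, and credentials here are placeholders:

# Placeholder connection details and credentials (hypothetical)
ELASTICSEARCH_HOST=localhost
ELASTICSEARCH_PORT=9200

# elasticsearch:auth:enabled -> True: curl carries the pillar user/pass
curl --user someuser:somepass -k --output /dev/null --silent --head --fail -L https://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT"

# elasticsearch:auth:enabled -> False: plain curl, as in the False branch of the map
curl -k --output /dev/null --silent --head --fail -L https://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT"
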
@@ -27,7 +27,7 @@ echo -n "Waiting for ElasticSearch..."
 COUNT=0
 ELASTICSEARCH_CONNECTED="no"
 while [[ "$COUNT" -le 240 ]]; do
-  curl ${ELASTICSEARCH_AUTH} -k --output /dev/null --silent --head --fail -L https://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT"
+  {{ ELASTICCURL }} -k --output /dev/null --silent --head --fail -L https://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT"
   if [ $? -eq 0 ]; then
     ELASTICSEARCH_CONNECTED="yes"
     echo "connected!"
@@ -47,9 +47,9 @@ fi
 cd ${ELASTICSEARCH_INGEST_PIPELINES}

 echo "Loading pipelines..."
-for i in *; do echo $i; RESPONSE=$(curl ${ELASTICSEARCH_AUTH} -k -XPUT -L https://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}/_ingest/pipeline/$i -H 'Content-Type: application/json' -d@$i 2>/dev/null); echo $RESPONSE; if [[ "$RESPONSE" == *"error"* ]]; then RETURN_CODE=1; fi; done
+for i in *; do echo $i; RESPONSE=$({{ ELASTICCURL }} -k -XPUT -L https://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}/_ingest/pipeline/$i -H 'Content-Type: application/json' -d@$i 2>/dev/null); echo $RESPONSE; if [[ "$RESPONSE" == *"error"* ]]; then RETURN_CODE=1; fi; done
 echo

 cd - >/dev/null

-exit $RETURN_CODE
+exit $RETURN_CODE

@@ -1,8 +1,7 @@
 {% from 'allowed_states.map.jinja' import allowed_states %}

 {% if sls in allowed_states %}
-{% set ELASTICUSER = salt['pillar.get']('elasticsearch:auth:user', '' ) %}
-{% set ELASTICPASS = salt['pillar.get']('elasticsearch:auth:pass', '' ) %}
+{% from 'elasticsearch/auth.map.jinja' import ELASTICAUTH with context %}

 # This state is for checking things
 {% if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone'] %}
@@ -15,11 +14,7 @@ crossclusterson:
     - source: salt://utility/bin/crossthestreams
     - template: jinja
     - defaults:
-        ELASTICCURL: "curl"
-{% if salt['pillar.get']('elasticsearch:auth_enabled', False) %}
-    - context:
-        ELASTICCURL: "curl --user {{ELASTICUSER}}:{{ELASTICPASS}}"
-{% endif %}
+        ELASTICCURL: {{ ELASTICAUTH.elasticcurl }}

 {% endif %}
 {% if grains['role'] in ['so-eval', 'so-import'] %}
@@ -31,11 +26,7 @@ fixsearch:
     - source: salt://utility/bin/eval
     - template: jinja
     - defaults:
-        ELASTICCURL: "curl"
-{% if salt['pillar.get']('elasticsearch:auth_enabled', False) %}
-    - context:
-        ELASTICCURL: "curl --user {{ELASTICUSER}}:{{ELASTICPASS}}"
-{% endif %}
+        ELASTICCURL: {{ ELASTICAUTH.elasticcurl }}
 {% endif %}

 {% else %}