mirror of https://github.com/Security-Onion-Solutions/securityonion.git
synced 2025-12-08 18:22:47 +01:00

Merge pull request #4530 from Security-Onion-Solutions/kilo
Elastic auth
@@ -67,3 +67,7 @@ peer:
reactor:
- 'so/fleet':
- salt://reactor/fleet.sls
- 'salt/beacon/*/watch_sqlite_db//opt/so/conf/kratos/db/sqlite.db':
- salt://reactor/kratos.sls
@@ -22,6 +22,9 @@ base:
'*_manager or *_managersearch':
- match: compound
- data.*
{% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/elasticsearch/auth.sls') %}
- elasticsearch.auth
{% endif %}
- secrets
- global
- minions.{{ grains.id }}

@@ -38,6 +41,9 @@ base:
- secrets
- healthcheck.eval
- elasticsearch.eval
{% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/elasticsearch/auth.sls') %}
- elasticsearch.auth
{% endif %}
- global
- minions.{{ grains.id }}

@@ -46,6 +52,9 @@ base:
- logstash.manager
- logstash.search
- elasticsearch.search
{% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/elasticsearch/auth.sls') %}
- elasticsearch.auth
{% endif %}
- data.*
- zeeklogs
- secrets

@@ -88,5 +97,8 @@ base:
- zeeklogs
- secrets
- elasticsearch.eval
{% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/elasticsearch/auth.sls') %}
- elasticsearch.auth
{% endif %}
- global
- minions.{{ grains.id }}
- minions.{{ grains.id }}
@@ -2,6 +2,7 @@
{% if sls in allowed_states %}

{% set role = grains.id.split('_') | last %}
{% from 'elasticsearch/auth.map.jinja' import ELASTICAUTH with context %}

# Remove variables.txt from /tmp - This is temp
rmvariablesfile:

@@ -95,7 +96,6 @@ commonpkgs:
- netcat
- python3-mysqldb
- sqlite3
- argon2
- libssl-dev
- python3-dateutil
- python3-m2crypto

@@ -128,7 +128,6 @@ commonpkgs:
- net-tools
- curl
- sqlite
- argon2
- mariadb-devel
- nmap-ncat
- python3

@@ -169,6 +168,14 @@ alwaysupdated:
Etc/UTC:
timezone.system

elastic_curl_config:
file.managed:
- name: /opt/so/conf/elasticsearch/curl.config
- source: salt://elasticsearch/curl.config
- mode: 600
- show_changes: False
- makedirs: True

# Sync some Utilities
utilsyncscripts:
file.recurse:

@@ -178,6 +185,8 @@ utilsyncscripts:
- file_mode: 755
- template: jinja
- source: salt://common/tools/sbin
- defaults:
ELASTICCURL: {{ ELASTICAUTH.elasticcurl }}

{% if role in ['eval', 'standalone', 'sensor', 'heavynode'] %}
# Add sensor cleanup
@@ -252,6 +252,7 @@ lookup_salt_value() {
key=$1
group=$2
kind=$3
output=${4:-newline_values_only}

if [ -z "$kind" ]; then
kind=pillar

@@ -261,7 +262,7 @@ lookup_salt_value() {
group=${group}:
fi

salt-call --no-color ${kind}.get ${group}${key} --out=newline_values_only
salt-call --no-color ${kind}.get ${group}${key} --out=${output}
}

lookup_pillar() {

@@ -509,13 +510,14 @@ wait_for_web_response() {
url=$1
expected=$2
maxAttempts=${3:-300}
curlcmd=${4:-curl}
logfile=/root/wait_for_web_response.log
truncate -s 0 "$logfile"
attempt=0
while [[ $attempt -lt $maxAttempts ]]; do
attempt=$((attempt+1))
echo "Waiting for value '$expected' at '$url' ($attempt/$maxAttempts)"
result=$(curl -ks -L $url)
result=$($curlcmd -ks -L $url)
exitcode=$?

echo "--------------------------------------------------" >> $logfile
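Both so-common changes above are backward compatible: the new arguments are optional and default to the old behavior. A minimal sketch of how callers can use them; the rendered curl command string is an assumption based on auth.map.jinja later in this change:

    # Fetch the whole elasticsearch:auth pillar as JSON instead of newline values:
    authPillarJson=$(lookup_salt_value "auth" "elasticsearch" "pillar" "json")

    # Poll Kibana with the auth-aware curl command instead of plain curl,
    # keeping the default 300 attempts:
    wait_for_web_response "http://localhost:5601/app/kibana" "Elastic" 300 "curl -K /opt/so/conf/elasticsearch/curl.config"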
@@ -145,9 +145,9 @@ EOF
rulename=$(echo ${raw_rulename,,} | sed 's/ /_/g')

cat << EOF >> "$rulename.yaml"
# Elasticsearch Host
es_host: elasticsearch
es_port: 9200
# Elasticsearch Host Override (optional)
# es_host: elasticsearch
# es_port: 9200

# (Required)
# Rule name, must be unique
62  salt/common/tools/sbin/so-elastic-auth  Normal file
@@ -0,0 +1,62 @@
#!/bin/bash

# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

. /usr/sbin/so-common

ES_AUTH_PILLAR=${ELASTIC_AUTH_PILLAR:-/opt/so/saltstack/local/pillar/elasticsearch/auth.sls}
ES_USERS_FILE=${ELASTIC_USERS_FILE:-/opt/so/saltstack/local/salt/elasticsearch/files/users}

authEnable=${1:-true}

if ! grep -q "enabled: " "$ES_AUTH_PILLAR"; then
echo "Elastic auth pillar file is invalid. Unable to proceed."
exit 1
fi

if [[ "$authEnable" == "true" ]]; then
if grep -q "enabled: False" "$ES_AUTH_PILLAR"; then
sed -i 's/enabled: False/enabled: True/g' "$ES_AUTH_PILLAR"
if [[ -z "$ELASTIC_AUTH_SKIP_HIGHSTATE" ]]; then
echo "Applying highstate - this may take a few minutes..."
salt-call state.highstate queue=True
fi
echo "Elastic auth is now enabled."
if grep -q "argon" "$ES_USERS_FILE"; then
echo ""
echo "IMPORTANT: The following users will need to change their password, after logging into SOC, in order to access Kibana:"
grep argon "$ES_USERS_FILE" | cut -d ":" -f 1
fi
else
echo "Auth is already enabled."
fi
elif [[ "$authEnable" == "false" ]]; then
if grep -q "enabled: True" "$ES_AUTH_PILLAR"; then
sed -i 's/enabled: True/enabled: False/g' "$ES_AUTH_PILLAR"
if [[ -z "$ELASTIC_AUTH_SKIP_HIGHSTATE" ]]; then
echo "Applying highstate - this may take a few minutes..."
salt-call state.highstate queue=True
fi
echo "Elastic auth is now disabled."
else
echo "Auth is already disabled."
fi
else
echo "Usage: $0 <true|false>"
echo ""
echo "Enables Elastic authentication. Defaults to true."
echo ""
fi
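A rough usage sketch for the new helper, based only on the logic above (ELASTIC_AUTH_SKIP_HIGHSTATE is the environment variable checked by the script, not a documented flag):

    # Enable Elastic authentication: flips "enabled: False" to "enabled: True" in
    # /opt/so/saltstack/local/pillar/elasticsearch/auth.sls and applies a highstate.
    so-elastic-auth true

    # Disable it again, skipping the automatic highstate:
    ELASTIC_AUTH_SKIP_HIGHSTATE=1 so-elastic-auth false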
@@ -50,7 +50,7 @@ done
if [ $SKIP -ne 1 ]; then
# List indices
echo
curl -k -L https://{{ NODEIP }}:9200/_cat/indices?v
{{ ELASTICCURL }} -k -L https://{{ NODEIP }}:9200/_cat/indices?v
echo
# Inform user we are about to delete all data
echo

@@ -89,10 +89,10 @@ fi
# Delete data
echo "Deleting data..."

INDXS=$(curl -s -XGET -k -L https://{{ NODEIP }}:9200/_cat/indices?v | egrep 'logstash|elastalert|so-' | awk '{ print $3 }')
INDXS=$({{ ELASTICCURL }} -s -XGET -k -L https://{{ NODEIP }}:9200/_cat/indices?v | egrep 'logstash|elastalert|so-' | awk '{ print $3 }')
for INDX in ${INDXS}
do
curl -XDELETE -k -L https://"{{ NODEIP }}:9200/${INDX}" > /dev/null 2>&1
{{ ELASTICCURL }} -XDELETE -k -L https://"{{ NODEIP }}:9200/${INDX}" > /dev/null 2>&1
done

#Start Logstash/Filebeat
@@ -18,4 +18,4 @@

. /usr/sbin/so-common

curl -s -k -L https://{{ NODEIP }}:9200/_cat/indices?pretty
{{ ELASTICCURL }} -s -k -L https://{{ NODEIP }}:9200/_cat/indices?pretty
@@ -21,5 +21,5 @@ THEHIVEESPORT=9400

echo "Removing read only attributes for indices..."
echo
curl -s -k -XPUT -H "Content-Type: application/json" -L https://$IP:9200/_all/_settings -d '{"index.blocks.read_only_allow_delete": null}' 2>&1 | if grep -q ack; then echo "Index settings updated..."; else echo "There was any issue updating the read-only attribute. Please ensure Elasticsearch is running.";fi;
curl -XPUT -H "Content-Type: application/json" -L http://$IP:9400/_all/_settings -d '{"index.blocks.read_only_allow_delete": null}' 2>&1 | if grep -q ack; then echo "Index settings updated..."; else echo "There was any issue updating the read-only attribute. Please ensure Elasticsearch is running.";fi;
{{ ELASTICCURL }} -s -k -XPUT -H "Content-Type: application/json" -L https://$IP:9200/_all/_settings -d '{"index.blocks.read_only_allow_delete": null}' 2>&1 | if grep -q ack; then echo "Index settings updated..."; else echo "There was an issue updating the read-only attribute. Please ensure Elasticsearch is running.";fi;
{{ ELASTICCURL }} -XPUT -H "Content-Type: application/json" -L http://$IP:9400/_all/_settings -d '{"index.blocks.read_only_allow_delete": null}' 2>&1 | if grep -q ack; then echo "Index settings updated..."; else echo "There was an issue updating the read-only attribute. Please ensure Elasticsearch is running.";fi;
@@ -19,7 +19,7 @@
. /usr/sbin/so-common

if [ "$1" == "" ]; then
curl -s -k -L https://{{ NODEIP }}:9200/_nodes/stats | jq .nodes | jq ".[] | .ingest.pipelines"
{{ ELASTICCURL }} -s -k -L https://{{ NODEIP }}:9200/_nodes/stats | jq .nodes | jq ".[] | .ingest.pipelines"
else
curl -s -k -L https://{{ NODEIP }}:9200/_nodes/stats | jq .nodes | jq ".[] | .ingest.pipelines.\"$1\""
{{ ELASTICCURL }} -s -k -L https://{{ NODEIP }}:9200/_nodes/stats | jq .nodes | jq ".[] | .ingest.pipelines.\"$1\""
fi
@@ -19,7 +19,7 @@
. /usr/sbin/so-common

if [ "$1" == "" ]; then
curl -s -k -L https://{{ NODEIP }}:9200/_ingest/pipeline/* | jq .
{{ ELASTICCURL }} -s -k -L https://{{ NODEIP }}:9200/_ingest/pipeline/* | jq .
else
curl -s -k -L https://{{ NODEIP }}:9200/_ingest/pipeline/$1 | jq .
{{ ELASTICCURL }} -s -k -L https://{{ NODEIP }}:9200/_ingest/pipeline/$1 | jq .
fi
@@ -17,7 +17,7 @@
{%- set NODEIP = salt['pillar.get']('elasticsearch:mainip', '') -%}
. /usr/sbin/so-common
if [ "$1" == "" ]; then
curl -s -k -L https://{{ NODEIP }}:9200/_ingest/pipeline/* | jq 'keys'
{{ ELASTICCURL }} -s -k -L https://{{ NODEIP }}:9200/_ingest/pipeline/* | jq 'keys'
else
curl -s -k -L https://{{ NODEIP }}:9200/_ingest/pipeline/$1 | jq
{{ ELASTICCURL }} -s -k -L https://{{ NODEIP }}:9200/_ingest/pipeline/$1 | jq
fi
37  salt/common/tools/sbin/so-elasticsearch-query  Normal file
@@ -0,0 +1,37 @@
#!/bin/bash
#
# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
. /usr/sbin/so-common

if [[ $# -lt 1 ]]; then
echo "Submit a cURL request to the local Security Onion Elasticsearch host."
echo ""
echo "Usage: $0 <PATH> [ARGS,...]"
echo ""
echo "Where"
echo " PATH represents the elastic function being requested."
echo " ARGS is used to specify additional, optional curl parameters."
echo ""
echo "Examples:"
echo " $0 /"
echo " $0 '*:so-*/_search' -d '{\"query\": {\"match_all\": {}},\"size\": 1}' | jq"
exit 1
fi

QUERYPATH=$1
shift

{{ ELASTICCURL }} -s -k -L -H "Content-Type: application/json" "https://localhost:9200/${QUERYPATH}" "$@"
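The wrapper appends the given path to https://localhost:9200 and hands any remaining arguments straight to curl, so the examples from its own usage text work as-is:

    so-elasticsearch-query /
    so-elasticsearch-query '*:so-*/_search' -d '{"query": {"match_all": {}},"size": 1}' | jq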
@@ -18,4 +18,4 @@

. /usr/sbin/so-common

curl -s -k -L https://{{ NODEIP }}:9200/_cat/shards?pretty
{{ ELASTICCURL }} -s -k -L https://{{ NODEIP }}:9200/_cat/shards?pretty
@@ -18,4 +18,4 @@

. /usr/sbin/so-common

curl -s -k -L -XDELETE https://{{ NODEIP }}:9200/_template/$1
{{ ELASTICCURL }} -s -k -L -XDELETE https://{{ NODEIP }}:9200/_template/$1
@@ -19,7 +19,7 @@
. /usr/sbin/so-common

if [ "$1" == "" ]; then
curl -s -k -L https://{{ NODEIP }}:9200/_template/* | jq .
{{ ELASTICCURL }} -s -k -L https://{{ NODEIP }}:9200/_template/* | jq .
else
curl -s -k -L https://{{ NODEIP }}:9200/_template/$1 | jq .
{{ ELASTICCURL }} -s -k -L https://{{ NODEIP }}:9200/_template/$1 | jq .
fi
@@ -17,7 +17,7 @@
{%- set NODEIP = salt['pillar.get']('elasticsearch:mainip', '') -%}
. /usr/sbin/so-common
if [ "$1" == "" ]; then
curl -s -k -L https://{{ NODEIP }}:9200/_template/* | jq 'keys'
{{ ELASTICCURL }} -s -k -L https://{{ NODEIP }}:9200/_template/* | jq 'keys'
else
curl -s -k -L https://{{ NODEIP }}:9200/_template/$1 | jq
{{ ELASTICCURL }} -s -k -L https://{{ NODEIP }}:9200/_template/$1 | jq
fi
@@ -30,7 +30,7 @@ echo -n "Waiting for ElasticSearch..."
COUNT=0
ELASTICSEARCH_CONNECTED="no"
while [[ "$COUNT" -le 240 ]]; do
curl -k --output /dev/null --silent --head --fail -L https://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT"
{{ ELASTICCURL }} -k --output /dev/null --silent --head --fail -L https://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT"
if [ $? -eq 0 ]; then
ELASTICSEARCH_CONNECTED="yes"
echo "connected!"

@@ -51,7 +51,7 @@ cd ${ELASTICSEARCH_TEMPLATES}

echo "Loading templates..."
for i in *; do TEMPLATE=$(echo $i | cut -d '-' -f2); echo "so-$TEMPLATE"; curl -k ${ELASTICSEARCH_AUTH} -s -XPUT -L https://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}/_template/so-$TEMPLATE -H 'Content-Type: application/json' -d@$i 2>/dev/null; echo; done
for i in *; do TEMPLATE=$(echo $i | cut -d '-' -f2); echo "so-$TEMPLATE"; {{ ELASTICCURL }} -k ${ELASTICSEARCH_AUTH} -s -XPUT -L https://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}/_template/so-$TEMPLATE -H 'Content-Type: application/json' -d@$i 2>/dev/null; echo; done
echo

cd - >/dev/null
@@ -31,7 +31,7 @@ echo -n "Waiting for ElasticSearch..."
COUNT=0
ELASTICSEARCH_CONNECTED="no"
while [[ "$COUNT" -le 240 ]]; do
curl -k --output /dev/null --silent --head --fail -L https://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT"
{{ ELASTICCURL }} -k --output /dev/null --silent --head --fail -L https://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT"
if [ $? -eq 0 ]; then
ELASTICSEARCH_CONNECTED="yes"
echo "connected!"

@@ -48,8 +48,8 @@ if [ "$ELASTICSEARCH_CONNECTED" == "no" ]; then
echo
fi
echo "Testing to see if the pipelines are already applied"
ESVER=$(curl -sk https://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT" |jq .version.number |tr -d \")
PIPELINES=$(curl -sk https://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT"/_ingest/pipeline/filebeat-$ESVER-suricata-eve-pipeline | jq . | wc -c)
ESVER=$({{ ELASTICCURL }} -sk https://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT" |jq .version.number |tr -d \")
PIPELINES=$({{ ELASTICCURL }} -sk https://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT"/_ingest/pipeline/filebeat-$ESVER-suricata-eve-pipeline | jq . | wc -c)

if [[ "$PIPELINES" -lt 5 ]]; then
echo "Setting up ingest pipeline(s)"
@@ -15,4 +15,4 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

curl -X GET -k -L "https://localhost:9200/_cat/indices?v&s=index"
{{ ELASTICCURL }} -X GET -k -L "https://localhost:9200/_cat/indices?v&s=index"
@@ -1,13 +1,13 @@
. /usr/sbin/so-common

wait_for_web_response "http://localhost:5601/app/kibana" "Elastic"
wait_for_web_response "http://localhost:5601/app/kibana" "Elastic" 300 "{{ ELASTICCURL }}"
## This hackery will be removed if using Elastic Auth ##

# Let's snag a cookie from Kibana
THECOOKIE=$(curl -c - -X GET http://localhost:5601/ | grep sid | awk '{print $7}')
THECOOKIE=$({{ ELASTICCURL }} -c - -X GET http://localhost:5601/ | grep sid | awk '{print $7}')

# Disable certain Features from showing up in the Kibana UI
echo
echo "Setting up default Space:"
curl -b "sid=$THECOOKIE" -L -X PUT "localhost:5601/api/spaces/space/default" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d' {"id":"default","name":"Default","disabledFeatures":["ml","enterpriseSearch","siem","logs","infrastructure","apm","uptime","monitoring","stackAlerts","actions","fleet"]} ' >> /opt/so/log/kibana/misc.log
echo
{{ ELASTICCURL }} -b "sid=$THECOOKIE" -L -X PUT "localhost:5601/api/spaces/space/default" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d' {"id":"default","name":"Default","disabledFeatures":["ml","enterpriseSearch","siem","logs","infrastructure","apm","uptime","monitoring","stackAlerts","actions","fleet"]} ' >> /opt/so/log/kibana/misc.log
echo
@@ -39,10 +39,11 @@ email=$2

kratosUrl=${KRATOS_URL:-http://127.0.0.1:4434}
databasePath=${KRATOS_DB_PATH:-/opt/so/conf/kratos/db/db.sqlite}
argon2Iterations=${ARGON2_ITERATIONS:-3}
argon2Memory=${ARGON2_MEMORY:-14}
argon2Parallelism=${ARGON2_PARALLELISM:-2}
argon2HashSize=${ARGON2_HASH_SIZE:-32}
bcryptRounds=${BCRYPT_ROUNDS:-12}
elasticUsersFile=${ELASTIC_USERS_FILE:-/opt/so/saltstack/local/salt/elasticsearch/files/users}
elasticRolesFile=${ELASTIC_ROLES_FILE:-/opt/so/saltstack/local/salt/elasticsearch/files/users_roles}
esUID=${ELASTIC_UID:-930}
esGID=${ELASTIC_GID:-930}

function fail() {
msg=$1
@@ -58,7 +59,7 @@ function require() {

# Verify this environment is capable of running this script
function verifyEnvironment() {
require "argon2"
require "htpasswd"
require "jq"
require "curl"
require "openssl"
@@ -95,6 +96,16 @@ function validateEmail() {
fi
}

function hashPassword() {
password=$1

passwordHash=$(echo "${password}" | htpasswd -niBC $bcryptRounds SOUSER)
passwordHash=$(echo "$passwordHash" | cut -c 11-)
passwordHash="\$2a${passwordHash}" # still waiting for https://github.com/elastic/elasticsearch/issues/51132
echo "$passwordHash"
}

function updatePassword() {
identityId=$1
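The new hashPassword() switches SOC password hashes from argon2 to bcrypt so the same hash can be reused by Elasticsearch's file realm. A sketch of the string handling with a made-up hash; the $2y-to-$2a rewrite appears to work around Elasticsearch not accepting the $2y prefix (the elastic/elasticsearch#51132 issue referenced in the comment):

    # htpasswd -niBC 12 SOUSER  ->  SOUSER:$2y$12$abc...xyz   (example output)
    # cut -c 11-                ->  $12$abc...xyz             (drops "SOUSER:$2y")
    # prepend "\$2a"            ->  $2a$12$abc...xyz          (prefix accepted by the file realm)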
@@ -111,15 +122,127 @@ function updatePassword() {

if [[ -n $identityId ]]; then
# Generate password hash
salt=$(openssl rand -hex 8)
passwordHash=$(echo "${password}" | argon2 ${salt} -id -t $argon2Iterations -m $argon2Memory -p $argon2Parallelism -l $argon2HashSize -e)

passwordHash=$(hashPassword "$password")
# Update DB with new hash
echo "update identity_credentials set config=CAST('{\"hashed_password\":\"${passwordHash}\"}' as BLOB) where identity_id=${identityId};" | sqlite3 "$databasePath"
echo "update identity_credentials set config=CAST('{\"hashed_password\":\"$passwordHash\"}' as BLOB) where identity_id=${identityId};" | sqlite3 "$databasePath"
[[ $? != 0 ]] && fail "Unable to update password"
fi
}

function createElasticFile() {
filename=$1
tmpFile=${filename}
truncate -s 0 "$tmpFile"
chmod 600 "$tmpFile"
chown "${esUID}:${esGID}" "$tmpFile"
}

function syncElasticSystemUser() {
json=$1
userid=$2
usersFile=$3

user=$(echo "$json" | jq -r ".local.users.$userid.user")
pass=$(echo "$json" | jq -r ".local.users.$userid.pass")

[[ -z "$user" || -z "$pass" ]] && fail "Elastic auth credentials for system user '$userid' are missing"
hash=$(hashPassword "$pass")

echo "${user}:${hash}" >> "$usersFile"
}

function syncElasticSystemRole() {
json=$1
userid=$2
role=$3
rolesFile=$4

user=$(echo "$json" | jq -r ".local.users.$userid.user")

[[ -z "$user" ]] && fail "Elastic auth credentials for system user '$userid' are missing"

echo "${role}:${user}" >> "$rolesFile"
}

function syncElastic() {
echo "Syncing users between SOC and Elastic..."
usersTmpFile="${elasticUsersFile}.tmp"
rolesTmpFile="${elasticRolesFile}.tmp"
createElasticFile "${usersTmpFile}"
createElasticFile "${rolesTmpFile}"

authPillarJson=$(lookup_salt_value "auth" "elasticsearch" "pillar" "json")

syncElasticSystemUser "$authPillarJson" "so_elastic_user" "$usersTmpFile"
syncElasticSystemRole "$authPillarJson" "so_elastic_user" "superuser" "$rolesTmpFile"

syncElasticSystemUser "$authPillarJson" "so_kibana_user" "$usersTmpFile"
syncElasticSystemRole "$authPillarJson" "so_kibana_user" "superuser" "$rolesTmpFile"

syncElasticSystemUser "$authPillarJson" "so_logstash_user" "$usersTmpFile"
syncElasticSystemRole "$authPillarJson" "so_logstash_user" "superuser" "$rolesTmpFile"

syncElasticSystemUser "$authPillarJson" "so_beats_user" "$usersTmpFile"
syncElasticSystemRole "$authPillarJson" "so_beats_user" "superuser" "$rolesTmpFile"

syncElasticSystemUser "$authPillarJson" "so_monitor_user" "$usersTmpFile"
syncElasticSystemRole "$authPillarJson" "so_monitor_user" "remote_monitoring_collector" "$rolesTmpFile"
syncElasticSystemRole "$authPillarJson" "so_monitor_user" "remote_monitoring_agent" "$rolesTmpFile"
syncElasticSystemRole "$authPillarJson" "so_monitor_user" "monitoring_user" "$rolesTmpFile"

if [[ -f "$databasePath" ]]; then
# Generate the new users file
echo "select '{\"user\":\"' || ici.identifier || '\", \"data\":' || ic.config || '}'" \
"from identity_credential_identifiers ici, identity_credentials ic " \
"where ici.identity_credential_id=ic.id and ic.config like '%hashed_password%' " \
"order by ici.identifier;" | \
sqlite3 "$databasePath" | \
jq -r '.user + ":" + .data.hashed_password' \
>> "$usersTmpFile"
[[ $? != 0 ]] && fail "Unable to read credential hashes from database"

# Generate the new users_roles file
echo "select 'superuser:' || ici.identifier " \
"from identity_credential_identifiers ici, identity_credentials ic " \
"where ici.identity_credential_id=ic.id and ic.config like '%hashed_password%' " \
"order by ici.identifier;" | \
sqlite3 "$databasePath" \
>> "$rolesTmpFile"
[[ $? != 0 ]] && fail "Unable to read credential IDs from database"
else
echo "Database file does not exist yet, skipping users export"
fi

if [[ -s "${usersTmpFile}" ]]; then
mv "${usersTmpFile}" "${elasticUsersFile}"
mv "${rolesTmpFile}" "${elasticRolesFile}"

if [[ -z "$SKIP_STATE_APPLY" ]]; then
echo "Applying elastic state locally; This can take a few minutes..."
echo "Applying elastic state locally at $(date)" >> /opt/so/log/soc/sync.log 2>&1
salt-call state.apply elasticsearch queue=True >> /opt/so/log/soc/sync.log 2>&1
echo "Applying elastic state to elastic minions; This can take a few minutes..."
echo "Applying elastic state to elastic minions at $(date)" >> /opt/so/log/soc/sync.log 2>&1
salt -C 'G@role:so-node or G@role:so-heavynode' state.apply elasticsearch queue=True >> /opt/so/log/soc/sync.log 2>&1
fi
else
echo "Newly generated users/roles files are incomplete; aborting."
fi
}

function syncAll() {
if [[ -n "$STALE_MIN" ]]; then
staleCount=$(echo "select count(*) from identity_credentials where updated_at >= Datetime('now', '-${STALE_MIN} minutes');" \
| sqlite3 "$databasePath")
if [[ "$staleCount" == "0" ]]; then
return 1
fi
fi
syncElastic
return 0
}

function listUsers() {
response=$(curl -Ss -L ${kratosUrl}/identities)
[[ $? != 0 ]] && fail "Unable to communicate with Kratos"
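For reference, a hypothetical excerpt of what syncElastic() ends up writing (usernames come from the auth pillar, hashes shortened): the users file holds user:bcrypt-hash pairs and users_roles holds role:user pairs, one mapping per line.

    # /opt/so/saltstack/local/salt/elasticsearch/files/users
    #   so_elastic:$2a$12$Qx...
    #   so_kibana:$2a$12$Zl...
    #   analyst@example.com:$2a$12$Jw...
    #
    # /opt/so/saltstack/local/salt/elasticsearch/files/users_roles
    #   superuser:so_elastic
    #   superuser:analyst@example.com
    #   remote_monitoring_collector:so_monitor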
@@ -211,9 +334,10 @@ case "${operation}" in
validateEmail "$email"
updatePassword
createUser "$email"
syncAll
echo "Successfully added new user to SOC"
check_container thehive && echo $password | so-thehive-user-add "$email"
check_container fleet && echo $password | so-fleet-user-add "$email"
check_container thehive && echo "$password" | so-thehive-user-add "$email"
check_container fleet && echo "$password" | so-fleet-user-add "$email"
;;

"list")

@@ -226,6 +350,7 @@ case "${operation}" in
[[ "$email" == "" ]] && fail "Email address must be provided"

updateUser "$email"
syncAll
echo "Successfully updated user"
;;

@@ -234,6 +359,7 @@ case "${operation}" in
[[ "$email" == "" ]] && fail "Email address must be provided"

updateStatus "$email" 'active'
syncAll
echo "Successfully enabled user"
check_container thehive && so-thehive-user-enable "$email" true
check_container fleet && so-fleet-user-enable "$email" true

@@ -244,6 +370,7 @@ case "${operation}" in
[[ "$email" == "" ]] && fail "Email address must be provided"

updateStatus "$email" 'locked'
syncAll
echo "Successfully disabled user"
check_container thehive && so-thehive-user-enable "$email" false
check_container fleet && so-fleet-user-enable "$email" false

@@ -254,11 +381,16 @@ case "${operation}" in
[[ "$email" == "" ]] && fail "Email address must be provided"

deleteUser "$email"
syncAll
echo "Successfully deleted user"
check_container thehive && so-thehive-user-enable "$email" false
check_container fleet && so-fleet-user-enable "$email" false
;;

"sync")
syncAll && echo "Synchronization completed at $(date)"
;;

"validate")
validateEmail "$email"
updatePassword

@@ -280,4 +412,4 @@ case "${operation}" in
;;
esac

exit 0
exit 0
@@ -392,7 +392,7 @@ rc1_to_rc2() {
local NAME=$(echo $p | awk '{print $1}')
local IP=$(echo $p | awk '{print $2}')
echo "Removing the old cross cluster config for $NAME"
curl -XPUT -H 'Content-Type: application/json' http://localhost:9200/_cluster/settings -d '{"persistent":{"cluster":{"remote":{"'$NAME'":{"skip_unavailable":null,"seeds":null}}}}}'
{{ ELASTICCURL }} -XPUT -H 'Content-Type: application/json' http://localhost:9200/_cluster/settings -d '{"persistent":{"cluster":{"remote":{"'$NAME'":{"skip_unavailable":null,"seeds":null}}}}}'
done </tmp/nodes.txt
# Add the nodes back using hostname
while read p; do

@@ -400,7 +400,7 @@ rc1_to_rc2() {
local EHOSTNAME=$(echo $p | awk -F"_" '{print $1}')
local IP=$(echo $p | awk '{print $2}')
echo "Adding the new cross cluster config for $NAME"
curl -XPUT http://localhost:9200/_cluster/settings -H'Content-Type: application/json' -d '{"persistent": {"search": {"remote": {"'$NAME'": {"skip_unavailable": "true", "seeds": ["'$EHOSTNAME':9300"]}}}}}'
{{ ELASTICCURL }} -XPUT http://localhost:9200/_cluster/settings -H'Content-Type: application/json' -d '{"persistent": {"search": {"remote": {"'$NAME'": {"skip_unavailable": "true", "seeds": ["'$EHOSTNAME':9300"]}}}}}'
done </tmp/nodes.txt

INSTALLEDVERSION=rc.2
@@ -34,7 +34,7 @@ overlimit() {

closedindices() {

INDICES=$(curl -s -k https://{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices?h=index\&expand_wildcards=closed 2> /dev/null)
INDICES=$({{ ELASTICCURL }} -s -k https://{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices?h=index\&expand_wildcards=closed 2> /dev/null)
[ $? -eq 1 ] && return false
echo ${INDICES} | grep -q -E "(logstash-|so-)"
}

@@ -49,10 +49,10 @@ while overlimit && closedindices; do
# First, get the list of closed indices using _cat/indices?h=index\&expand_wildcards=closed.
# Then, sort by date by telling sort to use hyphen as delimiter and then sort on the third field.
# Finally, select the first entry in that sorted list.
OLDEST_INDEX=$(curl -s -k https://{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices?h=index\&expand_wildcards=closed | grep -E "(logstash-|so-)" | sort -t- -k3 | head -1)
OLDEST_INDEX=$({{ ELASTICCURL }} -s -k https://{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices?h=index\&expand_wildcards=closed | grep -E "(logstash-|so-)" | sort -t- -k3 | head -1)

# Now that we've determined OLDEST_INDEX, ask Elasticsearch to delete it.
curl -XDELETE -k https://{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/${OLDEST_INDEX}
{{ ELASTICCURL }} -XDELETE -k https://{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/${OLDEST_INDEX}

# Finally, write a log entry that says we deleted it.
echo "$(date) - Used disk space exceeds LOG_SIZE_LIMIT ({{LOG_SIZE_LIMIT}} GB) - Index ${OLDEST_INDEX} deleted ..." >> ${LOG}
@@ -3,6 +3,8 @@
{% elif grains['role'] in ['so-eval', 'so-managersearch', 'so-standalone'] %}
{%- set elasticsearch = salt['pillar.get']('manager:mainip', '') -%}
{%- endif %}
{%- set ES_USER = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:user', '') %}
{%- set ES_PASS = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:pass', '') %}

---
# Remember, leave a key empty if there is no value. None will be a string,

@@ -11,6 +13,8 @@ client:
hosts:
- {{elasticsearch}}
port: 9200
username: {{ ES_USER }}
password: {{ ES_PASS }}
url_prefix:
use_ssl: True
certificate:
@@ -5,6 +5,7 @@
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{% set MANAGER = salt['grains.get']('master') %}
{% if grains['role'] in ['so-eval', 'so-node', 'so-managersearch', 'so-heavynode', 'so-standalone'] %}
{% from 'elasticsearch/auth.map.jinja' import ELASTICAUTH with context %}
# Curator
# Create the group
curatorgroup:

@@ -48,6 +49,7 @@ curconf:
- source: salt://curator/files/curator.yml
- user: 934
- group: 939
- mode: 660
- template: jinja

curcloseddel:

@@ -66,6 +68,8 @@ curcloseddeldel:
- group: 939
- mode: 755
- template: jinja
- defaults:
ELASTICCURL: {{ ELASTICAUTH.elasticcurl }}

curclose:
file.managed:

@@ -147,4 +151,4 @@ append_so-curator_so-status.conf:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed

{% endif %}
{% endif %}
@@ -1,3 +1,5 @@
{%- set ES_USER = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:user', '') %}
{%- set ES_PASS = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:pass', '') %}
elastalert:
config:
rules_folder: /opt/elastalert/rules/

@@ -19,8 +21,8 @@ elastalert:
use_ssl: true
verify_certs: false
#es_send_get_body_as: GET
#es_username: someusername
#es_password: somepassword
es_username: {{ ES_USER }}
es_password: {{ ES_PASS }}
writeback_index: elastalert_status
alert_time_limit:
days: 2

@@ -45,4 +47,4 @@ elastalert:
level: INFO
handlers:
- file
propagate: false
propagate: false
@@ -19,9 +19,14 @@ class PlaybookESAlerter(Alerter):
today = strftime("%Y.%m.%d", gmtime())
timestamp = strftime("%Y-%m-%d"'T'"%H:%M:%S"'.000Z', gmtime())
headers = {"Content-Type": "application/json"}

creds = None
if 'elasticsearch_user' in self.rule and 'elasticsearch_pass' in self.rule:
creds = (self.rule['elasticsearch_user'], self.rule['elasticsearch_pass'])

payload = {"rule": { "name": self.rule['play_title'],"case_template": self.rule['play_id'],"uuid": self.rule['play_id'],"category": self.rule['rule.category']},"event":{ "severity": self.rule['event.severity'],"module": self.rule['event.module'],"dataset": self.rule['event.dataset'],"severity_label": self.rule['sigma_level']},"kibana_pivot": self.rule['kibana_pivot'],"soc_pivot": self.rule['soc_pivot'],"play_url": self.rule['play_url'],"sigma_level": self.rule['sigma_level'],"event_data": match, "@timestamp": timestamp}
url = f"https://{self.rule['elasticsearch_host']}/so-playbook-alerts-{today}/_doc/"
requests.post(url, data=json.dumps(payload), headers=headers, verify=False)
requests.post(url, data=json.dumps(payload), headers=headers, verify=False, auth=creds)

def get_info(self):
return {'type': 'PlaybookESAlerter'}
@@ -99,6 +99,7 @@ elastaconf:
elastalert_config: {{ elastalert_config.elastalert.config }}
- user: 933
- group: 933
- mode: 660
- template: jinja

wait_for_elasticsearch:
7  salt/elasticsearch/auth.map.jinja  Normal file
@@ -0,0 +1,7 @@
{% set ELASTICAUTH = salt['pillar.filter_by']({
True: {
'user': salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:user'),
'pass': salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:pass'),
'elasticcurl':'curl -K /opt/so/conf/elasticsearch/curl.config' },
False: {'elasticcurl': 'curl'},
}, pillar='elasticsearch:auth:enabled', default=False) %}
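Depending on the elasticsearch:auth:enabled pillar, ELASTICAUTH.elasticcurl resolves to one of two command strings, so a templated line like the ones in the sbin scripts above renders roughly as follows (illustrative):

    # auth enabled:
    curl -K /opt/so/conf/elasticsearch/curl.config -s -k -L https://localhost:9200/_cat/indices?v
    # auth disabled:
    curl -s -k -L https://localhost:9200/_cat/indices?v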
39  salt/elasticsearch/auth.sls  Normal file
@@ -0,0 +1,39 @@
{% set so_elastic_user_pass = salt['random.get_str'](20) %}
{% set so_kibana_user_pass = salt['random.get_str'](20) %}
{% set so_logstash_user_pass = salt['random.get_str'](20) %}
{% set so_beats_user_pass = salt['random.get_str'](20) %}
{% set so_monitor_user_pass = salt['random.get_str'](20) %}

elastic_auth_pillar:
file.managed:
- name: /opt/so/saltstack/local/pillar/elasticsearch/auth.sls
- mode: 600
- reload_pillar: True
- contents: |
elasticsearch:
auth:
enabled: False
users:
so_elastic_user:
user: so_elastic
pass: {{ so_elastic_user_pass }}
so_kibana_user:
user: so_kibana
pass: {{ so_kibana_user_pass }}
so_logstash_user:
user: so_logstash
pass: {{ so_logstash_user_pass }}
so_beats_user:
user: so_beats
pass: {{ so_beats_user_pass }}
so_monitor_user:
user: so_monitor
pass: {{ so_monitor_user_pass }}
# Since we are generating a random password, and we don't want that to happen every time
# a highstate runs, we only manage the file if each user isn't already present in the file.
# If the pillar file doesn't exist, then the default value provided to pillar.get should not
# be within the file either, so it should then be created.
- unless:
{% for so_app_user, values in salt['pillar.get']('elasticsearch:auth:users', {'so_noapp_user': {'user': 'r@NDumu53Rd0NtDOoP'}}).items() %}
- grep {{ values.user }} /opt/so/saltstack/local/pillar/elasticsearch/auth.sls
{% endfor%}
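A quick way to confirm the generated pillar is present and being picked up on a manager (standard salt-call invocations, not new tooling from this PR):

    salt-call pillar.get elasticsearch:auth:enabled
    salt-call pillar.get elasticsearch:auth:users:so_elastic_user:user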
1  salt/elasticsearch/files/curl.config.template  Normal file
@@ -0,0 +1 @@
user = "{{ salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:user') }}:{{ salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:pass') }}"
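When rendered, the template produces a standard curl config file, which curl reads via -K so the credentials never appear on the command line or in the process list (the password below is made up):

    # /opt/so/conf/elasticsearch/curl.config
    #   user = "so_elastic:kD93jfn2Lq8Rv0XcT4mZ"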
@@ -30,11 +30,13 @@ xpack.security.http.ssl.client_authentication: none
xpack.security.http.ssl.key: /usr/share/elasticsearch/config/elasticsearch.key
xpack.security.http.ssl.certificate: /usr/share/elasticsearch/config/elasticsearch.crt
xpack.security.http.ssl.certificate_authorities: /usr/share/elasticsearch/config/ca.crt
{% if not salt['pillar.get']('elasticsearch:auth:enabled', False) %}
xpack.security.authc:
anonymous:
username: anonymous_user
roles: superuser
authz_exception: true
{% endif %}
node.name: {{ grains.host }}
script.max_compilations_rate: 20000/1m
{%- if TRUECLUSTER is sameas true %}
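The anonymous block above is only emitted while auth is disabled, which is what keeps the existing unauthenticated access working. A rough way to observe the switch (assumes a manager node):

    curl -sk https://localhost:9200/ | jq .cluster_name   # succeeds while anonymous access is enabled
    curl -sk https://localhost:9200/ | jq .error.type     # once auth is enabled: "security_exception" without credentials
    so-elasticsearch-query / | jq .cluster_name           # works in both modes via the rendered ELASTICCURL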
@@ -27,7 +27,7 @@ echo -n "Waiting for ElasticSearch..."
COUNT=0
ELASTICSEARCH_CONNECTED="no"
while [[ "$COUNT" -le 240 ]]; do
curl ${ELASTICSEARCH_AUTH} -k --output /dev/null --silent --head --fail -L https://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT"
{{ ELASTICCURL }} -k --output /dev/null --silent --head --fail -L https://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT"
if [ $? -eq 0 ]; then
ELASTICSEARCH_CONNECTED="yes"
echo "connected!"

@@ -47,9 +47,9 @@ fi
cd ${ELASTICSEARCH_INGEST_PIPELINES}

echo "Loading pipelines..."
for i in *; do echo $i; RESPONSE=$(curl ${ELASTICSEARCH_AUTH} -k -XPUT -L https://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}/_ingest/pipeline/$i -H 'Content-Type: application/json' -d@$i 2>/dev/null); echo $RESPONSE; if [[ "$RESPONSE" == *"error"* ]]; then RETURN_CODE=1; fi; done
for i in *; do echo $i; RESPONSE=$({{ ELASTICCURL }} -k -XPUT -L https://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}/_ingest/pipeline/$i -H 'Content-Type: application/json' -d@$i 2>/dev/null); echo $RESPONSE; if [[ "$RESPONSE" == *"error"* ]]; then RETURN_CODE=1; fi; done
echo

cd - >/dev/null

exit $RETURN_CODE
exit $RETURN_CODE
@@ -35,6 +35,8 @@
{% endif %}

{% set TEMPLATES = salt['pillar.get']('elasticsearch:templates', {}) %}
{% from 'elasticsearch/auth.map.jinja' import ELASTICAUTH with context %}

vm.max_map_count:
sysctl.present:

@@ -169,6 +171,40 @@ eslogdir:
- group: 939
- makedirs: True

auth_users:
file.managed:
- name: /opt/so/conf/elasticsearch/users.tmp
- source: salt://elasticsearch/files/users
- user: 930
- group: 930
- mode: 600
- show_changes: False

auth_users_roles:
file.managed:
- name: /opt/so/conf/elasticsearch/users_roles.tmp
- source: salt://elasticsearch/files/users_roles
- user: 930
- group: 930
- mode: 600
- show_changes: False

auth_users_inode:
require:
- file: auth_users
cmd.run:
- name: cat /opt/so/conf/elasticsearch/users.tmp > /opt/so/conf/elasticsearch/users && chown 930:930 /opt/so/conf/elasticsearch/users && chmod 600 /opt/so/conf/elasticsearch/users
- onchanges:
- file: /opt/so/conf/elasticsearch/users.tmp

auth_users_roles_inode:
require:
- file: auth_users_roles
cmd.run:
- name: cat /opt/so/conf/elasticsearch/users_roles.tmp > /opt/so/conf/elasticsearch/users_roles && chown 930:930 /opt/so/conf/elasticsearch/users_roles && chmod 600 /opt/so/conf/elasticsearch/users_roles
- onchanges:
- file: /opt/so/conf/elasticsearch/users_roles.tmp

so-elasticsearch:
docker_container.running:
- image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-elasticsearch:{{ VERSION }}

@@ -213,6 +249,10 @@ so-elasticsearch:
- /etc/pki/elasticsearch.crt:/usr/share/elasticsearch/config/elasticsearch.crt:ro
- /etc/pki/elasticsearch.key:/usr/share/elasticsearch/config/elasticsearch.key:ro
- /etc/pki/elasticsearch.p12:/usr/share/elasticsearch/config/elasticsearch.p12:ro
{% if salt['pillar.get']('elasticsearch:auth:enabled', False) %}
- /opt/so/conf/elasticsearch/users_roles:/usr/share/elasticsearch/config/users_roles:ro
- /opt/so/conf/elasticsearch/users:/usr/share/elasticsearch/config/users:ro
{% endif %}
- watch:
- file: cacertz
- file: esyml

@@ -232,6 +272,8 @@ so-elasticsearch-pipelines-file:
- group: 939
- mode: 754
- template: jinja
- defaults:
ELASTICCURL: {{ ELASTICAUTH.elasticcurl }}

so-elasticsearch-pipelines:
cmd.run:
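The *_inode states presumably copy the synced .tmp files over the live files with cat (rather than moving them) so the files keep their inodes; the users and users_roles files are bind-mounted into the so-elasticsearch container above, and a rename would leave the container watching the old inode. Illustration only:

    stat -c %i /opt/so/conf/elasticsearch/users   # note the inode
    cat users.tmp > users                         # same inode; the mounted file sees the update
    mv users.tmp users                            # new inode; the bind mount would go stale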
@@ -3,6 +3,8 @@
{%- else %}
{%- set MANAGER = salt['grains.get']('master') %}
{%- endif %}
{%- set ES_USER = salt['pillar.get']('elasticsearch:auth:users:so_beats_user:user', '') %}
{%- set ES_PASS = salt['pillar.get']('elasticsearch:auth:users:so_beats_user:pass', '') %}

{%- set HOSTNAME = salt['grains.get']('host', '') %}

@@ -266,6 +268,8 @@ output.{{ type }}:
output.elasticsearch:
enabled: true
hosts: ["https://{{ MANAGER }}:9200"]
username: "{{ ES_USER }}"
password: "{{ ES_PASS }}"
ssl.certificate_authorities: ["/usr/share/filebeat/intraca.crt"]
pipelines:
- pipeline: "%{[module]}.%{[dataset]}"
@@ -3,8 +3,12 @@
{%- else %}
{%- set MANAGER = salt['grains.get']('master') %}
{%- endif %}
{%- set ES_USER = salt['pillar.get']('elasticsearch:auth:users:so_beats_user:user', '') %}
{%- set ES_PASS = salt['pillar.get']('elasticsearch:auth:users:so_beats_user:pass', '') %}

output.elasticsearch:
enabled: true
hosts: ["https://{{ MANAGER }}:9200"]
username: "{{ ES_USER }}"
password: "{{ ES_PASS }}"
ssl.certificate_authorities: ["/usr/share/filebeat/intraca.crt"]
@@ -1,3 +1,4 @@

# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by

@@ -83,6 +84,7 @@ filebeatmoduleconfsync:
- source: salt://filebeat/etc/module-setup.yml
- user: root
- group: root
- mode: 660
- template: jinja

sodefaults_module_conf:
@@ -1,6 +1,4 @@
#!/bin/bash
# {%- set FLEET_MANAGER = salt['pillar.get']('global:fleet_manager', False) -%}
# {%- set FLEET_NODE = salt['pillar.get']('global:fleet_node', False) -%}
# {%- set MANAGER = salt['pillar.get']('global:url_base', '') %}

. /usr/sbin/so-common

@@ -8,19 +6,10 @@
# Copy template file
cp /opt/so/conf/kibana/saved_objects.ndjson.template /opt/so/conf/kibana/saved_objects.ndjson

# {% if FLEET_NODE or FLEET_MANAGER %}
# Fleet IP
#sed -i "s/FLEETPLACEHOLDER/{{ MANAGER }}/g" /opt/so/conf/kibana/saved_objects.ndjson
# {% endif %}

# SOCtopus and Manager
sed -i "s/PLACEHOLDER/{{ MANAGER }}/g" /opt/so/conf/kibana/saved_objects.ndjson

wait_for_web_response "http://localhost:5601/app/kibana" "Elastic"
## This hackery will be removed if using Elastic Auth ##

# Let's snag a cookie from Kibana
THECOOKIE=$(curl -c - -X GET http://localhost:5601/ | grep sid | awk '{print $7}')
wait_for_web_response "http://localhost:5601/app/kibana" "Elastic" 300 "{{ ELASTICCURL }}"

# Load saved objects
curl -b "sid=$THECOOKIE" -L -X POST "localhost:5601/api/saved_objects/_import?overwrite=true" -H "kbn-xsrf: true" --form file=@/opt/so/conf/kibana/saved_objects.ndjson >> /opt/so/log/kibana/misc.log
{{ ELASTICCURL }} -L -X POST "localhost:5601/api/saved_objects/_import?overwrite=true" -H "kbn-xsrf: true" --form file=@/opt/so/conf/kibana/saved_objects.ndjson >> /opt/so/log/kibana/misc.log
@@ -1,20 +1,24 @@
---
# Default Kibana configuration from kibana-docker.
{%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- set ES_USER = salt['pillar.get']('elasticsearch:auth:users:so_kibana_user:user', '') %}
{%- set ES_PASS = salt['pillar.get']('elasticsearch:auth:users:so_kibana_user:pass', '') %}
server.name: kibana
server.host: "0"
server.basePath: /kibana
elasticsearch.hosts: [ "https://{{ ES }}:9200" ]
elasticsearch.ssl.verificationMode: none
#kibana.index: ".kibana"
#elasticsearch.username: elastic
#elasticsearch.password: changeme
elasticsearch.username: {{ ES_USER }}
elasticsearch.password: {{ ES_PASS }}
#xpack.monitoring.ui.container.elasticsearch.enabled: true
elasticsearch.requestTimeout: 90000
logging.dest: /var/log/kibana/kibana.log
telemetry.enabled: false
security.showInsecureClusterWarning: false
{% if not salt['pillar.get']('elasticsearch:auth:enabled', False) %}
xpack.security.authc.providers:
anonymous.anonymous1:
order: 0
credentials: "elasticsearch_anonymous_user"
{% endif %}
@@ -4,6 +4,7 @@
{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{% set MANAGER = salt['grains.get']('master') %}
{% from 'elasticsearch/auth.map.jinja' import ELASTICAUTH with context %}

# Add ES Group
kibanasearchgroup:

@@ -34,6 +35,7 @@ synckibanaconfig:
- source: salt://kibana/etc
- user: 932
- group: 939
- file_mode: 660
- template: jinja

kibanalogdir:

@@ -63,6 +65,8 @@ kibanabin:
- source: salt://kibana/bin/so-kibana-config-load
- mode: 755
- template: jinja
- defaults:
ELASTICCURL: {{ ELASTICAUTH.elasticcurl }}

# Start the kibana docker
so-kibana:

@@ -113,4 +117,4 @@ so-kibana-config-load:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed

{% endif %}
{% endif %}
@@ -78,6 +78,7 @@ ls_pipeline_{{PL}}_{{CONFIGFILE.split('.')[0] | replace("/","_") }}:
{% endif %}
- user: 931
- group: 939
- mode: 660
- makedirs: True
{% endfor %}
@@ -3,11 +3,15 @@
{%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %}
{%- set ES_USER = salt['pillar.get']('elasticsearch:auth:users:so_logstash_user:user', '') %}
{%- set ES_PASS = salt['pillar.get']('elasticsearch:auth:users:so_logstash_user:pass', '') %}
output {
if [module] =~ "zeek" and "import" not in [tags] {
elasticsearch {
pipeline => "%{module}.%{dataset}"
hosts => "{{ ES }}"
user => "{{ ES_USER }}"
password => "{{ ES_PASS }}"
index => "so-zeek"
template_name => "so-zeek"
template => "/templates/so-zeek-template.json"

@@ -3,11 +3,15 @@
{%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %}
{%- set ES_USER = salt['pillar.get']('elasticsearch:auth:users:so_logstash_user:user', '') %}
{%- set ES_PASS = salt['pillar.get']('elasticsearch:auth:users:so_logstash_user:pass', '') %}
output {
if "import" in [tags] {
elasticsearch {
pipeline => "%{module}.%{dataset}"
hosts => "{{ ES }}"
user => "{{ ES_USER }}"
password => "{{ ES_PASS }}"
index => "so-import"
template_name => "so-import"
template => "/templates/so-import-template.json"

@@ -3,10 +3,14 @@
{%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %}
{%- set ES_USER = salt['pillar.get']('elasticsearch:auth:users:so_logstash_user:user', '') %}
{%- set ES_PASS = salt['pillar.get']('elasticsearch:auth:users:so_logstash_user:pass', '') %}
output {
if [event_type] == "sflow" {
elasticsearch {
hosts => "{{ ES }}"
user => "{{ ES_USER }}"
password => "{{ ES_PASS }}"
index => "so-flow"
template_name => "so-flow"
template => "/templates/so-flow-template.json"
@@ -3,10 +3,14 @@
{%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %}
{%- set ES_USER = salt['pillar.get']('elasticsearch:auth:users:so_logstash_user:user', '') %}
{%- set ES_PASS = salt['pillar.get']('elasticsearch:auth:users:so_logstash_user:pass', '') %}
output {
if [event_type] == "ids" and "import" not in [tags] {
elasticsearch {
hosts => "{{ ES }}"
user => "{{ ES_USER }}"
password => "{{ ES_PASS }}"
index => "so-ids"
template_name => "so-ids"
template => "/templates/so-ids-template.json"

@@ -3,11 +3,15 @@
{%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %}
{%- set ES_USER = salt['pillar.get']('elasticsearch:auth:users:so_logstash_user:user', '') %}
{%- set ES_PASS = salt['pillar.get']('elasticsearch:auth:users:so_logstash_user:pass', '') %}
output {
if [module] =~ "syslog" {
elasticsearch {
pipeline => "%{module}"
hosts => "{{ ES }}"
user => "{{ ES_USER }}"
password => "{{ ES_PASS }}"
index => "so-syslog"
template_name => "so-syslog"
template => "/templates/so-syslog-template.json"

@@ -3,18 +3,22 @@
{%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %}
{%- set ES_USER = salt['pillar.get']('elasticsearch:auth:users:so_logstash_user:user', '') %}
{%- set ES_PASS = salt['pillar.get']('elasticsearch:auth:users:so_logstash_user:pass', '') %}
output {
if [metadata][pipeline] {
elasticsearch {
id => "filebeat_modules_metadata_pipeline"
pipeline => "%{[metadata][pipeline]}"
hosts => "{{ ES }}"
index => "so-%{[event][module]}-%{+YYYY.MM.dd}"
template_name => "so-common"
template => "/templates/so-common-template.json"
template_overwrite => true
ssl => true
ssl_certificate_verification => false
}
}
if [metadata][pipeline] {
elasticsearch {
id => "filebeat_modules_metadata_pipeline"
pipeline => "%{[metadata][pipeline]}"
hosts => "{{ ES }}"
user => "{{ ES_USER }}"
password => "{{ ES_PASS }}"
index => "so-%{[event][module]}-%{+YYYY.MM.dd}"
template_name => "so-common"
template => "/templates/so-common-template.json"
template_overwrite => true
ssl => true
ssl_certificate_verification => false
}
}
}
@@ -3,11 +3,15 @@
{%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %}
{%- set ES_USER = salt['pillar.get']('elasticsearch:auth:users:so_logstash_user:user', '') %}
{%- set ES_PASS = salt['pillar.get']('elasticsearch:auth:users:so_logstash_user:pass', '') %}
output {
if [module] =~ "osquery" and "live_query" not in [dataset] {
elasticsearch {
pipeline => "%{module}.%{dataset}"
hosts => "{{ ES }}"
user => "{{ ES_USER }}"
password => "{{ ES_PASS }}"
index => "so-osquery"
template_name => "so-osquery"
template => "/templates/so-osquery-template.json"

@@ -3,6 +3,8 @@
{%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %}
{%- set ES_USER = salt['pillar.get']('elasticsearch:auth:users:so_logstash_user:user', '') %}
{%- set ES_PASS = salt['pillar.get']('elasticsearch:auth:users:so_logstash_user:pass', '') %}
{% set FEATURES = salt['pillar.get']('elastic:features', False) %}

filter {

@@ -30,6 +32,8 @@ output {
elasticsearch {
pipeline => "osquery.live_query"
hosts => "{{ ES }}"
user => "{{ ES_USER }}"
password => "{{ ES_PASS }}"
index => "so-osquery"
template_name => "so-osquery"
template => "/templates/so-osquery-template.json"

@@ -3,10 +3,14 @@
{%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %}
{%- set ES_USER = salt['pillar.get']('elasticsearch:auth:users:so_logstash_user:user', '') %}
{%- set ES_PASS = salt['pillar.get']('elasticsearch:auth:users:so_logstash_user:pass', '') %}
output {
if [dataset] =~ "firewall" {
elasticsearch {
hosts => "{{ ES }}"
user => "{{ ES_USER }}"
password => "{{ ES_PASS }}"
index => "so-firewall"
template_name => "so-firewall"
template => "/templates/so-firewall-template.json"
@@ -3,11 +3,15 @@
|
||||
{%- else %}
|
||||
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
|
||||
{%- endif %}
|
||||
{%- set ES_USER = salt['pillar.get']('elasticsearch:auth:users:so_logstash_user:user', '') %}
|
||||
{%- set ES_PASS = salt['pillar.get']('elasticsearch:auth:users:so_logstash_user:pass', '') %}
|
||||
output {
|
||||
if [module] =~ "suricata" and "import" not in [tags] {
|
||||
elasticsearch {
|
||||
pipeline => "%{module}.%{dataset}"
|
||||
hosts => "{{ ES }}"
|
||||
user => "{{ ES_USER }}"
|
||||
password => "{{ ES_PASS }}"
|
||||
index => "so-ids"
|
||||
template_name => "so-ids"
|
||||
template => "/templates/so-ids-template.json"
|
||||
|
||||
@@ -3,11 +3,15 @@
|
||||
{%- else %}
|
||||
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
|
||||
{%- endif %}
|
||||
{%- set ES_USER = salt['pillar.get']('elasticsearch:auth:users:so_logstash_user:user', '') %}
|
||||
{%- set ES_PASS = salt['pillar.get']('elasticsearch:auth:users:so_logstash_user:pass', '') %}
|
||||
output {
|
||||
if "beat-ext" in [tags] and "import" not in [tags] {
|
||||
elasticsearch {
|
||||
pipeline => "beats.common"
|
||||
hosts => "{{ ES }}"
|
||||
user => "{{ ES_USER }}"
|
||||
password => "{{ ES_PASS }}"
|
||||
index => "so-beats"
|
||||
template_name => "so-beats"
|
||||
template => "/templates/so-beats-template.json"
|
||||
|
||||
@@ -3,11 +3,15 @@
|
||||
{%- else %}
|
||||
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
|
||||
{%- endif %}
|
||||
{%- set ES_USER = salt['pillar.get']('elasticsearch:auth:users:so_logstash_user:user', '') %}
|
||||
{%- set ES_PASS = salt['pillar.get']('elasticsearch:auth:users:so_logstash_user:pass', '') %}
|
||||
output {
|
||||
if [module] =~ "ossec" {
|
||||
elasticsearch {
|
||||
pipeline => "%{module}"
|
||||
hosts => "{{ ES }}"
|
||||
user => "{{ ES_USER }}"
|
||||
password => "{{ ES_PASS }}"
|
||||
index => "so-ossec"
|
||||
template_name => "so-ossec"
|
||||
template => "/templates/so-ossec-template.json"
|
||||
|
||||
@@ -3,11 +3,15 @@
|
||||
{%- else %}
|
||||
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
|
||||
{%- endif %}
|
||||
{%- set ES_USER = salt['pillar.get']('elasticsearch:auth:users:so_logstash_user:user', '') %}
|
||||
{%- set ES_PASS = salt['pillar.get']('elasticsearch:auth:users:so_logstash_user:pass', '') %}
|
||||
output {
|
||||
if [module] =~ "strelka" {
|
||||
elasticsearch {
|
||||
pipeline => "%{module}.%{dataset}"
|
||||
hosts => "{{ ES }}"
|
||||
user => "{{ ES_USER }}"
|
||||
password => "{{ ES_PASS }}"
|
||||
index => "so-strelka"
|
||||
template_name => "so-strelka"
|
||||
template => "/templates/so-strelka-template.json"
|
||||
|
||||
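Note: each of these Logstash output templates now reads its Elasticsearch credentials from the elasticsearch:auth:users:so_logstash_user pillar keys referenced above. A minimal sketch of the pillar layout those lookups assume; the key names come from the lookups themselves, while the values shown are placeholders, not the generated ones:

elasticsearch:
  auth:
    users:
      so_logstash_user:
        user: so_logstash            # placeholder user name
        pass: <generated-password>   # placeholder value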
@@ -20,6 +20,10 @@
{% set MANAGER = salt['grains.get']('master') %}
{% set STRELKA_RULES = salt['pillar.get']('strelka:rules', '1') %}

include:
- elasticsearch.auth
- salt.minion

socore_own_saltstack:
file.directory:
- name: /opt/so/saltstack
@@ -102,6 +106,26 @@ strelka_yara_update:
- name: '/usr/sbin/so-yara-update >> /nsm/strelka/log/yara-update.log 2>&1'
- hour: '7'
- minute: '1'

elastic_curl_config_distributed:
file.managed:
- name: /opt/so/saltstack/local/salt/elasticsearch/curl.config
- source: salt://elasticsearch/files/curl.config.template
- template: jinja
- mode: 600
- show_changes: False

# Must run before elasticsearch docker container is started!
syncesusers:
cmd.run:
- name: so-user sync
- env:
- SKIP_STATE_APPLY: 'true'
- creates:
- /opt/so/saltstack/local/salt/elasticsearch/files/users
- /opt/so/saltstack/local/salt/elasticsearch/files/users_roles
- show_changes: False

{% else %}

{{sls}}_state_not_allowed:
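Note: the curl.config managed here (and under /opt/so/conf/elasticsearch in the common state) is a standard curl options file, read with curl -K. The syncesusers state's creates: entries mean so-user sync only runs until the users and users_roles files exist. A minimal sketch of what the rendered curl.config.template is assumed to produce, with an illustrative credential:

# Illustrative rendering only; the real user name and password come from the elasticsearch:auth pillar
user = "so_elastic:<so_elastic_user password>"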
@@ -149,6 +149,12 @@ http {
root /opt/socore/html;
index index.html;

add_header Content-Security-Policy "default-src 'self' 'unsafe-inline' 'unsafe-eval' https: data:; frame-ancestors 'self'";
add_header X-Frame-Options SAMEORIGIN;
add_header X-XSS-Protection "1; mode=block";
add_header X-Content-Type-Options nosniff;
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains; preload";

ssl_certificate "/etc/pki/nginx/server.crt";
ssl_certificate_key "/etc/pki/nginx/server.key";
ssl_session_cache shared:SSL:1m;
@@ -1,3 +1,10 @@
{% from 'salt/map.jinja' import PYINOTIFYPACKAGE with context%}
{% from 'salt/map.jinja' import PYTHONINSTALLER with context%}

patch_package:
pkg.installed:
- name: patch
- name: patch

pyinotify:
{{PYTHONINSTALLER}}.installed:
- name: {{ PYINOTIFYPACKAGE }}

@@ -11,6 +11,7 @@
{% set PYTHON3INFLUX= 'influxdb == ' ~ PYTHONINFLUXVERSION %}
{% set PYTHON3INFLUXDEPS= ['certifi', 'chardet', 'python-dateutil', 'pytz', 'requests'] %}
{% set PYTHONINSTALLER = 'pip' %}
{% set PYINOTIFYPACKAGE = 'pyinotify' %}
{% else %}
{% set SPLITCHAR = '-' %}
{% set SALTNOTHELD = salt['cmd.run']('yum versionlock list | grep -q salt ; echo $?', python_shell=True) %}
@@ -21,6 +22,7 @@
{% set PYTHON3INFLUX= 'securityonion-python3-influxdb' %}
{% set PYTHON3INFLUXDEPS= ['python36-certifi', 'python36-chardet', 'python36-dateutil', 'python36-pytz', 'python36-requests'] %}
{% set PYTHONINSTALLER = 'pkg' %}
{% set PYINOTIFYPACKAGE = 'securityonion-python3-pyinotify' %}
{% endif %}

{% set INSTALLEDSALTVERSION = salt['pkg.version']('salt-minion').split(SPLITCHAR)[0] %}
@@ -33,4 +35,4 @@
{% endif %}
{% else %}
{% set UPGRADECOMMAND = 'echo Already running Salt Minion version ' ~ SALTVERSION %}
{% endif %}
{% endif %}
@@ -41,12 +41,8 @@ serve:
base_url: https://{{ WEBACCESS }}/kratos/

hashers:
argon2:
parallelism: 2
memory: 16384
iterations: 3
salt_length: 16
key_length: 32
bcrypt:
cost: 12

identity:
default_schema_url: file:///kratos-conf/schema.json
@@ -15,8 +15,8 @@
]},
{ "name": "actionPcap", "description": "actionPcapHelp", "icon": "fa-stream", "target": "",
"links": [
"/joblookup?esid={:soc_id}",
"/joblookup?ncid={:network.community_id}"
"/joblookup?esid={:soc_id}&time={:@timestamp}",
"/joblookup?ncid={:network.community_id}&time={:@timestamp}"
]},
{ "name": "actionCyberChef", "description": "actionCyberChefHelp", "icon": "fas fa-bread-slice", "target": "_blank",
"links": [
@@ -18,6 +18,8 @@
{%- import_json "soc/files/soc/menu.actions.json" as menu_actions %}
{%- import_json "soc/files/soc/tools.json" as tools %}
{%- set DNET = salt['pillar.get']('global:dockernet', '172.17.0.0') %}
{%- set ES_USER = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:user', '') %}
{%- set ES_PASS = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:pass', '') %}

{
"logFilename": "/opt/sensoroni/logs/sensoroni-server.log",
@@ -47,8 +49,8 @@
{%- endfor %}
],
{%- endif %}
"username": "",
"password": "",
"username": "{{ ES_USER }}",
"password": "{{ ES_PASS }}",
"cacheMs": {{ ES_FIELDCAPS_CACHE }},
"verifyCert": false,
"timeoutMs": {{ API_TIMEOUT }}
@@ -62,6 +62,11 @@ soccustom:
- mode: 600
- template: jinja

sosyncusers:
cron.present:
- user: root
- name: 'PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin:/root/bin STALE_MIN=1 /usr/sbin/so-user sync &>> /opt/so/log/soc/sync.log'

so-soc:
docker_container.running:
- image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-soc:{{ VERSION }}
@@ -3,13 +3,14 @@
{%- set HIVEKEY = salt['pillar.get']('global:hivekey', '') %}
{%- set CORTEXKEY = salt['pillar.get']('global:cortexorguserkey', '') %}
{%- set PLAYBOOK_KEY = salt['pillar.get']('playbook:api_key', '') %}

{%- set ES_USER = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:user', '') %}
{%- set ES_PASS = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:pass', '') %}

[es]
es_url = https://{{MANAGER}}:9200
es_ip = {{MANAGER}}
es_user =
es_pass =
es_user = {{ ES_USER }}
es_pass = {{ ES_PASS }}
es_index_pattern = so-*
es_verifycert = no
@@ -1,7 +1,11 @@
{% set ES = salt['pillar.get']('global:managerip', '') %}
{%- set ES_USER = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:user', '') %}
{%- set ES_PASS = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:pass', '') %}

alert: modules.so.playbook-es.PlaybookESAlerter
elasticsearch_host: "{{ ES }}:9200"
elasticsearch_user: "{{ ES_USER }}"
elasticsearch_pass: "{{ ES_PASS }}"
play_title: ""
play_url: "https://{{ ES }}/playbook/issues/6000"
sigma_level: ""

@@ -1,8 +1,13 @@
{% set es = salt['pillar.get']('global:url_base', '') %}
{%- set ES_USER = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:user', '') %}
{%- set ES_PASS = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:pass', '') %}

alert:
- "modules.so.playbook-es.PlaybookESAlerter"

elasticsearch_host: "{{ es }}:9200"
elasticsearch_user: "{{ ES_USER }}"
elasticsearch_pass: "{{ ES_PASS }}"
play_title: ""
play_id: ""
event.module: "playbook"

@@ -1,8 +1,13 @@
{% set es = salt['pillar.get']('global:url_base', '') %}
{%- set ES_USER = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:user', '') %}
{%- set ES_PASS = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:pass', '') %}

alert:
- "modules.so.playbook-es.PlaybookESAlerter"

elasticsearch_host: "{{ es }}:9200"
elasticsearch_user: "{{ ES_USER }}"
elasticsearch_pass: "{{ ES_PASS }}"
play_title: ""
event.module: "playbook"
event.dataset: "alert"
@@ -21,6 +21,7 @@ soctopus-sync:
- source: salt://soctopus/files/templates
- user: 939
- group: 939
- file_mode: 600
- template: jinja

soctopusconf:
@@ -43,6 +44,7 @@ playbookrulesdir:
- name: /opt/so/rules/elastalert/playbook
- user: 939
- group: 939
- mode: 660
- makedirs: True

playbookrulessync:
@@ -14,6 +14,8 @@
# for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR)

{%- set MANAGER = salt['grains.get']('master') %}
{%- set ES_USER = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:user', '') %}
{%- set ES_PASS = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:pass', '') %}
{% set NODEIP = salt['pillar.get']('elasticsearch:mainip', '') %}
{% set HELIX_API_KEY = salt['pillar.get']('fireeye:helix:api_key', '') %}
{% set UNIQUEID = salt['pillar.get']('sensor:uniqueid', '') %}
@@ -620,10 +622,14 @@
{% if grains['role'] in ['so-manager', 'so-eval', 'so-managersearch', 'so-standalone'] %}
[[inputs.elasticsearch]]
servers = ["https://{{ MANAGER }}:9200"]
username = "{{ ES_USER }}"
password = "{{ ES_PASS }}"
insecure_skip_verify = true
{% elif grains['role'] in ['so-node', 'so-hotnode', 'so-warmnode', 'so-heavynode'] %}
[[inputs.elasticsearch]]
servers = ["https://{{ NODEIP }}:9200"]
username = "{{ ES_USER }}"
password = "{{ ES_PASS }}"
insecure_skip_verify = true
{% endif %}
@@ -38,6 +38,7 @@ tgrafconf:
- name: /opt/so/conf/telegraf/etc/telegraf.conf
- user: 939
- group: 939
- mode: 660
- template: jinja
- source: salt://telegraf/etc/telegraf.conf
@@ -8,7 +8,7 @@ echo -n "Waiting for ElasticSearch..."
COUNT=0
ELASTICSEARCH_CONNECTED="no"
while [[ "$COUNT" -le 30 ]]; do
curl -k --output /dev/null --silent --head --fail -L https://{{ ES }}:9200
{{ ELASTICCURL }} -k --output /dev/null --silent --head --fail -L https://{{ ES }}:9200
if [ $? -eq 0 ]; then
ELASTICSEARCH_CONNECTED="yes"
echo "connected!"
@@ -28,7 +28,7 @@ if [ "$ELASTICSEARCH_CONNECTED" == "no" ]; then
fi

echo "Applying cross cluster search config..."
curl -s -k -XPUT -L https://{{ ES }}:9200/_cluster/settings \
{{ ELASTICCURL }} -s -k -XPUT -L https://{{ ES }}:9200/_cluster/settings \
-H 'Content-Type: application/json' \
-d "{\"persistent\": {\"search\": {\"remote\": {\"{{ MANAGER }}\": {\"seeds\": [\"127.0.0.1:9300\"]}}}}}"

@@ -36,7 +36,7 @@ echo "Applying cross cluster search config..."
{%- if TRUECLUSTER is sameas false %}
{%- if salt['pillar.get']('nodestab', {}) %}
{%- for SN, SNDATA in salt['pillar.get']('nodestab', {}).items() %}
curl -s -k -XPUT -L https://{{ ES }}:9200/_cluster/settings -H'Content-Type: application/json' -d '{"persistent": {"search": {"remote": {"{{ SN }}": {"skip_unavailable": "true", "seeds": ["{{ SN.split('_')|first }}:9300"]}}}}}'
{{ ELASTICCURL }} -s -k -XPUT -L https://{{ ES }}:9200/_cluster/settings -H'Content-Type: application/json' -d '{"persistent": {"search": {"remote": {"{{ SN }}": {"skip_unavailable": "true", "seeds": ["{{ SN.split('_')|first }}:9300"]}}}}}'
{%- endfor %}
{%- endif %}
{%- endif %}

@@ -6,7 +6,7 @@ echo -n "Waiting for ElasticSearch..."
COUNT=0
ELASTICSEARCH_CONNECTED="no"
while [[ "$COUNT" -le 30 ]]; do
curl -k --output /dev/null --silent --head --fail -L https://{{ ES }}:9200
{{ ELASTICCURL }} -k --output /dev/null --silent --head --fail -L https://{{ ES }}:9200
if [ $? -eq 0 ]; then
ELASTICSEARCH_CONNECTED="yes"
echo "connected!"
@@ -26,6 +26,6 @@ if [ "$ELASTICSEARCH_CONNECTED" == "no" ]; then
fi

echo "Applying cross cluster search config..."
curl -s -k -XPUT -L https://{{ ES }}:9200/_cluster/settings \
{{ ELASTICCURL }} -s -k -XPUT -L https://{{ ES }}:9200/_cluster/settings \
-H 'Content-Type: application/json' \
-d "{\"persistent\": {\"search\": {\"remote\": {\"{{ grains.host }}\": {\"seeds\": [\"127.0.0.1:9300\"]}}}}}"
@@ -1,27 +1,31 @@
{% from 'allowed_states.map.jinja' import allowed_states %}

{% if sls in allowed_states %}
{% from 'elasticsearch/auth.map.jinja' import ELASTICAUTH with context %}

# This state is for checking things
{% if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone'] %}
{% if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone'] %}
# Make sure Cross Cluster is good. Will need some logic once we have hot/warm
crossclusterson:
cmd.script:
- shell: /bin/bash
- cwd: /opt/so
- runas: socore
- source: salt://utility/bin/crossthestreams
- template: jinja
- defaults:
ELASTICCURL: {{ ELASTICAUTH.elasticcurl }}

{% endif %}
{% if grains['role'] in ['so-eval', 'so-import'] %}
{% endif %}
{% if grains['role'] in ['so-eval', 'so-import'] %}
fixsearch:
cmd.script:
- shell: /bin/bash
- cwd: /opt/so
- runas: socore
- source: salt://utility/bin/eval
- template: jinja
{% endif %}
- defaults:
ELASTICCURL: {{ ELASTICAUTH.elasticcurl }}
{% endif %}

{% else %}
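Note: ELASTICAUTH.elasticcurl, passed into these scripts as the ELASTICCURL default, is what swaps the plain curl calls above for authenticated ones. A minimal sketch of what elasticsearch/auth.map.jinja is assumed to export; the pillar key and exact command are illustrative, though the curl.config path matches the file managed in the common state:

{# Illustrative sketch only -- not the actual map file #}
{% if salt['pillar.get']('elasticsearch:auth:enabled', False) %}
{%   set ELASTICAUTH = {'elasticcurl': 'curl -K /opt/so/conf/elasticsearch/curl.config'} %}
{% else %}
{%   set ELASTICAUTH = {'elasticcurl': 'curl'} %}
{% endif %}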
@@ -120,7 +120,9 @@ add_web_user() {
wait_for_file /opt/so/conf/kratos/db/db.sqlite 30 5
{
echo "Attempting to add administrator user for web interface...";
export SKIP_STATE_APPLY=true
echo "$WEBPASSWD1" | /usr/sbin/so-user add "$WEBUSER";
unset SKIP_STATE_APPLY
echo "Add user result: $?";
} >> "/root/so-user-add.log" 2>&1
}
@@ -2080,7 +2082,7 @@ saltify() {
'MANAGER' | 'EVAL' | 'MANAGERSEARCH' | 'FLEET' | 'HELIXSENSOR' | 'STANDALONE'| 'IMPORT')
reserve_group_ids >> "$setup_log" 2>&1
if [[ ! $is_iso ]]; then
logCmd "yum -y install sqlite argon2 curl mariadb-devel"
logCmd "yum -y install sqlite curl mariadb-devel"
fi
# Download Ubuntu Keys in case manager updates = 1
mkdir -p /opt/so/gpg >> "$setup_log" 2>&1
@@ -2176,7 +2178,7 @@ saltify() {

retry 50 10 "apt-get update" >> "$setup_log" 2>&1 || exit 1
set_progress_str 6 'Installing various dependencies'
retry 50 10 "apt-get -y install sqlite3 argon2 libssl-dev" >> "$setup_log" 2>&1 || exit 1
retry 50 10 "apt-get -y install sqlite3 libssl-dev" >> "$setup_log" 2>&1 || exit 1
set_progress_str 7 'Installing salt-master'
retry 50 10 "apt-get -y install salt-master=3003+ds-1" >> "$setup_log" 2>&1 || exit 1
retry 50 10 "apt-mark hold salt-master" >> "$setup_log" 2>&1 || exit 1

@@ -751,6 +751,7 @@ echo "1" > /root/accept_changes

set_progress_str 60 "$(print_salt_state_apply 'manager')"
salt-call state.apply -l info manager >> $setup_log 2>&1
ELASTIC_AUTH_SKIP_HIGHSTATE=true bash /opt/so/saltstack/default/salt/common/tools/sbin/so-elastic-auth
fi

set_progress_str 61 "$(print_salt_state_apply 'firewall')"
@@ -916,7 +917,7 @@
checkin_at_boot >> $setup_log 2>&1

set_progress_str 95 'Verifying setup'
salt-call -l info state.highstate >> $setup_log 2>&1
salt-call -l info state.highstate queue=True >> $setup_log 2>&1

} | progress