Mirror of https://github.com/Security-Onion-Solutions/securityonion.git (synced 2025-12-06 09:12:45 +01:00)
[fix] Add -L option to curl to respect redirects
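Background for the change: curl does not follow HTTP 3xx redirects by default, so a script pointed at an endpoint that now answers with a redirect would act on the redirect response instead of the real API reply. The -L/--location flag makes curl re-issue the request against the Location header target. A minimal sketch of the difference, using a hypothetical redirecting URL rather than any of the endpoints touched below:

# Without -L, curl stops at the redirect response (e.g. a 301/302):
curl -s -o /dev/null -w '%{http_code}\n' https://example.com/old-path      # prints e.g. 301
# With -L, curl re-issues the request against the Location target:
curl -s -L -o /dev/null -w '%{http_code}\n' https://example.com/old-path   # prints e.g. 200
# -L follows up to 50 redirects by default; --max-redirs bounds the hop count explicitly:
curl -s -L --max-redirs 5 https://example.com/old-path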
@@ -43,7 +43,7 @@ fi
 read -rs CORTEX_PASS
 
 # Create new user in Cortex
-resp=$(curl -sk -XPOST -H "Authorization: Bearer $CORTEX_KEY" -H "Content-Type: application/json" "https://$CORTEX_IP/cortex/api/user" -d "{\"name\": \"$CORTEX_USER\",\"roles\": [\"read\",\"analyze\",\"orgadmin\"],\"organization\": \"$CORTEX_ORG_NAME\",\"login\": \"$CORTEX_USER\",\"password\" : \"$CORTEX_PASS\" }")
+resp=$(curl -sk -XPOST -H "Authorization: Bearer $CORTEX_KEY" -H "Content-Type: application/json" -L "https://$CORTEX_IP/cortex/api/user" -d "{\"name\": \"$CORTEX_USER\",\"roles\": [\"read\",\"analyze\",\"orgadmin\"],\"organization\": \"$CORTEX_ORG_NAME\",\"login\": \"$CORTEX_USER\",\"password\" : \"$CORTEX_PASS\" }")
 if [[ "$resp" =~ \"status\":\"Ok\" ]]; then
 echo "Successfully added user to Cortex."
 else
@@ -46,7 +46,7 @@ case "${2^^}" in
 ;;
 esac
 
-resp=$(curl -sk -XPATCH -H "Authorization: Bearer $CORTEX_KEY" -H "Content-Type: application/json" "https://$CORTEX_IP/cortex/api/user/${CORTEX_USER}" -d "{\"status\":\"${CORTEX_STATUS}\" }")
+resp=$(curl -sk -XPATCH -H "Authorization: Bearer $CORTEX_KEY" -H "Content-Type: application/json" -L "https://$CORTEX_IP/cortex/api/user/${CORTEX_USER}" -d "{\"status\":\"${CORTEX_STATUS}\" }")
 if [[ "$resp" =~ \"status\":\"Locked\" || "$resp" =~ \"status\":\"Ok\" ]]; then
 echo "Successfully updated user in Cortex."
 else
@@ -51,9 +51,9 @@ if [ $SKIP -ne 1 ]; then
 # List indices
 echo
 {% if grains['role'] in ['so-node','so-heavynode'] %}
-curl -k https://{{ NODEIP }}:9200/_cat/indices?v
+curl -k -L https://{{ NODEIP }}:9200/_cat/indices?v
 {% else %}
-curl {{ NODEIP }}:9200/_cat/indices?v
+curl -L {{ NODEIP }}:9200/_cat/indices?v
 {% endif %}
 echo
 # Inform user we are about to delete all data
@@ -94,16 +94,16 @@ fi
 echo "Deleting data..."
 
 {% if grains['role'] in ['so-node','so-heavynode'] %}
-INDXS=$(curl -s -XGET -k https://{{ NODEIP }}:9200/_cat/indices?v | egrep 'logstash|elastalert|so-' | awk '{ print $3 }')
+INDXS=$(curl -s -XGET -k -L https://{{ NODEIP }}:9200/_cat/indices?v | egrep 'logstash|elastalert|so-' | awk '{ print $3 }')
 {% else %}
-INDXS=$(curl -s -XGET {{ NODEIP }}:9200/_cat/indices?v | egrep 'logstash|elastalert|so-' | awk '{ print $3 }')
+INDXS=$(curl -s -XGET -L {{ NODEIP }}:9200/_cat/indices?v | egrep 'logstash|elastalert|so-' | awk '{ print $3 }')
 {% endif %}
 for INDX in ${INDXS}
 do
 {% if grains['role'] in ['so-node','so-heavynode'] %}
-curl -XDELETE -k https://"{{ NODEIP }}:9200/${INDX}" > /dev/null 2>&1
+curl -XDELETE -k -L https://"{{ NODEIP }}:9200/${INDX}" > /dev/null 2>&1
 {% else %}
-curl -XDELETE "{{ NODEIP }}:9200/${INDX}" > /dev/null 2>&1
+curl -XDELETE -L "{{ NODEIP }}:9200/${INDX}" > /dev/null 2>&1
 {% endif %}
 done
 
@@ -22,5 +22,5 @@ THEHIVEESPORT=9400
 echo "Removing read only attributes for indices..."
 echo
 for p in $ESPORT $THEHIVEESPORT; do
-curl -XPUT -H "Content-Type: application/json" http://$IP:$p/_all/_settings -d '{"index.blocks.read_only_allow_delete": null}' 2>&1 | if grep -q ack; then echo "Index settings updated..."; else echo "There was any issue updating the read-only attribute. Please ensure Elasticsearch is running.";fi;
+curl -XPUT -H "Content-Type: application/json" -L http://$IP:$p/_all/_settings -d '{"index.blocks.read_only_allow_delete": null}' 2>&1 | if grep -q ack; then echo "Index settings updated..."; else echo "There was any issue updating the read-only attribute. Please ensure Elasticsearch is running.";fi;
 done
@@ -20,14 +20,14 @@
 
 if [ "$1" == "" ]; then
 {% if grains['role'] in ['so-node','so-heavynode'] %}
-curl -s -k https://{{ NODEIP }}:9200/_nodes/stats | jq .nodes | jq ".[] | .ingest.pipelines"
+curl -s -k -L https://{{ NODEIP }}:9200/_nodes/stats | jq .nodes | jq ".[] | .ingest.pipelines"
 {% else %}
-curl -s {{ NODEIP }}:9200/_nodes/stats | jq .nodes | jq ".[] | .ingest.pipelines"
+curl -s -L {{ NODEIP }}:9200/_nodes/stats | jq .nodes | jq ".[] | .ingest.pipelines"
 {% endif %}
 else
 {% if grains['role'] in ['so-node','so-heavynode'] %}
-curl -s -k https://{{ NODEIP }}:9200/_nodes/stats | jq .nodes | jq ".[] | .ingest.pipelines.\"$1\""
+curl -s -k -L https://{{ NODEIP }}:9200/_nodes/stats | jq .nodes | jq ".[] | .ingest.pipelines.\"$1\""
 {% else %}
-curl -s {{ NODEIP }}:9200/_nodes/stats | jq .nodes | jq ".[] | .ingest.pipelines.\"$1\""
+curl -s -L {{ NODEIP }}:9200/_nodes/stats | jq .nodes | jq ".[] | .ingest.pipelines.\"$1\""
 {% endif %}
 fi
@@ -18,14 +18,14 @@
 . /usr/sbin/so-common
 if [ "$1" == "" ]; then
 {% if grains['role'] in ['so-node','so-heavynode'] %}
-curl -s -k https://{{ NODEIP }}:9200/_ingest/pipeline/* | jq 'keys'
+curl -s -k -L https://{{ NODEIP }}:9200/_ingest/pipeline/* | jq 'keys'
 {% else %}
-curl -s {{ NODEIP }}:9200/_ingest/pipeline/* | jq 'keys'
+curl -s -L {{ NODEIP }}:9200/_ingest/pipeline/* | jq 'keys'
 {% endif %}
 else
 {% if grains['role'] in ['so-node','so-heavynode'] %}
-curl -s -k https://{{ NODEIP }}:9200/_ingest/pipeline/$1 | jq
+curl -s -k -L https://{{ NODEIP }}:9200/_ingest/pipeline/$1 | jq
 {% else %}
-curl -s {{ NODEIP }}:9200/_ingest/pipeline/$1 | jq
+curl -s -L {{ NODEIP }}:9200/_ingest/pipeline/$1 | jq
 {% endif %}
 fi
@@ -18,14 +18,14 @@
 . /usr/sbin/so-common
 if [ "$1" == "" ]; then
 {% if grains['role'] in ['so-node','so-heavynode'] %}
-curl -s -k https://{{ NODEIP }}:9200/_template/* | jq 'keys'
+curl -s -k -L https://{{ NODEIP }}:9200/_template/* | jq 'keys'
 {% else %}
-curl -s {{ NODEIP }}:9200/_template/* | jq 'keys'
+curl -s -L {{ NODEIP }}:9200/_template/* | jq 'keys'
 {% endif %}
 else
 {% if grains['role'] in ['so-node','so-heavynode'] %}
-curl -s -k https://{{ NODEIP }}:9200/_template/$1 | jq
+curl -s -k -L https://{{ NODEIP }}:9200/_template/$1 | jq
 {% else %}
-curl -s {{ NODEIP }}:9200/_template/$1 | jq
+curl -s -L https://{{ NODEIP }}:9200/_template/$1 | jq
 {% endif %}
 fi
@@ -31,9 +31,9 @@ COUNT=0
 ELASTICSEARCH_CONNECTED="no"
 while [[ "$COUNT" -le 240 ]]; do
 {% if grains['role'] in ['so-node','so-heavynode'] %}
-curl -k --output /dev/null --silent --head --fail https://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT"
+curl -k --output /dev/null --silent --head --fail -L https://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT"
 {% else %}
-curl --output /dev/null --silent --head --fail http://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT"
+curl --output /dev/null --silent --head --fail -L http://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT"
 {% endif %}
 if [ $? -eq 0 ]; then
 ELASTICSEARCH_CONNECTED="yes"
@@ -56,9 +56,9 @@ cd ${ELASTICSEARCH_TEMPLATES}
 
 echo "Loading templates..."
 {% if grains['role'] in ['so-node','so-heavynode'] %}
-for i in *; do TEMPLATE=$(echo $i | cut -d '-' -f2); echo "so-$TEMPLATE"; curl -k ${ELASTICSEARCH_AUTH} -s -XPUT https://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}/_template/so-$TEMPLATE -H 'Content-Type: application/json' -d@$i 2>/dev/null; echo; done
+for i in *; do TEMPLATE=$(echo $i | cut -d '-' -f2); echo "so-$TEMPLATE"; curl -k ${ELASTICSEARCH_AUTH} -s -XPUT -L https://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}/_template/so-$TEMPLATE -H 'Content-Type: application/json' -d@$i 2>/dev/null; echo; done
 {% else %}
-for i in *; do TEMPLATE=$(echo $i | cut -d '-' -f2); echo "so-$TEMPLATE"; curl ${ELASTICSEARCH_AUTH} -s -XPUT http://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}/_template/so-$TEMPLATE -H 'Content-Type: application/json' -d@$i 2>/dev/null; echo; done
+for i in *; do TEMPLATE=$(echo $i | cut -d '-' -f2); echo "so-$TEMPLATE"; curl ${ELASTICSEARCH_AUTH} -s -XPUT -L http://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}/_template/so-$TEMPLATE -H 'Content-Type: application/json' -d@$i 2>/dev/null; echo; done
 {% endif %}
 echo
 
@@ -59,6 +59,6 @@ if [[ $? -eq 0 ]]; then
 echo "Successfully added user to Fleet"
 else
 echo "Unable to add user to Fleet; user might already exist"
-echo $resp
+echo "$MYSQL_OUTPUT"
 exit 2
 fi
@@ -16,7 +16,7 @@
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 
 {% if grains['role'] in ['so-node','so-heavynode'] %}
-curl -X GET -k https://localhost:9200/_cat/indices?v
+curl -X GET -k -L https://localhost:9200/_cat/indices?v
 {% else %}
-curl -X GET localhost:9200/_cat/indices?v
+curl -X GET -L localhost:9200/_cat/indices?v
 {% endif %}
@@ -23,7 +23,7 @@
 KIBANA_HOST={{ MANAGER }}
 KSO_PORT=5601
 OUTFILE="saved_objects.ndjson"
-curl -s -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -XPOST $KIBANA_HOST:$KSO_PORT/api/saved_objects/_export -d '{ "type": [ "index-pattern", "config", "visualization", "dashboard", "search" ], "excludeExportDetails": false }' > $OUTFILE
+curl -s -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -XPOST -L $KIBANA_HOST:$KSO_PORT/api/saved_objects/_export -d '{ "type": [ "index-pattern", "config", "visualization", "dashboard", "search" ], "excludeExportDetails": false }' > $OUTFILE
 
 # Clean up using PLACEHOLDER
 sed -i "s/$KIBANA_HOST/PLACEHOLDER/g" $OUTFILE
@@ -47,7 +47,7 @@ if ! check_password "$THEHIVE_PASS"; then
 fi
 
 # Create new user in TheHive
-resp=$(curl -sk -XPOST -H "Authorization: Bearer $THEHIVE_KEY" -H "Content-Type: application/json" "https://$THEHIVE_IP/thehive/api/user" -d "{\"login\" : \"$THEHIVE_USER\",\"name\" : \"$THEHIVE_USER\",\"roles\" : [\"read\",\"alert\",\"write\",\"admin\"],\"preferences\" : \"{}\",\"password\" : \"$THEHIVE_PASS\"}")
+resp=$(curl -sk -XPOST -H "Authorization: Bearer $THEHIVE_KEY" -H "Content-Type: application/json" -L "https://$THEHIVE_IP/thehive/api/user" -d "{\"login\" : \"$THEHIVE_USER\",\"name\" : \"$THEHIVE_USER\",\"roles\" : [\"read\",\"alert\",\"write\",\"admin\"],\"preferences\" : \"{}\",\"password\" : \"$THEHIVE_PASS\"}")
 if [[ "$resp" =~ \"status\":\"Ok\" ]]; then
 echo "Successfully added user to TheHive"
 else
@@ -46,7 +46,7 @@ case "${2^^}" in
 ;;
 esac
 
-resp=$(curl -sk -XPATCH -H "Authorization: Bearer $THEHIVE_KEY" -H "Content-Type: application/json" "https://$THEHIVE_IP/thehive/api/user/${THEHIVE_USER}" -d "{\"status\":\"${THEHIVE_STATUS}\" }")
+resp=$(curl -sk -XPATCH -H "Authorization: Bearer $THEHIVE_KEY" -H "Content-Type: application/json" -L "https://$THEHIVE_IP/thehive/api/user/${THEHIVE_USER}" -d "{\"status\":\"${THEHIVE_STATUS}\" }")
 if [[ "$resp" =~ \"status\":\"Locked\" || "$resp" =~ \"status\":\"Ok\" ]]; then
 echo "Successfully updated user in TheHive"
 else
@@ -56,14 +56,14 @@ function verifyEnvironment() {
 require "openssl"
 require "sqlite3"
 [[ ! -f $databasePath ]] && fail "Unable to find database file; specify path via KRATOS_DB_PATH environment variable"
-response=$(curl -Ss ${kratosUrl}/)
+response=$(curl -Ss -L ${kratosUrl}/)
 [[ "$response" != "404 page not found" ]] && fail "Unable to communicate with Kratos; specify URL via KRATOS_URL environment variable"
 }
 
 function findIdByEmail() {
 email=$1
 
-response=$(curl -Ss ${kratosUrl}/identities)
+response=$(curl -Ss -L ${kratosUrl}/identities)
 identityId=$(echo "${response}" | jq ".[] | select(.verifiable_addresses[0].value == \"$email\") | .id")
 echo $identityId
 }
@@ -113,7 +113,7 @@ function updatePassword() {
 }
 
 function listUsers() {
-response=$(curl -Ss ${kratosUrl}/identities)
+response=$(curl -Ss -L ${kratosUrl}/identities)
 [[ $? != 0 ]] && fail "Unable to communicate with Kratos"
 
 echo "${response}" | jq -r ".[] | .verifiable_addresses[0].value" | sort
@@ -131,7 +131,7 @@ function createUser() {
 EOF
 )
 
-response=$(curl -Ss ${kratosUrl}/identities -d "$addUserJson")
+response=$(curl -Ss -L ${kratosUrl}/identities -d "$addUserJson")
 [[ $? != 0 ]] && fail "Unable to communicate with Kratos"
 
 identityId=$(echo "${response}" | jq ".id")
@@ -153,7 +153,7 @@ function updateStatus() {
 identityId=$(findIdByEmail "$email")
 [[ ${identityId} == "" ]] && fail "User not found"
 
-response=$(curl -Ss "${kratosUrl}/identities/$identityId")
+response=$(curl -Ss -L "${kratosUrl}/identities/$identityId")
 [[ $? != 0 ]] && fail "Unable to communicate with Kratos"
 
 oldConfig=$(echo "select config from identity_credentials where identity_id=${identityId};" | sqlite3 "$databasePath")
@@ -171,7 +171,7 @@ function updateStatus() {
 fi
 
 updatedJson=$(echo "$response" | jq ".traits.status = \"$status\" | del(.verifiable_addresses) | del(.id) | del(.schema_url)")
-response=$(curl -Ss -XPUT ${kratosUrl}/identities/$identityId -d "$updatedJson")
+response=$(curl -Ss -XPUT -L ${kratosUrl}/identities/$identityId -d "$updatedJson")
 [[ $? != 0 ]] && fail "Unable to mark user as locked"
 
 }
@@ -191,7 +191,7 @@ function deleteUser() {
 identityId=$(findIdByEmail "$email")
 [[ ${identityId} == "" ]] && fail "User not found"
 
-response=$(curl -Ss -XDELETE "${kratosUrl}/identities/$identityId")
+response=$(curl -Ss -XDELETE -L "${kratosUrl}/identities/$identityId")
 [[ $? != 0 ]] && fail "Unable to communicate with Kratos"
 }
 
@@ -37,9 +37,9 @@ LOG="/opt/so/log/curator/so-curator-closed-delete.log"
 # If both conditions are true, keep on looping until one of the conditions is false.
 while [[ $(du -hs --block-size=1GB /nsm/elasticsearch/nodes | awk '{print $1}' ) -gt "{{LOG_SIZE_LIMIT}}" ]] &&
 {% if grains['role'] in ['so-node','so-heavynode'] %}
-curl -s -k https://{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices | grep -E " close (logstash-|so-)" > /dev/null; do
+curl -s -k -L https://{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices | grep -E " close (logstash-|so-)" > /dev/null; do
 {% else %}
-curl -s {{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices | grep -E " close (logstash-|so-)" > /dev/null; do
+curl -s -L {{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices | grep -E " close (logstash-|so-)" > /dev/null; do
 {% endif %}
 
 # We need to determine OLDEST_INDEX.
@@ -48,16 +48,16 @@ curl -s {{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices | grep -E " c
 # Then, sort by date by telling sort to use hyphen as delimiter and then sort on the third field.
 # Finally, select the first entry in that sorted list.
 {% if grains['role'] in ['so-node','so-heavynode'] %}
-OLDEST_INDEX=$(curl -s -k https://{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices | grep -E " close (logstash-|so-)" | awk '{print $2}' | sort -t- -k3 | head -1)
+OLDEST_INDEX=$(curl -s -k -L https://{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices | grep -E " close (logstash-|so-)" | awk '{print $2}' | sort -t- -k3 | head -1)
 {% else %}
-OLDEST_INDEX=$(curl -s {{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices | grep -E " close (logstash-|so-)" | awk '{print $2}' | sort -t- -k3 | head -1)
+OLDEST_INDEX=$(curl -s -L {{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices | grep -E " close (logstash-|so-)" | awk '{print $2}' | sort -t- -k3 | head -1)
 {% endif %}
 
 # Now that we've determined OLDEST_INDEX, ask Elasticsearch to delete it.
 {% if grains['role'] in ['so-node','so-heavynode'] %}
-curl -XDELETE -k https://{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/${OLDEST_INDEX}
+curl -XDELETE -k -L https://{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/${OLDEST_INDEX}
 {% else %}
-curl -XDELETE {{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/${OLDEST_INDEX}
+curl -XDELETE -L {{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/${OLDEST_INDEX}
 {% endif %}
 
 # Finally, write a log entry that says we deleted it.
@@ -28,9 +28,9 @@ COUNT=0
 ELASTICSEARCH_CONNECTED="no"
 while [[ "$COUNT" -le 240 ]]; do
 {% if grains['role'] in ['so-node','so-heavynode'] %}
-curl ${ELASTICSEARCH_AUTH} -k --output /dev/null --silent --head --fail https://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT"
+curl ${ELASTICSEARCH_AUTH} -k --output /dev/null --silent --head --fail -L https://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT"
 {% else %}
-curl ${ELASTICSEARCH_AUTH} --output /dev/null --silent --head --fail http://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT"
+curl ${ELASTICSEARCH_AUTH} --output /dev/null --silent --head --fail -L http://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT"
 {% endif %}
 if [ $? -eq 0 ]; then
 ELASTICSEARCH_CONNECTED="yes"
@@ -52,9 +52,9 @@ cd ${ELASTICSEARCH_INGEST_PIPELINES}
 
 echo "Loading pipelines..."
 {% if grains['role'] in ['so-node','so-heavynode'] %}
-for i in *; do echo $i; RESPONSE=$(curl ${ELASTICSEARCH_AUTH} -k -XPUT https://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}/_ingest/pipeline/$i -H 'Content-Type: application/json' -d@$i 2>/dev/null); echo $RESPONSE; if [[ "$RESPONSE" == *"error"* ]]; then RETURN_CODE=1; fi; done
+for i in *; do echo $i; RESPONSE=$(curl ${ELASTICSEARCH_AUTH} -k -XPUT -L https://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}/_ingest/pipeline/$i -H 'Content-Type: application/json' -d@$i 2>/dev/null); echo $RESPONSE; if [[ "$RESPONSE" == *"error"* ]]; then RETURN_CODE=1; fi; done
 {% else %}
-for i in *; do echo $i; RESPONSE=$(curl ${ELASTICSEARCH_AUTH} -XPUT http://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}/_ingest/pipeline/$i -H 'Content-Type: application/json' -d@$i 2>/dev/null); echo $RESPONSE; if [[ "$RESPONSE" == *"error"* ]]; then RETURN_CODE=1; fi; done
+for i in *; do echo $i; RESPONSE=$(curl ${ELASTICSEARCH_AUTH} -XPUT -L http://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}/_ingest/pipeline/$i -H 'Content-Type: application/json' -d@$i 2>/dev/null); echo $RESPONSE; if [[ "$RESPONSE" == *"error"* ]]; then RETURN_CODE=1; fi; done
 {% endif %}
 echo
 
@@ -4,7 +4,7 @@ echo -n "Waiting for ElasticSearch..."
 COUNT=0
 ELASTICSEARCH_CONNECTED="no"
 while [[ "$COUNT" -le 30 ]]; do
-curl --output /dev/null --silent --head --fail http://{{ ES }}:9200
+curl --output /dev/null --silent --head --fail -L http://{{ ES }}:9200
 if [ $? -eq 0 ]; then
 ELASTICSEARCH_CONNECTED="yes"
 echo "connected!"
@@ -28,7 +28,7 @@ MAX_WAIT=240
 
 # Check to see if Kibana is available
 wait_step=0
-until curl -s -XGET http://{{ ES }}:5601 > /dev/null ; do
+until curl -s -XGET -L http://{{ ES }}:5601 > /dev/null ; do
 wait_step=$(( ${wait_step} + 1 ))
 echo "Waiting on Kibana...Attempt #$wait_step"
 if [ ${wait_step} -gt ${MAX_WAIT} ]; then
@@ -42,12 +42,12 @@ wait_step=0
 # Apply Kibana template
 echo
 echo "Applying Kibana template..."
-curl -s -XPUT http://{{ ES }}:9200/_template/kibana \
+curl -s -XPUT -L http://{{ ES }}:9200/_template/kibana \
 -H 'Content-Type: application/json' \
 -d'{"index_patterns" : ".kibana", "settings": { "number_of_shards" : 1, "number_of_replicas" : 0 }, "mappings" : { "search": {"properties": {"hits": {"type": "integer"}, "version": {"type": "integer"}}}}}'
 echo
 
-curl -s -XPUT "{{ ES }}:9200/.kibana/_settings" \
+curl -s -XPUT -L "{{ ES }}:9200/.kibana/_settings" \
 -H 'Content-Type: application/json' \
 -d'{"index" : {"number_of_replicas" : 0}}'
 echo
@@ -3,10 +3,10 @@
 default_salt_dir=/opt/so/saltstack/default
 
 echo "Waiting for connection"
-until $(curl --output /dev/null --silent --head http://{{ ip }}:1880); do
+until $(curl --output /dev/null --silent --head -L http://{{ ip }}:1880); do
 echo '.'
 sleep 1
 done
 echo "Loading flows..."
-curl -XPOST -v -H "Content-Type: application/json" -d @$default_salt_dir/salt/nodered/so_flows.json {{ ip }}:1880/flows
+curl -XPOST -v -H "Content-Type: application/json" -d @$default_salt_dir/salt/nodered/so_flows.json -L {{ ip }}:1880/flows
 echo "Done loading..."
@@ -29,25 +29,25 @@ cortex_init(){
 
 
 # Migrate DB
-curl -v -k -XPOST "https://$CORTEX_IP:/cortex/api/maintenance/migrate"
+curl -v -k -XPOST -L "https://$CORTEX_IP:/cortex/api/maintenance/migrate"
 
 # Create intial Cortex superadmin
-curl -v -k "https://$CORTEX_IP/cortex/api/user" -H "Content-Type: application/json" -d "{\"login\" : \"$CORTEX_USER\",\"name\" : \"$CORTEX_USER\",\"roles\" : [\"superadmin\"],\"preferences\" : \"{}\",\"password\" : \"$CORTEX_PASSWORD\", \"key\": \"$CORTEX_KEY\"}"
+curl -v -k -L "https://$CORTEX_IP/cortex/api/user" -H "Content-Type: application/json" -d "{\"login\" : \"$CORTEX_USER\",\"name\" : \"$CORTEX_USER\",\"roles\" : [\"superadmin\"],\"preferences\" : \"{}\",\"password\" : \"$CORTEX_PASSWORD\", \"key\": \"$CORTEX_KEY\"}"
 
 # Create user-supplied org
-curl -k -XPOST -H "Authorization: Bearer $CORTEX_KEY" -H "Content-Type: application/json" "https://$CORTEX_IP/cortex/api/organization" -d "{ \"name\": \"$CORTEX_ORG_NAME\",\"description\": \"$CORTEX_ORG_DESC\",\"status\": \"Active\"}"
+curl -k -XPOST -H "Authorization: Bearer $CORTEX_KEY" -H "Content-Type: application/json" -L "https://$CORTEX_IP/cortex/api/organization" -d "{ \"name\": \"$CORTEX_ORG_NAME\",\"description\": \"$CORTEX_ORG_DESC\",\"status\": \"Active\"}"
 
 # Create user-supplied org user
-curl -k -XPOST -H "Authorization: Bearer $CORTEX_KEY" -H "Content-Type: application/json" "https://$CORTEX_IP/cortex/api/user" -d "{\"name\": \"$CORTEX_ORG_USER\",\"roles\": [\"read\",\"analyze\",\"orgadmin\"],\"organization\": \"$CORTEX_ORG_NAME\",\"login\": \"$CORTEX_ORG_USER\",\"key\": \"$CORTEX_ORG_USER_KEY\" }"
+curl -k -XPOST -H "Authorization: Bearer $CORTEX_KEY" -H "Content-Type: application/json" -L "https://$CORTEX_IP/cortex/api/user" -d "{\"name\": \"$CORTEX_ORG_USER\",\"roles\": [\"read\",\"analyze\",\"orgadmin\"],\"organization\": \"$CORTEX_ORG_NAME\",\"login\": \"$CORTEX_ORG_USER\",\"key\": \"$CORTEX_ORG_USER_KEY\" }"
 
 # Enable URLScan.io Analyzer
-curl -v -k -XPOST -H "Authorization: Bearer $CORTEX_ORG_USER_KEY" -H "Content-Type: application/json" "https://$CORTEX_IP/cortex/api/organization/analyzer/Urlscan_io_Search_0_1_0" -d '{"name":"Urlscan_io_Search_0_1_0","configuration":{"auto_extract_artifacts":false,"check_tlp":true,"max_tlp":2}}'
+curl -v -k -XPOST -H "Authorization: Bearer $CORTEX_ORG_USER_KEY" -H "Content-Type: application/json" -L "https://$CORTEX_IP/cortex/api/organization/analyzer/Urlscan_io_Search_0_1_0" -d '{"name":"Urlscan_io_Search_0_1_0","configuration":{"auto_extract_artifacts":false,"check_tlp":true,"max_tlp":2}}'
 
 # Enable Cert PassiveDNS Analyzer
-curl -v -k -XPOST -H "Authorization: Bearer $CORTEX_ORG_USER_KEY" -H "Content-Type: application/json" "https://$CORTEX_IP/cortex/api/organization/analyzer/CERTatPassiveDNS_2_0" -d '{"name":"CERTatPassiveDNS_2_0","configuration":{"auto_extract_artifacts":false,"check_tlp":true,"max_tlp":2, "limit": 100}}'
+curl -v -k -XPOST -H "Authorization: Bearer $CORTEX_ORG_USER_KEY" -H "Content-Type: application/json" -L "https://$CORTEX_IP/cortex/api/organization/analyzer/CERTatPassiveDNS_2_0" -d '{"name":"CERTatPassiveDNS_2_0","configuration":{"auto_extract_artifacts":false,"check_tlp":true,"max_tlp":2, "limit": 100}}'
 
 # Revoke $CORTEX_USER key
-curl -k -XDELETE -H "Authorization: Bearer $CORTEX_KEY" "https:///$CORTEX_IP/api/user/$CORTEX_USER/key"
+curl -k -XDELETE -H "Authorization: Bearer $CORTEX_KEY" -L "https:///$CORTEX_IP/api/user/$CORTEX_USER/key"
 
 # Update SOCtopus config with apikey value
 #sed -i "s/cortex_key = .*/cortex_key = $CORTEX_KEY/" $SOCTOPUS_CONFIG
@@ -36,15 +36,15 @@ thehive_init(){
 if [ "$THEHIVE_CONNECTED" == "yes" ]; then
 
 # Migrate DB
-curl -v -k -XPOST "https://$THEHIVE_IP:/thehive/api/maintenance/migrate"
+curl -v -k -XPOST -L "https://$THEHIVE_IP:/thehive/api/maintenance/migrate"
 
 # Create intial TheHive user
-curl -v -k "https://$THEHIVE_IP/thehive/api/user" -H "Content-Type: application/json" -d "{\"login\" : \"$THEHIVE_USER\",\"name\" : \"$THEHIVE_USER\",\"roles\" : [\"read\",\"alert\",\"write\",\"admin\"],\"preferences\" : \"{}\",\"password\" : \"$THEHIVE_PASSWORD\", \"key\": \"$THEHIVE_KEY\"}"
+curl -v -k -L "https://$THEHIVE_IP/thehive/api/user" -H "Content-Type: application/json" -d "{\"login\" : \"$THEHIVE_USER\",\"name\" : \"$THEHIVE_USER\",\"roles\" : [\"read\",\"alert\",\"write\",\"admin\"],\"preferences\" : \"{}\",\"password\" : \"$THEHIVE_PASSWORD\", \"key\": \"$THEHIVE_KEY\"}"
 
 # Pre-load custom fields
 #
 # reputation
-curl -v -k "https://$THEHIVE_IP/thehive/api/list/custom_fields" -H "Authorization: Bearer $THEHIVE_KEY" -H "Content-Type: application/json" -d "{\"value\":{\"name\": \"reputation\", \"reference\": \"reputation\", \"description\": \"This field provides an overall reputation status for an address/domain.\", \"type\": \"string\", \"options\": []}}"
+curl -v -k -L "https://$THEHIVE_IP/thehive/api/list/custom_fields" -H "Authorization: Bearer $THEHIVE_KEY" -H "Content-Type: application/json" -d "{\"value\":{\"name\": \"reputation\", \"reference\": \"reputation\", \"description\": \"This field provides an overall reputation status for an address/domain.\", \"type\": \"string\", \"options\": []}}"
 
 
 touch /opt/so/state/thehive.txt
@@ -9,7 +9,7 @@ echo -n "Waiting for ElasticSearch..."
 COUNT=0
 ELASTICSEARCH_CONNECTED="no"
 while [[ "$COUNT" -le 30 ]]; do
-curl --output /dev/null --silent --head --fail http://{{ ES }}:9200
+curl --output /dev/null --silent --head --fail -L http://{{ ES }}:9200
 if [ $? -eq 0 ]; then
 ELASTICSEARCH_CONNECTED="yes"
 echo "connected!"
@@ -29,7 +29,7 @@ if [ "$ELASTICSEARCH_CONNECTED" == "no" ]; then
 fi
 
 echo "Applying cross cluster search config..."
-curl -s -XPUT http://{{ ES }}:9200/_cluster/settings \
+curl -s -XPUT -L http://{{ ES }}:9200/_cluster/settings \
 -H 'Content-Type: application/json' \
 -d "{\"persistent\": {\"search\": {\"remote\": {\"{{ MANAGER }}\": {\"seeds\": [\"127.0.0.1:9300\"]}}}}}"
 
@@ -37,6 +37,6 @@ echo "Applying cross cluster search config..."
 
 {%- if salt['pillar.get']('nodestab', {}) %}
 {%- for SN, SNDATA in salt['pillar.get']('nodestab', {}).items() %}
-curl -XPUT http://{{ ES }}:9200/_cluster/settings -H'Content-Type: application/json' -d '{"persistent": {"search": {"remote": {"{{ SN }}": {"skip_unavailable": "true", "seeds": ["{{ SN.split('_')|first }}:9300"]}}}}}'
+curl -XPUT -L http://{{ ES }}:9200/_cluster/settings -H'Content-Type: application/json' -d '{"persistent": {"search": {"remote": {"{{ SN }}": {"skip_unavailable": "true", "seeds": ["{{ SN.split('_')|first }}:9300"]}}}}}'
 {%- endfor %}
 {%- endif %}
@@ -6,7 +6,7 @@ echo -n "Waiting for ElasticSearch..."
 COUNT=0
 ELASTICSEARCH_CONNECTED="no"
 while [[ "$COUNT" -le 30 ]]; do
-curl --output /dev/null --silent --head --fail http://{{ ES }}:9200
+curl --output /dev/null --silent --head --fail -L http://{{ ES }}:9200
 if [ $? -eq 0 ]; then
 ELASTICSEARCH_CONNECTED="yes"
 echo "connected!"
@@ -26,6 +26,6 @@ if [ "$ELASTICSEARCH_CONNECTED" == "no" ]; then
 fi
 
 echo "Applying cross cluster search config..."
-curl -s -XPUT http://{{ ES }}:9200/_cluster/settings \
+curl -s -XPUT -L http://{{ ES }}:9200/_cluster/settings \
 -H 'Content-Type: application/json' \
 -d "{\"persistent\": {\"search\": {\"remote\": {\"{{ grains.host }}\": {\"seeds\": [\"127.0.0.1:9300\"]}}}}}"
@@ -52,7 +52,7 @@ register_agent() {
 echo ""
 echo "Adding agent:"
 echo "curl -s -u $USER:**** -k -X POST -d 'name=$AGENT_NAME&ip=$AGENT_IP' $PROTOCOL://$API_IP:$API_PORT/agents"
-API_RESULT=$(curl -s -u $USER:"$PASSWORD" -k -X POST -d 'name='$AGENT_NAME'&ip='$AGENT_IP $PROTOCOL://$API_IP:$API_PORT/agents)
+API_RESULT=$(curl -s -u $USER:"$PASSWORD" -k -X POST -d 'name='$AGENT_NAME'&ip='$AGENT_IP -L $PROTOCOL://$API_IP:$API_PORT/agents)
 echo -e $API_RESULT | grep -q "\"error\":0" 2>&1
 
 if [ "$?" != "0" ]; then
@@ -84,14 +84,14 @@ remove_agent() {
 echo "Found: $AGENT_ID"
 echo "Removing previous registration for '$AGENT_NAME' using ID: $AGENT_ID ..."
 # curl -u foo:bar -k -X DELETE "https://127.0.0.1:55000/agents/001
-REMOVE_AGENT=$(curl -s -u $USER:"$PASSWORD" -k -X DELETE $PROTOCOL://$API_IP:$API_PORT/agents/$AGENT_ID)
+REMOVE_AGENT=$(curl -s -u $USER:"$PASSWORD" -k -X DELETE -L $PROTOCOL://$API_IP:$API_PORT/agents/$AGENT_ID)
 echo -e $REMOVE_AGENT
 }
 
 get_agent_id() {
 echo ""
 echo "Checking for Agent ID..."
-AGENT_ID=$(curl -s -u $USER:"$PASSWORD" -k -X GET $PROTOCOL://$API_IP:$API_PORT/agents/name/$AGENT_NAME | rev | cut -d: -f1 | rev | grep -o '".*"' | tr -d '"')
+AGENT_ID=$(curl -s -u $USER:"$PASSWORD" -k -X GET -L $PROTOCOL://$API_IP:$API_PORT/agents/name/$AGENT_NAME | rev | cut -d: -f1 | rev | grep -o '".*"' | tr -d '"')
 }
 
 # MAIN
@@ -136,7 +136,7 @@ shift $(($OPTIND - 1))
 
 # Default action -> try to register the agent
 sleep 30s
-STATUS=$(curl -s -k -u $USER:$PASSWORD $PROTOCOL://$API_IP:$API_PORT/agents/$AGENT_ID | jq .data.status | sed s'/"//g')
+STATUS=$(curl -s -k -u $USER:$PASSWORD -L $PROTOCOL://$API_IP:$API_PORT/agents/$AGENT_ID | jq .data.status | sed s'/"//g')
 if [[ $STATUS == "Active" ]]; then
 echo "Agent $AGENT_ID already registered!"
 else
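A quick way to check whether a given service actually issues a redirect (and therefore benefits from -L) is to inspect the response headers; the host below is a placeholder, not one of the configured nodes:

# A populated Location header in the reply indicates the request is being redirected.
curl -skI https://<manager-ip>/cortex/api/user | grep -i '^location:'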