Merge branch 'dev' into patch_2.3.3

Authored by Josh Patterson on 2020-11-05 09:58:43 -05:00; committed via GitHub.
71 changed files with 526 additions and 2155 deletions

.github/ISSUE_TEMPLATE (new file, 12 lines added)

@@ -0,0 +1,12 @@
PLEASE STOP AND READ THIS INFORMATION!
If you are creating an issue just to ask a question, you will likely get faster and better responses by posting to our discussions forum instead:
https://securityonion.net/discuss
If you think you have found a possible bug or are observing a behavior that you weren't expecting, use the discussion forum to start a conversation about it instead of creating an issue.
If you are very familiar with the latest version of the product and are confident you have found a bug in Security Onion, you can continue with creating an issue here, but please make sure you have done the following:
- duplicated the issue on a fresh installation of the latest version
- provided information about your system and how you installed Security Onion
- included relevant log files
- included reproduction steps


@@ -2,6 +2,13 @@
Security Onion 2.3.3 is here!
## Screenshots
Alerts
![Alerts](https://raw.githubusercontent.com/security-onion-solutions/securityonion/master/screenshots/alerts-1.png)
Hunt
![Hunt](https://raw.githubusercontent.com/security-onion-solutions/securityonion/master/screenshots/hunt-1.png)
### Release Notes


@@ -1 +1 @@
2.3.3
2.3.10


@@ -5,7 +5,6 @@
'so-telegraf',
'so-soc',
'so-kratos',
'so-aptcacherng',
'so-idstools',
'so-redis',
'so-elasticsearch',
@@ -15,4 +14,8 @@
'so-filebeat',
'so-soctopus'
]
} %}
} %}
{% if salt['pillar.get']('global:managerupdate') == 1 %}
{% do docker.containers.append('so-aptcacherng') %}
{% endif %}
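The same conditional appears in each role's container map below: so-aptcacherng is no longer listed unconditionally and is appended only when the global:managerupdate pillar equals 1. To see which way a given minion will render, the pillar value can be inspected directly; a minimal sketch (generic Salt usage, not part of this change):

```bash
# Prints the flag that gates whether so-aptcacherng is added to the container list
salt-call pillar.get global:managerupdate
```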


@@ -4,7 +4,6 @@
'so-telegraf',
'so-soc',
'so-kratos',
'so-aptcacherng',
'so-idstools',
'so-redis',
'so-logstash',
@@ -15,4 +14,8 @@
'so-filebeat',
'so-soctopus'
]
} %}
} %}
{% if salt['pillar.get']('global:managerupdate') == 1 %}
{% do docker.containers.append('so-aptcacherng') %}
{% endif %}


@@ -4,7 +4,6 @@
'so-telegraf',
'so-soc',
'so-kratos',
'so-aptcacherng',
'so-idstools',
'so-redis',
'so-logstash',
@@ -19,4 +18,8 @@
'so-soctopus',
'so-sensoroni'
]
} %}
} %}
{% if salt['pillar.get']('global:managerupdate') == 1 %}
{% do docker.containers.append('so-aptcacherng') %}
{% endif %}


@@ -48,6 +48,6 @@ check_container() {
check_password() {
local password=$1
echo "$password" | egrep -v "'|\"|\\\\" > /dev/null 2>&1
echo "$password" | egrep -v "'|\"|\\$|\\\\" > /dev/null 2>&1
return $?
}
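The password check now also rejects dollar signs, presumably because a literal `$` would be expanded when the password is later interpolated into shell commands or JSON bodies by the user-management scripts below. A minimal standalone sketch of the updated filter (the test values are illustrative):

```bash
check_password() {
  local password=$1
  # egrep -v succeeds only when the password contains none of: ' " $ \
  echo "$password" | egrep -v "'|\"|\\$|\\\\" > /dev/null 2>&1
  return $?
}

for p in 'CorrectHorseBatteryStaple' 'pa$$word' 'back\slash'; do
  check_password "$p" && echo "accepted: $p" || echo "rejected: $p"
done
```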


@@ -31,7 +31,7 @@ fi
USER=$1
CORTEX_KEY=$(lookup_pillar cortexkey)
CORTEX_IP=$(lookup_pillar managerip)
CORTEX_API_URL="$(lookup_pillar url_base)/cortex/api"
CORTEX_ORG_NAME=$(lookup_pillar cortexorgname)
CORTEX_USER=$USER
@@ -43,7 +43,7 @@ fi
read -rs CORTEX_PASS
# Create new user in Cortex
resp=$(curl -sk -XPOST -H "Authorization: Bearer $CORTEX_KEY" -H "Content-Type: application/json" "https://$CORTEX_IP/cortex/api/user" -d "{\"name\": \"$CORTEX_USER\",\"roles\": [\"read\",\"analyze\",\"orgadmin\"],\"organization\": \"$CORTEX_ORG_NAME\",\"login\": \"$CORTEX_USER\",\"password\" : \"$CORTEX_PASS\" }")
resp=$(curl -sk -XPOST -H "Authorization: Bearer $CORTEX_KEY" -H "Content-Type: application/json" -L "https://$CORTEX_API_URL/user" -d "{\"name\": \"$CORTEX_USER\",\"roles\": [\"read\",\"analyze\",\"orgadmin\"],\"organization\": \"$CORTEX_ORG_NAME\",\"login\": \"$CORTEX_USER\",\"password\" : \"$CORTEX_PASS\" }")
if [[ "$resp" =~ \"status\":\"Ok\" ]]; then
echo "Successfully added user to Cortex."
else
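This and the following user-management scripts now address the service through the url_base pillar rather than the manager IP, and pass -L so curl follows HTTP redirects; the nginx config later in this diff answers requests that do not use the configured url_base hostname with a 307 redirect to it. A hedged sketch of the resulting call, with placeholder values:

```bash
API="$(lookup_pillar url_base)/cortex/api"   # e.g. manager.example.com/cortex/api (placeholder)

# Without -L a 301/307 from nginx would end the request at the redirect;
# with -L curl re-issues it against the Location target.
curl -sk -XPOST -L "https://$API/user" \
  -H "Authorization: Bearer $CORTEX_KEY" \
  -H "Content-Type: application/json" \
  -d '{"login":"analyst","name":"analyst","roles":["read","analyze"]}'
```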


@@ -31,7 +31,7 @@ fi
USER=$1
CORTEX_KEY=$(lookup_pillar cortexkey)
CORTEX_IP=$(lookup_pillar managerip)
CORTEX_API_URL="$(lookup_pillar url_base)/cortex/api"
CORTEX_USER=$USER
case "${2^^}" in
@@ -46,7 +46,7 @@ case "${2^^}" in
;;
esac
resp=$(curl -sk -XPATCH -H "Authorization: Bearer $CORTEX_KEY" -H "Content-Type: application/json" "https://$CORTEX_IP/cortex/api/user/${CORTEX_USER}" -d "{\"status\":\"${CORTEX_STATUS}\" }")
resp=$(curl -sk -XPATCH -H "Authorization: Bearer $CORTEX_KEY" -H "Content-Type: application/json" -L "https://$CORTEX_API_URL/user/${CORTEX_USER}" -d "{\"status\":\"${CORTEX_STATUS}\" }")
if [[ "$resp" =~ \"status\":\"Locked\" || "$resp" =~ \"status\":\"Ok\" ]]; then
echo "Successfully updated user in Cortex."
else


@@ -51,9 +51,9 @@ if [ $SKIP -ne 1 ]; then
# List indices
echo
{% if grains['role'] in ['so-node','so-heavynode'] %}
curl -k https://{{ NODEIP }}:9200/_cat/indices?v
curl -k -L https://{{ NODEIP }}:9200/_cat/indices?v
{% else %}
curl {{ NODEIP }}:9200/_cat/indices?v
curl -L {{ NODEIP }}:9200/_cat/indices?v
{% endif %}
echo
# Inform user we are about to delete all data
@@ -94,16 +94,16 @@ fi
echo "Deleting data..."
{% if grains['role'] in ['so-node','so-heavynode'] %}
INDXS=$(curl -s -XGET -k https://{{ NODEIP }}:9200/_cat/indices?v | egrep 'logstash|elastalert|so-' | awk '{ print $3 }')
INDXS=$(curl -s -XGET -k -L https://{{ NODEIP }}:9200/_cat/indices?v | egrep 'logstash|elastalert|so-' | awk '{ print $3 }')
{% else %}
INDXS=$(curl -s -XGET {{ NODEIP }}:9200/_cat/indices?v | egrep 'logstash|elastalert|so-' | awk '{ print $3 }')
INDXS=$(curl -s -XGET -L {{ NODEIP }}:9200/_cat/indices?v | egrep 'logstash|elastalert|so-' | awk '{ print $3 }')
{% endif %}
for INDX in ${INDXS}
do
{% if grains['role'] in ['so-node','so-heavynode'] %}
curl -XDELETE -k https://"{{ NODEIP }}:9200/${INDX}" > /dev/null 2>&1
curl -XDELETE -k -L https://"{{ NODEIP }}:9200/${INDX}" > /dev/null 2>&1
{% else %}
curl -XDELETE "{{ NODEIP }}:9200/${INDX}" > /dev/null 2>&1
curl -XDELETE -L "{{ NODEIP }}:9200/${INDX}" > /dev/null 2>&1
{% endif %}
done


@@ -22,5 +22,5 @@ THEHIVEESPORT=9400
echo "Removing read only attributes for indices..."
echo
for p in $ESPORT $THEHIVEESPORT; do
curl -XPUT -H "Content-Type: application/json" http://$IP:$p/_all/_settings -d '{"index.blocks.read_only_allow_delete": null}' 2>&1 | if grep -q ack; then echo "Index settings updated..."; else echo "There was an issue updating the read-only attribute. Please ensure Elasticsearch is running.";fi;
curl -XPUT -H "Content-Type: application/json" -L http://$IP:$p/_all/_settings -d '{"index.blocks.read_only_allow_delete": null}' 2>&1 | if grep -q ack; then echo "Index settings updated..."; else echo "There was an issue updating the read-only attribute. Please ensure Elasticsearch is running.";fi;
done


@@ -20,14 +20,14 @@
if [ "$1" == "" ]; then
{% if grains['role'] in ['so-node','so-heavynode'] %}
curl -s -k https://{{ NODEIP }}:9200/_nodes/stats | jq .nodes | jq ".[] | .ingest.pipelines"
curl -s -k -L https://{{ NODEIP }}:9200/_nodes/stats | jq .nodes | jq ".[] | .ingest.pipelines"
{% else %}
curl -s {{ NODEIP }}:9200/_nodes/stats | jq .nodes | jq ".[] | .ingest.pipelines"
curl -s -L {{ NODEIP }}:9200/_nodes/stats | jq .nodes | jq ".[] | .ingest.pipelines"
{% endif %}
else
{% if grains['role'] in ['so-node','so-heavynode'] %}
curl -s -k https://{{ NODEIP }}:9200/_nodes/stats | jq .nodes | jq ".[] | .ingest.pipelines.\"$1\""
curl -s -k -L https://{{ NODEIP }}:9200/_nodes/stats | jq .nodes | jq ".[] | .ingest.pipelines.\"$1\""
{% else %}
curl -s {{ NODEIP }}:9200/_nodes/stats | jq .nodes | jq ".[] | .ingest.pipelines.\"$1\""
curl -s -L {{ NODEIP }}:9200/_nodes/stats | jq .nodes | jq ".[] | .ingest.pipelines.\"$1\""
{% endif %}
fi


@@ -18,14 +18,14 @@
. /usr/sbin/so-common
if [ "$1" == "" ]; then
{% if grains['role'] in ['so-node','so-heavynode'] %}
curl -s -k https://{{ NODEIP }}:9200/_ingest/pipeline/* | jq 'keys'
curl -s -k -L https://{{ NODEIP }}:9200/_ingest/pipeline/* | jq 'keys'
{% else %}
curl -s {{ NODEIP }}:9200/_ingest/pipeline/* | jq 'keys'
curl -s -L {{ NODEIP }}:9200/_ingest/pipeline/* | jq 'keys'
{% endif %}
else
{% if grains['role'] in ['so-node','so-heavynode'] %}
curl -s -k https://{{ NODEIP }}:9200/_ingest/pipeline/$1 | jq
curl -s -k -L https://{{ NODEIP }}:9200/_ingest/pipeline/$1 | jq
{% else %}
curl -s {{ NODEIP }}:9200/_ingest/pipeline/$1 | jq
curl -s -L {{ NODEIP }}:9200/_ingest/pipeline/$1 | jq
{% endif %}
fi


@@ -18,14 +18,14 @@
. /usr/sbin/so-common
if [ "$1" == "" ]; then
{% if grains['role'] in ['so-node','so-heavynode'] %}
curl -s -k https://{{ NODEIP }}:9200/_template/* | jq 'keys'
curl -s -k -L https://{{ NODEIP }}:9200/_template/* | jq 'keys'
{% else %}
curl -s {{ NODEIP }}:9200/_template/* | jq 'keys'
curl -s -L {{ NODEIP }}:9200/_template/* | jq 'keys'
{% endif %}
else
{% if grains['role'] in ['so-node','so-heavynode'] %}
curl -s -k https://{{ NODEIP }}:9200/_template/$1 | jq
curl -s -k -L https://{{ NODEIP }}:9200/_template/$1 | jq
{% else %}
curl -s {{ NODEIP }}:9200/_template/$1 | jq
curl -s -L {{ NODEIP }}:9200/_template/$1 | jq
{% endif %}
fi


@@ -31,9 +31,9 @@ COUNT=0
ELASTICSEARCH_CONNECTED="no"
while [[ "$COUNT" -le 240 ]]; do
{% if grains['role'] in ['so-node','so-heavynode'] %}
curl -k --output /dev/null --silent --head --fail https://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT"
curl -k --output /dev/null --silent --head --fail -L https://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT"
{% else %}
curl --output /dev/null --silent --head --fail http://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT"
curl --output /dev/null --silent --head --fail -L http://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT"
{% endif %}
if [ $? -eq 0 ]; then
ELASTICSEARCH_CONNECTED="yes"
@@ -56,9 +56,9 @@ cd ${ELASTICSEARCH_TEMPLATES}
echo "Loading templates..."
{% if grains['role'] in ['so-node','so-heavynode'] %}
for i in *; do TEMPLATE=$(echo $i | cut -d '-' -f2); echo "so-$TEMPLATE"; curl -k ${ELASTICSEARCH_AUTH} -s -XPUT https://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}/_template/so-$TEMPLATE -H 'Content-Type: application/json' -d@$i 2>/dev/null; echo; done
for i in *; do TEMPLATE=$(echo $i | cut -d '-' -f2); echo "so-$TEMPLATE"; curl -k ${ELASTICSEARCH_AUTH} -s -XPUT -L https://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}/_template/so-$TEMPLATE -H 'Content-Type: application/json' -d@$i 2>/dev/null; echo; done
{% else %}
for i in *; do TEMPLATE=$(echo $i | cut -d '-' -f2); echo "so-$TEMPLATE"; curl ${ELASTICSEARCH_AUTH} -s -XPUT http://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}/_template/so-$TEMPLATE -H 'Content-Type: application/json' -d@$i 2>/dev/null; echo; done
for i in *; do TEMPLATE=$(echo $i | cut -d '-' -f2); echo "so-$TEMPLATE"; curl ${ELASTICSEARCH_AUTH} -s -XPUT -L http://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}/_template/so-$TEMPLATE -H 'Content-Type: application/json' -d@$i 2>/dev/null; echo; done
{% endif %}
echo


@@ -59,6 +59,6 @@ if [[ $? -eq 0 ]]; then
echo "Successfully added user to Fleet"
else
echo "Unable to add user to Fleet; user might already exist"
echo $resp
echo "$MYSQL_OUTPUT"
exit 2
fi


@@ -16,7 +16,7 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
{% if grains['role'] in ['so-node','so-heavynode'] %}
curl -X GET -k https://localhost:9200/_cat/indices?v
curl -X GET -k -L https://localhost:9200/_cat/indices?v
{% else %}
curl -X GET localhost:9200/_cat/indices?v
curl -X GET -L localhost:9200/_cat/indices?v
{% endif %}


@@ -23,7 +23,7 @@
KIBANA_HOST={{ MANAGER }}
KSO_PORT=5601
OUTFILE="saved_objects.ndjson"
curl -s -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -XPOST $KIBANA_HOST:$KSO_PORT/api/saved_objects/_export -d '{ "type": [ "index-pattern", "config", "visualization", "dashboard", "search" ], "excludeExportDetails": false }' > $OUTFILE
curl -s -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -XPOST -L $KIBANA_HOST:$KSO_PORT/api/saved_objects/_export -d '{ "type": [ "index-pattern", "config", "visualization", "dashboard", "search" ], "excludeExportDetails": false }' > $OUTFILE
# Clean up using PLACEHOLDER
sed -i "s/$KIBANA_HOST/PLACEHOLDER/g" $OUTFILE


@@ -27,10 +27,15 @@ ERROR_STRING="ERROR"
SUCCESS_STRING="OK"
PENDING_STRING="PENDING"
MISSING_STRING='MISSING'
DISABLED_STRING='DISABLED'
CALLER=$(ps -o comm= $PPID)
declare -a BAD_STATUSES=("removing" "paused" "exited" "dead")
declare -a PENDING_STATUSES=("paused" "created" "restarting")
declare -a GOOD_STATUSES=("running")
declare -a DISABLED_CONTAINERS=()
{%- if salt['pillar.get']('steno:enabled', 'True') is sameas false %}
DISABLED_CONTAINERS+=("so-steno")
{%- endif %}
declare -a temp_container_name_list=()
declare -a temp_container_state_list=()
@@ -104,6 +109,7 @@ populate_container_lists() {
parse_status() {
local container_state=${1}
local service_name=${2}
[[ $container_state = "missing" ]] && printf $MISSING_STRING && return 1
@@ -117,7 +123,13 @@ parse_status() {
# This is technically not needed since the default is error state
for state in "${BAD_STATUSES[@]}"; do
[[ $container_state = "$state" ]] && printf $ERROR_STRING && return 1
if [[ " ${DISABLED_CONTAINERS[@]} " =~ " ${service_name} " ]]; then
printf $DISABLED_STRING
return 0
elif [[ $container_state = "$state" ]]; then
printf $ERROR_STRING
return 1
fi
done
printf $ERROR_STRING && return 1
@@ -127,7 +139,7 @@ parse_status() {
print_line() {
local service_name=${1}
local service_state="$( parse_status ${2} )"
local service_state="$( parse_status ${2} ${1} )"
local columns=$(tput cols)
local state_color="\e[0m"
@@ -137,7 +149,7 @@ print_line() {
state_color="\e[1;31m"
elif [[ $service_state = "$SUCCESS_STRING" ]]; then
state_color="\e[1;32m"
elif [[ $service_state = "$PENDING_STRING" ]]; then
elif [[ $service_state = "$PENDING_STRING" ]] || [[ $service_state = "$DISABLED_STRING" ]]; then
state_color="\e[1;33m"
fi
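With these changes so-status distinguishes intentionally disabled services from failed ones: when steno:enabled is false in the pillar, so-steno is added to DISABLED_CONTAINERS, parse_status receives the service name alongside its container state, and a disabled service prints DISABLED in the same yellow used for PENDING instead of ERROR. A minimal sketch of the membership test used there (the wrapper function is illustrative):

```bash
declare -a DISABLED_CONTAINERS=("so-steno")   # populated from the pillar in the real script

is_disabled() {
  local service_name=$1
  # Space-padded flattening of the array turns the check into a literal substring match
  [[ " ${DISABLED_CONTAINERS[@]} " =~ " ${service_name} " ]]
}

is_disabled "so-steno"    && echo "so-steno: DISABLED"
is_disabled "so-logstash" || echo "so-logstash: not disabled"
```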


@@ -31,7 +31,7 @@ fi
USER=$1
THEHIVE_KEY=$(lookup_pillar hivekey)
THEHIVE_IP=$(lookup_pillar managerip)
THEHIVE_API_URL="$(lookup_pillar url_base)/thehive/api"
THEHIVE_USER=$USER
# Read password for new user from stdin
@@ -47,7 +47,7 @@ if ! check_password "$THEHIVE_PASS"; then
fi
# Create new user in TheHive
resp=$(curl -sk -XPOST -H "Authorization: Bearer $THEHIVE_KEY" -H "Content-Type: application/json" "https://$THEHIVE_IP/thehive/api/user" -d "{\"login\" : \"$THEHIVE_USER\",\"name\" : \"$THEHIVE_USER\",\"roles\" : [\"read\",\"alert\",\"write\",\"admin\"],\"preferences\" : \"{}\",\"password\" : \"$THEHIVE_PASS\"}")
resp=$(curl -sk -XPOST -H "Authorization: Bearer $THEHIVE_KEY" -H "Content-Type: application/json" -L "https://$THEHIVE_API_URL/user" -d "{\"login\" : \"$THEHIVE_USER\",\"name\" : \"$THEHIVE_USER\",\"roles\" : [\"read\",\"alert\",\"write\",\"admin\"],\"preferences\" : \"{}\",\"password\" : \"$THEHIVE_PASS\"}")
if [[ "$resp" =~ \"status\":\"Ok\" ]]; then
echo "Successfully added user to TheHive"
else


@@ -31,7 +31,7 @@ fi
USER=$1
THEHIVE_KEY=$(lookup_pillar hivekey)
THEHIVE_IP=$(lookup_pillar managerip)
THEHIVE_API_URL="$(lookup_pillar url_base)/thehive/api"
THEHIVE_USER=$USER
case "${2^^}" in
@@ -46,7 +46,7 @@ case "${2^^}" in
;;
esac
resp=$(curl -sk -XPATCH -H "Authorization: Bearer $THEHIVE_KEY" -H "Content-Type: application/json" "https://$THEHIVE_IP/thehive/api/user/${THEHIVE_USER}" -d "{\"status\":\"${THEHIVE_STATUS}\" }")
resp=$(curl -sk -XPATCH -H "Authorization: Bearer $THEHIVE_KEY" -H "Content-Type: application/json" -L "https://$THEHIVE_API_URL/user/${THEHIVE_USER}" -d "{\"status\":\"${THEHIVE_STATUS}\" }")
if [[ "$resp" =~ \"status\":\"Locked\" || "$resp" =~ \"status\":\"Ok\" ]]; then
echo "Successfully updated user in TheHive"
else


@@ -56,14 +56,14 @@ function verifyEnvironment() {
require "openssl"
require "sqlite3"
[[ ! -f $databasePath ]] && fail "Unable to find database file; specify path via KRATOS_DB_PATH environment variable"
response=$(curl -Ss ${kratosUrl}/)
response=$(curl -Ss -L ${kratosUrl}/)
[[ "$response" != "404 page not found" ]] && fail "Unable to communicate with Kratos; specify URL via KRATOS_URL environment variable"
}
function findIdByEmail() {
email=$1
response=$(curl -Ss ${kratosUrl}/identities)
response=$(curl -Ss -L ${kratosUrl}/identities)
identityId=$(echo "${response}" | jq ".[] | select(.verifiable_addresses[0].value == \"$email\") | .id")
echo $identityId
}
@@ -113,7 +113,7 @@ function updatePassword() {
}
function listUsers() {
response=$(curl -Ss ${kratosUrl}/identities)
response=$(curl -Ss -L ${kratosUrl}/identities)
[[ $? != 0 ]] && fail "Unable to communicate with Kratos"
echo "${response}" | jq -r ".[] | .verifiable_addresses[0].value" | sort
@@ -131,7 +131,7 @@ function createUser() {
EOF
)
response=$(curl -Ss ${kratosUrl}/identities -d "$addUserJson")
response=$(curl -Ss -L ${kratosUrl}/identities -d "$addUserJson")
[[ $? != 0 ]] && fail "Unable to communicate with Kratos"
identityId=$(echo "${response}" | jq ".id")
@@ -153,7 +153,7 @@ function updateStatus() {
identityId=$(findIdByEmail "$email")
[[ ${identityId} == "" ]] && fail "User not found"
response=$(curl -Ss "${kratosUrl}/identities/$identityId")
response=$(curl -Ss -L "${kratosUrl}/identities/$identityId")
[[ $? != 0 ]] && fail "Unable to communicate with Kratos"
oldConfig=$(echo "select config from identity_credentials where identity_id=${identityId};" | sqlite3 "$databasePath")
@@ -171,7 +171,7 @@ function updateStatus() {
fi
updatedJson=$(echo "$response" | jq ".traits.status = \"$status\" | del(.verifiable_addresses) | del(.id) | del(.schema_url)")
response=$(curl -Ss -XPUT ${kratosUrl}/identities/$identityId -d "$updatedJson")
response=$(curl -Ss -XPUT -L ${kratosUrl}/identities/$identityId -d "$updatedJson")
[[ $? != 0 ]] && fail "Unable to mark user as locked"
}
@@ -191,7 +191,7 @@ function deleteUser() {
identityId=$(findIdByEmail "$email")
[[ ${identityId} == "" ]] && fail "User not found"
response=$(curl -Ss -XDELETE "${kratosUrl}/identities/$identityId")
response=$(curl -Ss -XDELETE -L "${kratosUrl}/identities/$identityId")
[[ $? != 0 ]] && fail "Unable to communicate with Kratos"
}


@@ -0,0 +1,17 @@
#!/bin/bash
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
docker exec -it so-wazuh /usr/bin/node /var/ossec/api/configuration/auth/htpasswd /var/ossec/api/configuration/auth/user $1


@@ -0,0 +1,17 @@
#!/bin/bash
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
docker exec -it so-wazuh /usr/bin/node /var/ossec/api/configuration/auth/htpasswd /var/ossec/api/configuration/auth/user $1


@@ -0,0 +1,17 @@
#!/bin/bash
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
docker exec -it so-wazuh /usr/bin/node /var/ossec/api/configuration/auth/htpasswd -D /var/ossec/api/configuration/auth/user $1


@@ -47,7 +47,7 @@ airgap_mounted() {
echo "If you just copied the .iso file over you can specify the path."
echo "If you burned the ISO to a disk the standard way you can specify the device."
echo "Example: /home/user/securityonion-2.X.0.iso"
echo "Example: /dev/cdrom"
echo "Example: /dev/sdx1"
echo ""
read -p 'Enter the location of the iso: ' ISOLOC
if [ -f $ISOLOC ]; then
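The prompt example now points at a device partition such as /dev/sdx1 rather than /dev/cdrom. Whichever form is entered, the installer presumably ends up mounting it; roughly (mount point and options are illustrative, not taken from the installer):

```bash
# ISO file copied onto the box: loop-mount it
sudo mount -o loop,ro /home/user/securityonion-2.X.0.iso /mnt/airgap

# ISO written to a USB stick: mount the first partition of the device
sudo mount -o ro /dev/sdx1 /mnt/airgap
```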


@@ -37,9 +37,9 @@ LOG="/opt/so/log/curator/so-curator-closed-delete.log"
# If both conditions are true, keep on looping until one of the conditions is false.
while [[ $(du -hs --block-size=1GB /nsm/elasticsearch/nodes | awk '{print $1}' ) -gt "{{LOG_SIZE_LIMIT}}" ]] &&
{% if grains['role'] in ['so-node','so-heavynode'] %}
curl -s -k https://{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices?h=index\&expand_wildcards=closed > /dev/null; do
curl -s -k -L https://{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices?h=index\&expand_wildcards=closed > /dev/null; do
{% else %}
curl -s {{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices?h=index\&expand_wildcards=closed > /dev/null; do
curl -s -L {{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices?h=index\&expand_wildcards=closed > /dev/null; do
{% endif %}
# We need to determine OLDEST_INDEX:
@@ -47,16 +47,16 @@ curl -s {{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices?h=index\&expa
# Then, sort by date by telling sort to use hyphen as delimiter and then sort on the third field.
# Finally, select the first entry in that sorted list.
{% if grains['role'] in ['so-node','so-heavynode'] %}
OLDEST_INDEX=$(curl -s -k https://{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices?h=index\&expand_wildcards=closed | sort -t- -k3 | head -1)
OLDEST_INDEX=$(curl -s -k -L https://{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices?h=index\&expand_wildcards=closed | sort -t- -k3 | head -1)
{% else %}
OLDEST_INDEX=$(curl -s {{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices?h=index\&expand_wildcards=closed | sort -t- -k3 | head -1)
OLDEST_INDEX=$(curl -s -L {{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices?h=index\&expand_wildcards=closed | sort -t- -k3 | head -1)
{% endif %}
# Now that we've determined OLDEST_INDEX, ask Elasticsearch to delete it.
{% if grains['role'] in ['so-node','so-heavynode'] %}
curl -XDELETE -k https://{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/${OLDEST_INDEX}
curl -XDELETE -k -L https://{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/${OLDEST_INDEX}
{% else %}
curl -XDELETE {{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/${OLDEST_INDEX}
curl -XDELETE -L {{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/${OLDEST_INDEX}
{% endif %}
# Finally, write a log entry that says we deleted it.


@@ -16,7 +16,7 @@ class PlaybookESAlerter(Alerter):
today = strftime("%Y.%m.%d", gmtime())
timestamp = strftime("%Y-%m-%d"'T'"%H:%M:%S", gmtime())
headers = {"Content-Type": "application/json"}
payload = {"rule": { "name": self.rule['play_title'],"uuid": self.rule['play_id'],"category": self.rule['rule.category']},"event":{ "severity": self.rule['event.severity'],"module": self.rule['event.module'],"dataset": self.rule['event.dataset'],"severity_label": self.rule['sigma_level']},"kibana_pivot": self.rule['kibana_pivot'],"soc_pivot": self.rule['soc_pivot'],"play_url": self.rule['play_url'],"sigma_level": self.rule['sigma_level'],"event_data": match, "@timestamp": timestamp}
payload = {"rule": { "name": self.rule['play_title'],"case_template": self.rule['play_id'],"uuid": self.rule['play_id'],"category": self.rule['rule.category']},"event":{ "severity": self.rule['event.severity'],"module": self.rule['event.module'],"dataset": self.rule['event.dataset'],"severity_label": self.rule['sigma_level']},"kibana_pivot": self.rule['kibana_pivot'],"soc_pivot": self.rule['soc_pivot'],"play_url": self.rule['play_url'],"sigma_level": self.rule['sigma_level'],"event_data": match, "@timestamp": timestamp}
url = f"http://{self.rule['elasticsearch_host']}/so-playbook-alerts-{today}/_doc/"
requests.post(url, data=json.dumps(payload), headers=headers, verify=False)


@@ -6,16 +6,16 @@
{ "rename": { "field": "message2.scan", "target_field": "scan", "ignore_missing": true } },
{ "rename": { "field": "message2.request", "target_field": "request", "ignore_missing": true } },
{ "rename": { "field": "scan.hash", "target_field": "hash", "ignore_missing": true } },
{ "rename": { "field": "scan.exiftool", "target_field": "exiftool", "ignore_missing": true } },
{ "grok": { "if": "ctx.request?.attributes?.filename != null", "field": "request.attributes.filename", "patterns": ["-%{WORD:log.id.fuid}-"], "ignore_failure": true } },
{ "foreach":
{
"if": "ctx.scan?.exiftool?.keys !=null",
"field": "scan.exiftool.keys",
"if": "ctx.exiftool?.keys !=null",
"field": "exiftool.keys",
"processor":{
"set": {
"field": "scan.exiftool.{{_ingest._value.key}}",
"value": "{{_ingest._value.value}}"
"append": {
"field": "scan.exiftool",
"value": "{{_ingest._value.key}}={{_ingest._value.value}}"
}
}
}
@@ -42,7 +42,8 @@
{ "set": { "if": "ctx.rule?.score != null && ctx.rule?.score >= 70 && ctx.rule?.score <=89", "field": "event.severity", "value": 3, "override": true } },
{ "set": { "if": "ctx.rule?.score != null && ctx.rule?.score >= 90", "field": "event.severity", "value": 4, "override": true } },
{ "set": { "field": "observer.name", "value": "{{agent.name}}" }},
{ "remove": { "field": ["host", "path", "message", "scan.exiftool.keys", "scan.yara.meta"], "ignore_missing": true } },
{ "convert" : { "field" : "scan.exiftool","type": "string", "ignore_missing":true }},
{ "remove": { "field": ["host", "path", "message", "exiftool", "scan.yara.meta"], "ignore_missing": true } },
{ "pipeline": { "name": "common" } }
]
}
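Instead of exploding every exiftool key into its own dynamic object field, each pair is now appended to scan.exiftool as a "key=value" string and converted to string, and the index template change further below maps scan.exiftool as plain text, presumably to keep the mapping from growing with every new key. The foreach/append behaviour can be checked with the ingest simulate API; a hedged sketch using an inline pipeline and a made-up sample document:

```bash
curl -s -XPOST "localhost:9200/_ingest/pipeline/_simulate" \
  -H 'Content-Type: application/json' -d '
{
  "pipeline": {
    "processors": [
      { "foreach": {
          "if": "ctx.exiftool?.keys != null",
          "field": "exiftool.keys",
          "processor": {
            "append": { "field": "scan.exiftool",
                        "value": "{{_ingest._value.key}}={{_ingest._value.value}}" }
          }
      } },
      { "convert": { "field": "scan.exiftool", "type": "string", "ignore_missing": true } }
    ]
  },
  "docs": [ { "_source": { "exiftool": { "keys": [ { "key": "FileType", "value": "EXE" } ] } } } ]
}' | jq '.docs[0].doc._source.scan.exiftool'
# Expected output: [ "FileType=EXE" ]
```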


@@ -28,9 +28,9 @@ COUNT=0
ELASTICSEARCH_CONNECTED="no"
while [[ "$COUNT" -le 240 ]]; do
{% if grains['role'] in ['so-node','so-heavynode'] %}
curl ${ELASTICSEARCH_AUTH} -k --output /dev/null --silent --head --fail https://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT"
curl ${ELASTICSEARCH_AUTH} -k --output /dev/null --silent --head --fail -L https://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT"
{% else %}
curl ${ELASTICSEARCH_AUTH} --output /dev/null --silent --head --fail http://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT"
curl ${ELASTICSEARCH_AUTH} --output /dev/null --silent --head --fail -L http://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT"
{% endif %}
if [ $? -eq 0 ]; then
ELASTICSEARCH_CONNECTED="yes"
@@ -52,9 +52,9 @@ cd ${ELASTICSEARCH_INGEST_PIPELINES}
echo "Loading pipelines..."
{% if grains['role'] in ['so-node','so-heavynode'] %}
for i in *; do echo $i; RESPONSE=$(curl ${ELASTICSEARCH_AUTH} -k -XPUT https://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}/_ingest/pipeline/$i -H 'Content-Type: application/json' -d@$i 2>/dev/null); echo $RESPONSE; if [[ "$RESPONSE" == *"error"* ]]; then RETURN_CODE=1; fi; done
for i in *; do echo $i; RESPONSE=$(curl ${ELASTICSEARCH_AUTH} -k -XPUT -L https://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}/_ingest/pipeline/$i -H 'Content-Type: application/json' -d@$i 2>/dev/null); echo $RESPONSE; if [[ "$RESPONSE" == *"error"* ]]; then RETURN_CODE=1; fi; done
{% else %}
for i in *; do echo $i; RESPONSE=$(curl ${ELASTICSEARCH_AUTH} -XPUT http://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}/_ingest/pipeline/$i -H 'Content-Type: application/json' -d@$i 2>/dev/null); echo $RESPONSE; if [[ "$RESPONSE" == *"error"* ]]; then RETURN_CODE=1; fi; done
for i in *; do echo $i; RESPONSE=$(curl ${ELASTICSEARCH_AUTH} -XPUT -L http://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}/_ingest/pipeline/$i -H 'Content-Type: application/json' -d@$i 2>/dev/null); echo $RESPONSE; if [[ "$RESPONSE" == *"error"* ]]; then RETURN_CODE=1; fi; done
{% endif %}
echo


@@ -379,9 +379,14 @@
}
}
},
"scan":{
"scan":{
"type":"object",
"dynamic": true
"dynamic": true,
"properties":{
"exiftool":{
"type":"text"
}
}
},
"server":{
"type":"object",


@@ -1,3 +1,4 @@
{%- set DNET = salt['pillar.get']('global:dockernet', '172.17.0.0') %}
firewall:
hostgroups:
anywhere:
@@ -9,7 +10,7 @@ firewall:
ips:
delete:
insert:
- 172.17.0.0/24
- {{ DNET }}/24
localhost:
ips:
delete:


@@ -12,6 +12,8 @@
{% else %}
{% set MAINIP = salt['pillar.get']('global:managerip') %}
{% endif %}
{% set DNET = salt['pillar.get']('global:dockernet', '172.17.0.0') %}
include:
- mysql
@@ -71,7 +73,7 @@ fleetdb:
fleetdbuser:
mysql_user.present:
- host: 172.17.0.0/255.255.0.0
- host: {{ DNET }}/255.255.0.0
- password: {{ FLEETPASS }}
- connection_host: {{ MAINIP }}
- connection_port: 3306
@@ -85,7 +87,7 @@ fleetdbpriv:
- grant: all privileges
- database: fleet.*
- user: fleetdbuser
- host: 172.17.0.0/255.255.0.0
- host: {{ DNET }}/255.255.0.0
- connection_host: {{ MAINIP }}
- connection_port: 3306
- connection_user: root


@@ -58,11 +58,12 @@ rulesdir:
- makedirs: True
synclocalnidsrules:
file.managed:
- name: /opt/so/rules/nids/local.rules
- source: salt://idstools/local.rules
file.recurse:
- name: /opt/so/rules/nids/
- source: salt://idstools/
- user: 939
- group: 939
- include_pat: 'E@.rules'
so-idstools:
docker_container.running:
@@ -81,4 +82,4 @@ idstools_state_not_allowed:
test.fail_without_changes:
- name: idstools_state_not_allowed
{% endif%}
{% endif%}


@@ -4,7 +4,7 @@ echo -n "Waiting for ElasticSearch..."
COUNT=0
ELASTICSEARCH_CONNECTED="no"
while [[ "$COUNT" -le 30 ]]; do
curl --output /dev/null --silent --head --fail http://{{ ES }}:9200
curl --output /dev/null --silent --head --fail -L http://{{ ES }}:9200
if [ $? -eq 0 ]; then
ELASTICSEARCH_CONNECTED="yes"
echo "connected!"
@@ -28,7 +28,7 @@ MAX_WAIT=240
# Check to see if Kibana is available
wait_step=0
until curl -s -XGET http://{{ ES }}:5601 > /dev/null ; do
until curl -s -XGET -L http://{{ ES }}:5601 > /dev/null ; do
wait_step=$(( ${wait_step} + 1 ))
echo "Waiting on Kibana...Attempt #$wait_step"
if [ ${wait_step} -gt ${MAX_WAIT} ]; then
@@ -42,12 +42,12 @@ wait_step=0
# Apply Kibana template
echo
echo "Applying Kibana template..."
curl -s -XPUT http://{{ ES }}:9200/_template/kibana \
curl -s -XPUT -L http://{{ ES }}:9200/_template/kibana \
-H 'Content-Type: application/json' \
-d'{"index_patterns" : ".kibana", "settings": { "number_of_shards" : 1, "number_of_replicas" : 0 }, "mappings" : { "search": {"properties": {"hits": {"type": "integer"}, "version": {"type": "integer"}}}}}'
echo
curl -s -XPUT "{{ ES }}:9200/.kibana/_settings" \
curl -s -XPUT -L "{{ ES }}:9200/.kibana/_settings" \
-H 'Content-Type: application/json' \
-d'{"index" : {"number_of_replicas" : 0}}'
echo


@@ -173,6 +173,7 @@ so-logstash:
- /sys/fs/cgroup:/sys/fs/cgroup:ro
- /etc/pki/filebeat.crt:/usr/share/logstash/filebeat.crt:ro
- /etc/pki/filebeat.p8:/usr/share/logstash/filebeat.key:ro
- /opt/so/conf/logstash/etc/certs:/usr/share/logstash/certs:ro
{% if grains['role'] == 'so-heavynode' %}
- /etc/ssl/certs/intca.crt:/usr/share/filebeat/ca.crt:ro
{% else %}


@@ -1,22 +1,26 @@
{%- set managerip = salt['pillar.get']('manager:mainip', '') %}
{%- set FLEET_MANAGER = salt['pillar.get']('global:fleet_manager') %}
{%- set FLEET_NODE = salt['pillar.get']('global:fleet_node') %}
{%- set FLEET_IP = salt['pillar.get']('global:fleet_ip', None) %}
{%- set ISAIRGAP = salt['pillar.get']('global:airgap', 'False') %}
{%- set role = grains.id.split('_') | last %}
{%- if role == 'fleet' %}
{% set mainint = salt['pillar.get']('host:mainint') %}
{% set main_ip = salt['grains.get']('ip_interfaces:' ~ mainint)[0] %}
{%- endif %}
{%- set manager_ip = salt['pillar.get']('manager:mainip', '') %}
{%- set url_base = salt['pillar.get']('global:url_base') %}
{%- set fleet_manager = salt['pillar.get']('global:fleet_manager') %}
{%- set fleet_node = salt['pillar.get']('global:fleet_node') %}
{%- set fleet_ip = salt['pillar.get']('global:fleet_ip', None) %}
{%- set airgap = salt['pillar.get']('global:airgap', 'False') %}
# For more information on configuration, see:
# * Official English Documentation: http://nginx.org/en/docs/
# * Official Russian Documentation: http://nginx.org/ru/docs/
worker_processes auto;
error_log /var/log/nginx/error.log;
pid /run/nginx.pid;
# Load dynamic modules. See /usr/share/nginx/README.dynamic.
include /usr/share/nginx/modules/*.conf;
events {
worker_connections 1024;
worker_connections 1024;
}
http {
@@ -33,62 +37,17 @@ http {
types_hash_max_size 2048;
client_max_body_size 2500M;
server_tokens off;
include /etc/nginx/mime.types;
default_type application/octet-stream;
# Load modular configuration files from the /etc/nginx/conf.d directory.
# See http://nginx.org/en/docs/ngx_core_module.html#include
# for more information.
include /etc/nginx/conf.d/*.conf;
#server {
# listen 80 default_server;
# listen [::]:80 default_server;
# server_name _;
# root /opt/socore/html;
# index index.html;
# Load configuration files for the default server block.
#include /etc/nginx/default.d/*.conf;
# location / {
# }
# error_page 404 /404.html;
# location = /40x.html {
# }
# error_page 500 502 503 504 /50x.html;
# location = /50x.html {
# }
#}
server {
listen 80 default_server;
server_name _;
return 301 https://$host$request_uri;
}
{%- if ISAIRGAP is sameas true %}
server {
listen 7788;
server_name _;
root /opt/socore/html/repo;
location /rules/ {
allow all;
sendfile on;
sendfile_max_chunk 1m;
autoindex on;
autoindex_exact_size off;
autoindex_format html;
autoindex_localtime on;
}
}
{%- endif %}
{% if FLEET_MANAGER %}
{%- if fleet_manager or role == 'fleet' %}
server {
listen 8090 ssl http2 default_server;
server_name _;
server_name {{ url_base }};
root /opt/socore/html;
index blank.html;
@@ -100,20 +59,47 @@ http {
ssl_prefer_server_ciphers on;
location ~ ^/kolide.agent.Api/(RequestEnrollment|RequestConfig|RequestQueries|PublishLogs|PublishResults|CheckHealth)$ {
grpc_pass grpcs://{{ managerip }}:8080;
{%- if role == 'fleet' %}
grpc_pass grpcs://{{ main_ip }}:8080;
{%- else %}
grpc_pass grpcs://{{ manager_ip }}:8080;
{%- endif %}
grpc_set_header Host $host;
grpc_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_buffering off;
}
}
{% endif %}
{%- endif %}
# Settings for a TLS enabled server.
{%- if role in ['eval', 'managersearch', 'manager', 'standalone', 'fleet', 'import'] %}
server {
listen 443 ssl http2 default_server;
#listen [::]:443 ssl http2 default_server;
server_name _;
listen 80 default_server;
server_name _;
return 307 https://{{ url_base }}$request_uri;
}
server {
listen 443 ssl http2 default_server;
server_name _;
return 307 https://{{ url_base }}$request_uri;
ssl_certificate "/etc/pki/nginx/server.crt";
ssl_certificate_key "/etc/pki/nginx/server.key";
ssl_session_cache shared:SSL:1m;
ssl_session_timeout 10m;
ssl_ciphers HIGH:!aNULL:!MD5;
ssl_prefer_server_ciphers on;
ssl_protocols TLSv1.2;
}
{%- endif %}
{%- if role == 'fleet' %}
server {
listen 443 ssl http2;
server_name {{ url_base }};
root /opt/socore/html;
index index.html;
@@ -123,12 +109,57 @@ http {
ssl_session_timeout 10m;
ssl_ciphers HIGH:!aNULL:!MD5;
ssl_prefer_server_ciphers on;
ssl_protocols TLSv1.2;
# Load configuration files for the default server block.
#include /etc/nginx/default.d/*.conf;
location /fleet/ {
proxy_pass https://{{ main_ip }}:8080;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
error_page 500 502 503 504 /50x.html;
location = /usr/share/nginx/html/50x.html {
}
}
{%- elif role in ['eval', 'managersearch', 'manager', 'standalone', 'import'] %}
{%- if airgap is sameas true %}
server {
listen 7788;
server_name {{ url_base }};
root /opt/socore/html/repo;
location /rules/ {
allow all;
sendfile on;
sendfile_max_chunk 1m;
autoindex on;
autoindex_exact_size off;
autoindex_format html;
autoindex_localtime on;
}
}
{%- endif %}
server {
listen 443 ssl http2;
server_name {{ url_base }};
root /opt/socore/html;
index index.html;
ssl_certificate "/etc/pki/nginx/server.crt";
ssl_certificate_key "/etc/pki/nginx/server.key";
ssl_session_cache shared:SSL:1m;
ssl_session_timeout 10m;
ssl_ciphers HIGH:!aNULL:!MD5;
ssl_prefer_server_ciphers on;
ssl_protocols TLSv1.2;
location ~* (^/login/|^/js/.*|^/css/.*|^/images/.*) {
proxy_pass http://{{ managerip }}:9822;
proxy_pass http://{{ manager_ip }}:9822;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -142,7 +173,7 @@ http {
location / {
auth_request /auth/sessions/whoami;
proxy_pass http://{{ managerip }}:9822/;
proxy_pass http://{{ manager_ip }}:9822/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -156,7 +187,7 @@ http {
location ~ ^/auth/.*?(whoami|login|logout|settings) {
rewrite /auth/(.*) /$1 break;
proxy_pass http://{{ managerip }}:4433;
proxy_pass http://{{ manager_ip }}:4433;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -200,7 +231,7 @@ http {
proxy_set_header X-Forwarded-Proto $scheme;
}
{%- if ISAIRGAP is sameas true %}
{%- if airgap is sameas true %}
location /repo/ {
allow all;
sendfile on;
@@ -210,13 +241,12 @@ http {
autoindex_format html;
autoindex_localtime on;
}
{%- endif %}
location /grafana/ {
auth_request /auth/sessions/whoami;
rewrite /grafana/(.*) /$1 break;
proxy_pass http://{{ managerip }}:3000/;
proxy_pass http://{{ manager_ip }}:3000/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -229,7 +259,7 @@ http {
location /kibana/ {
auth_request /auth/sessions/whoami;
rewrite /kibana/(.*) /$1 break;
proxy_pass http://{{ managerip }}:5601/;
proxy_pass http://{{ manager_ip }}:5601/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -240,7 +270,7 @@ http {
}
location /nodered/ {
proxy_pass http://{{ managerip }}:1880/;
proxy_pass http://{{ manager_ip }}:1880/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -253,7 +283,7 @@ http {
}
location /playbook/ {
proxy_pass http://{{ managerip }}:3200/playbook/;
proxy_pass http://{{ manager_ip }}:3200/playbook/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -263,13 +293,16 @@ http {
proxy_set_header X-Forwarded-Proto $scheme;
}
{%- if FLEET_NODE %}
{%- if fleet_node %}
location /fleet/ {
return 301 https://{{ FLEET_IP }}/fleet;
return 307 https://{{ fleet_ip }}/fleet;
}
{%- else %}
{%- else %}
location /fleet/ {
proxy_pass https://{{ managerip }}:8080;
proxy_pass https://{{ manager_ip }}:8080;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -278,10 +311,11 @@ http {
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
{%- endif %}
{%- endif %}
location /thehive/ {
proxy_pass http://{{ managerip }}:9000/thehive/;
proxy_pass http://{{ manager_ip }}:9000/thehive/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_http_version 1.1; # this is essential for chunked responses to work
@@ -293,7 +327,7 @@ http {
}
location /cortex/ {
proxy_pass http://{{ managerip }}:9001/cortex/;
proxy_pass http://{{ manager_ip }}:9001/cortex/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_http_version 1.1; # this is essential for chunked responses to work
@@ -305,7 +339,7 @@ http {
}
location /soctopus/ {
proxy_pass http://{{ managerip }}:7000/;
proxy_pass http://{{ manager_ip }}:7000/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -331,7 +365,7 @@ http {
if ($http_authorization = "") {
return 403;
}
proxy_pass http://{{ managerip }}:9822/;
proxy_pass http://{{ manager_ip }}:9822/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -345,16 +379,12 @@ http {
location @error401 {
add_header Set-Cookie "AUTH_REDIRECT=$request_uri;Path=/;Max-Age=14400";
return 302 /auth/self-service/browser/flows/login;
return 302 /auth/self-service/login/browser;
}
#error_page 404 /404.html;
# location = /40x.html {
#}
error_page 500 502 503 504 /50x.html;
location = /usr/share/nginx/html/50x.html {
}
}
{%- endif %}
}


@@ -1,361 +0,0 @@
{%- set managerip = salt['pillar.get']('manager:mainip', '') %}
{%- set FLEET_MANAGER = salt['pillar.get']('global:fleet_manager') %}
{%- set FLEET_NODE = salt['pillar.get']('global:fleet_node') %}
{%- set FLEET_IP = salt['pillar.get']('global:fleet_ip', None) %}
{%- set ISAIRGAP = salt['pillar.get']('global:airgap', 'False') %}
# For more information on configuration, see:
# * Official English Documentation: http://nginx.org/en/docs/
# * Official Russian Documentation: http://nginx.org/ru/docs/
worker_processes auto;
error_log /var/log/nginx/error.log;
pid /run/nginx.pid;
# Load dynamic modules. See /usr/share/nginx/README.dynamic.
include /usr/share/nginx/modules/*.conf;
events {
worker_connections 1024;
}
http {
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
client_max_body_size 2500M;
include /etc/nginx/mime.types;
default_type application/octet-stream;
# Load modular configuration files from the /etc/nginx/conf.d directory.
# See http://nginx.org/en/docs/ngx_core_module.html#include
# for more information.
include /etc/nginx/conf.d/*.conf;
#server {
# listen 80 default_server;
# listen [::]:80 default_server;
# server_name _;
# root /opt/socore/html;
# index index.html;
# Load configuration files for the default server block.
#include /etc/nginx/default.d/*.conf;
# location / {
# }
# error_page 404 /404.html;
# location = /40x.html {
# }
# error_page 500 502 503 504 /50x.html;
# location = /50x.html {
# }
#}
server {
listen 80 default_server;
server_name _;
return 301 https://$host$request_uri;
}
{%- if ISAIRGAP is sameas true %}
server {
listen 7788;
server_name _;
root /opt/socore/html/repo;
location /rules/ {
allow all;
sendfile on;
sendfile_max_chunk 1m;
autoindex on;
autoindex_exact_size off;
autoindex_format html;
autoindex_localtime on;
}
}
{%- endif %}
{% if FLEET_MANAGER %}
server {
listen 8090 ssl http2 default_server;
server_name _;
root /opt/socore/html;
index blank.html;
ssl_certificate "/etc/pki/nginx/server.crt";
ssl_certificate_key "/etc/pki/nginx/server.key";
ssl_session_cache shared:SSL:1m;
ssl_session_timeout 10m;
ssl_ciphers HIGH:!aNULL:!MD5;
ssl_prefer_server_ciphers on;
location ~ ^/kolide.agent.Api/(RequestEnrollment|RequestConfig|RequestQueries|PublishLogs|PublishResults|CheckHealth)$ {
grpc_pass grpcs://{{ managerip }}:8080;
grpc_set_header Host $host;
grpc_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_buffering off;
}
}
{% endif %}
# Settings for a TLS enabled server.
server {
listen 443 ssl http2 default_server;
#listen [::]:443 ssl http2 default_server;
server_name _;
root /opt/socore/html;
index index.html;
ssl_certificate "/etc/pki/nginx/server.crt";
ssl_certificate_key "/etc/pki/nginx/server.key";
ssl_session_cache shared:SSL:1m;
ssl_session_timeout 10m;
ssl_ciphers HIGH:!aNULL:!MD5;
ssl_prefer_server_ciphers on;
# Load configuration files for the default server block.
#include /etc/nginx/default.d/*.conf;
location ~* (^/login/|^/js/.*|^/css/.*|^/images/.*) {
proxy_pass http://{{ managerip }}:9822;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "Upgrade";
proxy_set_header X-Forwarded-Proto $scheme;
}
location / {
auth_request /auth/sessions/whoami;
proxy_pass http://{{ managerip }}:9822/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "Upgrade";
proxy_set_header X-Forwarded-Proto $scheme;
}
location ~ ^/auth/.*?(whoami|login|logout|settings) {
rewrite /auth/(.*) /$1 break;
proxy_pass http://{{ managerip }}:4433;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
location /cyberchef/ {
auth_request /auth/sessions/whoami;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
location /navigator/ {
auth_request /auth/sessions/whoami;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
location /packages/ {
try_files $uri =206;
auth_request /auth/sessions/whoami;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
{%- if ISAIRGAP is sameas true %}
location /repo/ {
allow all;
sendfile on;
sendfile_max_chunk 1m;
autoindex on;
autoindex_exact_size off;
autoindex_format html;
autoindex_localtime on;
}
{%- endif %}
location /grafana/ {
auth_request /auth/sessions/whoami;
rewrite /grafana/(.*) /$1 break;
proxy_pass http://{{ managerip }}:3000/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
location /kibana/ {
auth_request /auth/sessions/whoami;
rewrite /kibana/(.*) /$1 break;
proxy_pass http://{{ managerip }}:5601/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
location /nodered/ {
proxy_pass http://{{ managerip }}:1880/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "Upgrade";
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
location /playbook/ {
proxy_pass http://{{ managerip }}:3200/playbook/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
{%- if FLEET_NODE %}
location /fleet/ {
return 301 https://{{ FLEET_IP }}/fleet;
}
{%- else %}
location /fleet/ {
proxy_pass https://{{ managerip }}:8080;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
{%- endif %}
location /thehive/ {
proxy_pass http://{{ managerip }}:9000/thehive/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_http_version 1.1; # this is essential for chunked responses to work
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
location /cortex/ {
proxy_pass http://{{ managerip }}:9001/cortex/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_http_version 1.1; # this is essential for chunked responses to work
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
location /soctopus/ {
proxy_pass http://{{ managerip }}:7000/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
location /kibana/app/soc/ {
rewrite ^/kibana/app/soc/(.*) /soc/$1 permanent;
}
location /kibana/app/fleet/ {
rewrite ^/kibana/app/fleet/(.*) /fleet/$1 permanent;
}
location /kibana/app/soctopus/ {
rewrite ^/kibana/app/soctopus/(.*) /soctopus/$1 permanent;
}
location /sensoroniagents/ {
if ($http_authorization = "") {
return 403;
}
proxy_pass http://{{ managerip }}:9822/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
error_page 401 = @error401;
location @error401 {
add_header Set-Cookie "AUTH_REDIRECT=$request_uri;Path=/;Max-Age=14400";
return 302 /auth/self-service/browser/flows/login;
}
#error_page 404 /404.html;
# location = /usr/share/nginx/html/40x.html {
#}
error_page 500 502 503 504 /50x.html;
location = /usr/share/nginx/html/50x.html {
}
}
}


@@ -1,100 +0,0 @@
{% set MAININT = salt['pillar.get']('host:mainint') %}
{% set MAINIP = salt['grains.get']('ip_interfaces').get(MAININT)[0] %}
# For more information on configuration, see:
# * Official English Documentation: http://nginx.org/en/docs/
# * Official Russian Documentation: http://nginx.org/ru/docs/
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log;
pid /run/nginx.pid;
# Load dynamic modules. See /usr/share/nginx/README.dynamic.
include /usr/share/nginx/modules/*.conf;
events {
worker_connections 1024;
}
http {
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
include /etc/nginx/mime.types;
default_type application/octet-stream;
include /etc/nginx/conf.d/*.conf;
server {
listen 80 default_server;
server_name _;
return 301 https://$host$request_uri;
}
server {
listen 8090 ssl http2 default_server;
server_name _;
root /opt/socore/html;
index blank.html;
ssl_certificate "/etc/pki/nginx/server.crt";
ssl_certificate_key "/etc/pki/nginx/server.key";
ssl_session_cache shared:SSL:1m;
ssl_session_timeout 10m;
ssl_ciphers HIGH:!aNULL:!MD5;
ssl_prefer_server_ciphers on;
location ~ ^/kolide.agent.Api/(RequestEnrollment|RequestConfig|RequestQueries|PublishLogs|PublishResults|CheckHealth)$ {
grpc_pass grpcs://{{ MAINIP }}:8080;
grpc_set_header Host $host;
grpc_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_buffering off;
}
}
server {
listen 443 ssl http2 default_server;
server_name _;
root /opt/socore/html/packages;
index index.html;
ssl_certificate "/etc/pki/nginx/server.crt";
ssl_certificate_key "/etc/pki/nginx/server.key";
ssl_session_cache shared:SSL:1m;
ssl_session_timeout 10m;
ssl_ciphers HIGH:!aNULL:!MD5;
ssl_prefer_server_ciphers on;
location /fleet/ {
proxy_pass https://{{ MAINIP }}:8080;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
#error_page 404 /404.html;
# location = /40x.html {
#}
error_page 500 502 503 504 /50x.html;
location = /usr/share/nginx/html/50x.html {
}
}
}


@@ -1,89 +0,0 @@
# For more information on configuration, see:
# * Official English Documentation: http://nginx.org/en/docs/
# * Official Russian Documentation: http://nginx.org/ru/docs/
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log;
pid /run/nginx.pid;
# Load dynamic modules. See /usr/share/nginx/README.dynamic.
include /usr/share/nginx/modules/*.conf;
events {
worker_connections 1024;
}
http {
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
include /etc/nginx/mime.types;
default_type application/octet-stream;
# Load modular configuration files from the /etc/nginx/conf.d directory.
# See http://nginx.org/en/docs/ngx_core_module.html#include
# for more information.
include /etc/nginx/conf.d/*.conf;
server {
listen 80 default_server;
listen [::]:80 default_server;
server_name _;
root /usr/share/nginx/html;
# Load configuration files for the default server block.
include /etc/nginx/default.d/*.conf;
location / {
}
error_page 404 /404.html;
location = /40x.html {
}
error_page 500 502 503 504 /50x.html;
location = /50x.html {
}
}
# Settings for a TLS enabled server.
#
# server {
# listen 443 ssl http2 default_server;
# listen [::]:443 ssl http2 default_server;
# server_name _;
# root /usr/share/nginx/html;
#
# ssl_certificate "/etc/pki/nginx/server.crt";
# ssl_certificate_key "/etc/pki/nginx/private/server.key";
# ssl_session_cache shared:SSL:1m;
# ssl_session_timeout 10m;
# ssl_ciphers HIGH:!aNULL:!MD5;
# ssl_prefer_server_ciphers on;
#
# # Load configuration files for the default server block.
# include /etc/nginx/default.d/*.conf;
#
# location / {
# }
#
# #error_page 404 /404.html;
# # location = /40x.html {
# #}
#
# error_page 500 502 503 504 /50x.html;
# location = /usr/share/nginx/html/50x.html {
# }
# }
}


@@ -1,89 +0,0 @@
# For more information on configuration, see:
# * Official English Documentation: http://nginx.org/en/docs/
# * Official Russian Documentation: http://nginx.org/ru/docs/
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log;
pid /run/nginx.pid;
# Load dynamic modules. See /usr/share/nginx/README.dynamic.
include /usr/share/nginx/modules/*.conf;
events {
worker_connections 1024;
}
http {
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
include /etc/nginx/mime.types;
default_type application/octet-stream;
# Load modular configuration files from the /etc/nginx/conf.d directory.
# See http://nginx.org/en/docs/ngx_core_module.html#include
# for more information.
include /etc/nginx/conf.d/*.conf;
server {
listen 80 default_server;
listen [::]:80 default_server;
server_name _;
root /usr/share/nginx/html;
# Load configuration files for the default server block.
include /etc/nginx/default.d/*.conf;
location / {
}
#error_page 404 /404.html;
# location = /40x.html {
#}
error_page 500 502 503 504 /50x.html;
location = /usr/share/nginx/html/50x.html {
}
}
# Settings for a TLS enabled server.
#
# server {
# listen 443 ssl http2 default_server;
# listen [::]:443 ssl http2 default_server;
# server_name _;
# root /usr/share/nginx/html;
#
# ssl_certificate "/etc/pki/nginx/server.crt";
# ssl_certificate_key "/etc/pki/nginx/private/server.key";
# ssl_session_cache shared:SSL:1m;
# ssl_session_timeout 10m;
# ssl_ciphers HIGH:!aNULL:!MD5;
# ssl_prefer_server_ciphers on;
#
# # Load configuration files for the default server block.
# include /etc/nginx/default.d/*.conf;
#
# location / {
# }
#
# error_page 404 /404.html;
# location = /40x.html {
# }
#
# error_page 500 502 503 504 /50x.html;
# location = /50x.html {
# }
# }
}


@@ -1,326 +0,0 @@
{%- set managerip = salt['pillar.get']('manager:mainip', '') %}
{%- set FLEET_MANAGER = salt['pillar.get']('global:fleet_manager') %}
{%- set FLEET_NODE = salt['pillar.get']('global:fleet_node') %}
{%- set FLEET_IP = salt['pillar.get']('global:fleet_ip', None) %}
# For more information on configuration, see:
# * Official English Documentation: http://nginx.org/en/docs/
# * Official Russian Documentation: http://nginx.org/ru/docs/
worker_processes auto;
error_log /var/log/nginx/error.log;
pid /run/nginx.pid;
# Load dynamic modules. See /usr/share/nginx/README.dynamic.
include /usr/share/nginx/modules/*.conf;
events {
worker_connections 1024;
}
http {
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
client_max_body_size 2500M;
include /etc/nginx/mime.types;
default_type application/octet-stream;
# Load modular configuration files from the /etc/nginx/conf.d directory.
# See http://nginx.org/en/docs/ngx_core_module.html#include
# for more information.
include /etc/nginx/conf.d/*.conf;
#server {
# listen 80 default_server;
# listen [::]:80 default_server;
# server_name _;
# root /opt/socore/html;
# index index.html;
# Load configuration files for the default server block.
#include /etc/nginx/default.d/*.conf;
# location / {
# }
# error_page 404 /404.html;
# location = /40x.html {
# }
# error_page 500 502 503 504 /50x.html;
# location = /50x.html {
# }
#}
server {
listen 80 default_server;
server_name _;
return 301 https://$host$request_uri;
}
{% if FLEET_MANAGER %}
server {
listen 8090 ssl http2 default_server;
server_name _;
root /opt/socore/html;
index blank.html;
ssl_certificate "/etc/pki/nginx/server.crt";
ssl_certificate_key "/etc/pki/nginx/server.key";
ssl_session_cache shared:SSL:1m;
ssl_session_timeout 10m;
ssl_ciphers HIGH:!aNULL:!MD5;
ssl_prefer_server_ciphers on;
location ~ ^/kolide.agent.Api/(RequestEnrollment|RequestConfig|RequestQueries|PublishLogs|PublishResults|CheckHealth)$ {
grpc_pass grpcs://{{ managerip }}:8080;
grpc_set_header Host $host;
grpc_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_buffering off;
}
}
{% endif %}
# Settings for a TLS enabled server.
server {
listen 443 ssl http2 default_server;
#listen [::]:443 ssl http2 default_server;
server_name _;
root /opt/socore/html;
index index.html;
ssl_certificate "/etc/pki/nginx/server.crt";
ssl_certificate_key "/etc/pki/nginx/server.key";
ssl_session_cache shared:SSL:1m;
ssl_session_timeout 10m;
ssl_ciphers HIGH:!aNULL:!MD5;
ssl_prefer_server_ciphers on;
# Load configuration files for the default server block.
#include /etc/nginx/default.d/*.conf;
location ~* (^/login/|^/js/.*|^/css/.*|^/images/.*) {
proxy_pass http://{{ managerip }}:9822;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "Upgrade";
proxy_set_header X-Forwarded-Proto $scheme;
}
location / {
auth_request /auth/sessions/whoami;
proxy_pass http://{{ managerip }}:9822/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "Upgrade";
proxy_set_header X-Forwarded-Proto $scheme;
}
location ~ ^/auth/.*?(whoami|login|logout|settings) {
rewrite /auth/(.*) /$1 break;
proxy_pass http://{{ managerip }}:4433;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
location /cyberchef/ {
auth_request /auth/sessions/whoami;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
location /navigator/ {
auth_request /auth/sessions/whoami;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
location /packages/ {
try_files $uri =206;
auth_request /auth/sessions/whoami;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
location /grafana/ {
auth_request /auth/sessions/whoami;
rewrite /grafana/(.*) /$1 break;
proxy_pass http://{{ managerip }}:3000/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
location /kibana/ {
auth_request /auth/sessions/whoami;
rewrite /kibana/(.*) /$1 break;
proxy_pass http://{{ managerip }}:5601/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
location /nodered/ {
proxy_pass http://{{ managerip }}:1880/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "Upgrade";
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
location /playbook/ {
proxy_pass http://{{ managerip }}:3200/playbook/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
{%- if FLEET_NODE %}
location /fleet/ {
return 301 https://{{ FLEET_IP }}/fleet;
}
{%- else %}
location /fleet/ {
proxy_pass https://{{ managerip }}:8080;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
{%- endif %}
location /thehive/ {
proxy_pass http://{{ managerip }}:9000/thehive/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_http_version 1.1; # this is essential for chunked responses to work
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
location /cortex/ {
proxy_pass http://{{ managerip }}:9001/cortex/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_http_version 1.1; # this is essential for chunked responses to work
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
location /soctopus/ {
proxy_pass http://{{ managerip }}:7000/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
location /kibana/app/soc/ {
rewrite ^/kibana/app/soc/(.*) /soc/$1 permanent;
}
location /kibana/app/fleet/ {
rewrite ^/kibana/app/fleet/(.*) /fleet/$1 permanent;
}
location /kibana/app/soctopus/ {
rewrite ^/kibana/app/soctopus/(.*) /soctopus/$1 permanent;
}
location /sensoroniagents/ {
proxy_pass http://{{ managerip }}:9822/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
error_page 401 = @error401;
location @error401 {
add_header Set-Cookie "AUTH_REDIRECT=$request_uri;Path=/;Max-Age=14400";
return 302 /auth/self-service/browser/flows/login;
}
#error_page 404 /404.html;
# location = /usr/share/nginx/html/40x.html {
#}
error_page 500 502 503 504 /50x.html;
location = /usr/share/nginx/html/50x.html {
}
}
}

View File

@@ -1,360 +0,0 @@
{%- set managerip = salt['pillar.get']('manager:mainip', '') %}
{%- set FLEET_MANAGER = salt['pillar.get']('global:fleet_manager') %}
{%- set FLEET_NODE = salt['pillar.get']('global:fleet_node') %}
{%- set FLEET_IP = salt['pillar.get']('global:fleet_ip', None) %}
{%- set ISAIRGAP = salt['pillar.get']('global:airgap', 'False') %}
# For more information on configuration, see:
# * Official English Documentation: http://nginx.org/en/docs/
# * Official Russian Documentation: http://nginx.org/ru/docs/
worker_processes auto;
error_log /var/log/nginx/error.log;
pid /run/nginx.pid;
# Load dynamic modules. See /usr/share/nginx/README.dynamic.
include /usr/share/nginx/modules/*.conf;
events {
worker_connections 1024;
}
http {
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
client_max_body_size 2500M;
include /etc/nginx/mime.types;
default_type application/octet-stream;
# Load modular configuration files from the /etc/nginx/conf.d directory.
# See http://nginx.org/en/docs/ngx_core_module.html#include
# for more information.
include /etc/nginx/conf.d/*.conf;
#server {
# listen 80 default_server;
# listen [::]:80 default_server;
# server_name _;
# root /opt/socore/html;
# index index.html;
# Load configuration files for the default server block.
#include /etc/nginx/default.d/*.conf;
# location / {
# }
# error_page 404 /404.html;
# location = /40x.html {
# }
# error_page 500 502 503 504 /50x.html;
# location = /50x.html {
# }
#}
server {
listen 80 default_server;
server_name _;
return 301 https://$host$request_uri;
}
{%- if ISAIRGAP is sameas true %}
server {
listen 7788;
server_name _;
root /opt/socore/html/repo;
location /rules/ {
allow all;
sendfile on;
sendfile_max_chunk 1m;
autoindex on;
autoindex_exact_size off;
autoindex_format html;
autoindex_localtime on;
}
}
{%- endif %}
{% if FLEET_MANAGER %}
server {
listen 8090 ssl http2 default_server;
server_name _;
root /opt/socore/html;
index blank.html;
ssl_certificate "/etc/pki/nginx/server.crt";
ssl_certificate_key "/etc/pki/nginx/server.key";
ssl_session_cache shared:SSL:1m;
ssl_session_timeout 10m;
ssl_ciphers HIGH:!aNULL:!MD5;
ssl_prefer_server_ciphers on;
location ~ ^/kolide.agent.Api/(RequestEnrollment|RequestConfig|RequestQueries|PublishLogs|PublishResults|CheckHealth)$ {
grpc_pass grpcs://{{ managerip }}:8080;
grpc_set_header Host $host;
grpc_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_buffering off;
}
}
{% endif %}
# Settings for a TLS enabled server.
server {
listen 443 ssl http2 default_server;
#listen [::]:443 ssl http2 default_server;
server_name _;
root /opt/socore/html;
index index.html;
ssl_certificate "/etc/pki/nginx/server.crt";
ssl_certificate_key "/etc/pki/nginx/server.key";
ssl_session_cache shared:SSL:1m;
ssl_session_timeout 10m;
ssl_ciphers HIGH:!aNULL:!MD5;
ssl_prefer_server_ciphers on;
# Load configuration files for the default server block.
#include /etc/nginx/default.d/*.conf;
location ~* (^/login/|^/js/.*|^/css/.*|^/images/.*) {
proxy_pass http://{{ managerip }}:9822;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "Upgrade";
proxy_set_header X-Forwarded-Proto $scheme;
}
location / {
auth_request /auth/sessions/whoami;
proxy_pass http://{{ managerip }}:9822/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "Upgrade";
proxy_set_header X-Forwarded-Proto $scheme;
}
location ~ ^/auth/.*?(whoami|login|logout|settings) {
rewrite /auth/(.*) /$1 break;
proxy_pass http://{{ managerip }}:4433;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
location /cyberchef/ {
auth_request /auth/sessions/whoami;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
location /navigator/ {
auth_request /auth/sessions/whoami;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
location /packages/ {
try_files $uri =206;
auth_request /auth/sessions/whoami;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
location /grafana/ {
auth_request /auth/sessions/whoami;
rewrite /grafana/(.*) /$1 break;
proxy_pass http://{{ managerip }}:3000/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
location /kibana/ {
auth_request /auth/sessions/whoami;
rewrite /kibana/(.*) /$1 break;
proxy_pass http://{{ managerip }}:5601/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
location /nodered/ {
proxy_pass http://{{ managerip }}:1880/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "Upgrade";
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
location /playbook/ {
proxy_pass http://{{ managerip }}:3200/playbook/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
{%- if ISAIRGAP is sameas true %}
location /repo/ {
allow all;
sendfile on;
sendfile_max_chunk 1m;
autoindex on;
autoindex_exact_size off;
autoindex_format html;
autoindex_localtime on;
}
{%- endif %}
{%- if FLEET_NODE %}
location /fleet/ {
return 301 https://{{ FLEET_IP }}/fleet;
}
{%- else %}
location /fleet/ {
proxy_pass https://{{ managerip }}:8080;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
{%- endif %}
location /thehive/ {
proxy_pass http://{{ managerip }}:9000/thehive/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_http_version 1.1; # this is essential for chunked responses to work
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
location /cortex/ {
proxy_pass http://{{ managerip }}:9001/cortex/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_http_version 1.1; # this is essential for chunked responses to work
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
location /soctopus/ {
proxy_pass http://{{ managerip }}:7000/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
location /kibana/app/soc/ {
rewrite ^/kibana/app/soc/(.*) /soc/$1 permanent;
}
location /kibana/app/fleet/ {
rewrite ^/kibana/app/fleet/(.*) /fleet/$1 permanent;
}
location /kibana/app/soctopus/ {
rewrite ^/kibana/app/soctopus/(.*) /soctopus/$1 permanent;
}
location /sensoroniagents/ {
if ($http_authorization = "") {
return 403;
}
proxy_pass http://{{ managerip }}:9822/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
error_page 401 = @error401;
location @error401 {
add_header Set-Cookie "AUTH_REDIRECT=$request_uri;Path=/;Max-Age=14400";
return 302 /auth/self-service/browser/flows/login;
}
#error_page 404 /404.html;
# location = /40x.html {
#}
error_page 500 502 503 504 /50x.html;
location = /usr/share/nginx/html/50x.html {
}
}
}

View File

@@ -1,89 +0,0 @@
# For more information on configuration, see:
# * Official English Documentation: http://nginx.org/en/docs/
# * Official Russian Documentation: http://nginx.org/ru/docs/
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log;
pid /run/nginx.pid;
# Load dynamic modules. See /usr/share/nginx/README.dynamic.
include /usr/share/nginx/modules/*.conf;
events {
worker_connections 1024;
}
http {
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
include /etc/nginx/mime.types;
default_type application/octet-stream;
# Load modular configuration files from the /etc/nginx/conf.d directory.
# See http://nginx.org/en/docs/ngx_core_module.html#include
# for more information.
include /etc/nginx/conf.d/*.conf;
server {
listen 80 default_server;
listen [::]:80 default_server;
server_name _;
root /usr/share/nginx/html;
# Load configuration files for the default server block.
include /etc/nginx/default.d/*.conf;
location / {
}
#error_page 404 /404.html;
# location = /40x.html {
#}
error_page 500 502 503 504 /50x.html;
location = /usr/share/nginx/html/50x.html {
}
}
# Settings for a TLS enabled server.
#
# server {
# listen 443 ssl http2 default_server;
# listen [::]:443 ssl http2 default_server;
# server_name _;
# root /usr/share/nginx/html;
#
# ssl_certificate "/etc/pki/nginx/server.crt";
# ssl_certificate_key "/etc/pki/nginx/private/server.key";
# ssl_session_cache shared:SSL:1m;
# ssl_session_timeout 10m;
# ssl_ciphers HIGH:!aNULL:!MD5;
# ssl_prefer_server_ciphers on;
#
# # Load configuration files for the default server block.
# include /etc/nginx/default.d/*.conf;
#
# location / {
# }
#
# error_page 404 /404.html;
# location = /40x.html {
# }
#
# error_page 500 502 503 504 /50x.html;
# location = /50x.html {
# }
# }
}

View File

@@ -1,89 +0,0 @@
# For more information on configuration, see:
# * Official English Documentation: http://nginx.org/en/docs/
# * Official Russian Documentation: http://nginx.org/ru/docs/
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log;
pid /run/nginx.pid;
# Load dynamic modules. See /usr/share/nginx/README.dynamic.
include /usr/share/nginx/modules/*.conf;
events {
worker_connections 1024;
}
http {
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
include /etc/nginx/mime.types;
default_type application/octet-stream;
# Load modular configuration files from the /etc/nginx/conf.d directory.
# See http://nginx.org/en/docs/ngx_core_module.html#include
# for more information.
include /etc/nginx/conf.d/*.conf;
server {
listen 80 default_server;
listen [::]:80 default_server;
server_name _;
root /usr/share/nginx/html;
# Load configuration files for the default server block.
include /etc/nginx/default.d/*.conf;
location / {
}
#error_page 404 /404.html;
# location = /40x.html {
#}
error_page 500 502 503 504 /50x.html;
location = /usr/share/nginx/html/50x.html {
}
}
# Settings for a TLS enabled server.
#
# server {
# listen 443 ssl http2 default_server;
# listen [::]:443 ssl http2 default_server;
# server_name _;
# root /usr/share/nginx/html;
#
# ssl_certificate "/etc/pki/nginx/server.crt";
# ssl_certificate_key "/etc/pki/nginx/private/server.key";
# ssl_session_cache shared:SSL:1m;
# ssl_session_timeout 10m;
# ssl_ciphers HIGH:!aNULL:!MD5;
# ssl_prefer_server_ciphers on;
#
# # Load configuration files for the default server block.
# include /etc/nginx/default.d/*.conf;
#
# location / {
# }
#
# error_page 404 /404.html;
# location = /40x.html {
# }
#
# error_page 500 502 503 504 /50x.html;
# location = /50x.html {
# }
# }
}

View File

@@ -1,361 +0,0 @@
{%- set managerip = salt['pillar.get']('manager:mainip', '') %}
{%- set FLEET_MANAGER = salt['pillar.get']('global:fleet_manager') %}
{%- set FLEET_NODE = salt['pillar.get']('global:fleet_node') %}
{%- set FLEET_IP = salt['pillar.get']('global:fleet_ip', None) %}
{%- set ISAIRGAP = salt['pillar.get']('global:airgap', 'False') %}
# For more information on configuration, see:
# * Official English Documentation: http://nginx.org/en/docs/
# * Official Russian Documentation: http://nginx.org/ru/docs/
worker_processes auto;
error_log /var/log/nginx/error.log;
pid /run/nginx.pid;
# Load dynamic modules. See /usr/share/nginx/README.dynamic.
include /usr/share/nginx/modules/*.conf;
events {
worker_connections 1024;
}
http {
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
client_max_body_size 2500M;
include /etc/nginx/mime.types;
default_type application/octet-stream;
# Load modular configuration files from the /etc/nginx/conf.d directory.
# See http://nginx.org/en/docs/ngx_core_module.html#include
# for more information.
include /etc/nginx/conf.d/*.conf;
#server {
# listen 80 default_server;
# listen [::]:80 default_server;
# server_name _;
# root /opt/socore/html;
# index index.html;
# Load configuration files for the default server block.
#include /etc/nginx/default.d/*.conf;
# location / {
# }
# error_page 404 /404.html;
# location = /40x.html {
# }
# error_page 500 502 503 504 /50x.html;
# location = /50x.html {
# }
#}
server {
listen 80 default_server;
server_name _;
return 301 https://$host$request_uri;
}
{%- if ISAIRGAP is sameas true %}
server {
listen 7788;
server_name _;
root /opt/socore/html/repo;
location /rules/ {
allow all;
sendfile on;
sendfile_max_chunk 1m;
autoindex on;
autoindex_exact_size off;
autoindex_format html;
autoindex_localtime on;
}
}
{%- endif %}
{% if FLEET_MANAGER %}
server {
listen 8090 ssl http2 default_server;
server_name _;
root /opt/socore/html;
index blank.html;
ssl_certificate "/etc/pki/nginx/server.crt";
ssl_certificate_key "/etc/pki/nginx/server.key";
ssl_session_cache shared:SSL:1m;
ssl_session_timeout 10m;
ssl_ciphers HIGH:!aNULL:!MD5;
ssl_prefer_server_ciphers on;
location ~ ^/kolide.agent.Api/(RequestEnrollment|RequestConfig|RequestQueries|PublishLogs|PublishResults|CheckHealth)$ {
grpc_pass grpcs://{{ managerip }}:8080;
grpc_set_header Host $host;
grpc_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_buffering off;
}
}
{% endif %}
# Settings for a TLS enabled server.
server {
listen 443 ssl http2 default_server;
#listen [::]:443 ssl http2 default_server;
server_name _;
root /opt/socore/html;
index index.html;
ssl_certificate "/etc/pki/nginx/server.crt";
ssl_certificate_key "/etc/pki/nginx/server.key";
ssl_session_cache shared:SSL:1m;
ssl_session_timeout 10m;
ssl_ciphers HIGH:!aNULL:!MD5;
ssl_prefer_server_ciphers on;
# Load configuration files for the default server block.
#include /etc/nginx/default.d/*.conf;
location ~* (^/login/|^/js/.*|^/css/.*|^/images/.*) {
proxy_pass http://{{ managerip }}:9822;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "Upgrade";
proxy_set_header X-Forwarded-Proto $scheme;
}
location / {
auth_request /auth/sessions/whoami;
proxy_pass http://{{ managerip }}:9822/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "Upgrade";
proxy_set_header X-Forwarded-Proto $scheme;
}
location ~ ^/auth/.*?(whoami|login|logout|settings) {
rewrite /auth/(.*) /$1 break;
proxy_pass http://{{ managerip }}:4433;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
location /cyberchef/ {
auth_request /auth/sessions/whoami;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
location /navigator/ {
auth_request /auth/sessions/whoami;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
location /packages/ {
try_files $uri =206;
auth_request /auth/sessions/whoami;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
{%- if ISAIRGAP is sameas true %}
location /repo/ {
allow all;
sendfile on;
sendfile_max_chunk 1m;
autoindex on;
autoindex_exact_size off;
autoindex_format html;
autoindex_localtime on;
}
{%- endif %}
location /grafana/ {
auth_request /auth/sessions/whoami;
rewrite /grafana/(.*) /$1 break;
proxy_pass http://{{ managerip }}:3000/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
location /kibana/ {
auth_request /auth/sessions/whoami;
rewrite /kibana/(.*) /$1 break;
proxy_pass http://{{ managerip }}:5601/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
location /nodered/ {
proxy_pass http://{{ managerip }}:1880/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "Upgrade";
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
location /playbook/ {
proxy_pass http://{{ managerip }}:3200/playbook/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
{%- if FLEET_NODE %}
location /fleet/ {
return 301 https://{{ FLEET_IP }}/fleet;
}
{%- else %}
location /fleet/ {
proxy_pass https://{{ managerip }}:8080;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
{%- endif %}
location /thehive/ {
proxy_pass http://{{ managerip }}:9000/thehive/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_http_version 1.1; # this is essential for chunked responses to work
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
location /cortex/ {
proxy_pass http://{{ managerip }}:9001/cortex/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_http_version 1.1; # this is essential for chunked responses to work
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
location /soctopus/ {
proxy_pass http://{{ managerip }}:7000/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
location /kibana/app/soc/ {
rewrite ^/kibana/app/soc/(.*) /soc/$1 permanent;
}
location /kibana/app/fleet/ {
rewrite ^/kibana/app/fleet/(.*) /fleet/$1 permanent;
}
location /kibana/app/soctopus/ {
rewrite ^/kibana/app/soctopus/(.*) /soctopus/$1 permanent;
}
location /sensoroniagents/ {
if ($http_authorization = "") {
return 403;
}
proxy_pass http://{{ managerip }}:9822/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme;
}
error_page 401 = @error401;
location @error401 {
add_header Set-Cookie "AUTH_REDIRECT=$request_uri;Path=/;Max-Age=14400";
return 302 /auth/self-service/browser/flows/login;
}
#error_page 404 /404.html;
# location = /40x.html {
#}
error_page 500 502 503 504 /50x.html;
location = /usr/share/nginx/html/50x.html {
}
}
}

View File

@@ -31,7 +31,7 @@ nginxconf:
- user: 939
- group: 939
- template: jinja
- source: salt://nginx/etc/nginx.conf.{{ grains.role }}
- source: salt://nginx/etc/nginx.conf
nginxlogdir:
file.directory:

View File

@@ -3,10 +3,10 @@
default_salt_dir=/opt/so/saltstack/default
echo "Waiting for connection"
until $(curl --output /dev/null --silent --head http://{{ ip }}:1880); do
until $(curl --output /dev/null --silent --head -L http://{{ ip }}:1880); do
echo '.'
sleep 1
done
echo "Loading flows..."
curl -XPOST -v -H "Content-Type: application/json" -d @$default_salt_dir/salt/nodered/so_flows.json {{ ip }}:1880/flows
curl -XPOST -v -H "Content-Type: application/json" -d @$default_salt_dir/salt/nodered/so_flows.json -L {{ ip }}:1880/flows
echo "Done loading..."

View File

@@ -1,4 +1,4 @@
{%- set MANAGER = salt['grains.get']('master') -%}
{%- set URLBASE = salt['pillar.get']('global:url_base') %}
{%- set SENSORONIKEY = salt['pillar.get']('global:sensoronikey', '') -%}
{%- set CHECKININTERVALMS = salt['pillar.get']('pcap:sensor_checkin_interval_ms', 10000) -%}
{
@@ -6,7 +6,7 @@
"logLevel":"info",
"agent": {
"pollIntervalMs": {{ CHECKININTERVALMS if CHECKININTERVALMS else 10000 }},
"serverUrl": "https://{{ MANAGER }}/sensoroniagents",
"serverUrl": "https://{{ URLBASE }}/sensoroniagents",
"verifyCert": false,
"modules": {
"importer": {},

View File

@@ -23,7 +23,7 @@
{% set INTERFACE = salt['pillar.get']('sensor:interface', 'bond0') %}
{% set BPF_STENO = salt['pillar.get']('steno:bpf', None) %}
{% set BPF_COMPILED = "" %}
{% from "pcap/map.jinja" import START with context %}
{% from "pcap/map.jinja" import STENOOPTIONS with context %}
# PCAP Section
@@ -135,9 +135,9 @@ sensoronilog:
- makedirs: True
so-steno:
docker_container.running:
docker_container.{{ STENOOPTIONS.status }}:
- image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-steno:{{ VERSION }}
- start: {{ START }}
- start: {{ STENOOPTIONS.start }}
- network_mode: host
- privileged: True
- port_bindings:

View File

@@ -1,6 +1,15 @@
# don't start the docker container if it is an import node
{% if grains.id.split('_')|last == 'import' %}
{% set START = False %}
{% set STENOOPTIONS = {} %}
{% set ENABLED = salt['pillar.get']('steno:enabled', 'True') %}
# don't start the docker container if it is an import node or disabled via pillar
{% if grains.id.split('_')|last == 'import' or ENABLED is sameas false %}
{% do STENOOPTIONS.update({'start': False}) %}
{% else %}
{% set START = True %}
{% do STENOOPTIONS.update({'start': True}) %}
{% endif %}
{% if ENABLED is sameas false %}
{% do STENOOPTIONS.update({'status': 'stopped'}) %}
{% else %}
{% do STENOOPTIONS.update({'status': 'running'}) %}
{% endif %}
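STENOOPTIONS keys off the steno:enabled pillar value, so full packet capture can now be turned off without editing states; a minimal pillar sketch (assuming the override lives in the usual local minion pillar, e.g. under /opt/so/saltstack/local/pillar):

    steno:
      enabled: False

With that set, the map renders start: False and status: stopped, and the so-steno state above becomes docker_container.stopped instead of docker_container.running.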

View File

@@ -10,6 +10,8 @@
{% set MAINIP = salt['grains.get']('ip_interfaces').get(salt['pillar.get']('sensor:mainint', salt['pillar.get']('manager:mainint', salt['pillar.get']('elasticsearch:mainint', salt['pillar.get']('host:mainint')))))[0] %}
{%- set MYSQLPASS = salt['pillar.get']('secrets:mysql', None) -%}
{%- set PLAYBOOKPASS = salt['pillar.get']('secrets:playbook_db', None) -%}
{%- set DNET = salt['pillar.get']('global:dockernet', '172.17.0.0') %}
include:
- mysql
@@ -19,7 +21,7 @@ create_playbookdbuser:
- mysql.user_create:
- user: playbookdbuser
- password: {{ PLAYBOOKPASS }}
- host: 172.17.0.0/255.255.0.0
- host: {{ DNET }}/255.255.255.0
- connection_host: {{ MAINIP }}
- connection_port: 3306
- connection_user: root
@@ -28,7 +30,7 @@ create_playbookdbuser:
query_playbookdbuser_grants:
mysql_query.run:
- database: playbook
- query: "GRANT ALL ON playbook.* TO 'playbookdbuser'@'172.17.0.0/255.255.0.0';"
- query: "GRANT ALL ON playbook.* TO 'playbookdbuser'@'{{ DNET }}/255.255.255.0';"
- connection_host: {{ MAINIP }}
- connection_port: 3306
- connection_user: root

View File

@@ -2,7 +2,7 @@
{%- set KRATOSKEY = salt['pillar.get']('kratos:kratoskey', '') -%}
selfservice:
strategies:
methods:
password:
enabled: true

View File

@@ -1,6 +1,6 @@
[
{ "name": "", "description": "actionHuntHelp", "icon": "fa-crosshairs", "link": "/#/hunt?q=\"{value}\" | groupby event.module event.dataset", "target": "" },
{ "name": "", "description": "actionPcapHelp", "icon": "fa-stream", "link": "/joblookup?esid={eventId}", "target": "" },
{ "name": "", "description": "actionGoogleHelp", "icon": "fab fa-google", "link": "https://www.google.com/search?q={value}", "target": "_blank" },
{ "name": "actionVirusTotal", "description": "actionVirusTotalHelp", "icon": "", "link": "https://www.virustotal.com/gui/search/{value}", "target": "_blank" }
{ "name": "actionHunt", "description": "actionHuntHelp", "icon": "fa-crosshairs", "link": "/#/hunt?q=\"{value}\" | groupby event.module event.dataset", "target": "" },
{ "name": "actionPcap", "description": "actionPcapHelp", "icon": "fa-stream", "link": "/joblookup?esid={eventId}", "target": "" },
{ "name": "actionGoogle", "description": "actionGoogleHelp", "icon": "fab fa-google", "link": "https://www.google.com/search?q={value}", "target": "_blank" },
{ "name": "actionVirusTotal", "description": "actionVirusTotalHelp", "icon": "fa-external-link-alt", "link": "https://www.virustotal.com/gui/search/{value}", "target": "_blank" }
]

View File

@@ -1,5 +1,6 @@
[
{ "name": "", "description": "actionPcapHelp", "icon": "fa-stream", "link": "/joblookup?esid={eventId}", "target": "" },
{ "name": "", "description": "actionGoogleHelp", "icon": "fab fa-google", "link": "https://www.google.com/search?q={value}", "target": "_blank" },
{ "name": "actionVirusTotal", "description": "actionVirusTotalHelp", "icon": "", "link": "https://www.virustotal.com/gui/search/{value}", "target": "_blank" }
{ "name": "actionHunt", "description": "actionHuntHelp", "icon": "fa-crosshairs", "link": "/#/hunt?q=\"{value}\" | groupby event.module event.dataset", "target": "" },
{ "name": "actionPcap", "description": "actionPcapHelp", "icon": "fa-stream", "link": "/joblookup?esid={eventId}", "target": "" },
{ "name": "actionGoogle", "description": "actionGoogleHelp", "icon": "fab fa-google", "link": "https://www.google.com/search?q={value}", "target": "_blank" },
{ "name": "actionVirusTotal", "description": "actionVirusTotalHelp", "icon": "fa-external-link-alt", "link": "https://www.virustotal.com/gui/search/{value}", "target": "_blank" }
]
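Each action entry follows the same schema (name, description, icon, link, target), so additional pivots are just more objects in this list; a hypothetical example (the Shodan entry below is not part of the shipped defaults):

    { "name": "actionShodan", "description": "Search Shodan for this value", "icon": "fa-external-link-alt", "link": "https://www.shodan.io/search?query={value}", "target": "_blank" }

As with the Google and VirusTotal actions, {value} is replaced with the selected field value.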

View File

@@ -9,6 +9,8 @@
{%- import_json "soc/files/soc/hunt.queries.json" as hunt_queries %}
{%- import_json "soc/files/soc/hunt.actions.json" as hunt_actions %}
{%- import_json "soc/files/soc/hunt.eventfields.json" as hunt_eventfields %}
{%- set DNET = salt['pillar.get']('global:dockernet', '172.17.0.0') %}
{
"logFilename": "/opt/sensoroni/logs/sensoroni-server.log",
"server": {
@@ -33,7 +35,7 @@
{%- if salt['pillar.get']('nodestab', {}) %}
"remoteHostUrls": [
{%- for SN, SNDATA in salt['pillar.get']('nodestab', {}).items() %}
"https://{{ SN.split('_')|first }}:9200"{{ "," if not loop.last }}
"https://{{ SN.split('_')|first }}:9200"{{ "," if not loop.last else ""}}
{%- endfor %}
],
{%- endif %}
@@ -49,7 +51,7 @@
},
{% endif %}
"statickeyauth": {
"anonymousCidr": "172.17.0.0/24",
"anonymousCidr": "{{ DNET }}/24",
"apiKey": "{{ SENSORONIKEY }}"
}
},
@@ -70,7 +72,7 @@
"relativeTimeValue": 24,
"relativeTimeUnit": 30,
"mostRecentlyUsedLimit": 5,
"dismissEnabled": false,
"ackEnabled": false,
"escalateEnabled": {{ 'true' if THEHIVEKEY != '' else 'false' }},
"eventFields": {{ hunt_eventfields | json }},
"queryBaseFilter": "",
@@ -87,7 +89,7 @@
"relativeTimeValue": 24,
"relativeTimeUnit": 30,
"mostRecentlyUsedLimit": 5,
"dismissEnabled": true,
"ackEnabled": true,
"escalateEnabled": {{ 'true' if THEHIVEKEY != '' else 'false' }},
"eventFields": {{ alerts_eventfields | json }},
"queryBaseFilter": "event.dataset:alert",

View File

@@ -1,4 +1,5 @@
{%- set MANAGER = salt['pillar.get']('global:url_base', '') %}
{%- set URLBASE = salt['pillar.get']('global:url_base', '') %}
{%- set HIVEKEY = salt['pillar.get']('global:hivekey', '') %}
{%- set CORTEXKEY = salt['pillar.get']('global:cortexorguserkey', '') %}
{%- set PLAYBOOK_KEY = salt['pillar.get']('playbook:api_key', '') %}
@@ -14,7 +15,7 @@ es_verifycert = no
[cortex]
auto_analyze_alerts = no
cortex_url = https://{{MANAGER}}/cortex/
cortex_url = https://{{URLBASE}}/cortex/
cortex_key = {{ CORTEXKEY }}
supported_analyzers = Urlscan_io_Search,CERTatPassiveDNS
@@ -35,7 +36,7 @@ grr_user = YOURGRRUSER
grr_pass = YOURGRRPASS
[hive]
hive_url = https://{{MANAGER}}/thehive/
hive_url = https://{{URLBASE}}/thehive/
hive_key = {{ HIVEKEY }}
hive_tlp = 3
hive_verifycert = no
@@ -66,7 +67,7 @@ soc_url = http://{{MANAGER}}:9822
[playbook]
playbook_url = http://{{MANAGER}}:3200/playbook
playbook_ext_url = https://{{MANAGER}}/playbook
playbook_ext_url = https://{{URLBASE}}/playbook
playbook_key = {{ PLAYBOOK_KEY }}
playbook_verifycert = no
playbook_unit_test_index = playbook-testing

View File

@@ -7,9 +7,9 @@ af-packet:
use-mmap: yes
threads: {{ salt['pillar.get']('sensor:suriprocs', salt['pillar.get']('sensor:suripins') | length) }}
tpacket-v3: yes
ring-size: {{ salt['pillar.get']('sensor:suriringsize', '2048') }}
ring-size: {{ salt['pillar.get']('sensor:suriringsize', '5000') }}
- interface: default
#threads: auto
#use-mmap: no
#tpacket-v3: yes
{% endload %}
{% endload %}
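The af-packet ring-size default moves from 2048 to 5000 and stays pillar-tunable; a minimal override sketch (assuming it belongs in the sensor pillar alongside the suripins/suriprocs keys referenced above):

    sensor:
      suriringsize: 8192

A larger ring trades memory for more headroom against packet drops during traffic bursts.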

View File

@@ -6,6 +6,7 @@
# WARNING: If you deploy your application on several servers, make sure to use the same key.
play.http.secret.key="{{ CORTEXPLAYSECRET }}"
play.http.context=/cortex/
pidfile.path = "/dev/null"
search.uri = "http://{{ MANAGERIP }}:9400"
# Elasticsearch

View File

@@ -1,5 +1,5 @@
#!/bin/bash
# {%- set MANAGERIP = salt['pillar.get']('global:managerip', '') %}
# {%- set URLBASE = salt['pillar.get']('global:url_base', '') %}
# {%- set CORTEXUSER = salt['pillar.get']('global:cortexuser', 'cortexadmin') %}
# {%- set CORTEXPASSWORD = salt['pillar.get']('global:cortexpassword', 'cortexchangeme') %}
# {%- set CORTEXKEY = salt['pillar.get']('global:cortexkey', '') %}
@@ -17,7 +17,7 @@ cortex_clean(){
cortex_init(){
sleep 60
CORTEX_IP="{{MANAGERIP}}"
CORTEX_API_URL="{{URLBASE}}/cortex/api"
CORTEX_USER="{{CORTEXUSER}}"
CORTEX_PASSWORD="{{CORTEXPASSWORD}}"
CORTEX_KEY="{{CORTEXKEY}}"
@@ -29,31 +29,30 @@ cortex_init(){
# Migrate DB
curl -v -k -XPOST "https://$CORTEX_IP:/cortex/api/maintenance/migrate"
curl -v -k -XPOST -L "https://$CORTEX_API_URL/maintenance/migrate"
# Create initial Cortex superadmin
curl -v -k "https://$CORTEX_IP/cortex/api/user" -H "Content-Type: application/json" -d "{\"login\" : \"$CORTEX_USER\",\"name\" : \"$CORTEX_USER\",\"roles\" : [\"superadmin\"],\"preferences\" : \"{}\",\"password\" : \"$CORTEX_PASSWORD\", \"key\": \"$CORTEX_KEY\"}"
curl -v -k -L "https://$CORTEX_API_URL/user" -H "Content-Type: application/json" -d "{\"login\" : \"$CORTEX_USER\",\"name\" : \"$CORTEX_USER\",\"roles\" : [\"superadmin\"],\"preferences\" : \"{}\",\"password\" : \"$CORTEX_PASSWORD\", \"key\": \"$CORTEX_KEY\"}"
# Create user-supplied org
curl -k -XPOST -H "Authorization: Bearer $CORTEX_KEY" -H "Content-Type: application/json" "https://$CORTEX_IP/cortex/api/organization" -d "{ \"name\": \"$CORTEX_ORG_NAME\",\"description\": \"$CORTEX_ORG_DESC\",\"status\": \"Active\"}"
curl -k -XPOST -H "Authorization: Bearer $CORTEX_KEY" -H "Content-Type: application/json" -L "https://$CORTEX_API_URL/organization" -d "{ \"name\": \"$CORTEX_ORG_NAME\",\"description\": \"$CORTEX_ORG_DESC\",\"status\": \"Active\"}"
# Create user-supplied org user
curl -k -XPOST -H "Authorization: Bearer $CORTEX_KEY" -H "Content-Type: application/json" "https://$CORTEX_IP/cortex/api/user" -d "{\"name\": \"$CORTEX_ORG_USER\",\"roles\": [\"read\",\"analyze\",\"orgadmin\"],\"organization\": \"$CORTEX_ORG_NAME\",\"login\": \"$CORTEX_ORG_USER\",\"key\": \"$CORTEX_ORG_USER_KEY\" }"
curl -k -XPOST -H "Authorization: Bearer $CORTEX_KEY" -H "Content-Type: application/json" -L "https://$CORTEX_API_URL/user" -d "{\"name\": \"$CORTEX_ORG_USER\",\"roles\": [\"read\",\"analyze\",\"orgadmin\"],\"organization\": \"$CORTEX_ORG_NAME\",\"login\": \"$CORTEX_ORG_USER\",\"key\": \"$CORTEX_ORG_USER_KEY\" }"
# Enable URLScan.io Analyzer
curl -v -k -XPOST -H "Authorization: Bearer $CORTEX_ORG_USER_KEY" -H "Content-Type: application/json" "https://$CORTEX_IP/cortex/api/organization/analyzer/Urlscan_io_Search_0_1_0" -d '{"name":"Urlscan_io_Search_0_1_0","configuration":{"auto_extract_artifacts":false,"check_tlp":true,"max_tlp":2}}'
curl -v -k -XPOST -H "Authorization: Bearer $CORTEX_ORG_USER_KEY" -H "Content-Type: application/json" -L "https://$CORTEX_API_URL/organization/analyzer/Urlscan_io_Search_0_1_0" -d '{"name":"Urlscan_io_Search_0_1_0","configuration":{"auto_extract_artifacts":false,"check_tlp":true,"max_tlp":2}}'
# Enable Cert PassiveDNS Analyzer
curl -v -k -XPOST -H "Authorization: Bearer $CORTEX_ORG_USER_KEY" -H "Content-Type: application/json" "https://$CORTEX_IP/cortex/api/organization/analyzer/CERTatPassiveDNS_2_0" -d '{"name":"CERTatPassiveDNS_2_0","configuration":{"auto_extract_artifacts":false,"check_tlp":true,"max_tlp":2, "limit": 100}}'
curl -v -k -XPOST -H "Authorization: Bearer $CORTEX_ORG_USER_KEY" -H "Content-Type: application/json" -L "https://$CORTEX_API_URL/organization/analyzer/CERTatPassiveDNS_2_0" -d '{"name":"CERTatPassiveDNS_2_0","configuration":{"auto_extract_artifacts":false,"check_tlp":true,"max_tlp":2, "limit": 100}}'
# Revoke $CORTEX_USER key
curl -k -XDELETE -H "Authorization: Bearer $CORTEX_KEY" "https:///$CORTEX_IP/api/user/$CORTEX_USER/key"
curl -k -XDELETE -H "Authorization: Bearer $CORTEX_KEY" -L "https://$CORTEX_API_URL/user/$CORTEX_USER/key"
# Update SOCtopus config with apikey value
#sed -i "s/cortex_key = .*/cortex_key = $CORTEX_KEY/" $SOCTOPUS_CONFIG
touch /opt/so/state/cortex.txt
}
if [ -f /opt/so/state/cortex.txt ]; then
@@ -61,7 +60,7 @@ if [ -f /opt/so/state/cortex.txt ]; then
exit 0
else
rm -f garbage_file
while ! wget -O garbage_file {{MANAGERIP}}:9500 2>/dev/null
while ! wget -O garbage_file {{URLBASE}}:9500 2>/dev/null
do
echo "Waiting for Elasticsearch..."
rm -f garbage_file

View File

@@ -1,5 +1,6 @@
#!/bin/bash
# {%- set MANAGERIP = salt['pillar.get']('global:managerip', '') %}
# {%- set URLBASE = salt['pillar.get']('global:url_base', '') %}
# {%- set THEHIVEUSER = salt['pillar.get']('global:hiveuser', 'hiveadmin') %}
# {%- set THEHIVEPASSWORD = salt['pillar.get']('global:hivepassword', 'hivechangeme') %}
# {%- set THEHIVEKEY = salt['pillar.get']('global:hivekey', '') %}
@@ -11,7 +12,8 @@ thehive_clean(){
thehive_init(){
sleep 120
THEHIVE_IP="{{MANAGERIP}}"
THEHIVE_URL="{{URLBASE}}/thehive"
THEHIVE_API_URL="$THEHIVE_URL/api"
THEHIVE_USER="{{THEHIVEUSER}}"
THEHIVE_PASSWORD="{{THEHIVEPASSWORD}}"
THEHIVE_KEY="{{THEHIVEKEY}}"
@@ -21,7 +23,7 @@ thehive_init(){
COUNT=0
THEHIVE_CONNECTED="no"
while [[ "$COUNT" -le 240 ]]; do
curl --output /dev/null --silent --head --fail -k "https://$THEHIVE_IP/thehive"
curl --output /dev/null --silent --head --fail -k "https://$THEHIVE_URL"
if [ $? -eq 0 ]; then
THEHIVE_CONNECTED="yes"
echo "connected!"
@@ -36,15 +38,15 @@ thehive_init(){
if [ "$THEHIVE_CONNECTED" == "yes" ]; then
# Migrate DB
curl -v -k -XPOST "https://$THEHIVE_IP:/thehive/api/maintenance/migrate"
curl -v -k -XPOST -L "https://$THEHIVE_API_URL/maintenance/migrate"
# Create initial TheHive user
curl -v -k "https://$THEHIVE_IP/thehive/api/user" -H "Content-Type: application/json" -d "{\"login\" : \"$THEHIVE_USER\",\"name\" : \"$THEHIVE_USER\",\"roles\" : [\"read\",\"alert\",\"write\",\"admin\"],\"preferences\" : \"{}\",\"password\" : \"$THEHIVE_PASSWORD\", \"key\": \"$THEHIVE_KEY\"}"
curl -v -k -L "https://$THEHIVE_API_URL/user" -H "Content-Type: application/json" -d "{\"login\" : \"$THEHIVE_USER\",\"name\" : \"$THEHIVE_USER\",\"roles\" : [\"read\",\"alert\",\"write\",\"admin\"],\"preferences\" : \"{}\",\"password\" : \"$THEHIVE_PASSWORD\", \"key\": \"$THEHIVE_KEY\"}"
# Pre-load custom fields
#
# reputation
curl -v -k "https://$THEHIVE_IP/thehive/api/list/custom_fields" -H "Authorization: Bearer $THEHIVE_KEY" -H "Content-Type: application/json" -d "{\"value\":{\"name\": \"reputation\", \"reference\": \"reputation\", \"description\": \"This field provides an overall reputation status for an address/domain.\", \"type\": \"string\", \"options\": []}}"
curl -v -k -L "https://$THEHIVE_API_URL/list/custom_fields" -H "Authorization: Bearer $THEHIVE_KEY" -H "Content-Type: application/json" -d "{\"value\":{\"name\": \"reputation\", \"reference\": \"reputation\", \"description\": \"This field provides an overall reputation status for an address/domain.\", \"type\": \"string\", \"options\": []}}"
touch /opt/so/state/thehive.txt

View File

@@ -9,7 +9,7 @@ echo -n "Waiting for ElasticSearch..."
COUNT=0
ELASTICSEARCH_CONNECTED="no"
while [[ "$COUNT" -le 30 ]]; do
curl --output /dev/null --silent --head --fail http://{{ ES }}:9200
curl --output /dev/null --silent --head --fail -L http://{{ ES }}:9200
if [ $? -eq 0 ]; then
ELASTICSEARCH_CONNECTED="yes"
echo "connected!"
@@ -29,7 +29,7 @@ if [ "$ELASTICSEARCH_CONNECTED" == "no" ]; then
fi
echo "Applying cross cluster search config..."
curl -s -XPUT http://{{ ES }}:9200/_cluster/settings \
curl -s -XPUT -L http://{{ ES }}:9200/_cluster/settings \
-H 'Content-Type: application/json' \
-d "{\"persistent\": {\"search\": {\"remote\": {\"{{ MANAGER }}\": {\"seeds\": [\"127.0.0.1:9300\"]}}}}}"
@@ -37,6 +37,6 @@ echo "Applying cross cluster search config..."
{%- if salt['pillar.get']('nodestab', {}) %}
{%- for SN, SNDATA in salt['pillar.get']('nodestab', {}).items() %}
curl -XPUT http://{{ ES }}:9200/_cluster/settings -H'Content-Type: application/json' -d '{"persistent": {"search": {"remote": {"{{ SN }}": {"skip_unavailable": "true", "seeds": ["{{ SN.split('_')|first }}:9300"]}}}}}'
curl -XPUT -L http://{{ ES }}:9200/_cluster/settings -H'Content-Type: application/json' -d '{"persistent": {"search": {"remote": {"{{ SN }}": {"skip_unavailable": "true", "seeds": ["{{ SN.split('_')|first }}:9300"]}}}}}'
{%- endfor %}
{%- endif %}

View File

@@ -6,7 +6,7 @@ echo -n "Waiting for ElasticSearch..."
COUNT=0
ELASTICSEARCH_CONNECTED="no"
while [[ "$COUNT" -le 30 ]]; do
curl --output /dev/null --silent --head --fail http://{{ ES }}:9200
curl --output /dev/null --silent --head --fail -L http://{{ ES }}:9200
if [ $? -eq 0 ]; then
ELASTICSEARCH_CONNECTED="yes"
echo "connected!"
@@ -26,6 +26,6 @@ if [ "$ELASTICSEARCH_CONNECTED" == "no" ]; then
fi
echo "Applying cross cluster search config..."
curl -s -XPUT http://{{ ES }}:9200/_cluster/settings \
curl -s -XPUT -L http://{{ ES }}:9200/_cluster/settings \
-H 'Content-Type: application/json' \
-d "{\"persistent\": {\"search\": {\"remote\": {\"{{ grains.host }}\": {\"seeds\": [\"127.0.0.1:9300\"]}}}}}"

View File

@@ -47,51 +47,53 @@ cat <<HELP_USAGE
HELP_USAGE
}
cleanup_creds() {
/usr/sbin/so-wazuh-user-remove $USER
}
register_agent() {
# Adding agent and getting Id from manager
echo ""
echo "Adding agent:"
echo "curl -s -u $USER:**** -k -X POST -d 'name=$AGENT_NAME&ip=$AGENT_IP' $PROTOCOL://$API_IP:$API_PORT/agents"
API_RESULT=$(curl -s -u $USER:"$PASSWORD" -k -X POST -d 'name='$AGENT_NAME'&ip='$AGENT_IP $PROTOCOL://$API_IP:$API_PORT/agents)
API_RESULT=$(curl -s -u $USER:"$PASSWORD" -k -X POST -d 'name='$AGENT_NAME'&ip='$AGENT_IP -L $PROTOCOL://$API_IP:$API_PORT/agents)
echo -e $API_RESULT | grep -q "\"error\":0" 2>&1
if [ "$?" != "0" ]; then
echo -e $API_RESULT | sed -rn 's/.*"message":"(.+)".*/\1/p'
exit 0
else
# Get agent id and agent key
AGENT_ID=$(echo $API_RESULT | cut -d':' -f 4 | cut -d ',' -f 1)
AGENT_KEY=$(echo $API_RESULT | cut -d':' -f 5 | cut -d '}' -f 1)
echo "Agent '$AGENT_NAME' with ID '$AGENT_ID' added."
echo "Key for agent '$AGENT_ID' received."
# Importing key
echo ""
echo "Importing authentication key:"
echo "y" | /var/ossec/bin/manage_agents -i $AGENT_KEY
# Restarting agent
echo ""
echo "Restarting:"
echo ""
/var/ossec/bin/ossec-control restart
fi
# Get agent id and agent key
AGENT_ID=$(echo $API_RESULT | cut -d':' -f 4 | cut -d ',' -f 1)
AGENT_KEY=$(echo $API_RESULT | cut -d':' -f 5 | cut -d '}' -f 1)
echo "Agent '$AGENT_NAME' with ID '$AGENT_ID' added."
echo "Key for agent '$AGENT_ID' received."
# Importing key
echo ""
echo "Importing authentication key:"
echo "y" | /var/ossec/bin/manage_agents -i $AGENT_KEY
# Restarting agent
echo ""
echo "Restarting:"
echo ""
/var/ossec/bin/ossec-control restart
exit 0
}
remove_agent() {
echo "Found: $AGENT_ID"
echo "Removing previous registration for '$AGENT_NAME' using ID: $AGENT_ID ..."
# curl -u foo:bar -k -X DELETE "https://127.0.0.1:55000/agents/001
REMOVE_AGENT=$(curl -s -u $USER:"$PASSWORD" -k -X DELETE $PROTOCOL://$API_IP:$API_PORT/agents/$AGENT_ID)
REMOVE_AGENT=$(curl -s -u $USER:"$PASSWORD" -k -X DELETE -L $PROTOCOL://$API_IP:$API_PORT/agents/$AGENT_ID)
echo -e $REMOVE_AGENT
}
get_agent_id() {
echo ""
echo "Checking for Agent ID..."
AGENT_ID=$(curl -s -u $USER:"$PASSWORD" -k -X GET $PROTOCOL://$API_IP:$API_PORT/agents/name/$AGENT_NAME | rev | cut -d: -f1 | rev | grep -o '".*"' | tr -d '"')
AGENT_ID=$(curl -s -u $USER:"$PASSWORD" -k -X GET -L $PROTOCOL://$API_IP:$API_PORT/agents/name/$AGENT_NAME | rev | cut -d: -f1 | rev | grep -o '".*"' | tr -d '"')
}
# MAIN
@@ -135,11 +137,15 @@ shift $(($OPTIND - 1))
# fi
# Default action -> try to register the agent
echo "Waiting before registering agent..."
sleep 30s
STATUS=$(curl -s -k -u $USER:$PASSWORD $PROTOCOL://$API_IP:$API_PORT/agents/$AGENT_ID | jq .data.status | sed s'/"//g')
if [[ $STATUS == "Active" ]]; then
if [ -f /opt/so/conf/wazuh/initial_agent_registration.log ]; then
echo "Agent $AGENT_ID already registered!"
exit 0
else
register_agent
cleanup_creds
echo "Initial agent $AGENT_ID with IP $AGENT_IP registered on $DATE." > /opt/so/conf/wazuh/initial_agent_registration.log
exit 0
fi
#remove_agent

View File

@@ -0,0 +1,5 @@
#fields indicator indicator_type meta.source meta.do_notice
# EXAMPLES:
#66.32.119.38 Intel::ADDR Test Address T
#www.honeynet.org Intel::DOMAIN Test Domain T
#4285358dd748ef74cb8161108e11cb73 Intel::FILE_HASH Test MD5 T
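Live entries use the same tab-separated layout as the #fields header; a placeholder sketch (203.0.113.7 and the source label are illustrative, and the separators shown as spaces here must be literal tabs in the real file):

    203.0.113.7    Intel::ADDR    local-blocklist    T

With Zeek's intel do_notice policy loaded, indicators whose meta.do_notice column is T generate a notice when they are observed.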

BIN
screenshots/alerts-1.png Normal file

Binary file not shown.


BIN
screenshots/hunt-1.png Normal file

Binary file not shown.


View File

@@ -173,7 +173,7 @@ add_web_user() {
echo "Attempting to add administrator user for web interface...";
echo "$WEBPASSWD1" | /usr/sbin/so-user add "$WEBUSER";
echo "Add user result: $?";
} >> "$setup_log" 2>&1
} >> "/root/so-user-add.log" 2>&1
}
# Create a secrets pillar so that passwords survive re-install
@@ -430,8 +430,6 @@ configure_minion() {
{
systemctl restart salt-minion;
printf '%s\n' '----';
cat "$minion_config";
} >> "$setup_log" 2>&1
}
@@ -747,7 +745,7 @@ detect_os() {
disable_auto_start() {
if crontab -l 2>&1 | grep so-setup > /dev/null 2>&1; then
if crontab -l -u $INSTALLUSERNAME 2>&1 | grep so-setup > /dev/null 2>&1; then
# Remove the automated setup script from crontab, if it exists
logCmd "crontab -u $INSTALLUSERNAME -r"
fi
@@ -840,11 +838,22 @@ docker_registry() {
echo "Setting up Docker Registry" >> "$setup_log" 2>&1
mkdir -p /etc/docker >> "$setup_log" 2>&1
if [ -z "$DOCKERNET" ]; then
DOCKERNET=172.17.0.0
fi
# Make the host use the manager docker registry
DNETBIP=$(echo $DOCKERNET | awk -F'.' '{print $1,$2,$3,1}' OFS='.')/24
if [ -n "$TURBO" ]; then local proxy="$TURBO"; else local proxy="https://$MSRV"; fi
printf '%s\n'\
"{"\
" \"registry-mirrors\": [ \"$proxy:5000\" ]"\
" \"registry-mirrors\": [ \"$proxy:5000\" ],"\
" \"bip\": \"$DNETBIP\","\
" \"default-address-pools\": ["\
" {"\
" \"base\" : \"$DOCKERNET\","\
" \"size\" : 24"\
" }"\
" ]"\
"}" > /etc/docker/daemon.json
echo "Docker Registry Setup - Complete" >> "$setup_log" 2>&1
@@ -1003,15 +1012,6 @@ get_redirect() {
fi
}
got_root() {
# Make sure you are root
uid="$(id -u)"
if [ "$uid" -ne 0 ]; then
echo "This script must be run using sudo!"
exit 1
fi
}
get_minion_type() {
local minion_type
case "$install_type" in
@@ -1139,12 +1139,17 @@ manager_global() {
fi
fi
if [ -z "$DOCKERNET" ]; then
DOCKERNET=172.17.0.0
fi
# Create a global file for global values
printf '%s\n'\
"global:"\
" soversion: '$SOVERSION'"\
" hnmanager: '$HNMANAGER'"\
" ntpserver: '$NTPSERVER'"\
" dockernet: '$DOCKERNET'"\
" proxy: '$PROXY'"\
" mdengine: '$ZEEKVERSION'"\
" ids: '$NIDS'"\
@@ -1633,12 +1638,17 @@ salt_checkin() {
done
echo " Confirming existence of the CA certificate"
cat /etc/pki/ca.crt
openssl x509 -in /etc/pki/ca.crt -noout -subject -issuer -dates
echo " Applyng a mine hack";
salt "$MINION_ID" mine.send x509.get_pem_entries glob_path=/etc/pki/ca.crt;
salt "$MINION_ID" mine.update;
echo " Confirming salt mine now contain the certificate";
salt "$MINION_ID" mine.get '*' x509.get_pem_entries;
echo "Confirming salt mine now contains the certificate";
salt "$MINION_ID" mine.get '*' x509.get_pem_entries | grep -E 'BEGIN CERTIFICATE|END CERTIFICATE';
if [ $? -eq 0 ]; then
echo "CA in mine"
else
echo "CA not in mine"
fi
echo " Applying SSL state";
salt-call state.apply ssl;
} >> "$setup_log" 2>&1
@@ -1691,10 +1701,12 @@ setup_salt_master_dirs() {
if [ "$setup_type" = 'iso' ]; then
rsync -avh --exclude 'TRANS.TBL' /home/$INSTALLUSERNAME/SecurityOnion/pillar/* $default_salt_dir/pillar/ >> "$setup_log" 2>&1
rsync -avh --exclude 'TRANS.TBL' /home/$INSTALLUSERNAME/SecurityOnion/salt/* $default_salt_dir/salt/ >> "$setup_log" 2>&1
mkdir -p $local_salt_dir/salt/zeek/policy/intel >> "$setup_log" 2>&1
cp -Rv /home/$INSTALLUSERNAME/SecurityOnion/files/intel.dat $local_salt_dir/salt/zeek/policy/intel/ >> "$setup_log" 2>&1
else
cp -Rv ../pillar/* $default_salt_dir/pillar/ >> "$setup_log" 2>&1
cp -Rv ../salt/* $default_salt_dir/salt/ >> "$setup_log" 2>&1
mkdir -p $local_salt_dir/salt/zeek/policy/intel >> "$setup_log" 2>&1
cp -Rv files/intel.dat $local_salt_dir/salt/zeek/policy/intel/ >> "$setup_log" 2>&1
fi

View File

@@ -15,7 +15,15 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Make sure you are root before doing anything
uid="$(id -u)"
if [ "$uid" -ne 0 ]; then
echo "This script must be run using sudo!"
exit 1
fi
cd "$(dirname "$0")" || exit 255
source ./so-functions
source ./so-common-functions
source ./so-whiptail
@@ -108,8 +116,6 @@ esac
# Allow execution of SO tools during setup
export PATH=$PATH:../salt/common/tools/sbin
got_root
detect_os && detect_cloud
set_network_dev_status_list
@@ -185,6 +191,10 @@ elif [ "$install_type" = 'HELIXSENSOR' ]; then
is_helix=true
elif [ "$install_type" = 'IMPORT' ]; then
is_import=true
elif [ "$install_type" = 'ANALYST' ]; then
cd .. || exit 255
./so-analyst-install
exit 0
fi
# Say yes to the dress if it's an ISO install
@@ -310,9 +320,8 @@ if [[ $is_helix || $is_sensor || $is_import ]]; then
calculate_useable_cores
fi
if [[ $is_helix || $is_manager || $is_import ]]; then
whiptail_homenet_manager
fi
whiptail_homenet_manager
whiptail_dockernet_check
if [[ $is_helix || $is_manager || $is_node || $is_import ]]; then
set_base_heapsizes

View File

@@ -384,7 +384,7 @@ whiptail_invalid_pass_characters_warning() {
[ -n "$TESTING" ] && return
whiptail --title "Security Onion Setup" --msgbox "Password is invalid. Please exclude single quotes, double quotes and backslashes from the password." 8 75
whiptail --title "Security Onion Setup" --msgbox "Password is invalid. Please exclude single quotes, double quotes, dollar signs, and backslashes from the password." 8 75
}
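A minimal sketch of the kind of filter the updated message implies (not the exact validation code used by setup):

    is_valid_pass() {
      case "$1" in
        *"'"* | *'"'* | *'$'* | *'\'* ) return 1 ;;   # reject single quote, double quote, dollar sign, backslash
        * ) return 0 ;;
      esac
    }
    is_valid_pass 'Str0ngOnion!' && echo accepted    # -> accepted
    is_valid_pass 'pa$$word'     || echo rejected    # -> rejected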
whiptail_cur_close_days() {
@@ -457,6 +457,31 @@ whiptail_dhcp_warn() {
}
whiptail_dockernet_check(){
[ -n "$TESTING" ] && return
whiptail --title "Security Onion Setup" --yesno \
"Do you want to keep the default Docker IP range? \n \n(Choose yes if you don't know what this means)" 10 75
local exitstatus=$?
if [[ $exitstatus == 1 ]]; then
whiptail_dockernet_net
fi
}
whiptail_dockernet_net() {
[ -n "$TESTING" ] && return
DOCKERNET=$(whiptail --title "Security Onion Setup" --inputbox \
"\nEnter a /24 network range for docker to use: \nThe same range MUST be used on ALL nodes \n(Default value is pre-populated.)" 10 75 172.17.0.0 3>&1 1>&2 2>&3)
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
}
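After setup completes, the range actually applied on a node can be double-checked with standard tooling (a sketch; with the default range the bridge sits at 172.17.0.1/24):

    ip -4 addr show docker0
    docker network inspect bridge --format '{{range .IPAM.Config}}{{.Subnet}}{{end}}'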
whiptail_enable_components() {
[ -n "$TESTING" ] && return
@@ -560,11 +585,12 @@ whiptail_install_type() {
# What kind of install are we doing?
install_type=$(whiptail --title "Security Onion Setup" --radiolist \
"Choose install type:" 10 65 4 \
"Choose install type:" 12 65 5 \
"EVAL" "Evaluation mode (not for production) " ON \
"STANDALONE" "Standalone production install " OFF \
"DISTRIBUTED" "Distributed install submenu " OFF \
"IMPORT" "Standalone to import PCAP or log files " OFF \
"OTHER" "Other install types" OFF \
3>&1 1>&2 2>&3
)
@@ -572,22 +598,50 @@ whiptail_install_type() {
whiptail_check_exitstatus $exitstatus
if [[ $install_type == "DISTRIBUTED" ]]; then
install_type=$(whiptail --title "Security Onion Setup" --radiolist \
"Choose distributed node type:" 13 60 6 \
"MANAGER" "Start a new grid " ON \
"SENSOR" "Create a forward only sensor " OFF \
"SEARCHNODE" "Add a search node with parsing " OFF \
"MANAGERSEARCH" "Manager + search node " OFF \
"FLEET" "Dedicated Fleet Osquery Node " OFF \
"HEAVYNODE" "Sensor + Search Node " OFF \
3>&1 1>&2 2>&3
# "HOTNODE" "Add Hot Node (Uses Elastic Clustering)" OFF \ # TODO
# "WARMNODE" "Add Warm Node to existing Hot or Search node" OFF \ # TODO
# "WAZUH" "Stand Alone Wazuh Server" OFF \ # TODO
# "STRELKA" "Stand Alone Strelka Node" OFF \ # TODO
)
whiptail_install_type_dist
elif [[ $install_type == "OTHER" ]]; then
whiptail_install_type_other
fi
export install_type
}
whiptail_install_type_dist() {
[ -n "$TESTING" ] && return
install_type=$(whiptail --title "Security Onion Setup" --radiolist \
"Choose distributed node type:" 13 60 6 \
"MANAGER" "Start a new grid " ON \
"SENSOR" "Create a forward only sensor " OFF \
"SEARCHNODE" "Add a search node with parsing " OFF \
"MANAGERSEARCH" "Manager + search node " OFF \
"FLEET" "Dedicated Fleet Osquery Node " OFF \
"HEAVYNODE" "Sensor + Search Node " OFF \
3>&1 1>&2 2>&3
# "HOTNODE" "Add Hot Node (Uses Elastic Clustering)" OFF \ # TODO
# "WARMNODE" "Add Warm Node to existing Hot or Search node" OFF \ # TODO
# "WAZUH" "Stand Alone Wazuh Server" OFF \ # TODO
# "STRELKA" "Stand Alone Strelka Node" OFF \ # TODO
)
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
export install_type
}
whiptail_install_type_other() {
[ -n "$TESTING" ] && return
install_type=$(whiptail --title "Security Onion Setup" --radiolist \
"Choose distributed node type:" 9 65 2 \
"ANALYST" "Quit setup and run so-analyst-install " ON \
"HELIXSENSOR" "Create a Helix sensor " OFF \
3>&1 1>&2 2>&3
)
local exitstatus=$?
whiptail_check_exitstatus $exitstatus