Merge branch 'dev' into foxtrot

William Wernert
2021-03-09 10:11:04 -05:00
46 changed files with 137 additions and 281 deletions


@@ -3,6 +3,7 @@ logstash:
   pipelines:
     manager:
       config:
+        - so/0008_input_fleet_livequery.conf.jinja
         - so/0009_input_beats.conf
         - so/0010_input_hhbeats.conf
         - so/9999_output_redis.conf.jinja


@@ -8,6 +8,7 @@ logstash:
         - so/9002_output_import.conf.jinja
         - so/9034_output_syslog.conf.jinja
         - so/9100_output_osquery.conf.jinja
+        - so/9101_output_osquery_livequery.conf.jinja
         - so/9400_output_suricata.conf.jinja
         - so/9500_output_beats.conf.jinja
         - so/9600_output_ossec.conf.jinja


@@ -50,11 +50,7 @@ done
 if [ $SKIP -ne 1 ]; then
 # List indices
 echo
-{% if grains['role'] in ['so-node','so-heavynode'] %}
 curl -k -L https://{{ NODEIP }}:9200/_cat/indices?v
-{% else %}
-curl -L {{ NODEIP }}:9200/_cat/indices?v
-{% endif %}
 echo
 # Inform user we are about to delete all data
 echo
@@ -93,18 +89,10 @@ fi
 # Delete data
 echo "Deleting data..."
-{% if grains['role'] in ['so-node','so-heavynode'] %}
 INDXS=$(curl -s -XGET -k -L https://{{ NODEIP }}:9200/_cat/indices?v | egrep 'logstash|elastalert|so-' | awk '{ print $3 }')
-{% else %}
-INDXS=$(curl -s -XGET -L {{ NODEIP }}:9200/_cat/indices?v | egrep 'logstash|elastalert|so-' | awk '{ print $3 }')
-{% endif %}
 for INDX in ${INDXS}
 do
-{% if grains['role'] in ['so-node','so-heavynode'] %}
 curl -XDELETE -k -L https://"{{ NODEIP }}:9200/${INDX}" > /dev/null 2>&1
-{% else %}
-curl -XDELETE -L "{{ NODEIP }}:9200/${INDX}" > /dev/null 2>&1
-{% endif %}
 done
 #Start Logstash/Filebeat


@@ -21,6 +21,5 @@ THEHIVEESPORT=9400
echo "Removing read only attributes for indices..." echo "Removing read only attributes for indices..."
echo echo
for p in $ESPORT $THEHIVEESPORT; do curl -s -k -XPUT -H "Content-Type: application/json" -L https://$IP:9200/_all/_settings -d '{"index.blocks.read_only_allow_delete": null}' 2>&1 | if grep -q ack; then echo "Index settings updated..."; else echo "There was any issue updating the read-only attribute. Please ensure Elasticsearch is running.";fi;
curl -XPUT -H "Content-Type: application/json" -L http://$IP:$p/_all/_settings -d '{"index.blocks.read_only_allow_delete": null}' 2>&1 | if grep -q ack; then echo "Index settings updated..."; else echo "There was any issue updating the read-only attribute. Please ensure Elasticsearch is running.";fi; curl -XPUT -H "Content-Type: application/json" -L http://$IP:9400/_all/_settings -d '{"index.blocks.read_only_allow_delete": null}' 2>&1 | if grep -q ack; then echo "Index settings updated..."; else echo "There was any issue updating the read-only attribute. Please ensure Elasticsearch is running.";fi;
done


@@ -19,15 +19,7 @@
 . /usr/sbin/so-common
 if [ "$1" == "" ]; then
-{% if grains['role'] in ['so-node','so-heavynode'] %}
 curl -s -k -L https://{{ NODEIP }}:9200/_nodes/stats | jq .nodes | jq ".[] | .ingest.pipelines"
-{% else %}
-curl -s -L {{ NODEIP }}:9200/_nodes/stats | jq .nodes | jq ".[] | .ingest.pipelines"
-{% endif %}
 else
-{% if grains['role'] in ['so-node','so-heavynode'] %}
 curl -s -k -L https://{{ NODEIP }}:9200/_nodes/stats | jq .nodes | jq ".[] | .ingest.pipelines.\"$1\""
-{% else %}
-curl -s -L {{ NODEIP }}:9200/_nodes/stats | jq .nodes | jq ".[] | .ingest.pipelines.\"$1\""
-{% endif %}
 fi


@@ -17,15 +17,7 @@
 {%- set NODEIP = salt['pillar.get']('elasticsearch:mainip', '') -%}
 . /usr/sbin/so-common
 if [ "$1" == "" ]; then
-{% if grains['role'] in ['so-node','so-heavynode'] %}
 curl -s -k -L https://{{ NODEIP }}:9200/_ingest/pipeline/* | jq 'keys'
-{% else %}
-curl -s -L {{ NODEIP }}:9200/_ingest/pipeline/* | jq 'keys'
-{% endif %}
 else
-{% if grains['role'] in ['so-node','so-heavynode'] %}
 curl -s -k -L https://{{ NODEIP }}:9200/_ingest/pipeline/$1 | jq
-{% else %}
-curl -s -L {{ NODEIP }}:9200/_ingest/pipeline/$1 | jq
-{% endif %}
 fi


@@ -17,15 +17,7 @@
 {%- set NODEIP = salt['pillar.get']('elasticsearch:mainip', '') -%}
 . /usr/sbin/so-common
 if [ "$1" == "" ]; then
-{% if grains['role'] in ['so-node','so-heavynode'] %}
 curl -s -k -L https://{{ NODEIP }}:9200/_template/* | jq 'keys'
-{% else %}
-curl -s -L {{ NODEIP }}:9200/_template/* | jq 'keys'
-{% endif %}
 else
-{% if grains['role'] in ['so-node','so-heavynode'] %}
 curl -s -k -L https://{{ NODEIP }}:9200/_template/$1 | jq
-{% else %}
-curl -s -L {{ NODEIP }}:9200/_template/$1 | jq
-{% endif %}
 fi


@@ -30,11 +30,7 @@ echo -n "Waiting for ElasticSearch..."
 COUNT=0
 ELASTICSEARCH_CONNECTED="no"
 while [[ "$COUNT" -le 240 ]]; do
-{% if grains['role'] in ['so-node','so-heavynode'] %}
 curl -k --output /dev/null --silent --head --fail -L https://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT"
-{% else %}
-curl --output /dev/null --silent --head --fail -L http://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT"
-{% endif %}
 if [ $? -eq 0 ]; then
 ELASTICSEARCH_CONNECTED="yes"
 echo "connected!"
@@ -55,11 +51,7 @@ cd ${ELASTICSEARCH_TEMPLATES}
 echo "Loading templates..."
-{% if grains['role'] in ['so-node','so-heavynode'] %}
 for i in *; do TEMPLATE=$(echo $i | cut -d '-' -f2); echo "so-$TEMPLATE"; curl -k ${ELASTICSEARCH_AUTH} -s -XPUT -L https://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}/_template/so-$TEMPLATE -H 'Content-Type: application/json' -d@$i 2>/dev/null; echo; done
-{% else %}
-for i in *; do TEMPLATE=$(echo $i | cut -d '-' -f2); echo "so-$TEMPLATE"; curl ${ELASTICSEARCH_AUTH} -s -XPUT -L http://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}/_template/so-$TEMPLATE -H 'Content-Type: application/json' -d@$i 2>/dev/null; echo; done
-{% endif %}
 echo
 cd - >/dev/null


@@ -1,53 +0,0 @@
#!/bin/bash
# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
. /usr/sbin/so-common
. /usr/sbin/so-image-common
local_salt_dir=/opt/so/saltstack/local
cat << EOF
This program will switch from the open source version of the Elastic Stack to the Features version licensed under the Elastic license.
If you proceed, then we will download new Docker images and restart services.
Please review the Elastic license:
https://raw.githubusercontent.com/elastic/elasticsearch/master/licenses/ELASTIC-LICENSE.txt
Please also note that, if you have a distributed deployment and continue with this change, Elastic traffic between nodes will change from encrypted to cleartext!
(We expect to support Elastic Features Security at some point in the future.)
Do you agree to the terms of the Elastic license and understand the note about encryption?
If so, type AGREE to accept the Elastic license and continue. Otherwise, just press Enter to exit this program without making any changes.
EOF
read INPUT
if [ "$INPUT" != "AGREE" ]; then
exit
fi
echo "Please wait while switching to Elastic Features."
require_manager
TRUSTED_CONTAINERS=( \
"so-elasticsearch" \
"so-filebeat" \
"so-kibana" \
"so-logstash" )
update_docker_containers "features" "-features"
# Modify global.sls to enable Features
sed -i 's/features: False/features: True/' $local_salt_dir/pillar/global.sls


@@ -15,8 +15,4 @@
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
-{% if grains['role'] in ['so-node','so-heavynode'] %}
 curl -X GET -k -L https://localhost:9200/_cat/indices?v
-{% else %}
-curl -X GET -L localhost:9200/_cat/indices?v
-{% endif %}


@@ -612,16 +612,6 @@ if [ $is_airgap -eq 0 ]; then
 else
 update_registry
 update_docker_containers "soup"
-FEATURESCHECK=$(lookup_pillar features elastic)
-if [[ "$FEATURESCHECK" == "True" ]]; then
-TRUSTED_CONTAINERS=(
-"so-elasticsearch"
-"so-filebeat"
-"so-kibana"
-"so-logstash"
-)
-update_docker_containers "features" "-features"
-fi
 fi
 echo ""
 echo "Stopping Salt Minion service."


@@ -12,11 +12,11 @@ client:
     - {{elasticsearch}}
   port: 9200
   url_prefix:
-{% if grains['role'] in ['so-node', 'so-heavynode'] %} use_ssl: True{% else %} use_ssl: False{% endif %}
+  use_ssl: True
   certificate:
   client_cert:
   client_key:
-{% if grains['role'] in ['so-node', 'so-heavynode'] %} ssl_no_validate: True{% else %} ssl_no_validate: False{% endif %}
+  ssl_no_validate: True
   http_auth:
   timeout: 30
   master_only: False


@@ -16,8 +16,8 @@ elastalert:
 #aws_region: us-east-1
 #profile: test
 #es_url_prefix: elasticsearch
-#use_ssl: True
-#verify_certs: True
+use_ssl: true
+verify_certs: false
 #es_send_get_body_as: GET
 #es_username: someusername
 #es_password: somepassword


@@ -104,8 +104,9 @@ elastaconf:
 wait_for_elasticsearch:
   module.run:
     - http.wait_for_successful_query:
-      - url: 'http://{{MANAGER}}:9200/_cat/indices/.kibana*'
+      - url: 'https://{{MANAGER}}:9200/_cat/indices/.kibana*'
       - wait_for: 180
+      - verify_ssl: False
 so-elastalert:
   docker_container.running:


@@ -9,12 +9,6 @@
 {%- set NODE_ROLES = salt['pillar.get']('elasticsearch:node_roles', ['data', 'ingest']) %}
 cluster.name: "{{ ESCLUSTERNAME }}"
 network.host: 0.0.0.0
-# minimum_master_nodes need to be explicitly set when bound on a public IP
-# set to 1 to allow single node clusters
-# Details: https://github.com/elastic/elasticsearch/pull/17288
-#discovery.zen.minimum_master_nodes: 1
-# This is a test -- if this is here, then the volume is mounted correctly.
 path.logs: /var/log/elasticsearch
 action.destructive_requires_name: true
 transport.bind_host: 0.0.0.0
@@ -25,17 +19,12 @@ cluster.routing.allocation.disk.watermark.low: 95%
 cluster.routing.allocation.disk.watermark.high: 98%
 cluster.routing.allocation.disk.watermark.flood_stage: 98%
 xpack.ml.enabled: false
-{%- if grains['role'] in ['so-node','so-heavynode'] %}
 xpack.security.enabled: true
-{%- else %}
-xpack.security.enabled: false
-{%- endif %}
 xpack.security.transport.ssl.enabled: true
 xpack.security.transport.ssl.verification_mode: none
 xpack.security.transport.ssl.key: /usr/share/elasticsearch/config/elasticsearch.key
 xpack.security.transport.ssl.certificate: /usr/share/elasticsearch/config/elasticsearch.crt
 xpack.security.transport.ssl.certificate_authorities: [ "/usr/share/elasticsearch/config/ca.crt" ]
-{%- if grains['role'] in ['so-node','so-heavynode'] %}
 xpack.security.http.ssl.enabled: true
 xpack.security.http.ssl.client_authentication: none
 xpack.security.http.ssl.key: /usr/share/elasticsearch/config/elasticsearch.key
@@ -46,7 +35,6 @@ xpack.security.authc:
 username: anonymous_user
 roles: superuser
 authz_exception: true
-{%- endif %}
 node.name: {{ grains.host }}
 script.max_compilations_rate: 1000/1m
 {%- if TRUECLUSTER is sameas true %}


@@ -0,0 +1,16 @@
{
"description" : "osquery live query",
"processors" : [
{
"script": {
"lang": "painless",
"source": "def dict = ['columns': new HashMap()]; for (entry in ctx['rows'].entrySet()) { dict['columns'][entry.getKey()] = entry.getValue(); } ctx['result'] = dict; "
}
},
{ "remove": { "field": [ "rows" ], "ignore_missing": true, "ignore_failure": true } },
{ "rename": { "field": "distributed_query_execution_id", "target_field": "result.query_id", "ignore_missing": true } },
{ "rename": { "field": "computer_name", "target_field": "host.hostname", "ignore_missing": true } },
{ "pipeline": { "name": "osquery.normalize" } },
{ "pipeline": { "name": "common" } }
]
}
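Once loaded, an ingest pipeline like the osquery.live_query definition above can be smoke-tested with Elasticsearch's simulate API. A minimal sketch, not part of this commit, assuming the pipeline (and the osquery.normalize and common pipelines it references) are already installed and Elasticsearch answers over HTTPS on the manager; the sample fields are illustrative:

# Hypothetical smoke test for the osquery.live_query pipeline (run on the manager)
curl -s -k -XPOST "https://localhost:9200/_ingest/pipeline/osquery.live_query/_simulate" \
  -H 'Content-Type: application/json' \
  -d '{"docs":[{"_source":{"rows":{"pid":"1234","name":"sshd","username":"root"},"distributed_query_execution_id":"42","computer_name":"sensor01"}}]}'
# The rows map should be copied under result.columns, then mapped to ECS fields
# (process.*, user.*) by the osquery.normalize pipeline.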


@@ -0,0 +1,14 @@
{
"description" : "osquery normalize",
"processors" : [
{ "rename": { "field": "result.columns.cmdline", "target_field": "process.command_line", "ignore_missing": true } },
{ "rename": { "field": "result.columns.cwd", "target_field": "process.working_directory", "ignore_missing": true } },
{ "rename": { "field": "result.columns.name", "target_field": "process.name", "ignore_missing": true } },
{ "rename": { "field": "result.columns.path", "target_field": "process.executable", "ignore_missing": true } },
{ "rename": { "field": "result.columns.pid", "target_field": "process.pid", "ignore_missing": true } },
{ "rename": { "field": "result.columns.parent", "target_field": "process.ppid", "ignore_missing": true } },
{ "rename": { "field": "result.columns.uid", "target_field": "user.id", "ignore_missing": true } },
{ "rename": { "field": "result.columns.username", "target_field": "user.name", "ignore_missing": true } },
{ "rename": { "field": "result.columns.gid", "target_field": "group.id", "ignore_missing": true } }
]
}


@@ -1,24 +1,19 @@
 {
 "description" : "osquery",
 "processors" : [
-{ "json": { "field": "message", "target_field": "message2", "ignore_failure": true } },
-{ "gsub": { "field": "message2.columns.data", "pattern": "\\\\xC2\\\\xAE", "replacement": "", "ignore_missing": true } },
-{ "rename": { "if": "ctx.message2.columns?.eventid != null", "field": "message2.columns", "target_field": "winlog", "ignore_missing": true } },
+{ "json": { "field": "message", "target_field": "result", "ignore_failure": true } },
+{ "gsub": { "field": "result.columns.data", "pattern": "\\\\xC2\\\\xAE", "replacement": "", "ignore_missing": true } },
+{ "rename": { "if": "ctx.result.columns?.eventid != null", "field": "result.columns", "target_field": "winlog", "ignore_missing": true } },
 { "json": { "field": "winlog.data", "target_field": "unparsed", "ignore_failure": true} },
 { "set": { "if": "!(ctx.unparsed?.EventData instanceof Map)", "field": "error.eventdata_parsing", "value": true, "ignore_failure": true } },
 { "rename": { "if": "!(ctx.error?.eventdata_parsing == true)", "field": "unparsed.EventData", "target_field": "winlog.event_data", "ignore_missing": true, "ignore_failure": true } },
 { "rename": { "field": "winlog.source", "target_field": "winlog.channel", "ignore_missing": true } },
 { "rename": { "field": "winlog.eventid", "target_field": "winlog.event_id", "ignore_missing": true } },
 { "pipeline": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational'", "name": "sysmon" } },
-{ "pipeline": { "if": "ctx.winlog?.channel != 'Microsoft-Windows-Sysmon/Operational'", "name":"win.eventlogs" } },
-{
-"script": {
-"lang": "painless",
-"source": "def dict = ['result': new HashMap()]; for (entry in ctx['message2'].entrySet()) { dict['result'][entry.getKey()] = entry.getValue(); } ctx['osquery'] = dict; "
-}
-},
+{ "pipeline": { "if": "ctx.winlog?.channel != 'Microsoft-Windows-Sysmon/Operational' && ctx.containsKey('winlog')", "name":"win.eventlogs" } },
 { "set": { "field": "event.module", "value": "osquery", "override": false } },
-{ "set": { "field": "event.dataset", "value": "{{osquery.result.name}}", "override": false} },
+{ "set": { "field": "event.dataset", "value": "{{result.name}}", "override": false} },
+{ "pipeline": { "if": "!(ctx.containsKey('winlog'))", "name": "osquery.normalize" } },
 { "pipeline": { "name": "common" } }
 ]
 }


@@ -27,11 +27,7 @@ echo -n "Waiting for ElasticSearch..."
 COUNT=0
 ELASTICSEARCH_CONNECTED="no"
 while [[ "$COUNT" -le 240 ]]; do
-{% if grains['role'] in ['so-node','so-heavynode'] %}
 curl ${ELASTICSEARCH_AUTH} -k --output /dev/null --silent --head --fail -L https://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT"
-{% else %}
-curl ${ELASTICSEARCH_AUTH} --output /dev/null --silent --head --fail -L http://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT"
-{% endif %}
 if [ $? -eq 0 ]; then
 ELASTICSEARCH_CONNECTED="yes"
 echo "connected!"
@@ -51,11 +47,7 @@ fi
 cd ${ELASTICSEARCH_INGEST_PIPELINES}
 echo "Loading pipelines..."
-{% if grains['role'] in ['so-node','so-heavynode'] %}
 for i in *; do echo $i; RESPONSE=$(curl ${ELASTICSEARCH_AUTH} -k -XPUT -L https://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}/_ingest/pipeline/$i -H 'Content-Type: application/json' -d@$i 2>/dev/null); echo $RESPONSE; if [[ "$RESPONSE" == *"error"* ]]; then RETURN_CODE=1; fi; done
-{% else %}
-for i in *; do echo $i; RESPONSE=$(curl ${ELASTICSEARCH_AUTH} -XPUT -L http://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}/_ingest/pipeline/$i -H 'Content-Type: application/json' -d@$i 2>/dev/null); echo $RESPONSE; if [[ "$RESPONSE" == *"error"* ]]; then RETURN_CODE=1; fi; done
-{% endif %}
 echo
 cd - >/dev/null


@@ -1,17 +0,0 @@
keystore.path: /usr/share/elasticsearch/config/sokeys
keystore.password: changeit
keystore.algorithm: SunX509
truststore.path: /etc/pki/java/cacerts
truststore.password: changeit
truststore.algorithm: PKIX
protocols:
- TLSv1.2
ciphers:
- TLS_RSA_WITH_AES_128_CBC_SHA256
- TLS_RSA_WITH_AES_256_GCM_SHA384
transport.encrypted: true
{%- if grains['role'] in ['so-node','so-heavynode'] %}
http.encrypted: true
{%- else %}
http.encrypted: false
{%- endif %}


@@ -140,14 +140,6 @@ esyml:
 - group: 939
 - template: jinja
-sotls:
-  file.managed:
-    - name: /opt/so/conf/elasticsearch/sotls.yml
-    - source: salt://elasticsearch/files/sotls.yml
-    - user: 930
-    - group: 939
-    - template: jinja
 #sync templates to /opt/so/conf/elasticsearch/templates
 {% for TEMPLATE in TEMPLATES %}
 es_template_{{TEMPLATE.split('.')[0] | replace("/","_") }}:
@@ -199,7 +191,7 @@ so-elasticsearch:
 {% if TRUECLUSTER is sameas false or (TRUECLUSTER is sameas true and not salt['pillar.get']('nodestab', {})) %}
   - discovery.type=single-node
 {% endif %}
-  - ES_JAVA_OPTS=-Xms{{ esheap }} -Xmx{{ esheap }}
+  - ES_JAVA_OPTS=-Xms{{ esheap }} -Xmx{{ esheap }} -Des.transport.cname_in_publish_address=true
 ulimits:
   - memlock=-1:-1
   - nofile=65536:65536
@@ -221,7 +213,6 @@ so-elasticsearch:
   - /etc/pki/elasticsearch.crt:/usr/share/elasticsearch/config/elasticsearch.crt:ro
   - /etc/pki/elasticsearch.key:/usr/share/elasticsearch/config/elasticsearch.key:ro
   - /etc/pki/elasticsearch.p12:/usr/share/elasticsearch/config/elasticsearch.p12:ro
-  - /opt/so/conf/elasticsearch/sotls.yml:/usr/share/elasticsearch/config/sotls.yml:ro
 - watch:
   - file: cacertz
   - file: esyml


@@ -365,6 +365,10 @@
"request":{ "request":{
"type":"object", "type":"object",
"dynamic": true "dynamic": true
},
"result":{
"type":"object",
"dynamic": true
}, },
"rfb":{ "rfb":{
"type":"object", "type":"object",


@@ -260,7 +260,7 @@ output.{{ type }}:
{%- if grains['role'] in ["so-eval", "so-import"] %} {%- if grains['role'] in ["so-eval", "so-import"] %}
output.elasticsearch: output.elasticsearch:
enabled: true enabled: true
hosts: ["{{ MANAGER }}:9200"] hosts: ["https://{{ MANAGER }}:9200"]
pipelines: pipelines:
- pipeline: "%{[module]}.%{[dataset]}" - pipeline: "%{[module]}.%{[dataset]}"
indices: indices:


@@ -13,7 +13,6 @@
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 {% from 'allowed_states.map.jinja' import allowed_states %}
 {% if sls in allowed_states %}
 {% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
 {% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
 {% set LOCALHOSTNAME = salt['grains.get']('host') %}


@@ -1,53 +0,0 @@
{%- set ES = salt['pillar.get']('manager:mainip', '') -%}
# Wait for ElasticSearch to come up, so that we can query for version infromation
echo -n "Waiting for ElasticSearch..."
COUNT=0
ELASTICSEARCH_CONNECTED="no"
while [[ "$COUNT" -le 30 ]]; do
curl --output /dev/null --silent --head --fail -L http://{{ ES }}:9200
if [ $? -eq 0 ]; then
ELASTICSEARCH_CONNECTED="yes"
echo "connected!"
break
else
((COUNT+=1))
sleep 1
echo -n "."
fi
done
if [ "$ELASTICSEARCH_CONNECTED" == "no" ]; then
echo
echo -e "Connection attempt timed out. Unable to connect to ElasticSearch. \nPlease try: \n -checking log(s) in /var/log/elasticsearch/\n -running 'sudo docker ps' \n -running 'sudo so-elastic-restart'"
echo
exit
fi
# Make sure Kibana is running
MAX_WAIT=240
# Check to see if Kibana is available
wait_step=0
until curl -s -XGET -L http://{{ ES }}:5601 > /dev/null ; do
wait_step=$(( ${wait_step} + 1 ))
echo "Waiting on Kibana...Attempt #$wait_step"
if [ ${wait_step} -gt ${MAX_WAIT} ]; then
echo "ERROR: Kibana not available for more than ${MAX_WAIT} seconds."
exit 5
fi
sleep 1s;
done
# Apply Kibana template
echo
echo "Applying Kibana template..."
curl -s -XPUT -L http://{{ ES }}:9200/_template/kibana \
-H 'Content-Type: application/json' \
-d'{"index_patterns" : ".kibana", "settings": { "number_of_shards" : 1, "number_of_replicas" : 0 }, "mappings" : { "search": {"properties": {"hits": {"type": "integer"}, "version": {"type": "integer"}}}}}'
echo
curl -s -XPUT -L "{{ ES }}:9200/.kibana/_settings" \
-H 'Content-Type: application/json' \
-d'{"index" : {"number_of_replicas" : 0}}'
echo


@@ -14,5 +14,9 @@ cp /opt/so/conf/kibana/saved_objects.ndjson.template /opt/so/conf/kibana/saved_o
 # SOCtopus and Manager
 sed -i "s/PLACEHOLDER/{{ MANAGER }}/g" /opt/so/conf/kibana/saved_objects.ndjson
+## This hackery will be removed if using Elastic Auth ##
+# Let's snag a cookie from Kibana
+THECOOKIE=$(curl -c - -X GET http://localhost:5601/ | grep sid | awk '{print $7}')
 # Load saved objects
-curl -X POST "localhost:5601/api/saved_objects/_import?overwrite=true" -H "kbn-xsrf: true" --form file=@/opt/so/conf/kibana/saved_objects.ndjson > /dev/null 2>&1
+curl -b "sid=$THECOOKIE" -L -X POST "localhost:5601/api/saved_objects/_import?overwrite=true" -H "kbn-xsrf: true" --form file=@/opt/so/conf/kibana/saved_objects.ndjson > /dev/null 2>&1


@@ -1,11 +1,11 @@
 ---
 # Default Kibana configuration from kibana-docker.
 {%- set ES = salt['pillar.get']('manager:mainip', '') -%}
-{%- set FEATURES = salt['pillar.get']('elastic:features', False) %}
 server.name: kibana
 server.host: "0"
 server.basePath: /kibana
-elasticsearch.hosts: [ "http://{{ ES }}:9200" ]
+elasticsearch.hosts: [ "https://{{ ES }}:9200" ]
+elasticsearch.ssl.verificationMode: none
 #kibana.index: ".kibana"
 #elasticsearch.username: elastic
 #elasticsearch.password: changeme
@@ -14,3 +14,7 @@ elasticsearch.requestTimeout: 90000
 logging.dest: /var/log/kibana/kibana.log
 telemetry.enabled: false
 security.showInsecureClusterWarning: false
+xpack.security.authc.providers:
+  anonymous.anonymous1:
+    order: 0
+    credentials: "elasticsearch_anonymous_user"


@@ -0,0 +1,19 @@
{%- set MANAGER = salt['grains.get']('master') %}
{%- set THREADS = salt['pillar.get']('logstash_settings:ls_input_threads', '') %}
{% set BATCH = salt['pillar.get']('logstash_settings:ls_pipeline_batch_size', 125) %}
input {
redis {
host => '{{ MANAGER }}'
port => 6379
data_type => 'pattern_channel'
key => 'results_*'
type => 'live_query'
add_field => {
"module" => "osquery"
"dataset" => "live_query"
}
threads => {{ THREADS }}
batch_count => {{ BATCH }}
}
}
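The input above subscribes to Redis pattern channels named results_*, which is where the Fleet live query results are expected to arrive on the manager. A rough way to confirm results are flowing, assuming shell access to the manager's Redis on 6379; the host flag is a placeholder:

# Hypothetical check: watch the channels this Logstash input reads from
redis-cli -h <manager-ip> -p 6379 PSUBSCRIBE 'results_*'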


@@ -3,7 +3,6 @@
 {%- else %}
 {%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
 {%- endif %}
-{% set FEATURES = salt['pillar.get']('elastic:features', False) %}
 output {
 if [module] =~ "zeek" and "import" not in [tags] {
 elasticsearch {
@@ -13,10 +12,8 @@ output {
 template_name => "so-zeek"
 template => "/templates/so-zeek-template.json"
 template_overwrite => true
-{%- if grains['role'] in ['so-node','so-heavynode'] %}
 ssl => true
 ssl_certificate_verification => false
-{%- endif %}
 }
 }
 }


@@ -3,7 +3,6 @@
 {%- else %}
 {%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
 {%- endif %}
-{% set FEATURES = salt['pillar.get']('elastic:features', False) %}
 output {
 if "import" in [tags] {
 elasticsearch {
@@ -13,10 +12,8 @@ output {
 template_name => "so-import"
 template => "/templates/so-import-template.json"
 template_overwrite => true
-{%- if grains['role'] in ['so-node','so-heavynode'] %}
 ssl => true
 ssl_certificate_verification => false
-{%- endif %}
 }
 }
 }


@@ -3,7 +3,6 @@
 {%- else %}
 {%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
 {%- endif %}
-{% set FEATURES = salt['pillar.get']('elastic:features', False) %}
 output {
 if [event_type] == "sflow" {
 elasticsearch {
@@ -12,10 +11,8 @@ output {
 template_name => "so-flow"
 template => "/templates/so-flow-template.json"
 template_overwrite => true
-{%- if grains['role'] in ['so-node','so-heavynode'] %}
 ssl => true
 ssl_certificate_verification => false
-{%- endif %}
 }
 }
 }


@@ -3,7 +3,6 @@
 {%- else %}
 {%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
 {%- endif %}
-{% set FEATURES = salt['pillar.get']('elastic:features', False) %}
 output {
 if [event_type] == "ids" and "import" not in [tags] {
 elasticsearch {
@@ -12,10 +11,8 @@ output {
 template_name => "so-ids"
 template => "/templates/so-ids-template.json"
 template_overwrite => true
-{%- if grains['role'] in ['so-node','so-heavynode'] %}
 ssl => true
 ssl_certificate_verification => false
-{%- endif %}
 }
 }
 }


@@ -3,7 +3,6 @@
 {%- else %}
 {%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
 {%- endif %}
-{% set FEATURES = salt['pillar.get']('elastic:features', False) %}
 output {
 if [module] =~ "syslog" {
 elasticsearch {
@@ -13,10 +12,8 @@ output {
 template_name => "so-syslog"
 template => "/templates/so-syslog-template.json"
 template_overwrite => true
-{%- if grains['role'] in ['so-node','so-heavynode'] %}
 ssl => true
 ssl_certificate_verification => false
-{%- endif %}
 }
 }
 }


@@ -3,9 +3,8 @@
 {%- else %}
 {%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
 {%- endif %}
-{% set FEATURES = salt['pillar.get']('elastic:features', False) %}
 output {
-if [module] =~ "osquery" {
+if [module] =~ "osquery" and "live_query" not in [dataset] {
 elasticsearch {
 pipeline => "%{module}.%{dataset}"
 hosts => "{{ ES }}"
@@ -13,10 +12,8 @@ output {
 template_name => "so-osquery"
 template => "/templates/so-osquery-template.json"
 template_overwrite => true
-{%- if grains['role'] in ['so-node','so-heavynode'] %}
 ssl => true
 ssl_certificate_verification => false
-{%- endif %}
 }
 }
 }


@@ -0,0 +1,43 @@
{%- if grains['role'] == 'so-eval' -%}
{%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %}
{% set FEATURES = salt['pillar.get']('elastic:features', False) %}
filter {
if [type] =~ "live_query" {
mutate {
rename => {
"[host][hostname]" => "computer_name"
}
}
prune {
blacklist_names => ["host"]
}
split {
field => "rows"
}
}
}
output {
if [type] =~ "live_query" {
elasticsearch {
pipeline => "osquery.live_query"
hosts => "{{ ES }}"
index => "so-osquery"
template_name => "so-osquery"
template => "/templates/so-osquery-template.json"
template_overwrite => true
{%- if grains['role'] in ['so-node','so-heavynode'] %}
ssl => true
ssl_certificate_verification => false
{%- endif %}
}
}
}


@@ -3,7 +3,6 @@
 {%- else %}
 {%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
 {%- endif %}
-{% set FEATURES = salt['pillar.get']('elastic:features', False) %}
 output {
 if [dataset] =~ "firewall" {
 elasticsearch {
@@ -12,10 +11,8 @@ output {
 template_name => "so-firewall"
 template => "/templates/so-firewall-template.json"
 template_overwrite => true
-{%- if grains['role'] in ['so-node','so-heavynode'] %}
 ssl => true
 ssl_certificate_verification => false
-{%- endif %}
 }
 }
 }


@@ -3,7 +3,6 @@
 {%- else %}
 {%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
 {%- endif %}
-{% set FEATURES = salt['pillar.get']('elastic:features', False) %}
 output {
 if [module] =~ "suricata" and "import" not in [tags] {
 elasticsearch {
@@ -12,10 +11,8 @@ output {
 index => "so-ids"
 template_name => "so-ids"
 template => "/templates/so-ids-template.json"
-{%- if grains['role'] in ['so-node','so-heavynode'] %}
 ssl => true
 ssl_certificate_verification => false
-{%- endif %}
 }
 }
 }


@@ -3,7 +3,6 @@
 {%- else %}
 {%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
 {%- endif %}
-{% set FEATURES = salt['pillar.get']('elastic:features', False) %}
 output {
 if "beat-ext" in [tags] and "import" not in [tags] {
 elasticsearch {
@@ -13,10 +12,8 @@ output {
 template_name => "so-beats"
 template => "/templates/so-beats-template.json"
 template_overwrite => true
-{%- if grains['role'] in ['so-node','so-heavynode'] %}
 ssl => true
 ssl_certificate_verification => false
-{%- endif %}
 }
 }
 }


@@ -3,7 +3,6 @@
 {%- else %}
 {%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
 {%- endif %}
-{% set FEATURES = salt['pillar.get']('elastic:features', False) %}
 output {
 if [module] =~ "ossec" {
 elasticsearch {
@@ -13,10 +12,8 @@ output {
 template_name => "so-ossec"
 template => "/templates/so-ossec-template.json"
 template_overwrite => true
-{%- if grains['role'] in ['so-node','so-heavynode'] %}
 ssl => true
 ssl_certificate_verification => false
-{%- endif %}
 }
 }
 }


@@ -3,7 +3,6 @@
 {%- else %}
 {%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
 {%- endif %}
-{% set FEATURES = salt['pillar.get']('elastic:features', False) %}
 output {
 if [module] =~ "strelka" {
 elasticsearch {
@@ -13,10 +12,8 @@ output {
 template_name => "so-strelka"
 template => "/templates/so-strelka-template.json"
 template_overwrite => true
-{%- if grains['role'] in ['so-node','so-heavynode'] %}
 ssl => true
 ssl_certificate_verification => false
-{%- endif %}
 }
 }
 }


@@ -42,6 +42,7 @@
{ "name": "MYSQL", "description": "MYSQL grouped by command", "query": "event.dataset:mysql | groupby mysql.command"}, { "name": "MYSQL", "description": "MYSQL grouped by command", "query": "event.dataset:mysql | groupby mysql.command"},
{ "name": "NOTICE", "description": "Zeek notice logs grouped by note and message", "query": "event.dataset:notice | groupby notice.note notice.message"}, { "name": "NOTICE", "description": "Zeek notice logs grouped by note and message", "query": "event.dataset:notice | groupby notice.note notice.message"},
{ "name": "NTLM", "description": "NTLM grouped by computer name", "query": "event.dataset:ntlm | groupby ntlm.server.dns.name"}, { "name": "NTLM", "description": "NTLM grouped by computer name", "query": "event.dataset:ntlm | groupby ntlm.server.dns.name"},
{ "name": "Osquery Live Queries", "description": "Osquery Live Query results grouped by computer name", "query": "event.dataset:live_query | groupby host.hostname"},
{ "name": "PE", "description": "PE files list", "query": "event.dataset:pe | groupby file.machine file.os file.subsystem"}, { "name": "PE", "description": "PE files list", "query": "event.dataset:pe | groupby file.machine file.os file.subsystem"},
{ "name": "RADIUS", "description": "RADIUS grouped by username", "query": "event.dataset:radius | groupby user.name.keyword"}, { "name": "RADIUS", "description": "RADIUS grouped by username", "query": "event.dataset:radius | groupby user.name.keyword"},
{ "name": "RDP", "description": "RDP grouped by client name", "query": "event.dataset:rdp | groupby client.name"}, { "name": "RDP", "description": "RDP grouped by client name", "query": "event.dataset:rdp | groupby client.name"},


@@ -1,7 +1,6 @@
 {%- set MANAGERIP = salt['pillar.get']('global:managerip', '') %}
 {%- set SENSORONIKEY = salt['pillar.get']('global:sensoronikey', '') %}
 {%- set THEHIVEKEY = salt['pillar.get']('global:hivekey', '') %}
-{%- set FEATURES = salt['pillar.get']('elastic:features', False) %}
 {%- set ISAIRGAP = salt['pillar.get']('global:airgap', 'False') %}
 {%- import_json "soc/files/soc/alerts.queries.json" as alerts_queries %}
 {%- import_json "soc/files/soc/alerts.actions.json" as alerts_actions %}
@@ -31,7 +30,7 @@
 "hostUrl": "http://{{ MANAGERIP }}:4434/"
 },
 "elastic": {
-"hostUrl": "http://{{ MANAGERIP }}:9200",
+"hostUrl": "https://{{ MANAGERIP }}:9200",
 {%- if salt['pillar.get']('nodestab', {}) %}
 "remoteHostUrls": [
 {%- for SN, SNDATA in salt['pillar.get']('nodestab', {}).items() %}
@@ -45,7 +44,7 @@
 },
 "sostatus": {
 "refreshIntervalMs": 30000,
-"offlineThresholdMs": 60000
+"offlineThresholdMs": 900000
 },
 {% if THEHIVEKEY != '' %}
 "thehive": {


@@ -6,7 +6,7 @@
 [es]
-es_url = http://{{MANAGER}}:9200
+es_url = https://{{MANAGER}}:9200
 es_ip = {{MANAGER}}
 es_user = YOURESUSER
 es_pass = YOURESPASS


@@ -622,7 +622,7 @@
 # ## specify a list of one or more Elasticsearch servers
 # # you can add username and password to your url to use basic authentication:
 # # servers = ["http://user:pass@localhost:9200"]
-servers = ["http://{{ MANAGER }}:9200"]
+servers = ["https://{{ MANAGER }}:9200"]
 {% elif grains['role'] in ['so-node', 'so-hotnode', 'so-warmnode', 'so-heavynode'] %}
 [[inputs.elasticsearch]]
 servers = ["https://{{ NODEIP }}:9200"]


@@ -1,7 +1,6 @@
 #!/bin/bash
 {% set ES = salt['pillar.get']('manager:mainip', '') %}
 {% set MANAGER = salt['grains.get']('master') %}
-{% set FEATURES = salt['pillar.get']('elastic:features', False) %}
 {% set TRUECLUSTER = salt['pillar.get']('elasticsearch:true_cluster', False) %}
 # Wait for ElasticSearch to come up, so that we can query for version infromation
@@ -9,7 +8,7 @@ echo -n "Waiting for ElasticSearch..."
 COUNT=0
 ELASTICSEARCH_CONNECTED="no"
 while [[ "$COUNT" -le 30 ]]; do
-curl --output /dev/null --silent --head --fail -L http://{{ ES }}:9200
+curl -k --output /dev/null --silent --head --fail -L https://{{ ES }}:9200
 if [ $? -eq 0 ]; then
 ELASTICSEARCH_CONNECTED="yes"
 echo "connected!"
@@ -29,7 +28,7 @@ if [ "$ELASTICSEARCH_CONNECTED" == "no" ]; then
 fi
 echo "Applying cross cluster search config..."
-curl -s -XPUT -L http://{{ ES }}:9200/_cluster/settings \
+curl -s -k -XPUT -L https://{{ ES }}:9200/_cluster/settings \
 -H 'Content-Type: application/json' \
 -d "{\"persistent\": {\"search\": {\"remote\": {\"{{ MANAGER }}\": {\"seeds\": [\"127.0.0.1:9300\"]}}}}}"
@@ -37,7 +36,7 @@ echo "Applying cross cluster search config..."
 {%- if TRUECLUSTER is sameas false %}
 {%- if salt['pillar.get']('nodestab', {}) %}
 {%- for SN, SNDATA in salt['pillar.get']('nodestab', {}).items() %}
-curl -XPUT -L http://{{ ES }}:9200/_cluster/settings -H'Content-Type: application/json' -d '{"persistent": {"search": {"remote": {"{{ SN }}": {"skip_unavailable": "true", "seeds": ["{{ SN.split('_')|first }}:9300"]}}}}}'
+curl -s -k -XPUT -L https://{{ ES }}:9200/_cluster/settings -H'Content-Type: application/json' -d '{"persistent": {"search": {"remote": {"{{ SN }}": {"skip_unavailable": "true", "seeds": ["{{ SN.split('_')|first }}:9300"]}}}}}'
 {%- endfor %}
 {%- endif %}
 {%- endif %}
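After the cross cluster search settings above are applied, the configured remotes can be checked directly. A minimal sketch, not part of this commit, assuming it runs on the manager now that Elasticsearch answers over HTTPS:

# List cross cluster search remotes and their connection state
curl -s -k -L https://localhost:9200/_remote/info | jq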


@@ -6,7 +6,7 @@ echo -n "Waiting for ElasticSearch..."
 COUNT=0
 ELASTICSEARCH_CONNECTED="no"
 while [[ "$COUNT" -le 30 ]]; do
-curl --output /dev/null --silent --head --fail -L http://{{ ES }}:9200
+curl -k --output /dev/null --silent --head --fail -L https://{{ ES }}:9200
 if [ $? -eq 0 ]; then
 ELASTICSEARCH_CONNECTED="yes"
 echo "connected!"
@@ -26,6 +26,6 @@ if [ "$ELASTICSEARCH_CONNECTED" == "no" ]; then
 fi
 echo "Applying cross cluster search config..."
-curl -s -XPUT -L http://{{ ES }}:9200/_cluster/settings \
+curl -s -k -XPUT -L https://{{ ES }}:9200/_cluster/settings \
 -H 'Content-Type: application/json' \
 -d "{\"persistent\": {\"search\": {\"remote\": {\"{{ grains.host }}\": {\"seeds\": [\"127.0.0.1:9300\"]}}}}}"