fix conflict

This commit is contained in:
Mike Reeves
2023-02-22 10:20:14 -05:00
104 changed files with 2331 additions and 1784 deletions

View File

@@ -45,12 +45,10 @@ echo " rootfs: $ROOTFS" >> $local_salt_dir/pillar/data/$TYPE.sls
echo " nsmfs: $NSM" >> $local_salt_dir/pillar/data/$TYPE.sls echo " nsmfs: $NSM" >> $local_salt_dir/pillar/data/$TYPE.sls
if [ $TYPE == 'sensorstab' ]; then if [ $TYPE == 'sensorstab' ]; then
echo " monint: bond0" >> $local_salt_dir/pillar/data/$TYPE.sls echo " monint: bond0" >> $local_salt_dir/pillar/data/$TYPE.sls
salt-call state.apply grafana queue=True
fi fi
if [ $TYPE == 'evaltab' ] || [ $TYPE == 'standalonetab' ]; then if [ $TYPE == 'evaltab' ] || [ $TYPE == 'standalonetab' ]; then
echo " monint: bond0" >> $local_salt_dir/pillar/data/$TYPE.sls echo " monint: bond0" >> $local_salt_dir/pillar/data/$TYPE.sls
if [ ! $10 ]; then if [ ! $10 ]; then
salt-call state.apply grafana queue=True
salt-call state.apply utility queue=True salt-call state.apply utility queue=True
fi fi
fi fi

View File

@@ -33,7 +33,6 @@
'nginx', 'nginx',
'telegraf', 'telegraf',
'influxdb', 'influxdb',
'grafana',
'soc', 'soc',
'kratos', 'kratos',
'elastic-fleet', 'elastic-fleet',
@@ -82,7 +81,6 @@
'ssl', 'ssl',
'telegraf', 'telegraf',
'firewall', 'firewall',
'filebeat',
'idh', 'idh',
'schedule', 'schedule',
'docker_clean' 'docker_clean'
@@ -119,7 +117,6 @@
'nginx', 'nginx',
'telegraf', 'telegraf',
'influxdb', 'influxdb',
'grafana',
'soc', 'soc',
'kratos', 'kratos',
'elastic-fleet', 'elastic-fleet',
@@ -139,7 +136,6 @@
'nginx', 'nginx',
'telegraf', 'telegraf',
'influxdb', 'influxdb',
'grafana',
'soc', 'soc',
'kratos', 'kratos',
'elastic-fleet', 'elastic-fleet',
@@ -169,7 +165,6 @@
'nginx', 'nginx',
'telegraf', 'telegraf',
'influxdb', 'influxdb',
'grafana',
'soc', 'soc',
'kratos', 'kratos',
'elastic-fleet', 'elastic-fleet',

View File

View File

View File

@@ -24,7 +24,7 @@ mkdir -p /opt/so/conf/elastic-fleet/certs
cp /etc/ssl/certs/intca.crt /opt/so/conf/elastic-fleet/certs cp /etc/ssl/certs/intca.crt /opt/so/conf/elastic-fleet/certs
cp /etc/pki/elasticfleet* /opt/so/conf/elastic-fleet/certs cp /etc/pki/elasticfleet* /opt/so/conf/elastic-fleet/certs
{% if grains.role == 'so-import' %} {% if grains.role in ['so-import', 'so-standalone', 'so-eval'] %}
# Add SO-Manager Elasticsearch Ouput # Add SO-Manager Elasticsearch Ouput
ESCACRT=$(openssl x509 -in /opt/so/conf/elastic-fleet/certs/intca.crt) ESCACRT=$(openssl x509 -in /opt/so/conf/elastic-fleet/certs/intca.crt)
JSON_STRING=$( jq -n \ JSON_STRING=$( jq -n \

View File

@@ -0,0 +1,15 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

# Show the Elasticsearch ILM "explain" output.
# Usage: no argument explains all indices; an index name as $1 explains only that index.
# NOTE: the original first line was "#/bin/bash" (missing '!'), which is just a
# comment and leaves the interpreter up to the caller — fixed to a real shebang.

. /usr/sbin/so-common

{%- set NODEIP = salt['pillar.get']('host:mainip', '') %}

if [ "$1" == "" ]; then
  # All indices: dump the whole explain document.
  curl -K /opt/so/conf/elasticsearch/curl.config -s -k -L "https://{{ NODEIP }}:9200/_all/_ilm/explain" | jq .
else
  # Single index: unwrap the per-index objects.
  curl -K /opt/so/conf/elasticsearch/curl.config -s -k -L "https://{{ NODEIP }}:9200/$1/_ilm/explain" | jq .[]
fi

View File

@@ -1,12 +1,11 @@
#!/bin/bash #/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one # Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at # or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the # https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0. # Elastic License 2.0.
. /usr/sbin/so-common . /usr/sbin/so-common
/usr/sbin/so-stop grafana $1 {%- set NODEIP = salt['pillar.get']('host:mainip', '') %}
curl -K /opt/so/conf/elasticsearch/curl.config -s -k -L -X DELETE https://{{ NODEIP }}:9200/_ilm/policy/$1

View File

@@ -0,0 +1,21 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

# Create/update the "<index>-logs" ILM policy in Elasticsearch for every index
# whose pillar (or defaults.yaml) settings define a policy. This file is a Salt
# Jinja template; the for-loop below is expanded at render time.
# Fixed: the original first line was "#/bin/bash" (missing '!').

. /usr/sbin/so-common

{% import_yaml 'elasticsearch/defaults.yaml' as ESCONFIG with context %}
{%- set ES_INDEX_SETTINGS = salt['pillar.get']('elasticsearch:index_settings', default=ESCONFIG.elasticsearch.index_settings, merge=True) %}
{%- set NODEIP = salt['pillar.get']('host:mainip', '') %}

{%- for index, settings in ES_INDEX_SETTINGS.items() %}
{%- if settings.policy is defined %}
echo
echo "Setting up {{ index }}-logs policy..."
curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -s -k -L -X PUT "https://{{ NODEIP }}:9200/_ilm/policy/{{ index }}-logs" -H 'Content-Type: application/json' -d'{ "policy": {{ settings.policy | tojson(true) }} }'
echo
{%- endif %}
{%- endfor %}
echo

View File

@@ -0,0 +1,15 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

# List Elasticsearch ILM policies.
# Usage: no argument lists every policy; a policy name as $1 shows only that policy.
# Fixed: the original first line was "#/bin/bash" (missing '!').

. /usr/sbin/so-common

{%- set NODEIP = salt['pillar.get']('host:mainip', '') %}

if [ "$1" == "" ]; then
  curl -K /opt/so/conf/elasticsearch/curl.config -s -k -L "https://{{ NODEIP }}:9200/_ilm/policy" | jq .
else
  curl -K /opt/so/conf/elasticsearch/curl.config -s -k -L "https://{{ NODEIP }}:9200/_ilm/policy/$1" | jq .[]
fi

View File

@@ -1,12 +1,10 @@
#!/bin/bash #/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one # Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at # or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the # https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0. # Elastic License 2.0.
. /usr/sbin/so-common . /usr/sbin/so-common
/usr/sbin/so-restart grafana $1 so-elasticsearch-ilm-stop
so-elasticsearch-ilm-start

View File

@@ -0,0 +1,12 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

# Start the Elasticsearch ILM service on the local node.
# BUGFIX: the original first line was the bare command "/bin/bash" (no "#!" at
# all) — when executed, that line spawned a child bash that consumed the
# script's stdin instead of acting as an interpreter directive.

. /usr/sbin/so-common

{%- set NODEIP = salt['pillar.get']('host:mainip', '') %}

echo "Starting ILM..."
curl -K /opt/so/conf/elasticsearch/curl.config -s -k -L -X POST "https://{{ NODEIP }}:9200/_ilm/start"

View File

@@ -1,12 +1,11 @@
#!/bin/bash /bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one # Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at # or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the # https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0. # Elastic License 2.0.
. /usr/sbin/so-common . /usr/sbin/so-common
/usr/sbin/so-start grafana $1 {%- set NODEIP = salt['pillar.get']('host:mainip', '') %}
curl -K /opt/so/conf/elasticsearch/curl.config -s -k -L https://{{ NODEIP }}:9200/_ilm/status | jq .

View File

@@ -0,0 +1,12 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

# Stop the Elasticsearch ILM service on the local node.
# Fixed: the original first line was "#/bin/bash" (missing '!').

. /usr/sbin/so-common

{%- set NODEIP = salt['pillar.get']('host:mainip', '') %}

echo "Stopping ILM..."
curl -K /opt/so/conf/elasticsearch/curl.config -s -k -L -X POST "https://{{ NODEIP }}:9200/_ilm/stop"

View File

@@ -69,7 +69,7 @@ fi
so-firewall --apply --role=heavynodes --ip="$IP" so-firewall --apply --role=heavynodes --ip="$IP"
;; ;;
'IDH') 'IDH')
so-firewall --apply --role=beats_endpoint_ssl --ip="$IP" so-firewall --apply --role=sensors --ip="$IP"
;; ;;
'RECEIVER') 'RECEIVER')
so-firewall --apply --role=receivers --ip="$IP" so-firewall --apply --role=receivers --ip="$IP"

View File

@@ -1,20 +0,0 @@
# this script is used to delete the default Grafana dashboard folders that existed prior to Grafana dashboard and Salt management changes in 2.3.70
# Exit if an error occurs. The next highstate will retry.
set -e
folders=$(curl -X GET http://admin:{{salt['pillar.get']('secrets:grafana_admin')}}@localhost:3000/api/folders | jq -r '.[] | @base64')
delfolder=("Manager" "Manager Search" "Sensor Nodes" "Search Nodes" "Standalone" "Eval Mode")
for row in $folders; do
title=$(echo ${row} | base64 --decode | jq -r '.title')
uid=$(echo ${row} | base64 --decode | jq -r '.uid')
if [[ " ${delfolder[@]} " =~ " ${title} " ]]; then
curl -X DELETE http://admin:{{salt['pillar.get']('secrets:grafana_admin')}}@localhost:3000/api/folders/$uid
fi
done
echo "so-grafana-dashboard-folder-delete has been run to delete default Grafana dashboard folders that existed prior to 2.3.70" > /opt/so/state/so-grafana-dashboard-folder-delete-complete
exit 0

View File

@@ -25,7 +25,6 @@ container_list() {
if [ $MANAGERCHECK == 'so-import' ]; then if [ $MANAGERCHECK == 'so-import' ]; then
TRUSTED_CONTAINERS=( TRUSTED_CONTAINERS=(
"so-elasticsearch" "so-elasticsearch"
"so-filebeat"
"so-idstools" "so-idstools"
"so-influxdb" "so-influxdb"
"so-kibana" "so-kibana"
@@ -47,8 +46,6 @@ container_list() {
"so-elastic-agent" "so-elastic-agent"
"so-elastic-agent-builder" "so-elastic-agent-builder"
"so-elasticsearch" "so-elasticsearch"
"so-filebeat"
"so-grafana"
"so-idh" "so-idh"
"so-idstools" "so-idstools"
"so-influxdb" "so-influxdb"
@@ -73,7 +70,6 @@ container_list() {
) )
else else
TRUSTED_CONTAINERS=( TRUSTED_CONTAINERS=(
"so-filebeat"
"so-idstools" "so-idstools"
"so-elasticsearch" "so-elasticsearch"
"so-logstash" "so-logstash"

View File

@@ -1,45 +0,0 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
. /usr/sbin/so-common
wdurregex="^[0-9]+w$"
ddurregex="^[0-9]+d$"
echo -e "\nThis script is used to reduce the size of InfluxDB by removing old data and retaining only the duration specified."
echo "The duration will need to be specified as an integer followed by the duration unit without a space."
echo -e "\nFor example, to purge all data but retain the past 12 weeks, specify 12w for the duration."
echo "The duration units are as follows:"
echo " w - week(s)"
echo " d - day(s)"
while true; do
echo ""
read -p 'Enter the duration of past data that you would like to retain: ' duration
duration=$(echo $duration | tr '[:upper:]' '[:lower:]')
if [[ "$duration" =~ $wdurregex ]] || [[ "$duration" =~ $ddurregex ]]; then
break
fi
echo -e "\nInvalid duration."
done
echo -e "\nInfluxDB will now be cleaned and leave only the past $duration worth of data."
read -r -p "Are you sure you want to continue? [y/N] " yorn
if [[ "$yorn" =~ ^([yY][eE][sS]|[yY])$ ]]; then
echo -e "\nCleaning InfluxDb and saving only the past $duration. This may could take several minutes depending on how much data needs to be cleaned."
if docker exec -t so-influxdb /bin/bash -c "influx -ssl -unsafeSsl -database telegraf -execute \"DELETE FROM /.*/ WHERE \"time\" >= '2020-01-01T00:00:00.0000000Z' AND \"time\" <= now() - $duration\""; then
echo -e "\nInfluxDb clean complete."
else
echo -e "\nSomething went wrong with cleaning InfluxDB. Please verify that the so-influxdb Docker container is running, and check the log at /opt/so/log/influxdb/influxdb.log for any details."
fi
else
echo -e "\nExiting as requested."
fi

View File

@@ -1,55 +0,0 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{%- set role = grains.id.split('_') | last %}
{%- if role in ['manager', 'managersearch', 'eval', 'standalone'] %}
{%- import_yaml 'influxdb/defaults.yaml' as default_settings %}
{%- set influxdb = salt['grains.filter_by'](default_settings, default='influxdb', merge=salt['pillar.get']('influxdb', {})) %}
. /usr/sbin/so-common
echo -e "\nThis script is used to reduce the size of InfluxDB by downsampling old data into the so_long_term retention policy."
echo -e "\nInfluxDB will now be downsampled. This could take a few hours depending on how large the database is and hardware resources available."
read -r -p "Are you sure you want to continue? [y/N] " yorn
if [[ "$yorn" =~ ^([yY][eE][sS]|[yY])$ ]]; then
echo -e "\nDownsampling InfluxDb started at `date`. This may take several hours depending on how much data needs to be downsampled."
{% for dest_rp in influxdb.downsample.keys() -%}
{% for measurement in influxdb.downsample[dest_rp].get('measurements', []) -%}
day=0
startdate=`date`
while docker exec -t so-influxdb /bin/bash -c "influx -ssl -unsafeSsl -database telegraf -execute \"SELECT mean(*) INTO \"so_long_term\".\"{{measurement}}\" FROM \"autogen\".\"{{measurement}}\" WHERE \"time\" >= '2020-07-21T00:00:00.0000000Z' + ${day}d AND \"time\" <= '2020-07-21T00:00:00.0000000Z' + $((day+1))d GROUP BY time(5m),*\""; do
# why 2020-07-21?
migrationdate=`date -d "2020-07-21 + ${day} days" +"%y-%m-%d"`
echo "Downsampling of measurement: {{measurement}} from $migrationdate started at $startdate and completed at `date`."
newdaytomigrate=$(date -d "$migrationdate + 1 days" +"%s")
today=$(date +"%s")
if [ $newdaytomigrate -ge $today ]; then
break
else
((day=day+1))
startdate=`date`
echo -e "\nDownsampling the next day's worth of data for measurement: {{measurement}}."
fi
done
{% endfor -%}
{% endfor -%}
echo -e "\nInfluxDb data downsampling complete."
else
echo -e "\nExiting as requested."
fi
{%- else %}
echo -e "\nThis script can only be run on a node running InfluxDB."
{%- endif %}

View File

@@ -1,26 +0,0 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
. /usr/sbin/so-common
echo -e "\nThis script is used to reduce the size of InfluxDB by dropping the autogen retention policy."
echo "If you want to retain historical data prior to 2.3.60, then this should only be run after you have downsampled your data using so-influxdb-downsample."
echo -e "\nThe autogen retention policy will now be dropped from InfluxDB."
read -r -p "Are you sure you want to continue? [y/N] " yorn
if [[ "$yorn" =~ ^([yY][eE][sS]|[yY])$ ]]; then
echo -e "\nDropping autogen retention policy."
if docker exec -t so-influxdb influx -format json -ssl -unsafeSsl -execute "drop retention policy autogen on telegraf"; then
echo -e "\nAutogen retention policy dropped from InfluxDb."
else
echo -e "\nSomething went wrong dropping then autogen retention policy from InfluxDB. Please verify that the so-influxdb Docker container is running, and check the log at /opt/so/log/influxdb/influxdb.log for any details."
fi
else
echo -e "\nExiting as requested."
fi

View File

@@ -0,0 +1,285 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
. /usr/sbin/so-common
usage() {
  # Print the supported operations to stdout, then terminate with a usage error.
  cat <<EOF
Usage: $0 <operation> [args]

Supported Operations:
 dashboardpath Returns the URL path for a dashboard, requires: <name-of-dashboard>
 export Exports all templates to stdout
 setup Loads all templates and creates all required buckets
 userlist Lists users
 useradd Adds a new user, requires: <email>
 userdel Removes an existing user, requires: <email>
 userenable Enables a user, requires: <email>
 userdisable Disables a user, requires: <email>
 userpass Updates a user's password, requires: <email>
 userpromote Promotes a user to admin: <email>
 userdemote Demotes a user from admin: <email>

If required, the password will be read from STDIN.
EOF
  exit 1
}
# Require at least the operation name; anything less is a usage error.
if [ $# -lt 1 ]; then
usage
fi
# Script basename, used by log() to tag diagnostic output.
COMMAND=$(basename $0)
# First argument selects the operation; shift so "$@" holds only its args.
OP=$1
shift
# Fail fast: abort on any unhandled error, including failures inside pipelines.
set -eo pipefail
log() {
  # Emit a timestamped, script-tagged diagnostic line on stderr.
  # %b keeps the original `echo -e` behavior of honoring escape sequences
  # (callers embed \n in their messages).
  printf '%b\n' "$(date) | $COMMAND | $*" >&2
}
check_response() {
  # Abort the script when an InfluxDB API response body carries an error code.
  # The API reports failures as JSON containing a "code" field.
  response=$1
  case "$response" in
    *'"code":'*)
      log "Failed. Check the response for more details.\n$response"
      exit 1
      ;;
  esac
}
request() {
  # Issue an authenticated call against the local InfluxDB v2 API.
  # First argument is the endpoint path (may include a query string); any
  # further arguments are passed straight through to curl (-X, -d, -H, ...).
  local api_base="https://localhost:8086/api/v2"
  curl -skK /opt/so/conf/influxdb/curl.config "$api_base/$@"
}
lookup_user_id() {
  # Resolve an email address to an InfluxDB user ID (printed on stdout).
  # Exits with an error when no matching user exists.
  local email=$1
  local users uid
  users=$(request users?limit=100)
  check_response "$users"
  uid=$(jq -r ".users[] | select(.name == \"$email\").id" <<<"$users")
  if [[ -z "$uid" ]]; then
    log "User not found"
    exit 1
  fi
  echo "$uid"
}
lookup_stack_id() {
  # Find the "Security Onion" template stack in the given org, creating it on
  # first use; prints the stack ID on stdout.
  local org=$1
  local resp sid
  resp=$(request "stacks?orgID=$org&name=Security+Onion")
  check_response "$resp"
  sid=$(jq -r '.stacks[0].id' <<<"$resp")
  if [[ -z "$sid" || "$sid" == null ]]; then
    resp=$(request stacks -X POST -d "{\"name\":\"Security Onion\",\"orgID\":\"$org\"}")
    check_response "$resp"
    sid=$(jq -r .id <<<"$resp")
  fi
  echo "$sid"
}
change_password() {
  # Read a new password from STDIN and set it on the given user.
  #   $1 - InfluxDB user ID
  # A prompt is printed only when STDIN is a TTY, so piped passwords
  # (echo "$pw" | ...) work without noise.
  uid=$1
  # 'test -t' returns non-zero for non-TTY stdin; relax set -e around it so
  # that case doesn't kill the script.
  set +e
  test -t 0
  if [[ $? == 0 ]]; then
    echo "Enter new password:"
  fi
  set -e
  read -rs pass
  # Provided by /usr/sbin/so-common; aborts when the password is unacceptable.
  check_password_and_exit "$pass"
  response=$(request users/$uid/password -X POST -d "{\"password\":\"$pass\"}")
  check_response "$response"
}
apply_templates() {
  # Apply a JSON array of template objects to the org's stack via the
  # templates/apply endpoint.
  #   $1 - org ID, $2 - stack ID, $3 - JSON array of {"contents": ...} objects
  local org=$1 stack=$2 templates=$3
  local payload resp
  payload="{\"orgID\":\"$org\",\"stackID\":\"$stack\",\"templates\":$templates}"
  resp=$(request templates/apply -X POST -d "$payload")
  check_response "$resp"
}
setup_bucket() {
  # Ensure a bucket exists and carries the requested retention settings.
  #   $1 - org ID, $2 - bucket name, $3 - retention (seconds),
  #   $4 - shard group duration (seconds)
  local org=$1 name=$2 retention=$3 shard=$4
  local resp bucketid
  resp=$(request "buckets?orgID=$org&name=$name")
  bucketid=$(jq -r '.buckets[0].id' <<<"$resp")
  if [[ -z "$bucketid" || "$bucketid" == null ]]; then
    # Bucket missing: create it, then patch the retention below.
    resp=$(request buckets -X POST -d "{\"name\":\"$name\",\"orgID\":\"$org\"}")
    check_response "$resp"
    bucketid=$(jq -r .id <<<"$resp")
  fi
  resp=$(request buckets/$bucketid -X PATCH -d "{\"name\":\"$name\",\"retentionRules\":[{\"everySeconds\":$retention,\"shardGroupDurationSeconds\":$shard,\"type\":\"expire\"}]}")
  check_response "$resp"
}
lookup_org_id_with_wait() {
  # Poll for the "Security Onion" org ID, retrying while the InfluxDB server
  # finishes starting up. Prints the ID on stdout, or exits after 30 tries.
  local -i tries=0 max_tries=30 delay=10
  local resp org
  while (( tries < max_tries )); do
    resp=$(request orgs?org=Security+Onion)
    org=$(jq -r '.orgs[] | select(.name == "Security Onion").id' <<<"$resp")
    if [[ -n "$org" ]]; then
      echo "$org"
      return
    fi
    tries+=1
    log "Server does not appear to be running or fully initialized - will try again in $delay seconds ($tries / $max_tries)"
    sleep $delay
  done
  log "Server has not started after $max_tries attempts - aborting"
  exit 1
}
# Resolve the Security Onion organization ID once; every operation needs it.
oid=$(lookup_org_id_with_wait)

case "$OP" in
  setup)
    log "Ensuring organization is setup correctly"
    # Load templates if at least one has been modified since the last setup
    newest=$(ls -1t /opt/so/conf/influxdb/templates/ | head -1)
    if [ /opt/so/conf/influxdb/templates/$newest -nt /opt/so/conf/influxdb/last_template_setup ]; then
      log "Updating templates"
      stackid=$(lookup_stack_id "$oid")
      # Build a JSON array of {"contents": <template>} objects, one per file.
      for file in /opt/so/conf/influxdb/templates/*; do
        if [[ "$templates_array" != "" ]]; then
          templates_array="$templates_array,"
        fi
        template=$(cat "$file")
        templates_array="$templates_array{\"contents\":$template}"
      done
      apply_templates "$oid" "$stackid" "[$templates_array]"
      echo $(date) > /opt/so/conf/influxdb/last_template_setup
    else
      log "Templates have not been modified since last setup"
    fi
    # Setup buckets and retention periods if at least one has been modified since the last setup
    if [ /opt/so/conf/influxdb/buckets.json -nt /opt/so/conf/influxdb/last_bucket_setup ]; then
      log "Updating buckets and retention periods"
      for rp in so_short_term so_long_term; do
        bucket=telegraf/$rp
        log "Ensuring bucket is created and configured; bucket=$bucket"
        age=$(cat /opt/so/conf/influxdb/buckets.json | jq -r .$rp.duration)
        shard_duration=$(cat /opt/so/conf/influxdb/buckets.json | jq -r .$rp.shard_duration)
        setup_bucket "$oid" "$bucket" "$age" "$shard_duration"
      done
      echo $(date) > /opt/so/conf/influxdb/last_bucket_setup
    else
      log "Buckets have not been modified since last setup"
    fi
    ;;
  userlist)
    log "Listing existing users"
    response=$(request users)
    check_response "$response"
    echo "$response" | jq -r '.users[] | "\(.id): \(.name) (\(.status))"'
    ;;
  useradd)
    [ $# -ne 1 ] && usage
    email=$1
    log "Adding new user; email=$email"
    response=$(request users -X POST -d "{\"name\":\"$email\"}")
    check_response "$response"
    uid=$(echo "$response" | jq -r .id)
    log "Adding new user to organization"
    response=$(request orgs/$oid/members -X POST -d "{\"id\":\"$uid\"}")
    check_response "$response"
    # Password arrives on STDIN (see usage).
    change_password "$uid"
    ;;
  userpass)
    [ $# -ne 1 ] && usage
    email=$1
    log "Updating user password; email=$email"
    uid=$(lookup_user_id "$email")
    change_password "$uid"
    ;;
  userdel)
    [ $# -ne 1 ] && usage
    email=$1
    log "Deleting user; email=$email"
    uid=$(lookup_user_id "$email")
    response=$(request users/$uid -X DELETE)
    check_response "$response"
    ;;
  userenable)
    [ $# -ne 1 ] && usage
    email=$1
    log "Enabling user; email=$email"
    uid=$(lookup_user_id "$email")
    response=$(request users/$uid -X PATCH -d "{\"name\":\"$email\",\"status\":\"active\"}")
    check_response "$response"
    ;;
  userdisable)
    [ $# -ne 1 ] && usage
    email=$1
    log "Disabling user; email=$email"
    uid=$(lookup_user_id "$email")
    response=$(request users/$uid -X PATCH -d "{\"name\":\"$email\",\"status\":\"inactive\"}")
    check_response "$response"
    ;;
  userpromote)
    [ $# -ne 1 ] && usage
    email=$1
    log "Promoting user to admin; email=$email"
    uid=$(lookup_user_id "$email")
    # Move the user from the members list to the owners list.
    response=$(request orgs/$oid/members/$uid -X DELETE)
    response=$(request orgs/$oid/owners -X POST -d "{\"id\":\"$uid\"}")
    check_response "$response"
    ;;
  userdemote)
    [ $# -ne 1 ] && usage
    email=$1
    log "Demoting user from admin; email=$email"
    uid=$(lookup_user_id "$email")
    # Move the user from the owners list back to the members list.
    response=$(request orgs/$oid/owners/$uid -X DELETE)
    response=$(request orgs/$oid/members -X POST -d "{\"id\":\"$uid\"}")
    check_response "$response"
    ;;
  export)
    log "Exporting all organization templates"
    request templates/export -X POST -d "{\"orgIDs\":[{\"orgID\":\"$oid\"}]}" -H "Content-Type: application/json"
    ;;
  dashboardpath)
    [ $# -ne 1 ] && usage
    name=$1
    # BUGFIX: the query string must be quoted. Unquoted, the '&' backgrounded
    # the request and "orgID=$oid" became a stray variable assignment, so the
    # orgID parameter was never sent to the API.
    response=$(request "dashboards?limit=100&orgID=$oid")
    check_response "$response"
    dbid=$(echo "$response" | jq -r ".dashboards[] | select(.name == \"$name\").id")
    if [[ -z "$dbid" ]]; then
      log "Dashboard not found"
      exit 1
    fi
    echo -n "/influxdb/orgs/$oid/dashboards/$dbid"
    ;;
  *)
    usage
    ;;
esac

View File

@@ -119,6 +119,18 @@ function add_elastic_to_minion() {
" " >> $PILLARFILE " " >> $PILLARFILE
} }
# Add IDH Services info to the minion file
function add_idh_to_minion() {
printf '%s\n'\
"idh:"\
" restrict_management_ip: $IDH_MGTRESTRICT"\
" services:" >> "$PILLARFILE"
IFS=',' read -ra IDH_SERVICES_ARRAY <<< "$IDH_SERVICES"
for service in ${IDH_SERVICES_ARRAY[@]}; do
echo " - $service" | tr '[:upper:]' '[:lower:]' | tr -d '"' >> "$PILLARFILE"
done
}
function add_logstash_to_minion() { function add_logstash_to_minion() {
# Create the logstash advanced pillar # Create the logstash advanced pillar
printf '%s\n'\ printf '%s\n'\
@@ -183,8 +195,8 @@ function createEVAL() {
add_sensor_to_minion add_sensor_to_minion
} }
function createIDHNODE() { function createIDH() {
echo "Nothing custom needed for IDH nodes" add_idh_to_minion
} }
function createIMPORT() { function createIMPORT() {

View File

@@ -587,7 +587,10 @@ case "${operation}" in
createUser "$email" "${role:-$DEFAULT_ROLE}" "${firstName}" "${lastName}" "${note}" createUser "$email" "${role:-$DEFAULT_ROLE}" "${firstName}" "${lastName}" "${note}"
syncAll syncAll
echo "Successfully added new user to SOC" echo "Successfully added new user to SOC"
check_container fleet && echo "$password" | so-fleet-user-add "$email" echo "$password" | so-influxdb-manage useradd "$email"
if [[ "$role" == "superuser" ]]; then
echo "$password" | so-influxdb-manage userpromote "$email"
fi
;; ;;
"list") "list")
@@ -605,6 +608,9 @@ case "${operation}" in
if addUserRole "$email" "$role"; then if addUserRole "$email" "$role"; then
syncElastic syncElastic
echo "Successfully added role to user" echo "Successfully added role to user"
if [[ "$role" == "superuser" ]]; then
echo "$password" | so-influxdb-manage userpromote "$email"
fi
fi fi
;; ;;
@@ -618,6 +624,9 @@ case "${operation}" in
deleteUserRole "$email" "$role" deleteUserRole "$email" "$role"
syncElastic syncElastic
echo "Successfully removed role from user" echo "Successfully removed role from user"
if [[ "$role" == "superuser" ]]; then
echo "$password" | so-influxdb-manage userdemote "$email"
fi
;; ;;
"password") "password")
@@ -628,6 +637,7 @@ case "${operation}" in
updateUserPassword "$email" updateUserPassword "$email"
syncAll syncAll
echo "Successfully updated user password" echo "Successfully updated user password"
echo "$password" | so-influxdb-manage userpass "$email"
;; ;;
"profile") "profile")
@@ -647,7 +657,7 @@ case "${operation}" in
updateStatus "$email" 'active' updateStatus "$email" 'active'
syncAll syncAll
echo "Successfully enabled user" echo "Successfully enabled user"
echo "Fleet user will need to be recreated manually with so-fleet-user-add" so-influxdb-manage userenable "$email"
;; ;;
"disable") "disable")
@@ -658,7 +668,7 @@ case "${operation}" in
updateStatus "$email" 'locked' updateStatus "$email" 'locked'
syncAll syncAll
echo "Successfully disabled user" echo "Successfully disabled user"
check_container fleet && so-fleet-user-delete "$email" so-influxdb-manage userdisable "$email"
;; ;;
"delete") "delete")
@@ -669,7 +679,7 @@ case "${operation}" in
deleteUser "$email" deleteUser "$email"
syncAll syncAll
echo "Successfully deleted user" echo "Successfully deleted user"
check_container fleet && so-fleet-user-delete "$email" so-influxdb-manage userdel "$email"
;; ;;
"sync") "sync")

View File

@@ -23,10 +23,6 @@ docker:
- 0.0.0.0:514:514/udp - 0.0.0.0:514:514/udp
- 0.0.0.0:514:514/tcp - 0.0.0.0:514:514/tcp
- 0.0.0.0:5066:5066/tcp - 0.0.0.0:5066:5066/tcp
'so-grafana':
final_octet: 24
port_bindings:
- 0.0.0.0:3000:3000
'so-idstools': 'so-idstools':
final_octet: 25 final_octet: 25
'so-influxdb': 'so-influxdb':
@@ -106,3 +102,5 @@ docker:
final_octet: 44 final_octet: 44
port_bindings: port_bindings:
- 0.0.0.0:8080:8080/tcp - 0.0.0.0:8080:8080/tcp
'so-idh':
final_octet: 45

View File

@@ -1430,6 +1430,8 @@ elasticsearch:
date_detection: false date_detection: false
settings: settings:
index: index:
lifecycle:
name: so-elasticsearch-logs
mapping: mapping:
total_fields: total_fields:
limit: 5000 limit: 5000
@@ -1498,6 +1500,25 @@ elasticsearch:
- common-settings - common-settings
- common-dynamic-mappings - common-dynamic-mappings
priority: 500 priority: 500
policy:
phases:
hot:
min_age: 0ms
actions:
set_priority:
priority: 100
rollover:
max_age: 30d
max_primary_shard_size: 50gb
cold:
min_age: 30d
actions:
set_priority:
priority: 0
delete:
min_age: 365d
actions:
delete: {}
so-endgame: so-endgame:
index_sorting: False index_sorting: False
index_template: index_template:
@@ -2183,6 +2204,8 @@ elasticsearch:
date_detection: false date_detection: false
settings: settings:
index: index:
lifecycle:
name: so-suricata-logs
mapping: mapping:
total_fields: total_fields:
limit: 5000 limit: 5000
@@ -2251,6 +2274,25 @@ elasticsearch:
- common-settings - common-settings
- common-dynamic-mappings - common-dynamic-mappings
priority: 500 priority: 500
policy:
phases:
hot:
min_age: 0ms
actions:
set_priority:
priority: 100
rollover:
max_age: 30d
max_primary_shard_size: 50gb
cold:
min_age: 30d
actions:
set_priority:
priority: 0
delete:
min_age: 365d
actions:
delete: {}
so-imperva: so-imperva:
index_sorting: False index_sorting: False
index_template: index_template:
@@ -2351,6 +2393,8 @@ elasticsearch:
date_detection: false date_detection: false
settings: settings:
index: index:
lifecycle:
name: so-import-logs
mapping: mapping:
total_fields: total_fields:
limit: 5000 limit: 5000
@@ -2419,6 +2463,25 @@ elasticsearch:
- common-dynamic-mappings - common-dynamic-mappings
- winlog-mappings - winlog-mappings
priority: 500 priority: 500
policy:
phases:
hot:
min_age: 0ms
actions:
set_priority:
priority: 100
rollover:
max_age: 30d
max_primary_shard_size: 50gb
cold:
min_age: 30d
actions:
set_priority:
priority: 0
delete:
min_age: 365d
actions:
delete: {}
so-infoblox: so-infoblox:
index_sorting: False index_sorting: False
index_template: index_template:
@@ -2671,6 +2734,25 @@ elasticsearch:
- common-settings - common-settings
- common-dynamic-mappings - common-dynamic-mappings
priority: 500 priority: 500
policy:
phases:
hot:
min_age: 0ms
actions:
set_priority:
priority: 100
rollover:
max_age: 30d
max_primary_shard_size: 50gb
cold:
min_age: 30d
actions:
set_priority:
priority: 0
delete:
min_age: 365d
actions:
delete: {}
so-kratos: so-kratos:
warm: 7 warm: 7
close: 30 close: 30
@@ -2754,6 +2836,25 @@ elasticsearch:
- common-settings - common-settings
- common-dynamic-mappings - common-dynamic-mappings
priority: 500 priority: 500
policy:
phases:
hot:
min_age: 0ms
actions:
set_priority:
priority: 100
rollover:
max_age: 30d
max_primary_shard_size: 50gb
cold:
min_age: 30d
actions:
set_priority:
priority: 0
delete:
min_age: 365d
actions:
delete: {}
so-logstash: so-logstash:
index_sorting: False index_sorting: False
index_template: index_template:
@@ -2770,6 +2871,8 @@ elasticsearch:
date_detection: false date_detection: false
settings: settings:
index: index:
lifecycle:
name: so-logstash-logs
mapping: mapping:
total_fields: total_fields:
limit: 5000 limit: 5000
@@ -2838,6 +2941,25 @@ elasticsearch:
- common-settings - common-settings
- common-dynamic-mappings - common-dynamic-mappings
priority: 500 priority: 500
policy:
phases:
hot:
min_age: 0ms
actions:
set_priority:
priority: 100
rollover:
max_age: 30d
max_primary_shard_size: 50gb
cold:
min_age: 30d
actions:
set_priority:
priority: 0
delete:
min_age: 365d
actions:
delete: {}
so-microsoft: so-microsoft:
index_sorting: False index_sorting: False
index_template: index_template:
@@ -3691,6 +3813,8 @@ elasticsearch:
date_detection: false date_detection: false
settings: settings:
index: index:
lifecycle:
name: so-redis-logs
mapping: mapping:
total_fields: total_fields:
limit: 5000 limit: 5000
@@ -3759,6 +3883,25 @@ elasticsearch:
- common-settings - common-settings
- common-dynamic-mappings - common-dynamic-mappings
priority: 500 priority: 500
policy:
phases:
hot:
min_age: 0ms
actions:
set_priority:
priority: 100
rollover:
max_age: 30d
max_primary_shard_size: 50gb
cold:
min_age: 30d
actions:
set_priority:
priority: 0
delete:
min_age: 365d
actions:
delete: {}
so-snort: so-snort:
index_sorting: False index_sorting: False
index_template: index_template:
@@ -4262,6 +4405,25 @@ elasticsearch:
- common-settings - common-settings
- common-dynamic-mappings - common-dynamic-mappings
priority: 500 priority: 500
policy:
phases:
hot:
min_age: 0ms
actions:
set_priority:
priority: 100
rollover:
max_age: 30d
max_primary_shard_size: 50gb
cold:
min_age: 30d
actions:
set_priority:
priority: 0
delete:
min_age: 365d
actions:
delete: {}
so-syslog: so-syslog:
index_sorting: False index_sorting: False
index_template: index_template:
@@ -4347,6 +4509,25 @@ elasticsearch:
- common-settings - common-settings
- common-dynamic-mappings - common-dynamic-mappings
priority: 500 priority: 500
policy:
phases:
hot:
min_age: 0ms
actions:
set_priority:
priority: 100
rollover:
max_age: 30d
max_primary_shard_size: 50gb
cold:
min_age: 30d
actions:
set_priority:
priority: 0
delete:
min_age: 365d
actions:
delete: {}
so-tomcat: so-tomcat:
index_sorting: False index_sorting: False
index_template: index_template:
@@ -4447,6 +4628,8 @@ elasticsearch:
date_detection: false date_detection: false
settings: settings:
index: index:
lifecycle:
name: so-zeek-logs
mapping: mapping:
total_fields: total_fields:
limit: 5000 limit: 5000
@@ -4517,6 +4700,25 @@ elasticsearch:
- common-settings - common-settings
- common-dynamic-mappings - common-dynamic-mappings
priority: 500 priority: 500
policy:
phases:
hot:
min_age: 0ms
actions:
set_priority:
priority: 100
rollover:
max_age: 30d
max_primary_shard_size: 50gb
cold:
min_age: 30d
actions:
set_priority:
priority: 0
delete:
min_age: 365d
actions:
delete: {}
so-zscaler: so-zscaler:
index_sorting: False index_sorting: False
index_template: index_template:

View File

@@ -53,9 +53,19 @@ es_sync_scripts:
- source: salt://elasticsearch/tools/sbin - source: salt://elasticsearch/tools/sbin
- exclude_pat: - exclude_pat:
- so-elasticsearch-pipelines # exclude this because we need to watch it for changes, we sync it in another state - so-elasticsearch-pipelines # exclude this because we need to watch it for changes, we sync it in another state
- so-elasticsearch-ilm-policy-load
- defaults: - defaults:
GLOBALS: {{ GLOBALS }} GLOBALS: {{ GLOBALS }}
so-elasticsearch-ilm-policy-load-script:
file.managed:
- name: /usr/sbin/so-elasticsearch-ilm-policy-load
- source: salt://elasticsearch/tools/sbin/so-elasticsearch-ilm-policy-load
- user: 930
- group: 939
- mode: 754
- template: jinja
so-elasticsearch-pipelines-script: so-elasticsearch-pipelines-script:
file.managed: file.managed:
- name: /usr/sbin/so-elasticsearch-pipelines - name: /usr/sbin/so-elasticsearch-pipelines
@@ -362,6 +372,16 @@ so-es-cluster-settings:
- docker_container: so-elasticsearch - docker_container: so-elasticsearch
- file: es_sync_scripts - file: es_sync_scripts
so-elasticsearch-ilm-policy-load:
cmd.run:
- name: /usr/sbin/so-elasticsearch-ilm-policy-load
- cwd: /opt/so
- require:
- docker_container: so-elasticsearch
- file: so-elasticsearch-ilm-policy-load-script
- onchanges:
- file: so-elasticsearch-ilm-policy-load-script
so-elasticsearch-templates: so-elasticsearch-templates:
cmd.run: cmd.run:
- name: /usr/sbin/so-elasticsearch-templates-load - name: /usr/sbin/so-elasticsearch-templates-load

View File

@@ -36,7 +36,7 @@ elasticsearch:
global: True global: True
helpLink: elasticsearch.html helpLink: elasticsearch.html
index_settings: index_settings:
so-aws: &indexSettings so-elasticsearch: &indexSettings
warm: warm:
description: Age (in days) of this index before it will move to warm storage, if warm nodes are present. Once moved, events on this index can take longer to fetch. description: Age (in days) of this index before it will move to warm storage, if warm nodes are present. Once moved, events on this index can take longer to fetch.
global: True global: True
@@ -75,45 +75,51 @@ elasticsearch:
description: Number of replicas required for this index. Multiple replicas protects against data loss, but also increases storage costs. description: Number of replicas required for this index. Multiple replicas protects against data loss, but also increases storage costs.
global: True global: True
helpLink: elasticsearch.html helpLink: elasticsearch.html
so-azure: *indexSettings policy:
so-barracuda: *indexSettings phases:
so-beats: *indexSettings hot:
so-bluecoat: *indexSettings min_age:
so-cef: *indexSettings description: Minimum age
so-checkpoint: *indexSettings global: True
so-cisco: *indexSettings helpLink: elasticsearch.html
so-cyberark: *indexSettings actions:
so-cylance: *indexSettings set_priority:
so-elasticsearch: *indexSettings priority:
description: Priority of index, used for recovery after a node restart. Indices with higher priorities are recovered before indices with lower priorities.
global: True
helpLink: elasticsearch.html
rollover:
max_age:
description: Maximum age of index. Once an index reaches this limit, it will be rolled over into a new index.
global: True
helpLink: elasticsearch.html
max_primary_shard_size:
description: Maximum primary shard size. Once an index reaches this limit, it will be rolled over into a new index.
global: True
helpLink: elasticsearch.html
cold:
min_age:
description: Minimum age of index, determining when it should be sent to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed.
global: True
helpLink: elasticsearch.html
actions:
set_priority:
priority:
description: Used for index recovery after a node restart. Indices with higher priorities are recovered before indices with lower priorities.
global: True
helpLink: elasticsearch.html
delete:
min_age:
description: Minimum age of index, determining when it should be deleted.
global: True
helpLink: elastic
so-endgame: *indexSettings so-endgame: *indexSettings
so-f5: *indexSettings
so-firewall: *indexSettings so-firewall: *indexSettings
so-fortinet: *indexSettings
so-gcp: *indexSettings
so-google_workspace: *indexSettings
so-ids: *indexSettings
so-imperva: *indexSettings
so-import: *indexSettings so-import: *indexSettings
so-infoblox: *indexSettings
so-juniper: *indexSettings
so-kibana: *indexSettings so-kibana: *indexSettings
so-logstash: *indexSettings so-logstash: *indexSettings
so-microsoft: *indexSettings
so-misp: *indexSettings
so-netflow: *indexSettings
so-netscout: *indexSettings
so-o365: *indexSettings
so-okta: *indexSettings
so-osquery: *indexSettings so-osquery: *indexSettings
so-proofpoint: *indexSettings
so-radware: *indexSettings
so-redis: *indexSettings so-redis: *indexSettings
so-snort: *indexSettings
so-snyk: *indexSettings
so-sonicwall: *indexSettings
so-sophos: *indexSettings
so-strelka: *indexSettings so-strelka: *indexSettings
so-syslog: *indexSettings so-syslog: *indexSettings
so-tomcat: *indexSettings
so-zeek: *indexSettings so-zeek: *indexSettings
so-zscaler: *indexSettings

View File

@@ -0,0 +1,21 @@
#/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
. /usr/sbin/so-common
{% import_yaml 'elasticsearch/defaults.yaml' as ESCONFIG with context %}
{%- set ES_INDEX_SETTINGS = salt['pillar.get']('elasticsearch:index_settings', default=ESCONFIG.elasticsearch.index_settings, merge=True) %}
{%- set NODEIP = salt['pillar.get']('host:mainip', '') %}
{%- for index, settings in ES_INDEX_SETTINGS.items() %}
{%- if settings.policy is defined %}
echo
echo "Setting up {{ index }}-logs policy..."
curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -s -k -L -X PUT "https://{{ NODEIP }}:9200/_ilm/policy/{{ index }}-logs" -H 'Content-Type: application/json' -d'{ "policy": {{ settings.policy | tojson(true) }} }'
echo
{%- endif %}
{%- endfor %}
echo

View File

@@ -2,6 +2,7 @@
{% import_yaml 'firewall/ports/ports.yaml' as portgroups %} {% import_yaml 'firewall/ports/ports.yaml' as portgroups %}
{% set portgroups = portgroups.firewall.ports %} {% set portgroups = portgroups.firewall.ports %}
{% set TRUE_CLUSTER = salt['pillar.get']('elasticsearch:true_cluster', True) %} {% set TRUE_CLUSTER = salt['pillar.get']('elasticsearch:true_cluster', True) %}
{% from 'idh/opencanary_config.map.jinja' import IDH_PORTGROUPS %}
role: role:
eval: eval:
@@ -576,7 +577,7 @@ role:
portgroups: portgroups:
{% set idh_services = salt['pillar.get']('idh:services', []) %} {% set idh_services = salt['pillar.get']('idh:services', []) %}
{% for service in idh_services %} {% for service in idh_services %}
- {{ portgroups['idh_'~service] }} - {{ IDH_PORTGROUPS['idh_'~service] }}
{% endfor %} {% endfor %}
dockernet: dockernet:
portgroups: portgroups:
@@ -586,4 +587,7 @@ role:
- {{ portgroups.all }} - {{ portgroups.all }}
manager: manager:
portgroups: portgroups:
- {{ portgroups.ssh }} - {{ IDH_PORTGROUPS.openssh }}
standalone:
portgroups:
- {{ IDH_PORTGROUPS.openssh }}

View File

@@ -1,3 +1,31 @@
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% if GLOBALS.role == 'so-eval' %}
{% set NODE_CONTAINERS = [
'so-curator',
'so-dockerregistry',
'so-elasticsearch',
'so-elastic-fleet',
'so-elastic-fleet-package-registry',
'so-grafana',
'so-influxdb',
'so-kibana',
'so-kratos',
'so-mysql',
'so-nginx',
'so-redis',
'so-soc',
'so-soctopus',
'so-strelka-coordinator',
'so-strelka-gatekeeper',
'so-strelka-frontend',
'so-strelka-backend',
'so-strelka-manager',
'so-strelka-filestream'
] %}
{% endif %}
{% if GLOBALS.role == 'so-manager' or GLOBALS.role == 'so-standalone' or GLOBALS.role == 'so-managersearch' %}
{% set NODE_CONTAINERS = [ {% set NODE_CONTAINERS = [
'so-curator', 'so-curator',
'so-dockerregistry', 'so-dockerregistry',
@@ -5,7 +33,6 @@
'so-elastic-fleet', 'so-elastic-fleet',
'so-elastic-fleet-package-registry', 'so-elastic-fleet-package-registry',
'so-filebeat', 'so-filebeat',
'so-grafana',
'so-influxdb', 'so-influxdb',
'so-kibana', 'so-kibana',
'so-kratos', 'so-kratos',
@@ -22,3 +49,59 @@
'so-strelka-manager', 'so-strelka-manager',
'so-strelka-filestream' 'so-strelka-filestream'
] %} ] %}
{% endif %}
{% if GLOBALS.role == 'so-searchnode' %}
{% set NODE_CONTAINERS = [
'so-elasticsearch',
'so-filebeat',
'so-logstash',
'so-nginx'
] %}
{% endif %}
{% if GLOBALS.role == 'so-heavynode' %}
{% set NODE_CONTAINERS = [
'so-curator',
'so-elasticsearch',
'so-filebeat',
'so-logstash',
'so-nginx',
'so-redis',
'so-strelka-coordinator',
'so-strelka-gatekeeper',
'so-strelka-frontend',
'so-strelka-backend',
'so-strelka-manager',
'so-strelka-filestream'
] %}
{% endif %}
{% if GLOBALS.role == 'so-import' %}
{% set NODE_CONTAINERS = [
'so-dockerregistry',
'so-elasticsearch',
'so-elastic-fleet',
'so-elastic-fleet-package-registry',
'so-filebeat',
'so-influxdb',
'so-kibana',
'so-kratos',
'so-nginx',
'so-soc'
] %}
{% endif %}
{% if GLOBALS.role == 'so-receiver' %}
{% set NODE_CONTAINERS = [
'so-filebeat',
'so-logstash',
'so-redis',
] %}
{% endif %}
{% if GLOBALS.role == 'so-idh' %}
{% set NODE_CONTAINERS = [
'so-idh',
] %}
{% endif %}

View File

@@ -1,3 +1,4 @@
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% set role = grains.id.split('_') | last %} {% set role = grains.id.split('_') | last %}
{% set translated_pillar_assigned_hostgroups = {} %} {% set translated_pillar_assigned_hostgroups = {} %}
@@ -9,9 +10,15 @@
{% else %} {% else %}
{% set local_portgroups = {} %} {% set local_portgroups = {} %}
{% endif %} {% endif %}
{% set portgroups = salt['defaults.merge'](default_portgroups, local_portgroups, in_place=False) %} {% set portgroups = salt['defaults.merge'](default_portgroups, local_portgroups, in_place=False) %}
{% set defined_portgroups = portgroups %} {% set defined_portgroups = portgroups %}
{% if GLOBALS.role == 'so-idh' %}
{% from 'idh/opencanary_config.map.jinja' import IDH_PORTGROUPS %}
{% do salt['defaults.merge'](defined_portgroups, IDH_PORTGROUPS, in_place=True) %}
{% endif %}
{% set local_hostgroups = {'firewall': {'hostgroups': {}}} %} {% set local_hostgroups = {'firewall': {'hostgroups': {}}} %}
{% set hostgroup_list = salt['cp.list_master'](prefix='firewall/hostgroups') %} {% set hostgroup_list = salt['cp.list_master'](prefix='firewall/hostgroups') %}

View File

@@ -1,11 +1,25 @@
{% set idh_services = salt['pillar.get']('idh:services', []) %} {% set idh_services = salt['pillar.get']('idh:services', []) %}
{% import_yaml "idh/defaults/defaults.yaml" as OPENCANARYCONFIG with context %} {% set IDH_PORTGROUPS = {} %}
{% import_yaml "idh/defaults/defaults.yaml" as IDHCONFIG with context %}
{% for service in idh_services %} {% for service in idh_services %}
{% import_yaml "idh/defaults/" ~ service ~ ".defaults.yaml" as SERVICECONFIG with context %} {% import_yaml "idh/defaults/" ~ service ~ ".defaults.yaml" as SERVICECONFIG with context %}
{% do salt['defaults.merge'](OPENCANARYCONFIG, SERVICECONFIG, in_place=True) %} {% do salt['defaults.merge'](IDHCONFIG, SERVICECONFIG, in_place=True) %}
{% endfor %} {% endfor %}
{% set OPENCANARYCONFIG = salt['pillar.get']('idh:opencanary:config', default=OPENCANARYCONFIG.idh.opencanary.config, merge=True) %} {% set OPENCANARYCONFIG = salt['pillar.get']('idh:opencanary:config', default=IDHCONFIG.idh.opencanary.config, merge=True) %}
{% do OPENCANARYCONFIG.idh.opencanary.config.update({'device.node_id': grains.host}) %} {% set OPENSSH = salt['pillar.get']('idh:openssh', default=IDHCONFIG.idh.openssh, merge=True) %}
{% for service in idh_services %}
{% if service in ["smnp","ntp", "tftp"] %}
{% set proto = 'udp' %}
{% else %}
{% set proto = 'tcp' %}
{% endif %}
{% do IDH_PORTGROUPS.update({'idh_' ~ service: {proto: [OPENCANARYCONFIG[service ~ '.port']]}}) %}
{% endfor %}
{% do IDH_PORTGROUPS.update({'openssh': {'tcp': [OPENSSH.config.port]}}) %}
{% do OPENCANARYCONFIG.update({'device.node_id': grains.host}) %}

View File

@@ -0,0 +1 @@
{{ INFLUXMERGED.buckets | json }}

View File

@@ -0,0 +1 @@
{{ INFLUXMERGED.config | yaml(false) }}

View File

@@ -0,0 +1 @@
header = "Authorization: Token {{ salt['pillar.get']('secrets:influx_token') }}"

View File

@@ -1,167 +1,77 @@
influxdb: influxdb:
config: config:
meta: assets-path: /ui
dir: /var/lib/influxdb/meta bolt-path: /var/lib/influxdb2/influxd.bolt
retention-autocreate: true engine-path: /var/lib/influxdb2/engine
logging-enabled: true feature-flags: ""
data: flux-log-enabled: false
dir: /var/lib/influxdb/data hardening-enabled: true
wal-dir: /var/lib/influxdb/wal http-bind-address: :8086
wal-fsync-delay: 0s http-idle-timeout: 0
index-version: inmem http-read-header-timeout: 10s
race-logging-enabled: false http-read-timeout: 0
query-log-enabled: true http-write-timeout: 0
validate-keys: false influxql-max-select-buckets: 0
cache-max-memory-size: 1g influxql-max-select-point: 0
cache-snapshot-memory-size: 25m influxql-max-select-series: 0
cache-snapshot-write-cold-duration: 10m instance-id: ""
compact-full-write-cold-duration: 4h log-level: info
max-concurrent-compactions: 0 metrics-disabled: true
compact-throughput: 48m no-tasks: false
compact-throughput-burst: 48m pprof-disabled: true
max-index-log-file-size: 1m query-concurrency: 100
max-series-per-database: 1000000 query-initial-memory-bytes: 1073741824
max-values-per-tag: 100000 query-max-memory-bytes: 107374182400
tsm-use-madv-willneed: false query-memory-bytes: 1073741824
coordinator: query-queue-size: 500
write-timeout: 10s reporting-disabled: true
max-concurrent-queries: 0 secret-store: bolt
query-timeout: 0s session-length: 10080
log-queries-after: 0s session-renew-disabled: false
max-select-point: 0 sqlite-path: /var/lib/influxdb2/influxd.sqlite
max-select-series: 0 storage-cache-max-memory-size: 1073741824
max-select-buckets: 0 storage-cache-snapshot-memory-size: 26214400
retention: storage-cache-snapshot-write-cold-duration: 10m0s
enabled: true storage-compact-full-write-cold-duration: 4h0m0s
check-interval: 30m storage-compact-throughput-burst: 50331648
shard-precreation: storage-max-concurrent-compactions: 0
enabled: true storage-max-index-log-file-size: 1048576
check-interval: 10m storage-no-validate-field-size: false
advance-period: 30m storage-retention-check-interval: 30m0s
monitor: storage-series-file-max-concurrent-snapshot-compactions: 0
store-enabled: true storage-series-id-set-cache-size: 100
store-database: _internal storage-shard-precreator-advance-period: 30m0s
store-interval: 10s storage-shard-precreator-check-interval: 10m0s
http: storage-tsm-use-madv-willneed: false
enabled: true storage-validate-keys: false
flux-enabled: true storage-wal-fsync-delay: 0s
bind-address: ':8086' storage-wal-max-concurrent-writes: 0
auth-enabled: false storage-wal-max-write-delay: 10m
realm: InfluxDB storage-write-timeout: 10s
log-enabled: false store: disk
suppress-write-log: false tls-cert: /conf/influxdb.crt
access-log-path: '' tls-key: /conf/influxdb.key
access-log-status-filters: [] tls-min-version: 1.2
write-tracing: false tls-strict-ciphers: true
pprof-enabled: true tracing-type: ""
debug-pprof-enabled: false ui-disabled: false
https-enabled: true vault-addr: ""
https-certificate: /etc/ssl/influxdb.crt vault-cacert: ""
https-private-key: /etc/ssl/influxdb.key vault-capath: ""
shared-secret: '' vault-client-cert: ""
max-row-limit: 0 vault-client-key: ""
max-connection-limit: 0 vault-client-timeout: 60s
unix-socket-enabled: false vault-max-retries: 2
bind-socket: /var/run/influxdb.sock vault-skip-verify: false
max-body-size: 25000000 vault-tls-server-name: ""
max-concurrent-write-limit: 0 vault-token: ""
max-enqueued-write-limit: 0 buckets:
enqueued-write-timeout: 0
logging:
format: auto
level: info
suppress-logo: false
subscriber:
enabled: true
http-timeout: 30s
insecure-skip-verify: false
ca-certs: ''
write-concurrency: 40
write-buffer-size: 1000
graphite:
enabled: false
database: graphite
retention-policy: ''
bind-address: ':2003'
protocol: tcp
consistency-level: one
batch-size: 5000
batch-pending: 10
batch-timeout: 1s
udp-read-buffer: 0
separator: '.'
tags: []
templates: []
collectd:
enabled: false
bind-address: ':25826'
database: collectd
retention-policy: ''
typesdb: /usr/local/share/collectd
security-level: none
auth-file: /etc/collectd/auth_file
batch-size: 5000
bath-pending: 10
batch-timeout: 10s
read-buffer: 0
parse-multivalue-plugin: split
opentsdb:
enabled: false
bind-address: ':4242'
database: opentsdb
retention-policy: ''
consistency-level: one
tls-enabled: false
certificate: /etc/ssl/influxdb.pem
log-point-errors: true
batch-size: 1000
batch-pending: 5
bath-timeout: 1s
udp:
enabled: false
bind-address: ':8089'
database: udp
retention-policy: ''
precision: ''
batch-size: 5000
batch-pending: 10
batch-timeout: 1s
read-buffer: 0
continuous_queries:
enabled: true
log-enabled: true
query-stats-enabled: false
run-interval: 1s
tls:
ciphers:
- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA
- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256
- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256
- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA
- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384
- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305
- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA
- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256
- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA
- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384
- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305
- TLS_RSA_WITH_AES_128_CBC_SHA
- TLS_RSA_WITH_AES_128_CBC_SHA256
- TLS_RSA_WITH_AES_128_GCM_SHA256
- TLS_RSA_WITH_AES_256_CBC_SHA
- TLS_RSA_WITH_AES_256_GCM_SHA384
min-version: tls1.2
max-version: tls1.2
retention_policies:
so_short_term: so_short_term:
default: True duration: 2592000
duration: 30d shard_duration: 86400
shard_duration: 1d
so_long_term: so_long_term:
default: False duration: 0
duration: 0d shard_duration: 604800
shard_duration: 7d
downsample: downsample:
so_long_term: so_long_term:
resolution: 5m resolution: 5m

View File

@@ -1,26 +0,0 @@
{%- import_yaml 'influxdb/defaults.yaml' as INFLUXDEFAULTS %}
{%- set INFLUXMERGED = salt['pillar.get']('influxdb', default=INFLUXDEFAULTS.influxdb, merge=true) %}
{%- for header in INFLUXMERGED.config.keys() %}
{%- if header in ['graphite', 'collectd', 'opentsdb', 'udp'] %}
[[{{header}}]]
{%- else %}
[{{header}}]
{%- endif %}
{%- for k, v in INFLUXMERGED.config[header].items() %}
{#- is v a list? #}
{%- if v is iterable and (v is not string and v is not mapping) %}
{{k}} = [
{%- for li in v %}
"{{li}}",
{%- endfor %}
]
{%- elif v is string %}
{{k}} = "{{v}}"
{%- elif v is boolean %}
{{k}} = {{v|string|lower}}
{%- else %}
{{k}} = {{v}}
{%- endif %}
{%- endfor %}
{%- endfor %}

View File

@@ -2,15 +2,11 @@
{% if sls in allowed_states %} {% if sls in allowed_states %}
{% from 'docker/docker.map.jinja' import DOCKER %} {% from 'docker/docker.map.jinja' import DOCKER %}
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'influxdb/map.jinja' import INFLUXMERGED %}
{% set GRAFANA = salt['pillar.get']('manager:grafana', '0') %}
{% if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-eval', 'so-import'] %} {% if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-eval', 'so-import'] %}
{% import_yaml 'influxdb/defaults.yaml' as default_settings %} {% set PASSWORD = salt['pillar.get']('secrets:influx_pass') %}
{% set influxdb = salt['grains.filter_by'](default_settings, default='influxdb', merge=salt['pillar.get']('influxdb', {})) %} {% set TOKEN = salt['pillar.get']('secrets:influx_token') %}
{% from 'salt/map.jinja' import PYTHON3INFLUX with context %}
{% from 'salt/map.jinja' import PYTHONINFLUXVERSION with context %}
{% set PYTHONINFLUXVERSIONINSTALLED = salt['cmd.run']("python3 -c \"exec('try:import influxdb; print (influxdb.__version__)\\nexcept:print(\\'Module Not Found\\')')\"", python_shell=True) %}
include: include:
- salt.minion - salt.minion
@@ -19,7 +15,7 @@ include:
# Influx DB # Influx DB
influxconfdir: influxconfdir:
file.directory: file.directory:
- name: /opt/so/conf/influxdb/etc - name: /opt/so/conf/influxdb
- makedirs: True - makedirs: True
influxlogdir: influxlogdir:
@@ -37,11 +33,43 @@ influxdbdir:
influxdbconf: influxdbconf:
file.managed: file.managed:
- name: /opt/so/conf/influxdb/etc/influxdb.conf - name: /opt/so/conf/influxdb/config.yaml
- source: salt://influxdb/config.yaml.jinja
- user: 939 - user: 939
- group: 939 - group: 939
- template: jinja - template: jinja
- source: salt://influxdb/etc/influxdb.conf.jinja - defaults:
INFLUXMERGED: {{ INFLUXMERGED }}
influxdbbucketsconf:
file.managed:
- name: /opt/so/conf/influxdb/buckets.json
- source: salt://influxdb/buckets.json.jinja
- user: 939
- group: 939
- template: jinja
- defaults:
INFLUXMERGED: {{ INFLUXMERGED }}
influxdb-templates:
file.recurse:
- name: /opt/so/conf/influxdb/templates
- source: salt://influxdb/templates
- user: 939
- group: 939
- template: jinja
- clean: True
- defaults:
INFLUXMERGED: {{ INFLUXMERGED }}
influxdb_curl_config:
file.managed:
- name: /opt/so/conf/influxdb/curl.config
- source: salt://influxdb/curl.config.jinja
- mode: 600
- template: jinja
- show_changes: False
- makedirs: True
so-influxdb: so-influxdb:
docker_container.running: docker_container.running:
@@ -51,13 +79,20 @@ so-influxdb:
- sobridge: - sobridge:
- ipv4_address: {{ DOCKER.containers['so-influxdb'].ip }} - ipv4_address: {{ DOCKER.containers['so-influxdb'].ip }}
- environment: - environment:
- INFLUXD_CONFIG_PATH=/conf
- INFLUXDB_HTTP_LOG_ENABLED=false - INFLUXDB_HTTP_LOG_ENABLED=false
- DOCKER_INFLUXDB_INIT_MODE=setup
- DOCKER_INFLUXDB_INIT_USERNAME=so
- DOCKER_INFLUXDB_INIT_PASSWORD={{ PASSWORD }}
- DOCKER_INFLUXDB_INIT_ORG=Security Onion
- DOCKER_INFLUXDB_INIT_BUCKET=telegraf/so_short_term
- DOCKER_INFLUXDB_INIT_ADMIN_TOKEN={{ TOKEN }}
- binds: - binds:
- /opt/so/log/influxdb/:/log:rw - /opt/so/log/influxdb/:/log:rw
- /opt/so/conf/influxdb/etc/influxdb.conf:/etc/influxdb/influxdb.conf:ro - /opt/so/conf/influxdb/config.yaml:/conf/config.yaml:ro
- /nsm/influxdb:/var/lib/influxdb:rw - /nsm/influxdb:/var/lib/influxdb2:rw
- /etc/pki/influxdb.crt:/etc/ssl/influxdb.crt:ro - /etc/pki/influxdb.crt:/conf/influxdb.crt:ro
- /etc/pki/influxdb.key:/etc/ssl/influxdb.key:ro - /etc/pki/influxdb.key:/conf/influxdb.key:ro
- port_bindings: - port_bindings:
{% for BINDING in DOCKER.containers['so-influxdb'].port_bindings %} {% for BINDING in DOCKER.containers['so-influxdb'].port_bindings %}
- {{ BINDING }} - {{ BINDING }}
@@ -74,6 +109,14 @@ append_so-influxdb_so-status.conf:
- name: /opt/so/conf/so-status/so-status.conf - name: /opt/so/conf/so-status/so-status.conf
- text: so-influxdb - text: so-influxdb
influxdb-setup:
cmd.run:
- name: /usr/sbin/so-influxdb-manage setup &>> /opt/so/log/influxdb/setup.log
- require:
- file: influxdbbucketsconf
- file: influxdb_curl_config
- docker_container: so-influxdb
# Install cron job to determine size of influxdb for telegraf # Install cron job to determine size of influxdb for telegraf
get_influxdb_size: get_influxdb_size:
cron.present: cron.present:
@@ -85,71 +128,6 @@ get_influxdb_size:
- month: '*' - month: '*'
- dayweek: '*' - dayweek: '*'
# We have to make sure the influxdb module is the right version prior to state run since reload_modules is bugged
{% if PYTHONINFLUXVERSIONINSTALLED == PYTHONINFLUXVERSION %}
wait_for_influxdb:
http.query:
- name: 'https://{{GLOBALS.manager}}:8086/query?q=SHOW+DATABASES'
- ssl: True
- verify_ssl: False
- status: 200
- timeout: 10
- retry:
attempts: 20
interval: 5
- require:
- docker_container: so-influxdb
telegraf_database:
influxdb_database.present:
- name: telegraf
- database: telegraf
- ssl: True
- verify_ssl: /etc/pki/ca.crt
- cert: ['/etc/pki/influxdb.crt', '/etc/pki/influxdb.key']
- influxdb_host: {{ GLOBALS.manager }}
- require:
- docker_container: so-influxdb
- http: wait_for_influxdb
{% for rp in influxdb.retention_policies.keys() %}
{{rp}}_retention_policy:
influxdb_retention_policy.present:
- name: {{rp}}
- database: telegraf
- duration: {{influxdb.retention_policies[rp].duration}}
- shard_duration: {{influxdb.retention_policies[rp].shard_duration}}
- replication: 1
- default: {{influxdb.retention_policies[rp].get('default', 'False')}}
- ssl: True
- verify_ssl: /etc/pki/ca.crt
- cert: ['/etc/pki/influxdb.crt', '/etc/pki/influxdb.key']
- influxdb_host: {{ GLOBALS.manager }}
- require:
- docker_container: so-influxdb
- influxdb_database: telegraf_database
- file: influxdb_retention_policy.present_patch
{% endfor %}
{% for dest_rp in influxdb.downsample.keys() %}
{% for measurement in influxdb.downsample[dest_rp].get('measurements', []) %}
so_downsample_{{measurement}}_cq:
influxdb_continuous_query.present:
- name: so_downsample_{{measurement}}_cq
- database: telegraf
- query: SELECT mean(*) INTO "{{dest_rp}}"."{{measurement}}" FROM "{{measurement}}" GROUP BY time({{influxdb.downsample[dest_rp].resolution}}),*
- ssl: True
- verify_ssl: /etc/pki/ca.crt
- cert: ['/etc/pki/influxdb.crt', '/etc/pki/influxdb.key']
- influxdb_host: {{ GLOBALS.manager }}
- require:
- docker_container: so-influxdb
- influxdb_database: telegraf_database
- file: influxdb_continuous_query.present_patch
{% endfor %}
{% endfor %}
{% endif %}
{% endif %} {% endif %}
{% else %} {% else %}

View File

@@ -1,9 +1,2 @@
{% import_yaml 'influxdb/defaults.yaml' as INFLUXDB %} {%- import_yaml 'influxdb/defaults.yaml' as INFLUXDEFAULTS %}
{% set measurements = salt['cmd.shell']('docker exec -t so-influxdb influx -format json -ssl -unsafeSsl -database telegraf -execute "show measurements" 2> /root/measurement_query.log | jq -r .results[0].series[0].values[]?[0] 2>> /root/measurement_query.log', shell='/bin/bash') %} {%- set INFLUXMERGED = salt['pillar.get']('influxdb', default=INFLUXDEFAULTS.influxdb, merge=true) %}
{% if measurements|length > 0 %}
{% do INFLUXDB.influxdb.downsample.so_long_term.update('measurements': [])%}
{% for measurement in measurements.splitlines() %}
{% do INFLUXDB.influxdb.downsample.so_long_term.measurements.append(measurement)%}
{% endfor %}
{% endif %}

View File

@@ -1,362 +1,355 @@
influxdb: influxdb:
config: config:
meta: assets-path:
logging-enabled: description: Path to the InfluxDB user interface assets located inside the so-influxdb container.
description: Enable InfluxDB meta server logging.
global: True
helpLink: influxdb.html
data:
wal-fsync-delay:
description: The amount of time that a write will wait before fsyncing.
global: True
helpLink: influxdb.html
index-version:
description: The type of shard index to use for new shards.
global: True
helpLink: influxdb.html
trace-logging-enabled:
description: Trace logging provides more verbose output around the tsm engine.
global: True global: True
advanced: True advanced: True
helpLink: influxdb.html helpLink: influxdb.html
query-log-enabled: bolt-path:
description: Whether queries should be logged before execution. description: Path to the bolt DB file located inside the so-influxdb container.
global: True
helpLink: influxdb.html
validate-keys:
description: Validates incoming writes to ensure keys only have valid unicode characters.
global: True global: True
advanced: True advanced: True
helpLink: influxdb.html helpLink: influxdb.html
cache-max-memory-size: engine-path:
description: The maximum size a shard's cache can reach before it starts rejecting writes. description: Path to the engine directory located inside the so-influxdb container. This directory stores the time series data.
global: True
helpLink: influxdb.html
cache-snapshot-memory-size:
description: The size at which the engine will snapshot the cache and write it to a TSM file, freeing up memory.
global: True global: True
advanced: True advanced: True
helpLink: influxdb.html helpLink: influxdb.html
cache-snapshot-write-cold-duration: feature-flags:
description: The length of time at which the engine will snapshot the cache and write it to a new TSM file if the shard hasn't received writes or deletes. description: List of key=value flags to enable.
global: True global: True
advanced: True advanced: True
helpLink: influxdb.html helpLink: influxdb.html
compact-full-write-cold-duration: flux-log-enabled:
description: The duration at which the engine will compact all TSM files in a shard if it hasn't received a write or delete. description: Controls whether detailed flux query logging is enabled.
global: True global: True
advanced: True advanced: True
helpLink: influxdb.html helpLink: influxdb.html
max-concurrent-compactions: hardening-enabled:
description: The maximum number of concurrent full and level compactions that can run at one time. description: If true, enforces outbound connections from the InfluxDB process must never attempt to reach an internal, private network address.
global: True
helpLink: influxdb.html
compact-throughput:
description: The rate limit in bytes per second that we will allow TSM compactions to write to disk.
global: True global: True
advanced: True advanced: True
helpLink: influxdb.html helpLink: influxdb.html
compact-throughput-burst: http-bind-address:
description: The rate limit in bytes per second that we will allow TSM compactions to write to disk. description: The URL and port on which InfluxDB will listen for new connections.
global: True global: True
advanced: True advanced: True
helpLink: influxdb.html helpLink: influxdb.html
max-index-log-file-size: http-idle-timeout:
description: The threshold, in bytes, when an index write-ahead log file will compact into an index file. description: Keep-alive timeout while a connection waits for new requests. A value of 0 is the same as no timeout enforced.
global: True global: True
advanced: True advanced: True
helpLink: influxdb.html helpLink: influxdb.html
max-series-per-database: http-read-header-timeout:
description: The maximum series allowed per database before writes are dropped. description: The duration to wait for a request header before closing the connection. A value of 0 is the same as no timeout enforced.
global: True global: True
advanced: True advanced: True
helpLink: influxdb.html helpLink: influxdb.html
max-values-per-tag: http-read-timeout:
description: The maximum number of tag values per tag that are allowed before writes are dropped. description: The duration to wait for the request to be fully read before closing the connection. A value of 0 is the same as no timeout enforced.
global: True global: True
advanced: True advanced: True
helpLink: influxdb.html helpLink: influxdb.html
tsm-use-madv-willneed: http-write-timeout:
description: If true, then the mmap advise value MADV_WILLNEED will be provided to the kernel with respect to TSM files. description: The duration to wait for the response to be fully written before closing the connection. A value of 0 is the same as no timeout enforced.
global: True global: True
advanced: True advanced: True
helpLink: influxdb.html helpLink: influxdb.html
coordinator: influxql-max-select-buckets:
write-timeout: description: Maximum number of group-by clauses in a SELECT statement. A value of 0 is the same as unlimited.
description: The default time a write request will wait until a "timeout" error is returned to the caller.
global: True
helpLink: influxdb.html
max-concurrent-queries:
description: The maximum number of concurrent queries allowed to be executing at one time.
global: True
helpLink: influxdb.html
query-timeout:
description: The maximum time a query will is allowed to execute before being killed by the system.
global: True
helpLink: influxdb.html
log-queries-after:
description: The time threshold when a query will be logged as a slow query.
global: True
helpLink: influxdb.html
max-select-point:
description: The maximum number of points a SELECT can process.
global: True global: True
advanced: True advanced: True
helpLink: influxdb.html helpLink: influxdb.html
max-select-series: influxql-max-select-point:
description: The maximum number of series a SELECT can run. description: Maximum number of points that can be queried in a SELECT statement. A value of 0 is the same as unlimited.
global: True global: True
advanced: True advanced: True
helpLink: influxdb.html helpLink: influxdb.html
max-select-buckets: influxql-max-select-series:
description: The maxium number of group by time bucket a SELECT can create. description: Maximum number of series that can be returned in a SELECT statement. A value of 0 is the same as unlimited.
global: True global: True
advanced: True advanced: True
helpLink: influxdb.html helpLink: influxdb.html
retention: instance-id:
enabled: description: Unique instance ID for this server, to avoid collisions in a replicated cluster.
description: Determines whether retention policy enforcement enabled.
global: True global: True
advanced: True advanced: True
helpLink: influxdb.html helpLink: influxdb.html
check-interval: log-level:
description: The interval of time when retention policy enforcement checks run. description: The log level to use for outputting log statements. Allowed values are debug, info, or error.
global: True global: True
advanced: false
regex: ^(info|debug|error)$
helpLink: influxdb.html helpLink: influxdb.html
shard-precreation: metrics-disabled:
enabled: description: If true, the HTTP endpoint that exposes internal InfluxDB metrics will be inaccessible.
description: Determines whether shard pre-creation service is enabled.
global: True global: True
advanced: True advanced: True
helpLink: influxdb.html helpLink: influxdb.html
check-interval: no-tasks:
description: The interval of time when the check to pre-create new shards runs. description: If true, the task system will not process any queued tasks. Useful for troubleshooting startup problems.
global: True
helpLink: influxdb.html
advance-period:
description: The default period ahead of the endtime of a shard group that its successor group is created.
global: True global: True
advanced: True advanced: True
helpLink: influxdb.html helpLink: influxdb.html
monitor: pprof-disabled:
store-enabled: description: If true, the profiling data HTTP endpoint will be inaccessible.
description: Whether to record statistics internally.
global: True
helpLink: influxdb.html
store-database:
description: The destination database for recorded statistics.
global: True global: True
advanced: True advanced: True
helpLink: influxdb.html helpLink: influxdb.html
store-interval: query-concurrency:
description: The interval at which to record statistics. description: Maximum number of queries to execute concurrently. A value of 0 is the same as unlimited.
global: True
helpLink: influxdb.html
http:
enabled:
description: Determines whether HTTP endpoint is enabled.
global: True global: True
advanced: True advanced: True
helpLink: influxdb.html helpLink: influxdb.html
flux-enabled: query-initial-memory-bytes:
description: Determines whether the Flux query endpoint is enabled. description: The initial number of bytes of memory to allocate for a new query.
global: True global: True
advanced: True advanced: True
helpLink: influxdb.html helpLink: influxdb.html
bind-address: query-max-memory-bytes:
description: The bind address used by the HTTP service. description: The number of bytes of memory to allocate to all running queries. Should typically be the query bytes times the max concurrent queries.
global: True global: True
advanced: True advanced: True
helpLink: influxdb.html helpLink: influxdb.html
auth-enabled: query-memory-bytes:
description: Determines whether user authentication is enabled over HTTP/HTTPS. description: Maximum number of bytes of memory to allocate to a query.
global: True global: True
advanced: True advanced: True
helpLink: influxdb.html helpLink: influxdb.html
realm: query-queue-size:
description: The default realm sent back when issuing a basic auth challenge. description: Maximum number of queries that can be queued at one time. If this value is reached, new queries will not be queued. A value of 0 is the same as unlimited.
global: True global: True
advanced: True advanced: True
helpLink: influxdb.html helpLink: influxdb.html
log-enabled: reporting-disabled:
description: Determines whether HTTP request logging is enabled. description: If true, prevents InfluxDB from sending telemetry updates to InfluxData's servers.
global: True
helpLink: influxdb.html
suppress-write-log:
description: Determines whether the HTTP write request logs should be suppressed when the log is enabled.
global: True
helpLink: influxdb.html
access-log-path:
description: Path for http access logs.
global: True global: True
advanced: True advanced: True
helpLink: influxdb.html helpLink: influxdb.html
access-log-status-filters: secret-store:
description: Filters which requests should be logged. description: Determines the type of storage used for secrets. Allowed values are bolt or vault.
global: True
advanced: True
regex: ^(bolt|vault)$
helpLink: influxdb.html
session-length:
description: Number of minutes that a user login session can remain authenticated.
global: True global: True
advanced: True advanced: True
helpLink: influxdb.html helpLink: influxdb.html
write-tracing: session-renew-disabled:
description: Determines whether detailed write logging is enabled. description: If true, user login sessions will renew after each request.
global: True global: True
advanced: True advanced: True
helpLink: influxdb.html helpLink: influxdb.html
pprof-enabled: sqlite-path:
description: Determines whether the pprof endpoint is enabled. description: Path to the Sqlite3 database inside the container. This database stored user data and other information about the database.
global: True global: True
advanced: True advanced: True
helpLink: influxdb.html helpLink: influxdb.html
debug-pprof-enabled: storage-cache-max-memory-size:
description: Determines whether the pprof endpoint is enabled in debug mode. description: Maximum number of bytes to allocate to cache data per shard. If exceeded, new data writes will be rejected.
global: True global: True
advanced: True advanced: True
helpLink: influxdb.html helpLink: influxdb.html
https-enabled: storage-cache-snapshot-memory-size:
description: Determines whether HTTPS is enabled. description: Number of bytes to allocate to cache snapshot data. When the cache reaches this size, it will be written to disk to increase available memory.
global: True
helpLink: influxdb.html
https-certificate:
description: The SSL certificate to use when HTTPS is enabled.
global: True global: True
advanced: True advanced: True
helpLink: influxdb.html helpLink: influxdb.html
https-private-key: storage-cache-snapshot-write-cold-duration:
description: Use a separate private key location. description: Duration between snapshot writes to disk when the shard data hasn't been modified.
global: True global: True
advanced: True advanced: True
helpLink: influxdb.html helpLink: influxdb.html
shared-secret: storage-compact-full-write-cold-duration:
description: The JWT auth shared secret to validate requests using JSON web tokens. description: Duration between shard compactions when the shard data hasn't been modified.
global: True global: True
advanced: True advanced: True
helpLink: influxdb.html helpLink: influxdb.html
max-row-limit: storage-compact-throughput-burst:
description: The default chunk size for result sets that should be chunked. description: Maximum throughput (number of bytes per second) that compactions be written to disk.
global: True
helpLink: influxdb.html
max-connection-limit:
description: The maximum number of HTTP connections that may be open at once.
global: True
helpLink: influxdb.html
unix-socket-enabled:
description: Enable http service over unix domain socket.
global: True global: True
advanced: True advanced: True
helpLink: influxdb.html helpLink: influxdb.html
bind-socket: storage-max-concurrent-compactions:
description: The path of the unix domain socket. description: Maximum number of concurrent compactions. A value of 0 is the same as half the available CPU processors (procs).
global: True global: True
advanced: True advanced: True
helpLink: influxdb.html helpLink: influxdb.html
max-body-size: storage-max-index-log-file-size:
description: The maximum size of a client request body, in bytes. description: Maximum number of bytes of a write-ahead log (WAL) file before it will be compacted into an index on disk.
global: True
helpLink: influxdb.html
max-concurrent-write-limit:
description: The maximum number of writes processed concurrently.
global: True
helpLink: influxdb.html
max-enqueued-write-limit:
description: The maximum number of writes queued for processing.
global: True
helpLink: influxdb.html
enqueued-write-timeout:
description: The maximum duration for a write to wait in the queue to be processed.
global: True
helpLink: influxdb.html
logging:
format:
description: Determines which log encoder to use for logs.
global: True
helpLink: influxdb.html
level:
description: Determines which level of logs will be emitted.
global: True
helpLink: influxdb.html
suppress-logo:
description: Suppresses the logo output that is printed when the program is started.
global: True
helpLink: influxdb.html
subscriber:
enabled:
description: Determines whether the subscriber service is enabled.
global: True global: True
advanced: True advanced: True
helpLink: influxdb.html helpLink: influxdb.html
http-timeout: storage-no-validate-field-size:
description: The default timeout for HTTP writes to subscribers. description: If true, incoming requests will skip the field size validation.
global: True
helpLink: influxdb.html
insecure-skip-verify:
description: Allows insecure HTTPS connections to subscribers.
global: True global: True
advanced: True advanced: True
helpLink: influxdb.html helpLink: influxdb.html
ca-certs: storage-retention-check-interval:
description: The path to the PEM encoded CA certs file. description: Interval between reviewing each bucket's retention policy and the age of the associated data.
global: True global: True
advanced: True advanced: True
helpLink: influxdb.html helpLink: influxdb.html
write-concurrency: storage-series-file-max-concurrent-snapshot-compactions:
description: he number of writer goroutines processing the write channel. description: Maximum number of concurrent snapshot compactions across all database partitions.
global: True global: True
advanced: True
helpLink: influxdb.html helpLink: influxdb.html
write-buffer-size: storage-series-id-set-cache-size:
description: The number of in-flight writes buffered in the write channel. description: Maximum size of the series cache results. Higher values may increase performance for repeated data lookups.
global: True global: True
advanced: True
helpLink: influxdb.html helpLink: influxdb.html
continuous_queries: storage-shard-precreator-advance-period:
enabled: description: The duration before a successor shard group is created after the end-time has been reached.
description: Determines whether the continuous query service is enabled.
global: True global: True
advanced: True
helpLink: influxdb.html helpLink: influxdb.html
log-enabled: storage-shard-precreator-check-interval:
description: Controls whether queries are logged when executed by the CQ service. description: Interval between checking if new shards should be created.
global: True global: True
advanced: True
helpLink: influxdb.html helpLink: influxdb.html
query-stats-enabled: storage-tsm-use-madv-willneed:
description: Controls whether queries are logged to the self-monitoring data store. description: If true, InfluxDB will manage TSM memory paging.
global: True global: True
advanced: True
helpLink: influxdb.html helpLink: influxdb.html
run-interval: storage-validate-keys:
description: Interval for how often continuous queries will be checked if they need to run. description: If true, validates incoming requests for supported characters.
global: True global: True
advanced: True
helpLink: influxdb.html helpLink: influxdb.html
tls: storage-wal-fsync-delay:
ciphers: description: Duration to wait before calling fsync. Useful for handling conflicts on slower disks.
description: Determines the available set of cipher suites.
global: True global: True
advanced: True
helpLink: influxdb.html helpLink: influxdb.html
min-version: storage-wal-max-concurrent-writes:
description: Minimum version of the tls protocol that will be negotiated. description: Maximum number of concurrent write-ahead log (WAL) writes to disk. The value of 0 is the same as CPU processors (procs) x 2.
global: True global: True
advanced: True
helpLink: influxdb.html helpLink: influxdb.html
max-version: storage-wal-max-write-delay:
description: Maximum version of the tls protocol that will be negotiated. description: Maximum duration to wait before writing the write-ahead log (WAL) to disk, when the concurrency limit has been exceeded. A value of 0 is the same as no timeout.
global: True global: True
advanced: True
helpLink: influxdb.html helpLink: influxdb.html
retention_policies: storage-write-timeout:
description: Maximum time to wait for a write-ahead log (WAL) to write to disk before aborting.
global: True
advanced: True
helpLink: influxdb.html
store:
description: The type of data store to use for HTTP resources. Allowed values are disk or memory. Memory should not be used for production Security Onion installations.
global: True
advanced: True
regex: ^(disk|memory)$
helpLink: influxdb.html
tls-cert:
description: The container path to the certificate to use for TLS encryption of the HTTP requests and responses.
global: True
advanced: True
helpLink: influxdb.html
tls-key:
description: The container path to the certificate key to use for TLS encryption of the HTTP requests and responses.
global: True
advanced: True
helpLink: influxdb.html
tls-min-version:
description: The minimum supported version of TLS to be enforced on all incoming HTTP requests.
global: True
advanced: True
helpLink: influxdb.html
tls-strict-ciphers:
description: If true, the allowed ciphers used with TLS connections are ECDHE_RSA_WITH_AES_256_GCM_SHA384, ECDHE_RSA_WITH_AES_256_CBC_SHA, RSA_WITH_AES_256_GCM_SHA384, or RSA_WITH_AES_256_CBC_SHA.
global: True
advanced: True
helpLink: influxdb.html
tracing-type:
description: The tracing format for debugging purposes. Allowed values are log or jaeger, or leave blank to disable tracing.
global: True
advanced: True
helpLink: influxdb.html
ui-disabled:
description: If true, the InfluxDB HTTP user interface will be disabled. This will prevent use of the included InfluxDB dashboard visualizations.
global: True
advanced: True
helpLink: influxdb.html
vault-addr:
description: Vault server address.
global: True
advanced: True
helpLink: influxdb.html
vault-cacert:
description: Path to the Vault's single certificate authority certificate file within the container.
global: True
advanced: True
helpLink: influxdb.html
vault-capath:
description: Path to the Vault's certificate authority directory within the container.
global: True
advanced: True
helpLink: influxdb.html
vault-client-cert:
description: Vault client certificate path within the container.
global: True
advanced: True
helpLink: influxdb.html
vault-client-key:
description: Vault client certificate key path within the container.
global: True
advanced: True
helpLink: influxdb.html
vault-client-timeout:
description: Duration to wait for a response from the Vault server before aborting.
global: True
advanced: True
helpLink: influxdb.html
vault-max-retries:
description: Maximum number of retries when attempting to contact the Vault server. A value of 0 is the same as disabling retries.
global: True
advanced: True
helpLink: influxdb.html
vault-skip-verify:
description: Skip certification validation of the Vault server.
global: True
advanced: True
helpLink: influxdb.html
vault-tls-server-name:
description: SNI host to specify when using TLS to connect to the Vault server.
global: True
advanced: True
helpLink: influxdb.html
vault-token:
description: Vault token used for authentication.
global: True
advanced: True
helpLink: influxdb.html
buckets:
so_short_term: so_short_term:
duration: duration:
description: Amount of time to keep short term data. description: Amount of time (in seconds) to keep short term data.
global: True global: True
helpLink: grafana.html#data helpLink: influxdb.html
shard_duration: shard_duration:
description: Time range description: Amount of the time (in seconds) range covered by the shard group.
global: True global: True
helpLink: grafana.html#data helpLink: influxdb.html
so_long_term: so_long_term:
duration: duration:
description: Amount of time to keep long term downsampled data. description: Amount of time (in seconds) to keep long term downsampled data.
global: True global: True
helpLink: grafana.html#data helpLink: influxdb.html
shard_duration: shard_duration:
description: Amount of the time range covered by the shard group. description: Amount of the time (in seconds) range covered by the shard group.
global: True global: True
helpLink: grafana.html#data helpLink: influxdb.html
downsample: downsample:
so_long_term: so_long_term:
resolution: resolution:
description: Amount of time to turn into a single data point. description: Amount of time to turn into a single data point.
global: True global: True
helpLink: grafana.html#data helpLink: influxdb.html

View File

@@ -0,0 +1,18 @@
[{
"apiVersion": "influxdata.com/v2alpha1",
"kind": "CheckDeadman",
"metadata": {
"name": "alarm-deadman"
},
"spec": {
"description": "Data has not been received from Telegraf for an extended period.",
"every": "1m0s",
"level": "CRIT",
"name": "Telegraf Data Outage",
"query": "from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_idle\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")",
"staleTime": "15m0s",
"status": "active",
"statusMessageTemplate": "Check: ${ r._check_name } is: ${ r._level }",
"timeSince": "2m0s"
}
}]

View File

@@ -0,0 +1,21 @@
[{
"apiVersion": "influxdata.com/v2alpha1",
"kind": "CheckThreshold",
"metadata": {
"name": "alarm-logstash-eps"
},
"spec": {
"every": "1m0s",
"name": "Logstash EPS at 0",
"query": "from(bucket: \"telegraf/so_short_term\")\n |\u003e range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |\u003e filter(fn: (r) =\u003e r[\"_measurement\"] == \"logstash_events\")\n |\u003e filter(fn: (r) =\u003e r[\"_field\"] == \"in\")\n |\u003e aggregateWindow(every: 1m, fn: mean, createEmpty: false)\n |\u003e yield(name: \"mean\")",
"status": "active",
"statusMessageTemplate": "Check: ${ r._check_name } is: ${ r._level }",
"thresholds": [
{
"level": "CRIT",
"type": "lesser",
"value": 1
}
]
}
}]

View File

@@ -0,0 +1,32 @@
[{
"apiVersion": "influxdata.com/v2alpha1",
"kind": "CheckThreshold",
"metadata": {
"name": "alarm-nsm-disk"
},
"spec": {
"description": "Percent used space on the root partition of at least one node has exceeded the alarm threshold.",
"every": "1m0s",
"name": "NSM Disk High Usage",
"query": "from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"path\"] == \"/\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> aggregateWindow(every: 1m, fn: max, createEmpty: false)\n |> yield(name: \"max\")",
"status": "active",
"statusMessageTemplate": "Check: ${ r._check_name } is: ${ r._level }",
"thresholds": [
{
"level": "CRIT",
"type": "greater",
"value": 95
},
{
"level": "INFO",
"type": "greater",
"value": 85
},
{
"level": "WARN",
"type": "greater",
"value": 90
}
]
}
}]

View File

@@ -0,0 +1,32 @@
[{
"apiVersion": "influxdata.com/v2alpha1",
"kind": "CheckThreshold",
"metadata": {
"name": "alarm-root-disk"
},
"spec": {
"description": "Percent used space on the root partition of at least one node has exceeded the alarm threshold.",
"every": "1m0s",
"name": "Root Disk High Usage",
"query": "from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"path\"] == \"/\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> aggregateWindow(every: 1m, fn: max, createEmpty: false)\n |> yield(name: \"max\")",
"status": "active",
"statusMessageTemplate": "Check: ${ r._check_name } is: ${ r._level }",
"thresholds": [
{
"level": "CRIT",
"type": "greater",
"value": 95
},
{
"level": "INFO",
"type": "greater",
"value": 85
},
{
"level": "WARN",
"type": "greater",
"value": 90
}
]
}
}]

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1 @@
[{"apiVersion":"influxdata.com/v2alpha1","kind":"Task","metadata":{"name":"task-downsample"},"spec":{"every":"{{ INFLUXMERGED.downsample.so_long_term.resolution }}","name":"Downsample","query":"import \"types\"\n\n\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: -task.every)\n |> filter(fn: (r) => types.isNumeric(v: r._value))\n |> aggregateWindow(every: task.every, fn: mean)\n |> to(bucket: \"telegraf/so_long_term\")"}}]

View File

@@ -0,0 +1 @@
[{"apiVersion":"influxdata.com/v2alpha1","kind":"Variable","metadata":{"name":"variable-container"},"spec":{"language":"flux","name":"Container","query":"import \"array\"\n\ndynamic = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_cpu\")\n |> filter(fn: (r) => r[\"host\"] == v.Host)\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> keep(columns: [\"container_name\"])\n |> rename(fn: (column) => \"_value\")\n |> unique()\n\nstatic = array.from(\n rows: [\n {\n _value: \"(All)\",\n },\n ],\n)\n\nunion(tables: [static, dynamic])","selected":["cool_gauss"],"type":"query"}}]

View File

@@ -0,0 +1 @@
[{"apiVersion":"influxdata.com/v2alpha1","kind":"Variable","metadata":{"name":"variable-host"},"spec":{"language":"flux","name":"Host","query":"import \"influxdata/influxdb/schema\"\nimport \"array\"\n\ndynamic = schema.tagValues(bucket: \"telegraf/so_short_term\", tag: \"host\")\n\nstatic = array.from(\n rows: [\n {\n _value: \"(All)\",\n },\n ],\n)\n\nunion(tables: [static, dynamic])","selected":["dev"],"type":"query"}}]

View File

@@ -0,0 +1 @@
[{"apiVersion":"influxdata.com/v2alpha1","kind":"Variable","metadata":{"name":"variable-role"},"spec":{"language":"flux","name":"Role","query":"import \"influxdata/influxdb/schema\"\nimport \"array\"\n\ndynamic = schema.tagValues(bucket: \"telegraf/so_short_term\", tag: \"role\")\n\nstatic = array.from(\n rows: [\n {\n _value: \"(All)\",\n },\n ],\n)\n\nunion(tables: [static, dynamic])","selected":["standalone"],"type":"query"}}]

View File

@@ -63,7 +63,7 @@ update() {
IFS=$'\r\n' GLOBIGNORE='*' command eval 'LINES=($(cat $1))' IFS=$'\r\n' GLOBIGNORE='*' command eval 'LINES=($(cat $1))'
for i in "${LINES[@]}"; do for i in "${LINES[@]}"; do
RESPONSE=$(curl -K /opt/so/conf/elasticsearch/curl.config -X PUT "localhost:5601/api/saved_objects/config/8.6.1" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d " $i ") RESPONSE=$(curl -K /opt/so/conf/elasticsearch/curl.config -X PUT "localhost:5601/api/saved_objects/config/8.6.2" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d " $i ")
echo $RESPONSE; if [[ "$RESPONSE" != *"\"success\":true"* ]] && [[ "$RESPONSE" != *"updated_at"* ]] ; then RETURN_CODE=1;fi echo $RESPONSE; if [[ "$RESPONSE" != *"\"success\":true"* ]] && [[ "$RESPONSE" != *"updated_at"* ]] ; then RETURN_CODE=1;fi
done done

View File

@@ -1 +1 @@
{"attributes": {"buildNum": 39457,"defaultIndex": "2289a0c0-6970-11ea-a0cd-ffa0f6a1bc29","defaultRoute": "/app/dashboards#/view/a8411b30-6d03-11ea-b301-3d6c35840645","discover:sampleSize": 100,"theme:darkMode": true,"timepicker:timeDefaults": "{\n \"from\": \"now-24h\",\n \"to\": \"now\"\n}"},"coreMigrationVersion": "8.6.1","id": "8.6.1","migrationVersion": {"config": "7.13.0"},"references": [],"type": "config","updated_at": "2021-10-10T10:10:10.105Z","version": "WzI5NzUsMl0="} {"attributes": {"buildNum": 39457,"defaultIndex": "logs-*","defaultRoute": "/app/dashboards#/view/a8411b30-6d03-11ea-b301-3d6c35840645","discover:sampleSize": 100,"theme:darkMode": true,"timepicker:timeDefaults": "{\n \"from\": \"now-24h\",\n \"to\": \"now\"\n}"},"coreMigrationVersion": "8.6.2","id": "8.6.2","migrationVersion": {"config": "7.13.0"},"references": [],"type": "config","updated_at": "2021-10-10T10:10:10.105Z","version": "WzI5NzUsMl0="}

File diff suppressed because one or more lines are too long

View File

@@ -1,7 +1,7 @@
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
{% set REDIS_NODES = [] %} {% set REDIS_NODES = [] %}
{% set LOGSTASH_NODES = [] %} {% set LOGSTASH_NODES = [] %}
{% set node_data = salt['pillar.get']('logstash:nodes') %} {% set node_data = salt['pillar.get']('logstash:nodes', {GLOBALS.role.split('-')[1]: {GLOBALS.hostname: {'ip': GLOBALS.node_ip}}}) %}
{% for node_type, node_details in node_data.items() | sort %} {% for node_type, node_details in node_data.items() | sort %}
{% if GLOBALS.role in ['so-searchnode', 'so-standalone', 'so-managersearch'] %} {% if GLOBALS.role in ['so-searchnode', 'so-standalone', 'so-managersearch'] %}

View File

@@ -1,5 +1,9 @@
{%- from 'vars/globals.map.jinja' import GLOBALS %} {%- from 'vars/globals.map.jinja' import GLOBALS %}
{%- set role = grains.id.split('_') | last %} {%- set role = grains.id.split('_') | last %}
{%- set influxpass = salt['pillar.get']('secrets:influx_pass') %}
{%- set influxauth = ('so:' + influxpass) | base64_encode %}
worker_processes auto; worker_processes auto;
error_log /var/log/nginx/error.log; error_log /var/log/nginx/error.log;
pid /run/nginx.pid; pid /run/nginx.pid;
@@ -179,17 +183,18 @@ http {
autoindex_localtime on; autoindex_localtime on;
} }
location /grafana/ { location /influxdb/ {
auth_request /auth/sessions/whoami; auth_request /auth/sessions/whoami;
rewrite /grafana/(.*) /$1 break; rewrite /influxdb/api/(.*) /api/$1 break;
proxy_pass http://{{ GLOBALS.manager_ip }}:3000/; proxy_pass https://{{ GLOBALS.manager_ip }}:8086/;
proxy_read_timeout 90; proxy_read_timeout 300;
proxy_connect_timeout 90; proxy_connect_timeout 90;
proxy_set_header Host $host; proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy ""; proxy_set_header Proxy "";
proxy_set_header X-Forwarded-Proto $scheme; proxy_set_header X-Forwarded-Proto $scheme;
proxy_cookie_path /api/ /influxdb/api/;
} }
location /kibana/ { location /kibana/ {

View File

@@ -28,6 +28,7 @@ nginxconf:
- group: 939 - group: 939
- template: jinja - template: jinja
- source: salt://nginx/etc/nginx.conf - source: salt://nginx/etc/nginx.conf
- show_changes: False
nginxlogdir: nginxlogdir:
file.directory: file.directory:

View File

@@ -1,4 +0,0 @@
56c56
< database, name, query, resample_time, coverage_period
---
> database, name, query, resample_time, coverage_period, **client_args

View File

@@ -1,16 +0,0 @@
34c34
< hours = int(duration.split("h"))
---
> hours = int(duration.split("h")[0])
48c48
< def present(name, database, duration="7d", replication=1, default=False, **client_args):
---
> def present(name, database, duration="7d", replication=1, default=False, shard_duration="1d", **client_args):
73c73
< database, name, duration, replication, default, **client_args
---
> database, name, duration, replication, shard_duration, default, **client_args
113c113
< database, name, duration, replication, default, **client_args
---
> database, name, duration, replication, shard_duration, default, **client_args

View File

@@ -1,16 +0,0 @@
421c421
< database, name, duration, replication, default=False, **client_args
---
> database, name, duration, replication, shard_duration, default=False, **client_args
456c456
< client.create_retention_policy(name, duration, replication, database, default)
---
> client.create_retention_policy(name, duration, replication, database, default, shard_duration)
462c462
< database, name, duration, replication, default=False, **client_args
---
> database, name, duration, replication, shard_duration, default=False, **client_args
498c498
< client.alter_retention_policy(name, database, duration, replication, default)
---
> client.alter_retention_policy(name, database, duration, replication, default, shard_duration)

View File

@@ -7,10 +7,6 @@
{% set SALTPACKAGES = ['salt-common', 'salt-master', 'salt-minion'] %} {% set SALTPACKAGES = ['salt-common', 'salt-master', 'salt-minion'] %}
{% set SALT_STATE_CODE_PATH = '/usr/lib/python3/dist-packages/salt/states' %} {% set SALT_STATE_CODE_PATH = '/usr/lib/python3/dist-packages/salt/states' %}
{% set SALT_MODULE_CODE_PATH = '/usr/lib/python3/dist-packages/salt/modules' %} {% set SALT_MODULE_CODE_PATH = '/usr/lib/python3/dist-packages/salt/modules' %}
{% set PYTHONINFLUXVERSION = '5.3.1' %}
{% set PYTHON3INFLUX= 'influxdb == ' ~ PYTHONINFLUXVERSION %}
{% set PYTHON3INFLUXDEPS= ['certifi', 'chardet', 'python-dateutil', 'pytz', 'requests'] %}
{% set PYTHONINSTALLER = 'pip' %}
{% set SYSTEMD_UNIT_FILE = '/lib/systemd/system/salt-minion.service' %} {% set SYSTEMD_UNIT_FILE = '/lib/systemd/system/salt-minion.service' %}
{% else %} {% else %}
{% set SPLITCHAR = '-' %} {% set SPLITCHAR = '-' %}
@@ -18,10 +14,6 @@
{% set SALTPACKAGES = ['salt', 'salt-master', 'salt-minion'] %} {% set SALTPACKAGES = ['salt', 'salt-master', 'salt-minion'] %}
{% set SALT_STATE_CODE_PATH = '/usr/lib/python3.6/site-packages/salt/states' %} {% set SALT_STATE_CODE_PATH = '/usr/lib/python3.6/site-packages/salt/states' %}
{% set SALT_MODULE_CODE_PATH = '/usr/lib/python3.6/site-packages/salt/modules' %} {% set SALT_MODULE_CODE_PATH = '/usr/lib/python3.6/site-packages/salt/modules' %}
{% set PYTHONINFLUXVERSION = '5.3.1' %}
{% set PYTHON3INFLUX= 'securityonion-python3-influxdb' %}
{% set PYTHON3INFLUXDEPS= ['python36-certifi', 'python36-chardet', 'python36-dateutil', 'python36-pytz', 'python36-requests'] %}
{% set PYTHONINSTALLER = 'pkg' %}
{% set SYSTEMD_UNIT_FILE = '/usr/lib/systemd/system/salt-minion.service' %} {% set SYSTEMD_UNIT_FILE = '/usr/lib/systemd/system/salt-minion.service' %}
{% endif %} {% endif %}

View File

@@ -1,6 +1,8 @@
{% import_yaml 'soc/defaults.yaml' as SOCDEFAULTS %} {% import_yaml 'soc/defaults.yaml' as SOCDEFAULTS %}
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'docker/docker.map.jinja' import DOCKER -%} {% from 'docker/docker.map.jinja' import DOCKER -%}
{%- set INFLUXDB_TOKEN = salt['pillar.get']('secrets:influx_token') %}
{%- set METRICS_LINK = salt['cmd.run']('so-influxdb-manage dashboardpath "Security Onion Performance"') %}
{% for module, application_url in GLOBALS.application_urls.items() %} {% for module, application_url in GLOBALS.application_urls.items() %}
{% do SOCDEFAULTS.soc.server.modules[module].update({'hostUrl': application_url}) %} {% do SOCDEFAULTS.soc.server.modules[module].update({'hostUrl': application_url}) %}
@@ -15,9 +17,13 @@
{% do SOCDEFAULTS.soc.server.modules.elastic.update({'username': GLOBALS.elasticsearch.auth.users.so_elastic_user.user, 'password': GLOBALS.elasticsearch.auth.users.so_elastic_user.pass}) %} {% do SOCDEFAULTS.soc.server.modules.elastic.update({'username': GLOBALS.elasticsearch.auth.users.so_elastic_user.user, 'password': GLOBALS.elasticsearch.auth.users.so_elastic_user.pass}) %}
{% if GLOBALS.role != 'so-import' %}
{% do SOCDEFAULTS.soc.server.modules.influxdb.update({'hostUrl': 'https://' ~ GLOBALS.influxdb_host ~ ':8086'}) %} {% do SOCDEFAULTS.soc.server.modules.influxdb.update({'hostUrl': 'https://' ~ GLOBALS.influxdb_host ~ ':8086'}) %}
{% do SOCDEFAULTS.soc.server.modules.influxdb.update({'token': INFLUXDB_TOKEN}) %}
{% for tool in SOCDEFAULTS.soc.server.client.tools %}
{% if tool.name == "toolInfluxDb" %}
{% do tool.update({'link': METRICS_LINK}) %}
{% endif %} {% endif %}
{% endfor %}
{% do SOCDEFAULTS.soc.server.modules.statickeyauth.update({'anonymousCidr': DOCKER.sorange, 'apiKey': pillar.sensoroni.sensoronikey}) %} {% do SOCDEFAULTS.soc.server.modules.statickeyauth.update({'anonymousCidr': DOCKER.sorange, 'apiKey': pillar.sensoroni.sensoronikey}) %}

View File

@@ -1031,9 +1031,9 @@ soc:
asyncThreshold: 10 asyncThreshold: 10
influxdb: influxdb:
hostUrl: hostUrl:
token: '' token:
org: '' org: Security Onion
bucket: telegraf bucket: telegraf/so_short_term
verifyCert: false verifyCert: false
salt: salt:
saltPipe: /opt/sensoroni/salt/pipe saltPipe: /opt/sensoroni/salt/pipe
@@ -1076,11 +1076,11 @@ soc:
icon: fa-external-link-alt icon: fa-external-link-alt
target: so-osquery-manager target: so-osquery-manager
link: /kibana/app/osquery/live_queries link: /kibana/app/osquery/live_queries
- name: toolGrafana - name: toolInfluxDb
description: toolGrafanaHelp description: toolInfluxDbHelp
icon: fa-external-link-alt icon: fa-external-link-alt
target: so-grafana target: so-influxdb
link: /grafana/d/so_overview link: /influxdb
- name: toolCyberchef - name: toolCyberchef
description: toolCyberchefHelp description: toolCyberchefHelp
icon: fa-external-link-alt icon: fa-external-link-alt

View File

@@ -27,11 +27,6 @@
{% do SOCMERGED.server.client.inactiveTools.append('toolFleet') %} {% do SOCMERGED.server.client.inactiveTools.append('toolFleet') %}
{% if pillar.manager.grafana == 0 %}
{% do SOCMERGED.server.client.inactiveTools.append('toolGrafana') %}
{% endif %}
{% set standard_actions = SOCMERGED.pop('actions') %} {% set standard_actions = SOCMERGED.pop('actions') %}
{% if pillar.global.endgamehost is defined %} {% if pillar.global.endgamehost is defined %}
{% set endgame_dict = { {% set endgame_dict = {

View File

@@ -2,6 +2,7 @@
{%- set INFLUXDBHOST = GLOBALS.influxdb_host %} {%- set INFLUXDBHOST = GLOBALS.influxdb_host %}
{%- set ES_USER = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:user', '') %} {%- set ES_USER = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:user', '') %}
{%- set ES_PASS = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:pass', '') %} {%- set ES_PASS = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:pass', '') %}
{%- set TOKEN = salt['pillar.get']('secrets:influx_token', '') %}
{%- set NODEIP = GLOBALS.node_ip %} {%- set NODEIP = GLOBALS.node_ip %}
{%- set UNIQUEID = salt['pillar.get']('sensor:uniqueid', '') %} {%- set UNIQUEID = salt['pillar.get']('sensor:uniqueid', '') %}
{%- set ZEEK_ENABLED = salt['pillar.get']('zeek:enabled', True) %} {%- set ZEEK_ENABLED = salt['pillar.get']('zeek:enabled', True) %}
@@ -71,8 +72,11 @@
############################################################################### ###############################################################################
# Configuration for sending metrics to InfluxDB # Configuration for sending metrics to InfluxDB
[[outputs.influxdb]] [[outputs.influxdb_v2]]
urls = ["https://{{ INFLUXDBHOST }}:8086"] urls = ["https://{{ INFLUXDBHOST }}:8086"]
token = "{{ TOKEN }}"
organization = "Security Onion"
bucket = "telegraf/so_short_term"
## Optional TLS Config for use on HTTP connections. ## Optional TLS Config for use on HTTP connections.
tls_ca = "/etc/telegraf/ca.crt" tls_ca = "/etc/telegraf/ca.crt"

View File

@@ -53,7 +53,7 @@ tgrafconf:
TGMERGED: {{ TGMERGED }} TGMERGED: {{ TGMERGED }}
# this file will be read by telegraf to send node details (management interface, monitor interface, etc) # this file will be read by telegraf to send node details (management interface, monitor interface, etc)
# into influx so that Grafana can build dashboards using queries # into influx
node_config: node_config:
file.managed: file.managed:
- name: /opt/so/conf/telegraf/node_config.json - name: /opt/so/conf/telegraf/node_config.json

View File

@@ -293,9 +293,9 @@ base:
- sensoroni - sensoroni
- manager - manager
- nginx - nginx
- soc
- influxdb
- telegraf - telegraf
- influxdb
- soc
- kratos - kratos
- firewall - firewall
- idstools - idstools

1
salt/vars/idh.map.jinja Normal file
View File

@@ -0,0 +1 @@
{% set ROLE_GLOBALS = {} %}

View File

@@ -21,7 +21,6 @@ BASICSURI=2
ZEEKVERSION=ZEEK ZEEKVERSION=ZEEK
# CURCLOSEDAYS= # CURCLOSEDAYS=
# EVALADVANCED=BASIC # EVALADVANCED=BASIC
GRAFANA=1
# HELIXAPIKEY= # HELIXAPIKEY=
HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12 HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
HNSENSOR=inherit HNSENSOR=inherit

View File

@@ -21,7 +21,6 @@ ADMINPASS2=onionuser
# ZEEKVERSION=ZEEK # ZEEKVERSION=ZEEK
# CURCLOSEDAYS= # CURCLOSEDAYS=
# EVALADVANCED=BASIC # EVALADVANCED=BASIC
# GRAFANA=1
# HELIXAPIKEY= # HELIXAPIKEY=
HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12 HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
HNSENSOR=inherit HNSENSOR=inherit

View File

@@ -21,7 +21,6 @@ BNICS=eth1
ZEEKVERSION=ZEEK ZEEKVERSION=ZEEK
# CURCLOSEDAYS= # CURCLOSEDAYS=
# EVALADVANCED=BASIC # EVALADVANCED=BASIC
# GRAFANA=1
# HELIXAPIKEY= # HELIXAPIKEY=
HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12 HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
HNSENSOR=inherit HNSENSOR=inherit

View File

@@ -21,7 +21,6 @@ BASICSURI=2
ZEEKVERSION=ZEEK ZEEKVERSION=ZEEK
# CURCLOSEDAYS= # CURCLOSEDAYS=
# EVALADVANCED=BASIC # EVALADVANCED=BASIC
GRAFANA=1
# HELIXAPIKEY= # HELIXAPIKEY=
HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12 HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
HNSENSOR=inherit HNSENSOR=inherit

View File

@@ -21,7 +21,6 @@ ADMINPASS2=onionuser
#ZEEKVERSION=ZEEK #ZEEKVERSION=ZEEK
# CURCLOSEDAYS= # CURCLOSEDAYS=
# EVALADVANCED=BASIC # EVALADVANCED=BASIC
#GRAFANA=1
# HELIXAPIKEY= # HELIXAPIKEY=
HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12 HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
HNSENSOR=inherit HNSENSOR=inherit

View File

@@ -21,7 +21,6 @@ BNICS=eth1
ZEEKVERSION=ZEEK ZEEKVERSION=ZEEK
# CURCLOSEDAYS= # CURCLOSEDAYS=
# EVALADVANCED=BASIC # EVALADVANCED=BASIC
#GRAFANA=1
# HELIXAPIKEY= # HELIXAPIKEY=
HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12 HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
HNSENSOR=inherit HNSENSOR=inherit

View File

@@ -21,7 +21,6 @@ BASICSURI=7
ZEEKVERSION=ZEEK ZEEKVERSION=ZEEK
# CURCLOSEDAYS= # CURCLOSEDAYS=
# EVALADVANCED=BASIC # EVALADVANCED=BASIC
GRAFANA=1
# HELIXAPIKEY= # HELIXAPIKEY=
HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12 HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
HNSENSOR=inherit HNSENSOR=inherit

View File

@@ -21,7 +21,6 @@ ADMINPASS2=onionuser
# ZEEKVERSION=ZEEK # ZEEKVERSION=ZEEK
# CURCLOSEDAYS= # CURCLOSEDAYS=
# EVALADVANCED=BASIC # EVALADVANCED=BASIC
# GRAFANA=1
# HELIXAPIKEY= # HELIXAPIKEY=
HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12 HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
HNSENSOR=inherit HNSENSOR=inherit

View File

@@ -21,7 +21,6 @@ BNICS=eth1
ZEEKVERSION=ZEEK ZEEKVERSION=ZEEK
# CURCLOSEDAYS= # CURCLOSEDAYS=
# EVALADVANCED=BASIC # EVALADVANCED=BASIC
# GRAFANA=1
# HELIXAPIKEY= # HELIXAPIKEY=
HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12 HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
HNSENSOR=inherit HNSENSOR=inherit

View File

@@ -21,7 +21,6 @@ BASICSURI=7
ZEEKVERSION=ZEEK ZEEKVERSION=ZEEK
# CURCLOSEDAYS= # CURCLOSEDAYS=
# EVALADVANCED=BASIC # EVALADVANCED=BASIC
GRAFANA=1
# HELIXAPIKEY= # HELIXAPIKEY=
HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12 HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
HNSENSOR=inherit HNSENSOR=inherit

View File

@@ -21,7 +21,6 @@ ADMINPASS2=onionuser
# ZEEKVERSION=ZEEK # ZEEKVERSION=ZEEK
# CURCLOSEDAYS= # CURCLOSEDAYS=
# EVALADVANCED=BASIC # EVALADVANCED=BASIC
# GRAFANA=1
# HELIXAPIKEY= # HELIXAPIKEY=
HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12 HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
HNSENSOR=inherit HNSENSOR=inherit

View File

@@ -21,7 +21,6 @@ BNICS=eth1
ZEEKVERSION=ZEEK ZEEKVERSION=ZEEK
# CURCLOSEDAYS= # CURCLOSEDAYS=
# EVALADVANCED=BASIC # EVALADVANCED=BASIC
# GRAFANA=1
# HELIXAPIKEY= # HELIXAPIKEY=
HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12 HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
HNSENSOR=inherit HNSENSOR=inherit

View File

@@ -21,7 +21,6 @@ BASICSURI=7
ZEEKVERSION=ZEEK ZEEKVERSION=ZEEK
# CURCLOSEDAYS= # CURCLOSEDAYS=
# EVALADVANCED=BASIC # EVALADVANCED=BASIC
GRAFANA=1
# HELIXAPIKEY= # HELIXAPIKEY=
HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12 HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
HNSENSOR=inherit HNSENSOR=inherit

View File

@@ -21,7 +21,6 @@ ADMINPASS2=onionuser
# ZEEKVERSION=ZEEK # ZEEKVERSION=ZEEK
# CURCLOSEDAYS= # CURCLOSEDAYS=
# EVALADVANCED=BASIC # EVALADVANCED=BASIC
# GRAFANA=1
# HELIXAPIKEY= # HELIXAPIKEY=
HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12 HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
HNSENSOR=inherit HNSENSOR=inherit

View File

@@ -21,7 +21,6 @@ BNICS=ens19
ZEEKVERSION=ZEEK ZEEKVERSION=ZEEK
# CURCLOSEDAYS= # CURCLOSEDAYS=
# EVALADVANCED=BASIC # EVALADVANCED=BASIC
# GRAFANA=1
# HELIXAPIKEY= # HELIXAPIKEY=
HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12 HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
HNSENSOR=inherit HNSENSOR=inherit

View File

@@ -21,7 +21,6 @@ BASICSURI=7
ZEEKVERSION=SURICATA ZEEKVERSION=SURICATA
# CURCLOSEDAYS= # CURCLOSEDAYS=
# EVALADVANCED=BASIC # EVALADVANCED=BASIC
GRAFANA=1
# HELIXAPIKEY= # HELIXAPIKEY=
HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12 HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
HNSENSOR=inherit HNSENSOR=inherit

View File

@@ -21,7 +21,6 @@ ADMINPASS2=onionuser
# ZEEKVERSION=ZEEK # ZEEKVERSION=ZEEK
# CURCLOSEDAYS= # CURCLOSEDAYS=
# EVALADVANCED=BASIC # EVALADVANCED=BASIC
# GRAFANA=1
# HELIXAPIKEY= # HELIXAPIKEY=
HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12 HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
HNSENSOR=inherit HNSENSOR=inherit

View File

@@ -21,7 +21,6 @@ BNICS=ens19
ZEEKVERSION=SURICATA ZEEKVERSION=SURICATA
# CURCLOSEDAYS= # CURCLOSEDAYS=
# EVALADVANCED=BASIC # EVALADVANCED=BASIC
# GRAFANA=1
# HELIXAPIKEY= # HELIXAPIKEY=
HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12 HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
HNSENSOR=inherit HNSENSOR=inherit

View File

@@ -21,7 +21,6 @@ BNICS=eth1
ZEEKVERSION=ZEEK ZEEKVERSION=ZEEK
# CURCLOSEDAYS= # CURCLOSEDAYS=
# EVALADVANCED=BASIC # EVALADVANCED=BASIC
GRAFANA=1
# HELIXAPIKEY= # HELIXAPIKEY=
HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12 HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
HNSENSOR=inherit HNSENSOR=inherit

View File

@@ -21,7 +21,6 @@ BNICS=eth1
ZEEKVERSION=ZEEK ZEEKVERSION=ZEEK
# CURCLOSEDAYS= # CURCLOSEDAYS=
# EVALADVANCED=BASIC # EVALADVANCED=BASIC
GRAFANA=1
# HELIXAPIKEY= # HELIXAPIKEY=
HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12 HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
HNSENSOR=inherit HNSENSOR=inherit

View File

@@ -21,7 +21,6 @@ BNICS=eth1
ZEEKVERSION=ZEEK ZEEKVERSION=ZEEK
# CURCLOSEDAYS= # CURCLOSEDAYS=
# EVALADVANCED=BASIC # EVALADVANCED=BASIC
GRAFANA=1
# HELIXAPIKEY= # HELIXAPIKEY=
HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12 HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
HNSENSOR=inherit HNSENSOR=inherit

View File

@@ -21,7 +21,6 @@ BNICS=eth1
ZEEKVERSION=ZEEK ZEEKVERSION=ZEEK
# CURCLOSEDAYS= # CURCLOSEDAYS=
# EVALADVANCED=BASIC # EVALADVANCED=BASIC
GRAFANA=1
# HELIXAPIKEY= # HELIXAPIKEY=
HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12 HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
HNSENSOR=inherit HNSENSOR=inherit

View File

@@ -21,7 +21,6 @@ BNICS=ens19
ZEEKVERSION=ZEEK ZEEKVERSION=ZEEK
# CURCLOSEDAYS= # CURCLOSEDAYS=
# EVALADVANCED=BASIC # EVALADVANCED=BASIC
GRAFANA=1
# HELIXAPIKEY= # HELIXAPIKEY=
HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12 HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
HNSENSOR=inherit HNSENSOR=inherit

View File

@@ -21,7 +21,6 @@ BASICSURI=2
ZEEKVERSION=ZEEK ZEEKVERSION=ZEEK
# CURCLOSEDAYS= # CURCLOSEDAYS=
# EVALADVANCED=BASIC # EVALADVANCED=BASIC
# GRAFANA=1
# HELIXAPIKEY= # HELIXAPIKEY=
HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12 HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
HNSENSOR=inherit HNSENSOR=inherit

View File

@@ -21,7 +21,6 @@ BASICSURI=2
ZEEKVERSION=ZEEK ZEEKVERSION=ZEEK
# CURCLOSEDAYS= # CURCLOSEDAYS=
# EVALADVANCED=BASIC # EVALADVANCED=BASIC
# GRAFANA=1
# HELIXAPIKEY= # HELIXAPIKEY=
HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12 HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
HNSENSOR=inherit HNSENSOR=inherit

View File

@@ -21,7 +21,6 @@ BASICSURI=2
ZEEKVERSION=ZEEK ZEEKVERSION=ZEEK
# CURCLOSEDAYS= # CURCLOSEDAYS=
# EVALADVANCED=BASIC # EVALADVANCED=BASIC
# GRAFANA=1
# HELIXAPIKEY= # HELIXAPIKEY=
HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12 HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
HNSENSOR=inherit HNSENSOR=inherit

View File

@@ -21,7 +21,6 @@ BASICSURI=2
ZEEKVERSION=ZEEK ZEEKVERSION=ZEEK
# CURCLOSEDAYS= # CURCLOSEDAYS=
# EVALADVANCED=BASIC # EVALADVANCED=BASIC
# GRAFANA=1
# HELIXAPIKEY= # HELIXAPIKEY=
HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12 HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
HNSENSOR=inherit HNSENSOR=inherit

View File

@@ -21,7 +21,6 @@ BASICSURI=2
ZEEKVERSION=ZEEK ZEEKVERSION=ZEEK
# CURCLOSEDAYS= # CURCLOSEDAYS=
# EVALADVANCED=BASIC # EVALADVANCED=BASIC
# GRAFANA=1
# HELIXAPIKEY= # HELIXAPIKEY=
HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12 HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
HNSENSOR=inherit HNSENSOR=inherit

View File

@@ -21,7 +21,6 @@ BNICS=eth1
ZEEKVERSION=ZEEK ZEEKVERSION=ZEEK
# CURCLOSEDAYS= # CURCLOSEDAYS=
# EVALADVANCED=BASIC # EVALADVANCED=BASIC
GRAFANA=1
# HELIXAPIKEY= # HELIXAPIKEY=
HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12 HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
HNSENSOR=inherit HNSENSOR=inherit

View File

@@ -21,7 +21,6 @@ BNICS=eth1
ZEEKVERSION=ZEEK ZEEKVERSION=ZEEK
# CURCLOSEDAYS= # CURCLOSEDAYS=
# EVALADVANCED=BASIC # EVALADVANCED=BASIC
GRAFANA=1
# HELIXAPIKEY= # HELIXAPIKEY=
HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12 HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
HNSENSOR=inherit HNSENSOR=inherit

View File

@@ -21,7 +21,6 @@ BNICS=eth1
ZEEKVERSION=ZEEK ZEEKVERSION=ZEEK
# CURCLOSEDAYS= # CURCLOSEDAYS=
# EVALADVANCED=BASIC # EVALADVANCED=BASIC
GRAFANA=1
# HELIXAPIKEY= # HELIXAPIKEY=
HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12 HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
HNSENSOR=inherit HNSENSOR=inherit

View File

@@ -21,7 +21,6 @@ BNICS=eth1
ZEEKVERSION=ZEEK ZEEKVERSION=ZEEK
# CURCLOSEDAYS= # CURCLOSEDAYS=
# EVALADVANCED=BASIC # EVALADVANCED=BASIC
GRAFANA=1
# HELIXAPIKEY= # HELIXAPIKEY=
HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12 HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
HNSENSOR=inherit HNSENSOR=inherit

View File

@@ -21,7 +21,6 @@ BNICS=eth1
ZEEKVERSION=SURICATA ZEEKVERSION=SURICATA
# CURCLOSEDAYS= # CURCLOSEDAYS=
# EVALADVANCED=BASIC # EVALADVANCED=BASIC
GRAFANA=1
# HELIXAPIKEY= # HELIXAPIKEY=
HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12 HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
HNSENSOR=inherit HNSENSOR=inherit

View File

@@ -21,7 +21,6 @@ BNICS=eth1
ZEEKVERSION=ZEEK ZEEKVERSION=ZEEK
# CURCLOSEDAYS= # CURCLOSEDAYS=
# EVALADVANCED=BASIC # EVALADVANCED=BASIC
GRAFANA=1
# HELIXAPIKEY= # HELIXAPIKEY=
HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12 HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
HNSENSOR=inherit HNSENSOR=inherit

View File

@@ -21,7 +21,6 @@ BNICS=eth1
ZEEKVERSION=ZEEK ZEEKVERSION=ZEEK
# CURCLOSEDAYS= # CURCLOSEDAYS=
# EVALADVANCED=BASIC # EVALADVANCED=BASIC
GRAFANA=1
# HELIXAPIKEY= # HELIXAPIKEY=
HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12 HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
HNSENSOR=inherit HNSENSOR=inherit

Some files were not shown because too many files have changed in this diff Show More