Mirror of https://github.com/Security-Onion-Solutions/securityonion.git

README.md (10 lines changed)
@@ -1,4 +1,4 @@
-# Security Onion Hybrid Hunter Tech Preview 1.0.3
+# Security Onion Hybrid Hunter Tech Preview 1.0.4
 
 ### About
 Hybrid Hunter is a brand new Security Onion platform with the following characteristics:
@@ -56,6 +56,14 @@ sudo bash so-setup-network.sh
 ```
 Follow the prompts and reboot if asked to do so.
 
+Want to try the bleeding edge? You can install the following:
+```
+git clone https://github.com/TOoSmOotH/securityonion-saltstack
+cd securityonion-saltstack
+sudo bash so-setup-network.sh
+```
+This is an active development repo so many things can and will be broken.
+
 ### Allow Access to Kibana
 Once Setup is complete and services have initialized, you can then allow access to Kibana as follows.
@@ -5,12 +5,47 @@
 TYPE=$1
 NAME=$2
 IPADDRESS=$3
+CPUS=$4
+GUID=$5
+MANINT=$6
+ROOTFS=$7
+NSM=$8
+MONINT=$9
 
-if grep -q $IPADDRESS "/opt/so/saltstack/pillar/data/nodestab.sls"; then
-echo "Storage Node Already in There"
-else
-echo "  $NAME:" >> /opt/so/saltstack/pillar/data/nodestab.sls
-echo "    ip: $IPADDRESS" >> /opt/so/saltstack/pillar/data/nodestab.sls
-salt-call state.apply utility
+echo "Seeing if this host is already in here. If so delete it"
+if grep -q $NAME "/opt/so/saltstack/pillar/data/$TYPE.sls"; then
+echo "Node Already Present - Let's re-add it"
+awk -v blah="  $NAME:" 'BEGIN{ print_flag=1 }
+{
+if( $0 ~ blah )
+{
+print_flag=0;
+next
+}
+if( $0 ~ /^  [a-zA-Z0-9]+:$/ )
+{
+print_flag=1;
+}
+if ( print_flag == 1 )
+print $0
+
+} ' /opt/so/saltstack/pillar/data/$TYPE.sls > /opt/so/saltstack/pillar/data/tmp.$TYPE.sls
+mv /opt/so/saltstack/pillar/data/tmp.$TYPE.sls /opt/so/saltstack/pillar/data/$TYPE.sls
+echo "Deleted $NAME from the tab. Now adding it in again with updated info"
+fi
+echo "  $NAME:" >> /opt/so/saltstack/pillar/data/$TYPE.sls
+echo "    ip: $IPADDRESS" >> /opt/so/saltstack/pillar/data/$TYPE.sls
+echo "    manint: $MANINT" >> /opt/so/saltstack/pillar/data/$TYPE.sls
+echo "    totalcpus: $CPUS" >> /opt/so/saltstack/pillar/data/$TYPE.sls
+echo "    guid: $GUID" >> /opt/so/saltstack/pillar/data/$TYPE.sls
+echo "    rootfs: $ROOTFS" >> /opt/so/saltstack/pillar/data/$TYPE.sls
+echo "    nsmfs: $NSM" >> /opt/so/saltstack/pillar/data/$TYPE.sls
+if [ $TYPE == 'sensorstab' ]; then
+echo "    monint: $MONINT" >> /opt/so/saltstack/pillar/data/$TYPE.sls
+salt-call state.apply common
+fi
+if [ $TYPE == 'evaltab' ]; then
+echo "    monint: $MONINT" >> /opt/so/saltstack/pillar/data/$TYPE.sls
+salt-call state.apply common
+salt-call state.apply utility
 fi
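For reference, a run of the script with TYPE=sensorstab appends an entry of this shape to the pillar file; the hostname and values below are illustrative, not from this commit:

```yaml
sensorstab:
  sensornode1:
    ip: 192.168.1.50
    manint: eth0
    totalcpus: 8
    guid: 1a2b3c4d-0000-1111-2222-333344445555
    rootfs: /dev/mapper/root
    nsmfs: /dev/mapper/nsm
    monint: eth1
```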
pillar/data/evaltab.sls (new file, 1 line):
evaltab:

pillar/data/mastertab.sls (new file, 1 line):
mastertab:

pillar/data/sensorstab.sls (new file, 1 line):
sensorstab:
(the same deletion appears in three schedule pillar files, matching the *.schedule includes removed from the top file below:)
@@ -1,5 +0,0 @@
-schedule:
-  highstate:
-    funtion: state.highstate
-    minutes: 15
-    maxrunning: 1
@@ -1,20 +1,17 @@
 base:
   'G@role:so-sensor':
-    - sensors.schedule
     - sensors.{{ grains.host }}
     - static
     - firewall.*
     - brologs
 
   'G@role:so-master':
-    - masters.schedule
     - masters.{{ grains.host }}
     - static
     - firewall.*
     - data.*
 
   'G@role:so-eval':
-    - masters.schedule
     - masters.{{ grains.host }}
     - static
     - firewall.*
@@ -22,7 +19,6 @@ base:
     - brologs
 
   'G@role:so-node':
-    - nodes.schedule
     - nodes.{{ grains.host }}
     - static
     - firewall.*
salt/bro/cron/packetloss.sh (new file, 2 lines):
#!/bin/bash
/usr/bin/docker exec so-bro /opt/bro/bin/broctl netstats | awk -F '[ =]' '{RCVD += $5;DRP += $7;TTL += $9} END { print "rcvd: " RCVD, "dropped: " DRP, "total: " TTL}' >> /nsm/bro/logs/packetloss.log
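Each run appends one aggregate line, summed across all Bro workers; given the awk program above, the output format is (numbers illustrative):

```
rcvd: 8675309 dropped: 309 total: 8675618
```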
@@ -141,4 +141,5 @@
 #@load custom/somebropolicy.bro
 
 # Write logs in JSON
-#redef LogAscii::use_json = T;
+redef LogAscii::use_json = T;
+redef LogAscii::json_timestamps = JSON::TS_ISO8601;

@@ -129,4 +129,5 @@
 #@load custom/somebropolicy.bro
 
 # Use JSON
-#redef LogAscii::use_json = T;
+redef LogAscii::use_json = T;
+redef LogAscii::json_timestamps = JSON::TS_ISO8601;
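With use_json enabled and ISO8601 timestamps, Bro emits one JSON object per log record instead of tab-separated columns; a conn.log entry would look roughly like this (fields abbreviated, values made up):

```json
{"ts":"2019-02-14T18:22:05.318535Z","uid":"CHhAvVGS1DHFjwGM9","id.orig_h":"192.168.1.50","id.orig_p":49776,"id.resp_h":"8.8.8.8","id.resp_p":53,"proto":"udp"}
```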
@@ -35,6 +35,18 @@ brospooldir:
     - user: 937
     - makedirs: true
 
+brosfafincompletedir:
+  file.directory:
+    - name: /nsm/faf/files/incomplete
+    - user: 937
+    - makedirs: true
+
+brosfafcompletedir:
+  file.directory:
+    - name: /nsm/faf/files/complete
+    - user: 937
+    - makedirs: true
+
 # Sync the policies
 bropolicysync:
   file.recurse:
@@ -53,6 +65,21 @@ nodecfgsync:
     - group: 939
     - template: jinja
 
+plcronscript:
+  file.managed:
+    - name: /usr/local/bin/packetloss.sh
+    - source: salt://bro/cron/packetloss.sh
+    - mode: 755
+
+/usr/local/bin/packetloss.sh:
+  cron.present:
+    - user: root
+    - minute: '*/10'
+    - hour: '*'
+    - daymonth: '*'
+    - month: '*'
+    - dayweek: '*'
+
 # Sync local.bro
 {% if salt['pillar.get']('static:broversion', '') == 'COMMUNITY' %}
 localbrosync:
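The cron.present state above should result in a root crontab entry equivalent to:

```
*/10 * * * * /usr/local/bin/packetloss.sh
```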
@@ -38,3 +38,16 @@ x509_signing_policies:
       - authorityKeyIdentifier: keyid,issuer:always
       - days_valid: 3000
       - copypath: /etc/pki/issued_certs/
+  influxdb:
+    - minions: '*'
+    - signing_private_key: /etc/pki/ca.key
+    - signing_cert: /etc/pki/ca.crt
+    - C: US
+    - ST: Utah
+    - L: Salt Lake City
+    - basicConstraints: "critical CA:false"
+    - keyUsage: "critical keyEncipherment"
+    - subjectKeyIdentifier: hash
+    - authorityKeyIdentifier: keyid,issuer:always
+    - days_valid: 3000
+    - copypath: /etc/pki/issued_certs/
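A minion would consume this policy through Salt's x509 state module; a minimal sketch (the state id, target paths, and CA minion id here are assumptions for illustration, not from this commit):

```yaml
influxdb_cert:
  x509.certificate_managed:
    - name: /etc/ssl/influxdb.crt
    - ca_server: master            # hypothetical: minion id of the host holding /etc/pki/ca.key
    - signing_policy: influxdb
    - public_key: /etc/ssl/influxdb.key
    - CN: {{ grains['id'] }}
    - days_remaining: 30
```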
salt/common/grafana/etc/dashboards/dashboard.yml (new file, 35 lines):
apiVersion: 1

providers:

{%- if grains['role'] != 'so-eval' %}
- name: 'Master'
  folder: 'Master'
  type: file
  disableDeletion: false
  editable: true
  options:
    path: /etc/grafana/grafana_dashboards/master
- name: 'Forward Nodes'
  folder: 'Forward Nodes'
  type: file
  disableDeletion: false
  editable: true
  options:
    path: /etc/grafana/grafana_dashboards/forward_nodes
- name: 'Storage Nodes'
  folder: 'Storage Nodes'
  type: file
  disableDeletion: false
  editable: true
  options:
    path: /etc/grafana/grafana_dashboards/storage_nodes
{%- else %}
- name: 'Security Onion'
  folder: 'Eval Mode'
  type: file
  disableDeletion: false
  editable: true
  options:
    path: /etc/grafana/grafana_dashboards/eval
{% endif %}
salt/common/grafana/etc/datasources/influxdb.yaml (new file, 18 lines):
{%- set MASTER = salt['pillar.get']('static:masterip', '') %}
apiVersion: 1

deleteDatasources:
  - name: Graphite
    orgId: 1

datasources:
  - name: InfluxDB
    type: influxdb
    access: proxy
    database: telegraf
    url: https://{{ MASTER }}:8086
    jsonData:
      tlsAuth: false
      tlsAuthWithCACert: false
      tlsSkipVerify: true
    version: 1
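The jinja set line pulls the master's IP from pillar, so with static:masterip set to, say, 192.168.1.10, the datasource line renders to a concrete address (the IP is illustrative):

```yaml
url: https://192.168.1.10:8086
```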
salt/common/grafana/etc/grafana.ini (new file, 482 lines):
##################### Grafana Configuration Example #####################
#
# Everything has defaults so you only need to uncomment things you want to
# change

# possible values : production, development
;app_mode = production

# instance name, defaults to HOSTNAME environment variable value or hostname if HOSTNAME var is empty
;instance_name = ${HOSTNAME}

#################################### Paths ####################################
[paths]
# Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used)
;data = /var/lib/grafana

# Temporary files in `data` directory older than given duration will be removed
;temp_data_lifetime = 24h

# Directory where grafana can store logs
;logs = /var/log/grafana

# Directory where grafana will automatically scan and look for plugins
;plugins = /var/lib/grafana/plugins

# folder that contains provisioning config files that grafana will apply on startup and while running.
;provisioning = conf/provisioning

#################################### Server ####################################
[server]
# Protocol (http, https, socket)
;protocol = http

# The ip address to bind to, empty will bind to all interfaces
;http_addr =

# The http port to use
;http_port = 3000

# The public facing domain name used to access grafana from a browser
;domain = localhost

# Redirect to correct domain if host header does not match domain
# Prevents DNS rebinding attacks
;enforce_domain = false

# The full public facing url you use in browser, used for redirects and emails
# If you use reverse proxy and sub path specify full url (with sub path)
;root_url = http://localhost:3000

# Log web requests
;router_logging = false

# the path relative working path
;static_root_path = public

# enable gzip
;enable_gzip = false

# https certs & key file
;cert_file =
;cert_key =

# Unix socket path
;socket =

#################################### Database ####################################
[database]
# You can configure the database connection by specifying type, host, name, user and password
# as separate properties or as on string using the url properties.

# Either "mysql", "postgres" or "sqlite3", it's your choice
;type = sqlite3
;host = 127.0.0.1:3306
;name = grafana
;user = root
# If the password contains # or ; you have to wrap it with triple quotes. Ex """#password;"""
;password =

# Use either URL or the previous fields to configure the database
# Example: mysql://user:secret@host:port/database
;url =

# For "postgres" only, either "disable", "require" or "verify-full"
;ssl_mode = disable

# For "sqlite3" only, path relative to data_path setting
;path = grafana.db

# Max idle conn setting default is 2
;max_idle_conn = 2

# Max conn setting default is 0 (mean not set)
;max_open_conn =

# Connection Max Lifetime default is 14400 (means 14400 seconds or 4 hours)
;conn_max_lifetime = 14400

# Set to true to log the sql calls and execution times.
log_queries =

#################################### Session ####################################
[session]
# Either "memory", "file", "redis", "mysql", "postgres", default is "file"
;provider = file

# Provider config options
# memory: not have any config yet
# file: session dir path, is relative to grafana data_path
# redis: config like redis server e.g. `addr=127.0.0.1:6379,pool_size=100,db=grafana`
# mysql: go-sql-driver/mysql dsn config string, e.g. `user:password@tcp(127.0.0.1:3306)/database_name`
# postgres: user=a password=b host=localhost port=5432 dbname=c sslmode=disable
;provider_config = sessions

# Session cookie name
;cookie_name = grafana_sess

# If you use session in https only, default is false
;cookie_secure = false

# Session life time, default is 86400
;session_life_time = 86400

#################################### Data proxy ###########################
[dataproxy]

# This enables data proxy logging, default is false
;logging = false

#################################### Analytics ####################################
[analytics]
# Server reporting, sends usage counters to stats.grafana.org every 24 hours.
# No ip addresses are being tracked, only simple counters to track
# running instances, dashboard and error counts. It is very helpful to us.
# Change this option to false to disable reporting.
;reporting_enabled = true

# Set to false to disable all checks to https://grafana.net
# for new vesions (grafana itself and plugins), check is used
# in some UI views to notify that grafana or plugin update exists
# This option does not cause any auto updates, nor send any information
# only a GET request to http://grafana.com to get latest versions
;check_for_updates = true

# Google Analytics universal tracking code, only enabled if you specify an id here
;google_analytics_ua_id =

#################################### Security ####################################
[security]
# default admin user, created on startup
;admin_user = admin

# default admin password, can be changed before first start of grafana, or in profile settings
;admin_password = admin

# used for signing
;secret_key = SW2YcwTIb9zpOOhoPsMm

# Auto-login remember days
;login_remember_days = 7
;cookie_username = grafana_user
;cookie_remember_name = grafana_remember

# disable gravatar profile images
;disable_gravatar = false

# data source proxy whitelist (ip_or_domain:port separated by spaces)
;data_source_proxy_whitelist =

# disable protection against brute force login attempts
;disable_brute_force_login_protection = false

#################################### Snapshots ###########################
[snapshots]
# snapshot sharing options
;external_enabled = true
;external_snapshot_url = https://snapshots-origin.raintank.io
;external_snapshot_name = Publish to snapshot.raintank.io

# remove expired snapshot
;snapshot_remove_expired = true

#################################### Dashboards History ##################
[dashboards]
# Number dashboard versions to keep (per dashboard). Default: 20, Minimum: 1
;versions_to_keep = 20

#################################### Users ###############################
[users]
# disable user signup / registration
;allow_sign_up = true

# Allow non admin users to create organizations
;allow_org_create = true

# Set to true to automatically assign new users to the default organization (id 1)
;auto_assign_org = true

# Default role new users will be automatically assigned (if disabled above is set to true)
;auto_assign_org_role = Viewer

# Background text for the user field on the login page
;login_hint = email or username

# Default UI theme ("dark" or "light")
;default_theme = dark

# External user management, these options affect the organization users view
;external_manage_link_url =
;external_manage_link_name =
;external_manage_info =

# Viewers can edit/inspect dashboard settings in the browser. But not save the dashboard.
;viewers_can_edit = false

[auth]
# Set to true to disable (hide) the login form, useful if you use OAuth, defaults to false
;disable_login_form = false

# Set to true to disable the signout link in the side menu. useful if you use auth.proxy, defaults to false
;disable_signout_menu = false

# URL to redirect the user to after sign out
;signout_redirect_url =

#################################### Anonymous Auth ##########################
[auth.anonymous]
# enable anonymous access
;enabled = false

# specify organization name that should be used for unauthenticated users
;org_name = Main Org.

# specify role for unauthenticated users
;org_role = Viewer

#################################### Github Auth ##########################
[auth.github]
;enabled = false
;allow_sign_up = true
;client_id = some_id
;client_secret = some_secret
;scopes = user:email,read:org
;auth_url = https://github.com/login/oauth/authorize
;token_url = https://github.com/login/oauth/access_token
;api_url = https://api.github.com/user
;team_ids =
;allowed_organizations =

#################################### Google Auth ##########################
[auth.google]
;enabled = false
;allow_sign_up = true
;client_id = some_client_id
;client_secret = some_client_secret
;scopes = https://www.googleapis.com/auth/userinfo.profile https://www.googleapis.com/auth/userinfo.email
;auth_url = https://accounts.google.com/o/oauth2/auth
;token_url = https://accounts.google.com/o/oauth2/token
;api_url = https://www.googleapis.com/oauth2/v1/userinfo
;allowed_domains =

#################################### Generic OAuth ##########################
[auth.generic_oauth]
;enabled = false
;name = OAuth
;allow_sign_up = true
;client_id = some_id
;client_secret = some_secret
;scopes = user:email,read:org
;auth_url = https://foo.bar/login/oauth/authorize
;token_url = https://foo.bar/login/oauth/access_token
;api_url = https://foo.bar/user
;team_ids =
;allowed_organizations =
;tls_skip_verify_insecure = false
;tls_client_cert =
;tls_client_key =
;tls_client_ca =

#################################### Grafana.com Auth ####################
[auth.grafana_com]
;enabled = false
;allow_sign_up = true
;client_id = some_id
;client_secret = some_secret
;scopes = user:email
;allowed_organizations =

#################################### Auth Proxy ##########################
[auth.proxy]
;enabled = false
;header_name = X-WEBAUTH-USER
;header_property = username
;auto_sign_up = true
;ldap_sync_ttl = 60
;whitelist = 192.168.1.1, 192.168.2.1
;headers = Email:X-User-Email, Name:X-User-Name

#################################### Basic Auth ##########################
[auth.basic]
;enabled = true

#################################### Auth LDAP ##########################
[auth.ldap]
;enabled = false
;config_file = /etc/grafana/ldap.toml
;allow_sign_up = true

#################################### SMTP / Emailing ##########################
[smtp]
;enabled = false
;host = localhost:25
;user =
# If the password contains # or ; you have to wrap it with trippel quotes. Ex """#password;"""
;password =
;cert_file =
;key_file =
;skip_verify = false
;from_address = admin@grafana.localhost
;from_name = Grafana
# EHLO identity in SMTP dialog (defaults to instance_name)
;ehlo_identity = dashboard.example.com

[emails]
;welcome_email_on_sign_up = false

#################################### Logging ##########################
[log]
# Either "console", "file", "syslog". Default is console and file
# Use space to separate multiple modes, e.g. "console file"
;mode = console file

# Either "debug", "info", "warn", "error", "critical", default is "info"
;level = info

# optional settings to set different levels for specific loggers. Ex filters = sqlstore:debug
;filters =

# For "console" mode only
[log.console]
;level =

# log line format, valid options are text, console and json
;format = console

# For "file" mode only
[log.file]
;level =

# log line format, valid options are text, console and json
;format = text

# This enables automated log rotate(switch of following options), default is true
;log_rotate = true

# Max line number of single file, default is 1000000
;max_lines = 1000000

# Max size shift of single file, default is 28 means 1 << 28, 256MB
;max_size_shift = 28

# Segment log daily, default is true
;daily_rotate = true

# Expired days of log file(delete after max days), default is 7
;max_days = 7

[log.syslog]
;level =

# log line format, valid options are text, console and json
;format = text

# Syslog network type and address. This can be udp, tcp, or unix. If left blank, the default unix endpoints will be used.
;network =
;address =

# Syslog facility. user, daemon and local0 through local7 are valid.
;facility =

# Syslog tag. By default, the process' argv[0] is used.
;tag =

#################################### Alerting ############################
[alerting]
# Disable alerting engine & UI features
;enabled = true
# Makes it possible to turn off alert rule execution but alerting UI is visible
;execute_alerts = true

# Default setting for new alert rules. Defaults to categorize error and timeouts as alerting. (alerting, keep_state)
;error_or_timeout = alerting

# Default setting for how Grafana handles nodata or null values in alerting. (alerting, no_data, keep_state, ok)
;nodata_or_nullvalues = no_data

# Alert notifications can include images, but rendering many images at the same time can overload the server
# This limit will protect the server from render overloading and make sure notifications are sent out quickly
;concurrent_render_limit = 5

#################################### Explore #############################
[explore]
# Enable the Explore section
;enabled = false

#################################### Internal Grafana Metrics ##########################
# Metrics available at HTTP API Url /metrics
[metrics]
# Disable / Enable internal metrics
;enabled = true

# Publish interval
;interval_seconds = 10

# Send internal metrics to Graphite
[metrics.graphite]
# Enable by setting the address setting (ex localhost:2003)
;address =
;prefix = prod.grafana.%(instance_name)s.

#################################### Distributed tracing ############
[tracing.jaeger]
# Enable by setting the address sending traces to jaeger (ex localhost:6831)
;address = localhost:6831
# Tag that will always be included in when creating new spans. ex (tag1:value1,tag2:value2)
;always_included_tag = tag1:value1
# Type specifies the type of the sampler: const, probabilistic, rateLimiting, or remote
;sampler_type = const
# jaeger samplerconfig param
# for "const" sampler, 0 or 1 for always false/true respectively
# for "probabilistic" sampler, a probability between 0 and 1
# for "rateLimiting" sampler, the number of spans per second
# for "remote" sampler, param is the same as for "probabilistic"
# and indicates the initial sampling rate before the actual one
# is received from the mothership
;sampler_param = 1

#################################### Grafana.com integration ##########################
# Url used to import dashboards directly from Grafana.com
[grafana_com]
;url = https://grafana.com

#################################### External image storage ##########################
[external_image_storage]
# Used for uploading images to public servers so they can be included in slack/email messages.
# you can choose between (s3, webdav, gcs, azure_blob, local)
;provider =

[external_image_storage.s3]
;bucket =
;region =
;path =
;access_key =
;secret_key =

[external_image_storage.webdav]
;url =
;public_url =
;username =
;password =

[external_image_storage.gcs]
;key_file =
;bucket =
;path =

[external_image_storage.azure_blob]
;account_name =
;account_key =
;container_name =

[external_image_storage.local]
# does not require any configuration

[rendering]
# Options to configure external image rendering server like https://github.com/grafana/grafana-image-renderer
;server_url =
;callback_url =

[enterprise]
# Path to a valid Grafana Enterprise license.jwt file
;license_path =
New dashboard JSON files (diff contents suppressed as too large):
salt/common/grafana/grafana_dashboards/eval/eval.json (4222 lines)
salt/common/grafana/grafana_dashboards/forward_nodes/sensor.json (3936 lines)
salt/common/grafana/grafana_dashboards/master/master.json (3911 lines)
salt/common/grafana/grafana_dashboards/storage_nodes/storage.json (3439 lines)
560
salt/common/influxdb/etc/influxdb.conf
Normal file
560
salt/common/influxdb/etc/influxdb.conf
Normal file
@@ -0,0 +1,560 @@
|
|||||||
|
### Welcome to the InfluxDB configuration file.
|
||||||
|
|
||||||
|
# The values in this file override the default values used by the system if
|
||||||
|
# a config option is not specified. The commented out lines are the configuration
|
||||||
|
# field and the default value used. Uncommenting a line and changing the value
|
||||||
|
# will change the value used at runtime when the process is restarted.
|
||||||
|
|
||||||
|
# Once every 24 hours InfluxDB will report usage data to usage.influxdata.com
|
||||||
|
# The data includes a random ID, os, arch, version, the number of series and other
|
||||||
|
# usage data. No data from user databases is ever transmitted.
|
||||||
|
# Change this option to true to disable reporting.
|
||||||
|
# reporting-disabled = false
|
||||||
|
|
||||||
|
# Bind address to use for the RPC service for backup and restore.
|
||||||
|
# bind-address = "127.0.0.1:8088"
|
||||||
|
|
||||||
|
###
|
||||||
|
### [meta]
|
||||||
|
###
|
||||||
|
### Controls the parameters for the Raft consensus group that stores metadata
|
||||||
|
### about the InfluxDB cluster.
|
||||||
|
###
|
||||||
|
|
||||||
|
[meta]
|
||||||
|
# Where the metadata/raft database is stored
|
||||||
|
dir = "/var/lib/influxdb/meta"
|
||||||
|
|
||||||
|
# Automatically create a default retention policy when creating a database.
|
||||||
|
# retention-autocreate = true
|
||||||
|
|
||||||
|
# If log messages are printed for the meta service
|
||||||
|
# logging-enabled = true
|
||||||
|
|
||||||
|
###
|
||||||
|
### [data]
|
||||||
|
###
|
||||||
|
### Controls where the actual shard data for InfluxDB lives and how it is
|
||||||
|
### flushed from the WAL. "dir" may need to be changed to a suitable place
|
||||||
|
### for your system, but the WAL settings are an advanced configuration. The
|
||||||
|
### defaults should work for most systems.
|
||||||
|
###
|
||||||
|
|
||||||
|
[data]
|
||||||
|
# The directory where the TSM storage engine stores TSM files.
|
||||||
|
dir = "/var/lib/influxdb/data"
|
||||||
|
|
||||||
|
# The directory where the TSM storage engine stores WAL files.
|
||||||
|
wal-dir = "/var/lib/influxdb/wal"
|
||||||
|
|
||||||
|
# The amount of time that a write will wait before fsyncing. A duration
|
||||||
|
# greater than 0 can be used to batch up multiple fsync calls. This is useful for slower
|
||||||
|
# disks or when WAL write contention is seen. A value of 0s fsyncs every write to the WAL.
|
||||||
|
# Values in the range of 0-100ms are recommended for non-SSD disks.
|
||||||
|
# wal-fsync-delay = "0s"
|
||||||
|
|
||||||
|
|
||||||
|
# The type of shard index to use for new shards. The default is an in-memory index that is
|
||||||
|
# recreated at startup. A value of "tsi1" will use a disk based index that supports higher
|
||||||
|
# cardinality datasets.
|
||||||
|
# index-version = "inmem"
|
||||||
|
|
||||||
|
# Trace logging provides more verbose output around the tsm engine. Turning
|
||||||
|
# this on can provide more useful output for debugging tsm engine issues.
|
||||||
|
# trace-logging-enabled = false
|
||||||
|
|
||||||
|
# Whether queries should be logged before execution. Very useful for troubleshooting, but will
|
||||||
|
# log any sensitive data contained within a query.
|
||||||
|
# query-log-enabled = true
|
||||||
|
|
||||||
|
# Validates incoming writes to ensure keys only have valid unicode characters.
|
||||||
|
# This setting will incur a small overhead because every key must be checked.
|
||||||
|
# validate-keys = false
|
||||||
|
|
||||||
|
# Settings for the TSM engine
|
||||||
|
|
||||||
|
# CacheMaxMemorySize is the maximum size a shard's cache can
|
||||||
|
# reach before it starts rejecting writes.
|
||||||
|
# Valid size suffixes are k, m, or g (case insensitive, 1024 = 1k).
|
||||||
|
# Values without a size suffix are in bytes.
|
||||||
|
# cache-max-memory-size = "1g"
|
||||||
|
|
||||||
|
# CacheSnapshotMemorySize is the size at which the engine will
|
||||||
|
# snapshot the cache and write it to a TSM file, freeing up memory
|
||||||
|
# Valid size suffixes are k, m, or g (case insensitive, 1024 = 1k).
|
||||||
|
# Values without a size suffix are in bytes.
|
||||||
|
# cache-snapshot-memory-size = "25m"
|
||||||
|
|
||||||
|
# CacheSnapshotWriteColdDuration is the length of time at
|
||||||
|
# which the engine will snapshot the cache and write it to
|
||||||
|
# a new TSM file if the shard hasn't received writes or deletes
|
||||||
|
# cache-snapshot-write-cold-duration = "10m"
|
||||||
|
|
||||||
|
# CompactFullWriteColdDuration is the duration at which the engine
|
||||||
|
# will compact all TSM files in a shard if it hasn't received a
|
||||||
|
# write or delete
|
||||||
|
# compact-full-write-cold-duration = "4h"
|
||||||
|
|
||||||
|
# The maximum number of concurrent full and level compactions that can run at one time. A
|
||||||
|
# value of 0 results in 50% of runtime.GOMAXPROCS(0) used at runtime. Any number greater
|
||||||
|
# than 0 limits compactions to that value. This setting does not apply
|
||||||
|
# to cache snapshotting.
|
||||||
|
# max-concurrent-compactions = 0
|
||||||
|
|
||||||
|
# CompactThroughput is the rate limit in bytes per second that we
|
||||||
|
# will allow TSM compactions to write to disk. Note that short bursts are allowed
|
||||||
|
# to happen at a possibly larger value, set by CompactThroughputBurst
|
||||||
|
# compact-throughput = "48m"
|
||||||
|
|
||||||
|
# CompactThroughputBurst is the rate limit in bytes per second that we
|
||||||
|
# will allow TSM compactions to write to disk.
|
||||||
|
# compact-throughput-burst = "48m"
|
||||||
|
|
||||||
|
# The threshold, in bytes, when an index write-ahead log file will compact
|
||||||
|
# into an index file. Lower sizes will cause log files to be compacted more
|
||||||
|
# quickly and result in lower heap usage at the expense of write throughput.
|
||||||
|
# Higher sizes will be compacted less frequently, store more series in-memory,
|
||||||
|
# and provide higher write throughput.
|
||||||
|
# Valid size suffixes are k, m, or g (case insensitive, 1024 = 1k).
|
||||||
|
# Values without a size suffix are in bytes.
|
||||||
|
# max-index-log-file-size = "1m"
|
||||||
|
|
||||||
|
# The maximum series allowed per database before writes are dropped. This limit can prevent
|
||||||
|
# high cardinality issues at the database level. This limit can be disabled by setting it to
|
||||||
|
# 0.
|
||||||
|
# max-series-per-database = 1000000
|
||||||
|
|
||||||
|
# The maximum number of tag values per tag that are allowed before writes are dropped. This limit
|
||||||
|
# can prevent high cardinality tag values from being written to a measurement. This limit can be
|
||||||
|
# disabled by setting it to 0.
|
||||||
|
# max-values-per-tag = 100000
|
||||||
|
|
||||||
|
# If true, then the mmap advise value MADV_WILLNEED will be provided to the kernel with respect to
|
||||||
|
# TSM files. This setting has been found to be problematic on some kernels, and defaults to off.
|
||||||
|
# It might help users who have slow disks in some cases.
|
||||||
|
# tsm-use-madv-willneed = false
|
||||||
|
|
||||||
|
###
|
||||||
|
### [coordinator]
|
||||||
|
###
|
||||||
|
### Controls the clustering service configuration.
|
||||||
|
###
|
||||||
|
|
||||||
|
[coordinator]
|
||||||
|
# The default time a write request will wait until a "timeout" error is returned to the caller.
|
||||||
|
# write-timeout = "10s"
|
||||||
|
|
||||||
|
# The maximum number of concurrent queries allowed to be executing at one time. If a query is
|
||||||
|
# executed and exceeds this limit, an error is returned to the caller. This limit can be disabled
|
||||||
|
# by setting it to 0.
|
||||||
|
# max-concurrent-queries = 0
|
||||||
|
|
||||||
|
# The maximum time a query will is allowed to execute before being killed by the system. This limit
|
||||||
|
# can help prevent run away queries. Setting the value to 0 disables the limit.
|
||||||
|
# query-timeout = "0s"
|
||||||
|
|
||||||
|
# The time threshold when a query will be logged as a slow query. This limit can be set to help
|
||||||
|
# discover slow or resource intensive queries. Setting the value to 0 disables the slow query logging.
|
||||||
|
# log-queries-after = "0s"
|
||||||
|
|
||||||
|
# The maximum number of points a SELECT can process. A value of 0 will make
|
||||||
|
# the maximum point count unlimited. This will only be checked every second so queries will not
|
||||||
|
# be aborted immediately when hitting the limit.
|
||||||
|
# max-select-point = 0
|
||||||
|
|
||||||
|
# The maximum number of series a SELECT can run. A value of 0 will make the maximum series
|
||||||
|
# count unlimited.
|
||||||
|
# max-select-series = 0
|
||||||
|
|
||||||
|
# The maxium number of group by time bucket a SELECT can create. A value of zero will max the maximum
|
||||||
|
# number of buckets unlimited.
|
||||||
|
# max-select-buckets = 0
|
||||||
|
|
||||||
|
###
|
||||||
|
### [retention]
|
||||||
|
###
|
||||||
|
### Controls the enforcement of retention policies for evicting old data.
|
||||||
|
###
|
||||||
|
|
||||||
|
[retention]
|
||||||
|
# Determines whether retention policy enforcement enabled.
|
||||||
|
# enabled = true
|
||||||
|
|
||||||
|
# The interval of time when retention policy enforcement checks run.
|
||||||
|
# check-interval = "30m"
|
||||||
|
|
||||||
|
###
|
||||||
|
### [shard-precreation]
|
||||||
|
###
|
||||||
|
### Controls the precreation of shards, so they are available before data arrives.
|
||||||
|
### Only shards that, after creation, will have both a start- and end-time in the
|
||||||
|
### future, will ever be created. Shards are never precreated that would be wholly
|
||||||
|
### or partially in the past.
|
||||||
|
|
||||||
|
[shard-precreation]
|
||||||
|
# Determines whether shard pre-creation service is enabled.
|
||||||
|
# enabled = true
|
||||||
|
|
||||||
|
# The interval of time when the check to pre-create new shards runs.
|
||||||
|
# check-interval = "10m"
|
||||||
|
|
||||||
|
# The default period ahead of the endtime of a shard group that its successor
|
||||||
|
# group is created.
|
||||||
|
# advance-period = "30m"
|
||||||
|
|
||||||
|
###
|
||||||
|
### Controls the system self-monitoring, statistics and diagnostics.
|
||||||
|
###
|
||||||
|
### The internal database for monitoring data is created automatically if
|
||||||
|
### if it does not already exist. The target retention within this database
|
||||||
|
### is called 'monitor' and is also created with a retention period of 7 days
|
||||||
|
### and a replication factor of 1, if it does not exist. In all cases the
|
||||||
|
### this retention policy is configured as the default for the database.
|
||||||
|
|
||||||
|
[monitor]
|
||||||
|
# Whether to record statistics internally.
|
||||||
|
# store-enabled = true
|
||||||
|
|
||||||
|
# The destination database for recorded statistics
|
||||||
|
# store-database = "_internal"
|
||||||
|
|
||||||
|
# The interval at which to record statistics
|
||||||
|
# store-interval = "10s"
|
||||||
|
|
||||||
|
###
|
||||||
|
### [http]
|
||||||
|
###
|
||||||
|
### Controls how the HTTP endpoints are configured. These are the primary
|
||||||
|
### mechanism for getting data into and out of InfluxDB.
|
||||||
|
###
|
||||||
|
|
||||||
|
[http]
|
||||||
|
# Determines whether HTTP endpoint is enabled.
|
||||||
|
# enabled = true
|
||||||
|
|
||||||
|
# Determines whether the Flux query endpoint is enabled.
|
||||||
|
# flux-enabled = false
|
||||||
|
|
||||||
|
# The bind address used by the HTTP service.
|
||||||
|
# bind-address = ":8086"
|
||||||
|
|
||||||
|
# Determines whether user authentication is enabled over HTTP/HTTPS.
|
||||||
|
# auth-enabled = false
|
||||||
|
|
||||||
|
# The default realm sent back when issuing a basic auth challenge.
|
||||||
|
# realm = "InfluxDB"
|
||||||
|
|
||||||
|
# Determines whether HTTP request logging is enabled.
|
||||||
|
# log-enabled = true
|
||||||
|
|
||||||
|
# Determines whether the HTTP write request logs should be suppressed when the log is enabled.
|
||||||
|
# suppress-write-log = false
|
||||||
|
|
||||||
|
# When HTTP request logging is enabled, this option specifies the path where
|
||||||
|
# log entries should be written. If unspecified, the default is to write to stderr, which
|
||||||
|
# intermingles HTTP logs with internal InfluxDB logging.
|
||||||
|
#
|
||||||
|
# If influxd is unable to access the specified path, it will log an error and fall back to writing
|
||||||
|
# the request log to stderr.
|
||||||
|
# access-log-path = ""
|
||||||
|
|
||||||
|
# Filters which requests should be logged. Each filter is of the pattern NNN, NNX, or NXX where N is
|
||||||
|
# a number and X is a wildcard for any number. To filter all 5xx responses, use the string 5xx.
|
||||||
|
# If multiple filters are used, then only one has to match. The default is to have no filters which
|
||||||
|
# will cause every request to be printed.
|
||||||
|
# access-log-status-filters = []
|
||||||
|
|
||||||
|
# Determines whether detailed write logging is enabled.
|
||||||
|
# write-tracing = false
|
||||||
|
|
||||||
|
# Determines whether the pprof endpoint is enabled. This endpoint is used for
|
||||||
|
# troubleshooting and monitoring.
|
||||||
|
# pprof-enabled = true
|
||||||
|
|
||||||
|
# Enables a pprof endpoint that binds to localhost:6060 immediately on startup.
|
||||||
|
# This is only needed to debug startup issues.
|
||||||
|
# debug-pprof-enabled = false
|
||||||
|
|
||||||
|
# Determines whether HTTPS is enabled.
|
||||||
|
https-enabled = true
|
||||||
|
|
||||||
|
# The SSL certificate to use when HTTPS is enabled.
|
||||||
|
https-certificate = "/etc/ssl/influxdb.crt"
|
||||||
|
|
||||||
|
# Use a separate private key location.
|
||||||
|
https-private-key = "/etc/ssl/influxdb.key"
|
||||||
|
|
||||||
|
# The JWT auth shared secret to validate requests using JSON web tokens.
|
||||||
|
# shared-secret = ""
|
||||||
|
|
||||||
|
# The default chunk size for result sets that should be chunked.
|
||||||
|
# max-row-limit = 0
|
||||||
|
|
||||||
|
# The maximum number of HTTP connections that may be open at once. New connections that
|
||||||
|
# would exceed this limit are dropped. Setting this value to 0 disables the limit.
|
||||||
|
# max-connection-limit = 0
|
||||||
|
|
||||||
|
# Enable http service over unix domain socket
|
||||||
|
# unix-socket-enabled = false
|
||||||
|
|
||||||
|
# The path of the unix domain socket.
|
||||||
|
# bind-socket = "/var/run/influxdb.sock"
|
||||||
|
|
||||||
|
# The maximum size of a client request body, in bytes. Setting this value to 0 disables the limit.
|
||||||
|
# max-body-size = 25000000
|
||||||
|
|
||||||
|
# The maximum number of writes processed concurrently.
|
||||||
|
# Setting this to 0 disables the limit.
|
||||||
|
# max-concurrent-write-limit = 0
|
||||||
|
|
||||||
|
# The maximum number of writes queued for processing.
|
||||||
|
# Setting this to 0 disables the limit.
|
||||||
|
# max-enqueued-write-limit = 0
|
||||||
|
|
||||||
|
# The maximum duration for a write to wait in the queue to be processed.
|
||||||
|
# Setting this to 0 or setting max-concurrent-write-limit to 0 disables the limit.
|
||||||
|
# enqueued-write-timeout = 0
|
||||||
|
|
||||||
|
###
|
||||||
|
### [logging]
|
||||||
|
###
|
||||||
|
### Controls how the logger emits logs to the output.
|
||||||
|
###
|
||||||
|
|
||||||
|
[logging]
|
||||||
|
# Determines which log encoder to use for logs. Available options
|
||||||
|
# are auto, logfmt, and json. auto will use a more a more user-friendly
|
||||||
|
# output format if the output terminal is a TTY, but the format is not as
|
||||||
|
# easily machine-readable. When the output is a non-TTY, auto will use
|
||||||
|
# logfmt.
|
||||||
|
# format = "auto"
|
||||||
|
|
||||||
|
# Determines which level of logs will be emitted. The available levels
|
||||||
|
# are error, warn, info, and debug. Logs that are equal to or above the
|
||||||
|
# specified level will be emitted.
|
||||||
|
# level = "info"
|
||||||
|
|
||||||
|
# Suppresses the logo output that is printed when the program is started.
|
||||||
|
# The logo is always suppressed if STDOUT is not a TTY.
|
||||||
|
# suppress-logo = false
|
||||||
|
|
||||||
|
###
|
||||||
|
### [subscriber]
|
||||||
|
###
|
||||||
|
### Controls the subscriptions, which can be used to fork a copy of all data
|
||||||
|
### received by the InfluxDB host.
|
||||||
|
###
|
||||||
|
|
||||||
|
[subscriber]
|
||||||
|
# Determines whether the subscriber service is enabled.
|
||||||
|
# enabled = true
|
||||||
|
|
||||||
|
# The default timeout for HTTP writes to subscribers.
|
||||||
|
# http-timeout = "30s"
|
||||||
|
|
||||||
|
# Allows insecure HTTPS connections to subscribers. This is useful when testing with self-
|
||||||
|
# signed certificates.
|
||||||
|
# insecure-skip-verify = false
|
||||||
|
|
||||||
|
# The path to the PEM encoded CA certs file. If the empty string, the default system certs will be used
|
||||||
|
# ca-certs = ""
|
||||||
|
|
||||||
|
# The number of writer goroutines processing the write channel.
|
||||||
|
# write-concurrency = 40
|
||||||
|
|
||||||
|
# The number of in-flight writes buffered in the write channel.
|
||||||
|
# write-buffer-size = 1000
|
||||||
|
|
||||||
|
|
||||||
|
###
|
||||||
|
### [[graphite]]
|
||||||
|
###
|
||||||
|
### Controls one or many listeners for Graphite data.
|
||||||
|
###
|
||||||
|
|
||||||
|
[[graphite]]
|
||||||
|
# Determines whether the graphite endpoint is enabled.
|
||||||
|
# enabled = false
|
||||||
|
# database = "graphite"
|
||||||
|
# retention-policy = ""
|
||||||
|
# bind-address = ":2003"
|
||||||
|
# protocol = "tcp"
|
||||||
|
# consistency-level = "one"
|
||||||
|
|
||||||
|
# These next lines control how batching works. You should have this enabled
|
||||||
|
# otherwise you could get dropped metrics or poor performance. Batching
|
||||||
|
# will buffer points in memory if you have many coming in.
|
||||||
|
|
||||||
|
# Flush if this many points get buffered
|
||||||
|
# batch-size = 5000
|
||||||
|
|
||||||
|
# number of batches that may be pending in memory
|
||||||
|
# batch-pending = 10
|
||||||
|
|
||||||
|
# Flush at least this often even if we haven't hit buffer limit
|
||||||
|
# batch-timeout = "1s"
|
||||||
|
|
||||||
|
# UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max.
|
||||||
|
# udp-read-buffer = 0
|
||||||
|
|
||||||
|
### This string joins multiple matching 'measurement' values providing more control over the final measurement name.
|
||||||
|
# separator = "."
|
||||||
|
|
||||||
|
### Default tags that will be added to all metrics. These can be overridden at the template level
|
||||||
|
### or by tags extracted from metric
|
||||||
|
# tags = ["region=us-east", "zone=1c"]
|
||||||
|
|
||||||
|
### Each template line requires a template pattern. It can have an optional
|
||||||
|
### filter before the template and separated by spaces. It can also have optional extra
|
||||||
|
### tags following the template. Multiple tags should be separated by commas and no spaces
|
||||||
|
### similar to the line protocol format. There can be only one default template.
|
||||||
|
# templates = [
|
||||||
|
# "*.app env.service.resource.measurement",
|
||||||
|
# # Default template
|
||||||
|
# "server.*",
|
||||||
|
# ]
|
||||||
|
|
||||||
|
###
|
||||||
|
### [collectd]
|
||||||
|
###
|
||||||
|
### Controls one or many listeners for collectd data.
|
||||||
|
###
|
||||||
|
|
||||||
|
[[collectd]]
|
||||||
|
# enabled = false
|
||||||
|
# bind-address = ":25826"
|
||||||
|
# database = "collectd"
|
||||||
|
# retention-policy = ""
|
||||||
|
#
|
||||||
|
# The collectd service supports either scanning a directory for multiple types
|
||||||
|
# db files, or specifying a single db file.
|
||||||
|
# typesdb = "/usr/local/share/collectd"
|
||||||
|
#
|
||||||
|
# security-level = "none"
|
||||||
|
# auth-file = "/etc/collectd/auth_file"
|
||||||
|
|
||||||
|
# These next lines control how batching works. You should have this enabled
|
||||||
|
# otherwise you could get dropped metrics or poor performance. Batching
|
||||||
|
# will buffer points in memory if you have many coming in.
|
||||||
|
|
||||||
|
# Flush if this many points get buffered
|
||||||
|
# batch-size = 5000
|
||||||
|
|
||||||
|
# Number of batches that may be pending in memory
|
||||||
|
# batch-pending = 10
|
||||||
|
|
||||||
|
# Flush at least this often even if we haven't hit buffer limit
|
||||||
|
# batch-timeout = "10s"
|
||||||
|
|
||||||
|
# UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max.
|
||||||
|
# read-buffer = 0
|
||||||
|
|
||||||
|
# Multi-value plugins can be handled two ways.
|
||||||
|
# "split" will parse and store the multi-value plugin data into separate measurements
|
||||||
|
# "join" will parse and store the multi-value plugin as a single multi-value measurement.
|
||||||
|
# "split" is the default behavior for backward compatability with previous versions of influxdb.
|
||||||
|
# parse-multivalue-plugin = "split"
|
||||||
|
###
|
||||||
|
### [opentsdb]
|
||||||
|
###
|
||||||
|
### Controls one or many listeners for OpenTSDB data.
|
||||||
|
###
|
||||||
|
|
||||||
|
[[opentsdb]]
|
||||||
|
# enabled = false
|
||||||
|
# bind-address = ":4242"
|
||||||
|
# database = "opentsdb"
|
||||||
|
# retention-policy = ""
|
||||||
|
# consistency-level = "one"

# tls-enabled = false
# certificate= "/etc/ssl/influxdb.pem"

# Log an error for every malformed point.
# log-point-errors = true

# These next lines control how batching works. You should have this enabled
# otherwise you could get dropped metrics or poor performance. Only points
# metrics received over the telnet protocol undergo batching.

# Flush if this many points get buffered
# batch-size = 1000

# Number of batches that may be pending in memory
# batch-pending = 5

# Flush at least this often even if we haven't hit buffer limit
# batch-timeout = "1s"

###
### [[udp]]
###
### Controls the listeners for InfluxDB line protocol data via UDP.
###

[[udp]]
# enabled = false
# bind-address = ":8089"
# database = "udp"
# retention-policy = ""

# InfluxDB precision for timestamps on received points ("" or "n", "u", "ms", "s", "m", "h")
# precision = ""

# These next lines control how batching works. You should have this enabled
# otherwise you could get dropped metrics or poor performance. Batching
# will buffer points in memory if you have many coming in.

# Flush if this many points get buffered
# batch-size = 5000

# Number of batches that may be pending in memory
# batch-pending = 10

# Will flush at least this often even if we haven't hit buffer limit
# batch-timeout = "1s"

# UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max.
# read-buffer = 0

###
### [continuous_queries]
###
### Controls how continuous queries are run within InfluxDB.
###

[continuous_queries]
# Determines whether the continuous query service is enabled.
# enabled = true

# Controls whether queries are logged when executed by the CQ service.
# log-enabled = true

# Controls whether queries are logged to the self-monitoring data store.
# query-stats-enabled = false

# interval for how often continuous queries will be checked if they need to run
# run-interval = "1s"

###
### [tls]
###
### Global configuration settings for TLS in InfluxDB.
###

[tls]
# Determines the available set of cipher suites. See https://golang.org/pkg/crypto/tls/#pkg-constants
# for a list of available ciphers, which depends on the version of Go (use the query
# SHOW DIAGNOSTICS to see the version of Go used to build InfluxDB). If not specified, uses
# the default settings from Go's crypto/tls package.
# ciphers = [
#   "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
#   "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
# ]

# Minimum version of the tls protocol that will be negotiated. If not specified, uses the
# default settings from Go's crypto/tls package.
# min-version = "tls1.2"

# Maximum version of the tls protocol that will be negotiated. If not specified, uses the
# default settings from Go's crypto/tls package.
# max-version = "tls1.2"
@@ -39,6 +39,9 @@ sensorpkgs:
 {% if grains['os'] != 'CentOS' %}
       - python-docker
       - python-m2crypto
+{% else %}
+      - net-tools
+      - tcpdump
 {% endif %}
 
 # Always keep these packages up to date
@@ -61,22 +64,9 @@ docker:
   service.running:
     - enable: True
 
-# Set up docker network
-# This is broken right now.
-#dockernet:
-#  docker_network.present:
-#    - name: so-elastic-net
-#    - driver: bridge
-
-# dockernet work around
-#dockernet:
-#  cmd.script:
-#    - source: salt://common/scripts/dockernet.sh
-
-
-# Snag the so-core docker
-toosmooth/so-core:test2:
-  docker_image.present
+salt-minion:
+  service.running:
+    - enable: True
 
 # Drop the correct nginx config based on role
@@ -129,3 +119,267 @@ so-core:
       - 443:443
     - watch:
       - file: /opt/so/conf/nginx/nginx.conf
+
+# Add Telegraf to monitor all the things.
+tgraflogdir:
+  file.directory:
+    - name: /opt/so/log/telegraf
+    - makedirs: True
+
+tgrafetcdir:
+  file.directory:
+    - name: /opt/so/conf/telegraf/etc
+    - makedirs: True
+
+tgrafetsdir:
+  file.directory:
+    - name: /opt/so/conf/telegraf/scripts
+    - makedirs: True
+
+tgrafsyncscripts:
+  file.recurse:
+    - name: /opt/so/conf/telegraf/scripts
+    - user: 939
+    - group: 939
+    - file_mode: 755
+    - template: jinja
+    - source: salt://common/telegraf/scripts
+
+tgrafconf:
+  file.managed:
+    - name: /opt/so/conf/telegraf/etc/telegraf.conf
+    - user: 939
+    - group: 939
+    - template: jinja
+    - source: salt://common/telegraf/etc/telegraf.conf
+
+so-telegraf:
+  docker_container.running:
+    - image: soshybridhunter/so-telegraf:HH1.0.4
+    - environment:
+      - HOST_PROC=/host/proc
+      - HOST_ETC=/host/etc
+      - HOST_SYS=/host/sys
+      - HOST_MOUNT_PREFIX=/host
+    - network_mode: host
+    - binds:
+      - /opt/so/log/telegraf:/var/log/telegraf:rw
+      - /opt/so/conf/telegraf/etc/telegraf.conf:/etc/telegraf/telegraf.conf:ro
+      - /var/run/utmp:/var/run/utmp:ro
+      - /var/run/docker.sock:/var/run/docker.sock:ro
+      - /:/host/root:ro
+      - /sys:/host/sys:ro
+      - /proc:/host/proc:ro
+      - /nsm:/host/nsm:ro
+      - /etc:/host/etc:ro
+{% if grains['role'] == 'so-master' or grains['role'] == 'so-eval' %}
+      - /etc/pki/ca.crt:/etc/telegraf/ca.crt:ro
+{% else %}
+      - /etc/ssl/certs/intca.crt:/etc/telegraf/ca.crt:ro
+{% endif %}
+      - /etc/pki/influxdb.crt:/etc/telegraf/telegraf.crt:ro
+      - /etc/pki/influxdb.key:/etc/telegraf/telegraf.key:ro
+      - /opt/so/conf/telegraf/scripts:/scripts:ro
+      - /opt/so/log/stenographer:/var/log/stenographer:ro
+      - /opt/so/log/suricata:/var/log/suricata:ro
+    - watch:
+      - /opt/so/conf/telegraf/etc/telegraf.conf
+      - /opt/so/conf/telegraf/scripts
+
+# If its a master or eval lets install the back end for now
+{% if grains['role'] == 'so-master' or grains['role'] == 'so-eval' %}
+
+# Influx DB
+influxconfdir:
+  file.directory:
+    - name: /opt/so/conf/influxdb/etc
+    - makedirs: True
+
+influxdbdir:
+  file.directory:
+    - name: /nsm/influxdb
+    - makedirs: True
+
+influxdbconf:
+  file.managed:
+    - name: /opt/so/conf/influxdb/etc/influxdb.conf
+    - user: 939
+    - group: 939
+    - template: jinja
+    - source: salt://common/influxdb/etc/influxdb.conf
+
+so-influxdb:
+  docker_container.running:
+    - image: soshybridhunter/so-influxdb:HH1.0.4
+    - hostname: influxdb
+    - environment:
+      - INFLUXDB_HTTP_LOG_ENABLED=false
+    - binds:
+      - /opt/so/conf/influxdb/etc/influxdb.conf:/etc/influxdb/influxdb.conf:ro
+      - /nsm/influxdb:/var/lib/influxdb:rw
+      - /etc/pki/influxdb.crt:/etc/ssl/influxdb.crt:ro
+      - /etc/pki/influxdb.key:/etc/ssl/influxdb.key:ro
+    - port_bindings:
+      - 0.0.0.0:8086:8086
+
+# Grafana all the things
+grafanadir:
+  file.directory:
+    - name: /nsm/grafana
+    - user: 939
+    - group: 939
+    - makedirs: True
+
+grafanaconfdir:
+  file.directory:
+    - name: /opt/so/conf/grafana/etc
+    - user: 939
+    - group: 939
+    - makedirs: True
+
+grafanadashdir:
+  file.directory:
+    - name: /opt/so/conf/grafana/grafana_dashboards
+    - user: 939
+    - group: 939
+    - makedirs: True
+
+grafanadashmdir:
+  file.directory:
+    - name: /opt/so/conf/grafana/grafana_dashboards/master
+    - user: 939
+    - group: 939
+    - makedirs: True
+
+grafanadashevaldir:
+  file.directory:
+    - name: /opt/so/conf/grafana/grafana_dashboards/eval
+    - user: 939
+    - group: 939
+    - makedirs: True
+
+grafanadashfndir:
+  file.directory:
+    - name: /opt/so/conf/grafana/grafana_dashboards/forward_nodes
+    - user: 939
+    - group: 939
+    - makedirs: True
+
+grafanadashsndir:
+  file.directory:
+    - name: /opt/so/conf/grafana/grafana_dashboards/storage_nodes
+    - user: 939
+    - group: 939
+    - makedirs: True
+
+grafanaconf:
+  file.recurse:
+    - name: /opt/so/conf/grafana/etc
+    - user: 939
+    - group: 939
+    - template: jinja
+    - source: salt://common/grafana/etc
+
+{% if salt['pillar.get']('mastertab', False) %}
+{%- for SN, SNDATA in salt['pillar.get']('mastertab', {}).iteritems() %}
+dashboard-master:
+  file.managed:
+    - name: /opt/so/conf/grafana/grafana_dashboards/master/{{ SN }}-Master.json
+    - user: 939
+    - group: 939
+    - template: jinja
+    - source: salt://common/grafana/grafana_dashboards/master/master.json
+    - defaults:
+        SERVERNAME: {{ SN }}
+        MANINT: {{ SNDATA.manint }}
+        MONINT: {{ SNDATA.manint }}
+        CPUS: {{ SNDATA.totalcpus }}
+        UID: {{ SNDATA.guid }}
+        ROOTFS: {{ SNDATA.rootfs }}
+        NSMFS: {{ SNDATA.nsmfs }}
+
+{%- endfor %}
+{% endif %}
+
+{% if salt['pillar.get']('sensorstab', False) %}
+{%- for SN, SNDATA in salt['pillar.get']('sensorstab', {}).iteritems() %}
+dashboard-{{ SN }}:
+  file.managed:
+    - name: /opt/so/conf/grafana/grafana_dashboards/forward_nodes/{{ SN }}-Sensor.json
+    - user: 939
+    - group: 939
+    - template: jinja
+    - source: salt://common/grafana/grafana_dashboards/forward_nodes/sensor.json
+    - defaults:
+        SERVERNAME: {{ SN }}
+        MONINT: {{ SNDATA.monint }}
+        MANINT: {{ SNDATA.manint }}
+        CPUS: {{ SNDATA.totalcpus }}
+        UID: {{ SNDATA.guid }}
+        ROOTFS: {{ SNDATA.rootfs }}
+        NSMFS: {{ SNDATA.nsmfs }}
+
+{% endfor %}
+{% endif %}
+
+{% if salt['pillar.get']('nodestab', False) %}
+{%- for SN, SNDATA in salt['pillar.get']('nodestab', {}).iteritems() %}
+dashboard-{{ SN }}:
+  file.managed:
+    - name: /opt/so/conf/grafana/grafana_dashboards/storage_nodes/{{ SN }}-Node.json
+    - user: 939
+    - group: 939
+    - template: jinja
+    - source: salt://common/grafana/grafana_dashboards/storage_nodes/storage.json
+    - defaults:
+        SERVERNAME: {{ SN }}
+        MANINT: {{ SNDATA.manint }}
+        MONINT: {{ SNDATA.manint }}
+        CPUS: {{ SNDATA.totalcpus }}
+        UID: {{ SNDATA.guid }}
+        ROOTFS: {{ SNDATA.rootfs }}
+        NSMFS: {{ SNDATA.nsmfs }}
+
+{% endfor %}
+{% endif %}
+
+{% if salt['pillar.get']('evaltab', False) %}
+{%- for SN, SNDATA in salt['pillar.get']('evaltab', {}).iteritems() %}
+dashboard-{{ SN }}:
+  file.managed:
+    - name: /opt/so/conf/grafana/grafana_dashboards/eval/{{ SN }}-Node.json
+    - user: 939
+    - group: 939
+    - template: jinja
+    - source: salt://common/grafana/grafana_dashboards/eval/eval.json
+    - defaults:
+        SERVERNAME: {{ SN }}
+        MANINT: {{ SNDATA.manint }}
+        MONINT: {{ SNDATA.manint }}
+        CPUS: {{ SNDATA.totalcpus }}
+        UID: {{ SNDATA.guid }}
+        ROOTFS: {{ SNDATA.rootfs }}
+        NSMFS: {{ SNDATA.nsmfs }}
+
+{% endfor %}
+{% endif %}
+
+# Install the docker. This needs to be behind nginx at some point
+so-grafana:
+  docker_container.running:
+    - image: soshybridhunter/so-grafana:HH1.0.4
+    - hostname: grafana
+    - user: socore
+    - binds:
+      - /nsm/grafana:/var/lib/grafana:rw
+      - /opt/so/conf/grafana/etc/datasources:/etc/grafana/provisioning/datasources:rw
+      - /opt/so/conf/grafana/etc/dashboards:/etc/grafana/provisioning/dashboards:rw
+      - /opt/so/conf/grafana/grafana_dashboards:/etc/grafana/grafana_dashboards:rw
+    - environment:
+      - GF_SECURITY_ADMIN_PASSWORD=augusta
+    - port_bindings:
+      - 0.0.0.0:3000:3000
+    - watch:
+      - file: /opt/so/conf/grafana/*
+
+{% endif %}
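Reviewer note: the dashboard loops above iterate whatever the mastertab/sensorstab/nodestab/evaltab pillars contain, so a missing dashboard usually means empty pillar data. A quick sketch for checking that on the master:
```
# Dump the pillar dictionaries the Grafana dashboard loops consume.
salt-call pillar.get mastertab
salt-call pillar.get sensorstab
salt-call pillar.get nodestab
```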
2313  salt/common/telegraf/etc/telegraf.conf  Normal file
File diff suppressed because it is too large

17  salt/common/telegraf/scripts/broloss.sh  Normal file
@@ -0,0 +1,17 @@
#!/bin/bash

BROLOG=$(tac /host/nsm/bro/logs/packetloss.log | head -2)
declare RESULT=($BROLOG)
CURRENTDROP=${RESULT[3]}
PASTDROP=${RESULT[9]}
DROPPED=$(($CURRENTDROP - $PASTDROP))
if [ $DROPPED == 0 ]; then
  LOSS=0
  echo "brodrop drop=0"
else
  CURRENTPACKETS=${RESULT[5]}
  PASTPACKETS=${RESULT[11]}
  TOTAL=$(($CURRENTPACKETS - $PASTPACKETS))
  LOSS=$(echo $DROPPED $TOTAL / p | dc)
  echo "brodrop drop=$LOSS"
fi
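One caveat worth flagging in this script (and in suriloss.sh below, which uses the same idiom): dc runs at precision 0 by default, so any drop ratio below 1 truncates to 0. A float-safe sketch, with illustrative values standing in for DROPPED and TOTAL:
```
# dc at default precision truncates integer division:
echo "5 100 / p" | dc    # prints 0, not 0.05
# awk keeps the fraction (5 and 100 are stand-in values here):
DROPPED=5 TOTAL=100
awk -v d="$DROPPED" -v t="$TOTAL" 'BEGIN { printf "drop=%.4f\n", (t ? d / t : 0) }'
```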
5  salt/common/telegraf/scripts/checkfiles.sh  Normal file
@@ -0,0 +1,5 @@
#!/bin/bash

FILES=$(ls -1x /host/nsm/faf/complete/ | wc -l)

echo "faffiles files=$FILES"
8  salt/common/telegraf/scripts/oldpcap.sh  Normal file
@@ -0,0 +1,8 @@
#!/bin/bash

# Get the data
OLDPCAP=$(find /host/nsm/pcap -type f -exec stat -c'%n %Z' {} + | sort | grep -v "\." | head -n 1 | awk {'print $2'})
DATE=$(date +%s)
AGE=$(($DATE - $OLDPCAP))

echo "pcapage seconds=$AGE"
6  salt/common/telegraf/scripts/redis.sh  Normal file
@@ -0,0 +1,6 @@
#!/bin/bash

UNPARSED=$(redis-cli llen logstash:unparsed | awk '{print $1}')
PARSED=$(redis-cli llen logstash:parsed | awk '{print $1}')

echo "redisqueue unparsed=$UNPARSED,parsed=$PARSED"
6  salt/common/telegraf/scripts/stenoloss.sh  Normal file
@@ -0,0 +1,6 @@
#!/bin/bash

# Get the data
DROP=$(tac /var/log/stenographer/stenographer.log | grep -m1 drop | awk '{print $14}' | awk -F "=" '{print $2}')

echo "stenodrop drop=$DROP"
25  salt/common/telegraf/scripts/suriloss.sh  Normal file
@@ -0,0 +1,25 @@
#!/bin/bash

SURILOG=$(tac /var/log/suricata/stats.log | grep kernel | head -4)
CHECKIT=$(echo $SURILOG | grep -o 'drop' | wc -l)

if [ $CHECKIT == 2 ]; then
  declare RESULT=($SURILOG)

  CURRENTDROP=${RESULT[4]}
  PASTDROP=${RESULT[14]}
  DROPPED=$(($CURRENTDROP - $PASTDROP))
  if [ $DROPPED == 0 ]; then
    LOSS=0
    echo "suridrop drop=0"
  else
    CURRENTPACKETS=${RESULT[9]}
    PASTPACKETS=${RESULT[19]}
    TOTAL=$(($CURRENTPACKETS - $PASTPACKETS))

    LOSS=$(echo $DROPPED $TOTAL / p | dc)
    echo "suridrop drop=$LOSS"
  fi
else
  echo "suridrop drop=0"
fi
0  salt/common/tools/brostatus.sh  Normal file
57  salt/common/tools/so-brologs  Normal file
@@ -0,0 +1,57 @@
#!/bin/bash

bro_logs_enabled() {

  echo "brologs:" > /opt/so/saltstack/pillar/brologs.sls
  echo "  enabled:" >> /opt/so/saltstack/pillar/brologs.sls
  for BLOG in ${BLOGS[@]}; do
    echo "    - $BLOG" | tr -d '"' >> /opt/so/saltstack/pillar/brologs.sls
  done

}

whiptail_master_adv_service_brologs() {

  BLOGS=$(whiptail --title "Security Onion Setup" --checklist "Please Select Logs to Send:" 24 78 12 \
  "conn" "Connection Logging" ON \
  "dce_rpc" "RPC Logs" ON \
  "dhcp" "DHCP Logs" ON \
  "dhcpv6" "DHCP IPv6 Logs" ON \
  "dnp3" "DNP3 Logs" ON \
  "dns" "DNS Logs" ON \
  "dpd" "DPD Logs" ON \
  "files" "Files Logs" ON \
  "ftp" "FTP Logs" ON \
  "http" "HTTP Logs" ON \
  "intel" "Intel Hits Logs" ON \
  "irc" "IRC Chat Logs" ON \
  "kerberos" "Kerberos Logs" ON \
  "modbus" "MODBUS Logs" ON \
  "mqtt" "MQTT Logs" ON \
  "notice" "Zeek Notice Logs" ON \
  "ntlm" "NTLM Logs" ON \
  "openvpn" "OPENVPN Logs" ON \
  "pe" "PE Logs" ON \
  "radius" "Radius Logs" ON \
  "rfb" "RFB Logs" ON \
  "rdp" "RDP Logs" ON \
  "signatures" "Signatures Logs" ON \
  "sip" "SIP Logs" ON \
  "smb_files" "SMB Files Logs" ON \
  "smb_mapping" "SMB Mapping Logs" ON \
  "smtp" "SMTP Logs" ON \
  "snmp" "SNMP Logs" ON \
  "software" "Software Logs" ON \
  "ssh" "SSH Logs" ON \
  "ssl" "SSL Logs" ON \
  "syslog" "Syslog Logs" ON \
  "telnet" "Telnet Logs" ON \
  "tunnel" "Tunnel Logs" ON \
  "weird" "Zeek Weird Logs" ON \
  "mysql" "MySQL Logs" ON \
  "socks" "SOCKS Logs" ON \
  "x509" "x.509 Logs" ON 3>&1 1>&2 2>&3 )
}

whiptail_master_adv_service_brologs
bro_logs_enabled
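After the checklist closes, bro_logs_enabled() serializes the selections into the brologs pillar. A sketch of the expected result, assuming only conn and dns were left checked:
```
cat /opt/so/saltstack/pillar/brologs.sls
# brologs:
#   enabled:
#     - conn
#     - dns
```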
@@ -257,7 +257,7 @@ so-curator:
     - binds:
       - /opt/so/conf/curator/curator.yml:/etc/curator/config/curator.yml:ro
       - /opt/so/conf/curator/action/:/etc/curator/action:ro
-      - /opt/so/log/curator:/var/log/curator
+      - /opt/so/log/curator:/var/log/curator:rw
 
 
 # Begin Curator Cron Jobs
@@ -315,7 +315,7 @@ so-elastalert:
     - user: elastalert
     - detach: True
     - binds:
-      - /etc/elastalert/rules/:/etc/elastalert/rules/
-      - /opt/so/log/elastalert:/var/log/elastalert
+      - /etc/elastalert/rules/:/etc/elastalert/rules/:ro
+      - /opt/so/log/elastalert:/var/log/elastalert:rw
 
 {% endif %}
@@ -30,9 +30,9 @@ filebeat.prospectors:
     paths:
       - /suricata/eve.json
     fields:
-      type: snort
+      type: ids
+      engine: suricata
     fields_under_root: true
-    tags: ["ids"]
     clean_removed: false
     close_removed: false
@@ -1,4 +1,4 @@
-# Firewall Magic
+# Firewall Magic for the grid
 
 # Keep localhost in the game
 iptables_allow_localhost:
@@ -144,6 +144,27 @@ enable_masternode_ES_9300_{{ip}}:
     - position: 1
     - save: True
+
+#enable_masternode_influxdb_8083_{{ip}}:
+#  iptables.insert:
+#    - table: filter
+#    - chain: DOCKER-USER
+#    - jump: ACCEPT
+#    - proto: tcp
+#    - source: {{ ip }}
+#    - dport: 8083
+#    - position: 1
+#    - save: True
+
+enable_masternode_influxdb_8086_{{ip}}:
+  iptables.insert:
+    - table: filter
+    - chain: DOCKER-USER
+    - jump: ACCEPT
+    - proto: tcp
+    - source: {{ ip }}
+    - dport: 8086
+    - position: 1
+    - save: True
 
 {% endfor %}
 
@@ -194,6 +215,30 @@ enable_salt_minions_3142_{{ip}}:
     - position: 1
     - save: True
+
+# Allow Influx DB access to minions so they can send in stats
+
+#enable_minion_influxdb_8083_{{ip}}:
+#  iptables.insert:
+#    - table: filter
+#    - chain: DOCKER-USER
+#    - jump: ACCEPT
+#    - proto: tcp
+#    - source: {{ ip }}
+#    - dport: 8083
+#    - position: 1
+#    - save: True
+
+enable_minions_influxdb_8086_{{ip}}:
+  iptables.insert:
+    - table: filter
+    - chain: DOCKER-USER
+    - jump: ACCEPT
+    - proto: tcp
+    - source: {{ ip }}
+    - dport: 8086
+    - position: 1
+    - save: True
 
 {% endfor %}
 
 # Allow Forward Nodes to send their beats traffic
@@ -279,6 +324,17 @@ enable_standard_analyst_443_{{ip}}:
     - position: 1
     - save: True
+
+enable_standard_analyst_3000_{{ip}}:
+  iptables.insert:
+    - table: filter
+    - chain: DOCKER-USER
+    - jump: ACCEPT
+    - proto: tcp
+    - source: {{ ip }}
+    - dport: 3000
+    - position: 1
+    - save: True
 
 #THIS IS TEMPORARY
 enable_standard_analyst_5601_{{ip}}:
   iptables.insert:
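Once these states apply, the new rules should sit at the top of the DOCKER-USER chain. A quick verification sketch on the master:
```
# Expect ACCEPT entries for 8086 (InfluxDB) and 3000 (Grafana) near position 1.
sudo iptables -L DOCKER-USER -n -v --line-numbers | grep -E '8086|3000'
```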
@@ -5,7 +5,8 @@
   "settings":{
     "number_of_replicas":0,
     "number_of_shards":1,
-    "index.refresh_interval":"30s"
+    "index.refresh_interval":"30s",
+    "index.mapping.total_fields.limit": 10000
   },
   "mappings":{
     "doc":{
@@ -206,6 +207,10 @@
       "basic_constraints_path_length":{
         "type":"long"
       },
+      "beat_host":{
+        "type":"object",
+        "dynamic": true
+      },
       "bound_port":{
         "type":"long"
       },
@@ -860,6 +865,10 @@
           }
         }
       },
+      "dhcp":{
+        "type":"object",
+        "dynamic": true
+      },
       "dir":{
         "type":"text",
         "fields":{
@@ -884,6 +893,10 @@
           }
         }
       },
+      "dns":{
+        "type":"object",
+        "dynamic": true
+      },
       "domain_age":{
         "type":"text",
         "fields":{
@@ -919,6 +932,10 @@
           }
         }
       },
+      "email":{
+        "type":"object",
+        "dynamic": true
+      },
       "enabled":{
         "type":"text",
         "fields":{
@@ -951,6 +968,14 @@
           }
         }
       },
+      "engine":{
+        "type":"text",
+        "fields":{
+          "keyword":{
+            "type":"keyword"
+          }
+        }
+      },
       "entry":{
         "type":"text",
         "fields":{
@@ -1066,6 +1091,10 @@
           }
         }
       },
+      "fileinfo":{
+        "type":"object",
+        "dynamic": true
+      },
       "file_ip":{
         "type":"ip",
         "fields":{
@@ -1101,6 +1130,10 @@
           }
         }
       },
+      "flow":{
+        "type":"object",
+        "dynamic": true
+      },
       "flow_id":{
         "type":"text",
         "fields":{
@@ -1338,6 +1371,10 @@
           }
         }
       },
+      "http":{
+        "type":"object",
+        "dynamic": true
+      },
       "id":{
         "type":"text",
         "fields":{
@@ -1346,6 +1383,14 @@
           }
         }
       },
+      "ids_event_type":{
+        "type":"text",
+        "fields":{
+          "keyword":{
+            "type":"keyword"
+          }
+        }
+      },
       "iin":{
         "type":"text",
         "fields":{
@@ -1647,6 +1692,10 @@
           }
         }
       },
+      "krb5":{
+        "type":"object",
+        "dynamic": true
+      },
       "last_alert":{
         "type":"text",
         "fields":{
@@ -1826,6 +1875,10 @@
           }
         }
       },
+      "metadata":{
+        "type":"object",
+        "dynamic": true
+      },
       "method":{
         "type":"text",
         "fields":{
@@ -1907,6 +1960,10 @@
           }
         }
       },
+      "netflow":{
+        "type":"object",
+        "dynamic": true
+      },
       "next_protocol":{
         "type":"text",
         "fields":{
@@ -2802,6 +2859,14 @@
           }
         }
       },
+      "smb":{
+        "type":"object",
+        "dynamic": true
+      },
+      "smtp":{
+        "type":"object",
+        "dynamic": true
+      },
       "software_type":{
         "type":"text",
         "fields":{
@@ -3094,6 +3159,10 @@
           }
         }
       },
+      "tcp":{
+        "type":"object",
+        "dynamic": true
+      },
       "tcp_flags":{
         "type":"text",
         "fields":{
@@ -3243,6 +3312,14 @@
           }
         }
       },
+      "tx_id":{
+        "type":"text",
+        "fields":{
+          "keyword":{
+            "type":"keyword"
+          }
+        }
+      },
       "type":{
         "type":"text",
         "fields":{
@@ -3294,6 +3371,14 @@
       "uri_length":{
         "type":"long"
       },
+      "url":{
+        "type":"text",
+        "fields": {
+          "keyword":{
+            "type":"keyword"
+          }
+        }
+      },
       "username":{
         "type":"text",
         "fields": {
@@ -219,9 +219,13 @@ path.logs: /var/log/logstash
 #
 # Where to find custom plugins
 # path.plugins: []
+{% if grains['role'] == 'so-master' %}
 {% set pipeline_workers = salt['pillar.get']('master:ls_pipeline_workers', '1') %}
 {% set pipeline_batch = salt['pillar.get']('master:ls_pipeline_batch_size', '125') %}
+{% else %}
+{% set pipeline_workers = salt['pillar.get']('node:ls_pipeline_workers', '1') %}
+{% set pipeline_batch = salt['pillar.get']('node:ls_pipeline_batch_size', '125') %}
+{% endif %}
 
 pipeline.workers: {{ pipeline_workers }}
 pipeline.batch.size: {{ pipeline_batch }}
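Since workers and batch size now resolve from role-dependent pillar keys, a render check on a minion shows which branch it takes; a sketch using stock salt-call queries:
```
# Which template branch applies to this minion?
salt-call grains.get role
# Values the template interpolates (use node:* on storage nodes):
salt-call pillar.get master:ls_pipeline_workers
salt-call pillar.get master:ls_pipeline_batch_size
```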
@@ -9,14 +9,14 @@
 # Last Update: 12/9/2016
 
 filter {
-  if [event_type] == "snort" and "test_data" not in [tags] {
+  if [event_type] == "ids" and "test_data" not in [tags] {
     mutate {
       ##add_tag => [ "conf_file_9033"]
     }
   }
 }
 output {
-  if [event_type] == "snort" and "test_data" not in [tags] {
+  if [event_type] == "ids" and "test_data" not in [tags] {
     #stdout { codec => rubydebug }
     elasticsearch {
       hosts => "{{ ES }}"
@@ -21,13 +21,11 @@
 
 {% elif grains['role'] == 'so-node' %}
 {% set lsheap = salt['pillar.get']('node:lsheap', '') %}
-{% set lsaccessip = salt['pillar.get']('node:lsaccessip', '') %}
 {% set nodetype = salt['pillar.get']('node:node_type', 'storage') %}
 
 {% elif grains['role'] == 'so-master' %}
 
 {% set lsheap = salt['pillar.get']('master:lsheap', '') %}
-{% set lsaccessip = salt['pillar.get']('master:lsaccessip', '') %}
 {% set freq = salt['pillar.get']('master:freq', '0') %}
 {% set dstats = salt['pillar.get']('master:domainstats', '0') %}
 {% set nodetype = salt['grains.get']('role', '') %}
@@ -35,7 +33,6 @@
 {% elif grains['role'] == 'so-eval' %}
 
 {% set lsheap = salt['pillar.get']('master:lsheap', '') %}
-{% set lsaccessip = salt['pillar.get']('master:lsaccessip', '') %}
 {% set freq = salt['pillar.get']('master:freq', '0') %}
 {% set dstats = salt['pillar.get']('master:domainstats', '0') %}
 {% set nodetype = salt['grains.get']('role', '') %}
@@ -63,6 +60,20 @@ lscustdir:
     - group: 939
     - makedirs: True
+
+lsdyndir:
+  file.directory:
+    - name: /opt/so/conf/logstash/dynamic
+    - user: 931
+    - group: 939
+    - makedirs: True
+
+lsetcdir:
+  file.directory:
+    - name: /opt/so/conf/logstash/etc
+    - user: 931
+    - group: 939
+    - makedirs: True
 
 lscustparserdir:
   file.directory:
     - name: /opt/so/conf/logstash/custom/parsers
@@ -78,14 +89,29 @@ lscusttemplatedir:
     - makedirs: True
 
 # Copy down all the configs including custom - TODO add watch restart
-lssync:
+lsetcsync:
   file.recurse:
-    - name: /opt/so/conf/logstash
-    - source: salt://logstash/files
+    - name: /opt/so/conf/logstash/etc
+    - source: salt://logstash/etc
     - user: 931
     - group: 939
     - template: jinja
+
+lssync:
+  file.recurse:
+    - name: /opt/so/conf/logstash/dynamic
+    - source: salt://logstash/files/dynamic
+    - user: 931
+    - group: 939
+    - template: jinja
+
+lscustsync:
+  file.recurse:
+    - name: /opt/so/conf/logstash/custom
+    - source: salt://logstash/files/custom
+    - user: 931
+    - group: 939
 
 # Copy the config file for enabled logstash plugins/parsers
 lsconfsync:
   file.managed:
@@ -123,7 +149,7 @@ lslogdir:
 
 so-logstash:
   docker_container.running:
-    - image: soshybridhunter/so-logstash:HH1.0.3
+    - image: soshybridhunter/so-logstash:HH1.0.4
     - hostname: so-logstash
     - name: so-logstash
     - user: logstash
@@ -138,11 +164,11 @@ so-logstash:
       - 0.0.0.0:6053:6053
       - 0.0.0.0:9600:9600
     - binds:
-      - /opt/so/conf/logstash/log4j2.properties:/usr/share/logstash/config/log4j2.properties:ro
-      - /opt/so/conf/logstash/logstash.yml:/usr/share/logstash/config/logstash.yml:ro
-      - /opt/so/conf/logstash/logstash-template.json:/logstash-template.json:ro
-      - /opt/so/conf/logstash/logstash-ossec-template.json:/logstash-ossec-template.json:ro
-      - /opt/so/conf/logstash/beats-template.json:/beats-template.json:ro
+      - /opt/so/conf/logstash/etc/log4j2.properties:/usr/share/logstash/config/log4j2.properties:ro
+      - /opt/so/conf/logstash/etc/logstash.yml:/usr/share/logstash/config/logstash.yml:ro
+      - /opt/so/conf/logstash/etc/logstash-template.json:/logstash-template.json:ro
+      - /opt/so/conf/logstash/etc/logstash-ossec-template.json:/logstash-ossec-template.json:ro
+      - /opt/so/conf/logstash/etc/beats-template.json:/beats-template.json:ro
       - /opt/so/conf/logstash/custom:/usr/share/logstash/pipeline.custom:ro
      - /opt/so/conf/logstash/rulesets:/usr/share/logstash/rulesets:ro
      - /opt/so/conf/logstash/dynamic:/usr/share/logstash/pipeline.dynamic
@@ -160,4 +186,4 @@ so-logstash:
       - /opt/so/log/suricata:/suricata:ro
 {%- endif %}
     - watch:
-      - file: /opt/so/conf/logstash
+      - file: /opt/so/conf/logstash/etc
@@ -57,6 +57,20 @@ pcapdir:
     - group: 941
     - makedirs: True
+
+pcaptmpdir:
+  file.directory:
+    - name: /nsm/pcaptmp
+    - user: 941
+    - group: 941
+    - makedirs: True
+
+pcapoutdir:
+  file.directory:
+    - name: /nsm/pcapout
+    - user: 941
+    - group: 941
+    - makedirs: True
 
 pcapindexdir:
   file.directory:
     - name: /nsm/pcapindex
@@ -83,5 +97,6 @@ so-steno:
       - /opt/so/conf/steno/config:/etc/stenographer/config:rw
       - /nsm/pcap:/nsm/pcap:rw
       - /nsm/pcapindex:/nsm/pcapindex:rw
-      - /tmp:/tmp:rw
+      - /nsm/pcaptmp:/tmp:rw
+      - /nsm/pcapout:/nsm/pcapout:rw
       - /opt/so/log/stenographer:/var/log/stenographer:rw
5  salt/schedule.sls  Normal file
@@ -0,0 +1,5 @@
schedule:
  schedule.present:
    - function: state.highstate
    - minutes: 15
    - maxrunning: 1
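This registers a recurring highstate on every minion whose top file entry includes schedule (see the salt/top.sls changes below). To confirm the job landed on a minion, a sketch with the stock schedule module:
```
# Expect an entry running state.highstate every 15 minutes, maxrunning 1.
salt-call schedule.list
```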
@@ -16,6 +16,20 @@ m2cryptopkgs:
     - python-m2crypto
 {% endif %}
+
+# Create a cert for the talking to influxdb
+/etc/pki/influxdb.crt:
+  x509.certificate_managed:
+    - ca_server: {{ master }}
+    - signing_policy: influxdb
+    - public_key: /etc/pki/influxdb.key
+    - CN: {{ master }}
+    - days_remaining: 3000
+    - backup: True
+    - managed_private_key:
+        name: /etc/pki/influxdb.key
+        bits: 4096
+        backup: True
 
 {% if grains['role'] == 'so-master' or grains['role'] == 'so-eval' %}
 
 # Request a cert and drop it where it needs to go to be distributed
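Once the x509 state runs, the minted certificate can be inspected directly with OpenSSL to confirm the CN and the long expiry requested above:
```
# Subject should carry the master's CN; enddate reflects days_remaining: 3000.
openssl x509 -in /etc/pki/influxdb.crt -noout -subject -enddate
```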
@@ -1,6 +1,11 @@
 %YAML 1.1
 ---
 {%- set interface = salt['pillar.get']('sensor:interface', 'bond0') %}
+{%- if grains['role'] == 'so-eval' %}
+{%- set MTU = 1500 %}
+{%- else %}
+{%- set MTU = salt['pillar.get']('sensor:mtu', '1500') %}
+{%- endif %}
 {%- if salt['pillar.get']('sensor:homenet') %}
 {%- set homenet = salt['pillar.get']('sensor:hnsensor', '') %}
 {%- else %}
@@ -77,7 +82,7 @@ stats:
   enabled: yes
   # The interval field (in seconds) controls at what interval
   # the loggers are invoked.
-  interval: 8
+  interval: 30
 
 # Configure the type of alert (and other) logging you would like.
 outputs:
@@ -931,7 +936,7 @@ host-mode: auto
 # Preallocated size for packet. Default is 1514 which is the classical
 # size for pcap on ethernet. You should adjust this value to the highest
 # packet size (MTU + hardware header) on your system.
-#default-packet-size: 1514
+default-packet-size: {{ MTU + 15 }}
 
 # Unix command socket can be used to pass commands to suricata.
 # An external tool can then connect to get information from suricata
@@ -1339,7 +1344,7 @@ threading:
   # detect-thread-ratio variable:
   threads: {{ salt['pillar.get']('sensor:suriprocs') }}
   prio:
-    default: "medium"
+    default: "high"
 {% endif %}
 
 {%- if salt['pillar.get']('sensor:suripins') %}
@@ -1,6 +1,11 @@
 %YAML 1.1
 ---
 {%- set interface = salt['pillar.get']('sensor:interface', 'bond0') %}
+{%- if grains['role'] == 'so-eval' %}
+{%- set MTU = 1500 %}
+{%- else %}
+{%- set MTU = salt['pillar.get']('sensor:mtu', '1500') %}
+{%- endif %}
 {%- if salt['pillar.get']('sensor:homenet') %}
 {%- set homenet = salt['pillar.get']('sensor:hnsensor', '') %}
 {%- else %}
11  salt/top.sls
@@ -1,6 +1,7 @@
 {%- set BROVER = salt['pillar.get']('static:broversion', 'COMMUNITY') %}
 base:
   'G@role:so-sensor':
+    - ca
     - ssl
     - common
     - firewall
@@ -10,6 +11,7 @@ base:
     - bro
 {%- endif %}
     - filebeat
+    - schedule
 
   'G@role:so-eval':
     - ca
@@ -26,6 +28,7 @@ base:
     - suricata
     - bro
     - utility
+    - schedule
 
 
   'G@role:so-master':
@@ -40,6 +43,7 @@ base:
     - logstash
     - kibana
     - utility
+    - schedule
 
 # Storage node logic
 
@@ -48,6 +52,7 @@ base:
     - common
     - firewall
     - logstash
+    - schedule
 
   'G@role:so-node and I@node:node_type:hot':
     - match: pillar
@@ -55,22 +60,28 @@ base:
     - firewall
     - logstash
     - elasticsearch
+    - schedule
 
   'G@role:so-node and I@node:node_type:warm':
     - match: pillar
     - common
     - firewall
     - elasticsearch
+    - schedule
 
   'G@role:so-node and I@node:node_type:storage':
     - match: compound
+    - ca
+    - ssl
     - common
     - firewall
     - logstash
     - elasticsearch
+    - schedule
 
   'G@role:mastersensor':
     - common
     - firewall
     - sensor
     - master
+    - schedule
@@ -30,6 +30,7 @@ echo "Applying cross cluster search config..."
   -d "{\"persistent\": {\"search\": {\"remote\": {\"{{ MASTER }}\": {\"seeds\": [\"127.0.0.1:9300\"]}}}}}"
 
 # Add all the storage nodes to cross cluster searching.
-{%- for SN, SNIP in salt['pillar.get']('nodestab', {}).iteritems() %}}
-curl -XPUT http://{{ ES }}:9200/_cluster/settings -H'Content-Type: application/json' -d '{"persistent": {"search": {"remote": {"{{ SN }}": {"skip_unavailable": "true", "seeds": ["{{ SNIP['ip'] }}:9300"]}}}}}'
+{%- for SN, SNDATA in salt['pillar.get']('nodestab', {}).iteritems() %}
+curl -XPUT http://{{ ES }}:9200/_cluster/settings -H'Content-Type: application/json' -d '{"persistent": {"search": {"remote": {"{{ SN }}": {"skip_unavailable": "true", "seeds": ["{{ SNDATA.ip }}:9300"]}}}}}'
 {%- endfor %}
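The corrected loop registers one remote cluster per storage node. The persistent settings it writes are easy to confirm after the script runs, assuming Elasticsearch is reachable on localhost as in the eval setup:
```
# Expect a search.remote entry per storage node plus the master itself.
curl -s http://localhost:9200/_cluster/settings?pretty
```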
@@ -21,6 +21,7 @@ TOTAL_MEM=`grep MemTotal /proc/meminfo | awk '{print $2}' | sed -r 's/.{3}$//'`
 NICS=$(ip link | awk -F: '$0 !~ "lo|vir|veth|br|docker|wl|^[^0-9]"{print $2 " \"" "Interface" "\"" " OFF"}')
 CPUCORES=$(cat /proc/cpuinfo | grep processor | wc -l)
 LISTCORES=$(cat /proc/cpuinfo | grep processor | awk '{print $3 " \"" "core" "\""}')
+RANDOMUID=$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 16 | head -n 1)
 
 # End Global Variable Section
 
@@ -73,6 +74,58 @@ add_socore_user_notmaster() {
 
 }
+
+# Enable Bro Logs
+bro_logs_enabled() {
+
+  echo "brologs:" > pillar/brologs.sls
+  echo "  enabled:" >> pillar/brologs.sls
+
+  if [ $MASTERADV == 'ADVANCED' ]; then
+    for BLOG in ${BLOGS[@]}; do
+      echo "    - $BLOG" | tr -d '"' >> pillar/brologs.sls
+    done
+  else
+    echo "    - conn" >> pillar/brologs.sls
+    echo "    - dce_rpc" >> pillar/brologs.sls
+    echo "    - dhcp" >> pillar/brologs.sls
+    echo "    - dhcpv6" >> pillar/brologs.sls
+    echo "    - dnp3" >> pillar/brologs.sls
+    echo "    - dns" >> pillar/brologs.sls
+    echo "    - dpd" >> pillar/brologs.sls
+    echo "    - files" >> pillar/brologs.sls
+    echo "    - ftp" >> pillar/brologs.sls
+    echo "    - http" >> pillar/brologs.sls
+    echo "    - intel" >> pillar/brologs.sls
+    echo "    - irc" >> pillar/brologs.sls
+    echo "    - kerberos" >> pillar/brologs.sls
+    echo "    - modbus" >> pillar/brologs.sls
+    echo "    - mqtt" >> pillar/brologs.sls
+    echo "    - notice" >> pillar/brologs.sls
+    echo "    - ntlm" >> pillar/brologs.sls
+    echo "    - openvpn" >> pillar/brologs.sls
+    echo "    - pe" >> pillar/brologs.sls
+    echo "    - radius" >> pillar/brologs.sls
+    echo "    - rfb" >> pillar/brologs.sls
+    echo "    - rdp" >> pillar/brologs.sls
+    echo "    - signatures" >> pillar/brologs.sls
+    echo "    - sip" >> pillar/brologs.sls
+    echo "    - smb_files" >> pillar/brologs.sls
+    echo "    - smb_mapping" >> pillar/brologs.sls
+    echo "    - smtp" >> pillar/brologs.sls
+    echo "    - snmp" >> pillar/brologs.sls
+    echo "    - software" >> pillar/brologs.sls
+    echo "    - ssh" >> pillar/brologs.sls
+    echo "    - ssl" >> pillar/brologs.sls
+    echo "    - syslog" >> pillar/brologs.sls
+    echo "    - telnet" >> pillar/brologs.sls
+    echo "    - tunnel" >> pillar/brologs.sls
+    echo "    - weird" >> pillar/brologs.sls
+    echo "    - mysql" >> pillar/brologs.sls
+    echo "    - socks" >> pillar/brologs.sls
+    echo "    - x509" >> pillar/brologs.sls
+  fi
+}
 
 calculate_useable_cores() {
 
 # Calculate reasonable core usage
@@ -98,6 +151,15 @@ chown_salt_master() {
 
 }
+
+clear_master() {
+  # Clear out the old master public key in case this is a re-install.
+  # This only happens if you re-install the master.
+  if [ -f /etc/salt/pki/minion/minion_master.pub ]; then
+    rm /etc/salt/pki/minion/minion_master.pub
+  fi
+
+}
 
 configure_minion() {
 
 # You have to pass the TYPE to this function so it knows if its a master or not
@@ -154,6 +216,11 @@ create_bond() {
 # Create the bond interface
 echo "Setting up Bond"
+
+# Set the MTU
+if [ $NSMSETUP != 'ADVANCED' ]; then
+  MTU=1500
+fi
 
 # Do something different based on the OS
 if [ $OS == 'centos' ]; then
   modprobe --first-time bonding
@@ -165,6 +232,7 @@ create_bond() {
   echo "BOOTPROTO=none" >> /etc/sysconfig/network-scripts/ifcfg-bond0
   echo "BONDING_OPTS=\"mode=0\"" >> /etc/sysconfig/network-scripts/ifcfg-bond0
   echo "ONBOOT=yes" >> /etc/sysconfig/network-scripts/ifcfg-bond0
+  echo "MTU=$MTU" >> /etc/sysconfig/network-scripts/ifcfg-bond0
 
   # Create Bond configs for the selected monitor interface
   for BNIC in ${BNICS[@]}; do
@@ -173,6 +241,7 @@ create_bond() {
     sed -i 's/ONBOOT=no/ONBOOT=yes/g' /etc/sysconfig/network-scripts/ifcfg-$BONDNIC
     echo "MASTER=bond0" >> /etc/sysconfig/network-scripts/ifcfg-$BONDNIC
     echo "SLAVE=yes" >> /etc/sysconfig/network-scripts/ifcfg-$BONDNIC
+    echo "MTU=$MTU" >> /etc/sysconfig/network-scripts/ifcfg-$BONDNIC
   done
   nmcli con reload
   systemctl restart network
@@ -217,6 +286,7 @@ create_bond() {
     echo "  post-up ethtool -G \$IFACE rx 4096; for i in rx tx sg tso ufo gso gro lro; do ethtool -K \$IFACE \$i off; done" >> /etc/network/interfaces.d/$BNIC
     echo "  post-up echo 1 > /proc/sys/net/ipv6/conf/\$IFACE/disable_ipv6" >> /etc/network/interfaces.d/$BNIC
     echo "  bond-master bond0" >> /etc/network/interfaces.d/$BNIC
+    echo "  mtu $MTU" >> /etc/network/interfaces.d/$BNIC
 
   done
 
@@ -226,6 +296,7 @@ create_bond() {
   echo "iface bond0 inet manual" >> /etc/network/interfaces.d/bond0
   echo "  bond-mode 0" >> /etc/network/interfaces.d/bond0
   echo "  bond-slaves $BN" >> /etc/network/interfaces.d/bond0
+  echo "  mtu $MTU" >> /etc/network/interfaces.d/bond0
   echo "  up ip link set \$IFACE promisc on arp off up" >> /etc/network/interfaces.d/bond0
   echo "  down ip link set \$IFACE promisc off down" >> /etc/network/interfaces.d/bond0
   echo "  post-up ethtool -G \$IFACE rx 4096; for i in rx tx sg tso ufo gso gro lro; do ethtool -K \$IFACE \$i off; done" >> /etc/network/interfaces.d/bond0
@@ -313,11 +384,19 @@ filter_nics() {
   FNICS=$(ip link | grep -vw $MNIC | awk -F: '$0 !~ "lo|vir|veth|br|docker|wl|^[^0-9]"{print $2 " \"" "Interface" "\"" " OFF"}')
 
 }
+
+get_filesystem_nsm(){
+  FSNSM=$(df /nsm | awk '$3 ~ /[0-9]+/ { print $2 * 1000 }')
+}
+
+get_filesystem_root(){
+  FSROOT=$(df / | awk '$3 ~ /[0-9]+/ { print $2 * 1000 }')
+}
 
 get_main_ip() {
 
   # Get the main IP address the box is using
   MAINIP=$(ip route get 1 | awk '{print $NF;exit}')
+  MAININT=$(ip route get 1 | awk '{print $5;exit}')
 
 }
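The get_filesystem helpers read the second df column (total size in 1K blocks) and multiply by 1000 to approximate bytes for the dashboards. A worked sketch of that arithmetic with a hypothetical 100 GiB root volume:
```
# df reports 1K blocks, e.g. 104857600 blocks for a 100 GiB volume:
#   104857600 * 1000 = 104857600000 (approximate bytes; 1024 would be exact)
df / | awk '$3 ~ /[0-9]+/ { print $2 * 1000 }'
```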
@@ -383,6 +462,7 @@ master_pillar() {
   touch /opt/so/saltstack/pillar/masters/$HOSTNAME.sls
   echo "master:" > /opt/so/saltstack/pillar/masters/$HOSTNAME.sls
   echo "  mainip: $MAINIP" >> /opt/so/saltstack/pillar/masters/$HOSTNAME.sls
+  echo "  mainint: $MAININT" >> /opt/so/saltstack/pillar/masters/$HOSTNAME.sls
   echo "  esheap: $ES_HEAP_SIZE" >> /opt/so/saltstack/pillar/masters/$HOSTNAME.sls
   echo "  esclustername: {{ grains.host }}" >> /opt/so/saltstack/pillar/masters/$HOSTNAME.sls
   if [ $INSTALLTYPE == 'EVALMODE' ]; then
@@ -391,6 +471,7 @@ master_pillar() {
     echo "  ls_pipeline_batch_size: 125" >> /opt/so/saltstack/pillar/masters/$HOSTNAME.sls
     echo "  ls_input_threads: 1" >> /opt/so/saltstack/pillar/masters/$HOSTNAME.sls
     echo "  ls_batch_count: 125" >> /opt/so/saltstack/pillar/masters/$HOSTNAME.sls
+    echo "  mtu: 1500" >> /opt/so/saltstack/pillar/masters/$HOSTNAME.sls
   else
     echo "  freq: 0" >> /opt/so/saltstack/pillar/masters/$HOSTNAME.sls
     echo "  domainstats: 0" >> /opt/so/saltstack/pillar/masters/$HOSTNAME.sls
@@ -440,6 +521,7 @@ node_pillar() {
   touch $TMP/$HOSTNAME.sls
   echo "node:" > $TMP/$HOSTNAME.sls
   echo "  mainip: $MAINIP" >> $TMP/$HOSTNAME.sls
+  echo "  mainint: $MAININT" >> $TMP/$HOSTNAME.sls
   echo "  esheap: $NODE_ES_HEAP_SIZE" >> $TMP/$HOSTNAME.sls
   echo "  esclustername: {{ grains.host }}" >> $TMP/$HOSTNAME.sls
   echo "  lsheap: $NODE_LS_HEAP_SIZE" >> $TMP/$HOSTNAME.sls
@@ -574,9 +656,8 @@ saltify() {
 salt_checkin() {
   # Master State to Fix Mine Usage
   if [ $INSTALLTYPE == 'MASTERONLY' ] || [ $INSTALLTYPE == 'EVALMODE' ]; then
-    salt-call state.apply ca >>~/sosetup.log 2>&1
-    # salt-call state.apply ssl >>~/sosetup.log 2>&1
-    # salt-call state.apply common >>~/sosetup.log 2>&1
+    echo "Building Certificate Authority"
+    salt-call state.apply ca
     echo " *** Restarting Salt to fix any SSL errors. ***"
     service salt-master restart
     sleep 5
@@ -592,6 +673,8 @@ salt_checkin() {
   else
 
     # Run Checkin
+    salt-call state.apply ca
+    salt-call state.apply ssl
     salt-call state.highstate
 
   fi
@@ -638,6 +721,7 @@ sensor_pillar() {
   echo "sensor:" > $TMP/$HOSTNAME.sls
   echo "  interface: bond0" >> $TMP/$HOSTNAME.sls
   echo "  mainip: $MAINIP" >> $TMP/$HOSTNAME.sls
+  echo "  mainint: $MAININT" >> $TMP/$HOSTNAME.sls
   if [ $NSMSETUP == 'ADVANCED' ]; then
     echo "  bro_pins:" >> $TMP/$HOSTNAME.sls
     for PIN in $BROPINS; do
@@ -657,6 +741,7 @@ sensor_pillar() {
   echo "  pcapbpf:" >> $TMP/$HOSTNAME.sls
   echo "  nidsbpf:" >> $TMP/$HOSTNAME.sls
   echo "  master: $MSRV" >> $TMP/$HOSTNAME.sls
+  echo "  mtu: $MTU" >> $TMP/$HOSTNAME.sls
   if [ $HNSENSOR != 'inherit' ]; then
     echo "  hnsensor: $HNSENSOR" >> $TMP/$HOSTNAME.sls
   fi
@@ -671,6 +756,7 @@ set_initial_firewall_policy() {
 if [ $INSTALLTYPE == 'MASTERONLY' ]; then
 printf " - $MAINIP\n" >> /opt/so/saltstack/pillar/firewall/minions.sls
 printf " - $MAINIP\n" >> /opt/so/saltstack/pillar/firewall/masterfw.sls
+/opt/so/saltstack/pillar/data/addtotab.sh mastertab $HOSTNAME $MAINIP $CPUCORES $RANDOMUID $MAININT $FSROOT $FSNSM
 fi
 
 if [ $INSTALLTYPE == 'EVALMODE' ]; then
@@ -678,17 +764,19 @@ set_initial_firewall_policy() {
 printf " - $MAINIP\n" >> /opt/so/saltstack/pillar/firewall/masterfw.sls
 printf " - $MAINIP\n" >> /opt/so/saltstack/pillar/firewall/forward_nodes.sls
 printf " - $MAINIP\n" >> /opt/so/saltstack/pillar/firewall/storage_nodes.sls
+/opt/so/saltstack/pillar/data/addtotab.sh evaltab $HOSTNAME $MAINIP $CPUCORES $RANDOMUID $MAININT $FSROOT $FSNSM bond0
 fi
 
 if [ $INSTALLTYPE == 'SENSORONLY' ]; then
 ssh -i /root/.ssh/so.key socore@$MSRV sudo /opt/so/saltstack/pillar/firewall/addfirewall.sh minions $MAINIP
 ssh -i /root/.ssh/so.key socore@$MSRV sudo /opt/so/saltstack/pillar/firewall/addfirewall.sh forward_nodes $MAINIP
+ssh -i /root/.ssh/so.key socore@$MSRV sudo /opt/so/saltstack/pillar/data/addtotab.sh sensorstab $HOSTNAME $MAINIP $CPUCORES $RANDOMUID $MAININT $FSROOT $FSNSM bond0
 fi
 
 if [ $INSTALLTYPE == 'STORAGENODE' ]; then
 ssh -i /root/.ssh/so.key socore@$MSRV sudo /opt/so/saltstack/pillar/firewall/addfirewall.sh minions $MAINIP
 ssh -i /root/.ssh/so.key socore@$MSRV sudo /opt/so/saltstack/pillar/firewall/addfirewall.sh storage_nodes $MAINIP
-ssh -i /root/.ssh/so.key socore@$MSRV sudo /opt/so/saltstack/pillar/data/addtotab.sh nodestab $HOSTNAME $MAINIP
+ssh -i /root/.ssh/so.key socore@$MSRV sudo /opt/so/saltstack/pillar/data/addtotab.sh nodestab $HOSTNAME $MAINIP $CPUCORES $RANDOMUID $MAININT $FSROOT $FSNSM
 fi
 
 if [ $INSTALLTYPE == 'PARSINGNODE' ]; then
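Judging by the variable names at these call sites, the positional arguments to addtotab.sh run: tab type, hostname, management IP, CPU core count, generated UID, management interface, root filesystem, NSM filesystem, and (for sensor and eval installs) the monitor interface. A sensor registration therefore expands on the master to something like this (all values hypothetical):
```
/opt/so/saltstack/pillar/data/addtotab.sh sensorstab sensor01 10.0.0.5 16 1a2b3c4d eth0 /dev/sda1 /dev/sdb1 bond0
```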
@@ -790,8 +878,8 @@ whiptail_bro_pins() {
 
 whiptail_bro_version() {
 
-BROVERSION=$(whiptail --title "Security Onion Setup" --radiolist "What tool would you like to use to generate meta data?" 20 78 4 "COMMUNITY" "Install Community Bro" ON \
-"ZEEK" "Install Zeek" OFF "SURICATA" "SUPER EXPERIMENTAL" OFF 3>&1 1>&2 2>&3)
+BROVERSION=$(whiptail --title "Security Onion Setup" --radiolist "What tool would you like to use to generate meta data?" 20 78 4 "ZEEK" "Install Zeek (aka Bro)" ON \
+"COMMUNITY" "Install Community NSM" OFF "SURICATA" "SUPER EXPERIMENTAL" OFF 3>&1 1>&2 2>&3)
 
 local exitstatus=$?
 whiptail_check_exitstatus $exitstatus
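The `3>&1 1>&2 2>&3` tail on these whiptail calls is the standard file-descriptor swap: whiptail draws its interface on stdout and prints the chosen tag on stderr, so swapping the two inside `$( )` captures the answer while the dialog still reaches the terminal. A minimal standalone illustration (dialog text hypothetical):
```
#!/bin/bash
# Swap stdout and stderr inside the command substitution so the selected
# tag (written to stderr by whiptail) is what gets captured.
CHOICE=$(whiptail --title "Demo" --radiolist "Pick one:" 10 40 2 \
"A" "First option" ON \
"B" "Second option" OFF 3>&1 1>&2 2>&3)
echo "Selected: $CHOICE"
```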
@@ -865,14 +953,17 @@ whiptail_install_type() {
 
 # What kind of install are we doing?
 INSTALLTYPE=$(whiptail --title "Security Onion Setup" --radiolist \
-"Choose Install Type:" 20 78 8 \
+"Choose Install Type:" 20 78 14 \
 "SENSORONLY" "Create a forward only sensor" ON \
 "STORAGENODE" "Add a Storage Hot Node with parsing" OFF \
 "MASTERONLY" "Start a new grid" OFF \
 "PARSINGNODE" "TODO Add a dedicated Parsing Node" OFF \
 "HOTNODE" "TODO Add a Hot Node (Storage Node without Parsing)" OFF \
 "WARMNODE" "TODO Add a Warm Node to an existing Hot or Storage node" OFF \
-"EVALMODE" "Evaluate all the things" OFF 3>&1 1>&2 2>&3 )
+"EVALMODE" "Evaluate all the things" OFF \
+"WAZUH" "TODO Stand Alone Wazuh Node" OFF \
+"STRELKA" "TODO Stand Alone Strelka Node" OFF \
+"FLEET" "TODO Stand Alone Fleet OSQuery Node" OFF 3>&1 1>&2 2>&3 )
 
 local exitstatus=$?
 whiptail_check_exitstatus $exitstatus
@@ -937,6 +1028,75 @@ whiptail_management_server() {
 
 }
 
+# Ask if you want to do advanced setup of the Master
+whiptail_master_adv() {
+MASTERADV=$(whiptail --title "Security Onion Setup" --radiolist \
+"Choose what type of master install:" 20 78 4 \
+"BASIC" "Install master with recommended settings" ON \
+"ADVANCED" "Do additional configuration to the master" OFF 3>&1 1>&2 2>&3 )
+}
+
+# Ask which additional components to install
+whiptail_master_adv_service_brologs() {
+
+BLOGS=$(whiptail --title "Security Onion Setup" --checklist "Please Select Logs to Send:" 24 78 12 \
+"conn" "Connection Logging" ON \
+"dce_rpc" "RPC Logs" ON \
+"dhcp" "DHCP Logs" ON \
+"dhcpv6" "DHCP IPv6 Logs" ON \
+"dnp3" "DNP3 Logs" ON \
+"dns" "DNS Logs" ON \
+"dpd" "DPD Logs" ON \
+"files" "Files Logs" ON \
+"ftp" "FTP Logs" ON \
+"http" "HTTP Logs" ON \
+"intel" "Intel Hits Logs" ON \
+"irc" "IRC Chat Logs" ON \
+"kerberos" "Kerberos Logs" ON \
+"modbus" "MODBUS Logs" ON \
+"mqtt" "MQTT Logs" ON \
+"notice" "Zeek Notice Logs" ON \
+"ntlm" "NTLM Logs" ON \
+"openvpn" "OPENVPN Logs" ON \
+"pe" "PE Logs" ON \
+"radius" "Radius Logs" ON \
+"rfb" "RFB Logs" ON \
+"rdp" "RDP Logs" ON \
+"signatures" "Signatures Logs" ON \
+"sip" "SIP Logs" ON \
+"smb_files" "SMB Files Logs" ON \
+"smb_mapping" "SMB Mapping Logs" ON \
+"smtp" "SMTP Logs" ON \
+"snmp" "SNMP Logs" ON \
+"software" "Software Logs" ON \
+"ssh" "SSH Logs" ON \
+"ssl" "SSL Logs" ON \
+"syslog" "Syslog Logs" ON \
+"telnet" "Telnet Logs" ON \
+"tunnel" "Tunnel Logs" ON \
+"weird" "Zeek Weird Logs" ON \
+"mysql" "MySQL Logs" ON \
+"socks" "SOCKS Logs" ON \
+"x509" "x.509 Logs" ON 3>&1 1>&2 2>&3 )
+}
+
+whiptail_master_adv_service_grafana() {
+echo "blah"
+}
+
+whiptail_master_adv_service_osquery() {
+#MOSQ=$()
+echo "blah"
+
+}
+
+whiptail_master_adv_service_wazuh() {
+echo "blah"
+}
+
+
+
+
 whiptail_network_notice() {
 
 whiptail --title "Security Onion Setup" --yesno "Since this is a network install we assume the management interface, DNS, Hostname, etc are already set up. Hit YES to continue." 8 78
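A whiptail checklist like the one above returns the selected tags as a single space-separated string of double-quoted words, e.g. `"conn" "dns" "http"`. Whatever consumes $BLOGS (bro_logs_enabled is called later in this diff, but its body is not shown) would need to strip those quotes; a minimal sketch, assuming the default whiptail output format:
```
BLOGS='"conn" "dns" "http"'      # hypothetical selection from the checklist
for LOG in $BLOGS; do
  LOG=${LOG//\"/}                # drop the surrounding double quotes
  echo "Enabling Zeek log: $LOG"
done
```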
@@ -1147,10 +1307,13 @@ if (whiptail_you_sure); then
 
 if [ $INSTALLTYPE == 'MASTERONLY' ]; then
 
+# Would you like to do an advanced install?
+whiptail_master_adv
+
 # Pick the Management NIC
 whiptail_management_nic
 
-# Choose Zeek or Community Bro
+# Choose Zeek or Community NSM
 whiptail_bro_version
 
 # Select Snort or Suricata
@@ -1171,8 +1334,25 @@ if (whiptail_you_sure); then
 # Find out how to handle updates
 whiptail_master_updates
 
+# Do Advanced Setup if they chose it
+if [ $MASTERADV == 'ADVANCED' ]; then
+# Ask which bro logs to enable - Need to add Suricata check
+if [ $BROVERSION != 'SURICATA' ]; then
+whiptail_master_adv_service_brologs
+fi
+whiptail_master_adv_service_osquery
+whiptail_master_adv_service_grafana
+whiptail_master_adv_service_wazuh
+fi
+
 # Last Chance to back out
 whiptail_make_changes
+clear_master
+mkdir -p /nsm
+get_filesystem_root
+get_filesystem_nsm
+# Enable Bro Logs
+bro_logs_enabled
 
 # Figure out the main IP address
 get_main_ip
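get_filesystem_root and get_filesystem_nsm are defined earlier in the setup script, outside this diff; presumably they record which filesystems back / and /nsm so those values can be handed to addtotab.sh as $FSROOT and $FSNSM. A hypothetical equivalent:
```
# Hypothetical stand-ins; the real helper definitions are not part of this diff.
get_filesystem_root() { FSROOT=$(df / | awk 'NR==2 {print $1}'); }
get_filesystem_nsm()  { FSNSM=$(df /nsm | awk 'NR==2 {print $1}'); }
```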
@@ -1265,6 +1445,10 @@ if (whiptail_you_sure); then
 whiptail_basic_suri
 fi
 whiptail_make_changes
+clear_master
+mkdir -p /nsm
+get_filesystem_root
+get_filesystem_nsm
 copy_ssh_key
 set_initial_firewall_policy
 sensor_pillar
@@ -1314,8 +1498,12 @@ if (whiptail_you_sure); then
 RULESETUP=ETOPEN
 NSMSETUP=BASIC
 NIDS=Suricata
-BROVERSION=COMMUNITY
+BROVERSION=ZEEK
 whiptail_make_changes
+clear_master
+mkdir -p /nsm
+get_filesystem_root
+get_filesystem_nsm
 get_main_ip
 # Add the user so we can sit back and relax
 echo ""
@@ -1369,12 +1557,16 @@ if (whiptail_you_sure); then
 else
 NODE_ES_HEAP_SIZE=$ES_HEAP_SIZE
 NODE_LS_HEAP_SIZE=$LS_HEAP_SIZE
-LSPIPELINEWORKERS=1
+LSPIPELINEWORKERS=$CPUCORES
 LSPIPELINEBATCH=125
 LSINPUTTHREADS=1
 LSINPUTBATCHCOUNT=125
 fi
 whiptail_make_changes
+clear_master
+mkdir -p /nsm
+get_filesystem_root
+get_filesystem_nsm
 copy_ssh_key
 set_initial_firewall_policy
 saltify
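Bumping LSPIPELINEWORKERS from a fixed 1 to $CPUCORES matches Logstash's own default, which sizes pipeline.workers to the host's core count. CPUCORES is assigned earlier in the setup script, outside this diff; the conventional way to derive it would be:
```
CPUCORES=$(nproc)   # hypothetical; the real assignment happens earlier in the setup script
```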