Merge pull request #2 from TOoSmOotH/master

Tech Preview 1.0
This commit is contained in:
Mike Reeves
2018-10-18 11:33:52 -04:00
committed by GitHub
130 changed files with 23414 additions and 1 deletions

2
.gitignore vendored Normal file
View File

@@ -0,0 +1,2 @@
.DS_Store
.idea

View File

@@ -1 +1,3 @@
# securityonion-saltstack
See the wiki for details

3
exclude-list.txt Normal file
View File

@@ -0,0 +1,3 @@
salt/bro/files/local.bro
salt/bro/files/local.bro.community
salt/suricata/suricata.yaml

59
files/master Normal file
View File

@@ -0,0 +1,59 @@
##### Primary configuration settings #####
##########################################
# This configuration file is used to manage the behavior of the Salt Master.
# Values that are commented out but have an empty line after the comment are
# defaults that do not need to be set in the config. If there is no blank line
# after the comment then the value is presented as an example and is not the
# default.
# The user under which the salt master will run. Salt will update all
# permissions to allow the specified user to run the master. The exception is
# the job cache, which must be deleted if this user is changed. If the
# modified files cause conflicts, set verify_env to False.
# user: socore
##### File Server settings #####
##########################################
# Salt runs a lightweight file server written in zeromq to deliver files to
# minions. This file server is built into the master daemon and does not
# require a dedicated port.
# The file server works on environments passed to the master, each environment
# can have multiple root directories, the subdirectories in the multiple file
# roots cannot match, otherwise the downloaded files will not be able to be
# reliably ensured. A base environment is required to house the top file.
# Example:
# file_roots:
# base:
# - /srv/salt/
# dev:
# - /srv/salt/dev/services
# - /srv/salt/dev/states
# prod:
# - /srv/salt/prod/services
# - /srv/salt/prod/states
#
file_roots:
base:
- /opt/so/saltstack/salt
# The master_roots setting configures a master-only copy of the file_roots dictionary,
# used by the state compiler.
# master_roots: /opt/so/saltstack/salt-master
##### Pillar settings #####
##########################################
# Salt Pillars allow for the building of global data that can be made selectively
# available to different minions based on minion grain filtering. The Salt
# Pillar is laid out in the same fashion as the file server, with environments,
# a top file and sls files. However, pillar data does not need to be in the
# highstate format, and is generally just key/value pairs.
pillar_roots:
base:
- /opt/so/saltstack/pillar
peer:
.*:
- x509.sign_remote_certificate

1
files/utils/so-checkin Normal file
View File

@@ -0,0 +1 @@
sudo salt-call state.highstate

1
files/utils/so-getparsed Normal file
View File

@@ -0,0 +1 @@
sudo docker exec -it so-redis redis-cli llen logstash:unparsed

View File

@@ -0,0 +1 @@
sudo docker exec -it so-redis redis-cli llen logstash:unparsed

1
files/utils/so-listindex Normal file
View File

@@ -0,0 +1 @@
curl -X GET "localhost:9200/_cat/indices?v"

1
files/utils/so-start Normal file
View File

@@ -0,0 +1 @@
sudo salt-call state.highstate

42
pillar/brologs.sls Normal file
View File

@@ -0,0 +1,42 @@
brologs:
enabled:
- conn
- dce_rpc
- dhcp
- dhcpv6
- dnp3
- dns
- dpd
- files
- ftp
- http
- intel
- irc
- kerberos
- modbus
- mqtt
- notice
- ntlm
- openvpn
- pe
- radius
- rfb
- rdp
- signatures
- sip
- smb_files
- smb_mapping
- smtp
- snmp
- software
- ssh
- ssl
- syslog
- telnet
- tunnel
- weird
- mysql
- socks
- x509
disabled:

16
pillar/data/addtotab.sh Normal file
View File

@@ -0,0 +1,16 @@
#!/usr/bin/env bash
# This script adds sensors/nodes/etc to the nodes tab
#
# Usage: addtotab.sh <type> <name> <ip-address>
#   type      - kind of host being added (sensor, node, etc.)
#   name      - hostname to record under the nodestab pillar
#   ipaddress - management IP of the host
TYPE=$1
NAME=$2
IPADDRESS=$3

NODESTAB="/opt/so/saltstack/pillar/data/nodestab.sls"

# Use -F and quote the argument so the IP is matched literally:
# an unquoted, regex-interpreted pattern lets the dots match any
# character (e.g. 10.0.0.1 would also match 10x0y0z1).
if grep -qF -- "$IPADDRESS" "$NODESTAB"; then
    echo "Storage Node Already in There"
else
    echo " $NAME:" >> "$NODESTAB"
    echo " ip: $IPADDRESS" >> "$NODESTAB"
    # Re-apply the utility state so the new entry takes effect
    salt-call state.apply utility
fi

1
pillar/data/nodestab.sls Normal file
View File

@@ -0,0 +1 @@
nodestab:

View File

@@ -0,0 +1,13 @@
#!/usr/bin/env bash
# This script adds ip addresses to specific rule sets defined by the user
#
# Usage: <script> <policy> <ip-address>
#   policy    - name of the firewall pillar file (without .sls) to update
#   ipaddress - IP to append to that policy's list
POLICY=$1
IPADDRESS=$2

PILLARFILE="/opt/so/saltstack/pillar/firewall/${POLICY}.sls"

# Use -F and quote the argument so the IP is matched literally:
# unquoted regex matching lets the dots match any character.
if grep -qF -- "$IPADDRESS" "$PILLARFILE"; then
    echo "Firewall Rule Already There"
else
    echo " - $IPADDRESS" >> "$PILLARFILE"
    # Re-apply the firewall state so the new rule takes effect
    salt-call state.apply firewall
fi

View File

@@ -0,0 +1,3 @@
analyst:
- 127.0.0.1

View File

@@ -0,0 +1,3 @@
beats_endpoint:
- 127.0.0.1

View File

@@ -0,0 +1,3 @@
forward_nodes:
- 127.0.0.1

View File

@@ -0,0 +1,2 @@
masterfw:
- 127.0.0.1

View File

@@ -0,0 +1,3 @@
minions:
- 127.0.0.1

View File

@@ -0,0 +1,2 @@
storage_nodes:
- 127.0.0.1

View File

@@ -0,0 +1,10 @@
# Example Pillar file for a master
master:
esaccessip: 127.0.0.1
esheap: CHANGEME
esclustername: {{ grains.host }}
freq: 0
domainstats: 0
lsheap: 1500m
lsaccessip: 127.0.0.1
elastalert: 1

View File

@@ -0,0 +1,5 @@
schedule:
  # Run a highstate on master minions every 15 minutes.
  highstate:
    # Salt's scheduler option is spelled 'function'; the previous
    # 'funtion' typo made the scheduled job a silent no-op.
    function: state.highstate
    minutes: 15
    # Never allow overlapping highstate runs
    maxrunning: 1

5
pillar/nodes/example.sls Normal file
View File

@@ -0,0 +1,5 @@
# Example Pillar file for a sensor
node:
ls_heapsize: CHANGEME
es_heapsize: CHANGEME
node_type: CHANGEME

View File

@@ -0,0 +1,5 @@
schedule:
  # Run a highstate on node minions every 15 minutes.
  highstate:
    # Salt's scheduler option is spelled 'function'; the previous
    # 'funtion' typo made the scheduled job a silent no-op.
    function: state.highstate
    minutes: 15
    # Never allow overlapping highstate runs
    maxrunning: 1

View File

@@ -0,0 +1,14 @@
# Example Pillar file for a sensor
sensor:
interface: CHANGEME
bro_pins:
- 1
- 2
- 3
- 4
brobpf:
pcapbpf:
nidsbpf:
s3bucket:
s3key:

View File

@@ -0,0 +1,5 @@
schedule:
  # Run a highstate on sensor minions every 15 minutes.
  highstate:
    # Salt's scheduler option is spelled 'function'; the previous
    # 'funtion' typo made the scheduled job a silent no-op.
    function: state.highstate
    minutes: 15
    # Never allow overlapping highstate runs
    maxrunning: 1

View File

@@ -0,0 +1,6 @@
# This is for global salt items such as ntp servers etc.
static:
ntpserver:
homenet:
proxy:
masterupdate:

20
pillar/top.sls Normal file
View File

@@ -0,0 +1,20 @@
base:
'G@role:so-sensor':
- sensors.schedule
- sensors.{{ grains.host }}
- static
- firewall.*
- brologs
'G@role:so-master':
- masters.schedule
- masters.{{ grains.host }}
- static
- firewall.*
- data.*
'G@role:so-node':
- nodes.schedule
- nodes.{{ grains.host }}
- static
- firewall.*

141
salt/bro/files/local.bro Normal file
View File

@@ -0,0 +1,141 @@
##! Local site policy. Customize as appropriate.
##!
##! This file will not be overwritten when upgrading or reinstalling!
# This script logs which scripts were loaded during each run.
@load misc/loaded-scripts
# Apply the default tuning scripts for common tuning settings.
@load tuning/defaults
# Estimate and log capture loss.
@load misc/capture-loss
# Enable logging of memory, packet and lag statistics.
@load misc/stats
# Load the scan detection script.
@load misc/scan
# Detect traceroute being run on the network. This could possibly cause
# performance trouble when there are a lot of traceroutes on your network.
# Enable cautiously.
#@load misc/detect-traceroute
# Generate notices when vulnerable versions of software are discovered.
# The default is to only monitor software found in the address space defined
# as "local". Refer to the software framework's documentation for more
# information.
@load frameworks/software/vulnerable
# Detect software changing (e.g. attacker installing hacked SSHD).
@load frameworks/software/version-changes
# This adds signatures to detect cleartext forward and reverse windows shells.
@load-sigs frameworks/signatures/detect-windows-shells
# Load all of the scripts that detect software in various protocols.
@load protocols/ftp/software
@load protocols/smtp/software
@load protocols/ssh/software
@load protocols/http/software
# The detect-webapps script could possibly cause performance trouble when
# running on live traffic. Enable it cautiously.
#@load protocols/http/detect-webapps
# This script detects DNS results pointing toward your Site::local_nets
# where the name is not part of your local DNS zone and is being hosted
# externally. Requires that the Site::local_zones variable is defined.
@load protocols/dns/detect-external-names
# Script to detect various activity in FTP sessions.
@load protocols/ftp/detect
# Scripts that do asset tracking.
@load protocols/conn/known-hosts
@load protocols/conn/known-services
@load protocols/ssl/known-certs
# This script enables SSL/TLS certificate validation.
@load protocols/ssl/validate-certs
# This script prevents the logging of SSL CA certificates in x509.log
@load protocols/ssl/log-hostcerts-only
# Uncomment the following line to check each SSL certificate hash against the ICSI
# certificate notary service; see http://notary.icsi.berkeley.edu .
# @load protocols/ssl/notary
# If you have libGeoIP support built in, do some geographic detections and
# logging for SSH traffic.
@load protocols/ssh/geo-data
# Detect hosts doing SSH bruteforce attacks.
@load protocols/ssh/detect-bruteforcing
# Detect logins using "interesting" hostnames.
@load protocols/ssh/interesting-hostnames
# Detect SQL injection attacks.
@load protocols/http/detect-sqli
#### Network File Handling ####
# Enable MD5 and SHA1 hashing for all files.
@load frameworks/files/hash-all-files
# Detect SHA1 sums in Team Cymru's Malware Hash Registry.
@load frameworks/files/detect-MHR
# Uncomment the following line to enable detection of the heartbleed attack. Enabling
# this might impact performance a bit.
# @load policy/protocols/ssl/heartbleed
# Uncomment the following line to enable logging of connection VLANs. Enabling
# this adds two VLAN fields to the conn.log file. This may not work properly
# since we use AF_PACKET and it strips VLAN tags.
# @load policy/protocols/conn/vlan-logging
# Uncomment the following line to enable logging of link-layer addresses. Enabling
# this adds the link-layer address for each connection endpoint to the conn.log file.
# @load policy/protocols/conn/mac-logging
# Uncomment the following line to enable the SMB analyzer. The analyzer
# is currently considered a preview and therefore not loaded by default.
@load policy/protocols/smb
######################################
## Security Onion Scripts Section ##
######################################
# Add the interface to the log event
#@load securityonion/add-interface-to-logs.bro
# Add Sensor Name to the conn.log
#@load securityonion/conn-add-sensorname.bro
# File Extraction
#@load securityonion/file-extraction
# Intel from Mandiant APT1 Report
#@load securityonion/apt1
# ShellShock - detects successful exploitation of Bash vulnerability CVE-2014-6271
#@load securityonion/shellshock
# JA3 - SSL Detection Goodness
@load policy/ja3
#############################
## End SO Scripts Section ##
#############################
#############################
## Custom Script Section ##
#############################
# You can load your own intel into:
# /opt/so/saltstack/bro/policy/intel/ on the master
@load intel
# Load a custom Bro policy
# /opt/so/saltstack/bro/policy/custom/ on the master
#@load custom/somebropolicy.bro

View File

@@ -0,0 +1,129 @@
##! Local site policy. Customize as appropriate.
##!
##! This file will not be overwritten when upgrading or reinstalling!
# This script logs which scripts were loaded during each run.
@load misc/loaded-scripts
# Apply the default tuning scripts for common tuning settings.
@load tuning/defaults
# Estimate and log capture loss.
@load misc/capture-loss
# Enable logging of memory, packet and lag statistics.
@load misc/stats
# Load the scan detection script.
@load misc/scan
# Detect traceroute being run on the network. This could possibly cause
# performance trouble when there are a lot of traceroutes on your network.
# Enable cautiously.
#@load misc/detect-traceroute
# Generate notices when vulnerable versions of software are discovered.
# The default is to only monitor software found in the address space defined
# as "local". Refer to the software framework's documentation for more
# information.
@load frameworks/software/vulnerable
# Detect software changing (e.g. attacker installing hacked SSHD).
@load frameworks/software/version-changes
# This adds signatures to detect cleartext forward and reverse windows shells.
@load-sigs frameworks/signatures/detect-windows-shells
# Load all of the scripts that detect software in various protocols.
@load protocols/ftp/software
@load protocols/smtp/software
@load protocols/ssh/software
@load protocols/http/software
# The detect-webapps script could possibly cause performance trouble when
# running on live traffic. Enable it cautiously.
#@load protocols/http/detect-webapps
# This script detects DNS results pointing toward your Site::local_nets
# where the name is not part of your local DNS zone and is being hosted
# externally. Requires that the Site::local_zones variable is defined.
@load protocols/dns/detect-external-names
# Script to detect various activity in FTP sessions.
@load protocols/ftp/detect
# Scripts that do asset tracking.
@load protocols/conn/known-hosts
@load protocols/conn/known-services
@load protocols/ssl/known-certs
# This script enables SSL/TLS certificate validation.
@load protocols/ssl/validate-certs
# This script prevents the logging of SSL CA certificates in x509.log
@load protocols/ssl/log-hostcerts-only
# Uncomment the following line to check each SSL certificate hash against the ICSI
# certificate notary service; see http://notary.icsi.berkeley.edu .
# @load protocols/ssl/notary
# If you have libGeoIP support built in, do some geographic detections and
# logging for SSH traffic.
@load protocols/ssh/geo-data
# Detect hosts doing SSH bruteforce attacks.
@load protocols/ssh/detect-bruteforcing
# Detect logins using "interesting" hostnames.
@load protocols/ssh/interesting-hostnames
# Detect SQL injection attacks.
@load protocols/http/detect-sqli
#### Network File Handling ####
# Enable MD5 and SHA1 hashing for all files.
@load frameworks/files/hash-all-files
# Detect SHA1 sums in Team Cymru's Malware Hash Registry.
@load frameworks/files/detect-MHR
# Uncomment the following line to enable detection of the heartbleed attack. Enabling
# this might impact performance a bit.
# @load policy/protocols/ssl/heartbleed
# Uncomment the following line to enable logging of connection VLANs. Enabling
# this adds two VLAN fields to the conn.log file. This may not work properly
# since we use AF_PACKET and it strips VLAN tags.
# @load policy/protocols/conn/vlan-logging
# Uncomment the following line to enable logging of link-layer addresses. Enabling
# this adds the link-layer address for each connection endpoint to the conn.log file.
# @load policy/protocols/conn/mac-logging
# Uncomment the following line to enable the SMB analyzer. The analyzer
# is currently considered a preview and therefore not loaded by default.
# @load policy/protocols/smb
# Add the interface to the log event
#@load securityonion/add-interface-to-logs.bro
# Add Sensor Name to the conn.log
#@load securityonion/conn-add-sensorname.bro
# File Extraction
#@load securityonion/file-extraction
# Intel from Mandiant APT1 Report
#@load securityonion/apt1
# ShellShock - detects successful exploitation of Bash vulnerability CVE-2014-6271
#@load securityonion/shellshock
# JA3 - SSL Detection Goodness
@load policy/ja3
# You can load your own intel into:
# /opt/so/saltstack/bro/policy/intel/ on the master
@load intel
# Load a custom Bro policy
# /opt/so/saltstack/bro/policy/custom/ on the master
#@load custom/somebropolicy.bro

45
salt/bro/files/node.cfg Normal file
View File

@@ -0,0 +1,45 @@
{%- set interface = salt['pillar.get']('sensor:interface', '') %}
{%- if salt['pillar.get']('sensor:bro_pins') or salt['pillar.get']('sensor:bro_lbprocs') %}
{%- if salt['pillar.get']('sensor:bro_proxies') %}
{%- set proxies = salt['pillar.get']('sensor:bro_proxies', '1') %}
{%- else %}
{%- if salt['pillar.get']('sensor:bro_pins') %}
{%- set proxies = (salt['pillar.get']('sensor:bro_pins')|length/10)|round(0, 'ceil')|int %}
{%- else %}
{%- set proxies = (salt['pillar.get']('sensor:bro_lbprocs')/10)|round(0, 'ceil')|int %}
{%- endif %}
{%- endif %}
[manager]
type=manager
host=localhost
{% for demproxies in range(proxies) %}
[proxy-{{ demproxies }}]
type=proxy
host=localhost
{% endfor %}
[worker-1]
type=worker
host=localhost
interface=af_packet::{{ interface }}
lb_method=custom
{%- if salt['pillar.get']('sensor:bro_lbprocs') %}
lb_procs={{ salt['pillar.get']('sensor:bro_lbprocs', '1') }}
{%- else %}
lb_procs={{ salt['pillar.get']('sensor:bro_pins')|length }}
{%- endif %}
{%- if salt['pillar.get']('sensor:bro_pins') %}
pin_cpus={{ salt['pillar.get']('sensor:bro_pins')|join(", ") }}
{%- endif %}
af_packet_fanout_id=23
af_packet_fanout_mode=AF_Packet::FANOUT_HASH
af_packet_buffer_size=128*1024*1024
{%- else %}
[brosa]
type=standalone
host=localhost
interface={{ interface }}
{%- endif %}

103
salt/bro/init.sls Normal file
View File

@@ -0,0 +1,103 @@
# Bro Salt State
# Add Bro group
brogroup:
group.present:
- name: bro
- gid: 937
# Add Bro User
bro:
user.present:
- uid: 937
- gid: 937
- home: /home/bro
# Create some directories
bropolicydir:
file.directory:
- name: /opt/so/conf/bro/policy
- user: 937
- group: 939
- makedirs: True
# Bro Log Directory
brologdir:
file.directory:
- name: /nsm/bro/logs
- user: 937
- group: 939
- makedirs: True
# Bro Spool Directory
brospooldir:
file.directory:
- name: /nsm/bro/spool/manager
- user: 937
- makedirs: true
# Sync the policies
bropolicysync:
file.recurse:
- name: /opt/so/conf/bro/policy
- source: salt://bro/policy
- user: 937
- group: 939
- template: jinja
# Sync node.cfg
nodecfgsync:
file.managed:
- name: /opt/so/conf/bro/node.cfg
- source: salt://bro/files/node.cfg
- user: 937
- group: 939
- template: jinja
# Sync local.bro
{% if salt['pillar.get']('static:broversion', '') == 'COMMUNITY' %}
localbrosync:
file.managed:
- name: /opt/so/conf/bro/local.bro
- source: salt://bro/files/local.bro.community
- user: 937
- group: 939
- template: jinja
so-bro:
docker_container.running:
- image: toosmooth/so-communitybro:techpreview
- privileged: True
- binds:
- /nsm/bro/logs:/nsm/bro/logs:rw
- /nsm/bro/spool:/nsm/bro/spool:rw
- /opt/so/conf/bro/local.bro:/opt/bro/share/bro/site/local.bro:ro
- /opt/so/conf/bro/node.cfg:/opt/bro/etc/node.cfg:ro
- /opt/so/conf/bro/policy/securityonion:/opt/bro/share/bro/policy/securityonion:ro
- /opt/so/conf/bro/policy/custom:/opt/bro/share/bro/policy/custom:ro
- /opt/so/conf/bro/policy/intel:/opt/bro/share/bro/policy/intel:rw
- network_mode: host
{% else %}
localbrosync:
file.managed:
- name: /opt/so/conf/bro/local.bro
- source: salt://bro/files/local.bro
- user: 937
- group: 939
- template: jinja
so-bro:
docker_container.running:
- image: toosmooth/so-bro:techpreview
- privileged: True
- binds:
- /nsm/bro/logs:/nsm/bro/logs:rw
- /nsm/bro/spool:/nsm/bro/spool:rw
- /opt/so/conf/bro/local.bro:/opt/bro/share/bro/site/local.bro:ro
- /opt/so/conf/bro/node.cfg:/opt/bro/etc/node.cfg:ro
- /opt/so/conf/bro/policy/securityonion:/opt/bro/share/bro/policy/securityonion:ro
- /opt/so/conf/bro/policy/custom:/opt/bro/share/bro/policy/custom:ro
- /opt/so/conf/bro/policy/intel:/opt/bro/share/bro/policy/intel:rw
- network_mode: host
{% endif %}

View File

@@ -0,0 +1 @@
#Intel

View File

@@ -0,0 +1,20 @@
{%- set interface = salt['pillar.get']('sensor:interface', '0') %}
global interface = "{{ interface }}";
event bro_init()
{
if ( ! reading_live_traffic() )
return;
Log::remove_default_filter(HTTP::LOG);
Log::add_filter(HTTP::LOG, [$name = "http-interfaces",
$path_func(id: Log::ID, path: string, rec: HTTP::Info) =
{
local peer = get_event_peer()$descr;
if ( peer in Cluster::nodes && Cluster::nodes[peer]?$interface )
return cat("http_", Cluster::nodes[peer]$interface);
else
return "http";
}
]);
}

View File

@@ -0,0 +1,9 @@
@load frameworks/intel/seen
@load frameworks/intel/do_notice
@load frameworks/files/hash-all-files
redef Intel::read_files += {
fmt("%s/apt1-fqdn.dat", @DIR),
fmt("%s/apt1-md5.dat", @DIR),
fmt("%s/apt1-certs.dat", @DIR)
};

View File

@@ -0,0 +1,26 @@
#fields indicator indicator_type meta.source meta.desc meta.do_notice
b054e26ef827fbbf5829f84a9bdbb697a5b042fc Intel::CERT_HASH Mandiant APT1 Report ALPHA T
7bc0cc2cf7c3a996c32dbe7e938993f7087105b4 Intel::CERT_HASH Mandiant APT1 Report AOL T
7855c132af1390413d4e4ff4ead321f8802d8243 Intel::CERT_HASH Mandiant APT1 Report AOL T
f3e3c590d7126bd227733e9d8313d2575c421243 Intel::CERT_HASH Mandiant APT1 Report AOL T
d4d4e896ce7d73b573f0a0006080a246aec61fe7 Intel::CERT_HASH Mandiant APT1 Report AOL T
bcdf4809c1886ac95478bbafde246d0603934298 Intel::CERT_HASH Mandiant APT1 Report AOL T
6b4855df8afc8d57a671fe5ed628f6d88852a922 Intel::CERT_HASH Mandiant APT1 Report AOL T
d50fdc82c328319ac60f256d3119b8708cd5717b Intel::CERT_HASH Mandiant APT1 Report AOL T
70b48d5177eebe9c762e9a37ecabebfd10e1b7e9 Intel::CERT_HASH Mandiant APT1 Report AOL T
3a6a299b764500ce1b6e58a32a257139d61a3543 Intel::CERT_HASH Mandiant APT1 Report AOL T
bf4f90e0029b2263af1141963ddf2a0c71a6b5fb Intel::CERT_HASH Mandiant APT1 Report AOL T
b21139583dec0dae344cca530690ec1f344acc79 Intel::CERT_HASH Mandiant APT1 Report AOL T
21971ffef58baf6f638df2f7e2cceb4c58b173c8 Intel::CERT_HASH Mandiant APT1 Report EMAIL T
04ecff66973c92a1c348666d5a4738557cce0cfc Intel::CERT_HASH Mandiant APT1 Report IBM T
f97d1a703aec44d0f53a3a294e33acda43a49de1 Intel::CERT_HASH Mandiant APT1 Report IBM T
c0d32301a7c96ecb0bc8e381ec19e6b4eaf5d2fe Intel::CERT_HASH Mandiant APT1 Report IBM T
1b27a897cda019da2c3a6dc838761871e8bf5b5d Intel::CERT_HASH Mandiant APT1 Report LAME T
d515996e8696612dc78fc6db39006466fc6550df Intel::CERT_HASH Mandiant APT1 Report MOON-NIGHT T
8f79315659e59c79f1301ef4aee67b18ae2d9f1c Intel::CERT_HASH Mandiant APT1 Report NONAME T
a57a84975e31e376e3512da7b05ad06ef6441f53 Intel::CERT_HASH Mandiant APT1 Report NS T
b3db37a0edde97b3c3c15da5f2d81d27af82f583 Intel::CERT_HASH Mandiant APT1 Report SERVER (PEM) T
6d8f1454f6392361fb2464b744d4fc09eee5fcfd Intel::CERT_HASH Mandiant APT1 Report SUR T
b66e230f404b2cc1c033ccacda5d0a14b74a2752 Intel::CERT_HASH Mandiant APT1 Report VIRTUALLYTHERE T
4acbadb86a91834493dde276736cdf8f7ef5d497 Intel::CERT_HASH Mandiant APT1 Report WEBMAIL T
86a48093d9b577955c4c9bd19e30536aae5543d4 Intel::CERT_HASH Mandiant APT1 Report YAHOO T

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,10 @@
global sensorname = "{{ grains.host }}";
redef record Conn::Info += {
sensorname: string &log &optional;
};
event connection_state_remove(c: connection)
{
c$conn$sensorname = sensorname;
}

View File

@@ -0,0 +1 @@
@load ./extract

View File

@@ -0,0 +1,21 @@
global ext_map: table[string] of string = {
["application/x-dosexec"] = "exe",
["text/plain"] = "txt",
["image/jpeg"] = "jpg",
["image/png"] = "png",
["text/html"] = "html",
} &default ="";
event file_sniff(f: fa_file, meta: fa_metadata)
{
if ( ! meta?$mime_type || meta$mime_type != "application/x-dosexec" )
return;
local ext = "";
if ( meta?$mime_type )
ext = ext_map[meta$mime_type];
local fname = fmt("/nsm/bro/extracted/%s-%s.%s", f$source, f$id, ext);
Files::add_analyzer(f, Files::ANALYZER_EXTRACT, [$extract_filename=fname]);
}

View File

@@ -0,0 +1,3 @@
@load tuning/json-logs
redef LogAscii::json_timestamps = JSON::TS_ISO8601;
redef LogAscii::use_json = T;

View File

@@ -0,0 +1,40 @@
x509_signing_policies:
filebeat:
- minions: '*'
- signing_private_key: /etc/pki/ca.key
- signing_cert: /etc/pki/ca.crt
- C: US
- ST: Utah
- L: Salt Lake City
- basicConstraints: "critical CA:false"
- keyUsage: "digitalSignature, nonRepudiation"
- subjectKeyIdentifier: hash
- authorityKeyIdentifier: keyid,issuer:always
- days_valid: 3000
- copypath: /etc/pki/issued_certs/
registry:
- minions: '*'
- signing_private_key: /etc/pki/ca.key
- signing_cert: /etc/pki/ca.crt
- C: US
- ST: Utah
- L: Salt Lake City
- basicConstraints: "critical CA:false"
- keyUsage: "critical keyEncipherment"
- subjectKeyIdentifier: hash
- authorityKeyIdentifier: keyid,issuer:always
- days_valid: 3000
- copypath: /etc/pki/issued_certs/
masterssl:
- minions: '*'
- signing_private_key: /etc/pki/ca.key
- signing_cert: /etc/pki/ca.crt
- C: US
- ST: Utah
- L: Salt Lake City
- basicConstraints: "critical CA:false"
- keyUsage: "critical keyEncipherment"
- subjectKeyIdentifier: hash
- authorityKeyIdentifier: keyid,issuer:always
- days_valid: 3000
- copypath: /etc/pki/issued_certs/

48
salt/ca/init.sls Normal file
View File

@@ -0,0 +1,48 @@
{% set master = salt['grains.get']('master') %}
/etc/salt/minion.d/signing_policies.conf:
file.managed:
- source: salt://ca/files/signing_policies.conf
/etc/pki:
file.directory: []
/etc/pki/issued_certs:
file.directory: []
pki_private_key:
x509.private_key_managed:
- name: /etc/pki/ca.key
- bits: 4096
- passphrase:
- cipher: aes_256_cbc
- backup: True
/etc/pki/ca.crt:
x509.certificate_managed:
- signing_private_key: /etc/pki/ca.key
- CN: {{ master }}
- C: US
- ST: Utah
- L: Salt Lake City
- basicConstraints: "critical CA:true"
- keyUsage: "critical cRLSign, keyCertSign"
- extendedkeyUsage: "serverAuth, clientAuth"
- subjectKeyIdentifier: hash
- authorityKeyIdentifier: keyid,issuer:always
- days_valid: 3650
- days_remaining: 0
- backup: True
- managed_private_key:
name: /etc/pki/ca.key
bits: 4096
backup: True
- require:
- file: /etc/pki
mine.send:
module.run:
- func: x509.get_pem_entries
- kwargs:
glob_path: /etc/pki/ca.crt
- onchanges:
- x509: /etc/pki/ca.crt

129
salt/common/init.sls Normal file
View File

@@ -0,0 +1,129 @@
# Add socore Group
socoregroup:
group.present:
- name: socore
- gid: 939
# Add socore user
socore:
user.present:
- uid: 939
- gid: 939
- home: /opt/so
- createhome: True
- shell: /bin/bash
# Create a state directory
statedir:
file.directory:
- name: /opt/so/state
- user: 939
- group: 939
- makedirs: True
salttmp:
file.directory:
- name: /opt/so/tmp
- user: 939
- group: 939
- makedirs: True
# Install packages needed for the sensor
sensorpkgs:
pkg.installed:
- skip_suggestions: False
- pkgs:
- docker-ce
{% if grains['os'] != 'CentOS' %}
- python-docker
- python-m2crypto
{% endif %}
# Always keep these packages up to date
alwaysupdated:
pkg.latest:
- pkgs:
- openssl
- openssh-server
- bash
- skip_suggestions: True
# Set time to UTC
Etc/UTC:
timezone.system
# Make sure Docker is running!
docker:
service.running:
- enable: True
# Set up docker network
# This is broken right now.
#dockernet:
# docker_network.present:
# - name: so-elastic-net
# - driver: bridge
# dockernet work around
#dockernet:
# cmd.script:
# - source: salt://common/scripts/dockernet.sh
# Snag the so-core docker
toosmooth/so-core:test2:
docker_image.present
# Drop the correct nginx config based on role
nginxconfdir:
file.directory:
- name: /opt/so/conf/nginx
- user: 939
- group: 939
- makedirs: True
nginxconf:
file.managed:
- name: /opt/so/conf/nginx/nginx.conf
- user: 939
- group: 939
- template: jinja
- source: salt://common/nginx/nginx.conf.{{ grains.role }}
nginxlogdir:
file.directory:
- name: /opt/so/log/nginx/
- user: 939
- group: 939
- makedirs: True
nginxtmp:
file.directory:
- name: /opt/so/tmp/nginx/tmp
- user: 939
- group: 939
- makedirs: True
# Start the core docker
so-core:
docker_container.running:
- image: toosmooth/so-core:techpreview
- hostname: so-core
- user: socore
- binds:
- /opt/so:/opt/so:rw
- /opt/so/conf/nginx/nginx.conf:/etc/nginx/nginx.conf:ro
- /opt/so/log/nginx/:/var/log/nginx:rw
- /opt/so/tmp/nginx/:/var/lib/nginx:rw
- /opt/so/tmp/nginx/:/run:rw
- /etc/pki/masterssl.crt:/etc/pki/nginx/server.crt:ro
- /etc/pki/masterssl.key:/etc/pki/nginx/server.key:ro
- cap_add: NET_BIND_SERVICE
- port_bindings:
- 80:80
- 443:443

View File

@@ -0,0 +1,89 @@
# For more information on configuration, see:
# * Official English Documentation: http://nginx.org/en/docs/
# * Official Russian Documentation: http://nginx.org/ru/docs/
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log;
pid /run/nginx.pid;
# Load dynamic modules. See /usr/share/nginx/README.dynamic.
include /usr/share/nginx/modules/*.conf;
events {
worker_connections 1024;
}
http {
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
include /etc/nginx/mime.types;
default_type application/octet-stream;
# Load modular configuration files from the /etc/nginx/conf.d directory.
# See http://nginx.org/en/docs/ngx_core_module.html#include
# for more information.
include /etc/nginx/conf.d/*.conf;
server {
listen 80 default_server;
listen [::]:80 default_server;
server_name _;
root /usr/share/nginx/html;
# Load configuration files for the default server block.
include /etc/nginx/default.d/*.conf;
location / {
}
error_page 404 /404.html;
location = /40x.html {
}
error_page 500 502 503 504 /50x.html;
location = /50x.html {
}
}
# Settings for a TLS enabled server.
#
# server {
# listen 443 ssl http2 default_server;
# listen [::]:443 ssl http2 default_server;
# server_name _;
# root /usr/share/nginx/html;
#
# ssl_certificate "/etc/pki/nginx/server.crt";
# ssl_certificate_key "/etc/pki/nginx/private/server.key";
# ssl_session_cache shared:SSL:1m;
# ssl_session_timeout 10m;
# ssl_ciphers HIGH:!aNULL:!MD5;
# ssl_prefer_server_ciphers on;
#
# # Load configuration files for the default server block.
# include /etc/nginx/default.d/*.conf;
#
# location / {
# }
#
# error_page 404 /404.html;
# location = /40x.html {
# }
#
# error_page 500 502 503 504 /50x.html;
# location = /50x.html {
# }
# }
}

View File

@@ -0,0 +1,110 @@
{%- set masterip = salt['pillar.get']('master:mainip', '') %}
# For more information on configuration, see:
# * Official English Documentation: http://nginx.org/en/docs/
# * Official Russian Documentation: http://nginx.org/ru/docs/
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log;
pid /run/nginx.pid;
# Load dynamic modules. See /usr/share/nginx/README.dynamic.
include /usr/share/nginx/modules/*.conf;
events {
worker_connections 1024;
}
http {
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
include /etc/nginx/mime.types;
default_type application/octet-stream;
# Load modular configuration files from the /etc/nginx/conf.d directory.
# See http://nginx.org/en/docs/ngx_core_module.html#include
# for more information.
include /etc/nginx/conf.d/*.conf;
#server {
# listen 80 default_server;
# listen [::]:80 default_server;
# server_name _;
# root /opt/socore/html;
# index index.html;
# Load configuration files for the default server block.
#include /etc/nginx/default.d/*.conf;
# location / {
# }
# error_page 404 /404.html;
# location = /40x.html {
# }
# error_page 500 502 503 504 /50x.html;
# location = /50x.html {
# }
#}
server {
listen 80 default_server;
server_name _;
return 301 https://$host$request_uri;
}
# Settings for a TLS enabled server.
server {
listen 443 ssl http2 default_server;
#listen [::]:443 ssl http2 default_server;
server_name _;
root /opt/socore/html;
index index.html;
ssl_certificate "/etc/pki/nginx/server.crt";
ssl_certificate_key "/etc/pki/nginx/server.key";
ssl_session_cache shared:SSL:1m;
ssl_session_timeout 10m;
ssl_ciphers HIGH:!aNULL:!MD5;
ssl_prefer_server_ciphers on;
# Load configuration files for the default server block.
#include /etc/nginx/default.d/*.conf;
#location / {
# try_files $uri $uri.html /index.html;
# }
location / {
proxy_pass http://{{ masterip }}:5601/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
}
error_page 404 /404.html;
location = /40x.html {
}
error_page 500 502 503 504 /50x.html;
location = /50x.html {
}
}
}

View File

@@ -0,0 +1,89 @@
# NOTE(review): this appears to be the unmodified stock distro nginx.conf
# (default server on port 80, commented-out TLS template); the Security
# Onion specific configuration lives in the other nginx config file.
# For more information on configuration, see:
# * Official English Documentation: http://nginx.org/en/docs/
# * Official Russian Documentation: http://nginx.org/ru/docs/
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log;
pid /run/nginx.pid;
# Load dynamic modules. See /usr/share/nginx/README.dynamic.
include /usr/share/nginx/modules/*.conf;
events {
    worker_connections 1024;
}
http {
    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
    '$status $body_bytes_sent "$http_referer" '
    '"$http_user_agent" "$http_x_forwarded_for"';
    access_log /var/log/nginx/access.log main;
    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 65;
    types_hash_max_size 2048;
    include /etc/nginx/mime.types;
    default_type application/octet-stream;
    # Load modular configuration files from the /etc/nginx/conf.d directory.
    # See http://nginx.org/en/docs/ngx_core_module.html#include
    # for more information.
    include /etc/nginx/conf.d/*.conf;
    server {
        listen 80 default_server;
        listen [::]:80 default_server;
        server_name _;
        root /usr/share/nginx/html;
        # Load configuration files for the default server block.
        include /etc/nginx/default.d/*.conf;
        location / {
        }
        error_page 404 /404.html;
        location = /40x.html {
        }
        error_page 500 502 503 504 /50x.html;
        location = /50x.html {
        }
    }
    # Settings for a TLS enabled server.
    #
    # server {
    # listen 443 ssl http2 default_server;
    # listen [::]:443 ssl http2 default_server;
    # server_name _;
    # root /usr/share/nginx/html;
    #
    # ssl_certificate "/etc/pki/nginx/server.crt";
    # ssl_certificate_key "/etc/pki/nginx/private/server.key";
    # ssl_session_cache shared:SSL:1m;
    # ssl_session_timeout 10m;
    # ssl_ciphers HIGH:!aNULL:!MD5;
    # ssl_prefer_server_ciphers on;
    #
    # # Load configuration files for the default server block.
    # include /etc/nginx/default.d/*.conf;
    #
    # location / {
    # }
    #
    # error_page 404 /404.html;
    # location = /40x.html {
    # }
    #
    # error_page 500 502 503 504 /50x.html;
    # location = /50x.html {
    # }
    # }
}

View File

@@ -0,0 +1,8 @@
#!/bin/bash
# Create the so-elastic-net Docker bridge network exactly once, using a
# state file as the "already done" marker.
if [ ! -f /opt/so/state/dockernet.state ]; then
    # Only record success if the network was actually created; the original
    # touched the state file unconditionally, so a failed "docker network
    # create" was never retried on subsequent runs.
    if docker network create -d bridge so-elastic-net; then
        touch /opt/so/state/dockernet.state
    fi
fi
exit 0

View File

@@ -0,0 +1,30 @@
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
  1:
    action: close
    description: >-
      Close indices older than 2 days (based on index name), for logstash-
      prefixed indices.
    options:
      delete_aliases: False
      # timeout_override left empty: Curator uses its default timeout.
      timeout_override:
      continue_if_exception: False
      disable_action: False
    filters:
    - filtertype: pattern
      kind: prefix
      value: logstash-
      # empty exclude: this filter selects (does not negate) the match.
      exclude:
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: 2
      # empty exclude: keep indices matched by this age filter.
      exclude:

View File

@@ -0,0 +1,23 @@
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
1:
action: delete_indices
description: >-
Delete indices when $disk_space value (in GB) is exceeded.
options:
ignore_empty_list: True
disable_action: False
filters:
- filtertype: pattern
kind: prefix
value: logstash-
- filtertype: space
source: creation_date
use_age: True
disk_space: 43

View File

@@ -0,0 +1,22 @@
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
client:
hosts:
- elasticsearch
port: 9200
url_prefix:
use_ssl: False
certificate:
client_cert:
client_key:
ssl_no_validate: False
http_auth:
timeout: 30
master_only: False
logging:
loglevel: INFO
logfile: '/var/log/curator/curator.log'
logformat: default
blacklist: ['elasticsearch', 'urllib3']

View File

@@ -0,0 +1,24 @@
{% if grains['role'] == 'so-master' %}
{%- set esclustername = salt['pillar.get']('master:esclustername', '') %}
cluster.name: "{{ esclustername }}"
network.host: 0.0.0.0
# minimum_master_nodes need to be explicitly set when bound on a public IP
# set to 1 to allow single node clusters
# Details: https://github.com/elastic/elasticsearch/pull/17288
discovery.zen.minimum_master_nodes: 1
# This is a test -- if this is here, then the volume is mounted correctly.
path.logs: /var/log/elasticsearch
action.destructive_requires_name: true
{%- else %}
{%- set esclustername = salt['grains.get']('host', '') %}
{%- set nodeip = salt['pillar.get']('node:mainip', '') -%}
cluster.name: "{{ esclustername }}"
network.host: 0.0.0.0
discovery.zen.minimum_master_nodes: 1
path.logs: /var/log/elasticsearch
action.destructive_requires_name: true
transport.bind_host: 0.0.0.0
transport.publish_host: {{ nodeip }}
transport.publish_port: 9300
{%- endif %}

View File

@@ -0,0 +1,20 @@
# Elasticsearch log4j2 configuration: daily-rolling file appender only
# (console logging is intentionally commented out for the container).
status = error
#appender.console.type = Console
#appender.console.name = console
#appender.console.layout.type = PatternLayout
#appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%m%n
#rootLogger.level = info
#rootLogger.appenderRef.console.ref = console
# Rolling file appender writing under es.logs.base_path (the bind-mounted
# log volume) -- also serves as a sanity check that the mount worked.
appender.rolling.type = RollingFile
appender.rolling.name = rolling
appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.log
appender.rolling.layout.type = PatternLayout
appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %.10000m%n
# Roll to a dated file once per day (interval 1, modulated to midnight).
appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}.log
appender.rolling.policies.type = Policies
appender.rolling.policies.time.type = TimeBasedTriggeringPolicy
appender.rolling.policies.time.interval = 1
appender.rolling.policies.time.modulate = true
rootLogger.level = info
rootLogger.appenderRef.rolling.ref = rolling

313
salt/elasticsearch/init.sls Normal file
View File

@@ -0,0 +1,313 @@
# Copyright 2014,2015,2016,2017,2018 Security Onion Solutions, LLC
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Pull per-role settings from pillar. Feature toggles default to integers
# so the "== 1" checks further down behave correctly when the pillar key
# is absent: the original string defaults ('0'/'1') never compare equal to
# the integer 1, which in particular left elastalert disabled by default
# despite its intended default of '1'.
{% if grains['role'] == 'so-master' %}
{% set esclustername = salt['pillar.get']('master:esclustername', '') %}
{% set esheap = salt['pillar.get']('master:esheap', '') %}
{% set freq = salt['pillar.get']('master:freq', 0) %}
{% set dstats = salt['pillar.get']('master:dstats', 0) %}
{% set esalert = salt['pillar.get']('master:elastalert', 1) %}
{% elif grains['role'] == 'so-node' %}
{% set esclustername = salt['pillar.get']('node:esclustername', '') %}
{% set esheap = salt['pillar.get']('node:esheap', '') %}
{% set freq = salt['pillar.get']('node:freq', 0) %}
{% set dstats = salt['pillar.get']('node:dstats', 0) %}
{% set esalert = salt['pillar.get']('node:elastalert', 1) %}
{% endif %}
# Elasticsearch requires a raised mmap count (bootstrap check).
vm.max_map_count:
  sysctl.present:
    - value: 262144
# Add ES Group
elasticsearchgroup:
  group.present:
    - name: elasticsearch
    - gid: 930
# Add ES user
elasticsearch:
  user.present:
    - uid: 930
    - gid: 930
    - home: /opt/so/conf/elasticsearch
    - createhome: False
# NOTE(review): the directories/files below use group 939, not the 930
# group created above -- presumably 939 is the socore group defined in
# another state; confirm, otherwise these should be group 930.
esconfdir:
  file.directory:
    - name: /opt/so/conf/elasticsearch
    - user: 930
    - group: 939
    - makedirs: True
eslog4jfile:
  file.managed:
    - name: /opt/so/conf/elasticsearch/log4j2.properties
    - source: salt://elasticsearch/files/log4j2.properties
    - user: 930
    - group: 939
    - template: jinja
esyml:
  file.managed:
    - name: /opt/so/conf/elasticsearch/elasticsearch.yml
    - source: salt://elasticsearch/files/elasticsearch.yml
    - user: 930
    - group: 939
    - template: jinja
nsmesdir:
  file.directory:
    - name: /nsm/elasticsearch
    - user: 930
    - group: 939
    - makedirs: True
eslogdir:
  file.directory:
    - name: /opt/so/log/elasticsearch
    - user: 930
    - group: 939
    - makedirs: True
# Run the Elasticsearch container with config, data and logs bind-mounted
# from the host paths prepared above.
# NOTE(review): image is pinned to :latest -- consider pinning a version
# so minions cannot drift.
so-elasticsearch:
  docker_container.running:
    - image: securityonionsolutions/so-elasticsearch:latest
    - hostname: elasticsearch
    - name: elasticsearch
    - user: elasticsearch
    - environment:
      - bootstrap.memory_lock=true
      - cluster.name={{ esclustername }}
      - ES_JAVA_OPTS=-Xms{{ esheap }} -Xmx{{ esheap }}
      - http.host=0.0.0.0
      - transport.host=127.0.0.1
    - ulimits:
      - memlock=-1:-1
      - nofile=65536:65536
      - nproc=4096
    - port_bindings:
      - 0.0.0.0:9200:9200
      - 0.0.0.0:9300:9300
    - binds:
      - /opt/so/conf/elasticsearch/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro
      - /opt/so/conf/elasticsearch/log4j2.properties:/usr/share/elasticsearch/config/log4j2.properties:ro
      - /nsm/elasticsearch:/usr/share/elasticsearch/data:rw
      - /opt/so/log/elasticsearch:/var/log/elasticsearch:rw
# Tell the main cluster I am here
#curl -XPUT http://\$ELASTICSEARCH_HOST:\$ELASTICSEARCH_PORT/_cluster/settings -H'Content-Type: application/json' -d '{"persistent": {"search": {"remote": {"$HOSTNAME": {"skip_unavailable": "true", "seeds": ["$DOCKER_INTERFACE:$REVERSE_PORT"]}}}}}'
# See if Freqserver is enabled
# Cast to int so both string ('1') and integer pillar values enable the
# feature; the bare "freq == 1" comparison is false for string values.
{% if freq|int == 1 %}
# Create the freqserver group
fservergroup:
  group.present:
    - name: freqserver
    - gid: 935
# Add the freqserver user
freqserver:
  user.present:
    - uid: 935
    - gid: 935
    - home: /opt/so/conf/freqserver
    - createhome: False
# Create the log directory
freqlogdir:
  file.directory:
    - name: /opt/so/log/freq_server
    - user: 935
    - group: 935
    - makedirs: True
# Run the freq_server container with its log directory bind-mounted.
so-freq:
  docker_container.running:
    - image: securityonionsolutions/so-freqserver
    - hostname: freqserver
    - user: freqserver
    - binds:
      - /opt/so/log/freq_server:/var/log/freq_server:rw
{% endif %}
# Cast to int so both string ('1') and integer pillar values enable the
# feature; the bare "dstats == 1" comparison is false for string values.
{% if dstats|int == 1 %}
# Create the group
dstatsgroup:
  group.present:
    - name: domainstats
    - gid: 936
# Add user
domainstats:
  user.present:
    - uid: 936
    - gid: 936
    - home: /opt/so/conf/domainstats
    - createhome: False
# Create the log directory
dstatslogdir:
  file.directory:
    - name: /opt/so/log/domainstats
    - user: 936
    - group: 939
    - makedirs: True
# Run the domain_stats container with its log directory bind-mounted.
so-domainstats:
  docker_container.running:
    - image: securityonionsolutions/so-domainstats
    - hostname: domainstats
    - name: domainstats
    - user: domainstats
    - binds:
      - /opt/so/log/domainstats:/var/log/domain_stats
{% endif %}
# Curator
# Create the group
curatorgroup:
  group.present:
    - name: curator
    - gid: 934
# Add user
curator:
  user.present:
    - uid: 934
    - gid: 934
    - home: /opt/so/conf/curator
    - createhome: False
# Create the log directory
curactiondir:
  file.directory:
    - name: /opt/so/conf/curator/action
    - user: 934
    - group: 939
    - makedirs: True
# NOTE(review): unlike the other directory states this one has no
# "makedirs: True" -- it fails if /opt/so/log does not already exist;
# confirm that is guaranteed by an earlier state.
curlogdir:
  file.directory:
    - name: /opt/so/log/curator
    - user: 934
    - group: 939
curclose:
  file.managed:
    - name: /opt/so/conf/curator/action/close.yml
    - source: salt://elasticsearch/files/curator/action/close.yml
    - user: 934
    - group: 939
    - template: jinja
curdel:
  file.managed:
    - name: /opt/so/conf/curator/action/delete.yml
    - source: salt://elasticsearch/files/curator/action/delete.yml
    - user: 934
    - group: 939
    - template: jinja
curconf:
  file.managed:
    - name: /opt/so/conf/curator/curator.yml
    - source: salt://elasticsearch/files/curator/curator.yml
    - user: 934
    - group: 939
    - template: jinja
# Curator container; interactive+tty keep it running so cron jobs can
# exec the action files mounted read-only below.
so-curator:
  docker_container.running:
    - image: securityonionsolutions/so-curator
    - hostname: curator
    - name: curator
    - user: curator
    - interactive: True
    - tty: True
    - binds:
      - /opt/so/conf/curator/curator.yml:/etc/curator/config/curator.yml:ro
      - /opt/so/conf/curator/action/:/etc/curator/action:ro
      - /opt/so/log/curator:/var/log/curator
# Begin Curator Cron Jobs
# (placeholders below are not yet implemented)
# Close
# Delete
# Hot Warm
# Segment Merge
# End Curator Cron Jobs
# Elastalert
# Cast to int so both string ('1') and integer pillar values enable the
# feature; the bare "esalert == 1" comparison is false for string values.
{% if esalert|int == 1 %}
# Create the group
elastagroup:
  group.present:
    - name: elastalert
    - gid: 933
# Add user
elastalert:
  user.present:
    - uid: 933
    - gid: 933
    - home: /opt/so/conf/elastalert
    - createhome: False
elastalogdir:
  file.directory:
    - name: /opt/so/log/elastalert
    - user: 933
    - group: 939
    - makedirs: True
elastarules:
  file.directory:
    - name: /opt/so/rules/elastalert
    - user: 933
    - group: 939
    - makedirs: True
elastaconf:
  file.directory:
    - name: /opt/so/conf/elastalert
    - user: 933
    - group: 939
    - makedirs: True
so-elastalert:
  docker_container.running:
    - image: securityonionsolutions/so-elastalert
    - hostname: elastalert
    - name: elastalert
    - user: elastalert
    - detach: True
    - binds:
      # Mount the rules directory this state actually creates (elastarules
      # above); the original bound host path /etc/elastalert/rules/, which
      # nothing in this file creates or populates.
      - /opt/so/rules/elastalert/:/etc/elastalert/rules/
      - /opt/so/log/elastalert:/var/log/elastalert
{% endif %}

View File

@@ -0,0 +1,302 @@
{%- set MASTER = salt['pillar.get']('sensor:master', '') %}
{%- set HOSTNAME = salt['grains.get']('host', '') %}
name: {{ HOSTNAME }}
#========================== Modules configuration ============================
filebeat.modules:
#=========================== Filebeat prospectors =============================
# List of prospectors to fetch data.
filebeat.prospectors:
#------------------------------ Log prospector --------------------------------
# One log prospector per enabled Bro log. The default is an empty LIST so
# the template still renders when the 'brologs:enabled' pillar key is
# missing (the original '' default is safe only because it is empty --
# Jinja iterates a non-empty string character by character).
{%- for LOGNAME in salt['pillar.get']('brologs:enabled', []) %}
- type: log
  paths:
    - /nsm/bro/spool/{{ LOGNAME }}.log
  fields:
    type: bro_{{ LOGNAME }}
  fields_under_root: true
  tags: ["bro"]
  clean_removed: false
  close_removed: false
{%- endfor %}
# Suricata EVE JSON prospector; the 'snort' type is kept for downstream
# pipeline compatibility.
- type: log
  paths:
    - /suricata/eve.json
  fields:
    type: snort
  fields_under_root: true
  tags: ["ids"]
  clean_removed: false
  close_removed: false
#----------------------------- Logstash output ---------------------------------
output.logstash:
  # Boolean flag to enable or disable the output module.
  enabled: true
  # The Logstash hosts
  hosts: ["{{ MASTER }}:5044"]
  # Number of workers per Logstash host.
  worker: 1
  # Set gzip compression level.
  compression_level: 3
  # Enable SSL support. SSL is automatically enabled, if any SSL setting is set.
  ssl.enabled: true
  # Configure SSL verification mode. If `none` is configured, all server hosts
  # and certificates will be accepted. In this mode, SSL based connections are
  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
  # `full`.
  ssl.verification_mode: full
  # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
  # 1.2 are enabled.
  #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
  # Optional SSL configuration options. SSL is off by default.
  # List of root certificates for HTTPS server verifications
  ssl.certificate_authorities: ["/usr/share/filebeat/intraca.crt"]
  # Certificate for SSL client authentication
  ssl.certificate: "/usr/share/filebeat/filebeat.crt"
  # Client Certificate Key
  ssl.key: "/usr/share/filebeat/filebeat.key"
# Elasticsearch template settings
#setup.template.settings:
# A dictionary of settings to place into the settings.index dictionary
# of the Elasticsearch template. For more details, please check
# https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping.html
#index:
#number_of_shards: 1
#codec: best_compression
#number_of_routing_shards: 30
# A dictionary of settings for the _source field. For more details, please check
# https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-source-field.html
#_source:
#enabled: false
#============================== Kibana =====================================
# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
# This requires a Kibana endpoint configuration.
#setup.kibana:
# Kibana Host
# Scheme and port can be left out and will be set to the default (http and 5601)
# In case you specify and additional path, the scheme is required: http://localhost:5601/path
# IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
#host: "localhost:5601"
# Optional protocol and basic auth credentials.
#protocol: "https"
#username: "elastic"
#password: "changeme"
# Optional HTTP Path
#path: ""
# Use SSL settings for HTTPS. Default is true.
#ssl.enabled: true
# Configure SSL verification mode. If `none` is configured, all server hosts
# and certificates will be accepted. In this mode, SSL based connections are
# susceptible to man-in-the-middle attacks. Use only for testing. Default is
# `full`.
#ssl.verification_mode: full
# List of supported/valid TLS versions. By default all TLS versions 1.0 up to
# 1.2 are enabled.
#ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
# SSL configuration. By default is off.
# List of root certificates for HTTPS server verifications
#ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
# Certificate for SSL client authentication
#ssl.certificate: "/etc/pki/client/cert.pem"
# Client Certificate Key
#ssl.key: "/etc/pki/client/cert.key"
# Optional passphrase for decrypting the Certificate Key.
#ssl.key_passphrase: ''
# Configure cipher suites to be used for SSL connections
#ssl.cipher_suites: []
# Configure curve types for ECDHE based cipher suites
#ssl.curve_types: []
#================================ Logging ======================================
# There are four options for the log output: file, stderr, syslog, eventlog
# The file output is the default.
# Sets log level. The default log level is info.
# Available log levels are: error, warning, info, debug
#logging.level: info
# Enable debug output for selected components. To enable all selectors use ["*"]
# Other available selectors are "beat", "publish", "service"
# Multiple selectors can be chained.
#logging.selectors: [ ]
# Send all logging output to syslog. The default is false.
#logging.to_syslog: false
# Send all logging output to Windows Event Logs. The default is false.
#logging.to_eventlog: false
# If enabled, filebeat periodically logs its internal metrics that have changed
# in the last period. For each metric that changed, the delta from the value at
# the beginning of the period is logged. Also, the total values for
# all non-zero internal metrics are logged on shutdown. The default is true.
#logging.metrics.enabled: true
# The period after which to log the internal metrics. The default is 30s.
#logging.metrics.period: 30s
# Logging to rotating files. Set logging.to_files to false to disable logging to
# files.
logging.to_files: true
logging.files:
# Configure the path where the logs are written. The default is the logs directory
# under the home path (the binary location).
path: /var/log/filebeat
# The name of the files where the logs are written to.
name: filebeat
# Configure log file size limit. If limit is reached, log file will be
# automatically rotated
rotateeverybytes: 10485760 # = 10MB
# Number of rotated log files to keep. Oldest files will be deleted first.
keepfiles: 7
# The permissions mask to apply when rotating log files. The default value is 0600.
# Must be a valid Unix-style file permissions mask expressed in octal notation.
#permissions: 0600
# Set to true to log messages in json format.
#logging.json: false
#============================== Xpack Monitoring =====================================
# filebeat can export internal metrics to a central Elasticsearch monitoring cluster.
# This requires xpack monitoring to be enabled in Elasticsearch.
# The reporting is disabled by default.
# Set to true to enable the monitoring reporter.
#xpack.monitoring.enabled: false
# Uncomment to send the metrics to Elasticsearch. Most settings from the
# Elasticsearch output are accepted here as well. Any setting that is not set is
# automatically inherited from the Elasticsearch output configuration, so if you
# have the Elasticsearch output configured, you can simply uncomment the
# following line, and leave the rest commented out.
#xpack.monitoring.elasticsearch:
# Array of hosts to connect to.
# Scheme and port can be left out and will be set to the default (http and 9200)
# In case you specify and additional path, the scheme is required: http://localhost:9200/path
# IPv6 addresses should always be defined as: https://[2001:db8::1]:9200
#hosts: ["localhost:9200"]
# Set gzip compression level.
#compression_level: 0
# Optional protocol and basic auth credentials.
#protocol: "https"
#username: "beats_system"
#password: "changeme"
# Dictionary of HTTP parameters to pass within the url with index operations.
#parameters:
#param1: value1
#param2: value2
# Custom HTTP headers to add to each request
#headers:
# X-My-Header: Contents of the header
# Proxy server url
#proxy_url: http://proxy:3128
# The number of times a particular Elasticsearch index operation is attempted. If
# the indexing operation doesn't succeed after this many retries, the events are
# dropped. The default is 3.
#max_retries: 3
# The maximum number of events to bulk in a single Elasticsearch bulk API index request.
# The default is 50.
#bulk_max_size: 50
# Configure http request timeout before failing an request to Elasticsearch.
#timeout: 90
# Use SSL settings for HTTPS.
#ssl.enabled: true
# Configure SSL verification mode. If `none` is configured, all server hosts
# and certificates will be accepted. In this mode, SSL based connections are
# susceptible to man-in-the-middle attacks. Use only for testing. Default is
# `full`.
#ssl.verification_mode: full
# List of supported/valid TLS versions. By default all TLS versions 1.0 up to
# 1.2 are enabled.
#ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
# SSL configuration. By default is off.
# List of root certificates for HTTPS server verifications
#ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
# Certificate for SSL client authentication
#ssl.certificate: "/etc/pki/client/cert.pem"
# Client Certificate Key
#ssl.key: "/etc/pki/client/cert.key"
# Optional passphrase for decrypting the Certificate Key.
#ssl.key_passphrase: ''
# Configure cipher suites to be used for SSL connections
#ssl.cipher_suites: []
# Configure curve types for ECDHE based cipher suites
#ssl.curve_types: []
# Configure what types of renegotiation are supported. Valid options are
# never, once, and freely. Default is never.
#ssl.renegotiation: never
#================================ HTTP Endpoint ======================================
# Each beat can expose internal metrics through a HTTP endpoint. For security
# reasons the endpoint is disabled by default. This feature is currently experimental.
# Stats can be access through http://localhost:5066/stats . For pretty JSON output
# append ?pretty to the URL.
# Defines if the HTTP endpoint is enabled.
#http.enabled: false
# The HTTP endpoint will bind to this hostname or IP address. It is recommended to use only localhost.
#http.host: localhost
# Port on which the HTTP endpoint will bind. Default is 5066.

64
salt/filebeat/init.sls Normal file
View File

@@ -0,0 +1,64 @@
# Copyright 2014,2015,2016,2017,2018 Security Onion Solutions, LLC
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Filebeat Setup
# (removed the unused 'lsaccessip' jinja variable the original set here --
# nothing in this state referenced it)
filebeatetcdir:
  file.directory:
    - name: /opt/so/conf/filebeat/etc
    - user: 939
    - group: 939
    - makedirs: True
filebeatpkidir:
  file.directory:
    - name: /opt/so/conf/filebeat/etc/pki
    - user: 939
    - group: 939
    - makedirs: True
# This needs to be owned by root
# Render every file under salt://filebeat/etc (including filebeat.yml)
# through Jinja into the host config directory.
filebeatconfsync:
  file.recurse:
    - name: /opt/so/conf/filebeat/etc
    - source: salt://filebeat/etc
    - user: 0
    - group: 0
    - template: jinja
#filebeatcrt:
#  file.managed:
#    - name: /opt/so/conf/filebeat/etc/pki/filebeat.crt
#    - source: salt://filebeat/files/filebeat.crt
#filebeatkey:
#  file.managed:
#    - name: /opt/so/conf/filebeat/etc/pki/filebeat.key
#    - source: salt://filebeat/files/filebeat.key
# Filebeat container: reads Bro spool and Suricata EVE logs read-only and
# ships them to Logstash using the mounted client cert/key.
# NOTE(review): /opt/so/log/filebeat is not created by any state in this
# file, so Docker will create it root-owned -- confirm that is acceptable.
so-filebeat:
  docker_container.running:
    - image: toosmooth/so-filebeat:techpreview
    - hostname: so-filebeat
    - user: root
    - binds:
      - /opt/so/log/filebeat:/var/log/filebeat:rw
      - /opt/so/conf/filebeat/etc/filebeat.yml:/usr/share/filebeat/filebeat.yml:ro
      - /nsm/bro/spool/manager:/nsm/bro/spool:ro
      - /opt/so/log/suricata:/suricata:ro
      - /opt/so/conf/filebeat/etc/pki/filebeat.crt:/usr/share/filebeat/filebeat.crt:ro
      - /opt/so/conf/filebeat/etc/pki/filebeat.key:/usr/share/filebeat/filebeat.key:ro
      - /etc/ssl/certs/intca.crt:/usr/share/filebeat/intraca.crt:ro

372
salt/firewall/init.sls Normal file
View File

@@ -0,0 +1,372 @@
# Firewall Magic
# Keep localhost in the game
iptables_allow_localhost:
  iptables.append:
    - table: filter
    - chain: INPUT
    - jump: ACCEPT
    - source: 127.0.0.1
    - save: True
# Allow related/established sessions
iptables_allow_established:
  iptables.append:
    - table: filter
    - chain: INPUT
    - jump: ACCEPT
    - match: conntrack
    - ctstate: 'RELATED,ESTABLISHED'
    - save: True
# Always allow SSH so we can like log in
# NOTE(review): this accepts SSH from any source -- presumably intentional
# for management access; confirm.
iptables_allow_ssh:
  iptables.append:
    - table: filter
    - chain: INPUT
    - jump: ACCEPT
    - dport: 22
    - proto: tcp
    - save: True
# I like pings
iptables_allow_pings:
  iptables.append:
    - table: filter
    - chain: INPUT
    - jump: ACCEPT
    - proto: icmp
    - save: True
# Create the chain for logging
# The LOGGING chain logs at most 2 packets/min, then the final state in
# this file appends a DROP so unmatched traffic is logged then dropped.
iptables_LOGGING_chain:
  iptables.chain_present:
    - name: LOGGING
    - table: filter
    - family: ipv4
iptables_LOGGING_limit:
  iptables.append:
    - table: filter
    - chain: LOGGING
    - match: limit
    - jump: LOG
    - limit: 2/min
    - log-level: 4
    - log-prefix: "IPTables-dropped: "
# Make the input policy send stuff that doesn't match to be logged and dropped
iptables_log_input_drops:
  iptables.append:
    - table: filter
    - chain: INPUT
    - jump: LOGGING
    - save: True
# Enable global DOCKER-USER block rule
enable_docker_user_fw_policy:
  iptables.insert:
    - table: filter
    - chain: DOCKER-USER
    - jump: LOGGING
    - in-interface: '!docker0'
    - out-interface: docker0
    - position: 1
    - save: True
# Inserted at position 1 AFTER the rule above, so the established/related
# accept ends up before the LOGGING jump in the chain.
enable_docker_user_established:
  iptables.insert:
    - table: filter
    - chain: DOCKER-USER
    - jump: ACCEPT
    - in-interface: '!docker0'
    - out-interface: docker0
    - position: 1
    - save: True
    - match: conntrack
    - ctstate: 'RELATED,ESTABLISHED'
# Rules if you are a Master
{% if grains['role'] == 'so-master' %}
#This should be more granular
# Accept all traffic originating from the default Docker bridge subnet.
iptables_allow_master_docker:
  iptables.insert:
    - table: filter
    - chain: INPUT
    - jump: ACCEPT
    - source: 172.17.0.0/24
    - position: 1
    - save: True
# Per-host rules for the master firewall allow-list. Default to [] so the
# template still renders when the 'masterfw' pillar key is absent
# (pillar.get without a default returns None, which Jinja cannot iterate).
{% for ip in pillar.get('masterfw', []) %}
# Allow Redis
# (state ID typo fixed: "maternode" -> "masternode", matching the
# sibling enable_masternode_* states)
enable_masternode_redis_6379_{{ip}}:
  iptables.insert:
    - table: filter
    - chain: DOCKER-USER
    - jump: ACCEPT
    - proto: tcp
    - source: {{ ip }}
    - dport: 6379
    - position: 1
    - save: True
# Allow Kibana
enable_masternode_kibana_5601_{{ip}}:
  iptables.insert:
    - table: filter
    - chain: DOCKER-USER
    - jump: ACCEPT
    - proto: tcp
    - source: {{ ip }}
    - dport: 5601
    - position: 1
    - save: True
# Allow Elasticsearch HTTP
enable_masternode_ES_9200_{{ip}}:
  iptables.insert:
    - table: filter
    - chain: DOCKER-USER
    - jump: ACCEPT
    - proto: tcp
    - source: {{ ip }}
    - dport: 9200
    - position: 1
    - save: True
# Allow Elasticsearch transport
enable_masternode_ES_9300_{{ip}}:
  iptables.insert:
    - table: filter
    - chain: DOCKER-USER
    - jump: ACCEPT
    - proto: tcp
    - source: {{ ip }}
    - dport: 9300
    - position: 1
    - save: True
{% endfor %}
# Make it so all the minions can talk to salt and update etc.
# Default to [] so the template renders even if the 'minions' pillar key
# is absent (None is not iterable in Jinja).
{% for ip in pillar.get('minions', []) %}
# Salt publish port
enable_salt_minions_4505_{{ip}}:
  iptables.insert:
    - table: filter
    - chain: INPUT
    - jump: ACCEPT
    - proto: tcp
    - source: {{ ip }}
    - dport: 4505
    - position: 1
    - save: True
# Salt return port
enable_salt_minions_4506_{{ip}}:
  iptables.insert:
    - table: filter
    - chain: INPUT
    - jump: ACCEPT
    - proto: tcp
    - source: {{ ip }}
    - dport: 4506
    - position: 1
    - save: True
# Docker registry
enable_salt_minions_5000_{{ip}}:
  iptables.insert:
    - table: filter
    - chain: DOCKER-USER
    - jump: ACCEPT
    - proto: tcp
    - source: {{ ip }}
    - dport: 5000
    - position: 1
    - save: True
# apt-cacher-ng
enable_salt_minions_3142_{{ip}}:
  iptables.insert:
    - table: filter
    - chain: DOCKER-USER
    - jump: ACCEPT
    - proto: tcp
    - source: {{ ip }}
    - dport: 3142
    - position: 1
    - save: True
{% endfor %}
# Allow Forward Nodes to send their beats traffic
# Default to [] so the template renders when the pillar key is absent.
{% for ip in pillar.get('forward_nodes', []) %}
enable_forwardnode_beats_5044_{{ip}}:
  iptables.insert:
    - table: filter
    - chain: DOCKER-USER
    - jump: ACCEPT
    - proto: tcp
    - source: {{ ip }}
    - dport: 5044
    - position: 1
    - save: True
{% endfor %}
# Allow storage nodes to reach Redis and the Elasticsearch transport.
# Default to [] so the template renders when the pillar key is absent.
{% for ip in pillar.get('storage_nodes', []) %}
enable_storagenode_redis_6379_{{ip}}:
  iptables.insert:
    - table: filter
    - chain: DOCKER-USER
    - jump: ACCEPT
    - proto: tcp
    - source: {{ ip }}
    - dport: 6379
    - position: 1
    - save: True
enable_storagenode_ES_9300_{{ip}}:
  iptables.insert:
    - table: filter
    - chain: DOCKER-USER
    - jump: ACCEPT
    - proto: tcp
    - source: {{ ip }}
    - dport: 9300
    - position: 1
    - save: True
{% endfor %}
# Allow Beats Endpoints to send their beats traffic
# Default to [] so the template renders when the pillar key is absent.
{% for ip in pillar.get('beats_endpoint', []) %}
enable_standard_beats_5044_{{ip}}:
  iptables.insert:
    - table: filter
    - chain: DOCKER-USER
    - jump: ACCEPT
    - proto: tcp
    - source: {{ ip }}
    - dport: 5044
    - position: 1
    - save: True
{% endfor %}
# Allow Analysts
# Default to [] so the template renders when the pillar key is absent.
{% for ip in pillar.get('analyst', []) %}
enable_standard_analyst_80_{{ip}}:
  iptables.insert:
    - table: filter
    - chain: DOCKER-USER
    - jump: ACCEPT
    - proto: tcp
    - source: {{ ip }}
    - dport: 80
    - position: 1
    - save: True
enable_standard_analyst_443_{{ip}}:
  iptables.insert:
    - table: filter
    - chain: DOCKER-USER
    - jump: ACCEPT
    - proto: tcp
    - source: {{ ip }}
    - dport: 443
    - position: 1
    - save: True
#THIS IS TEMPORARY
enable_standard_analyst_5601_{{ip}}:
  iptables.insert:
    - table: filter
    - chain: DOCKER-USER
    - jump: ACCEPT
    - proto: tcp
    - source: {{ ip }}
    - dport: 5601
    - position: 1
    - save: True
{% endfor %}
# Rules for storage nodes connecting to master
{% endif %}
# Rules if you are a Storage Node
{% if grains['role'] == 'so-node' %}
#This should be more granular
# Accept all traffic from the local Docker bridge subnet.
iptables_allow_docker:
  iptables.insert:
    - table: filter
    - chain: INPUT
    - jump: ACCEPT
    - source: 172.17.0.0/24
    - position: 1
    - save: True
# Allow local containers to reach Elasticsearch HTTP (9200) and
# transport (9300) through the DOCKER-USER chain.
enable_docker_ES_9200:
  iptables.insert:
    - table: filter
    - chain: DOCKER-USER
    - jump: ACCEPT
    - proto: tcp
    - source: 172.17.0.0/24
    - dport: 9200
    - position: 1
    - save: True
enable_docker_ES_9300:
  iptables.insert:
    - table: filter
    - chain: DOCKER-USER
    - jump: ACCEPT
    - proto: tcp
    - source: 172.17.0.0/24
    - dport: 9300
    - position: 1
    - save: True
# Allow the master(s) to reach this node's Elasticsearch transport.
# Default to [] so the template renders when the pillar key is absent.
{% for ip in pillar.get('masterfw', []) %}
enable_cluster_ES_9300_{{ip}}:
  iptables.insert:
    - table: filter
    - chain: DOCKER-USER
    - jump: ACCEPT
    - proto: tcp
    - source: {{ ip }}
    - dport: 9300
    - position: 1
    - save: True
{% endfor %}
{% endif %}
# Rules if you are a Sensor
{% if grains['role'] == 'so-sensor' %}
{% endif %}
# Rules if you are a Hot Node
# Rules if you are a Warm Node
# Some Fixer upper type rules
# Drop it like it's hot
# Make the input policy send stuff that doesn't match to be logged and dropped
# Appended after the rate-limited LOG rule in the LOGGING chain, so
# unmatched traffic is (rate-limited) logged and then dropped.
iptables_drop_all_the_things:
  iptables.append:
    - table: filter
    - chain: LOGGING
    - jump: DROP
    - save: True

View File

@@ -0,0 +1,10 @@
# idstools - disable.conf
# Example of disabling a rule by signature ID (gid is optional).
# 1:2019401
# 2019401
# Example of disabling a rule by regular expression.
# - All regular expression matches are case insensitive.
# re:heartbleed
# re:MS(0[7-9]|10)-\d+

View File

@@ -0,0 +1,10 @@
# idstools-rulecat - enable.conf
# Example of enabling a rule by signature ID (gid is optional).
# 1:2019401
# 2019401
# Example of enabling a rule by regular expression.
# - All regular expression matches are case insensitive.
# re:heartbleed
# re:MS(0[7-9]|10)-\d+

View File

@@ -0,0 +1,14 @@
# idstools-rulecat - modify.conf
# Format: <sid> "<from>" "<to>"
# Example changing the seconds for rule 2019401 to 3600.
#2019401 "seconds \d+" "seconds 3600"
# Change all trojan-activity rules to drop. Its better to setup a
# drop.conf for this, but this does show the use of back references.
#re:classtype:trojan-activity "(alert)(.*)" "drop\\2"
# For compatibility, most Oinkmaster modifysid lines should work as
# well.
#modifysid * "^drop(.*)noalert(.*)" | "alert${1}noalert${2}"

View File

@@ -0,0 +1,6 @@
--suricata-version=4.0
--merged=/opt/so/rules/nids/all.rules
--local=/opt/so/rules/nids/local.rules
--disable=/opt/so/idstools/etc/disable.conf
--enable=/opt/so/idstools/etc/enable.conf
--modify=/opt/so/idstools/etc/modify.conf

62
salt/idstools/init.sls Normal file
View File

@@ -0,0 +1,62 @@
# Copyright 2014,2015,2016,2017,2018 Security Onion Solutions, LLC
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# IDSTools Setup
# Directory that holds the idstools configuration (disable/enable/modify.conf)
idstoolsdir:
  file.directory:
    - name: /opt/so/conf/idstools/etc
    - user: 939
    - group: 939
    - makedirs: True

# Sync the idstools etc files from the salt fileserver (jinja-rendered)
idstoolsetcsync:
  file.recurse:
    - name: /opt/so/conf/idstools/etc
    - source: salt://idstools/etc
    - user: 939
    - group: 939
    - template: jinja

# Directory where the downloaded/merged NIDS rules are written
rulesdir:
  file.directory:
    - name: /opt/so/rules/nids
    - user: 939
    - group: 939
    - makedirs: True

# Deploy local.rules for user-defined rules.
# NOTE(review): file.managed re-deploys the source on every highstate, which
# would overwrite edits made directly on the minion's local.rules — confirm
# whether "- replace: False" is wanted here.
synclocalnidsrules:
  file.managed:
    - name: /opt/so/rules/nids/local.rules
    - source: salt://idstools/localrules/local.rules
    - user: 939
    - group: 939

# Expose the rules directory inside the suricata salt tree via symlink
ruleslink:
  file.symlink:
    - name: /opt/so/saltstack/salt/suricata/rules
    - target: /opt/so/rules/nids

# Pull the idstools image
toosmooth/so-idstools:test2:
  docker_image.present

# Run the idstools container with the config mounted read-only and the
# rules directory mounted read-write (it writes the merged rule set there).
so-idstools:
  docker_container.running:
    - image: toosmooth/so-idstools:test2
    - hostname: so-idstools
    - user: socore
    - binds:
      - /opt/so/conf/idstools/etc:/opt/so/idstools/etc:ro
      - /opt/so/rules/nids:/opt/so/rules/nids:rw

View File

@@ -0,0 +1 @@
# Put your own custom Snort/Suricata rules in here.

View File

@@ -0,0 +1,53 @@
{%- set ES = salt['pillar.get']('master:mainip', '') -%}
# Wait for Elasticsearch to come up, so that we can query for version information
echo -n "Waiting for ElasticSearch..."
COUNT=0
ELASTICSEARCH_CONNECTED="no"
while [[ "$COUNT" -le 30 ]]; do
  curl --output /dev/null --silent --head --fail http://{{ ES }}:9200
  if [ $? -eq 0 ]; then
    ELASTICSEARCH_CONNECTED="yes"
    echo "connected!"
    break
  else
    ((COUNT+=1))
    sleep 1
    echo -n "."
  fi
done
if [ "$ELASTICSEARCH_CONNECTED" == "no" ]; then
  echo
  echo -e "Connection attempt timed out. Unable to connect to ElasticSearch. \nPlease try: \n -checking log(s) in /var/log/elasticsearch/\n -running 'sudo docker ps' \n -running 'sudo so-elastic-restart'"
  echo
  # Exit non-zero so callers can detect the failure. A bare "exit" here would
  # return the status of the preceding echo (0), reporting success on failure.
  exit 1
fi

# Make sure Kibana is running
MAX_WAIT=240
# Check to see if Kibana is available
wait_step=0
until curl -s -XGET http://{{ ES }}:5601 > /dev/null ; do
  wait_step=$(( ${wait_step} + 1 ))
  echo "Waiting on Kibana...Attempt #$wait_step"
  if [ ${wait_step} -gt ${MAX_WAIT} ]; then
    echo "ERROR: Kibana not available for more than ${MAX_WAIT} seconds."
    exit 5
  fi
  sleep 1s;
done

# Apply Kibana template
echo
echo "Applying Kibana template..."
curl -s -XPUT http://{{ ES }}:9200/_template/kibana \
  -H 'Content-Type: application/json' \
  -d'{"index_patterns" : ".kibana", "settings": { "number_of_shards" : 1, "number_of_replicas" : 0 }, "mappings" : { "search": {"properties": {"hits": {"type": "integer"}, "version": {"type": "integer"}}}}}'
echo
# The .kibana index is a single shard; drop replicas so a one-node cluster
# does not stay yellow because of unassignable replica shards.
curl -s -XPUT "{{ ES }}:9200/.kibana/_settings" \
  -H 'Content-Type: application/json' \
  -d'{"index" : {"number_of_replicas" : 0}}'
echo

View File

@@ -0,0 +1,8 @@
{ "attributes":
{
"defaultIndex": "*:logstash-*",
"discover:sampleSize":"10",
"dashboard:defaultDarkTheme":true,
"timepicker:timeDefaults":"{\n \"from\": \"now-24h\",\n \"to\": \"now\",\n \"mode\": \"quick\"\n}"
}
}

View File

@@ -0,0 +1,11 @@
---
# Default Kibana configuration from kibana-docker.
{%- set ES = salt['pillar.get']('master:mainip', '') -%}
server.name: kibana
server.host: "0"
elasticsearch.url: http://{{ ES }}:9200
#elasticsearch.username: elastic
#elasticsearch.password: changeme
#xpack.monitoring.ui.container.elasticsearch.enabled: true
elasticsearch.requestTimeout: 90000
logging.dest: /var/log/kibana/kibana.log

84
salt/kibana/init.sls Normal file
View File

@@ -0,0 +1,84 @@
{% set master = salt['grains.get']('master') %}

# Add the kibana group
kibanasearchgroup:
  group.present:
    - name: kibana
    - gid: 932

# Add the kibana user
kibana:
  user.present:
    - uid: 932
    - gid: 932
    - home: /opt/so/conf/kibana
    - createhome: False

# Kibana configuration directory
kibanaconfdir:
  file.directory:
    - name: /opt/so/conf/kibana/etc
    - user: 932
    - group: 939
    - makedirs: True

# Sync the kibana config from the salt fileserver (jinja-rendered)
synckibanaconfig:
  file.recurse:
    - name: /opt/so/conf/kibana/etc
    - source: salt://kibana/etc
    - user: 932
    - group: 939
    - template: jinja

# Kibana log directory (mounted into the container below)
kibanalogdir:
  file.directory:
    - name: /opt/so/log/kibana
    - user: 932
    - group: 939
    - makedirs: True

# Directory for custom saved dashboards
kibanacustdashdir:
  file.directory:
    - name: /opt/so/conf/kibana/customdashboards
    - user: 932
    - group: 939
    - makedirs: True

# Sync custom dashboards from the salt fileserver
synckibanacustom:
  file.recurse:
    - name: /opt/so/conf/kibana/customdashboards
    - source: salt://kibana/custom
    - user: 932
    - group: 939
    - template: jinja

# Start the kibana docker
so-kibana:
  docker_container.running:
    - image: toosmooth/so-kibana:techpreview
    - hostname: kibana
    - user: kibana
    - environment:
      - KIBANA_DEFAULTAPPID=dashboard/94b52620-342a-11e7-9d52-4f090484f59e
      - ELASTICSEARCH_HOST={{ master }}
      - ELASTICSEARCH_PORT=9200
      - MASTER={{ master }}
    - binds:
      - /opt/so/conf/kibana/etc:/usr/share/kibana/config:rw
      - /opt/so/log/kibana:/var/log/kibana:rw
      # Fixed: the host path previously read "custdashboards", which does not
      # exist — the directory created/synced above is "customdashboards".
      - /opt/so/conf/kibana/customdashboards:/usr/share/kibana/custdashboards:ro
      - /sys/fs/cgroup:/sys/fs/cgroup:ro
    - port_bindings:
      - 0.0.0.0:5601:5601

# Keep the setting correct
#KibanaHappy:
#  cmd.script:
#    - shell: /bin/bash
#    - runas: socore
#    - source: salt://kibana/bin/keepkibanahappy.sh
#    - template: jinja

View File

@@ -0,0 +1,17 @@
# This is where you can specify which LogStash configs get loaded.
#
# The custom folder on the master gets automatically synced to each logstash
# node.
#
# To enable a custom configuration see the following example and uncomment:
# /usr/share/logstash/pipeline.custom/1234_input_custom.conf
##
# All of the defaults are loaded.
/usr/share/logstash/pipeline.so/0000_input_syslogng.conf
/usr/share/logstash/pipeline.so/0001_input_json.conf
/usr/share/logstash/pipeline.so/0002_input_windows_json.conf
/usr/share/logstash/pipeline.so/0003_input_syslog.conf
/usr/share/logstash/pipeline.so/0005_input_suricata.conf
/usr/share/logstash/pipeline.so/0006_input_beats.conf
/usr/share/logstash/pipeline.so/0007_input_import.conf
/usr/share/logstash/pipeline.dynamic/9999_output_redis.conf

View File

@@ -0,0 +1,106 @@
# This is where you can specify which LogStash configs get loaded.
#
# The custom folder on the master gets automatically synced to each logstash
# node.
#
# To enable a custom configuration see the following example and uncomment:
# /usr/share/logstash/pipeline.custom/1234_input_custom.conf
##
# All of the defaults are loaded.
/usr/share/logstash/pipeline.so/0000_input_syslogng.conf
/usr/share/logstash/pipeline.so/0001_input_json.conf
/usr/share/logstash/pipeline.so/0002_input_windows_json.conf
/usr/share/logstash/pipeline.so/0003_input_syslog.conf
/usr/share/logstash/pipeline.so/0005_input_suricata.conf
/usr/share/logstash/pipeline.so/0006_input_beats.conf
/usr/share/logstash/pipeline.so/0007_input_import.conf
/usr/share/logstash/pipeline.so/1000_preprocess_log_elapsed.conf
/usr/share/logstash/pipeline.so/1001_preprocess_syslogng.conf
/usr/share/logstash/pipeline.so/1002_preprocess_json.conf
/usr/share/logstash/pipeline.so/1003_preprocess_bro.conf
/usr/share/logstash/pipeline.so/1004_preprocess_syslog_types.conf
/usr/share/logstash/pipeline.so/1026_preprocess_dhcp.conf
/usr/share/logstash/pipeline.so/1029_preprocess_esxi.conf
/usr/share/logstash/pipeline.so/1030_preprocess_greensql.conf
/usr/share/logstash/pipeline.so/1031_preprocess_iis.conf
/usr/share/logstash/pipeline.so/1032_preprocess_mcafee.conf
/usr/share/logstash/pipeline.so/1033_preprocess_snort.conf
/usr/share/logstash/pipeline.so/1034_preprocess_syslog.conf
/usr/share/logstash/pipeline.so/1100_preprocess_bro_conn.conf
/usr/share/logstash/pipeline.so/1101_preprocess_bro_dhcp.conf
/usr/share/logstash/pipeline.so/1102_preprocess_bro_dns.conf
/usr/share/logstash/pipeline.so/1103_preprocess_bro_dpd.conf
/usr/share/logstash/pipeline.so/1104_preprocess_bro_files.conf
/usr/share/logstash/pipeline.so/1105_preprocess_bro_ftp.conf
/usr/share/logstash/pipeline.so/1106_preprocess_bro_http.conf
/usr/share/logstash/pipeline.so/1107_preprocess_bro_irc.conf
/usr/share/logstash/pipeline.so/1108_preprocess_bro_kerberos.conf
/usr/share/logstash/pipeline.so/1109_preprocess_bro_notice.conf
/usr/share/logstash/pipeline.so/1110_preprocess_bro_rdp.conf
/usr/share/logstash/pipeline.so/1111_preprocess_bro_signatures.conf
/usr/share/logstash/pipeline.so/1112_preprocess_bro_smtp.conf
/usr/share/logstash/pipeline.so/1113_preprocess_bro_snmp.conf
/usr/share/logstash/pipeline.so/1114_preprocess_bro_software.conf
/usr/share/logstash/pipeline.so/1115_preprocess_bro_ssh.conf
/usr/share/logstash/pipeline.so/1116_preprocess_bro_ssl.conf
/usr/share/logstash/pipeline.so/1117_preprocess_bro_syslog.conf
/usr/share/logstash/pipeline.so/1118_preprocess_bro_tunnel.conf
/usr/share/logstash/pipeline.so/1119_preprocess_bro_weird.conf
/usr/share/logstash/pipeline.so/1121_preprocess_bro_mysql.conf
/usr/share/logstash/pipeline.so/1122_preprocess_bro_socks.conf
/usr/share/logstash/pipeline.so/1123_preprocess_bro_x509.conf
/usr/share/logstash/pipeline.so/1124_preprocess_bro_intel.conf
/usr/share/logstash/pipeline.so/1125_preprocess_bro_modbus.conf
/usr/share/logstash/pipeline.so/1126_preprocess_bro_sip.conf
/usr/share/logstash/pipeline.so/1127_preprocess_bro_radius.conf
/usr/share/logstash/pipeline.so/1128_preprocess_bro_pe.conf
/usr/share/logstash/pipeline.so/1129_preprocess_bro_rfb.conf
/usr/share/logstash/pipeline.so/1130_preprocess_bro_dnp3.conf
/usr/share/logstash/pipeline.so/1131_preprocess_bro_smb_files.conf
/usr/share/logstash/pipeline.so/1132_preprocess_bro_smb_mapping.conf
/usr/share/logstash/pipeline.so/1133_preprocess_bro_ntlm.conf
/usr/share/logstash/pipeline.so/1134_preprocess_bro_dce_rpc.conf
/usr/share/logstash/pipeline.so/1998_test_data.conf
/usr/share/logstash/pipeline.so/2000_network_flow.conf
/usr/share/logstash/pipeline.so/6000_bro.conf
/usr/share/logstash/pipeline.so/6001_bro_import.conf
/usr/share/logstash/pipeline.so/6002_syslog.conf
/usr/share/logstash/pipeline.so/6101_switch_brocade.conf
/usr/share/logstash/pipeline.so/6200_firewall_fortinet.conf
/usr/share/logstash/pipeline.so/6201_firewall_pfsense.conf
/usr/share/logstash/pipeline.so/6300_windows.conf
/usr/share/logstash/pipeline.so/6301_dns_windows.conf
/usr/share/logstash/pipeline.so/6400_suricata.conf
/usr/share/logstash/pipeline.so/6500_ossec.conf
/usr/share/logstash/pipeline.so/6501_ossec_sysmon.conf
/usr/share/logstash/pipeline.so/6502_ossec_autoruns.conf
/usr/share/logstash/pipeline.so/8000_postprocess_bro_cleanup.conf
/usr/share/logstash/pipeline.so/8001_postprocess_common_ip_augmentation.conf
/usr/share/logstash/pipeline.so/8006_postprocess_dns.conf
/usr/share/logstash/pipeline.so/8007_postprocess_dns_top1m_tagging.conf
/usr/share/logstash/pipeline.so/8007_postprocess_http.conf
/usr/share/logstash/pipeline.so/8008_postprocess_dns_whois_age.conf
/usr/share/logstash/pipeline.so/8200_postprocess_tagging.conf
/usr/share/logstash/pipeline.so/8502_postprocess_freq_analysis_bro_dns.conf
/usr/share/logstash/pipeline.so/8503_postprocess_freq_analysis_bro_http.conf
/usr/share/logstash/pipeline.so/8504_postprocess_freq_analysis_bro_ssl.conf
/usr/share/logstash/pipeline.so/8505_postprocess_freq_analysis_bro_x509.conf
/usr/share/logstash/pipeline.so/8998_postprocess_log_elapsed.conf
/usr/share/logstash/pipeline.so/8999_postprocess_rename_type.conf
/usr/share/logstash/pipeline.so/9000_output_bro.conf
/usr/share/logstash/pipeline.so/9001_output_switch.conf
/usr/share/logstash/pipeline.so/9002_output_import.conf
/usr/share/logstash/pipeline.so/9004_output_flow.conf
/usr/share/logstash/pipeline.so/9026_output_dhcp.conf
/usr/share/logstash/pipeline.so/9029_output_esxi.conf
/usr/share/logstash/pipeline.so/9030_output_greensql.conf
/usr/share/logstash/pipeline.so/9031_output_iis.conf
/usr/share/logstash/pipeline.so/9032_output_mcafee.conf
/usr/share/logstash/pipeline.so/9033_output_snort.conf
/usr/share/logstash/pipeline.so/9034_output_syslog.conf
/usr/share/logstash/pipeline.so/9200_output_firewall.conf
/usr/share/logstash/pipeline.so/9300_output_windows.conf
/usr/share/logstash/pipeline.so/9301_output_dns_windows.conf
/usr/share/logstash/pipeline.so/9400_output_suricata.conf
/usr/share/logstash/pipeline.so/9500_output_beats.conf
/usr/share/logstash/pipeline.so/9998_output_test_data.conf

View File

@@ -0,0 +1,17 @@
# This is where you can specify which LogStash configs get loaded.
#
# The custom folder on the master gets automatically synced to each logstash
# node.
#
# To enable a custom configuration see the following example and uncomment:
# /usr/share/logstash/pipeline.custom/1234_input_custom.conf
##
# All of the defaults are loaded.
/usr/share/logstash/pipeline.so/0000_input_syslogng.conf
/usr/share/logstash/pipeline.so/0001_input_json.conf
/usr/share/logstash/pipeline.so/0002_input_windows_json.conf
/usr/share/logstash/pipeline.so/0003_input_syslog.conf
/usr/share/logstash/pipeline.so/0005_input_suricata.conf
/usr/share/logstash/pipeline.so/0006_input_beats.conf
/usr/share/logstash/pipeline.so/0007_input_import.conf
/usr/share/logstash/pipeline.dynamic/9999_output_redis.conf

View File

@@ -0,0 +1,84 @@
# This is where you can specify which LogStash configs get loaded.
#
# The custom folder on the master gets automatically synced to each logstash
# node.
#
# To enable a custom configuration see the following example and uncomment:
# /usr/share/logstash/pipeline.custom/1234_input_custom.conf
##
# All of the defaults are loaded.
/usr/share/logstash/pipeline.dynamic/0900_input_redis.conf
/usr/share/logstash/pipeline.so/1000_preprocess_log_elapsed.conf
/usr/share/logstash/pipeline.so/1001_preprocess_syslogng.conf
/usr/share/logstash/pipeline.so/1002_preprocess_json.conf
/usr/share/logstash/pipeline.so/1003_preprocess_bro.conf
/usr/share/logstash/pipeline.so/1004_preprocess_syslog_types.conf
/usr/share/logstash/pipeline.so/1026_preprocess_dhcp.conf
/usr/share/logstash/pipeline.so/1029_preprocess_esxi.conf
/usr/share/logstash/pipeline.so/1030_preprocess_greensql.conf
/usr/share/logstash/pipeline.so/1031_preprocess_iis.conf
/usr/share/logstash/pipeline.so/1032_preprocess_mcafee.conf
/usr/share/logstash/pipeline.so/1033_preprocess_snort.conf
/usr/share/logstash/pipeline.so/1034_preprocess_syslog.conf
/usr/share/logstash/pipeline.so/1100_preprocess_bro_conn.conf
/usr/share/logstash/pipeline.so/1101_preprocess_bro_dhcp.conf
/usr/share/logstash/pipeline.so/1102_preprocess_bro_dns.conf
/usr/share/logstash/pipeline.so/1103_preprocess_bro_dpd.conf
/usr/share/logstash/pipeline.so/1104_preprocess_bro_files.conf
/usr/share/logstash/pipeline.so/1105_preprocess_bro_ftp.conf
/usr/share/logstash/pipeline.so/1106_preprocess_bro_http.conf
/usr/share/logstash/pipeline.so/1107_preprocess_bro_irc.conf
/usr/share/logstash/pipeline.so/1108_preprocess_bro_kerberos.conf
/usr/share/logstash/pipeline.so/1109_preprocess_bro_notice.conf
/usr/share/logstash/pipeline.so/1110_preprocess_bro_rdp.conf
/usr/share/logstash/pipeline.so/1111_preprocess_bro_signatures.conf
/usr/share/logstash/pipeline.so/1112_preprocess_bro_smtp.conf
/usr/share/logstash/pipeline.so/1113_preprocess_bro_snmp.conf
/usr/share/logstash/pipeline.so/1114_preprocess_bro_software.conf
/usr/share/logstash/pipeline.so/1115_preprocess_bro_ssh.conf
/usr/share/logstash/pipeline.so/1116_preprocess_bro_ssl.conf
/usr/share/logstash/pipeline.so/1117_preprocess_bro_syslog.conf
/usr/share/logstash/pipeline.so/1118_preprocess_bro_tunnel.conf
/usr/share/logstash/pipeline.so/1119_preprocess_bro_weird.conf
/usr/share/logstash/pipeline.so/1121_preprocess_bro_mysql.conf
/usr/share/logstash/pipeline.so/1122_preprocess_bro_socks.conf
/usr/share/logstash/pipeline.so/1123_preprocess_bro_x509.conf
/usr/share/logstash/pipeline.so/1124_preprocess_bro_intel.conf
/usr/share/logstash/pipeline.so/1125_preprocess_bro_modbus.conf
/usr/share/logstash/pipeline.so/1126_preprocess_bro_sip.conf
/usr/share/logstash/pipeline.so/1127_preprocess_bro_radius.conf
/usr/share/logstash/pipeline.so/1128_preprocess_bro_pe.conf
/usr/share/logstash/pipeline.so/1129_preprocess_bro_rfb.conf
/usr/share/logstash/pipeline.so/1130_preprocess_bro_dnp3.conf
/usr/share/logstash/pipeline.so/1131_preprocess_bro_smb_files.conf
/usr/share/logstash/pipeline.so/1132_preprocess_bro_smb_mapping.conf
/usr/share/logstash/pipeline.so/1133_preprocess_bro_ntlm.conf
/usr/share/logstash/pipeline.so/1134_preprocess_bro_dce_rpc.conf
/usr/share/logstash/pipeline.so/1998_test_data.conf
/usr/share/logstash/pipeline.so/2000_network_flow.conf
/usr/share/logstash/pipeline.so/6000_bro.conf
/usr/share/logstash/pipeline.so/6001_bro_import.conf
/usr/share/logstash/pipeline.so/6002_syslog.conf
/usr/share/logstash/pipeline.so/6101_switch_brocade.conf
/usr/share/logstash/pipeline.so/6200_firewall_fortinet.conf
/usr/share/logstash/pipeline.so/6201_firewall_pfsense.conf
/usr/share/logstash/pipeline.so/6300_windows.conf
/usr/share/logstash/pipeline.so/6301_dns_windows.conf
/usr/share/logstash/pipeline.so/6400_suricata.conf
/usr/share/logstash/pipeline.so/6500_ossec.conf
/usr/share/logstash/pipeline.so/6501_ossec_sysmon.conf
/usr/share/logstash/pipeline.so/6502_ossec_autoruns.conf
/usr/share/logstash/pipeline.so/8000_postprocess_bro_cleanup.conf
/usr/share/logstash/pipeline.so/8001_postprocess_common_ip_augmentation.conf
/usr/share/logstash/pipeline.so/8006_postprocess_dns.conf
/usr/share/logstash/pipeline.so/8007_postprocess_dns_top1m_tagging.conf
/usr/share/logstash/pipeline.so/8007_postprocess_http.conf
/usr/share/logstash/pipeline.so/8008_postprocess_dns_whois_age.conf
/usr/share/logstash/pipeline.so/8200_postprocess_tagging.conf
/usr/share/logstash/pipeline.so/8502_postprocess_freq_analysis_bro_dns.conf
/usr/share/logstash/pipeline.so/8503_postprocess_freq_analysis_bro_http.conf
/usr/share/logstash/pipeline.so/8504_postprocess_freq_analysis_bro_ssl.conf
/usr/share/logstash/pipeline.so/8505_postprocess_freq_analysis_bro_x509.conf
/usr/share/logstash/pipeline.so/8998_postprocess_log_elapsed.conf
/usr/share/logstash/pipeline.so/8999_postprocess_rename_type.conf
/usr/share/logstash/pipeline.dynamic/9999_output_redis.conf

View File

@@ -0,0 +1,17 @@
# This is where you can specify which LogStash configs get loaded.
#
# The custom folder on the master gets automatically synced to each logstash
# node.
#
# To enable a custom configuration see the following example and uncomment:
# /usr/share/logstash/pipeline.custom/1234_input_custom.conf
##
# All of the defaults are loaded.
/usr/share/logstash/pipeline.so/0000_input_syslogng.conf
/usr/share/logstash/pipeline.so/0001_input_json.conf
/usr/share/logstash/pipeline.so/0002_input_windows_json.conf
/usr/share/logstash/pipeline.so/0003_input_syslog.conf
/usr/share/logstash/pipeline.so/0005_input_suricata.conf
/usr/share/logstash/pipeline.dynamic/0006_input_beats.conf
/usr/share/logstash/pipeline.so/0007_input_import.conf
/usr/share/logstash/pipeline.dynamic/9999_output_redis.conf

View File

@@ -0,0 +1,103 @@
# This is where you can specify which LogStash configs get loaded.
#
# The custom folder on the master gets automatically synced to each logstash
# node.
#
# To enable a custom configuration see the following example and uncomment:
# /usr/share/logstash/pipeline.custom/1234_input_custom.conf
##
# All of the defaults are loaded.
/usr/share/logstash/pipeline.dynamic/0900_input_redis.conf
/usr/share/logstash/pipeline.so/1000_preprocess_log_elapsed.conf
/usr/share/logstash/pipeline.so/1001_preprocess_syslogng.conf
/usr/share/logstash/pipeline.so/1002_preprocess_json.conf
/usr/share/logstash/pipeline.so/1003_preprocess_bro.conf
/usr/share/logstash/pipeline.so/1004_preprocess_syslog_types.conf
/usr/share/logstash/pipeline.so/1026_preprocess_dhcp.conf
/usr/share/logstash/pipeline.so/1029_preprocess_esxi.conf
/usr/share/logstash/pipeline.so/1030_preprocess_greensql.conf
/usr/share/logstash/pipeline.so/1031_preprocess_iis.conf
/usr/share/logstash/pipeline.so/1032_preprocess_mcafee.conf
/usr/share/logstash/pipeline.so/1033_preprocess_snort.conf
/usr/share/logstash/pipeline.so/1034_preprocess_syslog.conf
/usr/share/logstash/pipeline.so/1100_preprocess_bro_conn.conf
/usr/share/logstash/pipeline.so/1101_preprocess_bro_dhcp.conf
/usr/share/logstash/pipeline.so/1102_preprocess_bro_dns.conf
/usr/share/logstash/pipeline.so/1103_preprocess_bro_dpd.conf
/usr/share/logstash/pipeline.so/1104_preprocess_bro_files.conf
/usr/share/logstash/pipeline.so/1105_preprocess_bro_ftp.conf
/usr/share/logstash/pipeline.so/1106_preprocess_bro_http.conf
/usr/share/logstash/pipeline.so/1107_preprocess_bro_irc.conf
/usr/share/logstash/pipeline.so/1108_preprocess_bro_kerberos.conf
/usr/share/logstash/pipeline.so/1109_preprocess_bro_notice.conf
/usr/share/logstash/pipeline.so/1110_preprocess_bro_rdp.conf
/usr/share/logstash/pipeline.so/1111_preprocess_bro_signatures.conf
/usr/share/logstash/pipeline.so/1112_preprocess_bro_smtp.conf
/usr/share/logstash/pipeline.so/1113_preprocess_bro_snmp.conf
/usr/share/logstash/pipeline.so/1114_preprocess_bro_software.conf
/usr/share/logstash/pipeline.so/1115_preprocess_bro_ssh.conf
/usr/share/logstash/pipeline.so/1116_preprocess_bro_ssl.conf
/usr/share/logstash/pipeline.so/1117_preprocess_bro_syslog.conf
/usr/share/logstash/pipeline.so/1118_preprocess_bro_tunnel.conf
/usr/share/logstash/pipeline.so/1119_preprocess_bro_weird.conf
/usr/share/logstash/pipeline.so/1121_preprocess_bro_mysql.conf
/usr/share/logstash/pipeline.so/1122_preprocess_bro_socks.conf
/usr/share/logstash/pipeline.so/1123_preprocess_bro_x509.conf
/usr/share/logstash/pipeline.so/1124_preprocess_bro_intel.conf
/usr/share/logstash/pipeline.so/1125_preprocess_bro_modbus.conf
/usr/share/logstash/pipeline.so/1126_preprocess_bro_sip.conf
/usr/share/logstash/pipeline.so/1127_preprocess_bro_radius.conf
/usr/share/logstash/pipeline.so/1128_preprocess_bro_pe.conf
/usr/share/logstash/pipeline.so/1129_preprocess_bro_rfb.conf
/usr/share/logstash/pipeline.so/1130_preprocess_bro_dnp3.conf
/usr/share/logstash/pipeline.so/1131_preprocess_bro_smb_files.conf
/usr/share/logstash/pipeline.so/1132_preprocess_bro_smb_mapping.conf
/usr/share/logstash/pipeline.so/1133_preprocess_bro_ntlm.conf
/usr/share/logstash/pipeline.so/1134_preprocess_bro_dce_rpc.conf
/usr/share/logstash/pipeline.so/1998_test_data.conf
/usr/share/logstash/pipeline.so/2000_network_flow.conf
/usr/share/logstash/pipeline.so/6000_bro.conf
/usr/share/logstash/pipeline.so/6001_bro_import.conf
/usr/share/logstash/pipeline.so/6002_syslog.conf
/usr/share/logstash/pipeline.so/6101_switch_brocade.conf
/usr/share/logstash/pipeline.so/6200_firewall_fortinet.conf
/usr/share/logstash/pipeline.so/6201_firewall_pfsense.conf
/usr/share/logstash/pipeline.so/6300_windows.conf
/usr/share/logstash/pipeline.so/6301_dns_windows.conf
/usr/share/logstash/pipeline.so/6400_suricata.conf
/usr/share/logstash/pipeline.so/6500_ossec.conf
/usr/share/logstash/pipeline.so/6501_ossec_sysmon.conf
/usr/share/logstash/pipeline.so/6502_ossec_autoruns.conf
/usr/share/logstash/pipeline.so/6600_winlogbeat_sysmon.conf
/usr/share/logstash/pipeline.so/6700_winlogbeat.conf
/usr/share/logstash/pipeline.so/8000_postprocess_bro_cleanup.conf
/usr/share/logstash/pipeline.so/8001_postprocess_common_ip_augmentation.conf
#/usr/share/logstash/pipeline.so/8006_postprocess_dns.conf
#/usr/share/logstash/pipeline.so/8007_postprocess_dns_top1m_tagging.conf
/usr/share/logstash/pipeline.so/8007_postprocess_http.conf
#/usr/share/logstash/pipeline.so/8008_postprocess_dns_whois_age.conf
/usr/share/logstash/pipeline.so/8200_postprocess_tagging.conf
#/usr/share/logstash/pipeline.so/8502_postprocess_freq_analysis_bro_dns.conf
#/usr/share/logstash/pipeline.so/8503_postprocess_freq_analysis_bro_http.conf
#/usr/share/logstash/pipeline.so/8504_postprocess_freq_analysis_bro_ssl.conf
#/usr/share/logstash/pipeline.so/8505_postprocess_freq_analysis_bro_x509.conf
/usr/share/logstash/pipeline.so/8998_postprocess_log_elapsed.conf
/usr/share/logstash/pipeline.so/8999_postprocess_rename_type.conf
/usr/share/logstash/pipeline.dynamic/9000_output_bro.conf
/usr/share/logstash/pipeline.dynamic/9001_output_switch.conf
/usr/share/logstash/pipeline.dynamic/9002_output_import.conf
/usr/share/logstash/pipeline.dynamic/9004_output_flow.conf
/usr/share/logstash/pipeline.dynamic/9026_output_dhcp.conf
/usr/share/logstash/pipeline.dynamic/9029_output_esxi.conf
/usr/share/logstash/pipeline.dynamic/9030_output_greensql.conf
/usr/share/logstash/pipeline.dynamic/9031_output_iis.conf
/usr/share/logstash/pipeline.dynamic/9032_output_mcafee.conf
/usr/share/logstash/pipeline.dynamic/9033_output_snort.conf
/usr/share/logstash/pipeline.dynamic/9034_output_syslog.conf
/usr/share/logstash/pipeline.dynamic/9200_output_firewall.conf
/usr/share/logstash/pipeline.dynamic/9300_output_windows.conf
/usr/share/logstash/pipeline.dynamic/9301_output_dns_windows.conf
/usr/share/logstash/pipeline.dynamic/9400_output_suricata.conf
/usr/share/logstash/pipeline.dynamic/9500_output_beats.conf
/usr/share/logstash/pipeline.dynamic/9600_output_ossec.conf
/usr/share/logstash/pipeline.dynamic/9998_output_test_data.conf

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,2 @@
#
#

View File

@@ -0,0 +1,28 @@
input {
  beats {
    port => "5044"
    ssl => true
    # NOTE(review): the CA lives under /usr/share/filebeat/ while the cert/key
    # live under /usr/share/logstash/ — confirm both paths are present inside
    # the logstash container.
    ssl_certificate_authorities => ["/usr/share/filebeat/ca.crt"]
    ssl_certificate => "/usr/share/logstash/filebeat.crt"
    ssl_key => "/usr/share/logstash/filebeat.key"
    tags => [ "beat" ]
  }
}
filter {
  # Normalize beat-shipped IDS events: preserve the original host field as
  # beat_host and derive sensor_name / syslog-host_from from the beat's name.
  if "ids" in [tags] {
    mutate {
      rename => { "host" => "beat_host" }
      remove_tag => ["beat"]
      add_field => { "sensor_name" => "%{[beat][name]}" }
      add_field => { "syslog-host_from" => "%{[beat][name]}" }
    }
  }
  # Same normalization for bro events.
  # NOTE(review): if an event carried both "ids" and "bro" tags, add_field
  # would run twice and sensor_name would become an array — confirm the two
  # tags are mutually exclusive upstream.
  if "bro" in [tags] {
    mutate {
      rename => { "host" => "beat_host" }
      remove_tag => ["beat"]
      add_field => { "sensor_name" => "%{[beat][name]}" }
      add_field => { "syslog-host_from" => "%{[beat][name]}" }
    }
  }
}

View File

@@ -0,0 +1,10 @@
{%- set master = grains['master'] %}
# Consume unparsed events queued by the master in the redis list
# "logstash:unparsed".
input {
  redis {
    host => '{{ master }}'
    data_type => 'list'
    key => 'logstash:unparsed'
    type => 'redis-input'
    # threads => 1
  }
}

View File

@@ -0,0 +1,28 @@
{#- Rendered only on non-master roles; ES target comes from the node pillar -#}
{%- if grains['role'] != 'so-master' -%}
{%- set ES = salt['pillar.get']('node:mainip', '') -%}
# Author: Justin Henderson
# SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics
# Email: justin@hasecuritysolution.com
# Last Update: 12/9/2016
filter {
  if "bro" in [tags] and "test_data" not in [tags] and "import" not in [tags] {
    mutate {
      ##add_tag => [ "conf_file_9000"]
    }
  }
}
output {
  # Ship bro events (excluding test/import data) to daily logstash-bro indices,
  # managing the "logstash" index template from the bundled template file.
  if "bro" in [tags] and "test_data" not in [tags] and "import" not in [tags] {
    # stdout { codec => rubydebug }
    elasticsearch {
      hosts => "{{ ES }}"
      index => "logstash-bro-%{+YYYY.MM.dd}"
      template_name => "logstash"
      template => "/logstash-template.json"
      template_overwrite => true
    }
  }
}
{%- endif %}

View File

@@ -0,0 +1,25 @@
{%- if grains['role'] != 'so-master' -%}
{%- set ES = salt['pillar.get']('node:mainip', '') -%}
# Author: Justin Henderson
# SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics
# Email: justin@hasecuritysolution.com
# Last Update: 12/9/2016
filter {
if "switch" in [tags] and "test_data" not in [tags] {
mutate {
##add_tag => [ "conf_file_9001"]
}
}
}
output {
if "switch" in [tags] and "test_data" not in [tags] {
#stdout { codec => rubydebug }
elasticsearch {
hosts => "{{ ES }}"
index => "logstash-switch-%{+YYYY.MM.dd}"
template => "/logstash-template.json"
}
}
}
{%- endif %}

View File

@@ -0,0 +1,25 @@
{%- if grains['role'] != 'so-master' -%}
{%- set ES = salt['pillar.get']('node:mainip', '') -%}
# Updated by: Doug Burks
# Last Update: 5/16/2017
filter {
  if "import" in [tags] and "test_data" not in [tags] {
    mutate {
      ##add_tag => [ "conf_file_9002"]
    }
  }
}
output {
  # Ship imported events to daily logstash-import indices.
  if "import" in [tags] and "test_data" not in [tags] {
    # stdout { codec => rubydebug }
    elasticsearch {
      hosts => "{{ ES }}"
      index => "logstash-import-%{+YYYY.MM.dd}"
      # template_name is the identifier the template is stored under in
      # Elasticsearch, not a pattern. Use "logstash" to match the sibling
      # outputs (9000_output_bro, 9033_output_snort, 9034_output_syslog) so
      # all outputs manage the same stored template instead of registering a
      # stray "logstash-*" copy.
      template_name => "logstash"
      template => "/logstash-template.json"
      template_overwrite => true
    }
  }
}
{%- endif %}

View File

@@ -0,0 +1,25 @@
{%- if grains['role'] != 'so-master' -%}
{%- set ES = salt['pillar.get']('node:mainip', '') -%}
# Author: Justin Henderson
# SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics
# Email: justin@hasecuritysolution.com
# Last Update: 12/9/2016
filter {
if [event_type] == "sflow" and "test_data" not in [tags] {
mutate {
##add_tag => [ "conf_file_9004"]
}
}
}
output {
if [event_type] == "sflow" and "test_data" not in [tags] {
#stdout { codec => rubydebug }
elasticsearch {
hosts => "{{ ES }}"
index => "logstash-flow-%{+YYYY.MM.dd}"
template => "/logstash-template.json"
}
}
}
{%- endif %}

View File

@@ -0,0 +1,24 @@
{%- if grains['role'] != 'so-master' -%}
{%- set ES = salt['pillar.get']('node:mainip', '') -%}
# Author: Justin Henderson
# SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics
# Email: justin@hasecuritysolution.com
# Last Update: 12/9/2016
filter {
if [event_type] == "dhcp" and "test_data" not in [tags] {
mutate {
##add_tag => [ "conf_file_9026"]
}
}
}
output {
if [event_type] == "dhcp" and "test_data" not in [tags] {
#stdout { codec => rubydebug }
elasticsearch {
hosts => "{{ ES }}"
template => "/logstash-template.json"
}
}
}
{%- endif %}

View File

@@ -0,0 +1,23 @@
{%- if grains['role'] != 'so-master' -%}
{%- set ES = salt['pillar.get']('node:mainip', '') -%}
# Author: Justin Henderson
# SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics
# Email: justin@hasecuritysolution.com
# Last Update: 12/9/2016
filter {
if [event_type] == "esxi" and "test_data" not in [tags] {
mutate {
##add_tag => [ "conf_file_9029"]
}
}
}
output {
if [event_type] == "esxi" and "test_data" not in [tags] {
elasticsearch {
hosts => "{{ ES }}"
template => "/logstash-template.json"
}
}
}
{%- endif %}

View File

@@ -0,0 +1,23 @@
{%- if grains['role'] != 'so-master' -%}
{%- set ES = salt['pillar.get']('node:mainip', '') -%}
# Author: Justin Henderson
# SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics
# Email: justin@hasecuritysolution.com
# Last Update: 12/9/2016
filter {
if [event_type] == "greensql" and "test_data" not in [tags] {
mutate {
##add_tag => [ "conf_file_9030"]
}
}
}
output {
if [event_type] == "greensql" and "test_data" not in [tags] {
elasticsearch {
hosts => "{{ ES }}"
template => "/logstash-template.json"
}
}
}
{%- endif %}

View File

@@ -0,0 +1,24 @@
{%- if grains['role'] != 'so-master' -%}
{%- set ES = salt['pillar.get']('node:mainip', '') -%}
# Author: Justin Henderson
# SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics
# Email: justin@hasecuritysolution.com
# Last Update: 12/9/2016
filter {
if [event_type] == "iis" and "test_data" not in [tags] {
mutate {
##add_tag => [ "conf_file_9031"]
}
}
}
output {
if [event_type] == "iis" and "test_data" not in [tags] {
#stdout { codec => rubydebug }
elasticsearch {
hosts => "{{ ES }}"
template => "/logstash-template.json"
}
}
}
{%- endif %}

View File

@@ -0,0 +1,24 @@
{%- if grains['role'] != 'so-master' -%}
{%- set ES = salt['pillar.get']('node:mainip', '') -%}
# Author: Justin Henderson
# SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics
# Email: justin@hasecuritysolution.com
# Last Update: 12/9/2016
# Rendered on every node except the master; ES is this node's main IP from
# the Salt pillar (node:mainip).
# Filter stage for McAfee events; add_tag is commented out and only documents
# which conf file (9032) handled the event.
filter {
if [event_type] == "mcafee" and "test_data" not in [tags] {
mutate {
##add_tag => [ "conf_file_9032"]
}
}
}
# Send McAfee events (excluding test_data) to Elasticsearch. The stdout line
# is a debugging aid left disabled. No explicit index is set, so the output's
# default index naming is used.
output {
if [event_type] == "mcafee" and "test_data" not in [tags] {
#stdout { codec => rubydebug }
elasticsearch {
hosts => "{{ ES }}"
template => "/logstash-template.json"
}
}
}
{%- endif %}

View File

@@ -0,0 +1,27 @@
{%- if grains['role'] != 'so-master' -%}
{%- set ES = salt['pillar.get']('node:mainip', '') -%}
# Author: Justin Henderson
# SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics
# Email: justin@hasecuritysolution.com
# Last Update: 12/9/2016
# Rendered on every node except the master; ES is this node's main IP from
# the Salt pillar (node:mainip).
# Filter stage for Snort events; add_tag is commented out and only documents
# which conf file (9033) handled the event.
filter {
if [event_type] == "snort" and "test_data" not in [tags] {
mutate {
##add_tag => [ "conf_file_9033"]
}
}
}
# Send Snort events (excluding test_data) to the daily IDS index. The index
# template is pinned by name and overwritten on restart so the mapping stays
# current.
output {
if [event_type] == "snort" and "test_data" not in [tags] {
#stdout { codec => rubydebug }
elasticsearch {
hosts => "{{ ES }}"
index => "logstash-ids-%{+YYYY.MM.dd}"
template_name => "logstash"
template => "/logstash-template.json"
template_overwrite => true
}
}
}
{%- endif %}

View File

@@ -0,0 +1,26 @@
{%- if grains['role'] != 'so-master' -%}
{%- set ES = salt['pillar.get']('node:mainip', '') -%}
# Author: Justin Henderson
# SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics
# Updated by: Doug Burks
# Last Update: 5/15/2017
# Rendered on every node except the master; ES is this node's main IP from
# the Salt pillar (node:mainip).
# Routed by tag ("syslog") rather than event_type; add_tag is commented out
# and only documents which conf file (9034) handled the event.
filter {
if "syslog" in [tags] and "test_data" not in [tags] {
mutate {
##add_tag => [ "conf_file_9034"]
}
}
}
# Send syslog-tagged events (excluding test_data) to the daily syslog index,
# pinning and overwriting the index template on restart.
output {
if "syslog" in [tags] and "test_data" not in [tags] {
elasticsearch {
hosts => "{{ ES }}"
index => "logstash-syslog-%{+YYYY.MM.dd}"
template_name => "logstash"
template => "/logstash-template.json"
template_overwrite => true
}
}
}
{%- endif %}

View File

@@ -0,0 +1,27 @@
{%- if grains['role'] != 'so-master' -%}
{%- set ES = salt['pillar.get']('node:mainip', '') -%}
# Author: Justin Henderson
# SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics
# Email: justin@hasecuritysolution.com
# Last Update: 12/9/2016
# Rendered on every node except the master; ES is this node's main IP from
# the Salt pillar (node:mainip).
# Routed by tag ("firewall"); add_tag is commented out and only documents
# which conf file (9200) handled the event.
filter {
if "firewall" in [tags] and "test_data" not in [tags] {
mutate {
##add_tag => [ "conf_file_9200"]
}
}
}
# Send firewall-tagged events (excluding test_data) to the daily firewall
# index, pinning and overwriting the index template on restart. The stdout
# line is a debugging aid left disabled.
output {
if "firewall" in [tags] and "test_data" not in [tags] {
# stdout { codec => rubydebug }
elasticsearch {
hosts => "{{ ES }}"
index => "logstash-firewall-%{+YYYY.MM.dd}"
template_name => "logstash"
template => "/logstash-template.json"
template_overwrite => true
}
}
}
{%- endif %}

View File

@@ -0,0 +1,25 @@
{%- if grains['role'] != 'so-master' -%}
{%- set ES = salt['pillar.get']('node:mainip', '') -%}
# Author: Justin Henderson
# SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics
# Email: justin@hasecuritysolution.com
# Last Update: 12/9/2016
# Rendered on every node except the master; ES is this node's main IP from
# the Salt pillar (node:mainip).
# Filter stage for Windows events; add_tag is commented out and only
# documents which conf file (9300) handled the event.
filter {
if [event_type] == "windows" and "test_data" not in [tags] {
mutate {
##add_tag => [ "conf_file_9300"]
}
}
}
# Send Windows events (excluding test_data) to the daily windows index.
# NOTE(review): unlike the syslog/firewall outputs, no template_name or
# template_overwrite is set here — confirm whether that omission is intended.
output {
if [event_type] == "windows" and "test_data" not in [tags] {
#stdout { codec => rubydebug }
elasticsearch {
hosts => "{{ ES }}"
index => "logstash-windows-%{+YYYY.MM.dd}"
template => "/logstash-template.json"
}
}
}
{%- endif %}

View File

@@ -0,0 +1,25 @@
{%- if grains['role'] != 'so-master' -%}
{%- set ES = salt['pillar.get']('node:mainip', '') -%}
# Author: Justin Henderson
# SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics
# Email: justin@hasecuritysolution.com
# Last Update: 12/9/2016
# Rendered on every node except the master; ES is this node's main IP from
# the Salt pillar (node:mainip).
# Filter stage for DNS events; add_tag is commented out and only documents
# which conf file (9301) handled the event.
filter {
if [event_type] == "dns" and "test_data" not in [tags] {
mutate {
##add_tag => [ "conf_file_9301"]
}
}
}
# Send DNS events (excluding test_data) to the generic daily logstash index.
# The stdout line is a debugging aid left disabled.
output {
if [event_type] == "dns" and "test_data" not in [tags] {
#stdout { codec => rubydebug }
elasticsearch {
hosts => "{{ ES }}"
index => "logstash-%{+YYYY.MM.dd}"
template => "/logstash-template.json"
}
}
}
{%- endif %}

View File

@@ -0,0 +1,25 @@
{%- if grains['role'] != 'so-master' -%}
{%- set ES = salt['pillar.get']('node:mainip', '') -%}
# Author: Justin Henderson
# SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics
# Email: justin@hasecuritysolution.com
# Last Update: 12/9/2016
# Rendered on every node except the master; ES is this node's main IP from
# the Salt pillar (node:mainip).
# Filter stage for Suricata events; add_tag is commented out and only
# documents which conf file (9400) handled the event.
filter {
if [event_type] == "suricata" and "test_data" not in [tags] {
mutate {
##add_tag => [ "conf_file_9400"]
}
}
}
# Send Suricata events (excluding test_data) to the daily IDS index. The
# stdout line is a debugging aid left disabled.
output {
if [event_type] == "suricata" and "test_data" not in [tags] {
#stdout { codec => rubydebug }
elasticsearch {
hosts => "{{ ES }}"
index => "logstash-ids-%{+YYYY.MM.dd}"
# Match the Snort output (conf_file_9033), which writes to the same
# logstash-ids-* index: pin the template name and overwrite it on restart
# so both IDS outputs install the index template identically.
template_name => "logstash"
template => "/logstash-template.json"
template_overwrite => true
}
}
}
{%- endif %}

View File

@@ -0,0 +1,23 @@
{%- if grains['role'] != 'so-master' -%}
{%- set ES = salt['pillar.get']('node:mainip', '') -%}
# Author: Wes Lambert
# Last Update: 09/14/2018
# Rendered on every node except the master; ES is this node's main IP from
# the Salt pillar (node:mainip).
# Routed by tag ("beat"); note this conf does not exclude test_data, unlike
# the other outputs. The add_tag is commented out and only documents which
# conf file (9500) handled the event.
filter {
if "beat" in [tags] {
mutate {
##add_tag => [ "conf_file_9500"]
}
}
}
# Send Beats events to the daily beats index using the dedicated beats
# template (pinned by name, overwritten on restart).
output {
if "beat" in [tags] {
elasticsearch {
hosts => "{{ ES }}"
index => "logstash-beats-%{+YYYY.MM.dd}"
template_name => "logstash-beats"
template => "/beats-template.json"
template_overwrite => true
}
}
}
{%- endif %}

View File

@@ -0,0 +1,27 @@
{%- if grains['role'] != 'so-master' -%}
{%- set ES = salt['pillar.get']('node:mainip', '') -%}
# Author: Justin Henderson
# SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics
# Updated by: Doug Burks
# Last Update: 9/19/2018
# Rendered on every node except the master; ES is this node's main IP from
# the Salt pillar (node:mainip).
# Filter stage for OSSEC events. The condition is aligned with the output
# below (event_type regex match OR "ossec" tag) so tag-routed OSSEC events
# pass through the same filter stage; previously the tag clause was missing
# here. The add_tag is commented out and only documents which conf file
# (9600) handled the event.
filter {
if [event_type] =~ "ossec" or "ossec" in [tags] {
mutate {
##add_tag => [ "conf_file_9600"]
}
}
}
# Send OSSEC events to the daily ossec index using the dedicated OSSEC
# template (pinned by name, overwritten on restart).
output {
if [event_type] =~ "ossec" or "ossec" in [tags] {
elasticsearch {
hosts => "{{ ES }}"
index => "logstash-ossec-%{+YYYY.MM.dd}"
template_name => "logstash-ossec"
template => "/logstash-ossec-template.json"
template_overwrite => true
}
}
}
{%- endif %}

View File

@@ -0,0 +1,24 @@
{%- if grains['role'] != 'so-master' -%}
{%- set ES = salt['pillar.get']('node:mainip', '') -%}
# Author: Justin Henderson
# SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics
# Email: justin@hasecuritysolution.com
# Last Update: 12/9/2016
# Rendered on every node except the master; ES is this node's main IP from
# the Salt pillar (node:mainip).
# Catch-all for events tagged test_data, which every other conf excludes.
# The add_tag is commented out (single '#', unlike the '##' used elsewhere)
# and only documents which conf file (9998) handled the event.
filter {
if "test_data" in [tags] {
mutate {
#add_tag => [ "conf_file_9998"]
}
}
}
# Send test_data events to a dedicated daily test index so they never mix
# with production indices.
output {
if "test_data" in [tags] {
elasticsearch {
hosts => "{{ ES }}"
index => "logstash-test-%{+YYYY.MM.dd}"
template => "/logstash-template.json"
}
}
}
{%- endif %}

View File

@@ -0,0 +1,21 @@
{%- if salt['grains.get']('role') == 'so-master' %}
{%- set master = salt['pillar.get']('master:mainip', '') -%}
{%- set nodetype = 'master' %}
{%- else %}
{%- set master = grains['master'] %}
{%- set nodetype = salt['pillar.get']('node:node_type', 'storage') %}
{%- endif %}
# Push events onto a Redis list on the master for the next pipeline stage.
# Parser nodes write to logstash:parsed; every other node type writes to
# logstash:unparsed. The congestion settings make Logstash pause for 1s
# checks while the list holds >= 50,000,000 entries, providing backpressure.
output {
redis {
host => '{{ master }}'
data_type => 'list'
{%- if nodetype == 'parser' %}
key => 'logstash:parsed'
{%- else %}
key => 'logstash:unparsed'
{%- endif %}
congestion_interval => 1
congestion_threshold => 50000000
# batch_events => 500
}
}

View File

@@ -0,0 +1,36 @@
# Log4j2 configuration for Logstash's own internal logging.
# status = error: log4j2 reports only its own internal errors.
status = error
name = LogstashPropertiesConfig
# Console appenders (plain and JSON) are kept available but disabled;
# logging goes to the rolling file appender only.
#appender.console.type = Console
#appender.console.name = plain_console
#appender.console.layout.type = PatternLayout
#appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %m%n
#appender.json_console.type = Console
#appender.json_console.name = json_console
#appender.json_console.layout.type = JSONLayout
#appender.json_console.layout.compact = true
#appender.json_console.layout.eventEol = true
#Define logging settings.
# Daily-rolling file appender writing /var/log/logstash/logstash.log; each
# message is truncated to 10,000 characters (%.10000m) to bound line length.
appender.rolling.type = RollingFile
appender.rolling.name = rolling
appender.rolling.fileName = /var/log/logstash/logstash.log
appender.rolling.layout.type = PatternLayout
appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %.10000m%n
appender.rolling.filePattern = /var/log/logstash/logstash-%d{yyyy-MM-dd}.log
# Roll once per day, aligned to calendar boundaries (modulate = true).
appender.rolling.policies.type = Policies
appender.rolling.policies.time.type = TimeBasedTriggeringPolicy
appender.rolling.policies.time.interval = 1
appender.rolling.policies.time.modulate = true
# On rollover, delete rotated logstash-*.log files in /var/log/logstash
# whose last-modified time is older than 7 days.
appender.rolling.strategy.type = DefaultRolloverStrategy
appender.rolling.strategy.action.type = Delete
appender.rolling.strategy.action.basepath = /var/log/logstash
appender.rolling.strategy.action.condition.type = IfFileName
appender.rolling.strategy.action.condition.glob = logstash-*.log
appender.rolling.strategy.action.condition.nested_condition.type = IfLastModified
appender.rolling.strategy.action.condition.nested_condition.age = 7D
# Fixed info level; the ${sys:...} lines would instead take level/format
# from Logstash's system properties, and are intentionally disabled.
rootLogger.level = info
rootLogger.appenderRef.rolling.ref = rolling
#rootLogger.level = ${sys:ls.log.level}
#rootLogger.appenderRef.console.ref = ${sys:ls.log.format}_console

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

Some files were not shown because too many files have changed in this diff Show More