Mirror of https://github.com/Security-Onion-Solutions/securityonion.git
Merge remote-tracking branch 'remotes/origin/dev' into feature/suripillar
34  README.md
@@ -1,3 +1,37 @@
## Hybrid Hunter Beta 1.4.0 - Beta 3

- Complete overhaul of the way we handle custom and default settings and data. You will now see a `default` and a `local` directory under the saltstack directory. All customizations are stored in `local`.
- The way firewall rules are handled has been completely revamped, making it much easier for users to customize firewall rules (see the usage sketch after this list).
- Users can now change their own password in SOC.
- Hunt now allows users to enable auto-hunt. This is a toggle which, when enabled, automatically submits a new hunt when filtering, grouping, etc.
- The title bar now reflects the current Hunt query, which helps users locate a previous query in their browser history.
- Zeek 3.0.7
- Elastic 7.7.1
- Suricata can now be used for metadata generation.
- Suricata eve.json has been moved to `/nsm` to align with the storage of other data.
- Suricata will now properly rotate its logs.
- Grafana dashboards now work properly in standalone mode.
- Kibana dashboard updates, including osquery and community_id.
- New Elasticsearch ingest processor to generate community_id from any log that includes the required fields.
- community_id is now generated for additional logs: Zeek HTTP/SMTP/, and Sysmon shipped with osquery or Winlogbeat.
- Major streamlining of Fleet setup & configuration - no need to run a secondary setup script anymore.
- The Fleet standalone node now includes the ability to set an FQDN to point osquery endpoints to.
- Distributed installs now support ingesting Windows event logs via Winlogbeat - includes full parsing support for Sysmon.
- The SOC Downloads section now includes a link to the supported version of Winlogbeat.
- Basic syslog ingestion capability is now included.
- Elasticsearch index name transition fixes for various components.
- Updated URLs for pivot fields in Kibana.
- Instances of `hive` renamed to `thehive`.
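A minimal sketch of the revamped firewall workflow, using the `so-firewall` utility added in this release (the group name and IP below are example values, not defaults):

    # allow an analyst workstation to reach SOC (80/tcp and 443/tcp), then apply the state
    sudo so-firewall --apply includehost analyst 10.23.0.5

    # review which IPs are currently included in the analyst host group
    sudo so-firewall includedhosts analyst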
### Known Issues:

- The Hunt feature is currently considered "Preview" and, although very useful in its current state, not everything works. We wanted to get this out as soon as possible to get your feedback. Let us know what you want to see! Let us know what you think we should call it!
- You cannot pivot to PCAP from Suricata alerts in Kibana or Hunt.
- Navigator currently does not work when SOC is accessed via hostname. IP mode works correctly.
- Due to the move to ECS, the current Playbook plays may not alert correctly at this time.
- The osquery macOS package does not install correctly.

## Hybrid Hunter Beta 1.3.0 - Beta 2

### Changes:
20  files/firewall/assigned_hostgroups.local.map.yaml  Normal file
@@ -0,0 +1,20 @@
{% import_yaml 'firewall/portgroups.yaml' as default_portgroups %}
{% set default_portgroups = default_portgroups.firewall.aliases.ports %}
{% import_yaml 'firewall/portgroups.local.yaml' as local_portgroups %}
{% if local_portgroups.firewall.aliases.ports %}
{% set local_portgroups = local_portgroups.firewall.aliases.ports %}
{% else %}
{% set local_portgroups = {} %}
{% endif %}
{% set portgroups = salt['defaults.merge'](default_portgroups, local_portgroups, in_place=False) %}

role:
  eval:
  fleet:
  heavynode:
  helixsensor:
  master:
  mastersearch:
  standalone:
  searchnode:
  sensor:
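The template above merges the default port groups with any local overrides from `firewall/portgroups.local.yaml`. Rather than editing the local file by hand, it can be populated with `so-firewall`; a hedged sketch, where the group name and port are example values:

    # create a custom port group, add 8443/tcp to it, and apply the firewall state
    sudo so-firewall addportgroup customapp
    sudo so-firewall --apply addport customapp tcp 8443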
62  files/firewall/hostgroups.local.yaml  Normal file
@@ -0,0 +1,62 @@
firewall:
  hostgroups:
    analyst:
      ips:
        delete:
        insert:
    beats_endpoint:
      ips:
        delete:
        insert:
    beats_endpoint_ssl:
      ips:
        delete:
        insert:
    fleet:
      ips:
        delete:
        insert:
    heavy_node:
      ips:
        delete:
        insert:
    master:
      ips:
        delete:
        insert:
    minion:
      ips:
        delete:
        insert:
    node:
      ips:
        delete:
        insert:
    osquery_endpoint:
      ips:
        delete:
        insert:
    search_node:
      ips:
        delete:
        insert:
    sensor:
      ips:
        delete:
        insert:
    syslog:
      ips:
        delete:
        insert:
    wazuh_agent:
      ips:
        delete:
        insert:
    wazuh_api:
      ips:
        delete:
        insert:
    wazuh_authd:
      ips:
        delete:
        insert:
3  files/firewall/portgroups.local.yaml  Normal file
@@ -0,0 +1,3 @@
firewall:
  aliases:
    ports:
@@ -47,7 +47,7 @@ if [ $TYPE == 'sensorstab' ]; then
  echo " monint: $MONINT" >> $local_salt_dir/pillar/data/$TYPE.sls
  salt-call state.apply grafana queue=True
fi
if [ $TYPE == 'evaltab' ]; then
if [ $TYPE == 'evaltab' ] || [ $TYPE == 'standalonetab' ]; then
  echo " monint: $MONINT" >> $local_salt_dir/pillar/data/$TYPE.sls
  if [ ! ${10} ]; then
    salt-call state.apply grafana queue=True

@@ -10,4 +10,4 @@ if grep -q $2 "$local_salt_dir/pillar/firewall/$1.sls"; then
else
  echo " - $2" >> $local_salt_dir/pillar/firewall/$1.sls
  salt-call state.apply firewall queue=True
fi
fi
62  pillar/firewall/ports.sls  Normal file
@@ -0,0 +1,62 @@
firewall:
  analyst:
    ports:
      tcp:
        - 80
        - 443
      udp:
  beats_endpoint:
    ports:
      tcp:
        - 5044
  forward_nodes:
    ports:
      tcp:
        - 443
        - 5044
        - 5644
        - 9822
      udp:
  master:
    ports:
      tcp:
        - 1514
        - 3200
        - 3306
        - 4200
        - 5601
        - 6379
        - 8086
        - 8090
        - 9001
        - 9200
        - 9300
        - 9400
        - 9500
      udp:
        - 1514
  minions:
    ports:
      tcp:
        - 3142
        - 4505
        - 4506
        - 5000
        - 8080
        - 8086
        - 55000
  osquery_endpoint:
    ports:
      tcp:
        - 8090
  search_nodes:
    ports:
      tcp:
        - 6379
        - 9300
  wazuh_endpoint:
    ports:
      tcp:
        - 1514
      udp:
        - 1514
@@ -2,5 +2,6 @@ logstash:
  pipelines:
    master:
      config:
        - so/0009_input_beats.conf
        - so/0010_input_hhbeats.conf
        - so/9999_output_redis.conf.jinja

@@ -5,12 +5,12 @@ logstash:
        - so/0900_input_redis.conf.jinja
        - so/9000_output_zeek.conf.jinja
        - so/9002_output_import.conf.jinja
        - so/9034_output_syslog.conf.jinja
        - so/9100_output_osquery.conf.jinja
        - so/9400_output_suricata.conf.jinja
        - so/9500_output_beats.conf.jinja
        - so/9600_output_ossec.conf.jinja
        - so/9700_output_strelka.conf.jinja
      templates:
        - so/so-beats-template.json
        - so/so-common-template.json
        - so/so-zeek-template.json
@@ -14,7 +14,6 @@ base:

  '*_sensor':
    - static
    - firewall.*
    - brologs
    - healthcheck.sensor
    - minions.{{ grains.id }}
@@ -22,7 +21,6 @@ base:
  '*_master or *_mastersearch':
    - match: compound
    - static
    - firewall.*
    - data.*
    - secrets
    - minions.{{ grains.id }}
@@ -33,7 +31,6 @@ base:

  '*_eval':
    - static
    - firewall.*
    - data.*
    - brologs
    - secrets
@@ -44,7 +41,6 @@ base:
    - logstash
    - logstash.master
    - logstash.search
    - firewall.*
    - data.*
    - brologs
    - secrets
@@ -54,18 +50,15 @@ base:

  '*_node':
    - static
    - firewall.*
    - minions.{{ grains.id }}

  '*_heavynode':
    - static
    - firewall.*
    - brologs
    - minions.{{ grains.id }}

  '*_helix':
    - static
    - firewall.*
    - fireeye
    - brologs
    - logstash
@@ -74,14 +67,12 @@ base:

  '*_fleet':
    - static
    - firewall.*
    - data.*
    - secrets
    - minions.{{ grains.id }}

  '*_searchnode':
    - static
    - firewall.*
    - logstash
    - logstash.search
    - minions.{{ grains.id }}

@@ -44,5 +44,3 @@ send_x509_pem_entries_to_mine:
  - mine.send:
    - func: x509.get_pem_entries
    - glob_path: /etc/pki/ca.crt
  - onchanges:
    - x509: /etc/pki/ca.crt
@@ -62,6 +62,7 @@ commonpkgs:
      - python3-dateutil
      - python3-m2crypto
      - python3-mysqldb
      - git
heldpackages:
  pkg.installed:
    - pkgs:
@@ -96,12 +97,13 @@ commonpkgs:
      - device-mapper-persistent-data
      - lvm2
      - openssl
      - git

heldpackages:
  pkg.installed:
    - pkgs:
      - containerd.io: 1.2.13-3.2.el7
      - docker-ce: 3:19.03.9-3.el7
      - docker-ce: 3:19.03.11-3.el7
      - hold: True
      - update_holds: True
{% endif %}
@@ -128,4 +130,4 @@ utilsyncscripts:
    - group: 0
    - file_mode: 755
    - template: jinja
    - source: salt://common/tools/sbin
    - source: salt://common/tools/sbin
@@ -45,9 +45,22 @@ do
      SKIP=1
      ;;
    w)
      FULLROLE="wazuh_endpoint"
      FULLROLE="wazuh_agent"
      SKIP=1
      ;;
    s)
      FULLROLE="syslog"
      SKIP=1
      ;;
    p)
      FULLROLE="wazuh_api"
      SKIP=1
      ;;
    r)
      FULLROLE="wazuh_authd"
      SKIP=1
      ;;

  esac
done

@@ -60,8 +73,10 @@ if [ "$SKIP" -eq 0 ]; then
  echo "[a] - Analyst - ports 80/tcp and 443/tcp"
  echo "[b] - Logstash Beat - port 5044/tcp"
  echo "[o] - Osquery endpoint - port 8090/tcp"
  echo "[w] - Wazuh endpoint - port 1514"
  echo ""
  echo "[s] - Syslog device - 514/tcp/udp"
  echo "[w] - Wazuh agent - port 1514/tcp/udp"
  echo "[p] - Wazuh API - port 55000/tcp"
  echo "[r] - Wazuh registration service - 1515/tcp"
  echo "Please enter your selection (a - analyst, b - beats, o - osquery, w - wazuh):"
  read ROLE
  echo "Enter a single ip address or range to allow (example: 10.10.10.10 or 10.10.0.0/16):"
@@ -74,7 +89,13 @@ if [ "$SKIP" -eq 0 ]; then
  elif [ "$ROLE" == "o" ]; then
    FULLROLE=osquery_endpoint
  elif [ "$ROLE" == "w" ]; then
    FULLROLE=wazuh_endpoint
    FULLROLE=wazuh_agent
  elif [ "$ROLE" == "s" ]; then
    FULLROLE=syslog
  elif [ "$ROLE" == "p" ]; then
    FULLROLE=wazuh_api
  elif [ "$ROLE" == "r" ]; then
    FULLROLE=wazuh_authd
  else
    echo "I don't recognize that role"
    exit 1
@@ -83,7 +104,8 @@ if [ "$SKIP" -eq 0 ]; then
  fi

  echo "Adding $IP to the $FULLROLE role. This can take a few seconds"
  $default_salt_dir/pillar/firewall/addfirewall.sh $FULLROLE $IP
  /usr/sbin/so-firewall includehost $FULLROLE $IP
  salt-call state.apply firewall queue=True

  # Check if Wazuh enabled
  if grep -q -R "wazuh: 1" $local_salt_dir/pillar/*; then
111  salt/common/tools/sbin/so-docker-refresh  Normal file
@@ -0,0 +1,111 @@
#!/bin/bash

# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

got_root(){
  if [ "$(id -u)" -ne 0 ]; then
    echo "This script must be run using sudo!"
    exit 1
  fi
}

master_check() {
  # Check to see if this is a master
  MASTERCHECK=$(cat /etc/salt/grains | grep role | awk '{print $2}')
  if [ $MASTERCHECK == 'so-eval' ] || [ $MASTERCHECK == 'so-master' ] || [ $MASTERCHECK == 'so-mastersearch' ] || [ $MASTERCHECK == 'so-standalone' ] || [ $MASTERCHECK == 'so-helix' ]; then
    echo "This is a master. We can proceed."
  else
    echo "Please run soup on the master. The master controls all updates."
    exit 1
  fi
}

update_docker_containers() {

  # Download the containers from the interwebs
  for i in "${TRUSTED_CONTAINERS[@]}"
  do
    # Pull down the trusted docker image
    echo "Downloading $i"
    docker pull --disable-content-trust=false docker.io/soshybridhunter/$i
    # Tag it with the new registry destination
    docker tag soshybridhunter/$i $HOSTNAME:5000/soshybridhunter/$i
    docker push $HOSTNAME:5000/soshybridhunter/$i
  done

}

version_check() {
  if [ -f /etc/soversion ]; then
    VERSION=$(cat /etc/soversion)
  else
    echo "Unable to detect version. I will now terminate."
    exit 1
  fi
}

got_root
master_check
version_check

# Use the hostname
HOSTNAME=$(hostname)
# List all the containers
if [ $MASTERCHECK != 'so-helix' ]; then
  TRUSTED_CONTAINERS=( \
  "so-acng:$VERSION" \
  "so-thehive-cortex:$VERSION" \
  "so-curator:$VERSION" \
  "so-domainstats:$VERSION" \
  "so-elastalert:$VERSION" \
  "so-elasticsearch:$VERSION" \
  "so-filebeat:$VERSION" \
  "so-fleet:$VERSION" \
  "so-fleet-launcher:$VERSION" \
  "so-freqserver:$VERSION" \
  "so-grafana:$VERSION" \
  "so-idstools:$VERSION" \
  "so-influxdb:$VERSION" \
  "so-kibana:$VERSION" \
  "so-kratos:$VERSION" \
  "so-logstash:$VERSION" \
  "so-mysql:$VERSION" \
  "so-navigator:$VERSION" \
  "so-nginx:$VERSION" \
  "so-playbook:$VERSION" \
  "so-redis:$VERSION" \
  "so-soc:$VERSION" \
  "so-soctopus:$VERSION" \
  "so-steno:$VERSION" \
  "so-strelka:$VERSION" \
  "so-suricata:$VERSION" \
  "so-telegraf:$VERSION" \
  "so-thehive:$VERSION" \
  "so-thehive-es:$VERSION" \
  "so-wazuh:$VERSION" \
  "so-zeek:$VERSION" )
else
  TRUSTED_CONTAINERS=( \
  "so-filebeat:$VERSION" \
  "so-idstools:$VERSION" \
  "so-logstash:$VERSION" \
  "so-nginx:$VERSION" \
  "so-redis:$VERSION" \
  "so-steno:$VERSION" \
  "so-suricata:$VERSION" \
  "so-telegraf:$VERSION" \
  "so-zeek:$VERSION" )
fi

update_docker_containers
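The script takes no arguments and must run on the master. As a hedged follow-up (not part of the script itself), the re-tagged images can be confirmed locally:

    sudo so-docker-refresh

    # optionally confirm the re-tagged images are present
    docker images | grep soshybridhunter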
26  salt/common/tools/sbin/so-elasticsearch-indices-rw  Normal file
@@ -0,0 +1,26 @@
#!/bin/bash
#
#
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

IP={{ salt['grains.get']('ip_interfaces').get(salt['pillar.get']('sensor:mainint', salt['pillar.get']('master:mainint', salt['pillar.get']('node:mainint', salt['pillar.get']('host:mainint')))))[0] }}
ESPORT=9200
THEHIVEESPORT=9400

echo "Removing read only attributes for indices..."
echo
for p in $ESPORT $THEHIVEESPORT; do
  curl -XPUT -H "Content-Type: application/json" http://$IP:$p/_all/_settings -d '{"index.blocks.read_only_allow_delete": null}' 2>&1 | if grep -q ack; then echo "Index settings updated..."; else echo "There was an issue updating the read-only attribute. Please ensure Elasticsearch is running."; fi
done
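After the script runs, one way to spot-check the result, assuming Elasticsearch is reachable on localhost:9200 (adjust the host to match your deployment):

    # no output means no index still carries the read-only block
    curl -s http://localhost:9200/_all/_settings | grep read_only_allow_delete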
305  salt/common/tools/sbin/so-firewall  Executable file
@@ -0,0 +1,305 @@
#!/usr/bin/env python3

# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import subprocess
import sys
import yaml

hostgroupsFilename = "/opt/so/saltstack/local/salt/firewall/hostgroups.local.yaml"
portgroupsFilename = "/opt/so/saltstack/local/salt/firewall/portgroups.local.yaml"
supportedProtocols = ['tcp', 'udp']

def showUsage(args):
  print('Usage: {} [OPTIONS] <COMMAND> [ARGS...]'.format(sys.argv[0]))
  print(' Options:')
  print('  --apply - After updating the firewall configuration files, apply the new firewall state')
  print('')
  print(' Available commands:')
  print('  help          - Prints this usage information.')
  print('  includedhosts - Lists the IPs included in the given group. Args: <GROUP_NAME>')
  print('  excludedhosts - Lists the IPs excluded from the given group. Args: <GROUP_NAME>')
  print('  includehost   - Includes the given IP in the given group. Args: <GROUP_NAME> <IP>')
  print('  excludehost   - Excludes the given IP from the given group. Args: <GROUP_NAME> <IP>')
  print('  removehost    - Removes an excluded IP from the given group. Args: <GROUP_NAME> <IP>')
  print('  addhostgroup  - Adds a new, custom host group. Args: <GROUP_NAME>')
  print('  listports     - Lists ports in the given group and protocol. Args: <GROUP_NAME> <PORT_PROTOCOL>')
  print('  addport       - Adds a PORT to the given group. Args: <GROUP_NAME> <PORT_PROTOCOL> <PORT>')
  print('  removeport    - Removes a PORT from the given group. Args: <GROUP_NAME> <PORT_PROTOCOL> <PORT>')
  print('  addportgroup  - Adds a new, custom port group. Args: <GROUP_NAME>')
  print('')
  print(' Where:')
  print('  GROUP_NAME    - The name of an alias group (Ex: analyst)')
  print('  IP            - Either a single IP address (Ex: 8.8.8.8) or a CIDR block (Ex: 10.23.0.0/16).')
  print('  PORT_PROTOCOL - Must be one of the following: ' + str(supportedProtocols))
  print('  PORT          - Either a single numeric port (Ex: 443), or a port range (Ex: 8000:8002).')
  sys.exit(1)

def loadYaml(filename):
  file = open(filename, "r")
  # safe_load refuses to construct arbitrary Python objects from the YAML file
  return yaml.safe_load(file)

def writeYaml(filename, content):
  file = open(filename, "w")
  return yaml.dump(content, file)

def listIps(name, mode):
  content = loadYaml(hostgroupsFilename)
  if name not in content['firewall']['hostgroups']:
    print('Host group does not exist', file=sys.stderr)
    return 4
  hostgroup = content['firewall']['hostgroups'][name]
  ips = hostgroup['ips'][mode]
  if ips is not None:
    for ip in ips:
      print(ip)
  return 0

def addIp(name, ip, mode):
  content = loadYaml(hostgroupsFilename)
  if name not in content['firewall']['hostgroups']:
    print('Host group does not exist', file=sys.stderr)
    return 4
  hostgroup = content['firewall']['hostgroups'][name]
  ips = hostgroup['ips'][mode]
  if ips is None:
    ips = []
    hostgroup['ips'][mode] = ips
  if ip not in ips:
    ips.append(ip)
  else:
    print('Already exists', file=sys.stderr)
    return 3
  writeYaml(hostgroupsFilename, content)
  return 0

def removeIp(name, ip, mode, silence = False):
  content = loadYaml(hostgroupsFilename)
  if name not in content['firewall']['hostgroups']:
    print('Host group does not exist', file=sys.stderr)
    return 4
  hostgroup = content['firewall']['hostgroups'][name]
  ips = hostgroup['ips'][mode]
  if ips is None:
    ips = []
    hostgroup['ips'][mode] = ips
  if ip in ips:
    ips.remove(ip)
  else:
    if not silence:
      print('IP does not exist', file=sys.stderr)
    return 3
  writeYaml(hostgroupsFilename, content)
  return 0

def createProtocolMap():
  map = {}
  for protocol in supportedProtocols:
    map[protocol] = []
  return map

def addhostgroup(args):
  if len(args) != 1:
    print('Missing host group name argument', file=sys.stderr)
    showUsage(args)

  name = args[0]
  content = loadYaml(hostgroupsFilename)
  if name in content['firewall']['hostgroups']:
    print('Already exists', file=sys.stderr)
    return 3
  content['firewall']['hostgroups'][name] = { 'ips': { 'insert': [], 'delete': [] }}
  writeYaml(hostgroupsFilename, content)
  return 0

def addportgroup(args):
  if len(args) != 1:
    print('Missing port group name argument', file=sys.stderr)
    showUsage(args)

  name = args[0]
  content = loadYaml(portgroupsFilename)
  ports = content['firewall']['aliases']['ports']
  if ports is None:
    ports = {}
    content['firewall']['aliases']['ports'] = ports
  if name in ports:
    print('Already exists', file=sys.stderr)
    return 3
  ports[name] = createProtocolMap()
  writeYaml(portgroupsFilename, content)
  return 0

def listports(args):
  if len(args) != 2:
    print('Missing port group name or port protocol', file=sys.stderr)
    showUsage(args)

  name = args[0]
  protocol = args[1]
  if protocol not in supportedProtocols:
    print('Port protocol is not supported', file=sys.stderr)
    return 5

  content = loadYaml(portgroupsFilename)
  ports = content['firewall']['aliases']['ports']
  if ports is None:
    ports = {}
    content['firewall']['aliases']['ports'] = ports
  if name not in ports:
    print('Port group does not exist', file=sys.stderr)
    return 3
  ports = ports[name][protocol]
  if ports is not None:
    for port in ports:
      print(port)
  return 0

def addport(args):
  if len(args) != 3:
    print('Missing port group name, port protocol, or port argument', file=sys.stderr)
    showUsage(args)

  name = args[0]
  protocol = args[1]
  port = args[2]
  if protocol not in supportedProtocols:
    print('Port protocol is not supported', file=sys.stderr)
    return 5

  content = loadYaml(portgroupsFilename)
  ports = content['firewall']['aliases']['ports']
  if ports is None:
    ports = {}
    content['firewall']['aliases']['ports'] = ports
  if name not in ports:
    print('Port group does not exist', file=sys.stderr)
    return 3
  ports = ports[name][protocol]
  if ports is None:
    ports = []
    content['firewall']['aliases']['ports'][name][protocol] = ports
  if port in ports:
    print('Already exists', file=sys.stderr)
    return 3
  ports.append(port)
  writeYaml(portgroupsFilename, content)
  return 0

def removeport(args):
  if len(args) != 3:
    print('Missing port group name, port protocol, or port argument', file=sys.stderr)
    showUsage(args)

  name = args[0]
  protocol = args[1]
  port = args[2]
  if protocol not in supportedProtocols:
    print('Port protocol is not supported', file=sys.stderr)
    return 5

  content = loadYaml(portgroupsFilename)
  ports = content['firewall']['aliases']['ports']
  if ports is None:
    ports = {}
    content['firewall']['aliases']['ports'] = ports
  if name not in ports:
    print('Port group does not exist', file=sys.stderr)
    return 3
  ports = ports[name][protocol]
  if ports is None or port not in ports:
    print('Port does not exist', file=sys.stderr)
    return 3
  ports.remove(port)
  writeYaml(portgroupsFilename, content)
  return 0

def includedhosts(args):
  if len(args) != 1:
    print('Missing host group name argument', file=sys.stderr)
    showUsage(args)
  return listIps(args[0], 'insert')

def excludedhosts(args):
  if len(args) != 1:
    print('Missing host group name argument', file=sys.stderr)
    showUsage(args)
  return listIps(args[0], 'delete')

def includehost(args):
  if len(args) != 2:
    print('Missing host group name or ip argument', file=sys.stderr)
    showUsage(args)
  result = addIp(args[0], args[1], 'insert')
  if result == 0:
    removeIp(args[0], args[1], 'delete', True)
  return result

def excludehost(args):
  if len(args) != 2:
    print('Missing host group name or ip argument', file=sys.stderr)
    showUsage(args)
  result = addIp(args[0], args[1], 'delete')
  if result == 0:
    removeIp(args[0], args[1], 'insert', True)
  return result

def removehost(args):
  if len(args) != 2:
    print('Missing host group name or ip argument', file=sys.stderr)
    showUsage(args)
  return removeIp(args[0], args[1], 'delete')

def apply():
  proc = subprocess.run(['salt-call', 'state.apply', 'firewall', 'queue=True'])
  return proc.returncode

def main():
  options = []
  args = sys.argv[1:]
  # iterate over a copy so removing options does not skip adjacent arguments
  for option in list(args):
    if option.startswith("--"):
      options.append(option)
      args.remove(option)

  if len(args) == 0:
    showUsage(None)

  commands = {
    "help": showUsage,
    "includedhosts": includedhosts,
    "excludedhosts": excludedhosts,
    "includehost": includehost,
    "excludehost": excludehost,
    "removehost": removehost,
    "listports": listports,
    "addport": addport,
    "removeport": removeport,
    "addhostgroup": addhostgroup,
    "addportgroup": addportgroup
  }

  cmd = commands.get(args[0], showUsage)
  code = cmd(args[1:])

  if code == 0 and "--apply" in options:
    code = apply()

  sys.exit(code)

if __name__ == "__main__":
  main()
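Example invocations matching the usage text above (the `webservers` group is a hypothetical name used for illustration):

    # create a custom host group and include a CIDR block in it, applying the state afterwards
    sudo so-firewall addhostgroup webservers
    sudo so-firewall --apply includehost webservers 10.23.0.0/16

    # open a port range for a custom port group
    sudo so-firewall addportgroup webservers
    sudo so-firewall --apply addport webservers tcp 8000:8002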
44  salt/common/tools/sbin/so-fleet-setup  Normal file
@@ -0,0 +1,44 @@
#!/bin/bash

#so-fleet-setup $FleetEmail $FleetPassword

if [[ $# -ne 2 ]] ; then
  echo "Username or Password was not set - exiting now."
  exit 1
fi

# Checking to see if required containers are started...
if [ ! "$(docker ps -q -f name=so-fleet)" ]; then
  echo "Starting Docker Containers..."
  salt-call state.apply mysql queue=True >> /root/fleet-setup.log
  salt-call state.apply fleet queue=True >> /root/fleet-setup.log
  salt-call state.apply redis queue=True >> /root/fleet-setup.log
fi

docker exec so-fleet fleetctl config set --address https://localhost:8080 --tls-skip-verify --url-prefix /fleet
docker exec so-fleet fleetctl setup --email "$1" --password "$2"

docker exec so-fleet fleetctl apply -f /packs/palantir/Fleet/Endpoints/MacOS/osquery.yaml
docker exec so-fleet fleetctl apply -f /packs/palantir/Fleet/Endpoints/Windows/osquery.yaml
docker exec so-fleet fleetctl apply -f /packs/so/so-default.yml
docker exec so-fleet /bin/sh -c 'for pack in /packs/palantir/Fleet/Endpoints/packs/*.yaml; do fleetctl apply -f "$pack"; done'
docker exec so-fleet fleetctl apply -f /packs/osquery-config.conf


# Enable Fleet
echo "Enabling Fleet..."
salt-call state.apply fleet.event_enable-fleet queue=True >> /root/fleet-setup.log
salt-call state.apply nginx queue=True >> /root/fleet-setup.log

# Generate osquery install packages
echo "Generating osquery install packages - this will take some time..."
salt-call state.apply fleet.event_gen-packages queue=True >> /root/fleet-setup.log
sleep 120

echo "Installing launcher via salt..."
salt-call state.apply fleet.install_package queue=True >> /root/fleet-setup.log
salt-call state.apply filebeat queue=True >> /root/fleet-setup.log
docker stop so-nginx
salt-call state.apply nginx queue=True >> /root/fleet-setup.log

echo "Fleet Setup Complete - Login with the username and password you ran the script with."
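The script takes the Fleet email and password as its two positional arguments; a minimal sketch with placeholder credentials:

    sudo so-fleet-setup admin@example.com 'S3cretP4ss!'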
@@ -31,5 +31,5 @@ sed -i "s/$KIBANA_HOST/PLACEHOLDER/g" $OUTFILE
# Clean up for Fleet, if applicable
# {% if FLEET_NODE or FLEET_MASTER %}
# Fleet IP
sed -i "s/{{ FLEET_IP }}/FLEETPLACEHOLDER/g" $OUTFILE
sed -i "s/{{ MASTER }}/FLEETPLACEHOLDER/g" $OUTFILE
# {% endif %}
57  salt/common/tools/sbin/so-saltstack-update  Normal file
@@ -0,0 +1,57 @@
#!/bin/bash

# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

default_salt_dir=/opt/so/saltstack/default

clone_to_tmp() {

  # TODO Need to add an air-gap option
  # Make a temp location for the files
  mkdir /tmp/sogh
  cd /tmp/sogh
  #git clone -b dev https://github.com/Security-Onion-Solutions/securityonion-saltstack.git
  git clone https://github.com/Security-Onion-Solutions/securityonion-saltstack.git
  cd /tmp

}

copy_new_files() {

  # Copy new files over to the salt dir
  cd /tmp/sogh/securityonion-saltstack
  git checkout $BRANCH
  rsync -a --exclude-from 'exclude-list.txt' salt $default_salt_dir/
  rsync -a --exclude-from 'exclude-list.txt' pillar $default_salt_dir/
  chown -R socore:socore $default_salt_dir/salt
  chown -R socore:socore $default_salt_dir/pillar
  chmod 755 $default_salt_dir/pillar/firewall/addfirewall.sh
  rm -rf /tmp/sogh
}

got_root(){
  if [ "$(id -u)" -ne 0 ]; then
    echo "This script must be run using sudo!"
    exit 1
  fi
}

got_root
if [ $# -ne 1 ] ; then
  BRANCH=master
else
  BRANCH=$1
fi
clone_to_tmp
copy_new_files
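Run on the master; with no argument the script tracks master, otherwise the named branch:

    # pull the default (master) branch
    sudo so-saltstack-update

    # or pull a specific branch such as dev
    sudo so-saltstack-update dev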
@@ -1,4 +1,4 @@
{% if grains['role'] in ['so-node', 'so-searchnode', 'so-heavynode'] %}
{% if grains['role'] in ['so-node', 'so-heavynode'] %}
{%- set elasticsearch = salt['pillar.get']('node:mainip', '') -%}
{% elif grains['role'] in ['so-eval', 'so-mastersearch', 'so-standalone'] %}
{%- set elasticsearch = salt['pillar.get']('master:mainip', '') -%}

@@ -1,6 +1,6 @@
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
{% set MASTER = salt['grains.get']('master') %}
{% if grains['role'] in ['so-searchnode', 'so-eval', 'so-node', 'so-mastersearch', 'so-heavynode', 'so-standalone'] %}
{% if grains['role'] in ['so-eval', 'so-node', 'so-mastersearch', 'so-heavynode', 'so-standalone'] %}
# Curator
# Create the group
curatorgroup:
@@ -89,7 +89,7 @@ curdel:

so-curatorcloseddeletecron:
  cron.present:
    - name: /usr/sbin/so-curator-closed-delete
    - name: /usr/sbin/so-curator-closed-delete > /opt/so/log/curator/cron-closed-delete.log 2>&1
    - user: root
    - minute: '*'
    - hour: '*'
@@ -99,7 +99,7 @@ so-curatorcloseddeletecron:

so-curatorclosecron:
  cron.present:
    - name: /usr/sbin/so-curator-close
    - name: /usr/sbin/so-curator-close > /opt/so/log/curator/cron-close.log 2>&1
    - user: root
    - minute: '*'
    - hour: '*'
@@ -109,7 +109,7 @@ so-curatorclosecron:

so-curatordeletecron:
  cron.present:
    - name: /usr/sbin/so-curator-delete
    - name: /usr/sbin/so-curator-delete > /opt/so/log/curator/cron-delete.log 2>&1
    - user: root
    - minute: '*'
    - hour: '*'
35  salt/elasticsearch/files/ingest/beats.common  Normal file
@@ -0,0 +1,35 @@
{
  "description" : "beats.common",
  "processors" : [
    { "community_id": { "if": "ctx.winlog.event_data?.Protocol != null", "field": ["winlog.event_data.SourceIp","winlog.event_data.SourcePort","winlog.event_data.DestinationIp","winlog.event_data.DestinationPort","winlog.event_data.Protocol"], "target_field": "network.community_id" } },
    { "set": { "if": "ctx.winlog?.channel != null", "field": "dataset", "value": "wel-{{winlog.channel}}", "override": true } },
    { "set": { "if": "ctx.agent?.type != null", "field": "module", "value": "{{agent.type}}", "override": true } },
    { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 3", "field": "event.category", "value": "host,process,network", "override": true } },
    { "set": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational' && ctx.event?.code == 1", "field": "event.category", "value": "host,process", "override": true } },
    { "rename": { "field": "agent.hostname", "target_field": "agent.name", "ignore_missing": true } },
    { "rename": { "field": "winlog.event_data.SubjectUserName", "target_field": "user.name", "ignore_missing": true } },
    { "rename": { "field": "winlog.event_data.DestinationHostname", "target_field": "destination.hostname", "ignore_missing": true } },
    { "rename": { "field": "winlog.event_data.DestinationIp", "target_field": "destination.ip", "ignore_missing": true } },
    { "rename": { "field": "winlog.event_data.DestinationPort", "target_field": "destination.port", "ignore_missing": true } },
    { "rename": { "field": "winlog.event_data.Image", "target_field": "process.executable", "ignore_missing": true } },
    { "rename": { "field": "winlog.event_data.ProcessID", "target_field": "process.pid", "ignore_missing": true } },
    { "rename": { "field": "winlog.event_data.ProcessGuid", "target_field": "process.entity_id", "ignore_missing": true } },
    { "rename": { "field": "winlog.event_data.CommandLine", "target_field": "process.command_line", "ignore_missing": true } },
    { "rename": { "field": "winlog.event_data.CurrentDirectory", "target_field": "process.working_directory", "ignore_missing": true } },
    { "rename": { "field": "winlog.event_data.Description", "target_field": "process.pe.description", "ignore_missing": true } },
    { "rename": { "field": "winlog.event_data.Product", "target_field": "process.pe.product", "ignore_missing": true } },
    { "rename": { "field": "winlog.event_data.OriginalFileName", "target_field": "process.pe.original_file_name", "ignore_missing": true } },
    { "rename": { "field": "winlog.event_data.FileVersion", "target_field": "process.pe.file_version", "ignore_missing": true } },
    { "rename": { "field": "winlog.event_data.ParentCommandLine", "target_field": "process.parent.command_line", "ignore_missing": true } },
    { "rename": { "field": "winlog.event_data.ParentImage", "target_field": "process.parent.executable", "ignore_missing": true } },
    { "rename": { "field": "winlog.event_data.ParentProcessGuid", "target_field": "process.parent.entity_id", "ignore_missing": true } },
    { "rename": { "field": "winlog.event_data.ParentProcessId", "target_field": "process.ppid", "ignore_missing": true } },
    { "rename": { "field": "winlog.event_data.Protocol", "target_field": "network.transport", "ignore_missing": true } },
    { "rename": { "field": "winlog.event_data.User", "target_field": "user.name", "ignore_missing": true } },
    { "rename": { "field": "winlog.event_data.SourceHostname", "target_field": "source.hostname", "ignore_missing": true } },
    { "rename": { "field": "winlog.event_data.SourceIp", "target_field": "source.ip", "ignore_missing": true } },
    { "rename": { "field": "winlog.event_data.SourcePort", "target_field": "source.port", "ignore_missing": true } },
    { "rename": { "field": "winlog.event_data.targetFilename", "target_field": "file.target", "ignore_missing": true } },
    { "pipeline": { "name": "common" } }
  ]
}
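A pipeline like this can be dry-run with Elasticsearch's simulate API. A hedged sketch against a deployment where the pipelines from this commit are loaded (host and all field values are made up for illustration):

    # dry-run a Sysmon-style network event through the pipeline
    curl -s -XPOST 'http://localhost:9200/_ingest/pipeline/beats.common/_simulate' \
      -H 'Content-Type: application/json' \
      -d '{"docs":[{"_source":{"winlog":{"channel":"Microsoft-Windows-Sysmon/Operational","event_data":{"SourceIp":"10.0.0.5","SourcePort":"49723","DestinationIp":"10.0.0.9","DestinationPort":"443","Protocol":"tcp"}}}}]}'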
@@ -39,6 +39,9 @@
    { "rename": { "field": "dataset", "target_field": "event.dataset", "ignore_missing": true } },
    { "rename": { "field": "category", "target_field": "event.category", "ignore_missing": true } },
    { "rename": { "field": "message2.community_id", "target_field": "network.community_id", "ignore_failure": true, "ignore_missing": true } },
    { "convert": { "field": "destination.port", "type": "integer", "ignore_failure": true, "ignore_missing": true } },
    { "convert": { "field": "source.port", "type": "integer", "ignore_failure": true, "ignore_missing": true } },
    { "convert": { "field": "log.id.uid", "type": "string", "ignore_failure": true, "ignore_missing": true } },
    {
      "remove": {
        "field": [ "index_name_prefix", "message2", "type" ],

@@ -1,13 +1,12 @@
{
  "description" : "suricata.alert",
  "processors" : [
    { "json": { "field": "message", "target_field": "message2", "ignore_failure": true } },
    { "rename": { "field": "message2.comunity_id", "target_field": "network.comunity_id", "ignore_failure": true } },
    { "rename": { "field": "message2.alert", "target_field": "rule", "ignore_failure": true } },
    { "rename": { "field": "rule.signature", "target_field": "rule.name", "ignore_failure": true } },
    { "rename": { "field": "rule.ref", "target_field": "rule.version", "ignore_failure": true } },
    { "rename": { "field": "rule.signature_id", "target_field": "rule.uuid", "ignore_failure": true } },
    { "rename": { "field": "rule.signature_id", "target_field": "rule.signature", "ignore_failure": true } },
    { "pipeline": { "name": "suricata.common" } }
    { "rename": { "field": "message2.payload_printable", "target_field": "network.data.decoded", "ignore_failure": true } },
    { "pipeline": { "name": "common" } }
  ]
}
@@ -1,15 +1,18 @@
{
  "description" : "suricata.common",
  "processors" : [
    { "json": { "field": "message", "target_field": "message2", "ignore_failure": true } },
    { "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_failure": true } },
    { "rename": { "field": "message2.flow_id", "target_field": "event.id", "ignore_failure": true } },
    { "rename": { "field": "message2.flow_id", "target_field": "log.id.uid", "ignore_failure": true } },
    { "rename": { "field": "message2.src_ip", "target_field": "source.ip", "ignore_failure": true } },
    { "rename": { "field": "message2.src_port", "target_field": "source.port", "ignore_failure": true } },
    { "rename": { "field": "message2.dest_ip", "target_field": "destination.ip", "ignore_failure": true } },
    { "rename": { "field": "message2.dest_port", "target_field": "destination.port", "ignore_failure": true } },
    { "rename": { "field": "message2.community_id", "target_field": "network.community_id", "ignore_missing": true } },
    { "remove": { "field": "dataset", "ignore_failure": true } },
    { "rename": { "field": "message2.event_type", "target_field": "dataset", "ignore_failure": true } },
    { "set": { "field": "observer.name", "value": "{{agent.name}}" } },
    { "remove": { "field": ["agent"], "ignore_failure": true } },
    { "pipeline": { "name": "common" } }
    { "remove": { "field": "agent", "ignore_failure": true } },
    { "pipeline": { "name": "suricata.{{dataset}}" } }
  ]
}
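Note the dispatch at the end: suricata.common renames eve.json's `event_type` into `dataset` and then hands the document off to the matching `suricata.<dataset>` pipeline defined in the files below. To confirm which per-protocol pipelines a deployment actually has loaded (the localhost host is an assumption; adjust as needed):

    # list the suricata.* ingest pipelines currently registered in Elasticsearch
    curl -s 'http://localhost:9200/_ingest/pipeline/suricata.*?pretty' | grep '"description"'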
14  salt/elasticsearch/files/ingest/suricata.dhcp  Normal file
@@ -0,0 +1,14 @@
{
  "description" : "suricata.dhcp",
  "processors" : [
    { "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_missing": true } },
    { "rename": { "field": "message2.app_proto", "target_field": "network.protocol", "ignore_missing": true } },
    { "rename": { "field": "message2.dhcp.assigned_ip", "target_field": "dhcp.assigned_ip", "ignore_missing": true } },
    { "rename": { "field": "message2.dhcp.client_mac", "target_field": "host.mac", "ignore_missing": true } },
    { "rename": { "field": "message2.dhcp.dhcp_type", "target_field": "dhcp.message_types", "ignore_missing": true } },
    { "rename": { "field": "message2.dhcp.assigned_ip", "target_field": "dhcp.assigned_ip", "ignore_missing": true } },
    { "rename": { "field": "message2.dhcp.type", "target_field": "dhcp.type", "ignore_missing": true } },
    { "rename": { "field": "message2.dhcp.id", "target_field": "dhcp.id", "ignore_missing": true } },
    { "pipeline": { "name": "common" } }
  ]
}
8  salt/elasticsearch/files/ingest/suricata.dnp3  Normal file
@@ -0,0 +1,8 @@
{
  "description" : "suricata.dnp3",
  "processors" : [
    { "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_missing": true } },
    { "rename": { "field": "message2.app_proto", "target_field": "network.protocol", "ignore_missing": true } },
    { "pipeline": { "name": "common" } }
  ]
}
14  salt/elasticsearch/files/ingest/suricata.dns  Normal file
@@ -0,0 +1,14 @@
{
  "description" : "suricata.dns",
  "processors" : [
    { "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_missing": true } },
    { "rename": { "field": "message2.app_proto", "target_field": "network.protocol", "ignore_missing": true } },
    { "rename": { "field": "message2.dns.type", "target_field": "dns.type", "ignore_missing": true } },
    { "rename": { "field": "message2.dns.tx_id", "target_field": "dns.id", "ignore_missing": true } },
    { "rename": { "field": "message2.dns.version", "target_field": "dns.version", "ignore_missing": true } },
    { "rename": { "field": "message2.dns.rrname", "target_field": "dns.query.name", "ignore_missing": true } },
    { "rename": { "field": "message2.grouped.A", "target_field": "dns.answers", "ignore_missing": true } },

    { "pipeline": { "name": "common" } }
  ]
}
8  salt/elasticsearch/files/ingest/suricata.fileinfo  Normal file
@@ -0,0 +1,8 @@
{
  "description" : "suricata.fileinfo",
  "processors" : [
    { "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_missing": true } },
    { "rename": { "field": "message2.app_proto", "target_field": "network.protocol", "ignore_missing": true } },
    { "pipeline": { "name": "common" } }
  ]
}
14  salt/elasticsearch/files/ingest/suricata.flow  Normal file
@@ -0,0 +1,14 @@
{
  "description" : "suricata.flow",
  "processors" : [
    { "set": { "field": "dataset", "value": "conn" } },
    { "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_missing": true } },
    { "rename": { "field": "message2.app_proto", "target_field": "network.protocol", "ignore_missing": true } },
    { "rename": { "field": "message2.flow.state", "target_field": "connection.state", "ignore_missing": true } },
    { "rename": { "field": "message2.flow.bytes_toclient", "target_field": "server.ip_bytes", "ignore_missing": true } },
    { "rename": { "field": "message2.flow.bytes_toserver", "target_field": "client.ip_bytes", "ignore_missing": true } },
    { "rename": { "field": "message2.flow.start", "target_field": "connection.start", "ignore_missing": true } },
    { "rename": { "field": "message2.flow.end", "target_field": "connection.end", "ignore_missing": true } },
    { "pipeline": { "name": "common" } }
  ]
}
14  salt/elasticsearch/files/ingest/suricata.ftp  Normal file
@@ -0,0 +1,14 @@
{
  "description" : "suricata.ftp",
  "processors" : [
    { "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_missing": true } },
    { "rename": { "field": "message2.app_proto", "target_field": "network.protocol", "ignore_missing": true } },
    { "rename": { "field": "message2.ftp.reply", "target_field": "server.reply_message", "ignore_missing": true } },
    { "rename": { "field": "message2.ftp.completion_code", "target_field": "server.reply_code", "ignore_missing": true } },
    { "rename": { "field": "message2.ftp.reply_received", "target_field": "server.reply_received", "ignore_missing": true } },
    { "rename": { "field": "message2.ftp.command", "target_field": "ftp.command", "ignore_missing": true } },
    { "rename": { "field": "message2.ftp.command_data", "target_field": "ftp.command_data", "ignore_missing": true } },
    { "rename": { "field": "message2.ftp.dynamic_port", "target_field": "ftp.data_channel_destination.port", "ignore_missing": true } },
    { "pipeline": { "name": "common" } }
  ]
}
17  salt/elasticsearch/files/ingest/suricata.http  Normal file
@@ -0,0 +1,17 @@
{
  "description" : "suricata.http",
  "processors" : [
    { "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_missing": true } },
    { "rename": { "field": "message2.app_proto", "target_field": "network.protocol", "ignore_missing": true } },
    { "rename": { "field": "message2.http.hostname", "target_field": "http.virtual_host", "ignore_missing": true } },
    { "rename": { "field": "message2.http.http_user_agent", "target_field": "http.useragent", "ignore_missing": true } },
    { "rename": { "field": "message2.http.url", "target_field": "http.uri", "ignore_missing": true } },
    { "rename": { "field": "message2.http.http_content_type", "target_field": "file.resp_mime_types", "ignore_missing": true } },
    { "rename": { "field": "message2.http.http_refer", "target_field": "http.referrer", "ignore_missing": true } },
    { "rename": { "field": "message2.http.http_method", "target_field": "http.method", "ignore_missing": true } },
    { "rename": { "field": "message2.http.protocol", "target_field": "http.version", "ignore_missing": true } },
    { "rename": { "field": "message2.http.status", "target_field": "http.status_code", "ignore_missing": true } },
    { "rename": { "field": "message2.http.length", "target_field": "http.request.body.length", "ignore_missing": true } },
    { "pipeline": { "name": "common" } }
  ]
}
8  salt/elasticsearch/files/ingest/suricata.ikev2  Normal file
@@ -0,0 +1,8 @@
{
  "description" : "suricata.ikev2",
  "processors" : [
    { "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_missing": true } },
    { "rename": { "field": "message2.app_proto", "target_field": "network.protocol", "ignore_missing": true } },
    { "pipeline": { "name": "common" } }
  ]
}
8  salt/elasticsearch/files/ingest/suricata.krb5  Normal file
@@ -0,0 +1,8 @@
{
  "description" : "suricata.krb5",
  "processors" : [
    { "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_missing": true } },
    { "rename": { "field": "message2.app_proto", "target_field": "network.protocol", "ignore_missing": true } },
    { "pipeline": { "name": "common" } }
  ]
}
8  salt/elasticsearch/files/ingest/suricata.nfs  Normal file
@@ -0,0 +1,8 @@
{
  "description" : "suricata.nfs",
  "processors" : [
    { "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_missing": true } },
    { "rename": { "field": "message2.app_proto", "target_field": "network.protocol", "ignore_missing": true } },
    { "pipeline": { "name": "common" } }
  ]
}
8  salt/elasticsearch/files/ingest/suricata.rdp  Normal file
@@ -0,0 +1,8 @@
{
  "description" : "suricata.rdp",
  "processors" : [
    { "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_missing": true } },
    { "rename": { "field": "message2.app_proto", "target_field": "network.protocol", "ignore_missing": true } },
    { "pipeline": { "name": "common" } }
  ]
}
8  salt/elasticsearch/files/ingest/suricata.sip  Normal file
@@ -0,0 +1,8 @@
{
  "description" : "suricata.sip",
  "processors" : [
    { "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_missing": true } },
    { "rename": { "field": "message2.app_proto", "target_field": "network.protocol", "ignore_missing": true } },
    { "pipeline": { "name": "common" } }
  ]
}
8  salt/elasticsearch/files/ingest/suricata.smb  Normal file
@@ -0,0 +1,8 @@
{
  "description" : "suricata.smb",
  "processors" : [
    { "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_missing": true } },
    { "rename": { "field": "message2.app_proto", "target_field": "network.protocol", "ignore_missing": true } },
    { "pipeline": { "name": "common" } }
  ]
}
13  salt/elasticsearch/files/ingest/suricata.smtp  Normal file
@@ -0,0 +1,13 @@
{
  "description" : "suricata.smtp",
  "processors" : [
    { "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_missing": true } },
    { "rename": { "field": "message2.app_proto", "target_field": "network.protocol", "ignore_missing": true } },
    { "rename": { "field": "message2.smtp.helo", "target_field": "smtp.helo", "ignore_missing": true } },
    { "rename": { "field": "message2.email.from", "target_field": "smtp.from", "ignore_missing": true } },
    { "rename": { "field": "message2.email.to", "target_field": "smtp.to", "ignore_missing": true } },
    { "rename": { "field": "message2.email.cc", "target_field": "smtp.cc", "ignore_missing": true } },
    { "rename": { "field": "message2.email.attachment", "target_field": "smtp.attachment", "ignore_missing": true } },
    { "pipeline": { "name": "common" } }
  ]
}
10  salt/elasticsearch/files/ingest/suricata.snmp  Normal file
@@ -0,0 +1,10 @@
{
  "description" : "suricata.snmp",
  "processors" : [
    { "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_missing": true } },
    { "rename": { "field": "message2.app_proto", "target_field": "network.protocol", "ignore_missing": true } },
    { "rename": { "field": "message2.snmp.version", "target_field": "snmp.version", "ignore_missing": true } },
    { "rename": { "field": "message2.snmp.community", "target_field": "snmp.community", "ignore_missing": true } },
    { "pipeline": { "name": "common" } }
  ]
}
11  salt/elasticsearch/files/ingest/suricata.ssh  Normal file
@@ -0,0 +1,11 @@
{
  "description" : "suricata.ssh",
  "processors" : [
    { "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_missing": true } },
    { "rename": { "field": "message2.app_proto", "target_field": "network.protocol", "ignore_missing": true } },
    { "rename": { "field": "message2.ssh.client.proto_version", "target_field": "ssh.version", "ignore_missing": true } },
    { "rename": { "field": "message2.ssh.client.software_version", "target_field": "ssh.client", "ignore_missing": true } },
    { "rename": { "field": "message2.ssh.server.proto_version", "target_field": "ssh.server", "ignore_missing": true } },
    { "pipeline": { "name": "common" } }
  ]
}
8  salt/elasticsearch/files/ingest/suricata.tftp  Normal file
@@ -0,0 +1,8 @@
{
  "description" : "suricata.tftp",
  "processors" : [
    { "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_missing": true } },
    { "rename": { "field": "message2.app_proto", "target_field": "network.protocol", "ignore_missing": true } },
    { "pipeline": { "name": "common" } }
  ]
}
8  salt/elasticsearch/files/ingest/suricata.tls  Normal file
@@ -0,0 +1,8 @@
{
  "description" : "suricata.tls",
  "processors" : [
    { "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_missing": true } },
    { "rename": { "field": "message2.app_proto", "target_field": "network.protocol", "ignore_missing": true } },
    { "pipeline": { "name": "common" } }
  ]
}
@@ -6,6 +6,10 @@
      "field": "message",
      "pattern" : "%{message}",
      "on_failure": [ { "drop" : { } } ]
    },
    "remove": {
      "field": [ "type", "agent" ],
      "ignore_failure": true
    }
  },
  { "pipeline": { "name": "common" } }

@@ -3,7 +3,7 @@
   "processors" : [
     { "remove": { "field": ["host"], "ignore_failure": true } },
     { "json": { "field": "message", "target_field": "message2", "ignore_failure": true } },
-    { "rename": { "field": "message2.id", "target_field": "id", "ignore_missing": true } },
+    { "rename": { "field": "message2.id", "target_field": "log.id.fuid", "ignore_missing": true } },
     { "dot_expander": { "field": "certificate.version", "path": "message2", "ignore_failure": true } },
     { "rename": { "field": "message2.certificate.version", "target_field": "x509.certificate.version", "ignore_missing": true } },
     { "dot_expander": { "field": "certificate.serial", "path": "message2", "ignore_failure": true } },

@@ -75,10 +75,10 @@ filebeat.modules:
 filebeat.inputs:
 #------------------------------ Log prospector --------------------------------
 {%- if grains['role'] == 'so-sensor' or grains['role'] == "so-eval" or grains['role'] == "so-helix" or grains['role'] == "so-heavynode" or grains['role'] == "so-standalone" %}
-- type: syslog
+- type: udp
   enabled: true
-  protocol.udp:
-    host: "0.0.0.0:514"
+  host: "0.0.0.0:514"
   fields:
     module: syslog
     dataset: syslog
@@ -87,7 +87,20 @@ filebeat.inputs:
   processors:
   - drop_fields:
       fields: ["source", "prospector", "input", "offset", "beat"]
       fields_under_root: true
+
+- type: tcp
+  enabled: true
+  host: "0.0.0.0:514"
+  fields:
+    module: syslog
+    dataset: syslog
+    pipeline: "syslog"
+    index: "so-syslog-%{+yyyy.MM.dd}"
+  processors:
+  - drop_fields:
+      fields: ["source", "prospector", "input", "offset", "beat"]
+      fields_under_root: true
 {%- if BROVER != 'SURICATA' %}
 {%- for LOGNAME in salt['pillar.get']('brologs:enabled', '') %}
 - type: log
@@ -110,10 +123,10 @@ filebeat.inputs:

 - type: log
   paths:
-    - /suricata/eve.json
+    - /suricata/eve*.json
   fields:
     module: suricata
-    dataset: alert
+    dataset: common
     category: network

   processors:
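With both listeners in place, a sensor can be smoke-tested from any host that carries the util-linux logger utility. A hedged sketch; <sensor-ip> is a placeholder for the address of the node running Filebeat:

# Send one test message over each transport, then look for it in the
# so-syslog index; both lines assume the 514/udp and 514/tcp listeners above.
logger --server <sensor-ip> --port 514 --udp "filebeat syslog smoke test (udp)"
logger --server <sensor-ip> --port 514 --tcp "filebeat syslog smoke test (tcp)"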
469
salt/firewall/assigned_hostgroups.map.yaml
Normal file
@@ -0,0 +1,469 @@
{% import_yaml 'firewall/portgroups.yaml' as portgroups %}
{% set portgroups = portgroups.firewall.aliases.ports %}

role:
  eval:
    chain:
      DOCKER-USER:
        hostgroups:
          master:
            portgroups:
              - {{ portgroups.wazuh_agent }}
              - {{ portgroups.wazuh_api }}
              - {{ portgroups.wazuh_authd }}
              - {{ portgroups.playbook }}
              - {{ portgroups.mysql }}
              - {{ portgroups.navigator }}
              - {{ portgroups.kibana }}
              - {{ portgroups.redis }}
              - {{ portgroups.influxdb }}
              - {{ portgroups.fleet_api }}
              - {{ portgroups.cortex }}
              - {{ portgroups.elasticsearch_rest }}
              - {{ portgroups.elasticsearch_node }}
              - {{ portgroups.cortex_es_rest }}
              - {{ portgroups.cortex_es_node }}
          minion:
            portgroups:
              - {{ portgroups.acng }}
              - {{ portgroups.docker_registry }}
              - {{ portgroups.osquery_8080 }}
              - {{ portgroups.influxdb }}
              - {{ portgroups.wazuh_api }}
              - {{ portgroups.fleet_api }}
          sensor:
            portgroups:
              - {{ portgroups.sensoroni }}
              - {{ portgroups.beats_5044 }}
              - {{ portgroups.beats_5644 }}
          search_node:
            portgroups:
              - {{ portgroups.redis }}
              - {{ portgroups.elasticsearch_node }}
          self:
            portgroups:
              - {{ portgroups.syslog}}
          beats_endpoint:
            portgroups:
              - {{ portgroups.beats_5044 }}
          beats_endpoint_ssl:
            portgroups:
              - {{ portgroups.beats_5644 }}
          osquery_endpoint:
            portgroups:
              - {{ portgroups.fleet_api }}
          syslog:
            portgroups:
              - {{ portgroups.syslog }}
          wazuh_agent:
            portgroups:
              - {{ portgroups.wazuh_agent }}
          wazuh_api:
            portgroups:
              - {{ portgroups.wazuh_api }}
          wazuh_authd:
            portgroups:
              - {{ portgroups.wazuh_authd }}
          analyst:
            portgroups:
              - {{ portgroups.nginx }}
      INPUT:
        hostgroups:
          anywhere:
            portgroups:
              - {{ portgroups.ssh }}
          dockernet:
            portgroups:
              - {{ portgroups.all }}
          localhost:
            portgroups:
              - {{ portgroups.all }}
          minion:
            portgroups:
              - {{ portgroups.salt_master }}
  master:
    chain:
      DOCKER-USER:
        hostgroups:
          master:
            portgroups:
              - {{ portgroups.wazuh_agent }}
              - {{ portgroups.wazuh_api }}
              - {{ portgroups.wazuh_authd }}
              - {{ portgroups.playbook }}
              - {{ portgroups.mysql }}
              - {{ portgroups.navigator }}
              - {{ portgroups.kibana }}
              - {{ portgroups.redis }}
              - {{ portgroups.influxdb }}
              - {{ portgroups.fleet_api }}
              - {{ portgroups.cortex }}
              - {{ portgroups.elasticsearch_rest }}
              - {{ portgroups.elasticsearch_node }}
              - {{ portgroups.cortex_es_rest }}
              - {{ portgroups.cortex_es_node }}
          minion:
            portgroups:
              - {{ portgroups.acng }}
              - {{ portgroups.docker_registry }}
              - {{ portgroups.osquery_8080 }}
              - {{ portgroups.influxdb }}
              - {{ portgroups.wazuh_api }}
              - {{ portgroups.fleet_api }}
          sensor:
            portgroups:
              - {{ portgroups.sensoroni }}
              - {{ portgroups.beats_5044 }}
              - {{ portgroups.beats_5644 }}
          search_node:
            portgroups:
              - {{ portgroups.redis }}
              - {{ portgroups.elasticsearch_node }}
          self:
            portgroups:
              - {{ portgroups.syslog}}
          syslog:
            portgroups:
              - {{ portgroups.syslog }}
          beats_endpoint:
            portgroups:
              - {{ portgroups.beats_5044 }}
          beats_endpoint_ssl:
            portgroups:
              - {{ portgroups.beats_5644 }}
          osquery_endpoint:
            portgroups:
              - {{ portgroups.fleet_api }}
          wazuh_agent:
            portgroups:
              - {{ portgroups.wazuh_agent }}
          wazuh_api:
            portgroups:
              - {{ portgroups.wazuh_api }}
          wazuh_authd:
            portgroups:
              - {{ portgroups.wazuh_authd }}
          analyst:
            portgroups:
              - {{ portgroups.nginx }}
      INPUT:
        hostgroups:
          anywhere:
            portgroups:
              - {{ portgroups.ssh }}
          dockernet:
            portgroups:
              - {{ portgroups.all }}
          localhost:
            portgroups:
              - {{ portgroups.all }}
          minion:
            portgroups:
              - {{ portgroups.salt_master }}
  mastersearch:
    chain:
      DOCKER-USER:
        hostgroups:
          master:
            portgroups:
              - {{ portgroups.wazuh_agent }}
              - {{ portgroups.wazuh_api }}
              - {{ portgroups.wazuh_authd }}
              - {{ portgroups.playbook }}
              - {{ portgroups.mysql }}
              - {{ portgroups.navigator }}
              - {{ portgroups.kibana }}
              - {{ portgroups.redis }}
              - {{ portgroups.influxdb }}
              - {{ portgroups.fleet_api }}
              - {{ portgroups.cortex }}
              - {{ portgroups.elasticsearch_rest }}
              - {{ portgroups.elasticsearch_node }}
              - {{ portgroups.cortex_es_rest }}
              - {{ portgroups.cortex_es_node }}
          minion:
            portgroups:
              - {{ portgroups.acng }}
              - {{ portgroups.docker_registry }}
              - {{ portgroups.osquery_8080 }}
              - {{ portgroups.influxdb }}
              - {{ portgroups.wazuh_api }}
              - {{ portgroups.fleet_api }}
          sensor:
            portgroups:
              - {{ portgroups.sensoroni }}
              - {{ portgroups.beats_5044 }}
              - {{ portgroups.beats_5644 }}
          search_node:
            portgroups:
              - {{ portgroups.redis }}
              - {{ portgroups.elasticsearch_node }}
          self:
            portgroups:
              - {{ portgroups.syslog}}
          beats_endpoint:
            portgroups:
              - {{ portgroups.beats_5044 }}
          beats_endpoint_ssl:
            portgroups:
              - {{ portgroups.beats_5644 }}
          osquery_endpoint:
            portgroups:
              - {{ portgroups.fleet_api }}
          syslog:
            portgroups:
              - {{ portgroups.syslog }}
          wazuh_agent:
            portgroups:
              - {{ portgroups.wazuh_agent }}
          wazuh_api:
            portgroups:
              - {{ portgroups.wazuh_api }}
          wazuh_authd:
            portgroups:
              - {{ portgroups.wazuh_authd }}
          analyst:
            portgroups:
              - {{ portgroups.nginx }}
      INPUT:
        hostgroups:
          anywhere:
            portgroups:
              - {{ portgroups.ssh }}
          dockernet:
            portgroups:
              - {{ portgroups.all }}
          localhost:
            portgroups:
              - {{ portgroups.all }}
          minion:
            portgroups:
              - {{ portgroups.salt_master }}
  standalone:
    chain:
      DOCKER-USER:
        hostgroups:
          master:
            portgroups:
              - {{ portgroups.wazuh_agent }}
              - {{ portgroups.wazuh_api }}
              - {{ portgroups.wazuh_authd }}
              - {{ portgroups.playbook }}
              - {{ portgroups.mysql }}
              - {{ portgroups.navigator }}
              - {{ portgroups.kibana }}
              - {{ portgroups.redis }}
              - {{ portgroups.influxdb }}
              - {{ portgroups.fleet_api }}
              - {{ portgroups.cortex }}
              - {{ portgroups.elasticsearch_rest }}
              - {{ portgroups.elasticsearch_node }}
              - {{ portgroups.cortex_es_rest }}
              - {{ portgroups.cortex_es_node }}
          minion:
            portgroups:
              - {{ portgroups.acng }}
              - {{ portgroups.docker_registry }}
              - {{ portgroups.osquery_8080 }}
              - {{ portgroups.influxdb }}
              - {{ portgroups.wazuh_api }}
              - {{ portgroups.fleet_api }}
          sensor:
            portgroups:
              - {{ portgroups.sensoroni }}
              - {{ portgroups.beats_5044 }}
              - {{ portgroups.beats_5644 }}
          search_node:
            portgroups:
              - {{ portgroups.redis }}
              - {{ portgroups.elasticsearch_node }}
          self:
            portgroups:
              - {{ portgroups.syslog}}
          beats_endpoint:
            portgroups:
              - {{ portgroups.beats_5044 }}
          beats_endpoint_ssl:
            portgroups:
              - {{ portgroups.beats_5644 }}
          osquery_endpoint:
            portgroups:
              - {{ portgroups.fleet_api }}
          syslog:
            portgroups:
              - {{ portgroups.syslog }}
          wazuh_agent:
            portgroups:
              - {{ portgroups.wazuh_agent }}
          wazuh_api:
            portgroups:
              - {{ portgroups.wazuh_api }}
          wazuh_authd:
            portgroups:
              - {{ portgroups.wazuh_authd }}
          analyst:
            portgroups:
              - {{ portgroups.nginx }}
      INPUT:
        hostgroups:
          anywhere:
            portgroups:
              - {{ portgroups.ssh }}
          dockernet:
            portgroups:
              - {{ portgroups.all }}
          localhost:
            portgroups:
              - {{ portgroups.all }}
          minion:
            portgroups:
              - {{ portgroups.salt_master }}
  helixsensor:
    chain:
      DOCKER-USER:
        hostgroups:
          master:
            portgroups:
              - {{ portgroups.wazuh_agent }}
              - {{ portgroups.playbook }}
              - {{ portgroups.mysql }}
              - {{ portgroups.navigator }}
              - {{ portgroups.kibana }}
              - {{ portgroups.redis }}
              - {{ portgroups.influxdb }}
              - {{ portgroups.fleet_api }}
              - {{ portgroups.cortex }}
              - {{ portgroups.elasticsearch_rest }}
              - {{ portgroups.elasticsearch_node }}
              - {{ portgroups.cortex_es_rest }}
              - {{ portgroups.cortex_es_node }}
          minion:
            portgroups:
              - {{ portgroups.acng }}
              - {{ portgroups.docker_registry }}
              - {{ portgroups.osquery_8080 }}
              - {{ portgroups.influxdb }}
              - {{ portgroups.wazuh_api }}
          sensor:
            portgroups:
              - {{ portgroups.sensoroni }}
              - {{ portgroups.beats_5044 }}
              - {{ portgroups.beats_5644 }}
          search_node:
            portgroups:
              - {{ portgroups.redis }}
              - {{ portgroups.elasticsearch_node }}
          self:
            portgroups:
              - {{ portgroups.syslog}}
          beats_endpoint:
            portgroups:
              - {{ portgroups.beats_5044 }}
          osquery_endpoint:
            portgroups:
              - {{ portgroups.fleet_api }}
          wazuh_agent:
            portgroups:
              - {{ portgroups.wazuh_agent }}
          analyst:
            portgroups:
              - {{ portgroups.nginx }}
      INPUT:
        hostgroups:
          anywhere:
            portgroups:
              - {{ portgroups.ssh }}
          dockernet:
            portgroups:
              - {{ portgroups.all }}
          localhost:
            portgroups:
              - {{ portgroups.all }}
          minion:
            portgroups:
              - {{ portgroups.salt_master }}
  searchnode:
    chain:
      DOCKER-USER:
        hostgroups:
          master:
            portgroups:
              - {{ portgroups.elasticsearch_node }}
          dockernet:
            portgroups:
              - {{ portgroups.elasticsearch_node }}
              - {{ portgroups.elasticsearch_rest }}
      INPUT:
        hostgroups:
          anywhere:
            portgroups:
              - {{ portgroups.ssh }}
          dockernet:
            portgroups:
              - {{ portgroups.all }}
          localhost:
            portgroups:
              - {{ portgroups.all }}
  sensor:
    chain:
      INPUT:
        hostgroups:
          anywhere:
            portgroups:
              - {{ portgroups.ssh }}
          dockernet:
            portgroups:
              - {{ portgroups.all }}
          localhost:
            portgroups:
              - {{ portgroups.all }}
  heavynode:
    chain:
      DOCKER-USER:
        hostgroups:
          self:
            portgroups:
              - {{ portgroups.redis }}
              - {{ portgroups.beats_5044 }}
              - {{ portgroups.beats_5644 }}
      INPUT:
        hostgroups:
          anywhere:
            portgroups:
              - {{ portgroups.ssh }}
          localhost:
            portgroups:
              - {{ portgroups.all }}
  fleet:
    chain:
      DOCKER-USER:
        hostgroups:
          self:
            portgroups:
              - {{ portgroups.redis }}
              - {{ portgroups.mysql }}
              - {{ portgroups.osquery_8080 }}
          localhost:
            portgroups:
              - {{ portgroups.mysql }}
              - {{ portgroups.osquery_8080 }}
          analyst:
            portgroups:
              - {{ portgroups.fleet_webui }}
          minion:
            portgroups:
              - {{ portgroups.fleet_api }}
          osquery_endpoint:
            portgroups:
              - {{ portgroups.fleet_api}}
      INPUT:
        hostgroups:
          anywhere:
            portgroups:
              - {{ portgroups.ssh }}
          dockernet:
            portgroups:
              - {{ portgroups.all }}
          localhost:
            portgroups:
              - {{ portgroups.all }}
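Every portgroup reference above ultimately becomes an ACCEPT rule in either the DOCKER-USER or INPUT chain, so the quickest way to confirm what a given role actually allows is to read the live ruleset on that minion. A minimal check, assuming shell access to the box:

# List the generated rules with their positions; order matters because
# the states insert at position 1.
sudo iptables -L DOCKER-USER -n -v --line-numbers
sudo iptables -L INPUT -n -v --line-numbers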
22
salt/firewall/hostgroups.yaml
Normal file
@@ -0,0 +1,22 @@
firewall:
  hostgroups:
    anywhere:
      ips:
        delete:
        insert:
          - 0.0.0.0/0
    dockernet:
      ips:
        delete:
        insert:
          - 172.17.0.0/24
    localhost:
      ips:
        delete:
        insert:
          - 127.0.0.1
    self:
      ips:
        delete:
        insert:
          - {{ salt['grains.get']('ip_interfaces').get(salt['pillar.get']('sensor:mainint', salt['pillar.get']('master:mainint', salt['pillar.get']('node:mainint', salt['pillar.get']('host:mainint')))))[0] }}
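The self hostgroup is the only one resolved at render time, from the minion's own grains and pillar. To see which address it will pick on a given box, the same lookups can be run by hand; sensor:mainint is shown as an example, the other roles fall through to master/node/host:mainint as in the chain above:

# Show the interface-to-address map the jinja lookup walks, then the
# interface name it will index into that map with.
salt-call grains.get ip_interfaces
salt-call pillar.get sensor:mainint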
@@ -1,16 +1,12 @@
# Firewall Magic for the grid
{% if grains['role'] in ['so-eval','so-master','so-helix','so-mastersearch', 'so-standalone'] %}
{% set ip = salt['pillar.get']('static:masterip', '') %}
{% elif grains['role'] == 'so-node' or grains['role'] == 'so-heavynode' %}
{% set ip = salt['pillar.get']('node:mainip', '') %}
{% elif grains['role'] == 'so-sensor' %}
{% set ip = salt['pillar.get']('sensor:mainip', '') %}
{% elif grains['role'] == 'so-fleet' %}
{% set ip = salt['pillar.get']('node:mainip', '') %}
{% endif %}
{% from 'firewall/map.jinja' import hostgroups with context %}
{% from 'firewall/map.jinja' import assigned_hostgroups with context %}

{% set FLEET_NODE = salt['pillar.get']('static:fleet_node') %}
{% set FLEET_NODE_IP = salt['pillar.get']('static:fleet_ip') %}
create_sysconfig_iptables:
  file.touch:
    - name: /etc/sysconfig/iptables
    - makedirs: True
    - unless: 'ls /etc/sysconfig/iptables'

# Quick Fix for Docker being difficult
iptables_fix_docker:
@@ -27,15 +23,6 @@ iptables_fix_fwd:
   - position: 1
   - target: DOCKER-USER

-# Keep localhost in the game
-iptables_allow_localhost:
-  iptables.append:
-    - table: filter
-    - chain: INPUT
-    - jump: ACCEPT
-    - source: 127.0.0.1
-    - save: True
-
 # Allow related/established sessions
 iptables_allow_established:
   iptables.append:
@@ -46,16 +33,6 @@ iptables_allow_established:
     - ctstate: 'RELATED,ESTABLISHED'
     - save: True

-# Always allow SSH so we can like log in
-iptables_allow_ssh:
-  iptables.append:
-    - table: filter
-    - chain: INPUT
-    - jump: ACCEPT
-    - dport: 22
-    - proto: tcp
-    - save: True
-
 # I like pings
 iptables_allow_pings:
   iptables.append:
@@ -113,604 +90,37 @@ enable_docker_user_established:
    - match: conntrack
    - ctstate: 'RELATED,ESTABLISHED'

# Add rule(s) for Wazuh manager
enable_wazuh_manager_1514_tcp_{{ip}}:
  iptables.insert:
{% for chain, hg in assigned_hostgroups.chain.items() %}
{% for hostgroup, portgroups in assigned_hostgroups.chain[chain].hostgroups.items() %}
{% for action in ['insert', 'delete' ] %}
{% if hostgroups[hostgroup].ips[action] %}
{% for ip in hostgroups[hostgroup].ips[action] %}
{% for portgroup in portgroups.portgroups %}
{% for proto, ports in portgroup.items() %}
{% for port in ports %}

{{action}}_{{chain}}_{{hostgroup}}_{{ip}}_{{port}}_{{proto}}:
  iptables.{{action}}:
    - table: filter
    - chain: DOCKER-USER
    - chain: {{ chain }}
    - jump: ACCEPT
    - proto: tcp
    - proto: {{ proto }}
    - source: {{ ip }}
    - dport: 1514
    - dport: {{ port }}
{% if action == 'insert' %}
    - position: 1
{% endif %}
    - save: True

enable_wazuh_manager_1514_udp_{{ip}}:
  iptables.insert:
    - table: filter
    - chain: DOCKER-USER
    - jump: ACCEPT
    - proto: udp
    - source: {{ ip }}
    - dport: 1514
    - position: 1
    - save: True

# Allow syslog
enable_syslog_514_{{ip}}:
  iptables.insert:
    - table: filter
    - chain: DOCKER-USER
    - jump: ACCEPT
    - proto: tcp
    - source: {{ ip }}
    - dport: 514
    - position: 1
    - save: True

# Rules if you are a Master
{% if grains['role'] in ['so-master', 'so-eval', 'so-helix', 'so-mastersearch', 'so-standalone'] %}
#This should be more granular
iptables_allow_master_docker:
  iptables.insert:
    - table: filter
    - chain: INPUT
    - jump: ACCEPT
    - source: 172.17.0.0/24
    - position: 1
    - save: True

{% for ip in pillar.get('masterfw') %}
# Allow Redis
enable_maternode_redis_6379_{{ip}}:
  iptables.insert:
    - table: filter
    - chain: DOCKER-USER
    - jump: ACCEPT
    - proto: tcp
    - source: {{ ip }}
    - dport: 6379
    - position: 1
    - save: True

enable_masternode_kibana_5601_{{ip}}:
  iptables.insert:
    - table: filter
    - chain: DOCKER-USER
    - jump: ACCEPT
    - proto: tcp
    - source: {{ ip }}
    - dport: 5601
    - position: 1
    - save: True

enable_masternode_ES_9200_{{ip}}:
  iptables.insert:
    - table: filter
    - chain: DOCKER-USER
    - jump: ACCEPT
    - proto: tcp
    - source: {{ ip }}
    - dport: 9200
    - position: 1
    - save: True

enable_masternode_ES_9300_{{ip}}:
  iptables.insert:
    - table: filter
    - chain: DOCKER-USER
    - jump: ACCEPT
    - proto: tcp
    - source: {{ ip }}
    - dport: 9300
    - position: 1
    - save: True

enable_masternode_ES_9400_{{ip}}:
  iptables.insert:
    - table: filter
    - chain: DOCKER-USER
    - jump: ACCEPT
    - proto: tcp
    - source: {{ ip }}
    - dport: 9400
    - position: 1
    - save: True

enable_masternode_ES_9500_{{ip}}:
  iptables.insert:
    - table: filter
    - chain: DOCKER-USER
    - jump: ACCEPT
    - proto: tcp
    - source: {{ ip }}
    - dport: 9500
    - position: 1
    - save: True

enable_masternode_influxdb_8086_{{ip}}:
  iptables.insert:
    - table: filter
    - chain: DOCKER-USER
    - jump: ACCEPT
    - proto: tcp
    - source: {{ ip }}
    - dport: 8086
    - position: 1
    - save: True

enable_masternode_mysql_3306_{{ip}}:
  iptables.insert:
    - table: filter
    - chain: DOCKER-USER
    - jump: ACCEPT
    - proto: tcp
    - source: {{ ip }}
    - dport: 3306
    - position: 1
    - save: True

enable_master_osquery_8090_{{ip}}:
  iptables.insert:
    - table: filter
    - chain: DOCKER-USER
    - jump: ACCEPT
    - proto: tcp
    - source: {{ ip }}
    - dport: 8090
    - position: 1
    - save: True

enable_master_playbook_3200_{{ip}}:
  iptables.insert:
    - table: filter
    - chain: DOCKER-USER
    - jump: ACCEPT
    - proto: tcp
    - source: {{ ip }}
    - dport: 3200
    - position: 1
    - save: True

enable_master_navigator_4200_{{ip}}:
  iptables.insert:
    - table: filter
    - chain: DOCKER-USER
    - jump: ACCEPT
    - proto: tcp
    - source: {{ ip }}
    - dport: 4200
    - position: 1
    - save: True

enable_master_cortex_9001_{{ip}}:
  iptables.insert:
    - table: filter
    - chain: DOCKER-USER
    - jump: ACCEPT
    - proto: tcp
    - source: {{ ip }}
    - dport: 9001
    - position: 1
    - save: True

enable_master_cyberchef_9080_{{ip}}:
  iptables.insert:
    - table: filter
    - chain: DOCKER-USER
    - jump: ACCEPT
    - proto: tcp
    - source: {{ ip }}
    - dport: 9080
    - position: 1
    - save: True


{% endfor %}
{% endfor %}
{% endfor %}
{% endfor %}
{% endif %}
{% endfor %}
{% endfor %}
{% endfor %}

# Make it so all the minions can talk to salt and update etc.
{% for ip in pillar.get('minions') %}

enable_salt_minions_4505_{{ip}}:
  iptables.insert:
    - table: filter
    - chain: INPUT
    - jump: ACCEPT
    - proto: tcp
    - source: {{ ip }}
    - dport: 4505
    - position: 1
    - save: True

enable_salt_minions_4506_{{ip}}:
  iptables.insert:
    - table: filter
    - chain: INPUT
    - jump: ACCEPT
    - proto: tcp
    - source: {{ ip }}
    - dport: 4506
    - position: 1
    - save: True

enable_salt_minions_5000_{{ip}}:
  iptables.insert:
    - table: filter
    - chain: DOCKER-USER
    - jump: ACCEPT
    - proto: tcp
    - source: {{ ip }}
    - dport: 5000
    - position: 1
    - save: True

enable_salt_minions_3142_{{ip}}:
  iptables.insert:
    - table: filter
    - chain: DOCKER-USER
    - jump: ACCEPT
    - proto: tcp
    - source: {{ ip }}
    - dport: 3142
    - position: 1
    - save: True

enable_minions_influxdb_8086_{{ip}}:
  iptables.insert:
    - table: filter
    - chain: DOCKER-USER
    - jump: ACCEPT
    - proto: tcp
    - source: {{ ip }}
    - dport: 8086
    - position: 1
    - save: True

enable_minion_osquery_8080_{{ip}}:
  iptables.insert:
    - table: filter
    - chain: DOCKER-USER
    - jump: ACCEPT
    - proto: tcp
    - source: {{ ip }}
    - dport: 8080
    - position: 1
    - save: True

enable_minion_osquery_8090_{{ip}}:
  iptables.insert:
    - table: filter
    - chain: DOCKER-USER
    - jump: ACCEPT
    - proto: tcp
    - source: {{ ip }}
    - dport: 8090
    - position: 1
    - save: True

enable_minion_wazuh_55000_{{ip}}:
  iptables.insert:
    - table: filter
    - chain: DOCKER-USER
    - jump: ACCEPT
    - proto: tcp
    - source: {{ ip }}
    - dport: 55000
    - position: 1
    - save: True

{% endfor %}

# Allow Forward Nodes to send their beats traffic
{% for ip in pillar.get('forward_nodes') %}

enable_forwardnode_beats_5044_{{ip}}:
  iptables.insert:
    - table: filter
    - chain: DOCKER-USER
    - jump: ACCEPT
    - proto: tcp
    - source: {{ ip }}
    - dport: 5044
    - position: 1
    - save: True

enable_forwardnode_beats_5644_{{ip}}:
  iptables.insert:
    - table: filter
    - chain: DOCKER-USER
    - jump: ACCEPT
    - proto: tcp
    - source: {{ ip }}
    - dport: 5644
    - position: 1
    - save: True

enable_forwardnode_sensoroni_443_{{ip}}:
  iptables.insert:
    - table: filter
    - chain: DOCKER-USER
    - jump: ACCEPT
    - proto: tcp
    - source: {{ ip }}
    - dport: 443
    - position: 1
    - save: True

enable_forwardnode_sensoroni_9822_{{ip}}:
  iptables.insert:
    - table: filter
    - chain: DOCKER-USER
    - jump: ACCEPT
    - proto: tcp
    - source: {{ ip }}
    - dport: 9822
    - position: 1
    - save: True

{% endfor %}

# Allow Fleet Node to send its beats traffic
{% if FLEET_NODE %}

enable_fleetnode_beats_5644_{{FLEET_NODE_IP}}:
  iptables.insert:
    - table: filter
    - chain: DOCKER-USER
    - jump: ACCEPT
    - proto: tcp
    - source: {{ FLEET_NODE_IP }}
    - dport: 5644
    - position: 1
    - save: True

{% endif %}

{% for ip in pillar.get('search_nodes') %}

enable_searchnode_redis_6379_{{ip}}:
  iptables.insert:
    - table: filter
    - chain: DOCKER-USER
    - jump: ACCEPT
    - proto: tcp
    - source: {{ ip }}
    - dport: 6379
    - position: 1
    - save: True

enable_searchnode_ES_9300_{{ip}}:
  iptables.insert:
    - table: filter
    - chain: DOCKER-USER
    - jump: ACCEPT
    - proto: tcp
    - source: {{ ip }}
    - dport: 9300
    - position: 1
    - save: True

{% endfor %}

# Allow Beats Endpoints to send their beats traffic
{% for ip in pillar.get('beats_endpoint') %}

enable_standard_beats_5044_{{ip}}:
  iptables.insert:
    - table: filter
    - chain: DOCKER-USER
    - jump: ACCEPT
    - proto: tcp
    - source: {{ ip }}
    - dport: 5044
    - position: 1
    - save: True

{% endfor %}

# Allow OSQuery Endpoints to send their traffic
{% for ip in pillar.get('osquery_endpoint') %}

enable_standard_osquery_8090_{{ip}}:
  iptables.insert:
    - table: filter
    - chain: DOCKER-USER
    - jump: ACCEPT
    - proto: tcp
    - source: {{ ip }}
    - dport: 8090
    - position: 1
    - save: True

{% endfor %}

# Allow Wazuh Endpoints to send their traffic
{% for ip in pillar.get('wazuh_endpoint') %}

enable_wazuh_endpoint_tcp_1514_{{ip}}:
  iptables.insert:
    - table: filter
    - chain: DOCKER-USER
    - jump: ACCEPT
    - proto: tcp
    - source: {{ ip }}
    - dport: 1514
    - position: 1
    - save: True

enable_wazuh_endpoint_udp_1514_{{ip}}:
  iptables.insert:
    - table: filter
    - chain: DOCKER-USER
    - jump: ACCEPT
    - proto: udp
    - source: {{ ip }}
    - dport: 1514
    - position: 1
    - save: True

{% endfor %}

# Allow Analysts
{% for ip in pillar.get('analyst') %}

enable_standard_analyst_80_{{ip}}:
  iptables.insert:
    - table: filter
    - chain: DOCKER-USER
    - jump: ACCEPT
    - proto: tcp
    - source: {{ ip }}
    - dport: 80
    - position: 1
    - save: True

enable_standard_analyst_443_{{ip}}:
  iptables.insert:
    - table: filter
    - chain: DOCKER-USER
    - jump: ACCEPT
    - proto: tcp
    - source: {{ ip }}
    - dport: 443
    - position: 1
    - save: True

#enable_standard_analyst_3000_{{ip}}:
#  iptables.insert:
#    - table: filter
#    - chain: DOCKER-USER
#    - jump: ACCEPT
#    - proto: tcp
#    - source: {{ ip }}
#    - dport: 3000
#    - position: 1
#    - save: True

#enable_standard_analyst_7000_{{ip}}:
#  iptables.insert:
#    - table: filter
#    - chain: DOCKER-USER
#    - jump: ACCEPT
#    - proto: tcp
#    - source: {{ ip }}
#    - dport: 7000
#    - position: 1
#    - save: True

#enable_standard_analyst_9000_{{ip}}:
#  iptables.insert:
#    - table: filter
#    - chain: DOCKER-USER
#    - jump: ACCEPT
#    - proto: tcp
#    - source: {{ ip }}
#    - dport: 9000
#    - position: 1
#    - save: True

#enable_standard_analyst_9001_{{ip}}:
#  iptables.insert:
#    - table: filter
#    - chain: DOCKER-USER
#    - jump: ACCEPT
#    - proto: tcp
#    - source: {{ ip }}
#    - dport: 9001
#    - position: 1
#    - save: True

# This is temporary for sensoroni testing
#enable_standard_analyst_9822_{{ip}}:
#  iptables.insert:
#    - table: filter
#    - chain: DOCKER-USER
#    - jump: ACCEPT
#    - proto: tcp
#    - source: {{ ip }}
#    - dport: 9822
#    - position: 1
#    - save: True

{% endfor %}

# Rules for search nodes connecting to master


{% endif %}

# Rules if you are a Node
{% if 'node' in grains['role'] %}

#This should be more granular
iptables_allow_docker:
  iptables.insert:
    - table: filter
    - chain: INPUT
    - jump: ACCEPT
    - source: 172.17.0.0/24
    - position: 1
    - save: True

enable_docker_ES_9200:
  iptables.insert:
    - table: filter
    - chain: DOCKER-USER
    - jump: ACCEPT
    - proto: tcp
    - source: 172.17.0.0/24
    - dport: 9200
    - position: 1
    - save: True


enable_docker_ES_9300:
  iptables.insert:
    - table: filter
    - chain: DOCKER-USER
    - jump: ACCEPT
    - proto: tcp
    - source: 172.17.0.0/24
    - dport: 9300
    - position: 1
    - save: True


{% for ip in pillar.get('masterfw') %}

enable_cluster_ES_9300_{{ip}}:
  iptables.insert:
    - table: filter
    - chain: DOCKER-USER
    - jump: ACCEPT
    - proto: tcp
    - source: {{ ip }}
    - dport: 9300
    - position: 1
    - save: True


{% endfor %}
{% endif %}

# Rules if you are a Sensor
{% if grains['role'] == 'so-sensor' %}
iptables_allow_sensor_docker:
  iptables.insert:
    - table: filter
    - chain: INPUT
    - jump: ACCEPT
    - source: 172.17.0.0/24
    - position: 1
    - save: True
{% endif %}

# Rules if you are a Hot Node

# Rules if you are a Warm Node

# Some Fixer upper type rules
# Drop it like it's hot
# Make the input policy send stuff that doesn't match to be logged and dropped
iptables_drop_all_the_things:
  iptables.append:
@@ -718,144 +128,3 @@ iptables_drop_all_the_things:
    - chain: LOGGING
    - jump: DROP
    - save: True

{% if grains['role'] == 'so-heavynode' %}
# Allow Redis
enable_heavynode_redis_6379_{{ip}}:
  iptables.insert:
    - table: filter
    - chain: DOCKER-USER
    - jump: ACCEPT
    - proto: tcp
    - source: {{ ip }}
    - dport: 6379
    - position: 1
    - save: True

enable_forwardnode_beats_5044_{{ip}}:
  iptables.insert:
    - table: filter
    - chain: DOCKER-USER
    - jump: ACCEPT
    - proto: tcp
    - source: {{ ip }}
    - dport: 5044
    - position: 1
    - save: True

enable_forwardnode_beats_5644_{{ip}}:
  iptables.insert:
    - table: filter
    - chain: DOCKER-USER
    - jump: ACCEPT
    - proto: tcp
    - source: {{ ip }}
    - dport: 5644
    - position: 1
    - save: True
{% endif %}


# Rules if you are a Standalone Fleet node
{% if grains['role'] == 'so-fleet' %}
#This should be more granular
iptables_allow_fleetnode_docker:
  iptables.insert:
    - table: filter
    - chain: INPUT
    - jump: ACCEPT
    - source: 172.17.0.0/24
    - position: 1
    - save: True

# Allow Redis
enable_fleetnode_redis_6379_{{ip}}:
  iptables.insert:
    - table: filter
    - chain: DOCKER-USER
    - jump: ACCEPT
    - proto: tcp
    - source: {{ ip }}
    - dport: 6379
    - position: 1
    - save: True

enable_fleetnode_mysql_3306_{{ip}}:
  iptables.insert:
    - table: filter
    - chain: DOCKER-USER
    - jump: ACCEPT
    - proto: tcp
    - source: {{ ip }}
    - dport: 3306
    - position: 1
    - save: True

enable_fleet_osquery_8080_{{ip}}:
  iptables.insert:
    - table: filter
    - chain: DOCKER-USER
    - jump: ACCEPT
    - proto: tcp
    - source: {{ ip }}
    - dport: 8080
    - position: 1
    - save: True


enable_fleetnodetemp_mysql_3306_{{ip}}:
  iptables.insert:
    - table: filter
    - chain: DOCKER-USER
    - jump: ACCEPT
    - proto: tcp
    - source: 127.0.0.1
    - dport: 3306
    - position: 1
    - save: True

enable_fleettemp_osquery_8080_{{ip}}:
  iptables.insert:
    - table: filter
    - chain: DOCKER-USER
    - jump: ACCEPT
    - proto: tcp
    - source: 127.0.0.1
    - dport: 8080
    - position: 1
    - save: True


# Allow Analysts to access Fleet WebUI
{% for ip in pillar.get('analyst') %}

enable_fleetnode_fleet_443_{{ip}}:
  iptables.insert:
    - table: filter
    - chain: DOCKER-USER
    - jump: ACCEPT
    - proto: tcp
    - source: {{ ip }}
    - dport: 443
    - position: 1
    - save: True

{% endfor %}

# Needed for osquery endpoints to checkin to Fleet API for mgt
{% for ip in pillar.get('osquery_endpoint') %}

enable_fleetnode_8090_{{ip}}:
  iptables.insert:
    - table: filter
    - chain: DOCKER-USER
    - jump: ACCEPT
    - proto: tcp
    - source: {{ ip }}
    - dport: 8090
    - position: 1
    - save: True

{% endfor %}

{% endif %}
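Because the rule set is now generated from merged YAML rather than written out rule by rule, it is worth rendering the state before letting it touch a live ruleset. A minimal sketch using Salt's standard dry-run flag:

# Render and diff the firewall state without applying it; test=True is
# Salt's built-in dry run and changes nothing on the minion.
salt-call state.apply firewall test=True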
45
salt/firewall/map.jinja
Normal file
@@ -0,0 +1,45 @@
{% set role = grains.id.split('_') | last %}
{% set translated_pillar_assigned_hostgroups = {} %}

{% import_yaml 'firewall/portgroups.yaml' as default_portgroups %}
{% set default_portgroups = default_portgroups.firewall.aliases.ports %}
{% import_yaml 'firewall/portgroups.local.yaml' as local_portgroups %}
{% if local_portgroups.firewall.aliases.ports %}
{% set local_portgroups = local_portgroups.firewall.aliases.ports %}
{% else %}
{% set local_portgroups = {} %}
{% endif %}
{% set portgroups = salt['defaults.merge'](default_portgroups, local_portgroups, in_place=False) %}
{% set defined_portgroups = portgroups %}

{% import_yaml 'firewall/hostgroups.yaml' as default_hostgroups %}
{% import_yaml 'firewall/hostgroups.local.yaml' as local_hostgroups %}
{% set hostgroups = salt['defaults.merge'](default_hostgroups.firewall.hostgroups, local_hostgroups.firewall.hostgroups, in_place=False) %}

{# This block translates the portgroups defined in the pillar to what is defined by portgroups.yaml and portgroups.local.yaml #}
{% if salt['pillar.get']('firewall:assigned_hostgroups:chain') %}

{% for chain, hg in salt['pillar.get']('firewall:assigned_hostgroups:chain').items() %}
{% for pillar_hostgroup, pillar_portgroups in salt['pillar.get']('firewall:assigned_hostgroups:chain')[chain].hostgroups.items() %}
{% do translated_pillar_assigned_hostgroups.update({"chain": {chain: {"hostgroups": {pillar_hostgroup: {"portgroups": []}}}}}) %}
{% for pillar_portgroup in pillar_portgroups.portgroups %}
{% set pillar_portgroup = pillar_portgroup.split('.') | last %}
{% do translated_pillar_assigned_hostgroups.chain[chain].hostgroups[pillar_hostgroup].portgroups.append(defined_portgroups[pillar_portgroup]) %}

{% endfor %}
{% endfor %}
{% endfor %}
{% endif %}

{% import_yaml 'firewall/assigned_hostgroups.map.yaml' as default_assigned_hostgroups %}
{% import_yaml 'firewall/assigned_hostgroups.local.map.yaml' as local_assigned_hostgroups %}
{% if local_assigned_hostgroups.role[role] %}
{% set assigned_hostgroups = salt['defaults.merge'](local_assigned_hostgroups.role[role], default_assigned_hostgroups.role[role], merge_lists=False, in_place=False) %}
{% else %}
{% set assigned_hostgroups = default_assigned_hostgroups.role[role] %}
{% endif %}


{% if translated_pillar_assigned_hostgroups %}
{% do salt['defaults.merge'](assigned_hostgroups, translated_pillar_assigned_hostgroups, merge_lists=True, in_place=True) %}
{% endif %}
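The pillar translation block above only fires when firewall:assigned_hostgroups:chain is present in pillar, so a quick way to tell whether a minion will merge any pillar overrides is to query that key directly:

# Empty output means the map falls back to the role defaults plus any
# local.map.yaml overrides; anything else is merged in with merge_lists=True.
salt-call pillar.get firewall:assigned_hostgroups:chain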
92
salt/firewall/portgroups.yaml
Normal file
@@ -0,0 +1,92 @@
firewall:
  aliases:
    ports:
      all:
        tcp:
          - '0:65535'
        udp:
          - '0:65535'
      acng:
        tcp:
          - 3142
      beats_5044:
        tcp:
          - 5044
      beats_5644:
        tcp:
          - 5644
      cortex:
        tcp:
          - 9001
      cortex_es_node:
        tcp:
          - 9500
      cortex_es_rest:
        tcp:
          - 9400
      docker_registry:
        tcp:
          - 5000
      elasticsearch_node:
        tcp:
          - 9300
      elasticsearch_rest:
        tcp:
          - 9200
      fleet_api:
        tcp:
          - 8090
      fleet_webui:
        tcp:
          - 443
      influxdb:
        tcp:
          - 8086
      kibana:
        tcp:
          - 5601
      mysql:
        tcp:
          - 3306
      navigator:
        tcp:
          - 4200
      nginx:
        tcp:
          - 80
          - 443
      osquery_8080:
        tcp:
          - 8080
      playbook:
        tcp:
          - 3200
      redis:
        tcp:
          - 6379
      salt_master:
        tcp:
          - 4505
          - 4506
      sensoroni:
        tcp:
          - 443
      ssh:
        tcp:
          - 22
      syslog:
        tcp:
          - 514
        udp:
          - 514
      wazuh_agent:
        tcp:
          - 1514
        udp:
          - 1514
      wazuh_api:
        tcp:
          - 55000
      wazuh_authd:
        tcp:
          - 1515
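map.jinja merges these defaults with firewall/portgroups.local.yaml, so a new alias only needs to exist in the local file. A minimal sketch; the custom_tcp_9999 alias and the /opt/so/saltstack/local path are illustrative assumptions, not names taken from this commit:

# Define one extra portgroup alias in the local override file; after the
# merge it is addressable as portgroups.custom_tcp_9999 in the map files.
cat <<'EOF' > /opt/so/saltstack/local/salt/firewall/portgroups.local.yaml
firewall:
  aliases:
    ports:
      custom_tcp_9999:
        tcp:
          - 9999
EOF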
@@ -1,5 +1,6 @@
 {% set ENROLLSECRET = salt['cmd.run']('docker exec so-fleet fleetctl get enroll-secret') %}
-{%- set MAINIP = salt['pillar.get']('node:mainip') -%}
+{% set MAININT = salt['pillar.get']('host:mainint') %}
+{% set MAINIP = salt['grains.get']('ip_interfaces').get(MAININT)[0] %}

 so/fleet:
   event.send:
@@ -1,15 +1,24 @@
 {% set MASTER = salt['grains.get']('master') %}
 {% set ENROLLSECRET = salt['pillar.get']('secrets:fleet_enroll-secret') %}
 {% set CURRENTPACKAGEVERSION = salt['pillar.get']('static:fleet_packages-version') %}
 {% set VERSION = salt['pillar.get']('static:soversion') %}
+{% set CUSTOM_FLEET_HOSTNAME = salt['pillar.get']('static:fleet_custom_hostname', None) %}
+
+{% if CUSTOM_FLEET_HOSTNAME != None and CUSTOM_FLEET_HOSTNAME != '' %}
+{% set HOSTNAME = CUSTOM_FLEET_HOSTNAME %}
+{% else %}
+{% set HOSTNAME = grains.host %}
+{% endif %}

 so/fleet:
   event.send:
     - data:
        action: 'genpackages'
        hostname: {{ grains.host }}
+       package-hostname: {{ HOSTNAME }}
        role: {{ grains.role }}
        mainip: {{ grains.host }}
        enroll-secret: {{ ENROLLSECRET }}
        current-package-version: {{ CURRENTPACKAGEVERSION }}
        master: {{ MASTER }}
        version: {{ VERSION }}
9
salt/fleet/event_update-custom-hostname.sls
Normal file
@@ -0,0 +1,9 @@
{% set CUSTOM_FLEET_HOSTNAME = salt['pillar.get']('static:fleet_custom_hostname', None) %}

so/fleet:
  event.send:
    - data:
        action: 'update_custom_hostname'
        custom_hostname: {{ CUSTOM_FLEET_HOSTNAME }}
        role: {{ grains.role }}
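Following the usual Salt naming convention, the state above should be reachable by its dotted path. A hedged one-liner to fire the event by hand, assuming the file lands at salt/fleet/event_update-custom-hostname.sls as shown:

# Re-send the update_custom_hostname event without waiting for a highstate.
salt-call state.apply fleet.event_update-custom-hostname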
@@ -1,96 +0,0 @@
{%- set PACKAGESTS = salt['pillar.get']('static:fleet_packages-timestamp:', 'N/A') -%}

<!DOCTYPE html>
<html lang="en">
<head>
  <title>Security Onion - Hybrid Hunter</title>
  <meta charset="utf-8">
  <meta name="viewport" content="width=device-width, initial-scale=1">
  <link rel="icon" type="image/png" href="favicon-32x32.png" sizes="32x32" />
  <link rel="icon" type="image/png" href="favicon-16x16.png" sizes="16x16" />
  <style>
    * {
      box-sizing: border-box;
      font-family: Arial, Helvetica, sans-serif;
      padding-left: 30px;
      padding-right: 30px;
    }

    body {
      font-family: Arial, Helvetica, sans-serif;
      background-color: #2a2a2a;

    }
    a {
      color: #f2f2f2;
      text-align: left;
      padding: 0px;
    }

    .center-content {
      margin: 0 auto;
    }

    /* Style the top navigation bar */
    .topnav {
      overflow: hidden;
      background-color: #333;
      width: 1080px;
      display: flex;
      align-content: center;
    }

    /* Style the topnav links */
    .topnav a {
      margin: auto;
      color: #f2f2f2;
      text-align: center;
      padding: 14px 16px;
      text-decoration: none;
    }

    /* Change color on hover */
    .topnav a:hover {
      background-color: #ddd;
      color: black;
    }

    /* Style the content */
    .content {
      background-color: #2a2a2a;
      padding: 10px;
      padding-top: 20px;
      padding-left: 60px;
      color: #E3DBCC;
      width: 1080px;
    }

    /* Style the footer */
    .footer {
      background-color: #2a2a2a;
      padding: 60px;
      color: #E3DBCC;
      width: 1080px;
    }
  </style>
</head>
<body>
  <div class="center-content">
    <div class="topnav center-content">
      <a href="/fleet/" target="_blank">Fleet</a>
      <a href="https://github.com/Security-Onion-Solutions/securityonion-saltstack/wiki/Configuring-Osquery-with-Security-Onion" target="_blank">Osquery/Fleet Docs</a>
      <a href="https://www.securityonionsolutions.com" target="_blank">Security Onion Solutions</a>
    </div>

    <div class="content center-content">
      <p>
      <div style="text-align: center;">
        <h1>Security Onion - Dedicated Fleet Node</h1>
      </div>
      <br/>
      <br/>
      </p>
    </div>
  </div>
</body>
</html>
@@ -1,13 +0,0 @@
#!/bin/sh
echo "Applying Post Configuration for Osquery"
#fleetctl apply -f /packs/hh/osquery.conf
fleetctl apply -f /packs/palantir/Fleet/Endpoints/options.yaml
fleetctl apply -f /packs/palantir/Fleet/Endpoints/MacOS/osquery.yaml
fleetctl apply -f /packs/palantir/Fleet/Endpoints/Windows/osquery.yaml
fleetctl apply -f /packs/hh/hhdefault.yml

for pack in /packs/palantir/Fleet/Endpoints/packs/*.yaml;
do fleetctl apply -f "$pack"
done
echo ""
echo "You can now exit the container by typing exit"
@@ -1,34 +0,0 @@
#!/bin/bash
{% set MAIN_HOSTNAME = salt['grains.get']('host') %}
{% set MAIN_IP = salt['pillar.get']('node:mainip') %}

local_salt_dir=/opt/so/saltstack/local

#so-fleet-packages $FleetHostname/IP

#if [ ! "$(docker ps -q -f name=so-fleet)" ]; then
#  echo "so-fleet container not running... Exiting..."
#  exit 1
#fi

#docker exec so-fleet /bin/ash -c "echo {{ MAIN_IP }} {{ MAIN_HOSTNAME }} >> /etc/hosts"
#esecret=$(docker exec so-fleet fleetctl get enroll-secret)

#Concat fleet.crt & ca.crt - this is required for launcher connectivity
#cat /etc/pki/fleet.crt /etc/pki/ca.crt > /etc/pki/launcher.crt
#Actually only need to use /etc/ssl/certs/intca.crt

#Create the output directory
#mkdir /opt/so/conf/fleet/packages

docker run \
  --rm \
  --mount type=bind,source=/opt/so/conf/fleet/packages,target=/output \
  --mount type=bind,source=/etc/ssl/certs/intca.crt,target=/var/launcher/launcher.crt \
  docker.io/soshybridhunter/so-fleet-launcher:HH1.1.0 "$esecret" "$1":8090

cp /opt/so/conf/fleet/packages/launcher.* $local_salt_dir/salt/launcher/packages/

#Update timestamp on packages webpage
sed -i "s@.*Generated.*@Generated: $(date '+%m%d%Y')@g" /opt/so/conf/fleet/packages/index.html
sed -i "s@.*Generated.*@Generated: $(date '+%m%d%Y')@g" $local_salt_dir/salt/fleet/files/dedicated-index.html
@@ -1,48 +0,0 @@
#!/bin/bash
{% set MAIN_HOSTNAME = salt['grains.get']('host') %}
{% set MAIN_IP = salt['pillar.get']('node:mainip') %}

#so-fleet-setup.sh $FleetEmail

# Enable Fleet
echo "Starting Docker Containers..."
salt-call state.apply mysql queue=True >> /root/fleet-setup.log
salt-call state.apply fleet queue=True >> /root/fleet-setup.log
salt-call state.apply redis queue=True >> /root/fleet-setup.log

if [ ! "$(docker ps -q -f name=so-fleet)" ]; then
  echo "so-fleet container not running... Exiting..."
  exit 1
fi

initpw=$(date +%s | sha256sum | base64 | head -c 16 ; echo)

docker exec so-fleet /bin/ash -c "echo {{ MAIN_IP }} {{ MAIN_HOSTNAME }} >> /etc/hosts"
docker exec so-fleet fleetctl config set --address https://{{ MAIN_HOSTNAME }}:443 --tls-skip-verify --url-prefix /fleet
docker exec so-fleet fleetctl setup --email $1 --password $initpw

docker exec so-fleet fleetctl apply -f /packs/palantir/Fleet/Endpoints/MacOS/osquery.yaml
docker exec so-fleet fleetctl apply -f /packs/palantir/Fleet/Endpoints/Windows/osquery.yaml
docker exec so-fleet fleetctl apply -f /packs/hh/hhdefault.yml
docker exec so-fleet /bin/sh -c 'for pack in /packs/palantir/Fleet/Endpoints/packs/*.yaml; do fleetctl apply -f "$pack"; done'
docker exec so-fleet fleetctl apply -f /packs/hh/osquery.conf


# Enable Fleet
echo "Enabling Fleet..."
salt-call state.apply fleet.event_enable-fleet queue=True >> /root/fleet-setup.log
salt-call state.apply nginx queue=True >> /root/fleet-setup.log

# Generate osquery install packages
echo "Generating osquery install packages - this will take some time..."
salt-call state.apply fleet.event_gen-packages queue=True >> /root/fleet-setup.log
sleep 120

echo "Installing launcher via salt..."
salt-call state.apply fleet.install_package queue=True >> /root/fleet-setup.log
salt-call state.apply filebeat queue=True >> /root/fleet-setup.log
docker stop so-nginx
salt-call state.apply nginx queue=True >> /root/fleet-setup.log

echo "Fleet Setup Complete - Login here: https://{{ MAIN_HOSTNAME }}"
echo "Your username is $2 and your password is $initpw"
@@ -3,12 +3,11 @@
 {%- set FLEETJWT = salt['pillar.get']('secrets:fleet_jwt', None) -%}
 {% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
 {% set MASTER = salt['grains.get']('master') %}
-{% set MAINIP = salt['pillar.get']('node:mainip') %}
 {% set FLEETARCH = salt['grains.get']('role') %}

 {% if FLEETARCH == "so-fleet" %}
-{% set MAINIP = salt['pillar.get']('node:mainip') %}
+{% set MAININT = salt['pillar.get']('host:mainint') %}
+{% set MAINIP = salt['grains.get']('ip_interfaces').get(MAININT)[0] %}
 {% else %}
 {% set MAINIP = salt['pillar.get']('static:masterip') %}
 {% endif %}
@@ -16,14 +15,6 @@
 include:
   - mysql

-#{% if grains.id.split('_')|last in ['master', 'eval', 'fleet'] %}
-#so/fleet:
-#  event.send:
-#    - data:
-#      action: 'enablefleet'
-#      hostname: {{ grains.host }}
-#{% endif %}
-
 # Fleet Setup
 fleetcdir:
   file.directory:
@@ -67,21 +58,6 @@ fleetlogdir:
   - group: 939
   - makedirs: True

-fleetsetupscripts:
-  file.recurse:
-    - name: /usr/sbin
-    - user: 0
-    - group: 0
-    - file_mode: 755
-    - template: jinja
-    - source: salt://fleet/files/scripts
-
-osquerypackageswebpage:
-  file.managed:
-    - name: /opt/so/conf/fleet/packages/index.html
-    - source: salt://fleet/files/dedicated-index.html
-    - template: jinja
-
 fleetdb:
   mysql_database.present:
     - name: fleet
@@ -2,14 +2,24 @@
 {%- set FLEETNODE = salt['pillar.get']('static:fleet_node', False) -%}
 {%- set FLEETHOSTNAME = salt['pillar.get']('static:fleet_hostname', False) -%}
 {%- set FLEETIP = salt['pillar.get']('static:fleet_ip', False) -%}
+{% set CUSTOM_FLEET_HOSTNAME = salt['pillar.get']('static:fleet_custom_hostname', None) %}

 {%- if FLEETMASTER or FLEETNODE %}
+{% if CUSTOM_FLEET_HOSTNAME != (None and '') %}
+
+{{ CUSTOM_FLEET_HOSTNAME }}:
+  host.present:
+    - ip: {{ FLEETIP }}
+    - clean: True
+
+{% elif FLEETNODE and grains['role'] != 'so-fleet' %}

 {{ FLEETHOSTNAME }}:
   host.present:
     - ip: {{ FLEETIP }}
     - clean: True

+{% endif %}

 launcherpkg:
   pkg.installed:
     - sources:
@@ -18,4 +28,3 @@ launcherpkg:
 {% elif grains['os'] == 'Ubuntu' %}
     - launcher-final: salt://fleet/packages/launcher.deb
 {% endif %}
-{%- endif %}
@@ -3226,7 +3226,7 @@
   "steppedLine": false,
   "targets": [
     {
-      "alias": "InBound",
+      "alias": "Inbound",
       "dsType": "influxdb",
       "groupBy": [
         {
@@ -3290,7 +3290,7 @@
       ]
     },
     {
-      "alias": "OutBound",
+      "alias": "Outbound",
       "dsType": "influxdb",
       "groupBy": [
         {
@@ -3798,7 +3798,7 @@
   "steppedLine": false,
   "targets": [
     {
-      "alias": "InBound",
+      "alias": "Inbound",
       "dsType": "influxdb",
       "groupBy": [
         {
@@ -3862,7 +3862,7 @@
       ]
     },
     {
-      "alias": "OutBound",
+      "alias": "Outbound",
       "dsType": "influxdb",
       "groupBy": [
         {

@@ -1969,7 +1969,7 @@
   "steppedLine": false,
   "targets": [
     {
-      "alias": "InBound",
+      "alias": "Inbound",
       "dsType": "influxdb",
       "groupBy": [
         {
@@ -2033,7 +2033,7 @@
       ]
     },
     {
-      "alias": "OutBound",
+      "alias": "Outbound",
       "dsType": "influxdb",
       "groupBy": [
         {
@@ -2633,7 +2633,7 @@
   "steppedLine": false,
   "targets": [
     {
-      "alias": "InBound",
+      "alias": "Inbound",
       "dsType": "influxdb",
       "groupBy": [
         {
@@ -2697,7 +2697,7 @@
       ]
     },
     {
-      "alias": "OutBound",
+      "alias": "Outbound",
       "dsType": "influxdb",
       "groupBy": [
         {
@@ -3342,7 +3342,7 @@
   "steppedLine": false,
   "targets": [
     {
-      "alias": "InBound",
+      "alias": "Inbound",
       "dsType": "influxdb",
       "groupBy": [
         {
@@ -3406,7 +3406,7 @@
       ]
     },
     {
-      "alias": "OutBound",
+      "alias": "Outbound",
       "dsType": "influxdb",
       "groupBy": [
         {
@@ -3874,7 +3874,7 @@
   "steppedLine": false,
   "targets": [
     {
-      "alias": "InBound",
+      "alias": "Inbound",
       "dsType": "influxdb",
       "groupBy": [
         {
@@ -3938,7 +3938,7 @@
       ]
     },
     {
-      "alias": "OutBound",
+      "alias": "Outbound",
       "dsType": "influxdb",
       "groupBy": [
         {

@@ -1971,7 +1971,7 @@
   "steppedLine": false,
   "targets": [
     {
-      "alias": "InBound",
+      "alias": "Inbound",
       "dsType": "influxdb",
       "groupBy": [
         {
@@ -2035,7 +2035,7 @@
       ]
     },
     {
-      "alias": "OutBound",
+      "alias": "Outbound",
       "dsType": "influxdb",
       "groupBy": [
         {
@@ -2718,7 +2718,7 @@
   "steppedLine": false,
   "targets": [
     {
-      "alias": "InBound",
+      "alias": "Inbound",
       "dsType": "influxdb",
       "groupBy": [
         {
@@ -2782,7 +2782,7 @@
       ]
     },
     {
-      "alias": "OutBound",
+      "alias": "Outbound",
       "dsType": "influxdb",
       "groupBy": [
         {
@@ -3470,7 +3470,7 @@
       ]
     },
     {
-      "alias": "OutBound",
+      "alias": "Outbound",
       "groupBy": [
         {
           "params": [
@@ -3875,7 +3875,7 @@
   "steppedLine": false,
   "targets": [
     {
-      "alias": "InBound",
+      "alias": "Inbound",
       "groupBy": [
         {
           "params": [
@@ -3936,7 +3936,7 @@
       ]
     },
     {
-      "alias": "OutBound",
+      "alias": "Outbound",
       "groupBy": [
         {
           "params": [

@@ -2827,7 +2827,7 @@
   "steppedLine": false,
   "targets": [
     {
-      "alias": "InBound",
+      "alias": "Inbound",
       "dsType": "influxdb",
       "groupBy": [
         {
@@ -2891,7 +2891,7 @@
       ]
     },
     {
-      "alias": "OutBound",
+      "alias": "Outbound",
       "dsType": "influxdb",
       "groupBy": [
         {
@@ -3399,7 +3399,7 @@
   "steppedLine": false,
   "targets": [
     {
-      "alias": "InBound",
+      "alias": "Inbound",
       "dsType": "influxdb",
       "groupBy": [
         {
@@ -3463,7 +3463,7 @@
       ]
     },
     {
-      "alias": "OutBound",
+      "alias": "Outbound",
       "dsType": "influxdb",
       "groupBy": [
         {
6392
salt/grafana/dashboards/standalone/standalone.json
Normal file
File diff suppressed because it is too large
@@ -31,6 +31,13 @@ providers:
     editable: true
     options:
       path: /etc/grafana/grafana_dashboards/search_nodes
+  - name: 'Standalone'
+    folder: 'Standalone'
+    type: file
+    disableDeletion: false
+    editable: true
+    options:
+      path: /etc/grafana/grafana_dashboards/standalone
 {%- else %}
   - name: 'Security Onion'
     folder: 'Eval Mode'
@@ -40,6 +40,13 @@ grafanadashmsdir:
 - group: 939
 - makedirs: True

+grafanadashsadir:
+file.directory:
+- name: /opt/so/conf/grafana/grafana_dashboards/standalone
+- user: 939
+- group: 939
+- makedirs: True
+
 grafanadashevaldir:
 file.directory:
 - name: /opt/so/conf/grafana/grafana_dashboards/eval
@@ -96,7 +103,7 @@ dashboard-master:
 {% for SN, SNDATA in salt['pillar.get']('mastersearchtab', {}).items() %}
 {% set NODETYPE = SN.split('_')|last %}
 {% set SN = SN | regex_replace('_' ~ NODETYPE, '') %}
-dashboard-master:
+dashboard-mastersearch:
 file.managed:
 - name: /opt/so/conf/grafana/grafana_dashboards/mastersearch/{{ SN }}-MasterSearch.json
 - user: 939
@@ -115,6 +122,29 @@ dashboard-master:
 {% endfor %}
 {% endif %}

+{% if salt['pillar.get']('standalonetab', False) %}
+{% for SN, SNDATA in salt['pillar.get']('standalonetab', {}).items() %}
+{% set NODETYPE = SN.split('_')|last %}
+{% set SN = SN | regex_replace('_' ~ NODETYPE, '') %}
+dashboard-standalone:
+file.managed:
+- name: /opt/so/conf/grafana/grafana_dashboards/standalone/{{ SN }}-Standalone.json
+- user: 939
+- group: 939
+- template: jinja
+- source: salt://grafana/dashboards/standalone/standalone.json
+- defaults:
+SERVERNAME: {{ SN }}
+MANINT: {{ SNDATA.manint }}
+MONINT: {{ SNDATA.manint }}
+CPUS: {{ SNDATA.totalcpus }}
+UID: {{ SNDATA.guid }}
+ROOTFS: {{ SNDATA.rootfs }}
+NSMFS: {{ SNDATA.nsmfs }}
+
+{% endfor %}
+{% endif %}

 {% if salt['pillar.get']('sensorstab', False) %}
 {% for SN, SNDATA in salt['pillar.get']('sensorstab', {}).items() %}
 {% set NODETYPE = SN.split('_')|last %}
@@ -39,7 +39,7 @@ idstoolsetcsync:

 so-ruleupdatecron:
 cron.present:
-- name: /usr/sbin/so-rule-update.sh > /opt/so/log/idstools/download.log
+- name: /usr/sbin/so-rule-update > /opt/so/log/idstools/download.log 2>&1
 - user: root
 - minute: '1'
 - hour: '7'
@@ -1,7 +1,6 @@
 #!/bin/bash
 # {%- set FLEET_MASTER = salt['pillar.get']('static:fleet_master', False) -%}
 # {%- set FLEET_NODE = salt['pillar.get']('static:fleet_node', False) -%}
-# {%- set FLEET_IP = salt['pillar.get']('static:fleet_ip', '') %}
 # {%- set MASTER = salt['pillar.get']('master:url_base', '') %}

 KIBANA_VERSION="7.6.1"
@@ -11,7 +10,7 @@ cp /opt/so/conf/kibana/saved_objects.ndjson.template /opt/so/conf/kibana/saved_o

 # {% if FLEET_NODE or FLEET_MASTER %}
 # Fleet IP
-sed -i "s/FLEETPLACEHOLDER/{{ FLEET_IP }}/g" /opt/so/conf/kibana/saved_objects.ndjson
+sed -i "s/FLEETPLACEHOLDER/{{ MASTER }}/g" /opt/so/conf/kibana/saved_objects.ndjson
 # {% endif %}

 # SOCtopus and Master
File diff suppressed because one or more lines are too long

salt/logstash/pipelines/config/so/0009_input_beats.conf (new file, 6 lines)
@@ -0,0 +1,6 @@
+input {
+  beats {
+    port => "5044"
+    tags => [ "beat-ext" ]
+  }
+}
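
Note: the new `0009_input_beats.conf` above opens a Beats listener on TCP 5044 and tags everything it receives with `beat-ext`, which the reworked beats output file later in this diff keys on. A minimal sketch for confirming that a Winlogbeat endpoint can reach the listener (the manager hostname is a placeholder, and this only proves the port is open; real events must come from a Beats shipper):

```python
import socket

# Placeholder manager address; substitute your own deployment's hostname.
# The Beats protocol itself is binary (lumberjack), so this is only a
# reachability check, not an event submission.
MANAGER = ("so-master.example.com", 5044)

with socket.create_connection(MANAGER, timeout=5):
    print("Logstash beats input reachable at %s:%d" % MANAGER)
```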
@@ -19,7 +19,7 @@ output {
 elasticsearch {
 hosts => "{{ ES }}"
 index => "so-import-%{+YYYY.MM.dd}"
-template_name => "logstash"
+template_name => "so-common"
 template => "/so-common-template.json"
 template_overwrite => true
 }
@@ -21,7 +21,7 @@ output {
 elasticsearch {
 hosts => "{{ ES }}"
 index => "so-ids-%{+YYYY.MM.dd}"
-template_name => "logstash"
+template_name => "so-common"
 template => "/so-common-template.json"
 template_overwrite => true
 }
@@ -3,24 +3,21 @@
 {%- else %}
 {%- set ES = salt['pillar.get']('node:mainip', '') -%}
 {%- endif %}
-# Author: Justin Henderson
-# SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics
-# Updated by: Doug Burks
-# Last Update: 5/15/2017

 filter {
-if "syslog" in [tags] and "test_data" not in [tags] {
+if [module] =~ "syslog" {
 mutate {
 ##add_tag => [ "conf_file_9034"]
 }
 ##add_tag => [ "conf_file_9000"]
 }
 }
 }
 output {
-if "syslog" in [tags] and "test_data" not in [tags] {
+if [module] =~ "syslog" {
 elasticsearch {
+pipeline => "%{module}"
 hosts => "{{ ES }}"
 index => "so-syslog-%{+YYYY.MM.dd}"
-template_name => "logstash"
+template_name => "so-common"
 template => "/so-common-template.json"
 template_overwrite => true
 }
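
Note: this pipeline routes any event whose `module` field matches `syslog` into daily `so-syslog-*` indices via a `%{module}` ingest pipeline; it is the plumbing behind the "basic syslog ingestion" release note. A throwaway smoke test under stated assumptions (the diff shows only the filter/output side, so the listener host and UDP port 514 are assumptions, not confirmed here):

```python
import socket

# Assumed syslog listener on the manager; host and port are placeholders.
MANAGER = ("so-master.example.com", 514)
message = "<134>Jun 10 12:00:00 testhost testapp: syslog ingestion smoke test"

with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
    sock.sendto(message.encode("utf-8"), MANAGER)
# The event should then show up in today's so-syslog-* index.
```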
@@ -21,7 +21,7 @@ output {
 elasticsearch {
 hosts => "{{ ES }}"
 index => "so-firewall-%{+YYYY.MM.dd}"
-template_name => "logstash"
+template_name => "so-common"
 template => "/so-common-template.json"
 template_overwrite => true
 }
@@ -21,6 +21,7 @@ output {
 pipeline => "%{module}.%{dataset}"
 hosts => "{{ ES }}"
 index => "so-ids-%{+YYYY.MM.dd}"
 template_name => "so-common"
 template => "/so-common-template.json"
 }
 }
@@ -3,22 +3,15 @@
 {%- else %}
 {%- set ES = salt['pillar.get']('node:mainip', '') -%}
 {%- endif %}
-# Author: Wes Lambert
-# Last Update: 09/14/2018
-filter {
-if "beat" in [tags] {
-mutate {
-##add_tag => [ "conf_file_9500"]
-}
-}
-}

 output {
-if "beat" in [tags] {
+if "beat-ext" in [tags] {
 elasticsearch {
+pipeline => "beats.common"
 hosts => "{{ ES }}"
 index => "so-beats-%{+YYYY.MM.dd}"
-template_name => "so-beats"
-template => "/so-beats-template.json"
+template_name => "so-common"
+template => "/so-common-template.json"
 template_overwrite => true
 }
 }
File diff suppressed because it is too large
@@ -61,6 +61,7 @@ so-aptcacherng:
 docker_container.running:
 - image: {{ MASTER }}:5000/soshybridhunter/so-acng:{{ VERSION }}
 - hostname: so-acng
+- restart_policy: always
 - port_bindings:
 - 0.0.0.0:3142:3142
 - binds:
@@ -6,7 +6,8 @@
 {% set FLEETARCH = salt['grains.get']('role') %}

 {% if FLEETARCH == "so-fleet" %}
-{% set MAINIP = salt['pillar.get']('node:mainip') %}
+{% set MAININT = salt['pillar.get']('host:mainint') %}
+{% set MAINIP = salt['grains.get']('ip_interfaces').get(MAININT)[0] %}
 {% else %}
 {% set MAINIP = salt['pillar.get']('static:masterip') %}
 {% endif %}
@@ -119,6 +119,7 @@ http {
 proxy_set_header Proxy "";
 proxy_set_header Upgrade $http_upgrade;
 proxy_set_header Connection "Upgrade";
+proxy_set_header X-Forwarded-Proto $scheme;
 }

 location / {
@@ -132,9 +133,10 @@ http {
 proxy_set_header Proxy "";
 proxy_set_header Upgrade $http_upgrade;
 proxy_set_header Connection "Upgrade";
+proxy_set_header X-Forwarded-Proto $scheme;
 }

-location ~ ^/auth/.*?(whoami|login|logout) {
+location ~ ^/auth/.*?(whoami|login|logout|settings) {
 rewrite /auth/(.*) /$1 break;
 proxy_pass http://{{ masterip }}:4433;
 proxy_read_timeout 90;
@@ -143,7 +145,7 @@ http {
 proxy_set_header X-Real-IP $remote_addr;
 proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
 proxy_set_header Proxy "";
-
+proxy_set_header X-Forwarded-Proto $scheme;
 }

 location /cyberchef/ {
@@ -154,6 +156,7 @@ http {
 proxy_set_header X-Real-IP $remote_addr;
 proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
 proxy_set_header Proxy "";
+proxy_set_header X-Forwarded-Proto $scheme;
 }

 location /cyberchef {
@@ -169,6 +172,7 @@ http {
 proxy_set_header X-Real-IP $remote_addr;
 proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
 proxy_set_header Proxy "";
+proxy_set_header X-Forwarded-Proto $scheme;
 }

 location /grafana/ {
@@ -180,7 +184,7 @@ http {
 proxy_set_header X-Real-IP $remote_addr;
 proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
 proxy_set_header Proxy "";
-
+proxy_set_header X-Forwarded-Proto $scheme;
 }

 location /kibana/ {
@@ -193,7 +197,7 @@ http {
 proxy_set_header X-Real-IP $remote_addr;
 proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
 proxy_set_header Proxy "";
-
+proxy_set_header X-Forwarded-Proto $scheme;
 }

 location /nodered/ {
@@ -206,7 +210,7 @@ http {
 proxy_set_header Upgrade $http_upgrade;
 proxy_set_header Connection "Upgrade";
 proxy_set_header Proxy "";
-
+proxy_set_header X-Forwarded-Proto $scheme;
 }

 location /playbook/ {
@@ -217,7 +221,7 @@ http {
 proxy_set_header X-Real-IP $remote_addr;
 proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
 proxy_set_header Proxy "";
-
+proxy_set_header X-Forwarded-Proto $scheme;
 }

@@ -230,7 +234,7 @@ http {
 proxy_set_header X-Real-IP $remote_addr;
 proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
 proxy_set_header Proxy "";
-
+proxy_set_header X-Forwarded-Proto $scheme;
 }

 {%- if FLEET_NODE %}
@@ -246,6 +250,7 @@ http {
 proxy_set_header X-Real-IP $remote_addr;
 proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
 proxy_set_header Proxy "";
+proxy_set_header X-Forwarded-Proto $scheme;
 }
 {%- endif %}

@@ -258,7 +263,7 @@ http {
 proxy_set_header X-Real-IP $remote_addr;
 proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
 proxy_set_header Proxy "";
-
+proxy_set_header X-Forwarded-Proto $scheme;
 }

 location /cortex/ {
@@ -270,7 +275,7 @@ http {
 proxy_set_header X-Real-IP $remote_addr;
 proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
 proxy_set_header Proxy "";
-
+proxy_set_header X-Forwarded-Proto $scheme;
 }

 location /soctopus/ {
@@ -281,7 +286,7 @@ http {
 proxy_set_header X-Real-IP $remote_addr;
 proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
 proxy_set_header Proxy "";
-
+proxy_set_header X-Forwarded-Proto $scheme;
 }

 location /kibana/app/soc/ {
@@ -304,6 +309,7 @@ http {
 proxy_set_header X-Real-IP $remote_addr;
 proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
 proxy_set_header Proxy "";
+proxy_set_header X-Forwarded-Proto $scheme;
 }

 error_page 401 = @error401;
@@ -1,4 +1,6 @@
-{%- set MAINIP = salt['pillar.get']('node:mainip', '') %}
+{% set MAININT = salt['pillar.get']('host:mainint') %}
+{% set MAINIP = salt['grains.get']('ip_interfaces').get(MAININT)[0] %}

 # For more information on configuration, see:
 #   * Official English Documentation: http://nginx.org/en/docs/
 #   * Official Russian Documentation: http://nginx.org/ru/docs/
@@ -83,7 +85,7 @@ http {
 proxy_set_header X-Real-IP $remote_addr;
 proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
 proxy_set_header Proxy "";
-
+proxy_set_header X-Forwarded-Proto $scheme;
 }

 #error_page 404 /404.html;
(The @@ -119 through @@ -304 proxy-header and /auth/ settings-route hunks shown above are repeated verbatim for this template and two further nginx.conf variants in this commit; the duplicated hunks are suppressed here.)
@@ -1,7 +1,7 @@
 {% set MASTERIP = salt['pillar.get']('master:mainip', '') %}
 {% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
 {% set MASTER = salt['grains.get']('master') %}
-{% set MAINIP = salt['pillar.get']('node:mainip') %}
+{% set MAINIP = salt['grains.get']('ip_interfaces').get(salt['pillar.get']('sensor:mainint', salt['pillar.get']('master:mainint', salt['pillar.get']('node:mainint', salt['pillar.get']('host:mainint')))))[0] %}
 {%- set MYSQLPASS = salt['pillar.get']('secrets:mysql', None) -%}
 {%- set PLAYBOOKPASS = salt['pillar.get']('secrets:playbook', None) -%}
@@ -86,15 +86,22 @@ so-playbook:

 {% endif %}

+playbooklogdir:
+file.directory:
+- name: /opt/so/log/playbook
+- user: 939
+- group: 939
+- makedirs: True
+
 so-playbooksynccron:
 cron.present:
-- name: /usr/sbin/so-playbook-sync
+- name: /usr/sbin/so-playbook-sync > /opt/so/log/playbook/sync.log 2>&1
 - user: root
 - minute: '*/5'

 so-playbookruleupdatecron:
 cron.present:
-- name: /usr/sbin/so-playbook-ruleupdate
+- name: /usr/sbin/so-playbook-ruleupdate > /opt/so/log/playbook/update.log 2>&1
 - user: root
 - minute: '1'
 - hour: '6'
@@ -9,19 +9,19 @@ import subprocess
 def run():
 MINIONID = data['id']
 ACTION = data['data']['action']
-HOSTNAME = data['data']['hostname']
-ROLE = data['data']['role']
-ESECRET = data['data']['enroll-secret']
-MAINIP = data['data']['mainip']
-local_salt_dir = /opt/so/saltstack/local
-STATICFILE = local_salt_dir + '/pillar/static.sls'
-SECRETSFILE = local_salt_dir + '/pillar/secrets.sls'
+LOCAL_SALT_DIR = "/opt/so/saltstack/local"
+STATICFILE = f"{LOCAL_SALT_DIR}/pillar/static.sls"
+SECRETSFILE = f"{LOCAL_SALT_DIR}/pillar/secrets.sls"

-if MINIONID.split('_')[-1] in ['master','eval','fleet','mastersearch']:
+if MINIONID.split('_')[-1] in ['master','eval','fleet','mastersearch','standalone']:
 if ACTION == 'enablefleet':
 logging.info('so/fleet enablefleet reactor')

+ESECRET = data['data']['enroll-secret']
+MAINIP = data['data']['mainip']
+ROLE = data['data']['role']
+HOSTNAME = data['data']['hostname']

 # Enable Fleet
 for line in fileinput.input(STATICFILE, inplace=True):
 if ROLE == 'so-fleet':
@@ -49,15 +49,18 @@ def run():
 logging.info('so/fleet genpackages reactor')

 PACKAGEVERSION = data['data']['current-package-version']
+PACKAGEHOSTNAME = data['data']['package-hostname']
 MASTER = data['data']['master']
+VERSION = data['data']['version']
 ESECRET = data['data']['enroll-secret']

 # Increment the package version by 1
 PACKAGEVERSION += 1

 # Run Docker container that will build the packages
-gen_packages = subprocess.run(["docker", "run","--rm", "--mount", "type=bind,source=" + local_salt_dir + "/salt/fleet/packages,target=/output", \
-"--mount", "type=bind,source=/etc/ssl/certs/intca.crt,target=/var/launcher/launcher.crt", f"{ MASTER }:5000/soshybridhunter/so-fleet-launcher:HH1.3.0", \
-f"{ESECRET}", f"{HOSTNAME}:8090", f"{PACKAGEVERSION}.1.1"], stdout=subprocess.PIPE, encoding='ascii')
+gen_packages = subprocess.run(["docker", "run","--rm", "--mount", f"type=bind,source={LOCAL_SALT_DIR}/salt/fleet/packages,target=/output", \
+"--mount", "type=bind,source=/etc/ssl/certs/intca.crt,target=/var/launcher/launcher.crt", f"{ MASTER }:5000/soshybridhunter/so-fleet-launcher:{ VERSION }", \
+f"{ESECRET}", f"{PACKAGEHOSTNAME}:8090", f"{PACKAGEVERSION}.1.1"], stdout=subprocess.PIPE, encoding='ascii')

 # Update the 'packages-built' timestamp on the webpage (stored in the static pillar)
 for line in fileinput.input(STATICFILE, inplace=True):
@@ -70,6 +73,16 @@ def run():
 print(line)

 # Copy over newly-built packages
 copy_packages = subprocess.run(["salt-call", "state.apply","fleet"], stdout=subprocess.PIPE, encoding='ascii')

+if ACTION == 'update_custom_hostname':
+logging.info('so/fleet update_custom_hostname reactor')
+
+CUSTOMHOSTNAME = data['data']['custom_hostname']
+
+# Update the Fleet host in the static pillar
+for line in fileinput.input(STATICFILE, inplace=True):
+line = re.sub(r'fleet_custom_hostname:.*\n', f"fleet_custom_hostname: {CUSTOMHOSTNAME}", line.rstrip())
+print(line)

 return {}
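
Note: the reactor relies on Python's `fileinput` in-place editing to rewrite single pillar values, as in the `update_custom_hostname` branch above. A self-contained sketch of that pattern (the function name and the example path/key are illustrative only, not part of the commit):

```python
import fileinput
import re

def set_pillar_value(path, key, value):
    # With inplace=True, stdout is redirected into the file being read,
    # so print() writes each (possibly substituted) line back in place.
    for line in fileinput.input(path, inplace=True):
        line = re.sub(rf"^(\s*){re.escape(key)}:.*$",
                      rf"\g<1>{key}: {value}", line.rstrip("\n"))
        print(line)

# Hypothetical usage: point Fleet endpoints at a custom FQDN.
# set_pillar_value("/opt/so/saltstack/local/pillar/static.sls",
#                  "fleet_custom_hostname", "fleet.example.com")
```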
@@ -42,6 +42,7 @@ so-dockerregistry:
 docker_container.running:
 - image: registry:2
 - hostname: so-registry
+- restart_policy: always
 - port_bindings:
 - 0.0.0.0:5000:5000
 - binds:
@@ -1,25 +0,0 @@
-{%- set MASTERIP = salt['pillar.get']('static:masterip', '') -%}
-{%- set SENSORONIKEY = salt['pillar.get']('static:sensoronikey', '') -%}
-{
-"logFilename": "/opt/sensoroni/logs/sensoroni-server.log",
-"server": {
-"bindAddress": "0.0.0.0:9822",
-"maxPacketCount": 5000,
-"htmlDir": "html",
-"modules": {
-"filedatastore": {
-"jobDir": "jobs"
-},
-"securityonion": {
-"elasticsearchHost": "http://{{ MASTERIP }}:9200",
-"elasticsearchUsername": "",
-"elasticsearchPassword": "",
-"elasticsearchVerifyCert": false
-},
-"statickeyauth": {
-"anonymousCidr": "172.17.0.0/24",
-"apiKey": "{{ SENSORONIKEY }}"
-}
-}
-}
-}
@@ -1,45 +0,0 @@
-{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
-{% set MASTER = salt['grains.get']('master') %}
-
-sensoronidir:
-file.directory:
-- name: /opt/so/conf/sensoroni
-- user: 939
-- group: 939
-- makedirs: True
-
-sensoronidatadir:
-file.directory:
-- name: /nsm/sensoroni/jobs
-- user: 939
-- group: 939
-- makedirs: True
-
-sensoronilogdir:
-file.directory:
-- name: /opt/so/log/sensoroni
-- user: 939
-- group: 939
-- makedirs: True
-
-sensoronisync:
-file.recurse:
-- name: /opt/so/conf/sensoroni
-- source: salt://sensoroni/files
-- user: 939
-- group: 939
-- template: jinja
-
-so-sensoroni:
-docker_container.running:
-- image: {{ MASTER }}:5000/soshybridhunter/so-sensoroni:{{ VERSION }}
-- hostname: sensoroni
-- name: so-sensoroni
-- binds:
-- /nsm/sensoroni/jobs:/opt/sensoroni/jobs:rw
-- /opt/so/conf/sensoroni/sensoroni.json:/opt/sensoroni/sensoroni.json:ro
-- /opt/so/log/sensoroni/:/opt/sensoroni/logs/:rw
-- port_bindings:
-- 0.0.0.0:9822:9822
-- watch:
-- file: /opt/so/conf/sensoroni
@@ -42,7 +42,7 @@ urls:
 login_ui: https://{{ WEBACCESS }}/login/
 registration_ui: https://{{ WEBACCESS }}/login/
 error_ui: https://{{ WEBACCESS }}/login/
-settings_ui: https://{{ WEBACCESS }}/
+settings_ui: https://{{ WEBACCESS }}/?r=/settings
 verify_ui: https://{{ WEBACCESS }}/
 mfa_ui: https://{{ WEBACCESS }}/
@@ -1,32 +1,31 @@
 {
-"title": "Introducing Hybrid Hunter 1.3.0 Beta 2",
+"title": "Introducing Hybrid Hunter 1.4.0 Beta 3",
 "changes": [
-{ "summary": "New Feature: Codename: \"Onion Hunt\". Select Hunt from the menu and start hunting down your adversaries!" },
-{ "summary": "Improved ECS support." },
-{ "summary": "Complete refactor of the setup to make it easier to follow." },
-{ "summary": "Improved setup script logging to better assist on any issues." },
-{ "summary": "Setup now checks for minimal requirements during install." },
-{ "summary": "Updated Cyberchef to version 9.20.3." },
-{ "summary": "Updated Elastalert to version 0.2.4 and switched to alpine to reduce container size." },
-{ "summary": "Updated Redis to 5.0.9 and switched to alpine to reduce container size." },
-{ "summary": "Updated Salt to 2019.2.5." },
-{ "summary": "Updated Grafana to 6.7.3." },
-{ "summary": "Zeek 3.0.6." },
-{ "summary": "Suricata 4.1.8." },
-{ "summary": "Fixes so-status to now display correct containers and status." },
-{ "summary": "local.zeek is now controlled by a pillar instead of modifying the file directly." },
-{ "summary": "Renamed so-core to so-nginx and switched to alpine to reduce container size." },
-{ "summary": "Playbook now uses MySQL instead of SQLite." },
-{ "summary": "Sigma rules have all been updated." },
-{ "summary": "Kibana dashboard improvements for ECS." },
-{ "summary": "Fixed an issue where geoip was not properly parsed." },
-{ "summary": "ATT&CK Navigator is now it's own state." },
-{ "summary": "Standlone mode is now supported." },
-{ "summary": "Mastersearch previously used the same Grafana dashboard as a Search node. It now has its own dashboard that incorporates panels from the Master node and Search node dashboards." },
+{ "summary": "Complete overhaul of the way we handle custom and default settings and data. You will now see a default and local directory under the saltstack directory. All customizations are stored in local." },
+{ "summary": "The way firewall rules are handled has been completely revamped. This will allow the user to customize firewall rules much easier." },
+{ "summary": "Users can now change their own password in SOC." },
+{ "summary": "Hunt now allows users to enable auto-hunt. This is a toggle which, when enabled, automatically submits a new hunt when filtering, grouping, etc." },
+{ "summary": "Title bar now reflects current Hunt query. This will assist users in locating a previous query from their browser history." },
+{ "summary": "Zeek 3.0.7" },
+{ "summary": "Elastic 7.7.1" },
+{ "summary": "Suricata can now be used for meta data generation." },
+{ "summary": "Suricata eve.json has been moved to `/nsm` to align with storage of other data." },
+{ "summary": "Suricata will now properly rotate its logs." },
+{ "summary": "Grafana dashboards now work properly in standalone mode." },
+{ "summary": "Kibana Dashboard updates including osquery, community_id." },
+{ "summary": "New Elasticsearch Ingest processor to generate community_id from any log that includes the required fields." },
+{ "summary": "Community_id generated for additional logs: Zeek HTTP/SMTP/ , Sysmon shipped with Osquery or Winlogbeat." },
+{ "summary": "Major streamlining of Fleet setup & configuration - no need to run a secondary setup script anymore." },
+{ "summary": "Fleet Standalone node now includes the ability to set a FQDN to point osquery endpoints to." },
+{ "summary": "Distributed installs now support ingesting Windows Eventlogs via Winlogbeat - includes full parsing support for Sysmon." },
+{ "summary": "SOC Downloads section now includes a link to the supported version of Winlogbeat." },
+{ "summary": "Basic syslog ingestion capability now included." },
+{ "summary": "Elasticsearch index name transition fixes for various components." },
+{ "summary": "Updated URLs for pivot fields in Kibana." },
+{ "summary": "Instances of \"hive\" renamed to \"thehive\"." },
+{ "summary": "KNOWN ISSUE: The Hunt feature is currently considered \"Preview\" and although very useful in its current state, not everything works. We wanted to get this out as soon as possible to get the feedback from you! Let us know what you want to see! Let us know what you think we should call it!" },
+{ "summary": "KNOWN ISSUE: You cannot pivot to PCAP from Suricata alerts in Kibana or Hunt." },
+{ "summary": "KNOWN ISSUE: Navigator is currently not working when using hostname to access SOC. IP mode works correctly." },
+{ "summary": "KNOWN ISSUE: Updating users via the SOC ui is known to fail. To change a user, delete the user and re-add them." },
+{ "summary": "KNOWN ISSUE: Due to the move to ECS, the current Playbook plays may not alert correctly at this time." },
+{ "summary": "KNOWN ISSUE: The osquery MacOS package does not install correctly." }
 ]
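
Note: several entries above revolve around `network.community_id`, the flow hash that lets Zeek, Suricata, and Sysmon records for the same connection be joined on one key. For reference, a from-scratch sketch of the v1 scheme (IPv4 and TCP/UDP only here; the Elasticsearch ingest processor mentioned above is the supported implementation):

```python
import base64
import hashlib
import socket
import struct

def community_id_v1(saddr, daddr, sport, dport, proto=6, seed=0):
    """Community ID v1: '1:' + base64(SHA1(seed, saddr, daddr, proto, 0, sport, dport))."""
    src, dst = socket.inet_aton(saddr), socket.inet_aton(daddr)
    # Canonically order the endpoints so both directions hash identically.
    if (src, sport) > (dst, dport):
        src, dst, sport, dport = dst, src, dport, sport
    data = struct.pack("!H", seed) + src + dst + struct.pack("!BBHH", proto, 0, sport, dport)
    return "1:" + base64.b64encode(hashlib.sha1(data).digest()).decode()

print(community_id_v1("10.0.0.1", "192.168.1.1", 51234, 443))  # proto 6 = TCP
```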
@@ -32,7 +32,7 @@
 "dateRangeMinutes": 1440,
 "mostRecentlyUsedLimit": 5,
 "eventFields": {
-"default": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "log.id.uid", "network.community_id" ],
+"default": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "log.id.uid", "network.community_id", "event.dataset" ],
 "bro_conn": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "protocol", "service", "log.id.uid" ],
 "bro_dce_rpc": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "endpoint", "named_pipe", "operation", "log.id.uid" ],
 "bro_dhcp": ["soc_timestamp", "source.ip", "destination.ip", "domain_name", "hostname", "message_types", "log.id.uid" ],
@@ -102,11 +102,13 @@
 { "name": "DHCP", "description": "DHCP leases", "query": "event.dataset:dhcp | groupby host.hostname host.domain"},
 { "name": "DHCP", "description": "DHCP grouped by message type", "query": "event.dataset:dhcp | groupby dhcp.message_types"},
 { "name": "DNP3", "description": "DNP3 grouped by reply", "query": "event.dataset:dnp3 | groupby dnp3.fc_reply"},
-{ "name": "DNS", "description": "DNS queries grouped by port ", "query": "event.dataset:dns | groupby dns.query.name destination.port"},
+{ "name": "DNS", "description": "DNS queries grouped by port", "query": "event.dataset:dns | groupby dns.query.name destination.port"},
+{ "name": "DNS", "description": "DNS queries grouped by type", "query": "event.dataset:dns | groupby dns.query.type_name destination.port"},
-{ "name": "DNS", "description": "DNS highest registered domain", "query": "event.dataset:dns | groupby dns.highest_registered_domain.keyword"},
-{ "name": "DNS", "description": "DNS grouped by parent domain", "query": "event.dataset:dns | groupby dns.parent_domain.keyword"},
+{ "name": "DNS", "description": "DNS queries grouped by response code", "query": "event.dataset:dns | groupby dns.response.code_name destination.port"},
+{ "name": "DNS", "description": "DNS highest registered domain", "query": "event.dataset:dns | groupby dns.highest_registered_domain.keyword destination.port"},
+{ "name": "DNS", "description": "DNS grouped by parent domain", "query": "event.dataset:dns | groupby dns.parent_domain.keyword destination.port"},
 { "name": "Files", "description": "Files grouped by mimetype", "query": "event.dataset:files | groupby file.mime_type source.ip"},
 { "name": "Files", "description": "Files grouped by source", "query": "event.dataset:files | groupby file.source source.ip"},
 { "name": "FTP", "description": "FTP grouped by argument", "query": "event.dataset:ftp | groupby ftp.argument"},
 { "name": "FTP", "description": "FTP grouped by command", "query": "event.dataset:ftp | groupby ftp.command"},
 { "name": "FTP", "description": "FTP grouped by username", "query": "event.dataset:ftp | groupby ftp.user"},
@@ -1,9 +1,11 @@
 {% set master = salt['grains.get']('master') %}
 {% set masterip = salt['pillar.get']('static:masterip', '') %}
 {% set HOSTNAME = salt['grains.get']('host') %}
-{% set MAINIP = salt['pillar.get']('node:mainip') %}
 {% set global_ca_text = [] %}
 {% set global_ca_server = [] %}
+{% set MAININT = salt['pillar.get']('host:mainint') %}
+{% set MAINIP = salt['grains.get']('ip_interfaces').get(MAININT)[0] %}
+{% set CUSTOM_FLEET_HOSTNAME = salt['pillar.get']('static:fleet_custom_hostname', None) %}

 {% if grains.id.split('_')|last in ['master', 'eval', 'standalone'] %}
 {% set trusttheca_text = salt['mine.get'](grains.id, 'x509.get_pem_entries')[grains.id]['/etc/pki/ca.crt']|replace('\n', '') %}
@@ -11,7 +13,7 @@
 {% else %}
 {% set x509dict = salt['mine.get']('*', 'x509.get_pem_entries') %}
 {% for host in x509dict %}
-{% if 'master' in host.split('_')|last %}
+{% if 'master' in host.split('_')|last or host.split('_')|last == 'standalone' %}
 {% do global_ca_text.append(x509dict[host].get('/etc/pki/ca.crt')|replace('\n', '')) %}
 {% do global_ca_server.append(host) %}
 {% endif %}
@@ -200,6 +202,7 @@ chownfilebeatp8:
 - signing_policy: masterssl
 - public_key: /etc/pki/masterssl.key
 - CN: {{ HOSTNAME }}
+- subjectAltName: DNS:{{ HOSTNAME }}, IP:{{ MAINIP }} {% if CUSTOM_FLEET_HOSTNAME != None %},DNS:{{ CUSTOM_FLEET_HOSTNAME }} {% endif %}
 - days_remaining: 0
 - days_valid: 820
 - backup: True
@@ -222,7 +225,7 @@ chownfilebeatp8:
 x509.certificate_managed:
 - signing_private_key: /etc/pki/fleet.key
 - CN: {{ HOSTNAME }}
-- subjectAltName: DNS:{{ HOSTNAME }}, IP:{{ MAINIP }}
+- subjectAltName: DNS:{{ HOSTNAME }}, IP:{{ MAINIP }} {% if CUSTOM_FLEET_HOSTNAME != None %},DNS:{{ CUSTOM_FLEET_HOSTNAME }} {% endif %}
 - days_remaining: 0
 - days_valid: 820
 - backup: True
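
Note: both certificates above now pick up the optional Fleet FQDN as an extra subjectAltName, which is what lets osquery endpoints enroll against a name rather than the master IP. A quick way to inspect the served SANs from an endpoint (the hostname is a placeholder; port 8090 and the CA path are borrowed from elsewhere in this diff):

```python
import socket
import ssl

HOST, PORT = "fleet.example.com", 8090  # placeholder FQDN
ctx = ssl.create_default_context(cafile="/etc/ssl/certs/intca.crt")

with ctx.wrap_socket(socket.create_connection((HOST, PORT), timeout=5),
                     server_hostname=HOST) as tls:
    # getpeercert() on a verified connection includes the SAN list.
    print(tls.getpeercert()["subjectAltName"])
```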
@@ -95,8 +95,8 @@ outputs:
 - eve-log:
 enabled: yes
 filetype: regular #regular|syslog|unix_dgram|unix_stream|redis
-filename: /nsm/eve.json
-rotate-interval: day
+filename: /nsm/eve-%Y-%m-%d-%H:%M.json
+rotate-interval: hour

 #prefix: "@cee: " # prefix to prepend to each log entry
 # the following are valid when type: syslog above
@@ -95,7 +95,7 @@ outputs:
 - eve-log:
 enabled: yes
 filetype: regular #regular|syslog|unix_dgram|unix_stream|redis
-filename: /nsm/eve.json
+filename: /nsm/eve-%Y-%m-%d-%H:%M.json
 rotate-interval: hour

 #prefix: "@cee: " # prefix to prepend to each log entry
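
Note: with the strftime filename and `rotate-interval: hour`, Suricata now writes one timestamped eve file per hour under `/nsm` instead of a single ever-growing `eve.json`, which is what the "Suricata will now properly rotate its logs" release note refers to. A pruning sketch under that assumption (the 48-hour retention is arbitrary):

```python
import glob
import os
import time

# Assumes hourly files like /nsm/eve-2020-06-10-12:00.json per the
# filename pattern above; keep two days of rotated logs, drop the rest.
cutoff = time.time() - 48 * 3600
for path in glob.glob("/nsm/eve-*.json"):
    if os.path.getmtime(path) < cutoff:
        os.remove(path)
```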
@@ -616,14 +616,14 @@

 # # Read stats from one or more Elasticsearch servers or clusters
-{% if grains['role'] in ['so-master', 'so-eval', 'so-mastersearch'] %}
+{% if grains['role'] in ['so-master', 'so-eval', 'so-mastersearch', 'so-standalone'] %}
 [[inputs.elasticsearch]]

 # ## specify a list of one or more Elasticsearch servers
 # # you can add username and password to your url to use basic authentication:
 # # servers = ["http://user:pass@localhost:9200"]
 servers = ["http://{{ MASTER }}:9200"]
-{% elif grains['role'] in ['so-searchnode', 'so-hotnode', 'so-warmnode', 'so-heavynode'] %}
+{% elif grains['role'] in ['so-node', 'so-hotnode', 'so-warmnode', 'so-heavynode'] %}
 [[inputs.elasticsearch]]
 servers = ["http://{{ NODEIP }}:9200"]
 {% endif %}
@@ -683,7 +683,18 @@
 "/scripts/oldpcap.sh"
 ]
 data_format = "influx"

+{% elif grains['role'] == 'so-standalone' %}
+[[inputs.exec]]
+commands = [
+"/scripts/redis.sh",
+"/scripts/influxdbsize.sh",
+"/scripts/stenoloss.sh",
+"/scripts/suriloss.sh",
+"/scripts/checkfiles.sh",
+"/scripts/broloss.sh",
+"/scripts/oldpcap.sh"
+]
+data_format = "influx"
 {% elif grains['role'] == 'so-eval' %}
 [[inputs.exec]]
 commands = [
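
Note: the new `so-standalone` branch reuses the same `inputs.exec` scripts as the other roles with `data_format = "influx"`, so each script only has to print InfluxDB line protocol on stdout for Telegraf to collect. A trivial stand-in showing the expected output shape (measurement, tag, and field names are made up):

```python
# Hypothetical exec-input script body, one metric in influx line protocol:
# measurement[,tag=value...] field=value[,field=value...]
print("suriloss,role=standalone drops=0i,packets=123456i")
```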
Some files were not shown because too many files have changed in this diff.