m0duspwnens
2020-07-09 11:27:06 -04:00
parent 2c32c24bf0
commit 3cf31e2460
134 changed files with 609 additions and 609 deletions

View File

@@ -13,8 +13,8 @@ role:
 fleet:
 heavynode:
 helixsensor:
-master:
-mastersearch:
+manager:
+managersearch:
 standalone:
 searchnode:
 sensor:

View File

@@ -24,7 +24,7 @@ firewall:
 ips:
 delete:
 insert:
-master:
+manager:
 ips:
 delete:
 insert:

View File

@@ -1,12 +1,12 @@
-{%- set FLEETMASTER = salt['pillar.get']('static:fleet_master', False) -%}
+{%- set FLEETMANAGER = salt['pillar.get']('static:fleet_manager', False) -%}
 {%- set FLEETNODE = salt['pillar.get']('static:fleet_node', False) -%}
-{% set WAZUH = salt['pillar.get']('master:wazuh', '0') %}
-{% set THEHIVE = salt['pillar.get']('master:thehive', '0') %}
-{% set PLAYBOOK = salt['pillar.get']('master:playbook', '0') %}
-{% set FREQSERVER = salt['pillar.get']('master:freq', '0') %}
-{% set DOMAINSTATS = salt['pillar.get']('master:domainstats', '0') %}
+{% set WAZUH = salt['pillar.get']('manager:wazuh', '0') %}
+{% set THEHIVE = salt['pillar.get']('manager:thehive', '0') %}
+{% set PLAYBOOK = salt['pillar.get']('manager:playbook', '0') %}
+{% set FREQSERVER = salt['pillar.get']('manager:freq', '0') %}
+{% set DOMAINSTATS = salt['pillar.get']('manager:domainstats', '0') %}
 {% set BROVER = salt['pillar.get']('static:broversion', 'COMMUNITY') %}
-{% set GRAFANA = salt['pillar.get']('master:grafana', '0') %}
+{% set GRAFANA = salt['pillar.get']('manager:grafana', '0') %}
 eval:
 containers:
@@ -20,7 +20,7 @@ eval:
 - so-soc
 - so-kratos
 - so-idstools
-{% if FLEETMASTER %}
+{% if FLEETMANAGER %}
 - so-mysql
 - so-fleet
 - so-redis
@@ -83,7 +83,7 @@ hot_node:
 - so-logstash
 - so-elasticsearch
 - so-curator
-master_search:
+manager_search:
 containers:
 - so-nginx
 - so-telegraf
@@ -99,7 +99,7 @@ master_search:
 - so-elastalert
 - so-filebeat
 - so-soctopus
-{% if FLEETMASTER %}
+{% if FLEETMANAGER %}
 - so-mysql
 - so-fleet
 - so-redis
@@ -122,7 +122,7 @@ master_search:
 {% if DOMAINSTATS != '0' %}
 - so-domainstats
 {% endif %}
-master:
+manager:
 containers:
 - so-dockerregistry
 - so-nginx
@@ -141,7 +141,7 @@ master:
 - so-kibana
 - so-elastalert
 - so-filebeat
-{% if FLEETMASTER %}
+{% if FLEETMANAGER %}
 - so-mysql
 - so-fleet
 - so-redis

View File

@@ -17,7 +17,7 @@ firewall:
 - 5644
 - 9822
 udp:
-master:
+manager:
 ports:
 tcp:
 - 1514

View File

@@ -1,6 +1,6 @@
 logstash:
 pipelines:
-master:
+manager:
 config:
 - so/0009_input_beats.conf
 - so/0010_input_hhbeats.conf

View File

@@ -6,10 +6,10 @@ base:
 - match: compound
 - zeek
-'*_mastersearch or *_heavynode':
+'*_managersearch or *_heavynode':
 - match: compound
 - logstash
-- logstash.master
+- logstash.manager
 - logstash.search
 '*_sensor':
@@ -18,16 +18,16 @@ base:
 - healthcheck.sensor
 - minions.{{ grains.id }}
-'*_master or *_mastersearch':
+'*_manager or *_managersearch':
 - match: compound
 - static
 - data.*
 - secrets
 - minions.{{ grains.id }}
-'*_master':
+'*_manager':
 - logstash
-- logstash.master
+- logstash.manager
 '*_eval':
 - static
@@ -39,7 +39,7 @@ base:
 '*_standalone':
 - logstash
-- logstash.master
+- logstash.manager
 - logstash.search
 - data.*
 - brologs
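The compound matchers above target minions by the suffix of their minion id, so a box whose id ends in _managersearch picks up the logstash, logstash.manager, and logstash.search states. A rough, standalone illustration of that glob-style matching (plain Python with made-up minion ids, not Salt's own targeting engine):

    # Rough sketch of how the "<glob> or <glob>" compound matchers above select
    # minions by id. Real Salt compound targeting is richer; this only globs.
    # The minion ids below are made-up examples.
    from fnmatch import fnmatch

    minion_ids = ["so1_manager", "so2_managersearch", "so3_heavynode", "so4_sensor"]

    def matches(minion_id, compound):
        # The top file only uses simple "A or B" compounds of id globs here.
        return any(fnmatch(minion_id, pattern.strip()) for pattern in compound.split(" or "))

    for mid in minion_ids:
        print(mid, matches(mid, "*_managersearch or *_heavynode"))
    # so1_manager False, so2_managersearch True, so3_heavynode True, so4_sensor False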

View File

@@ -6,7 +6,7 @@ import socket
 def send(data):
-    mainint = __salt__['pillar.get']('sensor:mainint', __salt__['pillar.get']('master:mainint'))
+    mainint = __salt__['pillar.get']('sensor:mainint', __salt__['pillar.get']('manager:mainint'))
     mainip = __salt__['grains.get']('ip_interfaces').get(mainint)[0]
     dstport = 8094
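The lookup above relies on pillar.get's second argument being a default: a sensor-specific mainint wins, and the manager's interface is only used when the sensor key is absent; the chosen interface name is then resolved to its first address via the ip_interfaces grain. A minimal standalone sketch of that behaviour (plain dictionaries and made-up values in place of Salt's __salt__ loader):

    # Standalone emulation of the fallback lookup above; not Salt itself.
    # The pillar/grains data here is made-up sample data.
    pillar = {"manager": {"mainint": "eth0"}, "sensor": {"mainint": "eth1"}}
    grains = {"ip_interfaces": {"eth0": ["10.0.0.5"], "eth1": ["192.168.1.10"]}}

    def pillar_get(path, default=None):
        # Walk a colon-separated key path, returning the default if any part is missing.
        node = pillar
        for part in path.split(":"):
            if not isinstance(node, dict) or part not in node:
                return default
            node = node[part]
        return node

    # Mirrors: pillar.get('sensor:mainint', pillar.get('manager:mainint'))
    mainint = pillar_get("sensor:mainint", pillar_get("manager:mainint"))
    mainip = grains["ip_interfaces"].get(mainint)[0]  # first address on that interface
    print(mainint, mainip)  # -> eth1 192.168.1.10 with the sample data above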

View File

@@ -1,4 +1,4 @@
-{% set master = salt['grains.get']('master') %}
+{% set manager = salt['grains.get']('manager') %}
 /etc/salt/minion.d/signing_policies.conf:
 file.managed:
 - source: salt://ca/files/signing_policies.conf
@@ -20,7 +20,7 @@ pki_private_key:
 /etc/pki/ca.crt:
 x509.certificate_managed:
 - signing_private_key: /etc/pki/ca.key
-- CN: {{ master }}
+- CN: {{ manager }}
 - C: US
 - ST: Utah
 - L: Salt Lake City

View File

@@ -18,14 +18,14 @@
 }
 },grain='id', merge=salt['pillar.get']('docker')) %}
-{% if role in ['eval', 'mastersearch', 'master', 'standalone'] %}
-{{ append_containers('master', 'grafana', 0) }}
-{{ append_containers('static', 'fleet_master', 0) }}
-{{ append_containers('master', 'wazuh', 0) }}
-{{ append_containers('master', 'thehive', 0) }}
-{{ append_containers('master', 'playbook', 0) }}
-{{ append_containers('master', 'freq', 0) }}
-{{ append_containers('master', 'domainstats', 0) }}
+{% if role in ['eval', 'managersearch', 'manager', 'standalone'] %}
+{{ append_containers('manager', 'grafana', 0) }}
+{{ append_containers('static', 'fleet_manager', 0) }}
+{{ append_containers('manager', 'wazuh', 0) }}
+{{ append_containers('manager', 'thehive', 0) }}
+{{ append_containers('manager', 'playbook', 0) }}
+{{ append_containers('manager', 'freq', 0) }}
+{{ append_containers('manager', 'domainstats', 0) }}
 {% endif %}
 {% if role in ['eval', 'heavynode', 'sensor', 'standalone'] %}
@@ -37,7 +37,7 @@
 {% endif %}
 {% if role == 'searchnode' %}
-{{ append_containers('master', 'wazuh', 0) }}
+{{ append_containers('manager', 'wazuh', 0) }}
 {% endif %}
 {% if role == 'sensor' %}

View File

@@ -11,7 +11,7 @@ bro_logs_enabled() {
 }
-whiptail_master_adv_service_brologs() {
+whiptail_manager_adv_service_brologs() {
 BLOGS=$(whiptail --title "Security Onion Setup" --checklist "Please Select Logs to Send:" 24 78 12 \
 "conn" "Connection Logging" ON \
@@ -54,5 +54,5 @@ whiptail_master_adv_service_brologs() {
"x509" "x.509 Logs" ON 3>&1 1>&2 2>&3 ) "x509" "x.509 Logs" ON 3>&1 1>&2 2>&3 )
} }
whiptail_master_adv_service_brologs whiptail_manager_adv_service_brologs
bro_logs_enabled bro_logs_enabled

View File

@@ -21,13 +21,13 @@ got_root(){
 fi
 }
-master_check() {
-# Check to see if this is a master
-MASTERCHECK=$(cat /etc/salt/grains | grep role | awk '{print $2}')
-if [ $MASTERCHECK == 'so-eval' ] || [ $MASTERCHECK == 'so-master' ] || [ $MASTERCHECK == 'so-mastersearch' ] || [ $MASTERCHECK == 'so-standalone' ] || [ $MASTERCHECK == 'so-helix' ]; then
-echo "This is a master. We can proceed"
+manager_check() {
+# Check to see if this is a manager
+MANAGERCHECK=$(cat /etc/salt/grains | grep role | awk '{print $2}')
+if [ $MANAGERCHECK == 'so-eval' ] || [ $MANAGERCHECK == 'so-manager' ] || [ $MANAGERCHECK == 'so-managersearch' ] || [ $MANAGERCHECK == 'so-standalone' ] || [ $MANAGERCHECK == 'so-helix' ]; then
+echo "This is a manager. We can proceed"
 else
-echo "Please run soup on the master. The master controls all updates."
+echo "Please run soup on the manager. The manager controls all updates."
 exit 1
 fi
 }
@@ -56,13 +56,13 @@ version_check() {
 fi
 }
 got_root
-master_check
+manager_check
 version_check
 # Use the hostname
 HOSTNAME=$(hostname)
 # List all the containers
-if [ $MASTERCHECK != 'so-helix' ]; then
+if [ $MANAGERCHECK != 'so-helix' ]; then
 TRUSTED_CONTAINERS=( \
 "so-acng:$VERSION" \
 "so-thehive-cortex:$VERSION" \

View File

@@ -14,7 +14,7 @@
 #
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
-{%- set MASTERIP = salt['pillar.get']('static:masterip', '') -%}
+{%- set MANAGERIP = salt['pillar.get']('static:managerip', '') -%}
 . /usr/sbin/so-common
 SKIP=0
@@ -50,7 +50,7 @@ done
 if [ $SKIP -ne 1 ]; then
 # List indices
 echo
-curl {{ MASTERIP }}:9200/_cat/indices?v
+curl {{ MANAGERIP }}:9200/_cat/indices?v
 echo
 # Inform user we are about to delete all data
 echo
@@ -89,10 +89,10 @@ fi
 # Delete data
 echo "Deleting data..."
-INDXS=$(curl -s -XGET {{ MASTERIP }}:9200/_cat/indices?v | egrep 'logstash|elastalert|so-' | awk '{ print $3 }')
+INDXS=$(curl -s -XGET {{ MANAGERIP }}:9200/_cat/indices?v | egrep 'logstash|elastalert|so-' | awk '{ print $3 }')
 for INDX in ${INDXS}
 do
-curl -XDELETE "{{ MASTERIP }}:9200/${INDX}" > /dev/null 2>&1
+curl -XDELETE "{{ MANAGERIP }}:9200/${INDX}" > /dev/null 2>&1
 done
 #Start Logstash/Filebeat

View File

@@ -1,5 +1,5 @@
 #!/bin/bash
-MASTER=MASTER
+MANAGER=MANAGER
 VERSION="HH1.1.4"
 TRUSTED_CONTAINERS=( \
 "so-nginx:$VERSION" \
@@ -37,7 +37,7 @@ do
echo "Downloading $i" echo "Downloading $i"
docker pull --disable-content-trust=false docker.io/soshybridhunter/$i docker pull --disable-content-trust=false docker.io/soshybridhunter/$i
# Tag it with the new registry destination # Tag it with the new registry destination
docker tag soshybridhunter/$i $MASTER:5000/soshybridhunter/$i docker tag soshybridhunter/$i $MANAGER:5000/soshybridhunter/$i
docker push $MASTER:5000/soshybridhunter/$i docker push $MANAGER:5000/soshybridhunter/$i
docker rmi soshybridhunter/$i docker rmi soshybridhunter/$i
done done
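The loop above stages each image for minions: pull from Docker Hub, retag for the manager's local registry on port 5000, push, then drop the Hub-tagged copy. A minimal sketch of the same flow driven from Python (registry host and image list are placeholders standing in for $MANAGER and TRUSTED_CONTAINERS; the real script also enables Docker content trust on the pull):

    # Sketch of the pull -> retag -> push-to-local-registry flow shown above.
    # REGISTRY and IMAGES are placeholders, not values taken from the script.
    import subprocess

    REGISTRY = "manager.example.local:5000"   # stands in for $MANAGER:5000
    IMAGES = ["so-nginx:HH1.1.4"]             # stands in for TRUSTED_CONTAINERS

    for image in IMAGES:
        hub_ref = f"soshybridhunter/{image}"
        local_ref = f"{REGISTRY}/soshybridhunter/{image}"
        subprocess.run(["docker", "pull", f"docker.io/{hub_ref}"], check=True)  # fetch from Docker Hub
        subprocess.run(["docker", "tag", hub_ref, local_ref], check=True)       # retag for the local registry
        subprocess.run(["docker", "push", local_ref], check=True)               # publish where minions can pull it
        subprocess.run(["docker", "rmi", hub_ref], check=True)                  # drop the Hub-tagged copy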

View File

@@ -15,7 +15,7 @@
 #
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
-IP={{ salt['grains.get']('ip_interfaces').get(salt['pillar.get']('sensor:mainint', salt['pillar.get']('master:mainint', salt['pillar.get']('elasticsearch:mainint', salt['pillar.get']('host:mainint')))))[0] }}
+IP={{ salt['grains.get']('ip_interfaces').get(salt['pillar.get']('sensor:mainint', salt['pillar.get']('manager:mainint', salt['pillar.get']('elasticsearch:mainint', salt['pillar.get']('host:mainint')))))[0] }}
 ESPORT=9200
 THEHIVEESPORT=9400

View File

@@ -1,4 +1,4 @@
-{% set MASTERIP = salt['pillar.get']('master:mainip', '') %}
+{% set MANAGERIP = salt['pillar.get']('manager:mainip', '') %}
 #!/bin/bash
 # Copyright 2014,2015,2016,2017,2018,2019 Security Onion Solutions, LLC
 #
@@ -16,7 +16,7 @@
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 default_salt_dir=/opt/so/saltstack/default
-ELASTICSEARCH_HOST="{{ MASTERIP}}"
+ELASTICSEARCH_HOST="{{ MANAGERIP}}"
 ELASTICSEARCH_PORT=9200
 #ELASTICSEARCH_AUTH=""

View File

@@ -15,9 +15,9 @@
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
-{% set MASTER = salt['grains.get']('master') %}
+{% set MANAGER = salt['grains.get']('manager') %}
 {% set VERSION = salt['pillar.get']('static:soversion') %}
-{%- set MASTERIP = salt['pillar.get']('static:masterip') -%}
+{%- set MANAGERIP = salt['pillar.get']('static:managerip') -%}
 function usage {
 cat << EOF
@@ -30,13 +30,13 @@ EOF
 function pcapinfo() {
 PCAP=$1
 ARGS=$2
-docker run --rm -v $PCAP:/input.pcap --entrypoint capinfos {{ MASTER }}:5000/soshybridhunter/so-pcaptools:{{ VERSION }} /input.pcap $ARGS
+docker run --rm -v $PCAP:/input.pcap --entrypoint capinfos {{ MANAGER }}:5000/soshybridhunter/so-pcaptools:{{ VERSION }} /input.pcap $ARGS
 }
 function pcapfix() {
 PCAP=$1
 PCAP_OUT=$2
-docker run --rm -v $PCAP:/input.pcap -v $PCAP_OUT:$PCAP_OUT --entrypoint pcapfix {{ MASTER }}:5000/soshybridhunter/so-pcaptools:{{ VERSION }} /input.pcap -o $PCAP_OUT > /dev/null 2>&1
+docker run --rm -v $PCAP:/input.pcap -v $PCAP_OUT:$PCAP_OUT --entrypoint pcapfix {{ MANAGER }}:5000/soshybridhunter/so-pcaptools:{{ VERSION }} /input.pcap -o $PCAP_OUT > /dev/null 2>&1
 }
 function suricata() {
@@ -57,7 +57,7 @@ function suricata() {
 -v ${NSM_PATH}/:/nsm/:rw \
 -v $PCAP:/input.pcap:ro \
 -v /opt/so/conf/suricata/bpf:/etc/suricata/bpf:ro \
-{{ MASTER }}:5000/soshybridhunter/so-suricata:{{ VERSION }} \
+{{ MANAGER }}:5000/soshybridhunter/so-suricata:{{ VERSION }} \
 --runmode single -k none -r /input.pcap > $LOG_PATH/console.log 2>&1
 }
@@ -85,7 +85,7 @@ function zeek() {
 -v /opt/so/conf/zeek/bpf:/opt/zeek/etc/bpf:ro \
 --entrypoint /opt/zeek/bin/zeek \
 -w /nsm/zeek/logs \
-{{ MASTER }}:5000/soshybridhunter/so-zeek:{{ VERSION }} \
+{{ MANAGER }}:5000/soshybridhunter/so-zeek:{{ VERSION }} \
 -C -r /input.pcap local > $NSM_PATH/logs/console.log 2>&1
 }
@@ -212,7 +212,7 @@ cat << EOF
 Import complete!
 You can use the following hyperlink to view data in the time range of your import. You can triple-click to quickly highlight the entire hyperlink and you can then copy it into your browser:
-https://{{ MASTERIP }}/kibana/app/kibana#/dashboard/a8411b30-6d03-11ea-b301-3d6c35840645?_g=(refreshInterval:(display:Off,pause:!f,value:0),time:(from:'${START_OLDEST}T00:00:00.000Z',mode:absolute,to:'${END_NEWEST}T00:00:00.000Z'))
+https://{{ MANAGERIP }}/kibana/app/kibana#/dashboard/a8411b30-6d03-11ea-b301-3d6c35840645?_g=(refreshInterval:(display:Off,pause:!f,value:0),time:(from:'${START_OLDEST}T00:00:00.000Z',mode:absolute,to:'${END_NEWEST}T00:00:00.000Z'))
 or you can manually set your Time Range to be:
 From: $START_OLDEST To: $END_NEWEST

View File

@@ -1,9 +1,9 @@
 #!/bin/bash
 #
-# {%- set FLEET_MASTER = salt['pillar.get']('static:fleet_master', False) -%}
+# {%- set FLEET_MANAGER = salt['pillar.get']('static:fleet_manager', False) -%}
 # {%- set FLEET_NODE = salt['pillar.get']('static:fleet_node', False) -%}
 # {%- set FLEET_IP = salt['pillar.get']('static:fleet_ip', '') %}
-# {%- set MASTER = salt['pillar.get']('master:url_base', '') %}
+# {%- set MANAGER = salt['pillar.get']('manager:url_base', '') %}
 #
 # Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
 #
@@ -20,7 +20,7 @@
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
-KIBANA_HOST={{ MASTER }}
+KIBANA_HOST={{ MANAGER }}
 KSO_PORT=5601
 OUTFILE="saved_objects.ndjson"
 curl -s -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -XPOST $KIBANA_HOST:$KSO_PORT/api/saved_objects/_export -d '{ "type": [ "index-pattern", "config", "visualization", "dashboard", "search" ], "excludeExportDetails": false }' > $OUTFILE
@@ -29,7 +29,7 @@ curl -s -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -XPOST $KIBANA_H
sed -i "s/$KIBANA_HOST/PLACEHOLDER/g" $OUTFILE sed -i "s/$KIBANA_HOST/PLACEHOLDER/g" $OUTFILE
# Clean up for Fleet, if applicable # Clean up for Fleet, if applicable
# {% if FLEET_NODE or FLEET_MASTER %} # {% if FLEET_NODE or FLEET_MANAGER %}
# Fleet IP # Fleet IP
sed -i "s/{{ MASTER }}/FLEETPLACEHOLDER/g" $OUTFILE sed -i "s/{{ MANAGER }}/FLEETPLACEHOLDER/g" $OUTFILE
# {% endif %} # {% endif %}

View File

@@ -1,7 +1,7 @@
 {%- if grains['role'] in ['so-node', 'so-searchnode', 'so-heavynode'] %}
 {%- set cur_close_days = salt['pillar.get']('elasticsearch:cur_close_days', '') -%}
-{%- elif grains['role'] in ['so-eval', 'so-mastersearch', 'so-standalone'] %}
-{%- set cur_close_days = salt['pillar.get']('master:cur_close_days', '') -%}
+{%- elif grains['role'] in ['so-eval', 'so-managersearch', 'so-standalone'] %}
+{%- set cur_close_days = salt['pillar.get']('manager:cur_close_days', '') -%}
 {%- endif -%}
 ---

View File

@@ -1,7 +1,7 @@
 {%- if grains['role'] in ['so-node', 'so-searchnode', 'so-heavynode'] %}
 {%- set log_size_limit = salt['pillar.get']('elasticsearch:log_size_limit', '') -%}
-{%- elif grains['role'] in ['so-eval', 'so-mastersearch', 'so-standalone'] %}
-{%- set log_size_limit = salt['pillar.get']('master:log_size_limit', '') -%}
+{%- elif grains['role'] in ['so-eval', 'so-managersearch', 'so-standalone'] %}
+{%- set log_size_limit = salt['pillar.get']('manager:log_size_limit', '') -%}
 {%- endif %}
 ---
 # Remember, leave a key empty if there is no value. None will be a string,

View File

@@ -5,10 +5,10 @@
 {%- set ELASTICSEARCH_HOST = salt['pillar.get']('elasticsearch:mainip', '') -%}
 {%- set ELASTICSEARCH_PORT = salt['pillar.get']('elasticsearch:es_port', '') -%}
 {%- set LOG_SIZE_LIMIT = salt['pillar.get']('elasticsearch:log_size_limit', '') -%}
-{%- elif grains['role'] in ['so-eval', 'so-mastersearch', 'so-standalone'] %}
-{%- set ELASTICSEARCH_HOST = salt['pillar.get']('master:mainip', '') -%}
-{%- set ELASTICSEARCH_PORT = salt['pillar.get']('master:es_port', '') -%}
-{%- set LOG_SIZE_LIMIT = salt['pillar.get']('master:log_size_limit', '') -%}
+{%- elif grains['role'] in ['so-eval', 'so-managersearch', 'so-standalone'] %}
+{%- set ELASTICSEARCH_HOST = salt['pillar.get']('manager:mainip', '') -%}
+{%- set ELASTICSEARCH_PORT = salt['pillar.get']('manager:es_port', '') -%}
+{%- set LOG_SIZE_LIMIT = salt['pillar.get']('manager:log_size_limit', '') -%}
 {%- endif -%}
 # Copyright 2014,2015,2016,2017,2018 Security Onion Solutions, LLC

View File

@@ -1,7 +1,7 @@
 {% if grains['role'] in ['so-node', 'so-heavynode'] %}
 {%- set elasticsearch = salt['pillar.get']('elasticsearch:mainip', '') -%}
-{% elif grains['role'] in ['so-eval', 'so-mastersearch', 'so-standalone'] %}
-{%- set elasticsearch = salt['pillar.get']('master:mainip', '') -%}
+{% elif grains['role'] in ['so-eval', 'so-managersearch', 'so-standalone'] %}
+{%- set elasticsearch = salt['pillar.get']('manager:mainip', '') -%}
 {%- endif %}
 ---

View File

@@ -1,6 +1,6 @@
 {% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
-{% set MASTER = salt['grains.get']('master') %}
-{% if grains['role'] in ['so-eval', 'so-node', 'so-mastersearch', 'so-heavynode', 'so-standalone'] %}
+{% set MANAGER = salt['grains.get']('manager') %}
+{% if grains['role'] in ['so-eval', 'so-node', 'so-managersearch', 'so-heavynode', 'so-standalone'] %}
 # Curator
 # Create the group
 curatorgroup:
@@ -119,7 +119,7 @@ so-curatordeletecron:
 so-curator:
 docker_container.running:
-- image: {{ MASTER }}:5000/soshybridhunter/so-curator:{{ VERSION }}
+- image: {{ MANAGER }}:5000/soshybridhunter/so-curator:{{ VERSION }}
 - hostname: curator
 - name: so-curator
 - user: curator

View File

@@ -127,11 +127,11 @@
 @load policy/hassh
 # You can load your own intel into:
-# /opt/so/saltstack/bro/policy/intel/ on the master
+# /opt/so/saltstack/bro/policy/intel/ on the manager
 @load intel
 # Load a custom Bro policy
-# /opt/so/saltstack/bro/policy/custom/ on the master
+# /opt/so/saltstack/bro/policy/custom/ on the manager
 #@load custom/somebropolicy.bro
 # Write logs in JSON

View File

@@ -121,11 +121,11 @@
 @load policy/ja3
 # You can load your own intel into:
-# /opt/so/saltstack/bro/policy/intel/ on the master
+# /opt/so/saltstack/bro/policy/intel/ on the manager
 @load intel
 # Load a custom Bro policy
-# /opt/so/saltstack/bro/policy/custom/ on the master
+# /opt/so/saltstack/bro/policy/custom/ on the manager
 #@load custom/somebropolicy.bro
 # Use JSON

View File

@@ -1,5 +1,5 @@
-{% set esip = salt['pillar.get']('master:mainip', '') %}
-{% set esport = salt['pillar.get']('master:es_port', '') %}
+{% set esip = salt['pillar.get']('manager:mainip', '') %}
+{% set esport = salt['pillar.get']('manager:es_port', '') %}
 # This is the folder that contains the rule yaml files
 # Any .yaml file will be loaded as a rule
 rules_folder: /opt/elastalert/rules/

View File

@@ -1,7 +1,7 @@
-{% set es = salt['pillar.get']('static:masterip', '') %}
-{% set hivehost = salt['pillar.get']('static:masterip', '') %}
+{% set es = salt['pillar.get']('static:managerip', '') %}
+{% set hivehost = salt['pillar.get']('static:managerip', '') %}
 {% set hivekey = salt['pillar.get']('static:hivekey', '') %}
-{% set MASTER = salt['pillar.get']('master:url_base', '') %}
+{% set MANAGER = salt['pillar.get']('manager:url_base', '') %}
 # Elastalert rule to forward Suricata alerts from Security Onion to a specified TheHive instance.
 #
@@ -39,7 +39,7 @@ hive_alert_config:
 title: '{match[rule][name]}'
 type: 'NIDS'
 source: 'SecurityOnion'
-description: "`SOC Hunt Pivot:` \n\n <https://{{MASTER}}/#/hunt?q=network.community_id%3A%20%20%22{match[network][community_id]}%22%20%7C%20groupby%20source.ip%20destination.ip,event.module,%20event.dataset> \n\n `Kibana Dashboard Pivot:` \n\n <https://{{MASTER}}/kibana/app/kibana#/dashboard/30d0ac90-729f-11ea-8dd2-9d8795a1200b?_g=(filters:!(('$state':(store:globalState),meta:(alias:!n,disabled:!f,index:'*:so-*',key:network.community_id,negate:!f,params:(query:'{match[network][community_id]}'),type:phrase),query:(match_phrase:(network.community_id:'{match[network][community_id]}')))),refreshInterval:(pause:!t,value:0),time:(from:now-7d,to:now))> \n\n `IPs: `{match[source][ip]}:{match[source][port]} --> {match[destination][ip]}:{match[destination][port]} \n\n `Signature:`{match[rule][rule]}"
+description: "`SOC Hunt Pivot:` \n\n <https://{{MANAGER}}/#/hunt?q=network.community_id%3A%20%20%22{match[network][community_id]}%22%20%7C%20groupby%20source.ip%20destination.ip,event.module,%20event.dataset> \n\n `Kibana Dashboard Pivot:` \n\n <https://{{MANAGER}}/kibana/app/kibana#/dashboard/30d0ac90-729f-11ea-8dd2-9d8795a1200b?_g=(filters:!(('$state':(store:globalState),meta:(alias:!n,disabled:!f,index:'*:so-*',key:network.community_id,negate:!f,params:(query:'{match[network][community_id]}'),type:phrase),query:(match_phrase:(network.community_id:'{match[network][community_id]}')))),refreshInterval:(pause:!t,value:0),time:(from:now-7d,to:now))> \n\n `IPs: `{match[source][ip]}:{match[source][port]} --> {match[destination][ip]}:{match[destination][port]} \n\n `Signature:`{match[rule][rule]}"
 severity: 2
 tags: ['{match[rule][uuid]}','{match[source][ip]}','{match[destination][ip]}']
 tlp: 3

View File

@@ -1,7 +1,7 @@
-{% set es = salt['pillar.get']('static:masterip', '') %}
-{% set hivehost = salt['pillar.get']('static:masterip', '') %}
+{% set es = salt['pillar.get']('static:managerip', '') %}
+{% set hivehost = salt['pillar.get']('static:managerip', '') %}
 {% set hivekey = salt['pillar.get']('static:hivekey', '') %}
-{% set MASTER = salt['pillar.get']('master:url_base', '') %}
+{% set MANAGER = salt['pillar.get']('manager:url_base', '') %}
 # Elastalert rule to forward high level Wazuh alerts from Security Onion to a specified TheHive instance.
 #
@@ -38,7 +38,7 @@ hive_alert_config:
 title: '{match[rule][name]}'
 type: 'wazuh'
 source: 'SecurityOnion'
-description: "`SOC Hunt Pivot:` \n\n <https://{{MASTER}}/#/hunt?q=event.module%3A%20ossec%20AND%20rule.id%3A{match[rule][id]}%20%7C%20groupby%20host.name%20rule.name> \n\n `Kibana Dashboard Pivot:` \n\n <https://{{MASTER}}/kibana/app/kibana#/dashboard/ed6f7e20-e060-11e9-8f0c-2ddbf5ed9290?_g=(refreshInterval:(display:Off,pause:!f,value:0),time:(from:now-24h,mode:quick,to:now))&_a=(columns:!(_source),index:'*:logstash-*',interval:auto,query:(query_string:(analyze_wildcard:!t,query:'sid:')),sort:!('@timestamp',desc))>"
+description: "`SOC Hunt Pivot:` \n\n <https://{{MANAGER}}/#/hunt?q=event.module%3A%20ossec%20AND%20rule.id%3A{match[rule][id]}%20%7C%20groupby%20host.name%20rule.name> \n\n `Kibana Dashboard Pivot:` \n\n <https://{{MANAGER}}/kibana/app/kibana#/dashboard/ed6f7e20-e060-11e9-8f0c-2ddbf5ed9290?_g=(refreshInterval:(display:Off,pause:!f,value:0),time:(from:now-24h,mode:quick,to:now))&_a=(columns:!(_source),index:'*:logstash-*',interval:auto,query:(query_string:(analyze_wildcard:!t,query:'sid:')),sort:!('@timestamp',desc))>"
 severity: 2
 tags: ['{match[rule][id]}','{match[host][name]}']
 tlp: 3

View File

@@ -13,12 +13,12 @@
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 {% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
-{% set MASTER = salt['grains.get']('master') %}
-{% if grains['role'] in ['so-eval','so-mastersearch', 'so-master', 'so-standalone'] %}
-{% set esalert = salt['pillar.get']('master:elastalert', '1') %}
-{% set esip = salt['pillar.get']('master:mainip', '') %}
-{% set esport = salt['pillar.get']('master:es_port', '') %}
+{% set MANAGER = salt['grains.get']('manager') %}
+{% if grains['role'] in ['so-eval','so-managersearch', 'so-manager', 'so-standalone'] %}
+{% set esalert = salt['pillar.get']('manager:elastalert', '1') %}
+{% set esip = salt['pillar.get']('manager:mainip', '') %}
+{% set esport = salt['pillar.get']('manager:es_port', '') %}
 {% elif grains['role'] == 'so-node' %}
 {% set esalert = salt['pillar.get']('elasticsearch:elastalert', '0') %}
 {% endif %}
@@ -101,7 +101,7 @@ elastaconf:
 so-elastalert:
 docker_container.running:
-- image: {{ MASTER }}:5000/soshybridhunter/so-elastalert:{{ VERSION }}
+- image: {{ MANAGER }}:5000/soshybridhunter/so-elastalert:{{ VERSION }}
 - hostname: elastalert
 - name: so-elastalert
 - user: elastalert

View File

@@ -1,5 +1,5 @@
-{% if grains['role'] == 'so-master' or grains['role'] == 'so-eval' %}
-{%- set esclustername = salt['pillar.get']('master:esclustername', '') %}
+{% if grains['role'] == 'so-manager' or grains['role'] == 'so-eval' %}
+{%- set esclustername = salt['pillar.get']('manager:esclustername', '') %}
 cluster.name: "{{ esclustername }}"
 network.host: 0.0.0.0

View File

@@ -13,7 +13,7 @@
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 {% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
-{% set MASTER = salt['grains.get']('master') %}
+{% set MANAGER = salt['grains.get']('manager') %}
 {% set FEATURES = salt['pillar.get']('elastic:features', False) %}
 {% if FEATURES %}
@@ -22,9 +22,9 @@
 {% set FEATURES = '' %}
 {% endif %}
-{% if grains['role'] in ['so-eval','so-mastersearch', 'so-master', 'so-standalone'] %}
-{% set esclustername = salt['pillar.get']('master:esclustername', '') %}
-{% set esheap = salt['pillar.get']('master:esheap', '') %}
+{% if grains['role'] in ['so-eval','so-managersearch', 'so-manager', 'so-standalone'] %}
+{% set esclustername = salt['pillar.get']('manager:esclustername', '') %}
+{% set esheap = salt['pillar.get']('manager:esheap', '') %}
 {% elif grains['role'] in ['so-node','so-heavynode'] %}
 {% set esclustername = salt['pillar.get']('elasticsearch:esclustername', '') %}
 {% set esheap = salt['pillar.get']('elasticsearch:esheap', '') %}
@@ -101,7 +101,7 @@ eslogdir:
 so-elasticsearch:
 docker_container.running:
-- image: {{ MASTER }}:5000/soshybridhunter/so-elasticsearch:{{ VERSION }}{{ FEATURES }}
+- image: {{ MANAGER }}:5000/soshybridhunter/so-elasticsearch:{{ VERSION }}{{ FEATURES }}
 - hostname: elasticsearch
 - name: so-elasticsearch
 - user: elasticsearch
@@ -141,7 +141,7 @@ so-elasticsearch-pipelines:
 - file: esyml
 - file: so-elasticsearch-pipelines-file
-{% if grains['role'] in ['so-master', 'so-eval', 'so-mastersearch', 'so-standalone'] %}
+{% if grains['role'] in ['so-manager', 'so-eval', 'so-managersearch', 'so-standalone'] %}
 so-elasticsearch-templates:
 cmd.run:
 - name: /usr/sbin/so-elasticsearch-templates

View File

@@ -1,7 +1,7 @@
 {%- if grains.role == 'so-heavynode' %}
-{%- set MASTER = salt['pillar.get']('sensor:mainip' '') %}
+{%- set MANAGER = salt['pillar.get']('sensor:mainip' '') %}
 {%- else %}
-{%- set MASTER = grains['master'] %}
+{%- set MANAGER = grains['manager'] %}
 {%- endif %}
@@ -9,7 +9,7 @@
 {%- set BROVER = salt['pillar.get']('static:broversion', 'COMMUNITY') %}
 {%- set WAZUHENABLED = salt['pillar.get']('static:wazuh', '0') %}
 {%- set STRELKAENABLED = salt['pillar.get']('strelka:enabled', '0') %}
-{%- set FLEETMASTER = salt['pillar.get']('static:fleet_master', False) -%}
+{%- set FLEETMANAGER = salt['pillar.get']('static:fleet_manager', False) -%}
 {%- set FLEETNODE = salt['pillar.get']('static:fleet_node', False) -%}
 name: {{ HOSTNAME }}
@@ -214,7 +214,7 @@ filebeat.inputs:
 {%- endif %}
-{%- if FLEETMASTER or FLEETNODE %}
+{%- if FLEETMANAGER or FLEETNODE %}
 - type: log
 paths:
@@ -252,7 +252,7 @@ output.{{ type }}:
{%- if grains['role'] == "so-eval" %} {%- if grains['role'] == "so-eval" %}
output.elasticsearch: output.elasticsearch:
enabled: true enabled: true
hosts: ["{{ MASTER }}:9200"] hosts: ["{{ MANAGER }}:9200"]
pipelines: pipelines:
- pipeline: "%{[module]}.%{[dataset]}" - pipeline: "%{[module]}.%{[dataset]}"
indices: indices:
@@ -280,7 +280,7 @@ output.logstash:
 enabled: true
 # The Logstash hosts
-hosts: ["{{ MASTER }}:5644"]
+hosts: ["{{ MANAGER }}:5644"]
 # Number of workers per Logstash host.
 #worker: 1

View File

@@ -12,8 +12,8 @@
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 {% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
-{% set MASTER = salt['grains.get']('master') %}
-{% set MASTERIP = salt['pillar.get']('static:masterip', '') %}
+{% set MANAGER = salt['grains.get']('manager') %}
+{% set MANAGERIP = salt['pillar.get']('static:managerip', '') %}
 {% set FEATURES = salt['pillar.get']('elastic:features', False) %}
 {% if FEATURES %}
 {% set FEATURES = "-features" %}
@@ -51,10 +51,10 @@ filebeatconfsync:
 OUTPUT: {{ salt['pillar.get']('filebeat:config:output', {}) }}
 so-filebeat:
 docker_container.running:
-- image: {{ MASTER }}:5000/soshybridhunter/so-filebeat:{{ VERSION }}{{ FEATURES }}
+- image: {{ MANAGER }}:5000/soshybridhunter/so-filebeat:{{ VERSION }}{{ FEATURES }}
 - hostname: so-filebeat
 - user: root
-- extra_hosts: {{ MASTER }}:{{ MASTERIP }}
+- extra_hosts: {{ MANAGER }}:{{ MANAGERIP }}
 - binds:
 - /nsm:/nsm:ro
 - /opt/so/log/filebeat:/usr/share/filebeat/logs:rw

View File

@@ -6,7 +6,7 @@ role:
 chain:
 DOCKER-USER:
 hostgroups:
-master:
+manager:
 portgroups:
 - {{ portgroups.wazuh_agent }}
 - {{ portgroups.wazuh_api }}
@@ -85,12 +85,12 @@ role:
 - {{ portgroups.all }}
 minion:
 portgroups:
-- {{ portgroups.salt_master }}
-master:
+- {{ portgroups.salt_manager }}
+manager:
 chain:
 DOCKER-USER:
 hostgroups:
-master:
+manager:
 portgroups:
 - {{ portgroups.wazuh_agent }}
 - {{ portgroups.wazuh_api }}
@@ -166,12 +166,12 @@ role:
 - {{ portgroups.all }}
 minion:
 portgroups:
-- {{ portgroups.salt_master }}
-mastersearch:
+- {{ portgroups.salt_manager }}
+managersearch:
 chain:
 DOCKER-USER:
 hostgroups:
-master:
+manager:
 portgroups:
 - {{ portgroups.wazuh_agent }}
 - {{ portgroups.wazuh_api }}
@@ -247,12 +247,12 @@ role:
 - {{ portgroups.all }}
 minion:
 portgroups:
-- {{ portgroups.salt_master }}
+- {{ portgroups.salt_manager }}
 standalone:
 chain:
 DOCKER-USER:
 hostgroups:
-master:
+manager:
 portgroups:
 - {{ portgroups.wazuh_agent }}
 - {{ portgroups.wazuh_api }}
@@ -328,12 +328,12 @@ role:
 - {{ portgroups.all }}
 minion:
 portgroups:
-- {{ portgroups.salt_master }}
+- {{ portgroups.salt_manager }}
 helixsensor:
 chain:
 DOCKER-USER:
 hostgroups:
-master:
+manager:
 portgroups:
 - {{ portgroups.wazuh_agent }}
 - {{ portgroups.playbook }}
@@ -391,12 +391,12 @@ role:
 - {{ portgroups.all }}
 minion:
 portgroups:
-- {{ portgroups.salt_master }}
+- {{ portgroups.salt_manager }}
 searchnode:
 chain:
 DOCKER-USER:
 hostgroups:
-master:
+manager:
 portgroups:
 - {{ portgroups.elasticsearch_node }}
 dockernet:

View File

@@ -19,4 +19,4 @@ firewall:
 ips:
 delete:
 insert:
-- {{ salt['grains.get']('ip_interfaces').get(salt['pillar.get']('sensor:mainint', salt['pillar.get']('master:mainint', salt['pillar.get']('elasticsearch:mainint', salt['pillar.get']('host:mainint')))))[0] }}
+- {{ salt['grains.get']('ip_interfaces').get(salt['pillar.get']('sensor:mainint', salt['pillar.get']('manager:mainint', salt['pillar.get']('elasticsearch:mainint', salt['pillar.get']('host:mainint')))))[0] }}

View File

@@ -61,7 +61,7 @@ firewall:
 redis:
 tcp:
 - 6379
-salt_master:
+salt_manager:
 tcp:
 - 4505
 - 4506

View File

@@ -1,4 +1,4 @@
-{% set MASTER = salt['grains.get']('master') %}
+{% set MANAGER = salt['grains.get']('manager') %}
 {% set ENROLLSECRET = salt['pillar.get']('secrets:fleet_enroll-secret') %}
 {% set CURRENTPACKAGEVERSION = salt['pillar.get']('static:fleet_packages-version') %}
 {% set VERSION = salt['pillar.get']('static:soversion') %}
@@ -19,6 +19,6 @@ so/fleet:
 mainip: {{ grains.host }}
 enroll-secret: {{ ENROLLSECRET }}
 current-package-version: {{ CURRENTPACKAGEVERSION }}
-master: {{ MASTER }}
+manager: {{ MANAGER }}
 version: {{ VERSION }}

View File

@@ -2,14 +2,14 @@
 {%- set FLEETPASS = salt['pillar.get']('secrets:fleet', None) -%}
 {%- set FLEETJWT = salt['pillar.get']('secrets:fleet_jwt', None) -%}
 {% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
-{% set MASTER = salt['grains.get']('master') %}
+{% set MANAGER = salt['grains.get']('manager') %}
 {% set FLEETARCH = salt['grains.get']('role') %}
 {% if FLEETARCH == "so-fleet" %}
 {% set MAININT = salt['pillar.get']('host:mainint') %}
 {% set MAINIP = salt['grains.get']('ip_interfaces').get(MAININT)[0] %}
 {% else %}
-{% set MAINIP = salt['pillar.get']('static:masterip') %}
+{% set MAINIP = salt['pillar.get']('static:managerip') %}
 {% endif %}
 include:
@@ -105,7 +105,7 @@ fleet_password_none:
 so-fleet:
 docker_container.running:
-- image: {{ MASTER }}:5000/soshybridhunter/so-fleet:{{ VERSION }}
+- image: {{ MANAGER }}:5000/soshybridhunter/so-fleet:{{ VERSION }}
 - hostname: so-fleet
 - port_bindings:
 - 0.0.0.0:8080:8080

View File

@@ -1,4 +1,4 @@
-{%- set FLEETMASTER = salt['pillar.get']('static:fleet_master', False) -%}
+{%- set FLEETMANAGER = salt['pillar.get']('static:fleet_manager', False) -%}
 {%- set FLEETNODE = salt['pillar.get']('static:fleet_node', False) -%}
 {%- set FLEETHOSTNAME = salt['pillar.get']('static:fleet_hostname', False) -%}
 {%- set FLEETIP = salt['pillar.get']('static:fleet_ip', False) -%}

View File

@@ -9,14 +9,14 @@ providers:
 disableDeletion: false
 editable: true
 options:
-path: /etc/grafana/grafana_dashboards/master
+path: /etc/grafana/grafana_dashboards/manager
 - name: 'Master Search'
 folder: 'Master Search'
 type: file
 disableDeletion: false
 editable: true
 options:
-path: /etc/grafana/grafana_dashboards/mastersearch
+path: /etc/grafana/grafana_dashboards/managersearch
 - name: 'Sensor Nodes'
 folder: 'Sensor Nodes'
 type: file

View File

@@ -1,4 +1,4 @@
-{%- set MASTER = salt['pillar.get']('static:masterip', '') %}
+{%- set MANAGER = salt['pillar.get']('static:managerip', '') %}
 apiVersion: 1
 deleteDatasources:
@@ -10,7 +10,7 @@ datasources:
 type: influxdb
 access: proxy
 database: telegraf
-url: https://{{ MASTER }}:8086
+url: https://{{ MANAGER }}:8086
 jsonData:
 tlsAuth: false
 tlsAuthWithCACert: false

View File

@@ -1,8 +1,8 @@
-{% set GRAFANA = salt['pillar.get']('master:grafana', '0') %}
-{% set MASTER = salt['grains.get']('master') %}
+{% set GRAFANA = salt['pillar.get']('manager:grafana', '0') %}
+{% set MANAGER = salt['grains.get']('manager') %}
 {% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
-{% if grains['role'] in ['so-master', 'so-mastersearch', 'so-eval', 'so-standalone'] and GRAFANA == 1 %}
+{% if grains['role'] in ['so-manager', 'so-managersearch', 'so-eval', 'so-standalone'] and GRAFANA == 1 %}
 # Grafana all the things
 grafanadir:
@@ -28,14 +28,14 @@ grafanadashdir:
 grafanadashmdir:
 file.directory:
-- name: /opt/so/conf/grafana/grafana_dashboards/master
+- name: /opt/so/conf/grafana/grafana_dashboards/manager
 - user: 939
 - group: 939
 - makedirs: True
 grafanadashmsdir:
 file.directory:
-- name: /opt/so/conf/grafana/grafana_dashboards/mastersearch
+- name: /opt/so/conf/grafana/grafana_dashboards/managersearch
 - user: 939
 - group: 939
 - makedirs: True
@@ -76,17 +76,17 @@ grafanaconf:
 - template: jinja
 - source: salt://grafana/etc
-{% if salt['pillar.get']('mastertab', False) %}
-{% for SN, SNDATA in salt['pillar.get']('mastertab', {}).items() %}
+{% if salt['pillar.get']('managertab', False) %}
+{% for SN, SNDATA in salt['pillar.get']('managertab', {}).items() %}
 {% set NODETYPE = SN.split('_')|last %}
 {% set SN = SN | regex_replace('_' ~ NODETYPE, '') %}
-dashboard-master:
+dashboard-manager:
 file.managed:
-- name: /opt/so/conf/grafana/grafana_dashboards/master/{{ SN }}-Master.json
+- name: /opt/so/conf/grafana/grafana_dashboards/manager/{{ SN }}-Master.json
 - user: 939
 - group: 939
 - template: jinja
-- source: salt://grafana/dashboards/master/master.json
+- source: salt://grafana/dashboards/manager/manager.json
 - defaults:
 SERVERNAME: {{ SN }}
 MANINT: {{ SNDATA.manint }}
@@ -99,17 +99,17 @@ dashboard-master:
 {% endfor %}
 {% endif %}
-{% if salt['pillar.get']('mastersearchtab', False) %}
-{% for SN, SNDATA in salt['pillar.get']('mastersearchtab', {}).items() %}
+{% if salt['pillar.get']('managersearchtab', False) %}
+{% for SN, SNDATA in salt['pillar.get']('managersearchtab', {}).items() %}
 {% set NODETYPE = SN.split('_')|last %}
 {% set SN = SN | regex_replace('_' ~ NODETYPE, '') %}
-dashboard-mastersearch:
+dashboard-managersearch:
 file.managed:
-- name: /opt/so/conf/grafana/grafana_dashboards/mastersearch/{{ SN }}-MasterSearch.json
+- name: /opt/so/conf/grafana/grafana_dashboards/managersearch/{{ SN }}-MasterSearch.json
 - user: 939
 - group: 939
 - template: jinja
-- source: salt://grafana/dashboards/mastersearch/mastersearch.json
+- source: salt://grafana/dashboards/managersearch/managersearch.json
 - defaults:
 SERVERNAME: {{ SN }}
 MANINT: {{ SNDATA.manint }}
@@ -216,7 +216,7 @@ dashboard-{{ SN }}:
 so-grafana:
 docker_container.running:
-- image: {{ MASTER }}:5000/soshybridhunter/so-grafana:{{ VERSION }}
+- image: {{ MANAGER }}:5000/soshybridhunter/so-grafana:{{ VERSION }}
 - hostname: grafana
 - user: socore
 - binds:

View File

@@ -13,7 +13,7 @@
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 {% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
-{% set MASTER = salt['grains.get']('master') %}
+{% set MANAGER = salt['grains.get']('manager') %}
 # IDSTools Setup
 idstoolsdir:
 file.directory:
@@ -60,7 +60,7 @@ synclocalnidsrules:
 so-idstools:
 docker_container.running:
-- image: {{ MASTER }}:5000/soshybridhunter/so-idstools:{{ VERSION }}
+- image: {{ MANAGER }}:5000/soshybridhunter/so-idstools:{{ VERSION }}
 - hostname: so-idstools
 - user: socore
 - binds:

View File

@@ -1,9 +1,9 @@
-{% set GRAFANA = salt['pillar.get']('master:grafana', '0') %}
-{% set MASTER = salt['grains.get']('master') %}
+{% set GRAFANA = salt['pillar.get']('manager:grafana', '0') %}
+{% set MANAGER = salt['grains.get']('manager') %}
 {% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
-{% if grains['role'] in ['so-master', 'so-mastersearch', 'so-eval', 'so-standalone'] and GRAFANA == 1 %}
+{% if grains['role'] in ['so-manager', 'so-managersearch', 'so-eval', 'so-standalone'] and GRAFANA == 1 %}
 # Influx DB
 influxconfdir:
@@ -26,7 +26,7 @@ influxdbconf:
 so-influxdb:
 docker_container.running:
-- image: {{ MASTER }}:5000/soshybridhunter/so-influxdb:{{ VERSION }}
+- image: {{ MANAGER }}:5000/soshybridhunter/so-influxdb:{{ VERSION }}
 - hostname: influxdb
 - environment:
 - INFLUXDB_HTTP_LOG_ENABLED=false

View File

@@ -1,4 +1,4 @@
-{%- set ES = salt['pillar.get']('master:mainip', '') -%}
+{%- set ES = salt['pillar.get']('manager:mainip', '') -%}
 # Wait for ElasticSearch to come up, so that we can query for version infromation
 echo -n "Waiting for ElasticSearch..."
 COUNT=0

View File

@@ -1,20 +1,20 @@
 #!/bin/bash
-# {%- set FLEET_MASTER = salt['pillar.get']('static:fleet_master', False) -%}
+# {%- set FLEET_MANAGER = salt['pillar.get']('static:fleet_manager', False) -%}
 # {%- set FLEET_NODE = salt['pillar.get']('static:fleet_node', False) -%}
-# {%- set MASTER = salt['pillar.get']('master:url_base', '') %}
+# {%- set MANAGER = salt['pillar.get']('manager:url_base', '') %}
 KIBANA_VERSION="7.6.1"
 # Copy template file
 cp /opt/so/conf/kibana/saved_objects.ndjson.template /opt/so/conf/kibana/saved_objects.ndjson
-# {% if FLEET_NODE or FLEET_MASTER %}
+# {% if FLEET_NODE or FLEET_MANAGER %}
 # Fleet IP
-sed -i "s/FLEETPLACEHOLDER/{{ MASTER }}/g" /opt/so/conf/kibana/saved_objects.ndjson
+sed -i "s/FLEETPLACEHOLDER/{{ MANAGER }}/g" /opt/so/conf/kibana/saved_objects.ndjson
 # {% endif %}
 # SOCtopus and Master
-sed -i "s/PLACEHOLDER/{{ MASTER }}/g" /opt/so/conf/kibana/saved_objects.ndjson
+sed -i "s/PLACEHOLDER/{{ MANAGER }}/g" /opt/so/conf/kibana/saved_objects.ndjson
 # Load saved objects
 curl -X POST "localhost:5601/api/saved_objects/_import" -H "kbn-xsrf: true" --form file=@/opt/so/conf/kibana/saved_objects.ndjson > /dev/null 2>&1

View File

@@ -1,6 +1,6 @@
 ---
 # Default Kibana configuration from kibana-docker.
-{%- set ES = salt['pillar.get']('master:mainip', '') -%}
+{%- set ES = salt['pillar.get']('manager:mainip', '') -%}
 server.name: kibana
 server.host: "0"
 server.basePath: /kibana

View File

@@ -1,5 +1,5 @@
 {% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
-{% set MASTER = salt['grains.get']('master') %}
+{% set MANAGER = salt['grains.get']('manager') %}
 {% set FEATURES = salt['pillar.get']('elastic:features', False) %}
 {% if FEATURES %}
 {% set FEATURES = "-features" %}
@@ -69,13 +69,13 @@ kibanabin:
 # Start the kibana docker
 so-kibana:
 docker_container.running:
-- image: {{ MASTER }}:5000/soshybridhunter/so-kibana:{{ VERSION }}{{ FEATURES }}
+- image: {{ MANAGER }}:5000/soshybridhunter/so-kibana:{{ VERSION }}{{ FEATURES }}
 - hostname: kibana
 - user: kibana
 - environment:
-- ELASTICSEARCH_HOST={{ MASTER }}
+- ELASTICSEARCH_HOST={{ MANAGER }}
 - ELASTICSEARCH_PORT=9200
-- MASTER={{ MASTER }}
+- MANAGER={{ MANAGER }}
 - binds:
 - /opt/so/conf/kibana/etc:/usr/share/kibana/config:rw
 - /opt/so/log/kibana:/var/log/kibana:rw
@@ -94,7 +94,7 @@ kibanadashtemplate:
 wait_for_kibana:
 module.run:
 - http.wait_for_successful_query:
-- url: "http://{{MASTER}}:5601/api/saved_objects/_find?type=config"
+- url: "http://{{MANAGER}}:5601/api/saved_objects/_find?type=config"
 - wait_for: 180
 - onchanges:
 - file: kibanadashtemplate

View File

@@ -13,7 +13,7 @@
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 {% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
-{% set MASTER = salt['grains.get']('master') %}
+{% set MANAGER = salt['grains.get']('manager') %}
 {% set FEATURES = salt['pillar.get']('elastic:features', False) %}
 {% if FEATURES %}
@@ -24,13 +24,13 @@
 # Logstash Section - Decide which pillar to use
 {% set lsheap = salt['pillar.get']('logstash_settings:lsheap', '') %}
-{% if grains['role'] in ['so-eval','so-mastersearch', 'so-master', 'so-standalone'] %}
-{% set freq = salt['pillar.get']('master:freq', '0') %}
-{% set dstats = salt['pillar.get']('master:domainstats', '0') %}
+{% if grains['role'] in ['so-eval','so-managersearch', 'so-manager', 'so-standalone'] %}
+{% set freq = salt['pillar.get']('manager:freq', '0') %}
+{% set dstats = salt['pillar.get']('manager:domainstats', '0') %}
 {% set nodetype = salt['grains.get']('role', '') %}
 {% elif grains['role'] == 'so-helix' %}
-{% set freq = salt['pillar.get']('master:freq', '0') %}
-{% set dstats = salt['pillar.get']('master:domainstats', '0') %}
+{% set freq = salt['pillar.get']('manager:freq', '0') %}
+{% set dstats = salt['pillar.get']('manager:domainstats', '0') %}
 {% set nodetype = salt['grains.get']('role', '') %}
 {% endif %}
@@ -159,7 +159,7 @@ lslogdir:
 so-logstash:
 docker_container.running:
-- image: {{ MASTER }}:5000/soshybridhunter/so-logstash:{{ VERSION }}{{ FEATURES }}
+- image: {{ MANAGER }}:5000/soshybridhunter/so-logstash:{{ VERSION }}{{ FEATURES }}
 - hostname: so-logstash
 - name: so-logstash
 - user: logstash

View File

@@ -1,13 +1,13 @@
 {%- if grains.role == 'so-heavynode' %}
-{%- set MASTER = salt['pillar.get']('elasticsearch:mainip', '') %}
+{%- set MANAGER = salt['pillar.get']('elasticsearch:mainip', '') %}
 {%- else %}
-{%- set MASTER = salt['pillar.get']('static:masterip', '') %}
+{%- set MANAGER = salt['pillar.get']('static:managerip', '') %}
 {% endif -%}
 {%- set THREADS = salt['pillar.get']('logstash_settings:ls_input_threads', '') %}
 input {
 redis {
-host => '{{ MASTER }}'
+host => '{{ MANAGER }}'
 data_type => 'list'
 key => 'logstash:unparsed'
 type => 'redis-input'

View File

@@ -1,5 +1,5 @@
{%- if grains['role'] == 'so-eval' -%} {%- if grains['role'] == 'so-eval' -%}
{%- set ES = salt['pillar.get']('master:mainip', '') -%} {%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- else %} {%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%} {%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %} {%- endif %}

View File

@@ -1,5 +1,5 @@
{%- if grains['role'] == 'so-eval' -%} {%- if grains['role'] == 'so-eval' -%}
{%- set ES = salt['pillar.get']('master:mainip', '') -%} {%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- else %} {%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%} {%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %} {%- endif %}

View File

@@ -1,5 +1,5 @@
{%- if grains['role'] == 'so-eval' -%} {%- if grains['role'] == 'so-eval' -%}
{%- set ES = salt['pillar.get']('master:mainip', '') -%} {%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- else %} {%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%} {%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %} {%- endif %}

View File

@@ -1,5 +1,5 @@
{%- if grains['role'] == 'so-eval' -%} {%- if grains['role'] == 'so-eval' -%}
{%- set ES = salt['pillar.get']('master:mainip', '') -%} {%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- else %} {%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%} {%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %} {%- endif %}

View File

@@ -1,5 +1,5 @@
{%- if grains['role'] == 'so-eval' -%} {%- if grains['role'] == 'so-eval' -%}
{%- set ES = salt['pillar.get']('master:mainip', '') -%} {%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- else %} {%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%} {%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %} {%- endif %}

View File

@@ -1,5 +1,5 @@
{%- if grains['role'] == 'so-eval' -%} {%- if grains['role'] == 'so-eval' -%}
{%- set ES = salt['pillar.get']('master:mainip', '') -%} {%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- else %} {%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%} {%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %} {%- endif %}

View File

@@ -1,5 +1,5 @@
{%- if grains['role'] == 'so-eval' -%} {%- if grains['role'] == 'so-eval' -%}
{%- set ES = salt['pillar.get']('master:mainip', '') -%} {%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- else %} {%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%} {%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %} {%- endif %}

View File

@@ -1,5 +1,5 @@
{%- if grains['role'] == 'so-eval' -%} {%- if grains['role'] == 'so-eval' -%}
{%- set ES = salt['pillar.get']('master:mainip', '') -%} {%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- else %} {%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%} {%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %} {%- endif %}

View File

@@ -1,5 +1,5 @@
{%- if grains['role'] == 'so-eval' -%} {%- if grains['role'] == 'so-eval' -%}
{%- set ES = salt['pillar.get']('master:mainip', '') -%} {%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- else %} {%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%} {%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %} {%- endif %}

View File

@@ -1,5 +1,5 @@
{%- if grains['role'] == 'so-eval' -%} {%- if grains['role'] == 'so-eval' -%}
{%- set ES = salt['pillar.get']('master:mainip', '') -%} {%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- else %} {%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%} {%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %} {%- endif %}

View File

@@ -1,5 +1,5 @@
{%- if grains['role'] == 'so-eval' -%} {%- if grains['role'] == 'so-eval' -%}
{%- set ES = salt['pillar.get']('master:mainip', '') -%} {%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- else %} {%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%} {%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %} {%- endif %}

View File

@@ -1,5 +1,5 @@
{%- if grains['role'] == 'so-eval' -%} {%- if grains['role'] == 'so-eval' -%}
{%- set ES = salt['pillar.get']('master:mainip', '') -%} {%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- else %} {%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%} {%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %} {%- endif %}

View File

@@ -1,5 +1,5 @@
{%- if grains['role'] == 'so-eval' -%} {%- if grains['role'] == 'so-eval' -%}
{%- set ES = salt['pillar.get']('master:mainip', '') -%} {%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- else %} {%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%} {%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %} {%- endif %}

View File

@@ -1,5 +1,5 @@
{%- if grains['role'] == 'so-eval' -%} {%- if grains['role'] == 'so-eval' -%}
{%- set ES = salt['pillar.get']('master:mainip', '') -%} {%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- else %} {%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%} {%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %} {%- endif %}

View File

@@ -1,5 +1,5 @@
{%- if grains['role'] == 'so-eval' -%} {%- if grains['role'] == 'so-eval' -%}
{%- set ES = salt['pillar.get']('master:mainip', '') -%} {%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- else %} {%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%} {%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %} {%- endif %}

View File

@@ -1,5 +1,5 @@
{%- if grains['role'] == 'so-eval' -%} {%- if grains['role'] == 'so-eval' -%}
{%- set ES = salt['pillar.get']('master:mainip', '') -%} {%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- else %} {%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%} {%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %} {%- endif %}

View File

@@ -1,5 +1,5 @@
{%- if grains['role'] == 'so-eval' -%} {%- if grains['role'] == 'so-eval' -%}
{%- set ES = salt['pillar.get']('master:mainip', '') -%} {%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- else %} {%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%} {%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %} {%- endif %}

View File

@@ -1,5 +1,5 @@
{%- if grains['role'] == 'so-eval' -%} {%- if grains['role'] == 'so-eval' -%}
{%- set ES = salt['pillar.get']('master:mainip', '') -%} {%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- else %} {%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%} {%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %} {%- endif %}

View File

@@ -1,5 +1,5 @@
{%- if grains['role'] == 'so-eval' -%} {%- if grains['role'] == 'so-eval' -%}
{%- set ES = salt['pillar.get']('master:mainip', '') -%} {%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- else %} {%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%} {%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %} {%- endif %}

View File

@@ -1,9 +1,9 @@
{% set MASTER = salt['pillar.get']('static:masterip', '') %} {% set MANAGER = salt['pillar.get']('static:managerip', '') %}
{% set BATCH = salt['pillar.get']('logstash_settings:ls_pipeline_batch_size', 125) %} {% set BATCH = salt['pillar.get']('logstash_settings:ls_pipeline_batch_size', 125) %}
output { output {
redis { redis {
host => '{{ MASTER }}' host => '{{ MANAGER }}'
data_type => 'list' data_type => 'list'
key => 'logstash:unparsed' key => 'logstash:unparsed'
congestion_interval => 1 congestion_interval => 1

View File

@@ -1,6 +1,6 @@
#!/bin/bash #!/bin/bash
MASTER={{ MASTER }} MANAGER={{ MANAGER }}
VERSION="HH1.2.2" VERSION="HH1.2.2"
TRUSTED_CONTAINERS=( \ TRUSTED_CONTAINERS=( \
"so-nginx:$VERSION" \ "so-nginx:$VERSION" \
@@ -41,6 +41,6 @@ do
# Pull down the trusted docker image # Pull down the trusted docker image
docker pull --disable-content-trust=false docker.io/soshybridhunter/$i docker pull --disable-content-trust=false docker.io/soshybridhunter/$i
# Tag it with the new registry destination # Tag it with the new registry destination
docker tag soshybridhunter/$i $MASTER:5000/soshybridhunter/$i docker tag soshybridhunter/$i $MANAGER:5000/soshybridhunter/$i
docker push $MASTER:5000/soshybridhunter/$i docker push $MANAGER:5000/soshybridhunter/$i
done done
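For a single iteration of the loop above, the rendered commands would look roughly like the following (a sketch with an assumed manager IP of 10.66.166.10 and the so-nginx image):

  docker pull --disable-content-trust=false docker.io/soshybridhunter/so-nginx:HH1.2.2
  docker tag soshybridhunter/so-nginx:HH1.2.2 10.66.166.10:5000/soshybridhunter/so-nginx:HH1.2.2
  docker push 10.66.166.10:5000/soshybridhunter/so-nginx:HH1.2.2

Each trusted upstream image is pulled from Docker Hub, retagged for the local registry on the manager, and pushed there so other nodes can pull from the manager rather than the internet.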

View File

@@ -13,8 +13,8 @@
# You should have received a copy of the GNU General Public License # You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>. # along with this program. If not, see <http://www.gnu.org/licenses/>.
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %} {% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
{% set MASTER = salt['grains.get']('master') %} {% set MANAGER = salt['grains.get']('manager') %}
{% set masterproxy = salt['pillar.get']('static:masterupdate', '0') %} {% set managerproxy = salt['pillar.get']('static:managerupdate', '0') %}
socore_own_saltstack: socore_own_saltstack:
file.directory: file.directory:
@@ -25,7 +25,7 @@ socore_own_saltstack:
- user - user
- group - group
{% if masterproxy == 1 %} {% if managerproxy == 1 %}
# Create the directories for apt-cacher-ng # Create the directories for apt-cacher-ng
aptcacherconfdir: aptcacherconfdir:
@@ -54,12 +54,12 @@ aptcacherlogdir:
acngcopyconf: acngcopyconf:
file.managed: file.managed:
- name: /opt/so/conf/aptcacher-ng/etc/acng.conf - name: /opt/so/conf/aptcacher-ng/etc/acng.conf
- source: salt://master/files/acng/acng.conf - source: salt://manager/files/acng/acng.conf
# Install the apt-cacher-ng container # Install the apt-cacher-ng container
so-aptcacherng: so-aptcacherng:
docker_container.running: docker_container.running:
- image: {{ MASTER }}:5000/soshybridhunter/so-acng:{{ VERSION }} - image: {{ MANAGER }}:5000/soshybridhunter/so-acng:{{ VERSION }}
- hostname: so-acng - hostname: so-acng
- restart_policy: always - restart_policy: always
- port_bindings: - port_bindings:

View File

@@ -13,8 +13,8 @@
# You should have received a copy of the GNU General Public License # You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>. # along with this program. If not, see <http://www.gnu.org/licenses/>.
{% set access_key = salt['pillar.get']('master:access_key', '') %} {% set access_key = salt['pillar.get']('manager:access_key', '') %}
{% set access_secret = salt['pillar.get']('master:access_secret', '') %} {% set access_secret = salt['pillar.get']('manager:access_secret', '') %}
# Minio Setup # Minio Setup
minioconfdir: minioconfdir:

View File

@@ -1,7 +1,7 @@
{%- set MYSQLPASS = salt['pillar.get']('secrets:mysql', None) %} {%- set MYSQLPASS = salt['pillar.get']('secrets:mysql', None) %}
{%- set MASTERIP = salt['pillar.get']('static:masterip', '') %} {%- set MANAGERIP = salt['pillar.get']('static:managerip', '') %}
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %} {% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
{% set MASTER = salt['grains.get']('master') %} {% set MANAGER = salt['grains.get']('manager') %}
{% set MAINIP = salt['pillar.get']('elasticsearch:mainip') %} {% set MAINIP = salt['pillar.get']('elasticsearch:mainip') %}
{% set FLEETARCH = salt['grains.get']('role') %} {% set FLEETARCH = salt['grains.get']('role') %}
@@ -9,7 +9,7 @@
{% set MAININT = salt['pillar.get']('host:mainint') %} {% set MAININT = salt['pillar.get']('host:mainint') %}
{% set MAINIP = salt['grains.get']('ip_interfaces').get(MAININT)[0] %} {% set MAINIP = salt['grains.get']('ip_interfaces').get(MAININT)[0] %}
{% else %} {% else %}
{% set MAINIP = salt['pillar.get']('static:masterip') %} {% set MAINIP = salt['pillar.get']('static:managerip') %}
{% endif %} {% endif %}
# MySQL Setup # MySQL Setup
@@ -71,7 +71,7 @@ mysql_password_none:
so-mysql: so-mysql:
docker_container.running: docker_container.running:
- image: {{ MASTER }}:5000/soshybridhunter/so-mysql:{{ VERSION }} - image: {{ MANAGER }}:5000/soshybridhunter/so-mysql:{{ VERSION }}
- hostname: so-mysql - hostname: so-mysql
- user: socore - user: socore
- port_bindings: - port_bindings:

View File

@@ -1,5 +1,5 @@
{%- set masterip = salt['pillar.get']('master:mainip', '') %} {%- set managerip = salt['pillar.get']('manager:mainip', '') %}
{%- set FLEET_MASTER = salt['pillar.get']('static:fleet_master') %} {%- set FLEET_MANAGER = salt['pillar.get']('static:fleet_manager') %}
{%- set FLEET_NODE = salt['pillar.get']('static:fleet_node') %} {%- set FLEET_NODE = salt['pillar.get']('static:fleet_node') %}
{%- set FLEET_IP = salt['pillar.get']('static:fleet_ip', None) %} {%- set FLEET_IP = salt['pillar.get']('static:fleet_ip', None) %}
# For more information on configuration, see: # For more information on configuration, see:
@@ -66,7 +66,7 @@ http {
return 301 https://$host$request_uri; return 301 https://$host$request_uri;
} }
{% if FLEET_MASTER %} {% if FLEET_MANAGER %}
server { server {
listen 8090 ssl http2 default_server; listen 8090 ssl http2 default_server;
server_name _; server_name _;
@@ -81,7 +81,7 @@ http {
ssl_prefer_server_ciphers on; ssl_prefer_server_ciphers on;
location ~ ^/kolide.agent.Api/(RequestEnrollment|RequestConfig|RequestQueries|PublishLogs|PublishResults|CheckHealth)$ { location ~ ^/kolide.agent.Api/(RequestEnrollment|RequestConfig|RequestQueries|PublishLogs|PublishResults|CheckHealth)$ {
grpc_pass grpcs://{{ masterip }}:8080; grpc_pass grpcs://{{ managerip }}:8080;
grpc_set_header Host $host; grpc_set_header Host $host;
grpc_set_header X-Forwarded-For $proxy_add_x_forwarded_for; grpc_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_buffering off; proxy_buffering off;
@@ -110,7 +110,7 @@ http {
#include /etc/nginx/default.d/*.conf; #include /etc/nginx/default.d/*.conf;
location ~* (^/login/|^/js/.*|^/css/.*|^/images/.*) { location ~* (^/login/|^/js/.*|^/css/.*|^/images/.*) {
proxy_pass http://{{ masterip }}:9822; proxy_pass http://{{ managerip }}:9822;
proxy_read_timeout 90; proxy_read_timeout 90;
proxy_connect_timeout 90; proxy_connect_timeout 90;
proxy_set_header Host $host; proxy_set_header Host $host;
@@ -124,7 +124,7 @@ http {
location / { location / {
auth_request /auth/sessions/whoami; auth_request /auth/sessions/whoami;
proxy_pass http://{{ masterip }}:9822/; proxy_pass http://{{ managerip }}:9822/;
proxy_read_timeout 90; proxy_read_timeout 90;
proxy_connect_timeout 90; proxy_connect_timeout 90;
proxy_set_header Host $host; proxy_set_header Host $host;
@@ -138,7 +138,7 @@ http {
location ~ ^/auth/.*?(whoami|login|logout|settings) { location ~ ^/auth/.*?(whoami|login|logout|settings) {
rewrite /auth/(.*) /$1 break; rewrite /auth/(.*) /$1 break;
proxy_pass http://{{ masterip }}:4433; proxy_pass http://{{ managerip }}:4433;
proxy_read_timeout 90; proxy_read_timeout 90;
proxy_connect_timeout 90; proxy_connect_timeout 90;
proxy_set_header Host $host; proxy_set_header Host $host;
@@ -185,7 +185,7 @@ http {
location /grafana/ { location /grafana/ {
auth_request /auth/sessions/whoami; auth_request /auth/sessions/whoami;
rewrite /grafana/(.*) /$1 break; rewrite /grafana/(.*) /$1 break;
proxy_pass http://{{ masterip }}:3000/; proxy_pass http://{{ managerip }}:3000/;
proxy_read_timeout 90; proxy_read_timeout 90;
proxy_connect_timeout 90; proxy_connect_timeout 90;
proxy_set_header Host $host; proxy_set_header Host $host;
@@ -198,7 +198,7 @@ http {
location /kibana/ { location /kibana/ {
auth_request /auth/sessions/whoami; auth_request /auth/sessions/whoami;
rewrite /kibana/(.*) /$1 break; rewrite /kibana/(.*) /$1 break;
proxy_pass http://{{ masterip }}:5601/; proxy_pass http://{{ managerip }}:5601/;
proxy_read_timeout 90; proxy_read_timeout 90;
proxy_connect_timeout 90; proxy_connect_timeout 90;
proxy_set_header Host $host; proxy_set_header Host $host;
@@ -209,7 +209,7 @@ http {
} }
location /nodered/ { location /nodered/ {
proxy_pass http://{{ masterip }}:1880/; proxy_pass http://{{ managerip }}:1880/;
proxy_read_timeout 90; proxy_read_timeout 90;
proxy_connect_timeout 90; proxy_connect_timeout 90;
proxy_set_header Host $host; proxy_set_header Host $host;
@@ -222,7 +222,7 @@ http {
} }
location /playbook/ { location /playbook/ {
proxy_pass http://{{ masterip }}:3200/playbook/; proxy_pass http://{{ managerip }}:3200/playbook/;
proxy_read_timeout 90; proxy_read_timeout 90;
proxy_connect_timeout 90; proxy_connect_timeout 90;
proxy_set_header Host $host; proxy_set_header Host $host;
@@ -238,7 +238,7 @@ http {
} }
{%- else %} {%- else %}
location /fleet/ { location /fleet/ {
proxy_pass https://{{ masterip }}:8080; proxy_pass https://{{ managerip }}:8080;
proxy_read_timeout 90; proxy_read_timeout 90;
proxy_connect_timeout 90; proxy_connect_timeout 90;
proxy_set_header Host $host; proxy_set_header Host $host;
@@ -250,7 +250,7 @@ http {
{%- endif %} {%- endif %}
location /thehive/ { location /thehive/ {
proxy_pass http://{{ masterip }}:9000/thehive/; proxy_pass http://{{ managerip }}:9000/thehive/;
proxy_read_timeout 90; proxy_read_timeout 90;
proxy_connect_timeout 90; proxy_connect_timeout 90;
proxy_http_version 1.1; # this is essential for chunked responses to work proxy_http_version 1.1; # this is essential for chunked responses to work
@@ -262,7 +262,7 @@ http {
} }
location /cortex/ { location /cortex/ {
proxy_pass http://{{ masterip }}:9001/cortex/; proxy_pass http://{{ managerip }}:9001/cortex/;
proxy_read_timeout 90; proxy_read_timeout 90;
proxy_connect_timeout 90; proxy_connect_timeout 90;
proxy_http_version 1.1; # this is essential for chunked responses to work proxy_http_version 1.1; # this is essential for chunked responses to work
@@ -274,7 +274,7 @@ http {
} }
location /soctopus/ { location /soctopus/ {
proxy_pass http://{{ masterip }}:7000/; proxy_pass http://{{ managerip }}:7000/;
proxy_read_timeout 90; proxy_read_timeout 90;
proxy_connect_timeout 90; proxy_connect_timeout 90;
proxy_set_header Host $host; proxy_set_header Host $host;
@@ -297,7 +297,7 @@ http {
} }
location /sensoroniagents/ { location /sensoroniagents/ {
proxy_pass http://{{ masterip }}:9822/; proxy_pass http://{{ managerip }}:9822/;
proxy_read_timeout 90; proxy_read_timeout 90;
proxy_connect_timeout 90; proxy_connect_timeout 90;
proxy_set_header Host $host; proxy_set_header Host $host;

View File

@@ -1,5 +1,5 @@
{%- set masterip = salt['pillar.get']('master:mainip', '') %} {%- set managerip = salt['pillar.get']('manager:mainip', '') %}
{%- set FLEET_MASTER = salt['pillar.get']('static:fleet_master') %} {%- set FLEET_MANAGER = salt['pillar.get']('static:fleet_manager') %}
{%- set FLEET_NODE = salt['pillar.get']('static:fleet_node') %} {%- set FLEET_NODE = salt['pillar.get']('static:fleet_node') %}
{%- set FLEET_IP = salt['pillar.get']('static:fleet_ip', None) %} {%- set FLEET_IP = salt['pillar.get']('static:fleet_ip', None) %}
# For more information on configuration, see: # For more information on configuration, see:
@@ -66,7 +66,7 @@ http {
return 301 https://$host$request_uri; return 301 https://$host$request_uri;
} }
{% if FLEET_MASTER %} {% if FLEET_MANAGER %}
server { server {
listen 8090 ssl http2 default_server; listen 8090 ssl http2 default_server;
server_name _; server_name _;
@@ -81,7 +81,7 @@ http {
ssl_prefer_server_ciphers on; ssl_prefer_server_ciphers on;
location ~ ^/kolide.agent.Api/(RequestEnrollment|RequestConfig|RequestQueries|PublishLogs|PublishResults|CheckHealth)$ { location ~ ^/kolide.agent.Api/(RequestEnrollment|RequestConfig|RequestQueries|PublishLogs|PublishResults|CheckHealth)$ {
grpc_pass grpcs://{{ masterip }}:8080; grpc_pass grpcs://{{ managerip }}:8080;
grpc_set_header Host $host; grpc_set_header Host $host;
grpc_set_header X-Forwarded-For $proxy_add_x_forwarded_for; grpc_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_buffering off; proxy_buffering off;
@@ -110,7 +110,7 @@ http {
#include /etc/nginx/default.d/*.conf; #include /etc/nginx/default.d/*.conf;
location ~* (^/login/|^/js/.*|^/css/.*|^/images/.*) { location ~* (^/login/|^/js/.*|^/css/.*|^/images/.*) {
proxy_pass http://{{ masterip }}:9822; proxy_pass http://{{ managerip }}:9822;
proxy_read_timeout 90; proxy_read_timeout 90;
proxy_connect_timeout 90; proxy_connect_timeout 90;
proxy_set_header Host $host; proxy_set_header Host $host;
@@ -124,7 +124,7 @@ http {
location / { location / {
auth_request /auth/sessions/whoami; auth_request /auth/sessions/whoami;
proxy_pass http://{{ masterip }}:9822/; proxy_pass http://{{ managerip }}:9822/;
proxy_read_timeout 90; proxy_read_timeout 90;
proxy_connect_timeout 90; proxy_connect_timeout 90;
proxy_set_header Host $host; proxy_set_header Host $host;
@@ -138,7 +138,7 @@ http {
location ~ ^/auth/.*?(whoami|login|logout|settings) { location ~ ^/auth/.*?(whoami|login|logout|settings) {
rewrite /auth/(.*) /$1 break; rewrite /auth/(.*) /$1 break;
proxy_pass http://{{ masterip }}:4433; proxy_pass http://{{ managerip }}:4433;
proxy_read_timeout 90; proxy_read_timeout 90;
proxy_connect_timeout 90; proxy_connect_timeout 90;
proxy_set_header Host $host; proxy_set_header Host $host;
@@ -185,7 +185,7 @@ http {
location /grafana/ { location /grafana/ {
auth_request /auth/sessions/whoami; auth_request /auth/sessions/whoami;
rewrite /grafana/(.*) /$1 break; rewrite /grafana/(.*) /$1 break;
proxy_pass http://{{ masterip }}:3000/; proxy_pass http://{{ managerip }}:3000/;
proxy_read_timeout 90; proxy_read_timeout 90;
proxy_connect_timeout 90; proxy_connect_timeout 90;
proxy_set_header Host $host; proxy_set_header Host $host;
@@ -198,7 +198,7 @@ http {
location /kibana/ { location /kibana/ {
auth_request /auth/sessions/whoami; auth_request /auth/sessions/whoami;
rewrite /kibana/(.*) /$1 break; rewrite /kibana/(.*) /$1 break;
proxy_pass http://{{ masterip }}:5601/; proxy_pass http://{{ managerip }}:5601/;
proxy_read_timeout 90; proxy_read_timeout 90;
proxy_connect_timeout 90; proxy_connect_timeout 90;
proxy_set_header Host $host; proxy_set_header Host $host;
@@ -209,7 +209,7 @@ http {
} }
location /nodered/ { location /nodered/ {
proxy_pass http://{{ masterip }}:1880/; proxy_pass http://{{ managerip }}:1880/;
proxy_read_timeout 90; proxy_read_timeout 90;
proxy_connect_timeout 90; proxy_connect_timeout 90;
proxy_set_header Host $host; proxy_set_header Host $host;
@@ -222,7 +222,7 @@ http {
} }
location /playbook/ { location /playbook/ {
proxy_pass http://{{ masterip }}:3200/playbook/; proxy_pass http://{{ managerip }}:3200/playbook/;
proxy_read_timeout 90; proxy_read_timeout 90;
proxy_connect_timeout 90; proxy_connect_timeout 90;
proxy_set_header Host $host; proxy_set_header Host $host;
@@ -238,7 +238,7 @@ http {
} }
{%- else %} {%- else %}
location /fleet/ { location /fleet/ {
proxy_pass https://{{ masterip }}:8080; proxy_pass https://{{ managerip }}:8080;
proxy_read_timeout 90; proxy_read_timeout 90;
proxy_connect_timeout 90; proxy_connect_timeout 90;
proxy_set_header Host $host; proxy_set_header Host $host;
@@ -250,7 +250,7 @@ http {
{%- endif %} {%- endif %}
location /thehive/ { location /thehive/ {
proxy_pass http://{{ masterip }}:9000/thehive/; proxy_pass http://{{ managerip }}:9000/thehive/;
proxy_read_timeout 90; proxy_read_timeout 90;
proxy_connect_timeout 90; proxy_connect_timeout 90;
proxy_http_version 1.1; # this is essential for chunked responses to work proxy_http_version 1.1; # this is essential for chunked responses to work
@@ -262,7 +262,7 @@ http {
} }
location /cortex/ { location /cortex/ {
proxy_pass http://{{ masterip }}:9001/cortex/; proxy_pass http://{{ managerip }}:9001/cortex/;
proxy_read_timeout 90; proxy_read_timeout 90;
proxy_connect_timeout 90; proxy_connect_timeout 90;
proxy_http_version 1.1; # this is essential for chunked responses to work proxy_http_version 1.1; # this is essential for chunked responses to work
@@ -274,7 +274,7 @@ http {
} }
location /soctopus/ { location /soctopus/ {
proxy_pass http://{{ masterip }}:7000/; proxy_pass http://{{ managerip }}:7000/;
proxy_read_timeout 90; proxy_read_timeout 90;
proxy_connect_timeout 90; proxy_connect_timeout 90;
proxy_set_header Host $host; proxy_set_header Host $host;
@@ -297,7 +297,7 @@ http {
} }
location /sensoroniagents/ { location /sensoroniagents/ {
proxy_pass http://{{ masterip }}:9822/; proxy_pass http://{{ managerip }}:9822/;
proxy_read_timeout 90; proxy_read_timeout 90;
proxy_connect_timeout 90; proxy_connect_timeout 90;
proxy_set_header Host $host; proxy_set_header Host $host;

View File

@@ -1,5 +1,5 @@
{%- set masterip = salt['pillar.get']('master:mainip', '') %} {%- set managerip = salt['pillar.get']('manager:mainip', '') %}
{%- set FLEET_MASTER = salt['pillar.get']('static:fleet_master') %} {%- set FLEET_MANAGER = salt['pillar.get']('static:fleet_manager') %}
{%- set FLEET_NODE = salt['pillar.get']('static:fleet_node') %} {%- set FLEET_NODE = salt['pillar.get']('static:fleet_node') %}
{%- set FLEET_IP = salt['pillar.get']('static:fleet_ip', None) %} {%- set FLEET_IP = salt['pillar.get']('static:fleet_ip', None) %}
# For more information on configuration, see: # For more information on configuration, see:
@@ -66,7 +66,7 @@ http {
return 301 https://$host$request_uri; return 301 https://$host$request_uri;
} }
{% if FLEET_MASTER %} {% if FLEET_MANAGER %}
server { server {
listen 8090 ssl http2 default_server; listen 8090 ssl http2 default_server;
server_name _; server_name _;
@@ -81,7 +81,7 @@ http {
ssl_prefer_server_ciphers on; ssl_prefer_server_ciphers on;
location ~ ^/kolide.agent.Api/(RequestEnrollment|RequestConfig|RequestQueries|PublishLogs|PublishResults|CheckHealth)$ { location ~ ^/kolide.agent.Api/(RequestEnrollment|RequestConfig|RequestQueries|PublishLogs|PublishResults|CheckHealth)$ {
grpc_pass grpcs://{{ masterip }}:8080; grpc_pass grpcs://{{ managerip }}:8080;
grpc_set_header Host $host; grpc_set_header Host $host;
grpc_set_header X-Forwarded-For $proxy_add_x_forwarded_for; grpc_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_buffering off; proxy_buffering off;
@@ -109,7 +109,7 @@ http {
#include /etc/nginx/default.d/*.conf; #include /etc/nginx/default.d/*.conf;
location ~* (^/login/|^/js/.*|^/css/.*|^/images/.*) { location ~* (^/login/|^/js/.*|^/css/.*|^/images/.*) {
proxy_pass http://{{ masterip }}:9822; proxy_pass http://{{ managerip }}:9822;
proxy_read_timeout 90; proxy_read_timeout 90;
proxy_connect_timeout 90; proxy_connect_timeout 90;
proxy_set_header Host $host; proxy_set_header Host $host;
@@ -123,7 +123,7 @@ http {
location / { location / {
auth_request /auth/sessions/whoami; auth_request /auth/sessions/whoami;
proxy_pass http://{{ masterip }}:9822/; proxy_pass http://{{ managerip }}:9822/;
proxy_read_timeout 90; proxy_read_timeout 90;
proxy_connect_timeout 90; proxy_connect_timeout 90;
proxy_set_header Host $host; proxy_set_header Host $host;
@@ -137,7 +137,7 @@ http {
location ~ ^/auth/.*?(whoami|login|logout|settings) { location ~ ^/auth/.*?(whoami|login|logout|settings) {
rewrite /auth/(.*) /$1 break; rewrite /auth/(.*) /$1 break;
proxy_pass http://{{ masterip }}:4433; proxy_pass http://{{ managerip }}:4433;
proxy_read_timeout 90; proxy_read_timeout 90;
proxy_connect_timeout 90; proxy_connect_timeout 90;
proxy_set_header Host $host; proxy_set_header Host $host;
@@ -184,7 +184,7 @@ http {
location /grafana/ { location /grafana/ {
auth_request /auth/sessions/whoami; auth_request /auth/sessions/whoami;
rewrite /grafana/(.*) /$1 break; rewrite /grafana/(.*) /$1 break;
proxy_pass http://{{ masterip }}:3000/; proxy_pass http://{{ managerip }}:3000/;
proxy_read_timeout 90; proxy_read_timeout 90;
proxy_connect_timeout 90; proxy_connect_timeout 90;
proxy_set_header Host $host; proxy_set_header Host $host;
@@ -197,7 +197,7 @@ http {
location /kibana/ { location /kibana/ {
auth_request /auth/sessions/whoami; auth_request /auth/sessions/whoami;
rewrite /kibana/(.*) /$1 break; rewrite /kibana/(.*) /$1 break;
proxy_pass http://{{ masterip }}:5601/; proxy_pass http://{{ managerip }}:5601/;
proxy_read_timeout 90; proxy_read_timeout 90;
proxy_connect_timeout 90; proxy_connect_timeout 90;
proxy_set_header Host $host; proxy_set_header Host $host;
@@ -208,7 +208,7 @@ http {
} }
location /nodered/ { location /nodered/ {
proxy_pass http://{{ masterip }}:1880/; proxy_pass http://{{ managerip }}:1880/;
proxy_read_timeout 90; proxy_read_timeout 90;
proxy_connect_timeout 90; proxy_connect_timeout 90;
proxy_set_header Host $host; proxy_set_header Host $host;
@@ -221,7 +221,7 @@ http {
} }
location /playbook/ { location /playbook/ {
proxy_pass http://{{ masterip }}:3200/playbook/; proxy_pass http://{{ managerip }}:3200/playbook/;
proxy_read_timeout 90; proxy_read_timeout 90;
proxy_connect_timeout 90; proxy_connect_timeout 90;
proxy_set_header Host $host; proxy_set_header Host $host;
@@ -237,7 +237,7 @@ http {
} }
{%- else %} {%- else %}
location /fleet/ { location /fleet/ {
proxy_pass https://{{ masterip }}:8080; proxy_pass https://{{ managerip }}:8080;
proxy_read_timeout 90; proxy_read_timeout 90;
proxy_connect_timeout 90; proxy_connect_timeout 90;
proxy_set_header Host $host; proxy_set_header Host $host;
@@ -249,7 +249,7 @@ http {
{%- endif %} {%- endif %}
location /thehive/ { location /thehive/ {
proxy_pass http://{{ masterip }}:9000/thehive/; proxy_pass http://{{ managerip }}:9000/thehive/;
proxy_read_timeout 90; proxy_read_timeout 90;
proxy_connect_timeout 90; proxy_connect_timeout 90;
proxy_http_version 1.1; # this is essential for chunked responses to work proxy_http_version 1.1; # this is essential for chunked responses to work
@@ -261,7 +261,7 @@ http {
} }
location /cortex/ { location /cortex/ {
proxy_pass http://{{ masterip }}:9001/cortex/; proxy_pass http://{{ managerip }}:9001/cortex/;
proxy_read_timeout 90; proxy_read_timeout 90;
proxy_connect_timeout 90; proxy_connect_timeout 90;
proxy_http_version 1.1; # this is essential for chunked responses to work proxy_http_version 1.1; # this is essential for chunked responses to work
@@ -273,7 +273,7 @@ http {
} }
location /soctopus/ { location /soctopus/ {
proxy_pass http://{{ masterip }}:7000/; proxy_pass http://{{ managerip }}:7000/;
proxy_read_timeout 90; proxy_read_timeout 90;
proxy_connect_timeout 90; proxy_connect_timeout 90;
proxy_set_header Host $host; proxy_set_header Host $host;
@@ -296,7 +296,7 @@ http {
} }
location /sensoroniagents/ { location /sensoroniagents/ {
proxy_pass http://{{ masterip }}:9822/; proxy_pass http://{{ managerip }}:9822/;
proxy_read_timeout 90; proxy_read_timeout 90;
proxy_connect_timeout 90; proxy_connect_timeout 90;
proxy_set_header Host $host; proxy_set_header Host $host;

View File

@@ -1,5 +1,5 @@
{%- set masterip = salt['pillar.get']('master:mainip', '') %} {%- set managerip = salt['pillar.get']('manager:mainip', '') %}
{%- set FLEET_MASTER = salt['pillar.get']('static:fleet_master') %} {%- set FLEET_MANAGER = salt['pillar.get']('static:fleet_manager') %}
{%- set FLEET_NODE = salt['pillar.get']('static:fleet_node') %} {%- set FLEET_NODE = salt['pillar.get']('static:fleet_node') %}
{%- set FLEET_IP = salt['pillar.get']('static:fleet_ip', None) %} {%- set FLEET_IP = salt['pillar.get']('static:fleet_ip', None) %}
# For more information on configuration, see: # For more information on configuration, see:
@@ -66,7 +66,7 @@ http {
return 301 https://$host$request_uri; return 301 https://$host$request_uri;
} }
{% if FLEET_MASTER %} {% if FLEET_MANAGER %}
server { server {
listen 8090 ssl http2 default_server; listen 8090 ssl http2 default_server;
server_name _; server_name _;
@@ -81,7 +81,7 @@ http {
ssl_prefer_server_ciphers on; ssl_prefer_server_ciphers on;
location ~ ^/kolide.agent.Api/(RequestEnrollment|RequestConfig|RequestQueries|PublishLogs|PublishResults|CheckHealth)$ { location ~ ^/kolide.agent.Api/(RequestEnrollment|RequestConfig|RequestQueries|PublishLogs|PublishResults|CheckHealth)$ {
grpc_pass grpcs://{{ masterip }}:8080; grpc_pass grpcs://{{ managerip }}:8080;
grpc_set_header Host $host; grpc_set_header Host $host;
grpc_set_header X-Forwarded-For $proxy_add_x_forwarded_for; grpc_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_buffering off; proxy_buffering off;
@@ -110,7 +110,7 @@ http {
#include /etc/nginx/default.d/*.conf; #include /etc/nginx/default.d/*.conf;
location ~* (^/login/|^/js/.*|^/css/.*|^/images/.*) { location ~* (^/login/|^/js/.*|^/css/.*|^/images/.*) {
proxy_pass http://{{ masterip }}:9822; proxy_pass http://{{ managerip }}:9822;
proxy_read_timeout 90; proxy_read_timeout 90;
proxy_connect_timeout 90; proxy_connect_timeout 90;
proxy_set_header Host $host; proxy_set_header Host $host;
@@ -124,7 +124,7 @@ http {
location / { location / {
auth_request /auth/sessions/whoami; auth_request /auth/sessions/whoami;
proxy_pass http://{{ masterip }}:9822/; proxy_pass http://{{ managerip }}:9822/;
proxy_read_timeout 90; proxy_read_timeout 90;
proxy_connect_timeout 90; proxy_connect_timeout 90;
proxy_set_header Host $host; proxy_set_header Host $host;
@@ -138,7 +138,7 @@ http {
location ~ ^/auth/.*?(whoami|login|logout|settings) { location ~ ^/auth/.*?(whoami|login|logout|settings) {
rewrite /auth/(.*) /$1 break; rewrite /auth/(.*) /$1 break;
proxy_pass http://{{ masterip }}:4433; proxy_pass http://{{ managerip }}:4433;
proxy_read_timeout 90; proxy_read_timeout 90;
proxy_connect_timeout 90; proxy_connect_timeout 90;
proxy_set_header Host $host; proxy_set_header Host $host;
@@ -185,7 +185,7 @@ http {
location /grafana/ { location /grafana/ {
auth_request /auth/sessions/whoami; auth_request /auth/sessions/whoami;
rewrite /grafana/(.*) /$1 break; rewrite /grafana/(.*) /$1 break;
proxy_pass http://{{ masterip }}:3000/; proxy_pass http://{{ managerip }}:3000/;
proxy_read_timeout 90; proxy_read_timeout 90;
proxy_connect_timeout 90; proxy_connect_timeout 90;
proxy_set_header Host $host; proxy_set_header Host $host;
@@ -198,7 +198,7 @@ http {
location /kibana/ { location /kibana/ {
auth_request /auth/sessions/whoami; auth_request /auth/sessions/whoami;
rewrite /kibana/(.*) /$1 break; rewrite /kibana/(.*) /$1 break;
proxy_pass http://{{ masterip }}:5601/; proxy_pass http://{{ managerip }}:5601/;
proxy_read_timeout 90; proxy_read_timeout 90;
proxy_connect_timeout 90; proxy_connect_timeout 90;
proxy_set_header Host $host; proxy_set_header Host $host;
@@ -209,7 +209,7 @@ http {
} }
location /nodered/ { location /nodered/ {
proxy_pass http://{{ masterip }}:1880/; proxy_pass http://{{ managerip }}:1880/;
proxy_read_timeout 90; proxy_read_timeout 90;
proxy_connect_timeout 90; proxy_connect_timeout 90;
proxy_set_header Host $host; proxy_set_header Host $host;
@@ -222,7 +222,7 @@ http {
} }
location /playbook/ { location /playbook/ {
proxy_pass http://{{ masterip }}:3200/playbook/; proxy_pass http://{{ managerip }}:3200/playbook/;
proxy_read_timeout 90; proxy_read_timeout 90;
proxy_connect_timeout 90; proxy_connect_timeout 90;
proxy_set_header Host $host; proxy_set_header Host $host;
@@ -238,7 +238,7 @@ http {
} }
{%- else %} {%- else %}
location /fleet/ { location /fleet/ {
proxy_pass https://{{ masterip }}:8080; proxy_pass https://{{ managerip }}:8080;
proxy_read_timeout 90; proxy_read_timeout 90;
proxy_connect_timeout 90; proxy_connect_timeout 90;
proxy_set_header Host $host; proxy_set_header Host $host;
@@ -250,7 +250,7 @@ http {
{%- endif %} {%- endif %}
location /thehive/ { location /thehive/ {
proxy_pass http://{{ masterip }}:9000/thehive/; proxy_pass http://{{ managerip }}:9000/thehive/;
proxy_read_timeout 90; proxy_read_timeout 90;
proxy_connect_timeout 90; proxy_connect_timeout 90;
proxy_http_version 1.1; # this is essential for chunked responses to work proxy_http_version 1.1; # this is essential for chunked responses to work
@@ -262,7 +262,7 @@ http {
} }
location /cortex/ { location /cortex/ {
proxy_pass http://{{ masterip }}:9001/cortex/; proxy_pass http://{{ managerip }}:9001/cortex/;
proxy_read_timeout 90; proxy_read_timeout 90;
proxy_connect_timeout 90; proxy_connect_timeout 90;
proxy_http_version 1.1; # this is essential for chunked responses to work proxy_http_version 1.1; # this is essential for chunked responses to work
@@ -274,7 +274,7 @@ http {
} }
location /soctopus/ { location /soctopus/ {
proxy_pass http://{{ masterip }}:7000/; proxy_pass http://{{ managerip }}:7000/;
proxy_read_timeout 90; proxy_read_timeout 90;
proxy_connect_timeout 90; proxy_connect_timeout 90;
proxy_set_header Host $host; proxy_set_header Host $host;
@@ -297,7 +297,7 @@ http {
} }
location /sensoroniagents/ { location /sensoroniagents/ {
proxy_pass http://{{ masterip }}:9822/; proxy_pass http://{{ managerip }}:9822/;
proxy_read_timeout 90; proxy_read_timeout 90;
proxy_connect_timeout 90; proxy_connect_timeout 90;
proxy_set_header Host $host; proxy_set_header Host $host;

View File

@@ -1,4 +1,4 @@
{%- set ip = salt['pillar.get']('static:masterip', '') %} {%- set ip = salt['pillar.get']('static:managerip', '') %}
{ {
"enterprise_attack_url": "https://raw.githubusercontent.com/mitre/cti/master/enterprise-attack/enterprise-attack.json", "enterprise_attack_url": "https://raw.githubusercontent.com/mitre/cti/master/enterprise-attack/enterprise-attack.json",

View File

@@ -1,6 +1,6 @@
{% set FLEETMASTER = salt['pillar.get']('static:fleet_master', False) %} {% set FLEETMANAGER = salt['pillar.get']('static:fleet_manager', False) %}
{% set FLEETNODE = salt['pillar.get']('static:fleet_node', False) %} {% set FLEETNODE = salt['pillar.get']('static:fleet_node', False) %}
{% set MASTER = salt['grains.get']('master') %} {% set MANAGER = salt['grains.get']('manager') %}
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %} {% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
# Drop the correct nginx config based on role # Drop the correct nginx config based on role
@@ -61,15 +61,15 @@ navigatordefaultlayer:
so-nginx: so-nginx:
docker_container.running: docker_container.running:
- image: {{ MASTER }}:5000/soshybridhunter/so-nginx:{{ VERSION }} - image: {{ MANAGER }}:5000/soshybridhunter/so-nginx:{{ VERSION }}
- hostname: so-nginx - hostname: so-nginx
- binds: - binds:
- /opt/so/conf/nginx/nginx.conf:/etc/nginx/nginx.conf:ro - /opt/so/conf/nginx/nginx.conf:/etc/nginx/nginx.conf:ro
- /opt/so/log/nginx/:/var/log/nginx:rw - /opt/so/log/nginx/:/var/log/nginx:rw
- /opt/so/tmp/nginx/:/var/lib/nginx:rw - /opt/so/tmp/nginx/:/var/lib/nginx:rw
- /opt/so/tmp/nginx/:/run:rw - /opt/so/tmp/nginx/:/run:rw
- /etc/pki/masterssl.crt:/etc/pki/nginx/server.crt:ro - /etc/pki/managerssl.crt:/etc/pki/nginx/server.crt:ro
- /etc/pki/masterssl.key:/etc/pki/nginx/server.key:ro - /etc/pki/managerssl.key:/etc/pki/nginx/server.key:ro
- /opt/so/conf/fleet/packages:/opt/socore/html/packages - /opt/so/conf/fleet/packages:/opt/socore/html/packages
# ATT&CK Navigator binds # ATT&CK Navigator binds
- /opt/so/conf/navigator/navigator_config.json:/opt/socore/html/navigator/assets/config.json:ro - /opt/so/conf/navigator/navigator_config.json:/opt/socore/html/navigator/assets/config.json:ro
@@ -78,7 +78,7 @@ so-nginx:
- port_bindings: - port_bindings:
- 80:80 - 80:80
- 443:443 - 443:443
{%- if FLEETMASTER or FLEETNODE %} {%- if FLEETMANAGER or FLEETNODE %}
- 8090:8090 - 8090:8090
{%- endif %} {%- endif %}
- watch: - watch:

View File

@@ -1,4 +1,4 @@
{%- set ip = salt['pillar.get']('static:masterip', '') -%} {%- set ip = salt['pillar.get']('static:managerip', '') -%}
#!/bin/bash #!/bin/bash
default_salt_dir=/opt/so/saltstack/default default_salt_dir=/opt/so/saltstack/default

File diff suppressed because one or more lines are too long

View File

@@ -1,11 +1,11 @@
{%- set MASTER = grains['master'] -%} {%- set MANAGER = grains['manager'] -%}
{%- set SENSORONIKEY = salt['pillar.get']('static:sensoronikey', '') -%} {%- set SENSORONIKEY = salt['pillar.get']('static:sensoronikey', '') -%}
{ {
"logFilename": "/opt/sensoroni/logs/sensoroni.log", "logFilename": "/opt/sensoroni/logs/sensoroni.log",
"logLevel":"debug", "logLevel":"debug",
"agent": { "agent": {
"pollIntervalMs": 10000, "pollIntervalMs": 10000,
"serverUrl": "https://{{ MASTER }}/sensoroniagents", "serverUrl": "https://{{ MANAGER }}/sensoroniagents",
"verifyCert": false, "verifyCert": false,
"modules": { "modules": {
"statickeyauth": { "statickeyauth": {

View File

@@ -13,7 +13,7 @@
# You should have received a copy of the GNU General Public License # You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>. # along with this program. If not, see <http://www.gnu.org/licenses/>.
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %} {% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
{% set MASTER = salt['grains.get']('master') %} {% set MANAGER = salt['grains.get']('manager') %}
{% set INTERFACE = salt['pillar.get']('sensor:interface', 'bond0') %} {% set INTERFACE = salt['pillar.get']('sensor:interface', 'bond0') %}
{% set BPF_STENO = salt['pillar.get']('steno:bpf', None) %} {% set BPF_STENO = salt['pillar.get']('steno:bpf', None) %}
{% set BPF_COMPILED = "" %} {% set BPF_COMPILED = "" %}
@@ -129,7 +129,7 @@ sensoronilog:
so-steno: so-steno:
docker_container.running: docker_container.running:
- image: {{ MASTER }}:5000/soshybridhunter/so-steno:{{ VERSION }} - image: {{ MANAGER }}:5000/soshybridhunter/so-steno:{{ VERSION }}
- network_mode: host - network_mode: host
- privileged: True - privileged: True
- port_bindings: - port_bindings:
@@ -146,7 +146,7 @@ so-steno:
so-sensoroni: so-sensoroni:
docker_container.running: docker_container.running:
- image: {{ MASTER }}:5000/soshybridhunter/so-soc:{{ VERSION }} - image: {{ MANAGER }}:5000/soshybridhunter/so-soc:{{ VERSION }}
- network_mode: host - network_mode: host
- binds: - binds:
- /opt/so/conf/steno/certs:/etc/stenographer/certs:rw - /opt/so/conf/steno/certs:/etc/stenographer/certs:rw

View File

@@ -1,7 +1,7 @@
{% set MASTERIP = salt['pillar.get']('master:mainip', '') %} {% set MANAGERIP = salt['pillar.get']('manager:mainip', '') %}
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %} {% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
{% set MASTER = salt['grains.get']('master') %} {% set MANAGER = salt['grains.get']('manager') %}
{% set MAINIP = salt['grains.get']('ip_interfaces').get(salt['pillar.get']('sensor:mainint', salt['pillar.get']('master:mainint', salt['pillar.get']('elasticsearch:mainint', salt['pillar.get']('host:mainint')))))[0] %} {% set MAINIP = salt['grains.get']('ip_interfaces').get(salt['pillar.get']('sensor:mainint', salt['pillar.get']('manager:mainint', salt['pillar.get']('elasticsearch:mainint', salt['pillar.get']('host:mainint')))))[0] %}
{%- set MYSQLPASS = salt['pillar.get']('secrets:mysql', None) -%} {%- set MYSQLPASS = salt['pillar.get']('secrets:mysql', None) -%}
{%- set PLAYBOOKPASS = salt['pillar.get']('secrets:playbook', None) -%} {%- set PLAYBOOKPASS = salt['pillar.get']('secrets:playbook', None) -%}
@@ -40,7 +40,7 @@ query_playbookdbuser_grants:
query_updatwebhooks: query_updatwebhooks:
mysql_query.run: mysql_query.run:
- database: playbook - database: playbook
- query: "update webhooks set url = 'http://{{MASTERIP}}:7000/playbook/webhook' where project_id = 1" - query: "update webhooks set url = 'http://{{MANAGERIP}}:7000/playbook/webhook' where project_id = 1"
- connection_host: {{ MAINIP }} - connection_host: {{ MAINIP }}
- connection_port: 3306 - connection_port: 3306
- connection_user: root - connection_user: root
@@ -53,8 +53,8 @@ query_updatepluginurls:
update settings set value = update settings set value =
"--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess "--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess
project: '1' project: '1'
convert_url: http://{{MASTERIP}}:7000/playbook/sigmac convert_url: http://{{MANAGERIP}}:7000/playbook/sigmac
create_url: http://{{MASTERIP}}:7000/playbook/play" create_url: http://{{MANAGERIP}}:7000/playbook/play"
where id = 43 where id = 43
- connection_host: {{ MAINIP }} - connection_host: {{ MAINIP }}
- connection_port: 3306 - connection_port: 3306
@@ -73,11 +73,11 @@ playbook_password_none:
so-playbook: so-playbook:
docker_container.running: docker_container.running:
- image: {{ MASTER }}:5000/soshybridhunter/so-playbook:{{ VERSION }} - image: {{ MANAGER }}:5000/soshybridhunter/so-playbook:{{ VERSION }}
- hostname: playbook - hostname: playbook
- name: so-playbook - name: so-playbook
- environment: - environment:
- REDMINE_DB_MYSQL={{ MASTERIP }} - REDMINE_DB_MYSQL={{ MANAGERIP }}
- REDMINE_DB_DATABASE=playbook - REDMINE_DB_DATABASE=playbook
- REDMINE_DB_USERNAME=playbookdbuser - REDMINE_DB_USERNAME=playbookdbuser
- REDMINE_DB_PASSWORD={{ PLAYBOOKPASS }} - REDMINE_DB_PASSWORD={{ PLAYBOOKPASS }}

View File

@@ -13,7 +13,7 @@ def run():
STATICFILE = f"{LOCAL_SALT_DIR}/pillar/static.sls" STATICFILE = f"{LOCAL_SALT_DIR}/pillar/static.sls"
SECRETSFILE = f"{LOCAL_SALT_DIR}/pillar/secrets.sls" SECRETSFILE = f"{LOCAL_SALT_DIR}/pillar/secrets.sls"
if MINIONID.split('_')[-1] in ['master','eval','fleet','mastersearch','standalone']: if MINIONID.split('_')[-1] in ['manager','eval','fleet','managersearch','standalone']:
if ACTION == 'enablefleet': if ACTION == 'enablefleet':
logging.info('so/fleet enablefleet reactor') logging.info('so/fleet enablefleet reactor')
@@ -27,7 +27,7 @@ def run():
if ROLE == 'so-fleet': if ROLE == 'so-fleet':
line = re.sub(r'fleet_node: \S*', f"fleet_node: True", line.rstrip()) line = re.sub(r'fleet_node: \S*', f"fleet_node: True", line.rstrip())
else: else:
line = re.sub(r'fleet_master: \S*', f"fleet_master: True", line.rstrip()) line = re.sub(r'fleet_manager: \S*', f"fleet_manager: True", line.rstrip())
print(line) print(line)
# Update the enroll secret in the secrets pillar # Update the enroll secret in the secrets pillar
@@ -50,7 +50,7 @@ def run():
PACKAGEVERSION = data['data']['current-package-version'] PACKAGEVERSION = data['data']['current-package-version']
PACKAGEHOSTNAME = data['data']['package-hostname'] PACKAGEHOSTNAME = data['data']['package-hostname']
MASTER = data['data']['master'] MANAGER = data['data']['manager']
VERSION = data['data']['version'] VERSION = data['data']['version']
ESECRET = data['data']['enroll-secret'] ESECRET = data['data']['enroll-secret']
@@ -59,7 +59,7 @@ def run():
# Run Docker container that will build the packages # Run Docker container that will build the packages
gen_packages = subprocess.run(["docker", "run","--rm", "--mount", f"type=bind,source={LOCAL_SALT_DIR}/salt/fleet/packages,target=/output", \ gen_packages = subprocess.run(["docker", "run","--rm", "--mount", f"type=bind,source={LOCAL_SALT_DIR}/salt/fleet/packages,target=/output", \
"--mount", "type=bind,source=/etc/ssl/certs/intca.crt,target=/var/launcher/launcher.crt", f"{ MASTER }:5000/soshybridhunter/so-fleet-launcher:{ VERSION }", \ "--mount", "type=bind,source=/etc/ssl/certs/intca.crt,target=/var/launcher/launcher.crt", f"{ MANAGER }:5000/soshybridhunter/so-fleet-launcher:{ VERSION }", \
f"{ESECRET}", f"{PACKAGEHOSTNAME}:8090", f"{PACKAGEVERSION}.1.1"], stdout=subprocess.PIPE, encoding='ascii') f"{ESECRET}", f"{PACKAGEHOSTNAME}:8090", f"{PACKAGEVERSION}.1.1"], stdout=subprocess.PIPE, encoding='ascii')
# Update the 'packages-built' timestamp on the webpage (stored in the static pillar) # Update the 'packages-built' timestamp on the webpage (stored in the static pillar)
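Outside of Salt, the subprocess call in the hunk above amounts to a docker run along these lines (a sketch using shell variables that mirror the Python names; the enroll secret, hostname, and versions come from the reactor event data):

  docker run --rm \
    --mount type=bind,source=$LOCAL_SALT_DIR/salt/fleet/packages,target=/output \
    --mount type=bind,source=/etc/ssl/certs/intca.crt,target=/var/launcher/launcher.crt \
    $MANAGER:5000/soshybridhunter/so-fleet-launcher:$VERSION \
    "$ESECRET" "$PACKAGEHOSTNAME:8090" "$PACKAGEVERSION.1.1"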

View File

@@ -13,7 +13,7 @@
# You should have received a copy of the GNU General Public License # You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>. # along with this program. If not, see <http://www.gnu.org/licenses/>.
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %} {% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
{% set MASTER = salt['grains.get']('master') %} {% set MANAGER = salt['grains.get']('manager') %}
# Redis Setup # Redis Setup
redisconfdir: redisconfdir:
@@ -47,7 +47,7 @@ redisconfsync:
so-redis: so-redis:
docker_container.running: docker_container.running:
- image: {{ MASTER }}:5000/soshybridhunter/so-redis:{{ VERSION }} - image: {{ MANAGER }}:5000/soshybridhunter/so-redis:{{ VERSION }}
- hostname: so-redis - hostname: so-redis
- user: socore - user: socore
- port_bindings: - port_bindings:

View File

@@ -1,4 +1,4 @@
{%- set WEBACCESS = salt['pillar.get']('master:url_base', '') -%} {%- set WEBACCESS = salt['pillar.get']('manager:url_base', '') -%}
{%- set KRATOSKEY = salt['pillar.get']('kratos:kratoskey', '') -%} {%- set KRATOSKEY = salt['pillar.get']('kratos:kratoskey', '') -%}
selfservice: selfservice:

View File

@@ -1,4 +1,4 @@
{%- set MASTERIP = salt['pillar.get']('static:masterip', '') -%} {%- set MANAGERIP = salt['pillar.get']('static:managerip', '') -%}
{%- set SENSORONIKEY = salt['pillar.get']('static:sensoronikey', '') -%} {%- set SENSORONIKEY = salt['pillar.get']('static:sensoronikey', '') -%}
{ {
"logFilename": "/opt/sensoroni/logs/sensoroni-server.log", "logFilename": "/opt/sensoroni/logs/sensoroni-server.log",
@@ -12,10 +12,10 @@
"jobDir": "jobs" "jobDir": "jobs"
}, },
"kratos": { "kratos": {
"hostUrl": "http://{{ MASTERIP }}:4434/" "hostUrl": "http://{{ MANAGERIP }}:4434/"
}, },
"elastic": { "elastic": {
"hostUrl": "http://{{ MASTERIP }}:9200", "hostUrl": "http://{{ MANAGERIP }}:9200",
"username": "", "username": "",
"password": "", "password": "",
"verifyCert": false "verifyCert": false

View File

@@ -1,5 +1,5 @@
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %} {% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
{% set MASTER = salt['grains.get']('master') %} {% set MANAGER = salt['grains.get']('manager') %}
socdir: socdir:
file.directory: file.directory:
@@ -33,7 +33,7 @@ socsync:
so-soc: so-soc:
docker_container.running: docker_container.running:
- image: {{ MASTER }}:5000/soshybridhunter/so-soc:{{ VERSION }} - image: {{ MANAGER }}:5000/soshybridhunter/so-soc:{{ VERSION }}
- hostname: soc - hostname: soc
- name: so-soc - name: so-soc
- binds: - binds:
@@ -84,7 +84,7 @@ kratossync:
so-kratos: so-kratos:
docker_container.running: docker_container.running:
- image: {{ MASTER }}:5000/soshybridhunter/so-kratos:{{ VERSION }} - image: {{ MANAGER }}:5000/soshybridhunter/so-kratos:{{ VERSION }}
- hostname: kratos - hostname: kratos
- name: so-kratos - name: so-kratos
- binds: - binds:

View File

@@ -1,10 +1,10 @@
{%- set MASTER = salt['pillar.get']('master:url_base', '') %} {%- set MANAGER = salt['pillar.get']('manager:url_base', '') %}
{%- set HIVEKEY = salt['pillar.get']('static:hivekey', '') %} {%- set HIVEKEY = salt['pillar.get']('static:hivekey', '') %}
{%- set CORTEXKEY = salt['pillar.get']('static:cortexorguserkey', '') %} {%- set CORTEXKEY = salt['pillar.get']('static:cortexorguserkey', '') %}
[es] [es]
es_url = http://{{MASTER}}:9200 es_url = http://{{MANAGER}}:9200
es_ip = {{MASTER}} es_ip = {{MANAGER}}
es_user = YOURESUSER es_user = YOURESUSER
es_pass = YOURESPASS es_pass = YOURESPASS
es_index_pattern = so-* es_index_pattern = so-*
@@ -12,7 +12,7 @@ es_verifycert = no
[cortex] [cortex]
auto_analyze_alerts = no auto_analyze_alerts = no
cortex_url = https://{{MASTER}}/cortex/ cortex_url = https://{{MANAGER}}/cortex/
cortex_key = {{ CORTEXKEY }} cortex_key = {{ CORTEXKEY }}
supported_analyzers = Urlscan_io_Search,CERTatPassiveDNS supported_analyzers = Urlscan_io_Search,CERTatPassiveDNS
@@ -33,7 +33,7 @@ grr_user = YOURGRRUSER
grr_pass = YOURGRRPASS grr_pass = YOURGRRPASS
[hive] [hive]
hive_url = https://{{MASTER}}/thehive/ hive_url = https://{{MANAGER}}/thehive/
hive_key = {{ HIVEKEY }} hive_key = {{ HIVEKEY }}
hive_tlp = 3 hive_tlp = 3
hive_verifycert = no hive_verifycert = no
@@ -60,7 +60,7 @@ slack_url = YOURSLACKWORKSPACE
slack_webhook = YOURSLACKWEBHOOK slack_webhook = YOURSLACKWEBHOOK
[playbook] [playbook]
playbook_url = http://{{MASTER}}:3200/playbook playbook_url = http://{{MANAGER}}:3200/playbook
playbook_key = de6639318502476f2fa5aa06f43f51fb389a3d7f playbook_key = de6639318502476f2fa5aa06f43f51fb389a3d7f
playbook_verifycert = no playbook_verifycert = no
playbook_unit_test_index = playbook-testing playbook_unit_test_index = playbook-testing

View File

@@ -1,4 +1,4 @@
{% set ES = salt['pillar.get']('static:masterip', '') %} {% set ES = salt['pillar.get']('static:managerip', '') %}
alert: modules.so.playbook-es.PlaybookESAlerter alert: modules.so.playbook-es.PlaybookESAlerter
elasticsearch_host: "{{ ES }}:9200" elasticsearch_host: "{{ ES }}:9200"

View File

@@ -1,5 +1,5 @@
{% set es = salt['pillar.get']('static:masterip', '') %} {% set es = salt['pillar.get']('static:managerip', '') %}
{% set hivehost = salt['pillar.get']('static:masterip', '') %} {% set hivehost = salt['pillar.get']('static:managerip', '') %}
{% set hivekey = salt['pillar.get']('static:hivekey', '') %} {% set hivekey = salt['pillar.get']('static:hivekey', '') %}
alert: hivealerter alert: hivealerter

View File

@@ -1,5 +1,5 @@
{% set es = salt['pillar.get']('static:masterip', '') %} {% set es = salt['pillar.get']('static:managerip', '') %}
{% set hivehost = salt['pillar.get']('static:masterip', '') %} {% set hivehost = salt['pillar.get']('static:managerip', '') %}
{% set hivekey = salt['pillar.get']('static:hivekey', '') %} {% set hivekey = salt['pillar.get']('static:hivekey', '') %}
alert: hivealerter alert: hivealerter

Some files were not shown because too many files have changed in this diff.