diff --git a/files/firewall/assigned_hostgroups.local.map.yaml b/files/firewall/assigned_hostgroups.local.map.yaml
index 6dff82823..5d9b662b6 100644
--- a/files/firewall/assigned_hostgroups.local.map.yaml
+++ b/files/firewall/assigned_hostgroups.local.map.yaml
@@ -13,8 +13,8 @@ role:
fleet:
heavynode:
helixsensor:
- master:
- mastersearch:
+ manager:
+ managersearch:
standalone:
searchnode:
sensor:
\ No newline at end of file
diff --git a/files/firewall/hostgroups.local.yaml b/files/firewall/hostgroups.local.yaml
index 6426ae207..794105627 100644
--- a/files/firewall/hostgroups.local.yaml
+++ b/files/firewall/hostgroups.local.yaml
@@ -24,7 +24,7 @@ firewall:
ips:
delete:
insert:
- master:
+ manager:
ips:
delete:
insert:
diff --git a/pillar/docker/config.sls b/pillar/docker/config.sls
index f8426b8cb..dd73f3aa9 100644
--- a/pillar/docker/config.sls
+++ b/pillar/docker/config.sls
@@ -1,12 +1,12 @@
-{%- set FLEETMASTER = salt['pillar.get']('static:fleet_master', False) -%}
+{%- set FLEETMANAGER = salt['pillar.get']('static:fleet_manager', False) -%}
{%- set FLEETNODE = salt['pillar.get']('static:fleet_node', False) -%}
-{% set WAZUH = salt['pillar.get']('master:wazuh', '0') %}
-{% set THEHIVE = salt['pillar.get']('master:thehive', '0') %}
-{% set PLAYBOOK = salt['pillar.get']('master:playbook', '0') %}
-{% set FREQSERVER = salt['pillar.get']('master:freq', '0') %}
-{% set DOMAINSTATS = salt['pillar.get']('master:domainstats', '0') %}
+{% set WAZUH = salt['pillar.get']('manager:wazuh', '0') %}
+{% set THEHIVE = salt['pillar.get']('manager:thehive', '0') %}
+{% set PLAYBOOK = salt['pillar.get']('manager:playbook', '0') %}
+{% set FREQSERVER = salt['pillar.get']('manager:freq', '0') %}
+{% set DOMAINSTATS = salt['pillar.get']('manager:domainstats', '0') %}
{% set BROVER = salt['pillar.get']('static:broversion', 'COMMUNITY') %}
-{% set GRAFANA = salt['pillar.get']('master:grafana', '0') %}
+{% set GRAFANA = salt['pillar.get']('manager:grafana', '0') %}
eval:
containers:
@@ -20,7 +20,7 @@ eval:
- so-soc
- so-kratos
- so-idstools
- {% if FLEETMASTER %}
+ {% if FLEETMANAGER %}
- so-mysql
- so-fleet
- so-redis
@@ -83,7 +83,7 @@ hot_node:
- so-logstash
- so-elasticsearch
- so-curator
-master_search:
+manager_search:
containers:
- so-nginx
- so-telegraf
@@ -99,7 +99,7 @@ master_search:
- so-elastalert
- so-filebeat
- so-soctopus
- {% if FLEETMASTER %}
+ {% if FLEETMANAGER %}
- so-mysql
- so-fleet
- so-redis
@@ -122,7 +122,7 @@ master_search:
{% if DOMAINSTATS != '0' %}
- so-domainstats
{% endif %}
-master:
+manager:
containers:
- so-dockerregistry
- so-nginx
@@ -141,7 +141,7 @@ master:
- so-kibana
- so-elastalert
- so-filebeat
- {% if FLEETMASTER %}
+ {% if FLEETMANAGER %}
- so-mysql
- so-fleet
- so-redis
diff --git a/pillar/firewall/ports.sls b/pillar/firewall/ports.sls
index 29f711c13..4f7c06bec 100644
--- a/pillar/firewall/ports.sls
+++ b/pillar/firewall/ports.sls
@@ -17,7 +17,7 @@ firewall:
- 5644
- 9822
udp:
- master:
+ manager:
ports:
tcp:
- 1514
diff --git a/pillar/logstash/master.sls b/pillar/logstash/manager.sls
similarity index 92%
rename from pillar/logstash/master.sls
rename to pillar/logstash/manager.sls
index 1ff41b43c..9c16d2625 100644
--- a/pillar/logstash/master.sls
+++ b/pillar/logstash/manager.sls
@@ -1,6 +1,6 @@
logstash:
pipelines:
- master:
+ manager:
config:
- so/0009_input_beats.conf
- so/0010_input_hhbeats.conf
diff --git a/pillar/top.sls b/pillar/top.sls
index a691cf028..6eba800a9 100644
--- a/pillar/top.sls
+++ b/pillar/top.sls
@@ -6,10 +6,10 @@ base:
- match: compound
- zeek
- '*_mastersearch or *_heavynode':
+ '*_managersearch or *_heavynode':
- match: compound
- logstash
- - logstash.master
+ - logstash.manager
- logstash.search
'*_sensor':
@@ -18,16 +18,16 @@ base:
- healthcheck.sensor
- minions.{{ grains.id }}
- '*_master or *_mastersearch':
+ '*_manager or *_managersearch':
- match: compound
- static
- data.*
- secrets
- minions.{{ grains.id }}
- '*_master':
+ '*_manager':
- logstash
- - logstash.master
+ - logstash.manager
'*_eval':
- static
@@ -39,7 +39,7 @@ base:
'*_standalone':
- logstash
- - logstash.master
+ - logstash.manager
- logstash.search
- data.*
- brologs
diff --git a/salt/_modules/telegraf.py b/salt/_modules/telegraf.py
index 6fa33f89a..aa98af039 100644
--- a/salt/_modules/telegraf.py
+++ b/salt/_modules/telegraf.py
@@ -6,7 +6,7 @@ import socket
def send(data):
- mainint = __salt__['pillar.get']('sensor:mainint', __salt__['pillar.get']('master:mainint'))
+ mainint = __salt__['pillar.get']('sensor:mainint', __salt__['pillar.get']('manager:mainint'))
mainip = __salt__['grains.get']('ip_interfaces').get(mainint)[0]
dstport = 8094
diff --git a/salt/ca/init.sls b/salt/ca/init.sls
index 60d7adb3a..0def5677e 100644
--- a/salt/ca/init.sls
+++ b/salt/ca/init.sls
@@ -1,4 +1,4 @@
-{% set master = salt['grains.get']('master') %}
+{% set manager = salt['grains.get']('manager') %}
/etc/salt/minion.d/signing_policies.conf:
file.managed:
- source: salt://ca/files/signing_policies.conf
@@ -20,7 +20,7 @@ pki_private_key:
/etc/pki/ca.crt:
x509.certificate_managed:
- signing_private_key: /etc/pki/ca.key
- - CN: {{ master }}
+ - CN: {{ manager }}
- C: US
- ST: Utah
- L: Salt Lake City
diff --git a/salt/common/maps/fleet_master.map.jinja b/salt/common/maps/fleet_manager.map.jinja
similarity index 100%
rename from salt/common/maps/fleet_master.map.jinja
rename to salt/common/maps/fleet_manager.map.jinja
diff --git a/salt/common/maps/master.map.jinja b/salt/common/maps/manager.map.jinja
similarity index 100%
rename from salt/common/maps/master.map.jinja
rename to salt/common/maps/manager.map.jinja
diff --git a/salt/common/maps/mastersearch.map.jinja b/salt/common/maps/managersearch.map.jinja
similarity index 100%
rename from salt/common/maps/mastersearch.map.jinja
rename to salt/common/maps/managersearch.map.jinja
diff --git a/salt/common/maps/so-status.map.jinja b/salt/common/maps/so-status.map.jinja
index f67f4bcd6..f30291f90 100644
--- a/salt/common/maps/so-status.map.jinja
+++ b/salt/common/maps/so-status.map.jinja
@@ -18,14 +18,14 @@
}
},grain='id', merge=salt['pillar.get']('docker')) %}
-{% if role in ['eval', 'mastersearch', 'master', 'standalone'] %}
- {{ append_containers('master', 'grafana', 0) }}
- {{ append_containers('static', 'fleet_master', 0) }}
- {{ append_containers('master', 'wazuh', 0) }}
- {{ append_containers('master', 'thehive', 0) }}
- {{ append_containers('master', 'playbook', 0) }}
- {{ append_containers('master', 'freq', 0) }}
- {{ append_containers('master', 'domainstats', 0) }}
+{% if role in ['eval', 'managersearch', 'manager', 'standalone'] %}
+ {{ append_containers('manager', 'grafana', 0) }}
+ {{ append_containers('static', 'fleet_manager', 0) }}
+ {{ append_containers('manager', 'wazuh', 0) }}
+ {{ append_containers('manager', 'thehive', 0) }}
+ {{ append_containers('manager', 'playbook', 0) }}
+ {{ append_containers('manager', 'freq', 0) }}
+ {{ append_containers('manager', 'domainstats', 0) }}
{% endif %}
{% if role in ['eval', 'heavynode', 'sensor', 'standalone'] %}
@@ -37,7 +37,7 @@
{% endif %}
{% if role == 'searchnode' %}
- {{ append_containers('master', 'wazuh', 0) }}
+ {{ append_containers('manager', 'wazuh', 0) }}
{% endif %}
{% if role == 'sensor' %}
diff --git a/salt/common/tools/sbin/so-bro-logs b/salt/common/tools/sbin/so-bro-logs
index 173d23029..4f55eb7f4 100755
--- a/salt/common/tools/sbin/so-bro-logs
+++ b/salt/common/tools/sbin/so-bro-logs
@@ -11,7 +11,7 @@ bro_logs_enabled() {
}
-whiptail_master_adv_service_brologs() {
+whiptail_manager_adv_service_brologs() {
BLOGS=$(whiptail --title "Security Onion Setup" --checklist "Please Select Logs to Send:" 24 78 12 \
"conn" "Connection Logging" ON \
@@ -54,5 +54,5 @@ whiptail_master_adv_service_brologs() {
"x509" "x.509 Logs" ON 3>&1 1>&2 2>&3 )
}
-whiptail_master_adv_service_brologs
+whiptail_manager_adv_service_brologs
bro_logs_enabled
diff --git a/salt/common/tools/sbin/so-docker-refresh b/salt/common/tools/sbin/so-docker-refresh
index d4796818d..bd9993570 100644
--- a/salt/common/tools/sbin/so-docker-refresh
+++ b/salt/common/tools/sbin/so-docker-refresh
@@ -21,13 +21,13 @@ got_root(){
fi
}
-master_check() {
- # Check to see if this is a master
- MASTERCHECK=$(cat /etc/salt/grains | grep role | awk '{print $2}')
- if [ $MASTERCHECK == 'so-eval' ] || [ $MASTERCHECK == 'so-master' ] || [ $MASTERCHECK == 'so-mastersearch' ] || [ $MASTERCHECK == 'so-standalone' ] || [ $MASTERCHECK == 'so-helix' ]; then
- echo "This is a master. We can proceed"
+manager_check() {
+ # Check to see if this is a manager
+ MANAGERCHECK=$(cat /etc/salt/grains | grep role | awk '{print $2}')
+ if [ $MANAGERCHECK == 'so-eval' ] || [ $MANAGERCHECK == 'so-manager' ] || [ $MANAGERCHECK == 'so-managersearch' ] || [ $MANAGERCHECK == 'so-standalone' ] || [ $MANAGERCHECK == 'so-helix' ]; then
+ echo "This is a manager. We can proceed"
else
- echo "Please run soup on the master. The master controls all updates."
+ echo "Please run soup on the manager. The manager controls all updates."
exit 1
fi
}
@@ -56,13 +56,13 @@ version_check() {
fi
}
got_root
-master_check
+manager_check
version_check
# Use the hostname
HOSTNAME=$(hostname)
# List all the containers
-if [ $MASTERCHECK != 'so-helix' ]; then
+if [ $MANAGERCHECK != 'so-helix' ]; then
TRUSTED_CONTAINERS=( \
"so-acng:$VERSION" \
"so-thehive-cortex:$VERSION" \
diff --git a/salt/common/tools/sbin/so-elastic-clear b/salt/common/tools/sbin/so-elastic-clear
index f7030bc13..04c153f85 100755
--- a/salt/common/tools/sbin/so-elastic-clear
+++ b/salt/common/tools/sbin/so-elastic-clear
@@ -14,7 +14,7 @@
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-{%- set MASTERIP = salt['pillar.get']('static:masterip', '') -%}
+{%- set MANAGERIP = salt['pillar.get']('static:managerip', '') -%}
. /usr/sbin/so-common
SKIP=0
@@ -50,7 +50,7 @@ done
if [ $SKIP -ne 1 ]; then
# List indices
echo
- curl {{ MASTERIP }}:9200/_cat/indices?v
+ curl {{ MANAGERIP }}:9200/_cat/indices?v
echo
# Inform user we are about to delete all data
echo
@@ -89,10 +89,10 @@ fi
# Delete data
echo "Deleting data..."
-INDXS=$(curl -s -XGET {{ MASTERIP }}:9200/_cat/indices?v | egrep 'logstash|elastalert|so-' | awk '{ print $3 }')
+INDXS=$(curl -s -XGET {{ MANAGERIP }}:9200/_cat/indices?v | egrep 'logstash|elastalert|so-' | awk '{ print $3 }')
for INDX in ${INDXS}
do
- curl -XDELETE "{{ MASTERIP }}:9200/${INDX}" > /dev/null 2>&1
+ curl -XDELETE "{{ MANAGERIP }}:9200/${INDX}" > /dev/null 2>&1
done
#Start Logstash/Filebeat
diff --git a/salt/common/tools/sbin/so-elastic-download b/salt/common/tools/sbin/so-elastic-download
index 4c3406c74..b52d88c45 100755
--- a/salt/common/tools/sbin/so-elastic-download
+++ b/salt/common/tools/sbin/so-elastic-download
@@ -1,5 +1,5 @@
#!/bin/bash
-MASTER=MASTER
+MANAGER=MANAGER
VERSION="HH1.1.4"
TRUSTED_CONTAINERS=( \
"so-nginx:$VERSION" \
@@ -37,7 +37,7 @@ do
echo "Downloading $i"
docker pull --disable-content-trust=false docker.io/soshybridhunter/$i
# Tag it with the new registry destination
- docker tag soshybridhunter/$i $MASTER:5000/soshybridhunter/$i
- docker push $MASTER:5000/soshybridhunter/$i
+ docker tag soshybridhunter/$i $MANAGER:5000/soshybridhunter/$i
+ docker push $MANAGER:5000/soshybridhunter/$i
docker rmi soshybridhunter/$i
done
diff --git a/salt/common/tools/sbin/so-elasticsearch-indices-rw b/salt/common/tools/sbin/so-elasticsearch-indices-rw
index d49fd5f1b..6e9eebe47 100644
--- a/salt/common/tools/sbin/so-elasticsearch-indices-rw
+++ b/salt/common/tools/sbin/so-elasticsearch-indices-rw
@@ -15,7 +15,7 @@
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-IP={{ salt['grains.get']('ip_interfaces').get(salt['pillar.get']('sensor:mainint', salt['pillar.get']('master:mainint', salt['pillar.get']('elasticsearch:mainint', salt['pillar.get']('host:mainint')))))[0] }}
+IP={{ salt['grains.get']('ip_interfaces').get(salt['pillar.get']('sensor:mainint', salt['pillar.get']('manager:mainint', salt['pillar.get']('elasticsearch:mainint', salt['pillar.get']('host:mainint')))))[0] }}
ESPORT=9200
THEHIVEESPORT=9400
diff --git a/salt/common/tools/sbin/so-elasticsearch-templates b/salt/common/tools/sbin/so-elasticsearch-templates
index 829e2a68d..6b3e19d30 100755
--- a/salt/common/tools/sbin/so-elasticsearch-templates
+++ b/salt/common/tools/sbin/so-elasticsearch-templates
@@ -1,4 +1,4 @@
-{% set MASTERIP = salt['pillar.get']('master:mainip', '') %}
+{% set MANAGERIP = salt['pillar.get']('manager:mainip', '') %}
#!/bin/bash
# Copyright 2014,2015,2016,2017,2018,2019 Security Onion Solutions, LLC
#
@@ -16,7 +16,7 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
default_salt_dir=/opt/so/saltstack/default
-ELASTICSEARCH_HOST="{{ MASTERIP}}"
+ELASTICSEARCH_HOST="{{ MANAGERIP }}"
ELASTICSEARCH_PORT=9200
#ELASTICSEARCH_AUTH=""
diff --git a/salt/common/tools/sbin/so-import-pcap b/salt/common/tools/sbin/so-import-pcap
index 9e54820e0..1d1fd5f70 100755
--- a/salt/common/tools/sbin/so-import-pcap
+++ b/salt/common/tools/sbin/so-import-pcap
@@ -15,9 +15,9 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-{% set MASTER = salt['grains.get']('master') %}
+{% set MANAGER = salt['grains.get']('manager') %}
{% set VERSION = salt['pillar.get']('static:soversion') %}
-{%- set MASTERIP = salt['pillar.get']('static:masterip') -%}
+{%- set MANAGERIP = salt['pillar.get']('static:managerip') -%}
function usage {
cat << EOF
@@ -30,13 +30,13 @@ EOF
function pcapinfo() {
PCAP=$1
ARGS=$2
- docker run --rm -v $PCAP:/input.pcap --entrypoint capinfos {{ MASTER }}:5000/soshybridhunter/so-pcaptools:{{ VERSION }} /input.pcap $ARGS
+ docker run --rm -v $PCAP:/input.pcap --entrypoint capinfos {{ MANAGER }}:5000/soshybridhunter/so-pcaptools:{{ VERSION }} /input.pcap $ARGS
}
function pcapfix() {
PCAP=$1
PCAP_OUT=$2
- docker run --rm -v $PCAP:/input.pcap -v $PCAP_OUT:$PCAP_OUT --entrypoint pcapfix {{ MASTER }}:5000/soshybridhunter/so-pcaptools:{{ VERSION }} /input.pcap -o $PCAP_OUT > /dev/null 2>&1
+ docker run --rm -v $PCAP:/input.pcap -v $PCAP_OUT:$PCAP_OUT --entrypoint pcapfix {{ MANAGER }}:5000/soshybridhunter/so-pcaptools:{{ VERSION }} /input.pcap -o $PCAP_OUT > /dev/null 2>&1
}
function suricata() {
@@ -57,7 +57,7 @@ function suricata() {
-v ${NSM_PATH}/:/nsm/:rw \
-v $PCAP:/input.pcap:ro \
-v /opt/so/conf/suricata/bpf:/etc/suricata/bpf:ro \
- {{ MASTER }}:5000/soshybridhunter/so-suricata:{{ VERSION }} \
+ {{ MANAGER }}:5000/soshybridhunter/so-suricata:{{ VERSION }} \
--runmode single -k none -r /input.pcap > $LOG_PATH/console.log 2>&1
}
@@ -85,7 +85,7 @@ function zeek() {
-v /opt/so/conf/zeek/bpf:/opt/zeek/etc/bpf:ro \
--entrypoint /opt/zeek/bin/zeek \
-w /nsm/zeek/logs \
- {{ MASTER }}:5000/soshybridhunter/so-zeek:{{ VERSION }} \
+ {{ MANAGER }}:5000/soshybridhunter/so-zeek:{{ VERSION }} \
-C -r /input.pcap local > $NSM_PATH/logs/console.log 2>&1
}
@@ -212,7 +212,7 @@ cat << EOF
Import complete!
You can use the following hyperlink to view data in the time range of your import. You can triple-click to quickly highlight the entire hyperlink and you can then copy it into your browser:
-https://{{ MASTERIP }}/kibana/app/kibana#/dashboard/a8411b30-6d03-11ea-b301-3d6c35840645?_g=(refreshInterval:(display:Off,pause:!f,value:0),time:(from:'${START_OLDEST}T00:00:00.000Z',mode:absolute,to:'${END_NEWEST}T00:00:00.000Z'))
+https://{{ MANAGERIP }}/kibana/app/kibana#/dashboard/a8411b30-6d03-11ea-b301-3d6c35840645?_g=(refreshInterval:(display:Off,pause:!f,value:0),time:(from:'${START_OLDEST}T00:00:00.000Z',mode:absolute,to:'${END_NEWEST}T00:00:00.000Z'))
or you can manually set your Time Range to be:
From: $START_OLDEST To: $END_NEWEST
diff --git a/salt/common/tools/sbin/so-kibana-config-export b/salt/common/tools/sbin/so-kibana-config-export
index f64e12a0e..8ee3f59b5 100755
--- a/salt/common/tools/sbin/so-kibana-config-export
+++ b/salt/common/tools/sbin/so-kibana-config-export
@@ -1,9 +1,9 @@
#!/bin/bash
#
-# {%- set FLEET_MASTER = salt['pillar.get']('static:fleet_master', False) -%}
+# {%- set FLEET_MANAGER = salt['pillar.get']('static:fleet_manager', False) -%}
# {%- set FLEET_NODE = salt['pillar.get']('static:fleet_node', False) -%}
# {%- set FLEET_IP = salt['pillar.get']('static:fleet_ip', '') %}
-# {%- set MASTER = salt['pillar.get']('master:url_base', '') %}
+# {%- set MANAGER = salt['pillar.get']('manager:url_base', '') %}
#
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
@@ -20,7 +20,7 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-KIBANA_HOST={{ MASTER }}
+KIBANA_HOST={{ MANAGER }}
KSO_PORT=5601
OUTFILE="saved_objects.ndjson"
curl -s -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -XPOST $KIBANA_HOST:$KSO_PORT/api/saved_objects/_export -d '{ "type": [ "index-pattern", "config", "visualization", "dashboard", "search" ], "excludeExportDetails": false }' > $OUTFILE
@@ -29,7 +29,7 @@ curl -s -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -XPOST $KIBANA_H
sed -i "s/$KIBANA_HOST/PLACEHOLDER/g" $OUTFILE
# Clean up for Fleet, if applicable
-# {% if FLEET_NODE or FLEET_MASTER %}
+# {% if FLEET_NODE or FLEET_MANAGER %}
# Fleet IP
-sed -i "s/{{ MASTER }}/FLEETPLACEHOLDER/g" $OUTFILE
+sed -i "s/{{ MANAGER }}/FLEETPLACEHOLDER/g" $OUTFILE
# {% endif %}
diff --git a/salt/curator/files/action/close.yml b/salt/curator/files/action/close.yml
index d0bd1d5d1..fdbe9d450 100644
--- a/salt/curator/files/action/close.yml
+++ b/salt/curator/files/action/close.yml
@@ -1,7 +1,7 @@
{%- if grains['role'] in ['so-node', 'so-searchnode', 'so-heavynode'] %}
{%- set cur_close_days = salt['pillar.get']('elasticsearch:cur_close_days', '') -%}
-{%- elif grains['role'] in ['so-eval', 'so-mastersearch', 'so-standalone'] %}
- {%- set cur_close_days = salt['pillar.get']('master:cur_close_days', '') -%}
+{%- elif grains['role'] in ['so-eval', 'so-managersearch', 'so-standalone'] %}
+ {%- set cur_close_days = salt['pillar.get']('manager:cur_close_days', '') -%}
{%- endif -%}
---
diff --git a/salt/curator/files/action/delete.yml b/salt/curator/files/action/delete.yml
index f24f0b781..5d7f26861 100644
--- a/salt/curator/files/action/delete.yml
+++ b/salt/curator/files/action/delete.yml
@@ -1,7 +1,7 @@
{%- if grains['role'] in ['so-node', 'so-searchnode', 'so-heavynode'] %}
{%- set log_size_limit = salt['pillar.get']('elasticsearch:log_size_limit', '') -%}
-{%- elif grains['role'] in ['so-eval', 'so-mastersearch', 'so-standalone'] %}
- {%- set log_size_limit = salt['pillar.get']('master:log_size_limit', '') -%}
+{%- elif grains['role'] in ['so-eval', 'so-managersearch', 'so-standalone'] %}
+ {%- set log_size_limit = salt['pillar.get']('manager:log_size_limit', '') -%}
{%- endif %}
---
# Remember, leave a key empty if there is no value. None will be a string,
diff --git a/salt/curator/files/bin/so-curator-closed-delete-delete b/salt/curator/files/bin/so-curator-closed-delete-delete
index 4a6458394..0d894db2f 100755
--- a/salt/curator/files/bin/so-curator-closed-delete-delete
+++ b/salt/curator/files/bin/so-curator-closed-delete-delete
@@ -5,10 +5,10 @@
{%- set ELASTICSEARCH_HOST = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- set ELASTICSEARCH_PORT = salt['pillar.get']('elasticsearch:es_port', '') -%}
{%- set LOG_SIZE_LIMIT = salt['pillar.get']('elasticsearch:log_size_limit', '') -%}
-{%- elif grains['role'] in ['so-eval', 'so-mastersearch', 'so-standalone'] %}
- {%- set ELASTICSEARCH_HOST = salt['pillar.get']('master:mainip', '') -%}
- {%- set ELASTICSEARCH_PORT = salt['pillar.get']('master:es_port', '') -%}
- {%- set LOG_SIZE_LIMIT = salt['pillar.get']('master:log_size_limit', '') -%}
+{%- elif grains['role'] in ['so-eval', 'so-managersearch', 'so-standalone'] %}
+ {%- set ELASTICSEARCH_HOST = salt['pillar.get']('manager:mainip', '') -%}
+ {%- set ELASTICSEARCH_PORT = salt['pillar.get']('manager:es_port', '') -%}
+ {%- set LOG_SIZE_LIMIT = salt['pillar.get']('manager:log_size_limit', '') -%}
{%- endif -%}
# Copyright 2014,2015,2016,2017,2018 Security Onion Solutions, LLC
diff --git a/salt/curator/files/curator.yml b/salt/curator/files/curator.yml
index e9b8a63ba..3b019923e 100644
--- a/salt/curator/files/curator.yml
+++ b/salt/curator/files/curator.yml
@@ -1,7 +1,7 @@
{% if grains['role'] in ['so-node', 'so-heavynode'] %}
{%- set elasticsearch = salt['pillar.get']('elasticsearch:mainip', '') -%}
-{% elif grains['role'] in ['so-eval', 'so-mastersearch', 'so-standalone'] %}
- {%- set elasticsearch = salt['pillar.get']('master:mainip', '') -%}
+{% elif grains['role'] in ['so-eval', 'so-managersearch', 'so-standalone'] %}
+ {%- set elasticsearch = salt['pillar.get']('manager:mainip', '') -%}
{%- endif %}
---
diff --git a/salt/curator/init.sls b/salt/curator/init.sls
index 8d3147242..e43f7e91b 100644
--- a/salt/curator/init.sls
+++ b/salt/curator/init.sls
@@ -1,6 +1,6 @@
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
-{% set MASTER = salt['grains.get']('master') %}
-{% if grains['role'] in ['so-eval', 'so-node', 'so-mastersearch', 'so-heavynode', 'so-standalone'] %}
+{% set MANAGER = salt['grains.get']('manager') %}
+{% if grains['role'] in ['so-eval', 'so-node', 'so-managersearch', 'so-heavynode', 'so-standalone'] %}
# Curator
# Create the group
curatorgroup:
@@ -119,7 +119,7 @@ so-curatordeletecron:
so-curator:
docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-curator:{{ VERSION }}
+ - image: {{ MANAGER }}:5000/soshybridhunter/so-curator:{{ VERSION }}
- hostname: curator
- name: so-curator
- user: curator
diff --git a/salt/deprecated-bro/files/local.bro b/salt/deprecated-bro/files/local.bro
index afe4b94ca..30b216548 100644
--- a/salt/deprecated-bro/files/local.bro
+++ b/salt/deprecated-bro/files/local.bro
@@ -127,11 +127,11 @@
@load policy/hassh
# You can load your own intel into:
-# /opt/so/saltstack/bro/policy/intel/ on the master
+# /opt/so/saltstack/bro/policy/intel/ on the manager
@load intel
# Load a custom Bro policy
-# /opt/so/saltstack/bro/policy/custom/ on the master
+# /opt/so/saltstack/bro/policy/custom/ on the manager
#@load custom/somebropolicy.bro
# Write logs in JSON
diff --git a/salt/deprecated-bro/files/local.bro.community b/salt/deprecated-bro/files/local.bro.community
index 2ae12d7f2..76b18587f 100644
--- a/salt/deprecated-bro/files/local.bro.community
+++ b/salt/deprecated-bro/files/local.bro.community
@@ -121,11 +121,11 @@
@load policy/ja3
# You can load your own intel into:
-# /opt/so/saltstack/bro/policy/intel/ on the master
+# /opt/so/saltstack/bro/policy/intel/ on the manager
@load intel
# Load a custom Bro policy
-# /opt/so/saltstack/bro/policy/custom/ on the master
+# /opt/so/saltstack/bro/policy/custom/ on the manager
#@load custom/somebropolicy.bro
# Use JSON
diff --git a/salt/elastalert/files/elastalert_config.yaml b/salt/elastalert/files/elastalert_config.yaml
index 2ecf08ffa..de3db8b0a 100644
--- a/salt/elastalert/files/elastalert_config.yaml
+++ b/salt/elastalert/files/elastalert_config.yaml
@@ -1,5 +1,5 @@
-{% set esip = salt['pillar.get']('master:mainip', '') %}
-{% set esport = salt['pillar.get']('master:es_port', '') %}
+{% set esip = salt['pillar.get']('manager:mainip', '') %}
+{% set esport = salt['pillar.get']('manager:es_port', '') %}
# This is the folder that contains the rule yaml files
# Any .yaml file will be loaded as a rule
rules_folder: /opt/elastalert/rules/
diff --git a/salt/elastalert/files/rules/so/suricata_thehive.yaml b/salt/elastalert/files/rules/so/suricata_thehive.yaml
index 82698b7a8..cd887c9f9 100644
--- a/salt/elastalert/files/rules/so/suricata_thehive.yaml
+++ b/salt/elastalert/files/rules/so/suricata_thehive.yaml
@@ -1,7 +1,7 @@
-{% set es = salt['pillar.get']('static:masterip', '') %}
-{% set hivehost = salt['pillar.get']('static:masterip', '') %}
+{% set es = salt['pillar.get']('static:managerip', '') %}
+{% set hivehost = salt['pillar.get']('static:managerip', '') %}
{% set hivekey = salt['pillar.get']('static:hivekey', '') %}
-{% set MASTER = salt['pillar.get']('master:url_base', '') %}
+{% set MANAGER = salt['pillar.get']('manager:url_base', '') %}
# Elastalert rule to forward Suricata alerts from Security Onion to a specified TheHive instance.
#
@@ -39,7 +39,7 @@ hive_alert_config:
title: '{match[rule][name]}'
type: 'NIDS'
source: 'SecurityOnion'
- description: "`SOC Hunt Pivot:` \n\n \n\n `Kibana Dashboard Pivot:` \n\n \n\n `IPs: `{match[source][ip]}:{match[source][port]} --> {match[destination][ip]}:{match[destination][port]} \n\n `Signature:`{match[rule][rule]}"
+ description: "`SOC Hunt Pivot:` \n\n \n\n `Kibana Dashboard Pivot:` \n\n \n\n `IPs: `{match[source][ip]}:{match[source][port]} --> {match[destination][ip]}:{match[destination][port]} \n\n `Signature:`{match[rule][rule]}"
severity: 2
tags: ['{match[rule][uuid]}','{match[source][ip]}','{match[destination][ip]}']
tlp: 3
diff --git a/salt/elastalert/files/rules/so/wazuh_thehive.yaml b/salt/elastalert/files/rules/so/wazuh_thehive.yaml
index 1e275dce8..ccb79e1e5 100644
--- a/salt/elastalert/files/rules/so/wazuh_thehive.yaml
+++ b/salt/elastalert/files/rules/so/wazuh_thehive.yaml
@@ -1,7 +1,7 @@
-{% set es = salt['pillar.get']('static:masterip', '') %}
-{% set hivehost = salt['pillar.get']('static:masterip', '') %}
+{% set es = salt['pillar.get']('static:managerip', '') %}
+{% set hivehost = salt['pillar.get']('static:managerip', '') %}
{% set hivekey = salt['pillar.get']('static:hivekey', '') %}
-{% set MASTER = salt['pillar.get']('master:url_base', '') %}
+{% set MANAGER = salt['pillar.get']('manager:url_base', '') %}
# Elastalert rule to forward high level Wazuh alerts from Security Onion to a specified TheHive instance.
#
@@ -38,7 +38,7 @@ hive_alert_config:
title: '{match[rule][name]}'
type: 'wazuh'
source: 'SecurityOnion'
- description: "`SOC Hunt Pivot:` \n\n \n\n `Kibana Dashboard Pivot:` \n\n "
+ description: "`SOC Hunt Pivot:` \n\n \n\n `Kibana Dashboard Pivot:` \n\n "
severity: 2
tags: ['{match[rule][id]}','{match[host][name]}']
tlp: 3
diff --git a/salt/elastalert/init.sls b/salt/elastalert/init.sls
index b79acf77f..37c0e903e 100644
--- a/salt/elastalert/init.sls
+++ b/salt/elastalert/init.sls
@@ -13,12 +13,12 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
-{% set MASTER = salt['grains.get']('master') %}
+{% set MANAGER = salt['grains.get']('manager') %}
-{% if grains['role'] in ['so-eval','so-mastersearch', 'so-master', 'so-standalone'] %}
- {% set esalert = salt['pillar.get']('master:elastalert', '1') %}
- {% set esip = salt['pillar.get']('master:mainip', '') %}
- {% set esport = salt['pillar.get']('master:es_port', '') %}
+{% if grains['role'] in ['so-eval','so-managersearch', 'so-manager', 'so-standalone'] %}
+ {% set esalert = salt['pillar.get']('manager:elastalert', '1') %}
+ {% set esip = salt['pillar.get']('manager:mainip', '') %}
+ {% set esport = salt['pillar.get']('manager:es_port', '') %}
{% elif grains['role'] == 'so-node' %}
{% set esalert = salt['pillar.get']('elasticsearch:elastalert', '0') %}
{% endif %}
@@ -101,7 +101,7 @@ elastaconf:
so-elastalert:
docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-elastalert:{{ VERSION }}
+ - image: {{ MANAGER }}:5000/soshybridhunter/so-elastalert:{{ VERSION }}
- hostname: elastalert
- name: so-elastalert
- user: elastalert
diff --git a/salt/elasticsearch/files/elasticsearch.yml b/salt/elasticsearch/files/elasticsearch.yml
index 02dd42aa5..d5acdbcdb 100644
--- a/salt/elasticsearch/files/elasticsearch.yml
+++ b/salt/elasticsearch/files/elasticsearch.yml
@@ -1,5 +1,5 @@
-{% if grains['role'] == 'so-master' or grains['role'] == 'so-eval' %}
-{%- set esclustername = salt['pillar.get']('master:esclustername', '') %}
+{% if grains['role'] == 'so-manager' or grains['role'] == 'so-eval' %}
+{%- set esclustername = salt['pillar.get']('manager:esclustername', '') %}
cluster.name: "{{ esclustername }}"
network.host: 0.0.0.0
diff --git a/salt/elasticsearch/init.sls b/salt/elasticsearch/init.sls
index 7292c055e..be0752665 100644
--- a/salt/elasticsearch/init.sls
+++ b/salt/elasticsearch/init.sls
@@ -13,7 +13,7 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
-{% set MASTER = salt['grains.get']('master') %}
+{% set MANAGER = salt['grains.get']('manager') %}
{% set FEATURES = salt['pillar.get']('elastic:features', False) %}
{% if FEATURES %}
@@ -22,9 +22,9 @@
{% set FEATURES = '' %}
{% endif %}
-{% if grains['role'] in ['so-eval','so-mastersearch', 'so-master', 'so-standalone'] %}
- {% set esclustername = salt['pillar.get']('master:esclustername', '') %}
- {% set esheap = salt['pillar.get']('master:esheap', '') %}
+{% if grains['role'] in ['so-eval','so-managersearch', 'so-manager', 'so-standalone'] %}
+ {% set esclustername = salt['pillar.get']('manager:esclustername', '') %}
+ {% set esheap = salt['pillar.get']('manager:esheap', '') %}
{% elif grains['role'] in ['so-node','so-heavynode'] %}
{% set esclustername = salt['pillar.get']('elasticsearch:esclustername', '') %}
{% set esheap = salt['pillar.get']('elasticsearch:esheap', '') %}
@@ -101,7 +101,7 @@ eslogdir:
so-elasticsearch:
docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-elasticsearch:{{ VERSION }}{{ FEATURES }}
+ - image: {{ MANAGER }}:5000/soshybridhunter/so-elasticsearch:{{ VERSION }}{{ FEATURES }}
- hostname: elasticsearch
- name: so-elasticsearch
- user: elasticsearch
@@ -141,7 +141,7 @@ so-elasticsearch-pipelines:
- file: esyml
- file: so-elasticsearch-pipelines-file
-{% if grains['role'] in ['so-master', 'so-eval', 'so-mastersearch', 'so-standalone'] %}
+{% if grains['role'] in ['so-manager', 'so-eval', 'so-managersearch', 'so-standalone'] %}
so-elasticsearch-templates:
cmd.run:
- name: /usr/sbin/so-elasticsearch-templates
diff --git a/salt/filebeat/etc/filebeat.yml b/salt/filebeat/etc/filebeat.yml
index 6d33c1bdf..37f0ec9f4 100644
--- a/salt/filebeat/etc/filebeat.yml
+++ b/salt/filebeat/etc/filebeat.yml
@@ -1,7 +1,7 @@
{%- if grains.role == 'so-heavynode' %}
-{%- set MASTER = salt['pillar.get']('sensor:mainip' '') %}
+{%- set MANAGER = salt['pillar.get']('sensor:mainip', '') %}
{%- else %}
-{%- set MASTER = grains['master'] %}
+{%- set MANAGER = grains['manager'] %}
{%- endif %}
@@ -9,7 +9,7 @@
{%- set BROVER = salt['pillar.get']('static:broversion', 'COMMUNITY') %}
{%- set WAZUHENABLED = salt['pillar.get']('static:wazuh', '0') %}
{%- set STRELKAENABLED = salt['pillar.get']('strelka:enabled', '0') %}
-{%- set FLEETMASTER = salt['pillar.get']('static:fleet_master', False) -%}
+{%- set FLEETMANAGER = salt['pillar.get']('static:fleet_manager', False) -%}
{%- set FLEETNODE = salt['pillar.get']('static:fleet_node', False) -%}
name: {{ HOSTNAME }}
@@ -214,7 +214,7 @@ filebeat.inputs:
{%- endif %}
-{%- if FLEETMASTER or FLEETNODE %}
+{%- if FLEETMANAGER or FLEETNODE %}
- type: log
paths:
@@ -252,7 +252,7 @@ output.{{ type }}:
{%- if grains['role'] == "so-eval" %}
output.elasticsearch:
enabled: true
- hosts: ["{{ MASTER }}:9200"]
+ hosts: ["{{ MANAGER }}:9200"]
pipelines:
- pipeline: "%{[module]}.%{[dataset]}"
indices:
@@ -280,7 +280,7 @@ output.logstash:
enabled: true
# The Logstash hosts
- hosts: ["{{ MASTER }}:5644"]
+ hosts: ["{{ MANAGER }}:5644"]
# Number of workers per Logstash host.
#worker: 1
diff --git a/salt/filebeat/init.sls b/salt/filebeat/init.sls
index 6fc06f582..68b488569 100644
--- a/salt/filebeat/init.sls
+++ b/salt/filebeat/init.sls
@@ -12,8 +12,8 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
-{% set MASTER = salt['grains.get']('master') %}
-{% set MASTERIP = salt['pillar.get']('static:masterip', '') %}
+{% set MANAGER = salt['grains.get']('manager') %}
+{% set MANAGERIP = salt['pillar.get']('static:managerip', '') %}
{% set FEATURES = salt['pillar.get']('elastic:features', False) %}
{% if FEATURES %}
{% set FEATURES = "-features" %}
@@ -51,10 +51,10 @@ filebeatconfsync:
OUTPUT: {{ salt['pillar.get']('filebeat:config:output', {}) }}
so-filebeat:
docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-filebeat:{{ VERSION }}{{ FEATURES }}
+ - image: {{ MANAGER }}:5000/soshybridhunter/so-filebeat:{{ VERSION }}{{ FEATURES }}
- hostname: so-filebeat
- user: root
- - extra_hosts: {{ MASTER }}:{{ MASTERIP }}
+ - extra_hosts: {{ MANAGER }}:{{ MANAGERIP }}
- binds:
- /nsm:/nsm:ro
- /opt/so/log/filebeat:/usr/share/filebeat/logs:rw
diff --git a/salt/firewall/assigned_hostgroups.map.yaml b/salt/firewall/assigned_hostgroups.map.yaml
index 07f7d1650..2500c604a 100644
--- a/salt/firewall/assigned_hostgroups.map.yaml
+++ b/salt/firewall/assigned_hostgroups.map.yaml
@@ -6,7 +6,7 @@ role:
chain:
DOCKER-USER:
hostgroups:
- master:
+ manager:
portgroups:
- {{ portgroups.wazuh_agent }}
- {{ portgroups.wazuh_api }}
@@ -85,12 +85,12 @@ role:
- {{ portgroups.all }}
minion:
portgroups:
- - {{ portgroups.salt_master }}
- master:
+ - {{ portgroups.salt_manager }}
+ manager:
chain:
DOCKER-USER:
hostgroups:
- master:
+ manager:
portgroups:
- {{ portgroups.wazuh_agent }}
- {{ portgroups.wazuh_api }}
@@ -166,12 +166,12 @@ role:
- {{ portgroups.all }}
minion:
portgroups:
- - {{ portgroups.salt_master }}
- mastersearch:
+ - {{ portgroups.salt_manager }}
+ managersearch:
chain:
DOCKER-USER:
hostgroups:
- master:
+ manager:
portgroups:
- {{ portgroups.wazuh_agent }}
- {{ portgroups.wazuh_api }}
@@ -247,12 +247,12 @@ role:
- {{ portgroups.all }}
minion:
portgroups:
- - {{ portgroups.salt_master }}
+ - {{ portgroups.salt_manager }}
standalone:
chain:
DOCKER-USER:
hostgroups:
- master:
+ manager:
portgroups:
- {{ portgroups.wazuh_agent }}
- {{ portgroups.wazuh_api }}
@@ -328,12 +328,12 @@ role:
- {{ portgroups.all }}
minion:
portgroups:
- - {{ portgroups.salt_master }}
+ - {{ portgroups.salt_manager }}
helixsensor:
chain:
DOCKER-USER:
hostgroups:
- master:
+ manager:
portgroups:
- {{ portgroups.wazuh_agent }}
- {{ portgroups.playbook }}
@@ -391,12 +391,12 @@ role:
- {{ portgroups.all }}
minion:
portgroups:
- - {{ portgroups.salt_master }}
+ - {{ portgroups.salt_manager }}
searchnode:
chain:
DOCKER-USER:
hostgroups:
- master:
+ manager:
portgroups:
- {{ portgroups.elasticsearch_node }}
dockernet:
diff --git a/salt/firewall/hostgroups.yaml b/salt/firewall/hostgroups.yaml
index bd303001b..5ff6b900b 100644
--- a/salt/firewall/hostgroups.yaml
+++ b/salt/firewall/hostgroups.yaml
@@ -19,4 +19,4 @@ firewall:
ips:
delete:
insert:
- - {{ salt['grains.get']('ip_interfaces').get(salt['pillar.get']('sensor:mainint', salt['pillar.get']('master:mainint', salt['pillar.get']('elasticsearch:mainint', salt['pillar.get']('host:mainint')))))[0] }}
\ No newline at end of file
+ - {{ salt['grains.get']('ip_interfaces').get(salt['pillar.get']('sensor:mainint', salt['pillar.get']('manager:mainint', salt['pillar.get']('elasticsearch:mainint', salt['pillar.get']('host:mainint')))))[0] }}
\ No newline at end of file
diff --git a/salt/firewall/portgroups.yaml b/salt/firewall/portgroups.yaml
index 7f09d1139..b8d86f253 100644
--- a/salt/firewall/portgroups.yaml
+++ b/salt/firewall/portgroups.yaml
@@ -61,7 +61,7 @@ firewall:
redis:
tcp:
- 6379
- salt_master:
+ salt_manager:
tcp:
- 4505
- 4506
diff --git a/salt/fleet/event_gen-packages.sls b/salt/fleet/event_gen-packages.sls
index e353eaf92..7c0ec9091 100644
--- a/salt/fleet/event_gen-packages.sls
+++ b/salt/fleet/event_gen-packages.sls
@@ -1,4 +1,4 @@
-{% set MASTER = salt['grains.get']('master') %}
+{% set MANAGER = salt['grains.get']('manager') %}
{% set ENROLLSECRET = salt['pillar.get']('secrets:fleet_enroll-secret') %}
{% set CURRENTPACKAGEVERSION = salt['pillar.get']('static:fleet_packages-version') %}
{% set VERSION = salt['pillar.get']('static:soversion') %}
@@ -19,6 +19,6 @@ so/fleet:
mainip: {{ grains.host }}
enroll-secret: {{ ENROLLSECRET }}
current-package-version: {{ CURRENTPACKAGEVERSION }}
- master: {{ MASTER }}
+ manager: {{ MANAGER }}
version: {{ VERSION }}
\ No newline at end of file
diff --git a/salt/fleet/init.sls b/salt/fleet/init.sls
index 65f32e213..d597f44d8 100644
--- a/salt/fleet/init.sls
+++ b/salt/fleet/init.sls
@@ -2,14 +2,14 @@
{%- set FLEETPASS = salt['pillar.get']('secrets:fleet', None) -%}
{%- set FLEETJWT = salt['pillar.get']('secrets:fleet_jwt', None) -%}
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
-{% set MASTER = salt['grains.get']('master') %}
+{% set MANAGER = salt['grains.get']('manager') %}
{% set FLEETARCH = salt['grains.get']('role') %}
{% if FLEETARCH == "so-fleet" %}
{% set MAININT = salt['pillar.get']('host:mainint') %}
{% set MAINIP = salt['grains.get']('ip_interfaces').get(MAININT)[0] %}
{% else %}
- {% set MAINIP = salt['pillar.get']('static:masterip') %}
+ {% set MAINIP = salt['pillar.get']('static:managerip') %}
{% endif %}
include:
@@ -105,7 +105,7 @@ fleet_password_none:
so-fleet:
docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-fleet:{{ VERSION }}
+ - image: {{ MANAGER }}:5000/soshybridhunter/so-fleet:{{ VERSION }}
- hostname: so-fleet
- port_bindings:
- 0.0.0.0:8080:8080
diff --git a/salt/fleet/install_package.sls b/salt/fleet/install_package.sls
index 3787d6111..d09de540c 100644
--- a/salt/fleet/install_package.sls
+++ b/salt/fleet/install_package.sls
@@ -1,4 +1,4 @@
-{%- set FLEETMASTER = salt['pillar.get']('static:fleet_master', False) -%}
+{%- set FLEETMANAGER = salt['pillar.get']('static:fleet_manager', False) -%}
{%- set FLEETNODE = salt['pillar.get']('static:fleet_node', False) -%}
{%- set FLEETHOSTNAME = salt['pillar.get']('static:fleet_hostname', False) -%}
{%- set FLEETIP = salt['pillar.get']('static:fleet_ip', False) -%}
diff --git a/salt/grafana/dashboards/master/master.json b/salt/grafana/dashboards/manager/manager.json
similarity index 100%
rename from salt/grafana/dashboards/master/master.json
rename to salt/grafana/dashboards/manager/manager.json
diff --git a/salt/grafana/dashboards/mastersearch/mastersearch.json b/salt/grafana/dashboards/managersearch/managersearch.json
similarity index 100%
rename from salt/grafana/dashboards/mastersearch/mastersearch.json
rename to salt/grafana/dashboards/managersearch/managersearch.json
diff --git a/salt/grafana/etc/dashboards/dashboard.yml b/salt/grafana/etc/dashboards/dashboard.yml
index 9ae71e6a4..e08484044 100644
--- a/salt/grafana/etc/dashboards/dashboard.yml
+++ b/salt/grafana/etc/dashboards/dashboard.yml
@@ -9,14 +9,14 @@ providers:
disableDeletion: false
editable: true
options:
- path: /etc/grafana/grafana_dashboards/master
+ path: /etc/grafana/grafana_dashboards/manager
- name: 'Master Search'
folder: 'Master Search'
type: file
disableDeletion: false
editable: true
options:
- path: /etc/grafana/grafana_dashboards/mastersearch
+ path: /etc/grafana/grafana_dashboards/managersearch
- name: 'Sensor Nodes'
folder: 'Sensor Nodes'
type: file
diff --git a/salt/grafana/etc/datasources/influxdb.yaml b/salt/grafana/etc/datasources/influxdb.yaml
index c9f98dc57..c70fd7137 100644
--- a/salt/grafana/etc/datasources/influxdb.yaml
+++ b/salt/grafana/etc/datasources/influxdb.yaml
@@ -1,4 +1,4 @@
-{%- set MASTER = salt['pillar.get']('static:masterip', '') %}
+{%- set MANAGER = salt['pillar.get']('static:managerip', '') %}
apiVersion: 1
deleteDatasources:
@@ -10,7 +10,7 @@ datasources:
type: influxdb
access: proxy
database: telegraf
- url: https://{{ MASTER }}:8086
+ url: https://{{ MANAGER }}:8086
jsonData:
tlsAuth: false
tlsAuthWithCACert: false
diff --git a/salt/grafana/init.sls b/salt/grafana/init.sls
index d3c457944..c47381c26 100644
--- a/salt/grafana/init.sls
+++ b/salt/grafana/init.sls
@@ -1,8 +1,8 @@
-{% set GRAFANA = salt['pillar.get']('master:grafana', '0') %}
-{% set MASTER = salt['grains.get']('master') %}
+{% set GRAFANA = salt['pillar.get']('manager:grafana', '0') %}
+{% set MANAGER = salt['grains.get']('manager') %}
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
-{% if grains['role'] in ['so-master', 'so-mastersearch', 'so-eval', 'so-standalone'] and GRAFANA == 1 %}
+{% if grains['role'] in ['so-manager', 'so-managersearch', 'so-eval', 'so-standalone'] and GRAFANA == 1 %}
# Grafana all the things
grafanadir:
@@ -28,14 +28,14 @@ grafanadashdir:
grafanadashmdir:
file.directory:
- - name: /opt/so/conf/grafana/grafana_dashboards/master
+ - name: /opt/so/conf/grafana/grafana_dashboards/manager
- user: 939
- group: 939
- makedirs: True
grafanadashmsdir:
file.directory:
- - name: /opt/so/conf/grafana/grafana_dashboards/mastersearch
+ - name: /opt/so/conf/grafana/grafana_dashboards/managersearch
- user: 939
- group: 939
- makedirs: True
@@ -76,17 +76,17 @@ grafanaconf:
- template: jinja
- source: salt://grafana/etc
-{% if salt['pillar.get']('mastertab', False) %}
-{% for SN, SNDATA in salt['pillar.get']('mastertab', {}).items() %}
+{% if salt['pillar.get']('managertab', False) %}
+{% for SN, SNDATA in salt['pillar.get']('managertab', {}).items() %}
{% set NODETYPE = SN.split('_')|last %}
{% set SN = SN | regex_replace('_' ~ NODETYPE, '') %}
-dashboard-master:
+dashboard-manager:
file.managed:
- - name: /opt/so/conf/grafana/grafana_dashboards/master/{{ SN }}-Master.json
+ - name: /opt/so/conf/grafana/grafana_dashboards/manager/{{ SN }}-Master.json
- user: 939
- group: 939
- template: jinja
- - source: salt://grafana/dashboards/master/master.json
+ - source: salt://grafana/dashboards/manager/manager.json
- defaults:
SERVERNAME: {{ SN }}
MANINT: {{ SNDATA.manint }}
@@ -99,17 +99,17 @@ dashboard-master:
{% endfor %}
{% endif %}
-{% if salt['pillar.get']('mastersearchtab', False) %}
-{% for SN, SNDATA in salt['pillar.get']('mastersearchtab', {}).items() %}
+{% if salt['pillar.get']('managersearchtab', False) %}
+{% for SN, SNDATA in salt['pillar.get']('managersearchtab', {}).items() %}
{% set NODETYPE = SN.split('_')|last %}
{% set SN = SN | regex_replace('_' ~ NODETYPE, '') %}
-dashboard-mastersearch:
+dashboard-managersearch:
file.managed:
- - name: /opt/so/conf/grafana/grafana_dashboards/mastersearch/{{ SN }}-MasterSearch.json
+ - name: /opt/so/conf/grafana/grafana_dashboards/managersearch/{{ SN }}-MasterSearch.json
- user: 939
- group: 939
- template: jinja
- - source: salt://grafana/dashboards/mastersearch/mastersearch.json
+ - source: salt://grafana/dashboards/managersearch/managersearch.json
- defaults:
SERVERNAME: {{ SN }}
MANINT: {{ SNDATA.manint }}
@@ -216,7 +216,7 @@ dashboard-{{ SN }}:
so-grafana:
docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-grafana:{{ VERSION }}
+ - image: {{ MANAGER }}:5000/soshybridhunter/so-grafana:{{ VERSION }}
- hostname: grafana
- user: socore
- binds:
diff --git a/salt/idstools/init.sls b/salt/idstools/init.sls
index 078cb5b03..a73c21d72 100644
--- a/salt/idstools/init.sls
+++ b/salt/idstools/init.sls
@@ -13,7 +13,7 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
-{% set MASTER = salt['grains.get']('master') %}
+{% set MANAGER = salt['grains.get']('manager') %}
# IDSTools Setup
idstoolsdir:
file.directory:
@@ -60,7 +60,7 @@ synclocalnidsrules:
so-idstools:
docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-idstools:{{ VERSION }}
+ - image: {{ MANAGER }}:5000/soshybridhunter/so-idstools:{{ VERSION }}
- hostname: so-idstools
- user: socore
- binds:
diff --git a/salt/influxdb/init.sls b/salt/influxdb/init.sls
index 774db2187..304018a12 100644
--- a/salt/influxdb/init.sls
+++ b/salt/influxdb/init.sls
@@ -1,9 +1,9 @@
-{% set GRAFANA = salt['pillar.get']('master:grafana', '0') %}
-{% set MASTER = salt['grains.get']('master') %}
+{% set GRAFANA = salt['pillar.get']('manager:grafana', '0') %}
+{% set MANAGER = salt['grains.get']('manager') %}
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
-{% if grains['role'] in ['so-master', 'so-mastersearch', 'so-eval', 'so-standalone'] and GRAFANA == 1 %}
+{% if grains['role'] in ['so-manager', 'so-managersearch', 'so-eval', 'so-standalone'] and GRAFANA == 1 %}
# Influx DB
influxconfdir:
@@ -26,7 +26,7 @@ influxdbconf:
so-influxdb:
docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-influxdb:{{ VERSION }}
+ - image: {{ MANAGER }}:5000/soshybridhunter/so-influxdb:{{ VERSION }}
- hostname: influxdb
- environment:
- INFLUXDB_HTTP_LOG_ENABLED=false
diff --git a/salt/kibana/bin/keepkibanahappy.sh b/salt/kibana/bin/keepkibanahappy.sh
index 28967ee24..e8534ec12 100644
--- a/salt/kibana/bin/keepkibanahappy.sh
+++ b/salt/kibana/bin/keepkibanahappy.sh
@@ -1,4 +1,4 @@
-{%- set ES = salt['pillar.get']('master:mainip', '') -%}
+{%- set ES = salt['pillar.get']('manager:mainip', '') -%}
# Wait for ElasticSearch to come up, so that we can query for version information
echo -n "Waiting for ElasticSearch..."
COUNT=0
diff --git a/salt/kibana/bin/so-kibana-config-load b/salt/kibana/bin/so-kibana-config-load
index 81872fd6a..85ad00bc6 100644
--- a/salt/kibana/bin/so-kibana-config-load
+++ b/salt/kibana/bin/so-kibana-config-load
@@ -1,20 +1,20 @@
#!/bin/bash
-# {%- set FLEET_MASTER = salt['pillar.get']('static:fleet_master', False) -%}
+# {%- set FLEET_MANAGER = salt['pillar.get']('static:fleet_manager', False) -%}
# {%- set FLEET_NODE = salt['pillar.get']('static:fleet_node', False) -%}
-# {%- set MASTER = salt['pillar.get']('master:url_base', '') %}
+# {%- set MANAGER = salt['pillar.get']('manager:url_base', '') %}
KIBANA_VERSION="7.6.1"
# Copy template file
cp /opt/so/conf/kibana/saved_objects.ndjson.template /opt/so/conf/kibana/saved_objects.ndjson
-# {% if FLEET_NODE or FLEET_MASTER %}
+# {% if FLEET_NODE or FLEET_MANAGER %}
# Fleet IP
-sed -i "s/FLEETPLACEHOLDER/{{ MASTER }}/g" /opt/so/conf/kibana/saved_objects.ndjson
+sed -i "s/FLEETPLACEHOLDER/{{ MANAGER }}/g" /opt/so/conf/kibana/saved_objects.ndjson
# {% endif %}
# SOCtopus and Master
-sed -i "s/PLACEHOLDER/{{ MASTER }}/g" /opt/so/conf/kibana/saved_objects.ndjson
+sed -i "s/PLACEHOLDER/{{ MANAGER }}/g" /opt/so/conf/kibana/saved_objects.ndjson
# Load saved objects
curl -X POST "localhost:5601/api/saved_objects/_import" -H "kbn-xsrf: true" --form file=@/opt/so/conf/kibana/saved_objects.ndjson > /dev/null 2>&1
diff --git a/salt/kibana/etc/kibana.yml b/salt/kibana/etc/kibana.yml
index dd0d6faa9..4d19b251b 100644
--- a/salt/kibana/etc/kibana.yml
+++ b/salt/kibana/etc/kibana.yml
@@ -1,6 +1,6 @@
---
# Default Kibana configuration from kibana-docker.
-{%- set ES = salt['pillar.get']('master:mainip', '') -%}
+{%- set ES = salt['pillar.get']('manager:mainip', '') -%}
server.name: kibana
server.host: "0"
server.basePath: /kibana
diff --git a/salt/kibana/init.sls b/salt/kibana/init.sls
index 6379d6ad0..74b59f9ca 100644
--- a/salt/kibana/init.sls
+++ b/salt/kibana/init.sls
@@ -1,5 +1,5 @@
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
-{% set MASTER = salt['grains.get']('master') %}
+{% set MANAGER = salt['grains.get']('manager') %}
{% set FEATURES = salt['pillar.get']('elastic:features', False) %}
{% if FEATURES %}
{% set FEATURES = "-features" %}
@@ -69,13 +69,13 @@ kibanabin:
# Start the kibana docker
so-kibana:
docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-kibana:{{ VERSION }}{{ FEATURES }}
+ - image: {{ MANAGER }}:5000/soshybridhunter/so-kibana:{{ VERSION }}{{ FEATURES }}
- hostname: kibana
- user: kibana
- environment:
- - ELASTICSEARCH_HOST={{ MASTER }}
+ - ELASTICSEARCH_HOST={{ MANAGER }}
- ELASTICSEARCH_PORT=9200
- - MASTER={{ MASTER }}
+ - MANAGER={{ MANAGER }}
- binds:
- /opt/so/conf/kibana/etc:/usr/share/kibana/config:rw
- /opt/so/log/kibana:/var/log/kibana:rw
@@ -94,7 +94,7 @@ kibanadashtemplate:
wait_for_kibana:
module.run:
- http.wait_for_successful_query:
- - url: "http://{{MASTER}}:5601/api/saved_objects/_find?type=config"
+ - url: "http://{{MANAGER}}:5601/api/saved_objects/_find?type=config"
- wait_for: 180
- onchanges:
- file: kibanadashtemplate
diff --git a/salt/logstash/init.sls b/salt/logstash/init.sls
index e2494e57a..252f0efdc 100644
--- a/salt/logstash/init.sls
+++ b/salt/logstash/init.sls
@@ -13,7 +13,7 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
-{% set MASTER = salt['grains.get']('master') %}
+{% set MANAGER = salt['grains.get']('manager') %}
{% set FEATURES = salt['pillar.get']('elastic:features', False) %}
{% if FEATURES %}
@@ -24,13 +24,13 @@
# Logstash Section - Decide which pillar to use
{% set lsheap = salt['pillar.get']('logstash_settings:lsheap', '') %}
-{% if grains['role'] in ['so-eval','so-mastersearch', 'so-master', 'so-standalone'] %}
- {% set freq = salt['pillar.get']('master:freq', '0') %}
- {% set dstats = salt['pillar.get']('master:domainstats', '0') %}
+{% if grains['role'] in ['so-eval','so-managersearch', 'so-manager', 'so-standalone'] %}
+ {% set freq = salt['pillar.get']('manager:freq', '0') %}
+ {% set dstats = salt['pillar.get']('manager:domainstats', '0') %}
{% set nodetype = salt['grains.get']('role', '') %}
{% elif grains['role'] == 'so-helix' %}
- {% set freq = salt['pillar.get']('master:freq', '0') %}
- {% set dstats = salt['pillar.get']('master:domainstats', '0') %}
+ {% set freq = salt['pillar.get']('manager:freq', '0') %}
+ {% set dstats = salt['pillar.get']('manager:domainstats', '0') %}
{% set nodetype = salt['grains.get']('role', '') %}
{% endif %}
@@ -159,7 +159,7 @@ lslogdir:
so-logstash:
docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-logstash:{{ VERSION }}{{ FEATURES }}
+ - image: {{ MANAGER }}:5000/soshybridhunter/so-logstash:{{ VERSION }}{{ FEATURES }}
- hostname: so-logstash
- name: so-logstash
- user: logstash
diff --git a/salt/logstash/pipelines/config/so/0900_input_redis.conf.jinja b/salt/logstash/pipelines/config/so/0900_input_redis.conf.jinja
index 4d6595dd9..2ce204875 100644
--- a/salt/logstash/pipelines/config/so/0900_input_redis.conf.jinja
+++ b/salt/logstash/pipelines/config/so/0900_input_redis.conf.jinja
@@ -1,13 +1,13 @@
{%- if grains.role == 'so-heavynode' %}
-{%- set MASTER = salt['pillar.get']('elasticsearch:mainip', '') %}
+{%- set MANAGER = salt['pillar.get']('elasticsearch:mainip', '') %}
{%- else %}
-{%- set MASTER = salt['pillar.get']('static:masterip', '') %}
+{%- set MANAGER = salt['pillar.get']('static:managerip', '') %}
{% endif -%}
{%- set THREADS = salt['pillar.get']('logstash_settings:ls_input_threads', '') %}
input {
redis {
- host => '{{ MASTER }}'
+ host => '{{ MANAGER }}'
data_type => 'list'
key => 'logstash:unparsed'
type => 'redis-input'
diff --git a/salt/logstash/pipelines/config/so/9000_output_zeek.conf.jinja b/salt/logstash/pipelines/config/so/9000_output_zeek.conf.jinja
index 987614a2c..a1c93d6bc 100644
--- a/salt/logstash/pipelines/config/so/9000_output_zeek.conf.jinja
+++ b/salt/logstash/pipelines/config/so/9000_output_zeek.conf.jinja
@@ -1,5 +1,5 @@
{%- if grains['role'] == 'so-eval' -%}
-{%- set ES = salt['pillar.get']('master:mainip', '') -%}
+{%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %}
diff --git a/salt/logstash/pipelines/config/so/9001_output_switch.conf.jinja b/salt/logstash/pipelines/config/so/9001_output_switch.conf.jinja
index 8e5e5f200..7cffcf1f7 100644
--- a/salt/logstash/pipelines/config/so/9001_output_switch.conf.jinja
+++ b/salt/logstash/pipelines/config/so/9001_output_switch.conf.jinja
@@ -1,5 +1,5 @@
{%- if grains['role'] == 'so-eval' -%}
-{%- set ES = salt['pillar.get']('master:mainip', '') -%}
+{%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %}
diff --git a/salt/logstash/pipelines/config/so/9002_output_import.conf.jinja b/salt/logstash/pipelines/config/so/9002_output_import.conf.jinja
index 9153d5c44..4d2fd7640 100644
--- a/salt/logstash/pipelines/config/so/9002_output_import.conf.jinja
+++ b/salt/logstash/pipelines/config/so/9002_output_import.conf.jinja
@@ -1,5 +1,5 @@
{%- if grains['role'] == 'so-eval' -%}
-{%- set ES = salt['pillar.get']('master:mainip', '') -%}
+{%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %}
diff --git a/salt/logstash/pipelines/config/so/9004_output_flow.conf.jinja b/salt/logstash/pipelines/config/so/9004_output_flow.conf.jinja
index 2e1e79f8b..efba078b4 100644
--- a/salt/logstash/pipelines/config/so/9004_output_flow.conf.jinja
+++ b/salt/logstash/pipelines/config/so/9004_output_flow.conf.jinja
@@ -1,5 +1,5 @@
{%- if grains['role'] == 'so-eval' -%}
-{%- set ES = salt['pillar.get']('master:mainip', '') -%}
+{%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %}
diff --git a/salt/logstash/pipelines/config/so/9026_output_dhcp.conf.jinja b/salt/logstash/pipelines/config/so/9026_output_dhcp.conf.jinja
index 3da9e83ef..94c646fc3 100644
--- a/salt/logstash/pipelines/config/so/9026_output_dhcp.conf.jinja
+++ b/salt/logstash/pipelines/config/so/9026_output_dhcp.conf.jinja
@@ -1,5 +1,5 @@
{%- if grains['role'] == 'so-eval' -%}
-{%- set ES = salt['pillar.get']('master:mainip', '') -%}
+{%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %}
diff --git a/salt/logstash/pipelines/config/so/9029_output_esxi.conf.jinja b/salt/logstash/pipelines/config/so/9029_output_esxi.conf.jinja
index b84ab4ec9..474b08533 100644
--- a/salt/logstash/pipelines/config/so/9029_output_esxi.conf.jinja
+++ b/salt/logstash/pipelines/config/so/9029_output_esxi.conf.jinja
@@ -1,5 +1,5 @@
{%- if grains['role'] == 'so-eval' -%}
-{%- set ES = salt['pillar.get']('master:mainip', '') -%}
+{%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %}
diff --git a/salt/logstash/pipelines/config/so/9030_output_greensql.conf.jinja b/salt/logstash/pipelines/config/so/9030_output_greensql.conf.jinja
index d6801530b..434212c72 100644
--- a/salt/logstash/pipelines/config/so/9030_output_greensql.conf.jinja
+++ b/salt/logstash/pipelines/config/so/9030_output_greensql.conf.jinja
@@ -1,5 +1,5 @@
{%- if grains['role'] == 'so-eval' -%}
-{%- set ES = salt['pillar.get']('master:mainip', '') -%}
+{%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %}
diff --git a/salt/logstash/pipelines/config/so/9031_output_iis.conf.jinja b/salt/logstash/pipelines/config/so/9031_output_iis.conf.jinja
index 67616110f..3d931015c 100644
--- a/salt/logstash/pipelines/config/so/9031_output_iis.conf.jinja
+++ b/salt/logstash/pipelines/config/so/9031_output_iis.conf.jinja
@@ -1,5 +1,5 @@
{%- if grains['role'] == 'so-eval' -%}
-{%- set ES = salt['pillar.get']('master:mainip', '') -%}
+{%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %}
diff --git a/salt/logstash/pipelines/config/so/9032_output_mcafee.conf.jinja b/salt/logstash/pipelines/config/so/9032_output_mcafee.conf.jinja
index c6641f671..4bec9fd5e 100644
--- a/salt/logstash/pipelines/config/so/9032_output_mcafee.conf.jinja
+++ b/salt/logstash/pipelines/config/so/9032_output_mcafee.conf.jinja
@@ -1,5 +1,5 @@
{%- if grains['role'] == 'so-eval' -%}
-{%- set ES = salt['pillar.get']('master:mainip', '') -%}
+{%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %}
diff --git a/salt/logstash/pipelines/config/so/9033_output_snort.conf.jinja b/salt/logstash/pipelines/config/so/9033_output_snort.conf.jinja
index 0cc7a3b66..bffd90a40 100644
--- a/salt/logstash/pipelines/config/so/9033_output_snort.conf.jinja
+++ b/salt/logstash/pipelines/config/so/9033_output_snort.conf.jinja
@@ -1,5 +1,5 @@
{%- if grains['role'] == 'so-eval' -%}
-{%- set ES = salt['pillar.get']('master:mainip', '') -%}
+{%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %}
diff --git a/salt/logstash/pipelines/config/so/9034_output_syslog.conf.jinja b/salt/logstash/pipelines/config/so/9034_output_syslog.conf.jinja
index 59cae7b65..157c32941 100644
--- a/salt/logstash/pipelines/config/so/9034_output_syslog.conf.jinja
+++ b/salt/logstash/pipelines/config/so/9034_output_syslog.conf.jinja
@@ -1,5 +1,5 @@
{%- if grains['role'] == 'so-eval' -%}
-{%- set ES = salt['pillar.get']('master:mainip', '') -%}
+{%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %}
diff --git a/salt/logstash/pipelines/config/so/9100_output_osquery.conf.jinja b/salt/logstash/pipelines/config/so/9100_output_osquery.conf.jinja
index 21ae77095..dc4e98401 100644
--- a/salt/logstash/pipelines/config/so/9100_output_osquery.conf.jinja
+++ b/salt/logstash/pipelines/config/so/9100_output_osquery.conf.jinja
@@ -1,5 +1,5 @@
{%- if grains['role'] == 'so-eval' -%}
-{%- set ES = salt['pillar.get']('master:mainip', '') -%}
+{%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %}
diff --git a/salt/logstash/pipelines/config/so/9200_output_firewall.conf.jinja b/salt/logstash/pipelines/config/so/9200_output_firewall.conf.jinja
index 54c75873d..0ba8030bb 100644
--- a/salt/logstash/pipelines/config/so/9200_output_firewall.conf.jinja
+++ b/salt/logstash/pipelines/config/so/9200_output_firewall.conf.jinja
@@ -1,5 +1,5 @@
{%- if grains['role'] == 'so-eval' -%}
-{%- set ES = salt['pillar.get']('master:mainip', '') -%}
+{%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %}
diff --git a/salt/logstash/pipelines/config/so/9300_output_windows.conf.jinja b/salt/logstash/pipelines/config/so/9300_output_windows.conf.jinja
index cddda5541..5730661bf 100644
--- a/salt/logstash/pipelines/config/so/9300_output_windows.conf.jinja
+++ b/salt/logstash/pipelines/config/so/9300_output_windows.conf.jinja
@@ -1,5 +1,5 @@
{%- if grains['role'] == 'so-eval' -%}
-{%- set ES = salt['pillar.get']('master:mainip', '') -%}
+{%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %}
diff --git a/salt/logstash/pipelines/config/so/9301_output_dns_windows.conf.jinja b/salt/logstash/pipelines/config/so/9301_output_dns_windows.conf.jinja
index 84fd1f5f7..10acb9af6 100644
--- a/salt/logstash/pipelines/config/so/9301_output_dns_windows.conf.jinja
+++ b/salt/logstash/pipelines/config/so/9301_output_dns_windows.conf.jinja
@@ -1,5 +1,5 @@
{%- if grains['role'] == 'so-eval' -%}
-{%- set ES = salt['pillar.get']('master:mainip', '') -%}
+{%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %}
diff --git a/salt/logstash/pipelines/config/so/9400_output_suricata.conf.jinja b/salt/logstash/pipelines/config/so/9400_output_suricata.conf.jinja
index 1d36d774d..ed1be775e 100644
--- a/salt/logstash/pipelines/config/so/9400_output_suricata.conf.jinja
+++ b/salt/logstash/pipelines/config/so/9400_output_suricata.conf.jinja
@@ -1,5 +1,5 @@
{%- if grains['role'] == 'so-eval' -%}
-{%- set ES = salt['pillar.get']('master:mainip', '') -%}
+{%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %}
diff --git a/salt/logstash/pipelines/config/so/9500_output_beats.conf.jinja b/salt/logstash/pipelines/config/so/9500_output_beats.conf.jinja
index 932a194ab..fc2b81479 100644
--- a/salt/logstash/pipelines/config/so/9500_output_beats.conf.jinja
+++ b/salt/logstash/pipelines/config/so/9500_output_beats.conf.jinja
@@ -1,5 +1,5 @@
{%- if grains['role'] == 'so-eval' -%}
-{%- set ES = salt['pillar.get']('master:mainip', '') -%}
+{%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %}
diff --git a/salt/logstash/pipelines/config/so/9600_output_ossec.conf.jinja b/salt/logstash/pipelines/config/so/9600_output_ossec.conf.jinja
index 5a8f9f5ba..20c4b7243 100644
--- a/salt/logstash/pipelines/config/so/9600_output_ossec.conf.jinja
+++ b/salt/logstash/pipelines/config/so/9600_output_ossec.conf.jinja
@@ -1,5 +1,5 @@
{%- if grains['role'] == 'so-eval' -%}
-{%- set ES = salt['pillar.get']('master:mainip', '') -%}
+{%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %}
diff --git a/salt/logstash/pipelines/config/so/9700_output_strelka.conf.jinja b/salt/logstash/pipelines/config/so/9700_output_strelka.conf.jinja
index 5116b86ea..3b37c3af5 100644
--- a/salt/logstash/pipelines/config/so/9700_output_strelka.conf.jinja
+++ b/salt/logstash/pipelines/config/so/9700_output_strelka.conf.jinja
@@ -1,5 +1,5 @@
{%- if grains['role'] == 'so-eval' -%}
-{%- set ES = salt['pillar.get']('master:mainip', '') -%}
+{%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %}
diff --git a/salt/logstash/pipelines/config/so/9999_output_redis.conf.jinja b/salt/logstash/pipelines/config/so/9999_output_redis.conf.jinja
index afa8d290a..5b9aaf80a 100644
--- a/salt/logstash/pipelines/config/so/9999_output_redis.conf.jinja
+++ b/salt/logstash/pipelines/config/so/9999_output_redis.conf.jinja
@@ -1,9 +1,9 @@
-{% set MASTER = salt['pillar.get']('static:masterip', '') %}
+{% set MANAGER = salt['pillar.get']('static:managerip', '') %}
{% set BATCH = salt['pillar.get']('logstash_settings:ls_pipeline_batch_size', 125) %}
output {
redis {
- host => '{{ MASTER }}'
+ host => '{{ MANAGER }}'
data_type => 'list'
key => 'logstash:unparsed'
congestion_interval => 1
diff --git a/salt/master/files/acng/acng.conf b/salt/manager/files/acng/acng.conf
similarity index 100%
rename from salt/master/files/acng/acng.conf
rename to salt/manager/files/acng/acng.conf
diff --git a/salt/master/files/add_minion.sh b/salt/manager/files/add_minion.sh
similarity index 100%
rename from salt/master/files/add_minion.sh
rename to salt/manager/files/add_minion.sh
diff --git a/salt/master/files/registry/scripts/so-docker-download b/salt/manager/files/registry/scripts/so-docker-download
similarity index 88%
rename from salt/master/files/registry/scripts/so-docker-download
rename to salt/manager/files/registry/scripts/so-docker-download
index 1213ae72a..dcba7a531 100644
--- a/salt/master/files/registry/scripts/so-docker-download
+++ b/salt/manager/files/registry/scripts/so-docker-download
@@ -1,6 +1,6 @@
#!/bin/bash
-MASTER={{ MASTER }}
+MANAGER={{ MANAGER }}
VERSION="HH1.2.2"
TRUSTED_CONTAINERS=( \
"so-nginx:$VERSION" \
@@ -41,6 +41,6 @@ do
# Pull down the trusted docker image
docker pull --disable-content-trust=false docker.io/soshybridhunter/$i
# Tag it with the new registry destination
- docker tag soshybridhunter/$i $MASTER:5000/soshybridhunter/$i
- docker push $MASTER:5000/soshybridhunter/$i
+ docker tag soshybridhunter/$i $MANAGER:5000/soshybridhunter/$i
+ docker push $MANAGER:5000/soshybridhunter/$i
done
diff --git a/salt/master/init.sls b/salt/manager/init.sls
similarity index 87%
rename from salt/master/init.sls
rename to salt/manager/init.sls
index 3c6b81e5e..9ca936b2a 100644
--- a/salt/master/init.sls
+++ b/salt/manager/init.sls
@@ -13,8 +13,8 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
-{% set MASTER = salt['grains.get']('master') %}
-{% set masterproxy = salt['pillar.get']('static:masterupdate', '0') %}
+{% set MANAGER = salt['grains.get']('manager') %}
+{% set managerproxy = salt['pillar.get']('static:managerupdate', '0') %}
socore_own_saltstack:
file.directory:
@@ -25,7 +25,7 @@ socore_own_saltstack:
- user
- group
-{% if masterproxy == 1 %}
+{% if managerproxy == 1 %}
# Create the directories for apt-cacher-ng
aptcacherconfdir:
@@ -54,12 +54,12 @@ aptcacherlogdir:
acngcopyconf:
file.managed:
- name: /opt/so/conf/aptcacher-ng/etc/acng.conf
- - source: salt://master/files/acng/acng.conf
+ - source: salt://manager/files/acng/acng.conf
# Install the apt-cacher-ng container
so-aptcacherng:
docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-acng:{{ VERSION }}
+ - image: {{ MANAGER }}:5000/soshybridhunter/so-acng:{{ VERSION }}
- hostname: so-acng
- restart_policy: always
- port_bindings:
diff --git a/salt/minio/init.sls b/salt/minio/init.sls
index 7b4dd5673..2d5941301 100644
--- a/salt/minio/init.sls
+++ b/salt/minio/init.sls
@@ -13,8 +13,8 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-{% set access_key = salt['pillar.get']('master:access_key', '') %}
-{% set access_secret = salt['pillar.get']('master:access_secret', '') %}
+{% set access_key = salt['pillar.get']('manager:access_key', '') %}
+{% set access_secret = salt['pillar.get']('manager:access_secret', '') %}
# Minio Setup
minioconfdir:
diff --git a/salt/mysql/init.sls b/salt/mysql/init.sls
index 6bb99d98c..928d14c1f 100644
--- a/salt/mysql/init.sls
+++ b/salt/mysql/init.sls
@@ -1,7 +1,7 @@
{%- set MYSQLPASS = salt['pillar.get']('secrets:mysql', None) %}
-{%- set MASTERIP = salt['pillar.get']('static:masterip', '') %}
+{%- set MANAGERIP = salt['pillar.get']('static:managerip', '') %}
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
-{% set MASTER = salt['grains.get']('master') %}
+{% set MANAGER = salt['grains.get']('manager') %}
{% set MAINIP = salt['pillar.get']('elasticsearch:mainip') %}
{% set FLEETARCH = salt['grains.get']('role') %}
@@ -9,7 +9,7 @@
{% set MAININT = salt['pillar.get']('host:mainint') %}
{% set MAINIP = salt['grains.get']('ip_interfaces').get(MAININT)[0] %}
{% else %}
- {% set MAINIP = salt['pillar.get']('static:masterip') %}
+ {% set MAINIP = salt['pillar.get']('static:managerip') %}
{% endif %}
# MySQL Setup
@@ -71,7 +71,7 @@ mysql_password_none:
so-mysql:
docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-mysql:{{ VERSION }}
+ - image: {{ MANAGER }}:5000/soshybridhunter/so-mysql:{{ VERSION }}
- hostname: so-mysql
- user: socore
- port_bindings:
diff --git a/salt/nginx/etc/nginx.conf.so-eval b/salt/nginx/etc/nginx.conf.so-eval
index 7e3a9a401..2998a5bf2 100644
--- a/salt/nginx/etc/nginx.conf.so-eval
+++ b/salt/nginx/etc/nginx.conf.so-eval
@@ -1,5 +1,5 @@
-{%- set masterip = salt['pillar.get']('master:mainip', '') %}
-{%- set FLEET_MASTER = salt['pillar.get']('static:fleet_master') %}
+{%- set managerip = salt['pillar.get']('manager:mainip', '') %}
+{%- set FLEET_MANAGER = salt['pillar.get']('static:fleet_manager') %}
{%- set FLEET_NODE = salt['pillar.get']('static:fleet_node') %}
{%- set FLEET_IP = salt['pillar.get']('static:fleet_ip', None) %}
# For more information on configuration, see:
@@ -66,7 +66,7 @@ http {
return 301 https://$host$request_uri;
}
-{% if FLEET_MASTER %}
+{% if FLEET_MANAGER %}
server {
listen 8090 ssl http2 default_server;
server_name _;
@@ -81,7 +81,7 @@ http {
ssl_prefer_server_ciphers on;
location ~ ^/kolide.agent.Api/(RequestEnrollment|RequestConfig|RequestQueries|PublishLogs|PublishResults|CheckHealth)$ {
- grpc_pass grpcs://{{ masterip }}:8080;
+ grpc_pass grpcs://{{ managerip }}:8080;
grpc_set_header Host $host;
grpc_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_buffering off;
@@ -110,7 +110,7 @@ http {
#include /etc/nginx/default.d/*.conf;
location ~* (^/login/|^/js/.*|^/css/.*|^/images/.*) {
- proxy_pass http://{{ masterip }}:9822;
+ proxy_pass http://{{ managerip }}:9822;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -124,7 +124,7 @@ http {
location / {
auth_request /auth/sessions/whoami;
- proxy_pass http://{{ masterip }}:9822/;
+ proxy_pass http://{{ managerip }}:9822/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -138,7 +138,7 @@ http {
location ~ ^/auth/.*?(whoami|login|logout|settings) {
rewrite /auth/(.*) /$1 break;
- proxy_pass http://{{ masterip }}:4433;
+ proxy_pass http://{{ managerip }}:4433;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -185,7 +185,7 @@ http {
location /grafana/ {
auth_request /auth/sessions/whoami;
rewrite /grafana/(.*) /$1 break;
- proxy_pass http://{{ masterip }}:3000/;
+ proxy_pass http://{{ managerip }}:3000/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -198,7 +198,7 @@ http {
location /kibana/ {
auth_request /auth/sessions/whoami;
rewrite /kibana/(.*) /$1 break;
- proxy_pass http://{{ masterip }}:5601/;
+ proxy_pass http://{{ managerip }}:5601/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -209,7 +209,7 @@ http {
}
location /nodered/ {
- proxy_pass http://{{ masterip }}:1880/;
+ proxy_pass http://{{ managerip }}:1880/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -222,7 +222,7 @@ http {
}
location /playbook/ {
- proxy_pass http://{{ masterip }}:3200/playbook/;
+ proxy_pass http://{{ managerip }}:3200/playbook/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -238,7 +238,7 @@ http {
}
{%- else %}
location /fleet/ {
- proxy_pass https://{{ masterip }}:8080;
+ proxy_pass https://{{ managerip }}:8080;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -250,7 +250,7 @@ http {
{%- endif %}
location /thehive/ {
- proxy_pass http://{{ masterip }}:9000/thehive/;
+ proxy_pass http://{{ managerip }}:9000/thehive/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_http_version 1.1; # this is essential for chunked responses to work
@@ -262,7 +262,7 @@ http {
}
location /cortex/ {
- proxy_pass http://{{ masterip }}:9001/cortex/;
+ proxy_pass http://{{ managerip }}:9001/cortex/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_http_version 1.1; # this is essential for chunked responses to work
@@ -274,7 +274,7 @@ http {
}
location /soctopus/ {
- proxy_pass http://{{ masterip }}:7000/;
+ proxy_pass http://{{ managerip }}:7000/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -297,7 +297,7 @@ http {
}
location /sensoroniagents/ {
- proxy_pass http://{{ masterip }}:9822/;
+ proxy_pass http://{{ managerip }}:9822/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
diff --git a/salt/nginx/etc/nginx.conf.so-master b/salt/nginx/etc/nginx.conf.so-manager
similarity index 91%
rename from salt/nginx/etc/nginx.conf.so-master
rename to salt/nginx/etc/nginx.conf.so-manager
index de3a3a6c1..bdb342cac 100644
--- a/salt/nginx/etc/nginx.conf.so-master
+++ b/salt/nginx/etc/nginx.conf.so-manager
@@ -1,5 +1,5 @@
-{%- set masterip = salt['pillar.get']('master:mainip', '') %}
-{%- set FLEET_MASTER = salt['pillar.get']('static:fleet_master') %}
+{%- set managerip = salt['pillar.get']('manager:mainip', '') %}
+{%- set FLEET_MANAGER = salt['pillar.get']('static:fleet_manager') %}
{%- set FLEET_NODE = salt['pillar.get']('static:fleet_node') %}
{%- set FLEET_IP = salt['pillar.get']('static:fleet_ip', None) %}
# For more information on configuration, see:
@@ -66,7 +66,7 @@ http {
return 301 https://$host$request_uri;
}
-{% if FLEET_MASTER %}
+{% if FLEET_MANAGER %}
server {
listen 8090 ssl http2 default_server;
server_name _;
@@ -81,7 +81,7 @@ http {
ssl_prefer_server_ciphers on;
location ~ ^/kolide.agent.Api/(RequestEnrollment|RequestConfig|RequestQueries|PublishLogs|PublishResults|CheckHealth)$ {
- grpc_pass grpcs://{{ masterip }}:8080;
+ grpc_pass grpcs://{{ managerip }}:8080;
grpc_set_header Host $host;
grpc_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_buffering off;
@@ -110,7 +110,7 @@ http {
#include /etc/nginx/default.d/*.conf;
location ~* (^/login/|^/js/.*|^/css/.*|^/images/.*) {
- proxy_pass http://{{ masterip }}:9822;
+ proxy_pass http://{{ managerip }}:9822;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -124,7 +124,7 @@ http {
location / {
auth_request /auth/sessions/whoami;
- proxy_pass http://{{ masterip }}:9822/;
+ proxy_pass http://{{ managerip }}:9822/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -138,7 +138,7 @@ http {
location ~ ^/auth/.*?(whoami|login|logout|settings) {
rewrite /auth/(.*) /$1 break;
- proxy_pass http://{{ masterip }}:4433;
+ proxy_pass http://{{ managerip }}:4433;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -185,7 +185,7 @@ http {
location /grafana/ {
auth_request /auth/sessions/whoami;
rewrite /grafana/(.*) /$1 break;
- proxy_pass http://{{ masterip }}:3000/;
+ proxy_pass http://{{ managerip }}:3000/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -198,7 +198,7 @@ http {
location /kibana/ {
auth_request /auth/sessions/whoami;
rewrite /kibana/(.*) /$1 break;
- proxy_pass http://{{ masterip }}:5601/;
+ proxy_pass http://{{ managerip }}:5601/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -209,7 +209,7 @@ http {
}
location /nodered/ {
- proxy_pass http://{{ masterip }}:1880/;
+ proxy_pass http://{{ managerip }}:1880/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -222,7 +222,7 @@ http {
}
location /playbook/ {
- proxy_pass http://{{ masterip }}:3200/playbook/;
+ proxy_pass http://{{ managerip }}:3200/playbook/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -238,7 +238,7 @@ http {
}
{%- else %}
location /fleet/ {
- proxy_pass https://{{ masterip }}:8080;
+ proxy_pass https://{{ managerip }}:8080;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -250,7 +250,7 @@ http {
{%- endif %}
location /thehive/ {
- proxy_pass http://{{ masterip }}:9000/thehive/;
+ proxy_pass http://{{ managerip }}:9000/thehive/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_http_version 1.1; # this is essential for chunked responses to work
@@ -262,7 +262,7 @@ http {
}
location /cortex/ {
- proxy_pass http://{{ masterip }}:9001/cortex/;
+ proxy_pass http://{{ managerip }}:9001/cortex/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_http_version 1.1; # this is essential for chunked responses to work
@@ -274,7 +274,7 @@ http {
}
location /soctopus/ {
- proxy_pass http://{{ masterip }}:7000/;
+ proxy_pass http://{{ managerip }}:7000/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -297,7 +297,7 @@ http {
}
location /sensoroniagents/ {
- proxy_pass http://{{ masterip }}:9822/;
+ proxy_pass http://{{ managerip }}:9822/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
diff --git a/salt/nginx/etc/nginx.conf.so-mastersearch b/salt/nginx/etc/nginx.conf.so-managersearch
similarity index 91%
rename from salt/nginx/etc/nginx.conf.so-mastersearch
rename to salt/nginx/etc/nginx.conf.so-managersearch
index 952f18cd9..cb7576923 100644
--- a/salt/nginx/etc/nginx.conf.so-mastersearch
+++ b/salt/nginx/etc/nginx.conf.so-managersearch
@@ -1,5 +1,5 @@
-{%- set masterip = salt['pillar.get']('master:mainip', '') %}
-{%- set FLEET_MASTER = salt['pillar.get']('static:fleet_master') %}
+{%- set managerip = salt['pillar.get']('manager:mainip', '') %}
+{%- set FLEET_MANAGER = salt['pillar.get']('static:fleet_manager') %}
{%- set FLEET_NODE = salt['pillar.get']('static:fleet_node') %}
{%- set FLEET_IP = salt['pillar.get']('static:fleet_ip', None) %}
# For more information on configuration, see:
@@ -66,7 +66,7 @@ http {
return 301 https://$host$request_uri;
}
-{% if FLEET_MASTER %}
+{% if FLEET_MANAGER %}
server {
listen 8090 ssl http2 default_server;
server_name _;
@@ -81,7 +81,7 @@ http {
ssl_prefer_server_ciphers on;
location ~ ^/kolide.agent.Api/(RequestEnrollment|RequestConfig|RequestQueries|PublishLogs|PublishResults|CheckHealth)$ {
- grpc_pass grpcs://{{ masterip }}:8080;
+ grpc_pass grpcs://{{ managerip }}:8080;
grpc_set_header Host $host;
grpc_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_buffering off;
@@ -109,7 +109,7 @@ http {
#include /etc/nginx/default.d/*.conf;
location ~* (^/login/|^/js/.*|^/css/.*|^/images/.*) {
- proxy_pass http://{{ masterip }}:9822;
+ proxy_pass http://{{ managerip }}:9822;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -123,7 +123,7 @@ http {
location / {
auth_request /auth/sessions/whoami;
- proxy_pass http://{{ masterip }}:9822/;
+ proxy_pass http://{{ managerip }}:9822/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -137,7 +137,7 @@ http {
location ~ ^/auth/.*?(whoami|login|logout|settings) {
rewrite /auth/(.*) /$1 break;
- proxy_pass http://{{ masterip }}:4433;
+ proxy_pass http://{{ managerip }}:4433;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -184,7 +184,7 @@ http {
location /grafana/ {
auth_request /auth/sessions/whoami;
rewrite /grafana/(.*) /$1 break;
- proxy_pass http://{{ masterip }}:3000/;
+ proxy_pass http://{{ managerip }}:3000/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -197,7 +197,7 @@ http {
location /kibana/ {
auth_request /auth/sessions/whoami;
rewrite /kibana/(.*) /$1 break;
- proxy_pass http://{{ masterip }}:5601/;
+ proxy_pass http://{{ managerip }}:5601/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -208,7 +208,7 @@ http {
}
location /nodered/ {
- proxy_pass http://{{ masterip }}:1880/;
+ proxy_pass http://{{ managerip }}:1880/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -221,7 +221,7 @@ http {
}
location /playbook/ {
- proxy_pass http://{{ masterip }}:3200/playbook/;
+ proxy_pass http://{{ managerip }}:3200/playbook/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -237,7 +237,7 @@ http {
}
{%- else %}
location /fleet/ {
- proxy_pass https://{{ masterip }}:8080;
+ proxy_pass https://{{ managerip }}:8080;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -249,7 +249,7 @@ http {
{%- endif %}
location /thehive/ {
- proxy_pass http://{{ masterip }}:9000/thehive/;
+ proxy_pass http://{{ managerip }}:9000/thehive/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_http_version 1.1; # this is essential for chunked responses to work
@@ -261,7 +261,7 @@ http {
}
location /cortex/ {
- proxy_pass http://{{ masterip }}:9001/cortex/;
+ proxy_pass http://{{ managerip }}:9001/cortex/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_http_version 1.1; # this is essential for chunked responses to work
@@ -273,7 +273,7 @@ http {
}
location /soctopus/ {
- proxy_pass http://{{ masterip }}:7000/;
+ proxy_pass http://{{ managerip }}:7000/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -296,7 +296,7 @@ http {
}
location /sensoroniagents/ {
- proxy_pass http://{{ masterip }}:9822/;
+ proxy_pass http://{{ managerip }}:9822/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
diff --git a/salt/nginx/etc/nginx.conf.so-standalone b/salt/nginx/etc/nginx.conf.so-standalone
index de3a3a6c1..bdb342cac 100644
--- a/salt/nginx/etc/nginx.conf.so-standalone
+++ b/salt/nginx/etc/nginx.conf.so-standalone
@@ -1,5 +1,5 @@
-{%- set masterip = salt['pillar.get']('master:mainip', '') %}
-{%- set FLEET_MASTER = salt['pillar.get']('static:fleet_master') %}
+{%- set managerip = salt['pillar.get']('manager:mainip', '') %}
+{%- set FLEET_MANAGER = salt['pillar.get']('static:fleet_manager') %}
{%- set FLEET_NODE = salt['pillar.get']('static:fleet_node') %}
{%- set FLEET_IP = salt['pillar.get']('static:fleet_ip', None) %}
# For more information on configuration, see:
@@ -66,7 +66,7 @@ http {
return 301 https://$host$request_uri;
}
-{% if FLEET_MASTER %}
+{% if FLEET_MANAGER %}
server {
listen 8090 ssl http2 default_server;
server_name _;
@@ -81,7 +81,7 @@ http {
ssl_prefer_server_ciphers on;
location ~ ^/kolide.agent.Api/(RequestEnrollment|RequestConfig|RequestQueries|PublishLogs|PublishResults|CheckHealth)$ {
- grpc_pass grpcs://{{ masterip }}:8080;
+ grpc_pass grpcs://{{ managerip }}:8080;
grpc_set_header Host $host;
grpc_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_buffering off;
@@ -110,7 +110,7 @@ http {
#include /etc/nginx/default.d/*.conf;
location ~* (^/login/|^/js/.*|^/css/.*|^/images/.*) {
- proxy_pass http://{{ masterip }}:9822;
+ proxy_pass http://{{ managerip }}:9822;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -124,7 +124,7 @@ http {
location / {
auth_request /auth/sessions/whoami;
- proxy_pass http://{{ masterip }}:9822/;
+ proxy_pass http://{{ managerip }}:9822/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -138,7 +138,7 @@ http {
location ~ ^/auth/.*?(whoami|login|logout|settings) {
rewrite /auth/(.*) /$1 break;
- proxy_pass http://{{ masterip }}:4433;
+ proxy_pass http://{{ managerip }}:4433;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -185,7 +185,7 @@ http {
location /grafana/ {
auth_request /auth/sessions/whoami;
rewrite /grafana/(.*) /$1 break;
- proxy_pass http://{{ masterip }}:3000/;
+ proxy_pass http://{{ managerip }}:3000/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -198,7 +198,7 @@ http {
location /kibana/ {
auth_request /auth/sessions/whoami;
rewrite /kibana/(.*) /$1 break;
- proxy_pass http://{{ masterip }}:5601/;
+ proxy_pass http://{{ managerip }}:5601/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -209,7 +209,7 @@ http {
}
location /nodered/ {
- proxy_pass http://{{ masterip }}:1880/;
+ proxy_pass http://{{ managerip }}:1880/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -222,7 +222,7 @@ http {
}
location /playbook/ {
- proxy_pass http://{{ masterip }}:3200/playbook/;
+ proxy_pass http://{{ managerip }}:3200/playbook/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -238,7 +238,7 @@ http {
}
{%- else %}
location /fleet/ {
- proxy_pass https://{{ masterip }}:8080;
+ proxy_pass https://{{ managerip }}:8080;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -250,7 +250,7 @@ http {
{%- endif %}
location /thehive/ {
- proxy_pass http://{{ masterip }}:9000/thehive/;
+ proxy_pass http://{{ managerip }}:9000/thehive/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_http_version 1.1; # this is essential for chunked responses to work
@@ -262,7 +262,7 @@ http {
}
location /cortex/ {
- proxy_pass http://{{ masterip }}:9001/cortex/;
+ proxy_pass http://{{ managerip }}:9001/cortex/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_http_version 1.1; # this is essential for chunked responses to work
@@ -274,7 +274,7 @@ http {
}
location /soctopus/ {
- proxy_pass http://{{ masterip }}:7000/;
+ proxy_pass http://{{ managerip }}:7000/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -297,7 +297,7 @@ http {
}
location /sensoroniagents/ {
- proxy_pass http://{{ masterip }}:9822/;
+ proxy_pass http://{{ managerip }}:9822/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
diff --git a/salt/nginx/files/navigator_config.json b/salt/nginx/files/navigator_config.json
index 7e132cbf8..0c69995e3 100644
--- a/salt/nginx/files/navigator_config.json
+++ b/salt/nginx/files/navigator_config.json
@@ -1,4 +1,4 @@
-{%- set ip = salt['pillar.get']('static:masterip', '') %}
+{%- set ip = salt['pillar.get']('static:managerip', '') %}
{
"enterprise_attack_url": "https://raw.githubusercontent.com/mitre/cti/master/enterprise-attack/enterprise-attack.json",
diff --git a/salt/nginx/init.sls b/salt/nginx/init.sls
index 73f14a7ed..2c6f55ef1 100644
--- a/salt/nginx/init.sls
+++ b/salt/nginx/init.sls
@@ -1,6 +1,6 @@
-{% set FLEETMASTER = salt['pillar.get']('static:fleet_master', False) %}
+{% set FLEETMANAGER = salt['pillar.get']('static:fleet_manager', False) %}
{% set FLEETNODE = salt['pillar.get']('static:fleet_node', False) %}
-{% set MASTER = salt['grains.get']('master') %}
+{% set MANAGER = salt['grains.get']('manager') %}
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
# Drop the correct nginx config based on role
@@ -61,15 +61,15 @@ navigatordefaultlayer:
so-nginx:
docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-nginx:{{ VERSION }}
+ - image: {{ MANAGER }}:5000/soshybridhunter/so-nginx:{{ VERSION }}
- hostname: so-nginx
- binds:
- /opt/so/conf/nginx/nginx.conf:/etc/nginx/nginx.conf:ro
- /opt/so/log/nginx/:/var/log/nginx:rw
- /opt/so/tmp/nginx/:/var/lib/nginx:rw
- /opt/so/tmp/nginx/:/run:rw
- - /etc/pki/masterssl.crt:/etc/pki/nginx/server.crt:ro
- - /etc/pki/masterssl.key:/etc/pki/nginx/server.key:ro
+ - /etc/pki/managerssl.crt:/etc/pki/nginx/server.crt:ro
+ - /etc/pki/managerssl.key:/etc/pki/nginx/server.key:ro
- /opt/so/conf/fleet/packages:/opt/socore/html/packages
# ATT&CK Navigator binds
- /opt/so/conf/navigator/navigator_config.json:/opt/socore/html/navigator/assets/config.json:ro
@@ -78,7 +78,7 @@ so-nginx:
- port_bindings:
- 80:80
- 443:443
- {%- if FLEETMASTER or FLEETNODE %}
+ {%- if FLEETMANAGER or FLEETNODE %}
- 8090:8090
{%- endif %}
- watch:
diff --git a/salt/nodered/files/nodered_load_flows b/salt/nodered/files/nodered_load_flows
index 5617b1022..985c1c49a 100644
--- a/salt/nodered/files/nodered_load_flows
+++ b/salt/nodered/files/nodered_load_flows
@@ -1,4 +1,4 @@
-{%- set ip = salt['pillar.get']('static:masterip', '') -%}
+{%- set ip = salt['pillar.get']('static:managerip', '') -%}
#!/bin/bash
default_salt_dir=/opt/so/saltstack/default
diff --git a/salt/nodered/files/so_flows.json b/salt/nodered/files/so_flows.json
index 8ab8cbf81..ad780ceb9 100644
--- a/salt/nodered/files/so_flows.json
+++ b/salt/nodered/files/so_flows.json
@@ -1,4 +1,4 @@
-{%- set MASTERIP = salt['pillar.get']('static:masterip', '') -%}
+{%- set MANAGERIP = salt['pillar.get']('static:managerip', '') -%}
{%- set HIVEKEY = salt['pillar.get']('static:hivekey', '') -%}
{%- set CORTEXKEY = salt['pillar.get']('static:cortexkey', '') -%}
-[{"id":"dca608c3.7d8af8","type":"tab","label":"TheHive - Webhook Events","disabled":false,"info":""},{"id":"4db74fa6.2556d","type":"tls-config","z":"","name":"","cert":"","key":"","ca":"","certname":"","keyname":"","caname":"","servername":"","verifyservercert":false},{"id":"aa6cf50d.a02fc8","type":"http in","z":"dca608c3.7d8af8","name":"TheHive Listener","url":"/thehive","method":"post","upload":false,"swaggerDoc":"","x":120,"y":780,"wires":[["2b92aebb.853dc2","2fce29bb.1b1376","82ad0f08.7a53f"]]},{"id":"2b92aebb.853dc2","type":"debug","z":"dca608c3.7d8af8","name":"","active":true,"tosidebar":true,"console":false,"tostatus":false,"complete":"payload","targetType":"msg","x":470,"y":940,"wires":[]},{"id":"a4ecb84a.805958","type":"switch","z":"dca608c3.7d8af8","name":"Operation","property":"payload.operation","propertyType":"msg","rules":[{"t":"eq","v":"Creation","vt":"str"},{"t":"eq","v":"Update","vt":"str"},{"t":"eq","v":"Delete","vt":"str"}],"checkall":"false","repair":false,"outputs":3,"x":580,"y":780,"wires":[["f1e954fd.3c21d8"],["65928861.c90a48"],["a259a26c.a21"]],"outputLabels":["Creation","Update","Delete"]},{"id":"f1e954fd.3c21d8","type":"switch","z":"dca608c3.7d8af8","name":"Creation","property":"payload.objectType","propertyType":"msg","rules":[{"t":"eq","v":"case","vt":"str"},{"t":"eq","v":"case_artifact","vt":"str"},{"t":"eq","v":"case_task","vt":"str"},{"t":"eq","v":"case_task_log","vt":"str"},{"t":"eq","v":"case_artifact_job","vt":"str"},{"t":"eq","v":"alert","vt":"str"},{"t":"eq","v":"user","vt":"str"}],"checkall":"false","repair":false,"outputs":7,"x":900,"y":480,"wires":[["e88b4cc2.f6afe"],["8c54e39.a1b4f2"],["64203fe8.e0ad5"],["3511de51.889a02"],["14544a8b.b6b2f5"],["44c595a4.45d45c"],["3eb4bedf.6e20a2"]],"inputLabels":["Operation"],"outputLabels":["case","case_artifact","case_task","case_task_log","action","alert","user"],"info":"No webhook data is received for the following events:\n\n- Creation of Dashboard\n- Creation of Case Templates\n"},{"id":"65928861.c90a48","type":"switch","z":"dca608c3.7d8af8","name":"Update","property":"payload.objectType","propertyType":"msg","rules":[{"t":"eq","v":"case","vt":"str"},{"t":"eq","v":"case_artifact","vt":"str"},{"t":"eq","v":"case_artifact_job","vt":"str"},{"t":"eq","v":"case_task","vt":"str"},{"t":"eq","v":"case_task_log","vt":"str"},{"t":"eq","v":"alert","vt":"str"},{"t":"eq","v":"user","vt":"str"}],"checkall":"false","repair":false,"outputs":7,"x":900,"y":860,"wires":[["eebe1748.1cd348"],["d703adc0.12fd1"],["2b738415.408d4c"],["6d97371a.406348"],["4ae621e1.9ae6"],["5786cee2.98109"],["54077728.447648"]],"inputLabels":["Operation"],"outputLabels":["case","case_artifact",null,"case_task","case_task_log","alert","user"]},{"id":"a259a26c.a21","type":"switch","z":"dca608c3.7d8af8","name":"Delete","property":"payload.objectType","propertyType":"msg","rules":[{"t":"eq","v":"case","vt":"str"},{"t":"eq","v":"case_artifact","vt":"str"},{"t":"eq","v":"case_task_log","vt":"str"}],"checkall":"false","repair":false,"outputs":3,"x":890,"y":1200,"wires":[["60c8bcfb.eff1f4"],["df708bab.348308"],["e9a8650c.e20cc8"]],"outputLabels":["case","case_artifact",""],"info":"Deleting a case task doesnt actually trigger a delete event. 
It triggers an `update` event where the status = cancelled"},{"id":"54077728.447648","type":"switch","z":"dca608c3.7d8af8","name":"User","property":"payload.object.status","propertyType":"msg","rules":[{"t":"eq","v":"Locked","vt":"str"},{"t":"eq","v":"Ok","vt":"str"}],"checkall":"false","repair":false,"outputs":2,"x":1130,"y":980,"wires":[["9429d6c5.5ac788"],["4e3e091c.d35388"]]},{"id":"9429d6c5.5ac788","type":"function","z":"dca608c3.7d8af8","name":"status: Locked","func":"msg.topic = \"[The Hive] A user account was locked\";\nmsg.from = \"from@example.com\";\nmsg.to = \"to@example.com\";\nreturn msg;","outputs":1,"noerr":0,"x":1380,"y":972,"wires":[[]],"info":"- User account was locked"},{"id":"4e3e091c.d35388","type":"function","z":"dca608c3.7d8af8","name":"status: Ok","func":"msg.topic = \"[The Hive] A user account was changed\";\nmsg.from = \"from@example.com\";\nmsg.to = \"to@example.com\";\nreturn msg;","outputs":1,"noerr":0,"x":1360,"y":1020,"wires":[[]],"info":"- User account was unlocked\n- User description was changed\n- User role was changed\n- User API key was added\n- User API key was revoked\n"},{"id":"485f3be.1ffcfc4","type":"function","z":"dca608c3.7d8af8","name":"status: Open","func":"// Fires when a Case is updated AND status = open\n// This can include things like TLP/PAP changes\n\nreturn msg;","outputs":1,"noerr":0,"x":1370,"y":660,"wires":[[]]},{"id":"eebe1748.1cd348","type":"switch","z":"dca608c3.7d8af8","name":"case","property":"payload.object.status","propertyType":"msg","rules":[{"t":"eq","v":"Open","vt":"str"}],"checkall":"true","repair":false,"outputs":1,"x":1130,"y":740,"wires":[["485f3be.1ffcfc4","e4b7b4bf.2fb828"]],"info":"- A case was modified"},{"id":"8c54e39.a1b4f2","type":"switch","z":"dca608c3.7d8af8","name":"case_artifact: Run Analyzer","property":"payload.object.dataType","propertyType":"msg","rules":[{"t":"eq","v":"ip","vt":"str"},{"t":"eq","v":"domain","vt":"str"}],"checkall":"true","repair":false,"outputs":2,"x":1600,"y":340,"wires":[["eb8cfeb7.a7118","a5dd8a8a.065b88"],["eb8cfeb7.a7118","a5dd8a8a.065b88"]],"info":"# References\n\n\n"},{"id":"2fce29bb.1b1376","type":"function","z":"dca608c3.7d8af8","name":"Add headers","func":"msg.thehive_url = 'https://{{ MASTERIP }}/thehive';\nmsg.cortex_url = 'https://{{ MASTERIP }}/cortex';\nmsg.cortex_id = 'CORTEX-SERVER-ID';\nreturn msg;","outputs":1,"noerr":0,"x":350,"y":780,"wires":[["a4ecb84a.805958"]]},{"id":"e4b7b4bf.2fb828","type":"function","z":"dca608c3.7d8af8","name":"status: Resolved","func":"// Fires when a case is closed (resolved)\n\nreturn msg;","outputs":1,"noerr":0,"x":1390,"y":720,"wires":[[]]},{"id":"e88b4cc2.f6afe","type":"function","z":"dca608c3.7d8af8","name":"case","func":"// Fires when a case is created\n// or when a responder is generated against a case\n\nreturn msg;","outputs":1,"noerr":0,"x":1130,"y":320,"wires":[[]]},{"id":"64203fe8.e0ad5","type":"function","z":"dca608c3.7d8af8","name":"case_task","func":"// Fires when a case task is created\nreturn msg;","outputs":1,"noerr":0,"x":1140,"y":400,"wires":[[]]},{"id":"3511de51.889a02","type":"function","z":"dca608c3.7d8af8","name":"case_task_log","func":"// Fires when a case task log is created\n\nreturn msg;","outputs":1,"noerr":0,"x":1163,"y":440,"wires":[[]]},{"id":"14544a8b.b6b2f5","type":"function","z":"dca608c3.7d8af8","name":"case_artifact_job","func":"// Fires when a Responder or Analyzser is Run on an existing observable\n\nreturn 
msg;","outputs":1,"noerr":0,"x":1173,"y":480,"wires":[[]]},{"id":"2b738415.408d4c","type":"function","z":"dca608c3.7d8af8","name":"case_artifact_job","func":"\nreturn msg;","outputs":1,"noerr":0,"x":1170,"y":820,"wires":[[]]},{"id":"3eb4bedf.6e20a2","type":"function","z":"dca608c3.7d8af8","name":"user","func":"// Fires when a user is created\n\nreturn msg;","outputs":1,"noerr":0,"x":1133,"y":560,"wires":[[]]},{"id":"d703adc0.12fd1","type":"function","z":"dca608c3.7d8af8","name":"case_artifact","func":"// Fires when an artifact is updated\nreturn msg;","outputs":1,"noerr":0,"x":1150,"y":780,"wires":[[]]},{"id":"6d97371a.406348","type":"function","z":"dca608c3.7d8af8","name":"case_task","func":"// Fires when a case task is updated\nreturn msg;","outputs":1,"noerr":0,"x":1140,"y":860,"wires":[[]]},{"id":"4ae621e1.9ae6","type":"function","z":"dca608c3.7d8af8","name":"case_task_log","func":"//Fires when a case_task_log is updated\n\nreturn msg;","outputs":1,"noerr":0,"x":1160,"y":900,"wires":[[]]},{"id":"60c8bcfb.eff1f4","type":"function","z":"dca608c3.7d8af8","name":"case","func":"//Fires when a case is deleted\nreturn msg;","outputs":1,"noerr":0,"x":1130,"y":1160,"wires":[[]]},{"id":"df708bab.348308","type":"function","z":"dca608c3.7d8af8","name":"case_artifact","func":"//Fires when a case_artifact is deleted\nreturn msg;","outputs":1,"noerr":0,"x":1150,"y":1200,"wires":[[]]},{"id":"e9a8650c.e20cc8","type":"function","z":"dca608c3.7d8af8","name":"case_task_log","func":"//Fires when a case_task_log is deleted\nreturn msg;","outputs":1,"noerr":0,"x":1160,"y":1240,"wires":[[]]},{"id":"5786cee2.98109","type":"function","z":"dca608c3.7d8af8","name":"alert","func":"//Fires when an alert is updated\nreturn msg;","outputs":1,"noerr":0,"x":1130,"y":940,"wires":[[]]},{"id":"44c595a4.45d45c","type":"change","z":"dca608c3.7d8af8","d":true,"name":"Convert Alert Msg to Artifacts","rules":[{"t":"move","p":"payload.object.artifacts","pt":"msg","to":"payload","tot":"msg"}],"action":"","property":"","from":"","to":"","reg":false,"x":1200,"y":520,"wires":[["6dcca25e.04bd2c"]]},{"id":"6dcca25e.04bd2c","type":"split","z":"dca608c3.7d8af8","name":"Split Artifacts","splt":"\\n","spltType":"str","arraySplt":1,"arraySpltType":"len","stream":false,"addname":"","x":1430,"y":520,"wires":[["767c84f2.c9ba2c"]]},{"id":"767c84f2.c9ba2c","type":"switch","z":"dca608c3.7d8af8","name":"alert: Run Analyzer","property":"payload.dataType","propertyType":"msg","rules":[{"t":"eq","v":"ip","vt":"str"},{"t":"eq","v":"domain","vt":"str"}],"checkall":"true","repair":false,"outputs":2,"x":1630,"y":400,"wires":[["eb8cfeb7.a7118","a5dd8a8a.065b88"],["a5dd8a8a.065b88","eb8cfeb7.a7118"]],"info":"# References\n\n\n"},{"id":"82ad0f08.7a53f","type":"http response","z":"dca608c3.7d8af8","name":"Ack Event Receipt","statusCode":"200","headers":{},"x":250,"y":940,"wires":[]},{"id":"a5dd8a8a.065b88","type":"function","z":"dca608c3.7d8af8","name":"Run Analyzer: CERT DNS","func":"msg.analyzer_id = \"4f28afc20d78f98df425e36e561af33f\";\n\nif (msg.payload.objectId) {\n msg.tag = \"case_artifact\"\n msg.artifact_id = msg.payload.objectId\n msg.url = msg.thehive_url + '/api/connector/cortex/job';\n msg.payload = {\n 'cortexId' : msg.cortex_id,\n 'artifactId': msg.artifact_id,\n 'analyzerId': msg.analyzer_id\n };\n}\nelse {\n msg.tag = \"observable\"\n msg.observable = msg.payload.data\n msg.dataType = msg.payload.dataType\n\n msg.url = msg.cortex_url + '/api/analyzer/' + msg.analyzer_id + '/run';\n msg.payload = {\n 'data' : msg.observable,\n 'dataType': 
msg.dataType \n };\n}\nreturn msg;","outputs":1,"noerr":0,"x":1930,"y":420,"wires":[["f050a09f.b2201"]]},{"id":"eb8cfeb7.a7118","type":"function","z":"dca608c3.7d8af8","name":"Run Analyzer: Urlscan","func":"msg.analyzer_id = \"54e51b62c6c8ddc3cbc3cbdd889a0557\";\n\nif (msg.payload.objectId) {\n msg.tag = \"case_artifact\"\n msg.artifact_id = msg.payload.objectId\n msg.url = msg.thehive_url + '/api/connector/cortex/job';\n msg.payload = {\n 'cortexId' : msg.cortex_id,\n 'artifactId': msg.artifact_id,\n 'analyzerId': msg.analyzer_id\n };\n}\nelse {\n msg.tag = \"observable\"\n msg.observable = msg.payload.data\n msg.dataType = msg.payload.dataType\n\n msg.url = msg.cortex_url + '/api/analyzer/' + msg.analyzer_id + '/run';\n msg.payload = {\n 'data' : msg.observable,\n 'dataType': msg.dataType \n };\n}\nreturn msg;","outputs":1,"noerr":0,"x":1920,"y":320,"wires":[["f050a09f.b2201"]]},{"id":"1c448528.3032fb","type":"http request","z":"dca608c3.7d8af8","name":"Submit to Cortex","method":"POST","ret":"obj","paytoqs":false,"url":"","tls":"4db74fa6.2556d","persist":false,"proxy":"","authType":"bearer","credentials": {"user": "", "password": "{{ CORTEXKEY }}"},"x":2450,"y":420,"wires":[["ea6614fb.752a78"]]},{"id":"ea6614fb.752a78","type":"debug","z":"dca608c3.7d8af8","name":"Debug","active":true,"tosidebar":true,"console":false,"tostatus":false,"complete":"true","targetType":"full","x":2670,"y":360,"wires":[]},{"id":"f050a09f.b2201","type":"switch","z":"dca608c3.7d8af8","name":"Cases vs Alerts","property":"tag","propertyType":"msg","rules":[{"t":"eq","v":"case_artifact","vt":"str"},{"t":"eq","v":"observable","vt":"str"}],"checkall":"true","repair":false,"outputs":2,"x":2200,"y":360,"wires":[["f7fca977.a73b28"],["1c448528.3032fb"]],"inputLabels":["Data"],"outputLabels":["Cases","Alerts"]},{"id":"f7fca977.a73b28","type":"http request","z":"dca608c3.7d8af8","name":"Submit to TheHive","method":"POST","ret":"obj","paytoqs":false,"url":"","tls":"4db74fa6.2556d","persist":false,"proxy":"","authType":"bearer","credentials": {"user": "", "password": "{{ HIVEKEY }}"},"x":2450,"y":280,"wires":[["ea6614fb.752a78"]]}]
+[{"id":"dca608c3.7d8af8","type":"tab","label":"TheHive - Webhook Events","disabled":false,"info":""},{"id":"4db74fa6.2556d","type":"tls-config","z":"","name":"","cert":"","key":"","ca":"","certname":"","keyname":"","caname":"","servername":"","verifyservercert":false},{"id":"aa6cf50d.a02fc8","type":"http in","z":"dca608c3.7d8af8","name":"TheHive Listener","url":"/thehive","method":"post","upload":false,"swaggerDoc":"","x":120,"y":780,"wires":[["2b92aebb.853dc2","2fce29bb.1b1376","82ad0f08.7a53f"]]},{"id":"2b92aebb.853dc2","type":"debug","z":"dca608c3.7d8af8","name":"","active":true,"tosidebar":true,"console":false,"tostatus":false,"complete":"payload","targetType":"msg","x":470,"y":940,"wires":[]},{"id":"a4ecb84a.805958","type":"switch","z":"dca608c3.7d8af8","name":"Operation","property":"payload.operation","propertyType":"msg","rules":[{"t":"eq","v":"Creation","vt":"str"},{"t":"eq","v":"Update","vt":"str"},{"t":"eq","v":"Delete","vt":"str"}],"checkall":"false","repair":false,"outputs":3,"x":580,"y":780,"wires":[["f1e954fd.3c21d8"],["65928861.c90a48"],["a259a26c.a21"]],"outputLabels":["Creation","Update","Delete"]},{"id":"f1e954fd.3c21d8","type":"switch","z":"dca608c3.7d8af8","name":"Creation","property":"payload.objectType","propertyType":"msg","rules":[{"t":"eq","v":"case","vt":"str"},{"t":"eq","v":"case_artifact","vt":"str"},{"t":"eq","v":"case_task","vt":"str"},{"t":"eq","v":"case_task_log","vt":"str"},{"t":"eq","v":"case_artifact_job","vt":"str"},{"t":"eq","v":"alert","vt":"str"},{"t":"eq","v":"user","vt":"str"}],"checkall":"false","repair":false,"outputs":7,"x":900,"y":480,"wires":[["e88b4cc2.f6afe"],["8c54e39.a1b4f2"],["64203fe8.e0ad5"],["3511de51.889a02"],["14544a8b.b6b2f5"],["44c595a4.45d45c"],["3eb4bedf.6e20a2"]],"inputLabels":["Operation"],"outputLabels":["case","case_artifact","case_task","case_task_log","action","alert","user"],"info":"No webhook data is received for the following events:\n\n- Creation of Dashboard\n- Creation of Case Templates\n"},{"id":"65928861.c90a48","type":"switch","z":"dca608c3.7d8af8","name":"Update","property":"payload.objectType","propertyType":"msg","rules":[{"t":"eq","v":"case","vt":"str"},{"t":"eq","v":"case_artifact","vt":"str"},{"t":"eq","v":"case_artifact_job","vt":"str"},{"t":"eq","v":"case_task","vt":"str"},{"t":"eq","v":"case_task_log","vt":"str"},{"t":"eq","v":"alert","vt":"str"},{"t":"eq","v":"user","vt":"str"}],"checkall":"false","repair":false,"outputs":7,"x":900,"y":860,"wires":[["eebe1748.1cd348"],["d703adc0.12fd1"],["2b738415.408d4c"],["6d97371a.406348"],["4ae621e1.9ae6"],["5786cee2.98109"],["54077728.447648"]],"inputLabels":["Operation"],"outputLabels":["case","case_artifact",null,"case_task","case_task_log","alert","user"]},{"id":"a259a26c.a21","type":"switch","z":"dca608c3.7d8af8","name":"Delete","property":"payload.objectType","propertyType":"msg","rules":[{"t":"eq","v":"case","vt":"str"},{"t":"eq","v":"case_artifact","vt":"str"},{"t":"eq","v":"case_task_log","vt":"str"}],"checkall":"false","repair":false,"outputs":3,"x":890,"y":1200,"wires":[["60c8bcfb.eff1f4"],["df708bab.348308"],["e9a8650c.e20cc8"]],"outputLabels":["case","case_artifact",""],"info":"Deleting a case task doesnt actually trigger a delete event. 
It triggers an `update` event where the status = cancelled"},{"id":"54077728.447648","type":"switch","z":"dca608c3.7d8af8","name":"User","property":"payload.object.status","propertyType":"msg","rules":[{"t":"eq","v":"Locked","vt":"str"},{"t":"eq","v":"Ok","vt":"str"}],"checkall":"false","repair":false,"outputs":2,"x":1130,"y":980,"wires":[["9429d6c5.5ac788"],["4e3e091c.d35388"]]},{"id":"9429d6c5.5ac788","type":"function","z":"dca608c3.7d8af8","name":"status: Locked","func":"msg.topic = \"[The Hive] A user account was locked\";\nmsg.from = \"from@example.com\";\nmsg.to = \"to@example.com\";\nreturn msg;","outputs":1,"noerr":0,"x":1380,"y":972,"wires":[[]],"info":"- User account was locked"},{"id":"4e3e091c.d35388","type":"function","z":"dca608c3.7d8af8","name":"status: Ok","func":"msg.topic = \"[The Hive] A user account was changed\";\nmsg.from = \"from@example.com\";\nmsg.to = \"to@example.com\";\nreturn msg;","outputs":1,"noerr":0,"x":1360,"y":1020,"wires":[[]],"info":"- User account was unlocked\n- User description was changed\n- User role was changed\n- User API key was added\n- User API key was revoked\n"},{"id":"485f3be.1ffcfc4","type":"function","z":"dca608c3.7d8af8","name":"status: Open","func":"// Fires when a Case is updated AND status = open\n// This can include things like TLP/PAP changes\n\nreturn msg;","outputs":1,"noerr":0,"x":1370,"y":660,"wires":[[]]},{"id":"eebe1748.1cd348","type":"switch","z":"dca608c3.7d8af8","name":"case","property":"payload.object.status","propertyType":"msg","rules":[{"t":"eq","v":"Open","vt":"str"}],"checkall":"true","repair":false,"outputs":1,"x":1130,"y":740,"wires":[["485f3be.1ffcfc4","e4b7b4bf.2fb828"]],"info":"- A case was modified"},{"id":"8c54e39.a1b4f2","type":"switch","z":"dca608c3.7d8af8","name":"case_artifact: Run Analyzer","property":"payload.object.dataType","propertyType":"msg","rules":[{"t":"eq","v":"ip","vt":"str"},{"t":"eq","v":"domain","vt":"str"}],"checkall":"true","repair":false,"outputs":2,"x":1600,"y":340,"wires":[["eb8cfeb7.a7118","a5dd8a8a.065b88"],["eb8cfeb7.a7118","a5dd8a8a.065b88"]],"info":"# References\n\n\n"},{"id":"2fce29bb.1b1376","type":"function","z":"dca608c3.7d8af8","name":"Add headers","func":"msg.thehive_url = 'https://{{ MANAGERIP }}/thehive';\nmsg.cortex_url = 'https://{{ MANAGERIP }}/cortex';\nmsg.cortex_id = 'CORTEX-SERVER-ID';\nreturn msg;","outputs":1,"noerr":0,"x":350,"y":780,"wires":[["a4ecb84a.805958"]]},{"id":"e4b7b4bf.2fb828","type":"function","z":"dca608c3.7d8af8","name":"status: Resolved","func":"// Fires when a case is closed (resolved)\n\nreturn msg;","outputs":1,"noerr":0,"x":1390,"y":720,"wires":[[]]},{"id":"e88b4cc2.f6afe","type":"function","z":"dca608c3.7d8af8","name":"case","func":"// Fires when a case is created\n// or when a responder is generated against a case\n\nreturn msg;","outputs":1,"noerr":0,"x":1130,"y":320,"wires":[[]]},{"id":"64203fe8.e0ad5","type":"function","z":"dca608c3.7d8af8","name":"case_task","func":"// Fires when a case task is created\nreturn msg;","outputs":1,"noerr":0,"x":1140,"y":400,"wires":[[]]},{"id":"3511de51.889a02","type":"function","z":"dca608c3.7d8af8","name":"case_task_log","func":"// Fires when a case task log is created\n\nreturn msg;","outputs":1,"noerr":0,"x":1163,"y":440,"wires":[[]]},{"id":"14544a8b.b6b2f5","type":"function","z":"dca608c3.7d8af8","name":"case_artifact_job","func":"// Fires when a Responder or Analyzser is Run on an existing observable\n\nreturn 
msg;","outputs":1,"noerr":0,"x":1173,"y":480,"wires":[[]]},{"id":"2b738415.408d4c","type":"function","z":"dca608c3.7d8af8","name":"case_artifact_job","func":"\nreturn msg;","outputs":1,"noerr":0,"x":1170,"y":820,"wires":[[]]},{"id":"3eb4bedf.6e20a2","type":"function","z":"dca608c3.7d8af8","name":"user","func":"// Fires when a user is created\n\nreturn msg;","outputs":1,"noerr":0,"x":1133,"y":560,"wires":[[]]},{"id":"d703adc0.12fd1","type":"function","z":"dca608c3.7d8af8","name":"case_artifact","func":"// Fires when an artifact is updated\nreturn msg;","outputs":1,"noerr":0,"x":1150,"y":780,"wires":[[]]},{"id":"6d97371a.406348","type":"function","z":"dca608c3.7d8af8","name":"case_task","func":"// Fires when a case task is updated\nreturn msg;","outputs":1,"noerr":0,"x":1140,"y":860,"wires":[[]]},{"id":"4ae621e1.9ae6","type":"function","z":"dca608c3.7d8af8","name":"case_task_log","func":"//Fires when a case_task_log is updated\n\nreturn msg;","outputs":1,"noerr":0,"x":1160,"y":900,"wires":[[]]},{"id":"60c8bcfb.eff1f4","type":"function","z":"dca608c3.7d8af8","name":"case","func":"//Fires when a case is deleted\nreturn msg;","outputs":1,"noerr":0,"x":1130,"y":1160,"wires":[[]]},{"id":"df708bab.348308","type":"function","z":"dca608c3.7d8af8","name":"case_artifact","func":"//Fires when a case_artifact is deleted\nreturn msg;","outputs":1,"noerr":0,"x":1150,"y":1200,"wires":[[]]},{"id":"e9a8650c.e20cc8","type":"function","z":"dca608c3.7d8af8","name":"case_task_log","func":"//Fires when a case_task_log is deleted\nreturn msg;","outputs":1,"noerr":0,"x":1160,"y":1240,"wires":[[]]},{"id":"5786cee2.98109","type":"function","z":"dca608c3.7d8af8","name":"alert","func":"//Fires when an alert is updated\nreturn msg;","outputs":1,"noerr":0,"x":1130,"y":940,"wires":[[]]},{"id":"44c595a4.45d45c","type":"change","z":"dca608c3.7d8af8","d":true,"name":"Convert Alert Msg to Artifacts","rules":[{"t":"move","p":"payload.object.artifacts","pt":"msg","to":"payload","tot":"msg"}],"action":"","property":"","from":"","to":"","reg":false,"x":1200,"y":520,"wires":[["6dcca25e.04bd2c"]]},{"id":"6dcca25e.04bd2c","type":"split","z":"dca608c3.7d8af8","name":"Split Artifacts","splt":"\\n","spltType":"str","arraySplt":1,"arraySpltType":"len","stream":false,"addname":"","x":1430,"y":520,"wires":[["767c84f2.c9ba2c"]]},{"id":"767c84f2.c9ba2c","type":"switch","z":"dca608c3.7d8af8","name":"alert: Run Analyzer","property":"payload.dataType","propertyType":"msg","rules":[{"t":"eq","v":"ip","vt":"str"},{"t":"eq","v":"domain","vt":"str"}],"checkall":"true","repair":false,"outputs":2,"x":1630,"y":400,"wires":[["eb8cfeb7.a7118","a5dd8a8a.065b88"],["a5dd8a8a.065b88","eb8cfeb7.a7118"]],"info":"# References\n\n\n"},{"id":"82ad0f08.7a53f","type":"http response","z":"dca608c3.7d8af8","name":"Ack Event Receipt","statusCode":"200","headers":{},"x":250,"y":940,"wires":[]},{"id":"a5dd8a8a.065b88","type":"function","z":"dca608c3.7d8af8","name":"Run Analyzer: CERT DNS","func":"msg.analyzer_id = \"4f28afc20d78f98df425e36e561af33f\";\n\nif (msg.payload.objectId) {\n msg.tag = \"case_artifact\"\n msg.artifact_id = msg.payload.objectId\n msg.url = msg.thehive_url + '/api/connector/cortex/job';\n msg.payload = {\n 'cortexId' : msg.cortex_id,\n 'artifactId': msg.artifact_id,\n 'analyzerId': msg.analyzer_id\n };\n}\nelse {\n msg.tag = \"observable\"\n msg.observable = msg.payload.data\n msg.dataType = msg.payload.dataType\n\n msg.url = msg.cortex_url + '/api/analyzer/' + msg.analyzer_id + '/run';\n msg.payload = {\n 'data' : msg.observable,\n 'dataType': 
msg.dataType \n };\n}\nreturn msg;","outputs":1,"noerr":0,"x":1930,"y":420,"wires":[["f050a09f.b2201"]]},{"id":"eb8cfeb7.a7118","type":"function","z":"dca608c3.7d8af8","name":"Run Analyzer: Urlscan","func":"msg.analyzer_id = \"54e51b62c6c8ddc3cbc3cbdd889a0557\";\n\nif (msg.payload.objectId) {\n msg.tag = \"case_artifact\"\n msg.artifact_id = msg.payload.objectId\n msg.url = msg.thehive_url + '/api/connector/cortex/job';\n msg.payload = {\n 'cortexId' : msg.cortex_id,\n 'artifactId': msg.artifact_id,\n 'analyzerId': msg.analyzer_id\n };\n}\nelse {\n msg.tag = \"observable\"\n msg.observable = msg.payload.data\n msg.dataType = msg.payload.dataType\n\n msg.url = msg.cortex_url + '/api/analyzer/' + msg.analyzer_id + '/run';\n msg.payload = {\n 'data' : msg.observable,\n 'dataType': msg.dataType \n };\n}\nreturn msg;","outputs":1,"noerr":0,"x":1920,"y":320,"wires":[["f050a09f.b2201"]]},{"id":"1c448528.3032fb","type":"http request","z":"dca608c3.7d8af8","name":"Submit to Cortex","method":"POST","ret":"obj","paytoqs":false,"url":"","tls":"4db74fa6.2556d","persist":false,"proxy":"","authType":"bearer","credentials": {"user": "", "password": "{{ CORTEXKEY }}"},"x":2450,"y":420,"wires":[["ea6614fb.752a78"]]},{"id":"ea6614fb.752a78","type":"debug","z":"dca608c3.7d8af8","name":"Debug","active":true,"tosidebar":true,"console":false,"tostatus":false,"complete":"true","targetType":"full","x":2670,"y":360,"wires":[]},{"id":"f050a09f.b2201","type":"switch","z":"dca608c3.7d8af8","name":"Cases vs Alerts","property":"tag","propertyType":"msg","rules":[{"t":"eq","v":"case_artifact","vt":"str"},{"t":"eq","v":"observable","vt":"str"}],"checkall":"true","repair":false,"outputs":2,"x":2200,"y":360,"wires":[["f7fca977.a73b28"],["1c448528.3032fb"]],"inputLabels":["Data"],"outputLabels":["Cases","Alerts"]},{"id":"f7fca977.a73b28","type":"http request","z":"dca608c3.7d8af8","name":"Submit to TheHive","method":"POST","ret":"obj","paytoqs":false,"url":"","tls":"4db74fa6.2556d","persist":false,"proxy":"","authType":"bearer","credentials": {"user": "", "password": "{{ HIVEKEY }}"},"x":2450,"y":280,"wires":[["ea6614fb.752a78"]]}]
diff --git a/salt/pcap/files/sensoroni.json b/salt/pcap/files/sensoroni.json
index ed673d969..edcec2f6c 100644
--- a/salt/pcap/files/sensoroni.json
+++ b/salt/pcap/files/sensoroni.json
@@ -1,11 +1,11 @@
-{%- set MASTER = grains['master'] -%}
+{%- set MANAGER = grains['manager'] -%}
{%- set SENSORONIKEY = salt['pillar.get']('static:sensoronikey', '') -%}
{
"logFilename": "/opt/sensoroni/logs/sensoroni.log",
"logLevel":"debug",
"agent": {
"pollIntervalMs": 10000,
- "serverUrl": "https://{{ MASTER }}/sensoroniagents",
+ "serverUrl": "https://{{ MANAGER }}/sensoroniagents",
"verifyCert": false,
"modules": {
"statickeyauth": {
diff --git a/salt/pcap/init.sls b/salt/pcap/init.sls
index bcf09b765..a492ffc82 100644
--- a/salt/pcap/init.sls
+++ b/salt/pcap/init.sls
@@ -13,7 +13,7 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
-{% set MASTER = salt['grains.get']('master') %}
+{% set MANAGER = salt['grains.get']('manager') %}
{% set INTERFACE = salt['pillar.get']('sensor:interface', 'bond0') %}
{% set BPF_STENO = salt['pillar.get']('steno:bpf', None) %}
{% set BPF_COMPILED = "" %}
@@ -129,7 +129,7 @@ sensoronilog:
so-steno:
docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-steno:{{ VERSION }}
+ - image: {{ MANAGER }}:5000/soshybridhunter/so-steno:{{ VERSION }}
- network_mode: host
- privileged: True
- port_bindings:
@@ -146,7 +146,7 @@ so-steno:
so-sensoroni:
docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-soc:{{ VERSION }}
+ - image: {{ MANAGER }}:5000/soshybridhunter/so-soc:{{ VERSION }}
- network_mode: host
- binds:
- /opt/so/conf/steno/certs:/etc/stenographer/certs:rw
diff --git a/salt/playbook/init.sls b/salt/playbook/init.sls
index eca8bda40..da1461871 100644
--- a/salt/playbook/init.sls
+++ b/salt/playbook/init.sls
@@ -1,7 +1,7 @@
-{% set MASTERIP = salt['pillar.get']('master:mainip', '') %}
+{% set MANAGERIP = salt['pillar.get']('manager:mainip', '') %}
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
-{% set MASTER = salt['grains.get']('master') %}
-{% set MAINIP = salt['grains.get']('ip_interfaces').get(salt['pillar.get']('sensor:mainint', salt['pillar.get']('master:mainint', salt['pillar.get']('elasticsearch:mainint', salt['pillar.get']('host:mainint')))))[0] %}
+{% set MANAGER = salt['grains.get']('manager') %}
+{% set MAINIP = salt['grains.get']('ip_interfaces').get(salt['pillar.get']('sensor:mainint', salt['pillar.get']('manager:mainint', salt['pillar.get']('elasticsearch:mainint', salt['pillar.get']('host:mainint')))))[0] %}
{%- set MYSQLPASS = salt['pillar.get']('secrets:mysql', None) -%}
{%- set PLAYBOOKPASS = salt['pillar.get']('secrets:playbook', None) -%}
@@ -40,7 +40,7 @@ query_playbookdbuser_grants:
query_updatwebhooks:
mysql_query.run:
- database: playbook
- - query: "update webhooks set url = 'http://{{MASTERIP}}:7000/playbook/webhook' where project_id = 1"
+ - query: "update webhooks set url = 'http://{{MANAGERIP}}:7000/playbook/webhook' where project_id = 1"
- connection_host: {{ MAINIP }}
- connection_port: 3306
- connection_user: root
@@ -53,8 +53,8 @@ query_updatepluginurls:
update settings set value =
"--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess
project: '1'
- convert_url: http://{{MASTERIP}}:7000/playbook/sigmac
- create_url: http://{{MASTERIP}}:7000/playbook/play"
+ convert_url: http://{{MANAGERIP}}:7000/playbook/sigmac
+ create_url: http://{{MANAGERIP}}:7000/playbook/play"
where id = 43
- connection_host: {{ MAINIP }}
- connection_port: 3306
@@ -73,11 +73,11 @@ playbook_password_none:
so-playbook:
docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-playbook:{{ VERSION }}
+ - image: {{ MANAGER }}:5000/soshybridhunter/so-playbook:{{ VERSION }}
- hostname: playbook
- name: so-playbook
- environment:
- - REDMINE_DB_MYSQL={{ MASTERIP }}
+ - REDMINE_DB_MYSQL={{ MANAGERIP }}
- REDMINE_DB_DATABASE=playbook
- REDMINE_DB_USERNAME=playbookdbuser
- REDMINE_DB_PASSWORD={{ PLAYBOOKPASS }}
diff --git a/salt/reactor/fleet.sls b/salt/reactor/fleet.sls
index 4b09abe0a..c29ab85ed 100644
--- a/salt/reactor/fleet.sls
+++ b/salt/reactor/fleet.sls
@@ -13,7 +13,7 @@ def run():
STATICFILE = f"{LOCAL_SALT_DIR}/pillar/static.sls"
SECRETSFILE = f"{LOCAL_SALT_DIR}/pillar/secrets.sls"
- if MINIONID.split('_')[-1] in ['master','eval','fleet','mastersearch','standalone']:
+ if MINIONID.split('_')[-1] in ['manager','eval','fleet','managersearch','standalone']:
if ACTION == 'enablefleet':
logging.info('so/fleet enablefleet reactor')
@@ -27,7 +27,7 @@ def run():
if ROLE == 'so-fleet':
line = re.sub(r'fleet_node: \S*', f"fleet_node: True", line.rstrip())
else:
- line = re.sub(r'fleet_master: \S*', f"fleet_master: True", line.rstrip())
+ line = re.sub(r'fleet_manager: \S*', f"fleet_manager: True", line.rstrip())
print(line)
# Update the enroll secret in the secrets pillar
@@ -50,7 +50,7 @@ def run():
PACKAGEVERSION = data['data']['current-package-version']
PACKAGEHOSTNAME = data['data']['package-hostname']
- MASTER = data['data']['master']
+ MANAGER = data['data']['manager']
VERSION = data['data']['version']
ESECRET = data['data']['enroll-secret']
@@ -59,7 +59,7 @@ def run():
# Run Docker container that will build the packages
gen_packages = subprocess.run(["docker", "run","--rm", "--mount", f"type=bind,source={LOCAL_SALT_DIR}/salt/fleet/packages,target=/output", \
- "--mount", "type=bind,source=/etc/ssl/certs/intca.crt,target=/var/launcher/launcher.crt", f"{ MASTER }:5000/soshybridhunter/so-fleet-launcher:{ VERSION }", \
+ "--mount", "type=bind,source=/etc/ssl/certs/intca.crt,target=/var/launcher/launcher.crt", f"{ MANAGER }:5000/soshybridhunter/so-fleet-launcher:{ VERSION }", \
f"{ESECRET}", f"{PACKAGEHOSTNAME}:8090", f"{PACKAGEVERSION}.1.1"], stdout=subprocess.PIPE, encoding='ascii')
# Update the 'packages-built' timestamp on the webpage (stored in the static pillar)
diff --git a/salt/redis/init.sls b/salt/redis/init.sls
index 5db53957c..ec36d164c 100644
--- a/salt/redis/init.sls
+++ b/salt/redis/init.sls
@@ -13,7 +13,7 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
-{% set MASTER = salt['grains.get']('master') %}
+{% set MANAGER = salt['grains.get']('manager') %}
# Redis Setup
redisconfdir:
@@ -47,7 +47,7 @@ redisconfsync:
so-redis:
docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-redis:{{ VERSION }}
+ - image: {{ MANAGER }}:5000/soshybridhunter/so-redis:{{ VERSION }}
- hostname: so-redis
- user: socore
- port_bindings:
diff --git a/salt/soc/files/kratos/kratos.yaml b/salt/soc/files/kratos/kratos.yaml
index 7939ec35b..2171971bc 100644
--- a/salt/soc/files/kratos/kratos.yaml
+++ b/salt/soc/files/kratos/kratos.yaml
@@ -1,4 +1,4 @@
-{%- set WEBACCESS = salt['pillar.get']('master:url_base', '') -%}
+{%- set WEBACCESS = salt['pillar.get']('manager:url_base', '') -%}
{%- set KRATOSKEY = salt['pillar.get']('kratos:kratoskey', '') -%}
selfservice:
diff --git a/salt/soc/files/soc/soc.json b/salt/soc/files/soc/soc.json
index 693c44aeb..7c7614a14 100644
--- a/salt/soc/files/soc/soc.json
+++ b/salt/soc/files/soc/soc.json
@@ -1,4 +1,4 @@
-{%- set MASTERIP = salt['pillar.get']('static:masterip', '') -%}
+{%- set MANAGERIP = salt['pillar.get']('static:managerip', '') -%}
{%- set SENSORONIKEY = salt['pillar.get']('static:sensoronikey', '') -%}
{
"logFilename": "/opt/sensoroni/logs/sensoroni-server.log",
@@ -12,10 +12,10 @@
"jobDir": "jobs"
},
"kratos": {
- "hostUrl": "http://{{ MASTERIP }}:4434/"
+ "hostUrl": "http://{{ MANAGERIP }}:4434/"
},
"elastic": {
- "hostUrl": "http://{{ MASTERIP }}:9200",
+ "hostUrl": "http://{{ MANAGERIP }}:9200",
"username": "",
"password": "",
"verifyCert": false
diff --git a/salt/soc/init.sls b/salt/soc/init.sls
index cc2c9dfd6..bf7250e72 100644
--- a/salt/soc/init.sls
+++ b/salt/soc/init.sls
@@ -1,5 +1,5 @@
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
-{% set MASTER = salt['grains.get']('master') %}
+{% set MANAGER = salt['grains.get']('manager') %}
socdir:
file.directory:
@@ -33,7 +33,7 @@ socsync:
so-soc:
docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-soc:{{ VERSION }}
+ - image: {{ MANAGER }}:5000/soshybridhunter/so-soc:{{ VERSION }}
- hostname: soc
- name: so-soc
- binds:
@@ -84,7 +84,7 @@ kratossync:
so-kratos:
docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-kratos:{{ VERSION }}
+ - image: {{ MANAGER }}:5000/soshybridhunter/so-kratos:{{ VERSION }}
- hostname: kratos
- name: so-kratos
- binds:
diff --git a/salt/soctopus/files/SOCtopus.conf b/salt/soctopus/files/SOCtopus.conf
index d2c3eea2d..bdf4aafa9 100644
--- a/salt/soctopus/files/SOCtopus.conf
+++ b/salt/soctopus/files/SOCtopus.conf
@@ -1,10 +1,10 @@
-{%- set MASTER = salt['pillar.get']('master:url_base', '') %}
+{%- set MANAGER = salt['pillar.get']('manager:url_base', '') %}
{%- set HIVEKEY = salt['pillar.get']('static:hivekey', '') %}
{%- set CORTEXKEY = salt['pillar.get']('static:cortexorguserkey', '') %}
[es]
-es_url = http://{{MASTER}}:9200
-es_ip = {{MASTER}}
+es_url = http://{{MANAGER}}:9200
+es_ip = {{MANAGER}}
es_user = YOURESUSER
es_pass = YOURESPASS
es_index_pattern = so-*
@@ -12,7 +12,7 @@ es_verifycert = no
[cortex]
auto_analyze_alerts = no
-cortex_url = https://{{MASTER}}/cortex/
+cortex_url = https://{{MANAGER}}/cortex/
cortex_key = {{ CORTEXKEY }}
supported_analyzers = Urlscan_io_Search,CERTatPassiveDNS
@@ -33,7 +33,7 @@ grr_user = YOURGRRUSER
grr_pass = YOURGRRPASS
[hive]
-hive_url = https://{{MASTER}}/thehive/
+hive_url = https://{{MANAGER}}/thehive/
hive_key = {{ HIVEKEY }}
hive_tlp = 3
hive_verifycert = no
@@ -60,7 +60,7 @@ slack_url = YOURSLACKWORKSPACE
slack_webhook = YOURSLACKWEBHOOK
[playbook]
-playbook_url = http://{{MASTER}}:3200/playbook
+playbook_url = http://{{MANAGER}}:3200/playbook
playbook_key = de6639318502476f2fa5aa06f43f51fb389a3d7f
playbook_verifycert = no
playbook_unit_test_index = playbook-testing
diff --git a/salt/soctopus/files/templates/es-generic.template b/salt/soctopus/files/templates/es-generic.template
index cdda8a19b..b56050741 100644
--- a/salt/soctopus/files/templates/es-generic.template
+++ b/salt/soctopus/files/templates/es-generic.template
@@ -1,4 +1,4 @@
-{% set ES = salt['pillar.get']('static:masterip', '') %}
+{% set ES = salt['pillar.get']('static:managerip', '') %}
alert: modules.so.playbook-es.PlaybookESAlerter
elasticsearch_host: "{{ ES }}:9200"
diff --git a/salt/soctopus/files/templates/generic.template b/salt/soctopus/files/templates/generic.template
index 68dc040fc..4369a666e 100644
--- a/salt/soctopus/files/templates/generic.template
+++ b/salt/soctopus/files/templates/generic.template
@@ -1,5 +1,5 @@
-{% set es = salt['pillar.get']('static:masterip', '') %}
-{% set hivehost = salt['pillar.get']('static:masterip', '') %}
+{% set es = salt['pillar.get']('static:managerip', '') %}
+{% set hivehost = salt['pillar.get']('static:managerip', '') %}
{% set hivekey = salt['pillar.get']('static:hivekey', '') %}
alert: hivealerter
diff --git a/salt/soctopus/files/templates/osquery.template b/salt/soctopus/files/templates/osquery.template
index 28ea29ee9..f49e4fdfe 100644
--- a/salt/soctopus/files/templates/osquery.template
+++ b/salt/soctopus/files/templates/osquery.template
@@ -1,5 +1,5 @@
-{% set es = salt['pillar.get']('static:masterip', '') %}
-{% set hivehost = salt['pillar.get']('static:masterip', '') %}
+{% set es = salt['pillar.get']('static:managerip', '') %}
+{% set hivehost = salt['pillar.get']('static:managerip', '') %}
{% set hivekey = salt['pillar.get']('static:hivekey', '') %}
alert: hivealerter
diff --git a/salt/soctopus/init.sls b/salt/soctopus/init.sls
index 6c06fecff..1621de936 100644
--- a/salt/soctopus/init.sls
+++ b/salt/soctopus/init.sls
@@ -1,7 +1,7 @@
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
-{% set MASTER = salt['grains.get']('master') %}
-{%- set MASTER_URL = salt['pillar.get']('master:url_base', '') %}
-{%- set MASTER_IP = salt['pillar.get']('static:masterip', '') %}
+{% set MANAGER = salt['grains.get']('manager') %}
+{%- set MANAGER_URL = salt['pillar.get']('manager:url_base', '') %}
+{%- set MANAGER_IP = salt['pillar.get']('static:managerip', '') %}
soctopusdir:
file.directory:
@@ -50,7 +50,7 @@ playbookrulessync:
so-soctopus:
docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-soctopus:{{ VERSION }}
+ - image: {{ MANAGER }}:5000/soshybridhunter/so-soctopus:{{ VERSION }}
- hostname: soctopus
- name: so-soctopus
- binds:
@@ -61,4 +61,4 @@ so-soctopus:
- port_bindings:
- 0.0.0.0:7000:7000
- extra_hosts:
- - {{MASTER_URL}}:{{MASTER_IP}}
+ - {{MANAGER_URL}}:{{MANAGER_IP}}
diff --git a/salt/ssl/init.sls b/salt/ssl/init.sls
index db4bc97ea..1ef5e81c2 100644
--- a/salt/ssl/init.sls
+++ b/salt/ssl/init.sls
@@ -1,5 +1,5 @@
-{% set master = salt['grains.get']('master') %}
-{% set masterip = salt['pillar.get']('static:masterip', '') %}
+{% set manager = salt['grains.get']('manager') %}
+{% set managerip = salt['pillar.get']('static:managerip', '') %}
{% set HOSTNAME = salt['grains.get']('host') %}
{% set global_ca_text = [] %}
{% set global_ca_server = [] %}
@@ -7,13 +7,13 @@
{% set MAINIP = salt['grains.get']('ip_interfaces').get(MAININT)[0] %}
{% set CUSTOM_FLEET_HOSTNAME = salt['pillar.get']('static:fleet_custom_hostname', None) %}
-{% if grains.id.split('_')|last in ['master', 'eval', 'standalone'] %}
+{% if grains.id.split('_')|last in ['manager', 'eval', 'standalone'] %}
{% set trusttheca_text = salt['mine.get'](grains.id, 'x509.get_pem_entries')[grains.id]['/etc/pki/ca.crt']|replace('\n', '') %}
{% set ca_server = grains.id %}
{% else %}
{% set x509dict = salt['mine.get']('*', 'x509.get_pem_entries') %}
{% for host in x509dict %}
- {% if 'master' in host.split('_')|last or host.split('_')|last == 'standalone' %}
+ {% if 'manager' in host.split('_')|last or host.split('_')|last == 'standalone' %}
{% do global_ca_text.append(x509dict[host].get('/etc/pki/ca.crt')|replace('\n', '')) %}
{% do global_ca_server.append(host) %}
{% endif %}
@@ -43,7 +43,7 @@ m2cryptopkgs:
- ca_server: {{ ca_server }}
- signing_policy: influxdb
- public_key: /etc/pki/influxdb.key
- - CN: {{ master }}
+ - CN: {{ manager }}
- days_remaining: 0
- days_valid: 820
- backup: True
@@ -59,7 +59,7 @@ influxkeyperms:
- mode: 640
- group: 939
-{% if grains['role'] in ['so-master', 'so-eval', 'so-helix', 'so-mastersearch', 'so-standalone'] %}
+{% if grains['role'] in ['so-manager', 'so-eval', 'so-helix', 'so-managersearch', 'so-standalone'] %}
# Request a cert and drop it where it needs to go to be distributed
/etc/pki/filebeat.crt:
@@ -70,7 +70,7 @@ influxkeyperms:
{% if grains.role == 'so-heavynode' %}
- CN: {{grains.id}}
{% else %}
- - CN: {{master}}
+ - CN: {{manager}}
{% endif %}
- days_remaining: 0
- days_valid: 820
@@ -119,7 +119,7 @@ fbcrtlink:
- ca_server: {{ ca_server }}
- signing_policy: registry
- public_key: /etc/pki/registry.key
- - CN: {{ master }}
+ - CN: {{ manager }}
- days_remaining: 0
- days_valid: 820
- backup: True
@@ -136,31 +136,31 @@ regkeyperms:
- group: 939
# Create a cert for the reverse proxy
-/etc/pki/masterssl.crt:
+/etc/pki/managerssl.crt:
x509.certificate_managed:
- ca_server: {{ ca_server }}
- - signing_policy: masterssl
- - public_key: /etc/pki/masterssl.key
- - CN: {{ master }}
+ - signing_policy: managerssl
+ - public_key: /etc/pki/managerssl.key
+ - CN: {{ manager }}
- days_remaining: 0
- days_valid: 820
- backup: True
- managed_private_key:
- name: /etc/pki/masterssl.key
+ name: /etc/pki/managerssl.key
bits: 4096
backup: True
msslkeyperms:
file.managed:
- replace: False
- - name: /etc/pki/masterssl.key
+ - name: /etc/pki/managerssl.key
- mode: 640
- group: 939
# Create a private key and cert for OSQuery
/etc/pki/fleet.key:
x509.private_key_managed:
- - CN: {{ master }}
+ - CN: {{ manager }}
- bits: 4096
- days_remaining: 0
- days_valid: 820
@@ -169,8 +169,8 @@ msslkeyperms:
/etc/pki/fleet.crt:
x509.certificate_managed:
- signing_private_key: /etc/pki/fleet.key
- - CN: {{ master }}
- - subjectAltName: DNS:{{ master }},IP:{{ masterip }}
+ - CN: {{ manager }}
+ - subjectAltName: DNS:{{ manager }},IP:{{ managerip }}
- days_remaining: 0
- days_valid: 820
- backup: True
@@ -187,7 +187,7 @@ fleetkeyperms:
- group: 939
{% endif %}
-{% if grains['role'] in ['so-sensor', 'so-master', 'so-node', 'so-eval', 'so-helix', 'so-mastersearch', 'so-heavynode', 'so-fleet', 'so-standalone'] %}
+{% if grains['role'] in ['so-sensor', 'so-manager', 'so-node', 'so-eval', 'so-helix', 'so-managersearch', 'so-heavynode', 'so-fleet', 'so-standalone'] %}
fbcertdir:
file.directory:
@@ -203,7 +203,7 @@ fbcertdir:
{% if grains.role == 'so-heavynode' %}
- CN: {{grains.id}}
{% else %}
- - CN: {{master}}
+ - CN: {{manager}}
{% endif %}
- days_remaining: 0
- days_valid: 820
@@ -238,25 +238,25 @@ chownfilebeatp8:
{% if grains['role'] == 'so-fleet' %}
# Create a cert for the reverse proxy
-/etc/pki/masterssl.crt:
+/etc/pki/managerssl.crt:
x509.certificate_managed:
- ca_server: {{ ca_server }}
- - signing_policy: masterssl
- - public_key: /etc/pki/masterssl.key
+ - signing_policy: managerssl
+ - public_key: /etc/pki/managerssl.key
- CN: {{ HOSTNAME }}
- subjectAltName: DNS:{{ HOSTNAME }}, IP:{{ MAINIP }} {% if CUSTOM_FLEET_HOSTNAME != None %},DNS:{{ CUSTOM_FLEET_HOSTNAME }} {% endif %}
- days_remaining: 0
- days_valid: 820
- backup: True
- managed_private_key:
- name: /etc/pki/masterssl.key
+ name: /etc/pki/managerssl.key
bits: 4096
backup: True
msslkeyperms:
file.managed:
- replace: False
- - name: /etc/pki/masterssl.key
+ - name: /etc/pki/managerssl.key
- mode: 640
- group: 939
diff --git a/salt/strelka/files/backend/backend.yaml b/salt/strelka/files/backend/backend.yaml
index 76a2ae3af..b25e5630d 100644
--- a/salt/strelka/files/backend/backend.yaml
+++ b/salt/strelka/files/backend/backend.yaml
@@ -2,7 +2,7 @@
{%- set mainint = salt['pillar.get']('sensor:mainint') %}
{%- set ip = salt['grains.get']('ip_interfaces:' ~ mainint[0], salt['pillar.get']('sensor:mainip')) %}
{%- else %}
- {%- set ip = salt['pillar.get']('static:masterip') %}
+ {%- set ip = salt['pillar.get']('static:managerip') %}
{%- endif -%}
logging_cfg: '/etc/strelka/logging.yaml'
limits:
diff --git a/salt/strelka/files/filestream/filestream.yaml b/salt/strelka/files/filestream/filestream.yaml
index c45fd8644..539e4314c 100644
--- a/salt/strelka/files/filestream/filestream.yaml
+++ b/salt/strelka/files/filestream/filestream.yaml
@@ -2,7 +2,7 @@
{%- set mainint = salt['pillar.get']('sensor:mainint') %}
{%- set ip = salt['grains.get']('ip_interfaces:' ~ mainint[0], salt['pillar.get']('sensor:mainip')) %}
{%- else %}
- {%- set ip = salt['pillar.get']('static:masterip') %}
+ {%- set ip = salt['pillar.get']('static:managerip') %}
{%- endif -%}
conn:
server: '{{ ip }}:57314'
diff --git a/salt/strelka/files/frontend/frontend.yaml b/salt/strelka/files/frontend/frontend.yaml
index 56df323f9..5d72f1e0d 100644
--- a/salt/strelka/files/frontend/frontend.yaml
+++ b/salt/strelka/files/frontend/frontend.yaml
@@ -2,7 +2,7 @@
{%- set mainint = salt['pillar.get']('sensor:mainint') %}
{%- set ip = salt['grains.get']('ip_interfaces:' ~ mainint[0], salt['pillar.get']('sensor:mainip')) %}
{%- else %}
- {%- set ip = salt['pillar.get']('static:masterip') %}
+ {%- set ip = salt['pillar.get']('static:managerip') %}
{%- endif -%}
server: ":57314"
coordinator:
diff --git a/salt/strelka/files/manager/manager.yaml b/salt/strelka/files/manager/manager.yaml
index 8a5966ac9..db9dd7f91 100644
--- a/salt/strelka/files/manager/manager.yaml
+++ b/salt/strelka/files/manager/manager.yaml
@@ -2,7 +2,7 @@
{%- set mainint = salt['pillar.get']('sensor:mainint') %}
{%- set ip = salt['grains.get']('ip_interfaces:' ~ mainint[0], salt['pillar.get']('sensor:mainip')) %}
{%- else %}
- {%- set ip = salt['pillar.get']('static:masterip') %}
+ {%- set ip = salt['pillar.get']('static:managerip') %}
{%- endif -%}
coordinator:
addr: '{{ ip }}:6380'
diff --git a/salt/strelka/init.sls b/salt/strelka/init.sls
index 4a422b642..a77b635fe 100644
--- a/salt/strelka/init.sls
+++ b/salt/strelka/init.sls
@@ -12,8 +12,8 @@
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-{%- set MASTER = grains['master'] %}
-{%- set MASTERIP = salt['pillar.get']('static:masterip', '') %}
+{%- set MANAGER = grains['manager'] %}
+{%- set MANAGERIP = salt['pillar.get']('static:managerip', '') %}
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
{%- set STRELKA_RULES = salt['pillar.get']('strelka:rules', '1') -%}
@@ -79,7 +79,7 @@ strelkastagedir:
strelka_coordinator:
docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-redis:{{ VERSION }}
+ - image: {{ MANAGER }}:5000/soshybridhunter/so-redis:{{ VERSION }}
- name: so-strelka-coordinator
- entrypoint: redis-server --save "" --appendonly no
- port_bindings:
@@ -87,7 +87,7 @@ strelka_coordinator:
strelka_gatekeeper:
docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-redis:{{ VERSION }}
+ - image: {{ MANAGER }}:5000/soshybridhunter/so-redis:{{ VERSION }}
- name: so-strelka-gatekeeper
- entrypoint: redis-server --save "" --appendonly no --maxmemory-policy allkeys-lru
- port_bindings:
@@ -95,7 +95,7 @@ strelka_gatekeeper:
strelka_frontend:
docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-strelka-frontend:{{ VERSION }}
+ - image: {{ MANAGER }}:5000/soshybridhunter/so-strelka-frontend:{{ VERSION }}
- binds:
- /opt/so/conf/strelka/frontend/:/etc/strelka/:ro
- /nsm/strelka/log/:/var/log/strelka/:rw
@@ -107,7 +107,7 @@ strelka_frontend:
strelka_backend:
docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-strelka-backend:{{ VERSION }}
+ - image: {{ MANAGER }}:5000/soshybridhunter/so-strelka-backend:{{ VERSION }}
- binds:
- /opt/so/conf/strelka/backend/:/etc/strelka/:ro
- /opt/so/conf/strelka/rules/:/etc/yara/:ro
@@ -117,7 +117,7 @@ strelka_backend:
strelka_manager:
docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-strelka-manager:{{ VERSION }}
+ - image: {{ MANAGER }}:5000/soshybridhunter/so-strelka-manager:{{ VERSION }}
- binds:
- /opt/so/conf/strelka/manager/:/etc/strelka/:ro
- name: so-strelka-manager
@@ -125,7 +125,7 @@ strelka_manager:
strelka_filestream:
docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-strelka-filestream:{{ VERSION }}
+ - image: {{ MANAGER }}:5000/soshybridhunter/so-strelka-filestream:{{ VERSION }}
- binds:
- /opt/so/conf/strelka/filestream/:/etc/strelka/:ro
- /nsm/strelka:/nsm/strelka
diff --git a/salt/suricata/init.sls b/salt/suricata/init.sls
index 11d178654..7790887d9 100644
--- a/salt/suricata/init.sls
+++ b/salt/suricata/init.sls
@@ -16,7 +16,7 @@
{% set interface = salt['pillar.get']('sensor:interface', 'bond0') %}
{% set BROVER = salt['pillar.get']('static:broversion', '') %}
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
-{% set MASTER = salt['grains.get']('master') %}
+{% set MANAGER = salt['grains.get']('manager') %}
{% set BPF_NIDS = salt['pillar.get']('nids:bpf') %}
{% set BPF_STATUS = 0 %}
@@ -132,7 +132,7 @@ suribpf:
so-suricata:
docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-suricata:{{ VERSION }}
+ - image: {{ MANAGER }}:5000/soshybridhunter/so-suricata:{{ VERSION }}
- privileged: True
- environment:
- INTERFACE={{ interface }}
diff --git a/salt/suricata/master.sls b/salt/suricata/manager.sls
similarity index 100%
rename from salt/suricata/master.sls
rename to salt/suricata/manager.sls
diff --git a/salt/suricata/suricata_config.map.jinja b/salt/suricata/suricata_config.map.jinja
index 6260c1ec1..557d4e519 100644
--- a/salt/suricata/suricata_config.map.jinja
+++ b/salt/suricata/suricata_config.map.jinja
@@ -11,7 +11,7 @@ HOME_NET: "[{{salt['pillar.get']('sensor:hnsensor')}}]"
{% endload %}
{% else %}
{% load_yaml as homenet %}
-HOME_NET: "[{{salt['pillar.get']('static:hnmaster', '')}}]"
+HOME_NET: "[{{salt['pillar.get']('static:hnmanager', '')}}]"
{% endload %}
{% endif %}
diff --git a/salt/tcpreplay/init.sls b/salt/tcpreplay/init.sls
index 5a054bf5d..78c6a18c6 100644
--- a/salt/tcpreplay/init.sls
+++ b/salt/tcpreplay/init.sls
@@ -1,11 +1,11 @@
{% if grains['role'] == 'so-sensor' or grains['role'] == 'so-eval' %}
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
-{% set MASTER = salt['grains.get']('master') %}
+{% set MANAGER = salt['grains.get']('manager') %}
so-tcpreplay:
docker_container.running:
- network_mode: "host"
- - image: {{ MASTER }}:5000/soshybridhunter/so-tcpreplay:{{ VERSION }}
+ - image: {{ MANAGER }}:5000/soshybridhunter/so-tcpreplay:{{ VERSION }}
- name: so-tcpreplay
- user: root
- interactive: True
diff --git a/salt/telegraf/etc/telegraf.conf b/salt/telegraf/etc/telegraf.conf
index be99dc849..f768efe74 100644
--- a/salt/telegraf/etc/telegraf.conf
+++ b/salt/telegraf/etc/telegraf.conf
@@ -13,7 +13,7 @@
# them with $. For strings the variable must be within quotes (ie, "$STR_VAR"),
# for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR)
-{%- set MASTER = grains['master'] %}
+{%- set MANAGER = grains['manager'] %}
{% set NODEIP = salt['pillar.get']('elasticsearch:mainip', '') %}
{% set HELIX_API_KEY = salt['pillar.get']('fireeye:helix:api_key', '') %}
{% set UNIQUEID = salt['pillar.get']('sensor:uniqueid', '') %}
@@ -98,7 +98,7 @@
## urls will be written to each interval.
# urls = ["unix:///var/run/influxdb.sock"]
# urls = ["udp://127.0.0.1:8089"]
- urls = ["https://{{ MASTER }}:8086"]
+ urls = ["https://{{ MANAGER }}:8086"]
## The target database for metrics; will be created as needed.
@@ -616,13 +616,13 @@
# # Read stats from one or more Elasticsearch servers or clusters
-{% if grains['role'] in ['so-master', 'so-eval', 'so-mastersearch', 'so-standalone'] %}
+{% if grains['role'] in ['so-manager', 'so-eval', 'so-managersearch', 'so-standalone'] %}
[[inputs.elasticsearch]]
# ## specify a list of one or more Elasticsearch servers
# # you can add username and password to your url to use basic authentication:
# # servers = ["http://user:pass@localhost:9200"]
- servers = ["http://{{ MASTER }}:9200"]
+ servers = ["http://{{ MANAGER }}:9200"]
{% elif grains['role'] in ['so-node', 'so-hotnode', 'so-warmnode', 'so-heavynode'] %}
[[inputs.elasticsearch]]
servers = ["http://{{ NODEIP }}:9200"]
@@ -666,7 +666,7 @@
# # Read metrics from one or more commands that can output to stdout
# ## Commands array
-{% if grains['role'] in ['so-master', 'so-mastersearch'] %}
+{% if grains['role'] in ['so-manager', 'so-managersearch'] %}
[[inputs.exec]]
commands = [
"/scripts/redis.sh",
diff --git a/salt/telegraf/init.sls b/salt/telegraf/init.sls
index 9ae0903b9..782707a44 100644
--- a/salt/telegraf/init.sls
+++ b/salt/telegraf/init.sls
@@ -1,4 +1,4 @@
-{% set MASTER = salt['grains.get']('master') %}
+{% set MANAGER = salt['grains.get']('manager') %}
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
# Add Telegraf to monitor all the things.
@@ -36,7 +36,7 @@ tgrafconf:
so-telegraf:
docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-telegraf:{{ VERSION }}
+ - image: {{ MANAGER }}:5000/soshybridhunter/so-telegraf:{{ VERSION }}
- environment:
- HOST_PROC=/host/proc
- HOST_ETC=/host/etc
@@ -53,7 +53,7 @@ so-telegraf:
- /proc:/host/proc:ro
- /nsm:/host/nsm:ro
- /etc:/host/etc:ro
- {% if grains['role'] == 'so-master' or grains['role'] == 'so-eval' or grains['role'] == 'so-mastersearch' %}
+ {% if grains['role'] == 'so-manager' or grains['role'] == 'so-eval' or grains['role'] == 'so-managersearch' %}
- /etc/pki/ca.crt:/etc/telegraf/ca.crt:ro
{% else %}
- /etc/ssl/certs/intca.crt:/etc/telegraf/ca.crt:ro
diff --git a/salt/thehive/etc/application.conf b/salt/thehive/etc/application.conf
index 8630cb386..f06c3f7c6 100644
--- a/salt/thehive/etc/application.conf
+++ b/salt/thehive/etc/application.conf
@@ -1,4 +1,4 @@
-{%- set MASTERIP = salt['pillar.get']('static:masterip', '') %}
+{%- set MANAGERIP = salt['pillar.get']('static:managerip', '') %}
{%- set CORTEXKEY = salt['pillar.get']('static:cortexorguserkey', '') %}
# Secret Key
@@ -6,7 +6,7 @@
# WARNING: If you deploy your application on several servers, make sure to use the same key.
play.http.secret.key="letsdewdis"
play.http.context=/thehive/
-search.uri = "http://{{ MASTERIP }}:9400"
+search.uri = "http://{{ MANAGERIP }}:9400"
# Elasticsearch
search {
# Name of the index
@@ -14,8 +14,8 @@ search {
# Name of the Elasticsearch cluster
cluster = thehive
# Address of the Elasticsearch instance
- host = ["{{ MASTERIP }}:9500"]
- #search.uri = "http://{{ MASTERIP }}:9500"
+ host = ["{{ MANAGERIP }}:9500"]
+ #search.uri = "http://{{ MANAGERIP }}:9500"
# Scroll keepalive
keepalive = 1m
# Size of the page for scroll
@@ -135,7 +135,7 @@ play.modules.enabled += connectors.cortex.CortexConnector
cortex {
"CORTEX-SERVER-ID" {
- url = "http://{{ MASTERIP }}:9001/cortex/"
+ url = "http://{{ MANAGERIP }}:9001/cortex/"
key = "{{ CORTEXKEY }}"
# # HTTP client configuration (SSL and proxy)
# ws {}
@@ -210,9 +210,9 @@ misp {
}
webhooks {
NodeRedWebHook {
- url = "http://{{ MASTERIP }}:1880/thehive"
+ url = "http://{{ MANAGERIP }}:1880/thehive"
}
#SOCtopusWebHook {
- # url = "http://{{ MASTERIP }}:7000/enrich"
+ # url = "http://{{ MANAGERIP }}:7000/enrich"
#}
}
diff --git a/salt/thehive/etc/cortex-application.conf b/salt/thehive/etc/cortex-application.conf
index 28fbe6791..b9cbe20cc 100644
--- a/salt/thehive/etc/cortex-application.conf
+++ b/salt/thehive/etc/cortex-application.conf
@@ -1,11 +1,11 @@
-{%- set MASTERIP = salt['pillar.get']('static:masterip', '') %}
+{%- set MANAGERIP = salt['pillar.get']('static:managerip', '') %}
# Secret Key
# The secret key is used to secure cryptographic functions.
# WARNING: If you deploy your application on several servers, make sure to use the same key.
play.http.secret.key="letsdewdis"
play.http.context=/cortex/
-search.uri = "http://{{ MASTERIP }}:9400"
+search.uri = "http://{{ MANAGERIP }}:9400"
# Elasticsearch
search {
@@ -14,7 +14,7 @@ search {
# Name of the Elasticsearch cluster
cluster = thehive
# Address of the Elasticsearch instance
- host = ["{{ MASTERIP }}:9500"]
+ host = ["{{ MANAGERIP }}:9500"]
# Scroll keepalive
keepalive = 1m
# Size of the page for scroll
diff --git a/salt/thehive/init.sls b/salt/thehive/init.sls
index da07247c4..a1b9b50f1 100644
--- a/salt/thehive/init.sls
+++ b/salt/thehive/init.sls
@@ -1,6 +1,6 @@
-{% set MASTERIP = salt['pillar.get']('master:mainip', '') %}
+{% set MANAGERIP = salt['pillar.get']('manager:mainip', '') %}
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
-{% set MASTER = salt['grains.get']('master') %}
+{% set MANAGER = salt['grains.get']('manager') %}
thehiveconfdir:
file.directory:
- name: /opt/so/conf/thehive/etc
@@ -71,7 +71,7 @@ thehiveesdata:
so-thehive-es:
docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-thehive-es:{{ VERSION }}
+ - image: {{ MANAGER }}:5000/soshybridhunter/so-thehive-es:{{ VERSION }}
- hostname: so-thehive-es
- name: so-thehive-es
- user: 939
@@ -99,7 +99,7 @@ so-thehive-es:
# Install Cortex
so-cortex:
docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-thehive-cortex:{{ VERSION }}
+ - image: {{ MANAGER }}:5000/soshybridhunter/so-thehive-cortex:{{ VERSION }}
- hostname: so-cortex
- name: so-cortex
- user: 939
@@ -118,9 +118,9 @@ cortexscript:
so-thehive:
docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-thehive:{{ VERSION }}
+ - image: {{ MANAGER }}:5000/soshybridhunter/so-thehive:{{ VERSION }}
- environment:
- - ELASTICSEARCH_HOST={{ MASTERIP }}
+ - ELASTICSEARCH_HOST={{ MANAGERIP }}
- hostname: so-thehive
- name: so-thehive
- user: 939
diff --git a/salt/thehive/scripts/cortex_init b/salt/thehive/scripts/cortex_init
index 063ae498d..1d0fe29f0 100644
--- a/salt/thehive/scripts/cortex_init
+++ b/salt/thehive/scripts/cortex_init
@@ -1,5 +1,5 @@
#!/bin/bash
-{% set MASTERIP = salt['pillar.get']('static:masterip', '') %}
+{% set MANAGERIP = salt['pillar.get']('static:managerip', '') %}
{%- set CORTEXUSER = salt['pillar.get']('static:cortexuser', '') %}
{%- set CORTEXPASSWORD = salt['pillar.get']('static:cortexpassword', '') %}
{%- set CORTEXKEY = salt['pillar.get']('static:cortexkey', '') %}
@@ -11,7 +11,7 @@ default_salt_dir=/opt/so/saltstack/default
cortex_init(){
sleep 60
- CORTEX_IP="{{MASTERIP}}"
+ CORTEX_IP="{{MANAGERIP}}"
CORTEX_USER="{{CORTEXUSER}}"
CORTEX_PASSWORD="{{CORTEXPASSWORD}}"
CORTEX_KEY="{{CORTEXKEY}}"
@@ -54,7 +54,7 @@ if [ -f /opt/so/state/cortex.txt ]; then
exit 0
else
rm -f garbage_file
- while ! wget -O garbage_file {{MASTERIP}}:9500 2>/dev/null
+ while ! wget -O garbage_file {{MANAGERIP}}:9500 2>/dev/null
do
echo "Waiting for Elasticsearch..."
rm -f garbage_file
diff --git a/salt/thehive/scripts/hive_init b/salt/thehive/scripts/hive_init
index 296004e77..69ff72fa7 100755
--- a/salt/thehive/scripts/hive_init
+++ b/salt/thehive/scripts/hive_init
@@ -1,12 +1,12 @@
#!/bin/bash
-{% set MASTERIP = salt['pillar.get']('static:masterip', '') %}
+{% set MANAGERIP = salt['pillar.get']('static:managerip', '') %}
{%- set THEHIVEUSER = salt['pillar.get']('static:hiveuser', '') %}
{%- set THEHIVEPASSWORD = salt['pillar.get']('static:hivepassword', '') %}
{%- set THEHIVEKEY = salt['pillar.get']('static:hivekey', '') %}
thehive_init(){
sleep 120
- THEHIVE_IP="{{MASTERIP}}"
+ THEHIVE_IP="{{MANAGERIP}}"
THEHIVE_USER="{{THEHIVEUSER}}"
THEHIVE_PASSWORD="{{THEHIVEPASSWORD}}"
THEHIVE_KEY="{{THEHIVEKEY}}"
@@ -52,7 +52,7 @@ if [ -f /opt/so/state/thehive.txt ]; then
exit 0
else
rm -f garbage_file
- while ! wget -O garbage_file {{MASTERIP}}:9400 2>/dev/null
+ while ! wget -O garbage_file {{MANAGERIP}}:9400 2>/dev/null
do
echo "Waiting for Elasticsearch..."
rm -f garbage_file
diff --git a/salt/top.sls b/salt/top.sls
index fbf9e32ef..a04e75657 100644
--- a/salt/top.sls
+++ b/salt/top.sls
@@ -1,10 +1,10 @@
{%- set BROVER = salt['pillar.get']('static:broversion', '') -%}
{%- set WAZUH = salt['pillar.get']('static:wazuh', '0') -%}
-{%- set THEHIVE = salt['pillar.get']('master:thehive', '0') -%}
-{%- set PLAYBOOK = salt['pillar.get']('master:playbook', '0') -%}
-{%- set FREQSERVER = salt['pillar.get']('master:freq', '0') -%}
-{%- set DOMAINSTATS = salt['pillar.get']('master:domainstats', '0') -%}
-{%- set FLEETMASTER = salt['pillar.get']('static:fleet_master', False) -%}
+{%- set THEHIVE = salt['pillar.get']('manager:thehive', '0') -%}
+{%- set PLAYBOOK = salt['pillar.get']('manager:playbook', '0') -%}
+{%- set FREQSERVER = salt['pillar.get']('manager:freq', '0') -%}
+{%- set DOMAINSTATS = salt['pillar.get']('manager:domainstats', '0') -%}
+{%- set FLEETMANAGER = salt['pillar.get']('static:fleet_manager', False) -%}
{%- set FLEETNODE = salt['pillar.get']('static:fleet_node', False) -%}
{%- set STRELKA = salt['pillar.get']('strelka:enabled', '0') -%}
@@ -30,7 +30,7 @@ base:
- telegraf
- firewall
- idstools
- - suricata.master
+ - suricata.manager
- pcap
- suricata
- zeek
@@ -56,7 +56,7 @@ base:
- strelka
{%- endif %}
- filebeat
- {%- if FLEETMASTER or FLEETNODE %}
+ {%- if FLEETMANAGER or FLEETNODE %}
- fleet.install_package
{%- endif %}
- schedule
@@ -65,7 +65,7 @@ base:
- ca
- ssl
- registry
- - master
+ - manager
- common
- nginx
- telegraf
@@ -74,9 +74,9 @@ base:
- soc
- firewall
- idstools
- - suricata.master
+ - suricata.manager
- healthcheck
- {%- if FLEETMASTER or FLEETNODE or PLAYBOOK != 0 %}
+ {%- if FLEETMANAGER or FLEETNODE or PLAYBOOK != 0 %}
- mysql
{%- endif %}
{%- if WAZUH != 0 %}
@@ -95,7 +95,7 @@ base:
- filebeat
- curator
- elastalert
- {%- if FLEETMASTER or FLEETNODE %}
+ {%- if FLEETMANAGER or FLEETNODE %}
- fleet
- redis
- fleet.install_package
@@ -117,7 +117,7 @@ base:
{%- endif %}
- '*_master':
+ '*_manager':
- ca
- ssl
- registry
@@ -128,11 +128,11 @@ base:
- grafana
- soc
- firewall
- - master
+ - manager
- idstools
- - suricata.master
+ - suricata.manager
- redis
- {%- if FLEETMASTER or FLEETNODE or PLAYBOOK != 0 %}
+ {%- if FLEETMANAGER or FLEETNODE or PLAYBOOK != 0 %}
- mysql
{%- endif %}
{%- if WAZUH != 0 %}
@@ -145,7 +145,7 @@ base:
- filebeat
- utility
- schedule
- {%- if FLEETMASTER or FLEETNODE %}
+ {%- if FLEETMANAGER or FLEETNODE %}
- fleet
- fleet.install_package
{%- endif %}
@@ -167,7 +167,7 @@ base:
- ca
- ssl
- registry
- - master
+ - manager
- common
- nginx
- telegraf
@@ -176,10 +176,10 @@ base:
- soc
- firewall
- idstools
- - suricata.master
+ - suricata.manager
- healthcheck
- redis
- {%- if FLEETMASTER or FLEETNODE or PLAYBOOK != 0 %}
+ {%- if FLEETMANAGER or FLEETNODE or PLAYBOOK != 0 %}
- mysql
{%- endif %}
{%- if WAZUH != 0 %}
@@ -199,7 +199,7 @@ base:
- filebeat
- curator
- elastalert
- {%- if FLEETMASTER or FLEETNODE %}
+ {%- if FLEETMANAGER or FLEETNODE %}
- fleet
- redis
- fleet.install_package
@@ -227,7 +227,7 @@ base:
- common
- firewall
- logstash
- {%- if FLEETMASTER or FLEETNODE %}
+ {%- if FLEETMANAGER or FLEETNODE %}
- fleet.install_package
{%- endif %}
- schedule
@@ -239,7 +239,7 @@ base:
- logstash
- elasticsearch
- curator
- {%- if FLEETMASTER or FLEETNODE %}
+ {%- if FLEETMANAGER or FLEETNODE %}
- fleet.install_package
{%- endif %}
- schedule
@@ -249,7 +249,7 @@ base:
- common
- firewall
- elasticsearch
- {%- if FLEETMASTER or FLEETNODE %}
+ {%- if FLEETMANAGER or FLEETNODE %}
- fleet.install_package
{%- endif %}
- schedule
@@ -268,12 +268,12 @@ base:
- elasticsearch
- curator
- filebeat
- {%- if FLEETMASTER or FLEETNODE %}
+ {%- if FLEETMANAGER or FLEETNODE %}
- fleet.install_package
{%- endif %}
- schedule
- '*_mastersensor':
+ '*_managersensor':
- common
- nginx
- telegraf
@@ -281,13 +281,13 @@ base:
- grafana
- firewall
- sensor
- - master
- {%- if FLEETMASTER or FLEETNODE %}
+ - manager
+ {%- if FLEETMANAGER or FLEETNODE %}
- fleet.install_package
{%- endif %}
- schedule
- '*_mastersearch':
+ '*_managersearch':
- ca
- ssl
- registry
@@ -298,11 +298,11 @@ base:
- grafana
- soc
- firewall
- - master
+ - manager
- idstools
- - suricata.master
+ - suricata.manager
- redis
- {%- if FLEETMASTER or FLEETNODE or PLAYBOOK != 0 %}
+ {%- if FLEETMANAGER or FLEETNODE or PLAYBOOK != 0 %}
- mysql
{%- endif %}
{%- if WAZUH != 0 %}
@@ -316,7 +316,7 @@ base:
- filebeat
- utility
- schedule
- {%- if FLEETMASTER or FLEETNODE %}
+ {%- if FLEETMANAGER or FLEETNODE %}
- fleet
- fleet.install_package
{%- endif %}
@@ -348,7 +348,7 @@ base:
- elasticsearch
- curator
- filebeat
- {%- if FLEETMASTER or FLEETNODE %}
+ {%- if FLEETMANAGER or FLEETNODE %}
- fleet.install_package
{%- endif %}
- pcap
diff --git a/salt/utility/bin/crossthestreams b/salt/utility/bin/crossthestreams
index c8768230e..31cbdfceb 100644
--- a/salt/utility/bin/crossthestreams
+++ b/salt/utility/bin/crossthestreams
@@ -1,6 +1,6 @@
#!/bin/bash
-{% set ES = salt['pillar.get']('master:mainip', '') %}
-{%- set MASTER = grains['master'] %}
+{% set ES = salt['pillar.get']('manager:mainip', '') %}
+{%- set MANAGER = grains['manager'] %}
# Wait for ElasticSearch to come up, so that we can query for version information
echo -n "Waiting for ElasticSearch..."
@@ -29,7 +29,7 @@ fi
echo "Applying cross cluster search config..."
curl -s -XPUT http://{{ ES }}:9200/_cluster/settings \
-H 'Content-Type: application/json' \
- -d "{\"persistent\": {\"search\": {\"remote\": {\"{{ MASTER }}\": {\"seeds\": [\"127.0.0.1:9300\"]}}}}}"
+ -d "{\"persistent\": {\"search\": {\"remote\": {\"{{ MANAGER }}\": {\"seeds\": [\"127.0.0.1:9300\"]}}}}}"
# Add all the search nodes to cross cluster searching.
diff --git a/salt/utility/bin/eval b/salt/utility/bin/eval
index 7ff0ef886..87692e40f 100644
--- a/salt/utility/bin/eval
+++ b/salt/utility/bin/eval
@@ -1,5 +1,5 @@
#!/bin/bash
-{% set ES = salt['pillar.get']('master:mainip', '') %}
+{% set ES = salt['pillar.get']('manager:mainip', '') %}
# Wait for ElasticSearch to come up, so that we can query for version information
echo -n "Waiting for ElasticSearch..."
diff --git a/salt/utility/init.sls b/salt/utility/init.sls
index 87cfe8e87..00899f69a 100644
--- a/salt/utility/init.sls
+++ b/salt/utility/init.sls
@@ -1,5 +1,5 @@
# This state is for checking things
-{% if grains['role'] in ['so-master', 'so-mastersearch', 'so-standalone'] %}
+{% if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone'] %}
# Make sure Cross Cluster is good. Will need some logic once we have hot/warm
crossclusterson:
cmd.script:
diff --git a/salt/wazuh/files/agent/ossec.conf b/salt/wazuh/files/agent/ossec.conf
index ffc7922b0..37971aa93 100644
--- a/salt/wazuh/files/agent/ossec.conf
+++ b/salt/wazuh/files/agent/ossec.conf
@@ -1,5 +1,5 @@
-{%- if grains['role'] in ['so-master', 'so-eval', 'so-mastersearch', 'so-standalone'] %}
- {%- set ip = salt['pillar.get']('static:masterip', '') %}
+{%- if grains['role'] in ['so-manager', 'so-eval', 'so-managersearch', 'so-standalone'] %}
+ {%- set ip = salt['pillar.get']('static:managerip', '') %}
{%- elif grains['role'] == 'so-node' or grains['role'] == 'so-heavynode' %}
{%- set ip = salt['pillar.get']('elasticsearch:mainip', '') %}
{%- elif grains['role'] == 'so-sensor' %}
diff --git a/salt/wazuh/files/agent/wazuh-register-agent b/salt/wazuh/files/agent/wazuh-register-agent
index b38474d8e..f2fd8693f 100755
--- a/salt/wazuh/files/agent/wazuh-register-agent
+++ b/salt/wazuh/files/agent/wazuh-register-agent
@@ -1,5 +1,5 @@
-{%- if grains['role'] in ['so-master', 'so-eval', 'so-mastersearch', 'so-standalone'] %}
- {%- set ip = salt['pillar.get']('static:masterip', '') %}
+{%- if grains['role'] in ['so-manager', 'so-eval', 'so-managersearch', 'so-standalone'] %}
+ {%- set ip = salt['pillar.get']('static:managerip', '') %}
{%- elif grains['role'] == 'so-node' or grains['role'] == 'so-heavynode' %}
{%- set ip = salt['pillar.get']('elasticsearch:mainip', '') %}
{%- elif grains['role'] == 'so-sensor' %}
diff --git a/salt/wazuh/files/wazuh-manager-whitelist b/salt/wazuh/files/wazuh-manager-whitelist
index 66dc13cd9..d39d68e36 100755
--- a/salt/wazuh/files/wazuh-manager-whitelist
+++ b/salt/wazuh/files/wazuh-manager-whitelist
@@ -1,4 +1,4 @@
-{%- set MASTERIP = salt['pillar.get']('static:masterip', '') %}
+{%- set MANAGERIP = salt['pillar.get']('static:managerip', '') %}
{%- set WAZUH_ENABLED = salt['pillar.get']('static:wazuh', '0') %}
#!/bin/bash
local_salt_dir=/opt/so/saltstack/local
@@ -21,12 +21,12 @@ local_salt_dir=/opt/so/saltstack/local
# Check if Wazuh enabled
if [ {{ WAZUH_ENABLED }} ]; then
WAZUH_MGR_CFG="/opt/so/wazuh/etc/ossec.conf"
- if ! grep -q "{{ MASTERIP }}" $WAZUH_MGR_CFG ; then
+ if ! grep -q "{{ MANAGERIP }}" $WAZUH_MGR_CFG ; then
DATE=`date`
sed -i 's/<\/ossec_config>//' $WAZUH_MGR_CFG
sed -i '/^$/N;/^\n$/D' $WAZUH_MGR_CFG
- echo -e "\n \n {{ MASTERIP }}\n \n" >> $WAZUH_MGR_CFG
- echo "Added whitelist entry for {{ MASTERIP }} in $WAZUH_MGR_CFG."
+ echo -e "\n \n {{ MANAGERIP }}\n \n" >> $WAZUH_MGR_CFG
+ echo "Added whitelist entry for {{ MANAGERIP }} in $WAZUH_MGR_CFG."
echo
fi
fi
diff --git a/salt/wazuh/init.sls b/salt/wazuh/init.sls
index 3e0969359..6e8c757f1 100644
--- a/salt/wazuh/init.sls
+++ b/salt/wazuh/init.sls
@@ -1,6 +1,6 @@
{%- set HOSTNAME = salt['grains.get']('host', '') %}
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
-{% set MASTER = salt['grains.get']('master') %}
+{% set MANAGER = salt['grains.get']('manager') %}
# Add ossec group
ossecgroup:
group.present:
@@ -83,7 +83,7 @@ wazuhmgrwhitelist:
so-wazuh:
docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-wazuh:{{ VERSION }}
+ - image: {{ MANAGER }}:5000/soshybridhunter/so-wazuh:{{ VERSION }}
- hostname: {{HOSTNAME}}-wazuh-manager
- name: so-wazuh
- detach: True
diff --git a/salt/yum/etc/yum.conf.jinja b/salt/yum/etc/yum.conf.jinja
index a370bbf4f..81f981c1d 100644
--- a/salt/yum/etc/yum.conf.jinja
+++ b/salt/yum/etc/yum.conf.jinja
@@ -11,6 +11,6 @@ installonly_limit={{ salt['pillar.get']('yum:config:installonly_limit', 2) }}
bugtracker_url=http://bugs.centos.org/set_project.php?project_id=23&ref=http://bugs.centos.org/bug_report_page.php?category=yum
distroverpkg=centos-release
-{% if salt['pillar.get']('static:masterupdate', '0') %}
-proxy=http://{{ salt['pillar.get']('yum:config:proxy', salt['config.get']('master')) }}:3142
+{% if salt['pillar.get']('static:managerupdate', '0') %}
+proxy=http://{{ salt['pillar.get']('yum:config:proxy', salt['config.get']('manager')) }}:3142
{% endif %}
\ No newline at end of file
diff --git a/salt/zeek/init.sls b/salt/zeek/init.sls
index 246b43c90..af8b2b4d6 100644
--- a/salt/zeek/init.sls
+++ b/salt/zeek/init.sls
@@ -1,5 +1,5 @@
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
-{% set MASTER = salt['grains.get']('master') %}
+{% set MANAGER = salt['grains.get']('manager') %}
{% set BPF_ZEEK = salt['pillar.get']('zeek:bpf', {}) %}
{% set BPF_STATUS = 0 %}
{% set INTERFACE = salt['pillar.get']('sensor:interface', 'bond0') %}
@@ -156,7 +156,7 @@ localzeeksync:
so-zeek:
docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-zeek:{{ VERSION }}
+ - image: {{ MANAGER }}:5000/soshybridhunter/so-zeek:{{ VERSION }}
- privileged: True
- binds:
- /nsm/zeek/logs:/nsm/zeek/logs:rw
diff --git a/setup/automation/pm_standalone_defaults b/setup/automation/pm_standalone_defaults
index ac0033f83..156697a28 100644
--- a/setup/automation/pm_standalone_defaults
+++ b/setup/automation/pm_standalone_defaults
@@ -32,7 +32,7 @@ BROVERSION=ZEEK
# EVALADVANCED=BASIC
GRAFANA=1
# HELIXAPIKEY=
-HNMASTER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
+HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
HNSENSOR=inherit
HOSTNAME=standalone
install_type=STANDALONE
@@ -40,8 +40,8 @@ install_type=STANDALONE
# LSINPUTTHREADS=
# LSPIPELINEBATCH=
# LSPIPELINEWORKERS=
-MASTERADV=BASIC
-MASTERUPDATES=1
+MANAGERADV=BASIC
+MANAGERUPDATES=1
# MDNS=
# MGATEWAY=
# MIP=
@@ -55,7 +55,7 @@ NIDS=Suricata
# NODE_LS_HEAP_SIZE=
NODESETUP=NODEBASIC
NSMSETUP=BASIC
-NODEUPDATES=MASTER
+NODEUPDATES=MANAGER
# OINKCODE=
OSQUERY=1
# PATCHSCHEDULEDAYS=
diff --git a/setup/so-functions b/setup/so-functions
index 5bbb319eb..7741b4181 100755
--- a/setup/so-functions
+++ b/setup/so-functions
@@ -24,7 +24,7 @@ SOVERSION=$(cat ../VERSION)
accept_salt_key_remote() {
systemctl restart salt-minion
- echo "Accept the key remotely on the master" >> "$setup_log" 2>&1
+ echo "Accept the key remotely on the manager" >> "$setup_log" 2>&1
# Delete the key just in case.
ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo salt-key -d "$MINION_ID" -y
salt-call state.apply ca
@@ -43,11 +43,11 @@ add_admin_user() {
}
-add_master_hostfile() {
+add_manager_hostfile() {
[ -n "$TESTING" ] && return
- echo "Checking if I can resolve master. If not add to hosts file" >> "$setup_log" 2>&1
+ echo "Checking if I can resolve manager. If not add to hosts file" >> "$setup_log" 2>&1
# Pop up an input to get the IP address
MSRVIP=$(whiptail --title "Security Onion Setup" --inputbox \
"Enter your Master Server IP Address" 10 60 X.X.X.X 3>&1 1>&2 2>&3)
@@ -60,7 +60,7 @@ addtotab_generate_templates() {
local addtotab_path=$local_salt_dir/pillar/data
- for i in evaltab mastersearchtab mastertab nodestab sensorstab standalonetab; do
+ for i in evaltab managersearchtab managertab nodestab sensorstab standalonetab; do
printf '%s\n'\
"$i:"\
"" > "$addtotab_path"/$i.sls
@@ -87,11 +87,11 @@ so_add_user() {
fi
}
-add_socore_user_master() {
+add_socore_user_manager() {
so_add_user "socore" "939" "939" "/opt/so" >> "$setup_log" 2>&1
}
-add_soremote_user_master() {
+add_soremote_user_manager() {
so_add_user "soremote" "947" "947" "/home/soremote" "$SOREMOTEPASS1" >> "$setup_log" 2>&1
}
@@ -152,7 +152,7 @@ bro_logs_enabled() {
"brologs:"\
" enabled:" > "$brologs_pillar"
- if [ "$MASTERADV" = 'ADVANCED' ]; then
+ if [ "$MANAGERADV" = 'ADVANCED' ]; then
for BLOG in "${BLOGS[@]}"; do
echo " - $BLOG" | tr -d '"' >> "$brologs_pillar"
done
@@ -265,12 +265,12 @@ check_web_pass() {
check_pass_match "$WEBPASSWD1" "$WEBPASSWD2" "WPMATCH"
}
-clear_master() {
- # Clear out the old master public key in case this is a re-install.
- # This only happens if you re-install the master.
+clear_manager() {
+ # Clear out the old manager public key in case this is a re-install.
+ # This only happens if you re-install the manager.
if [ -f /etc/salt/pki/minion/minion_master.pub ]; then
{
- echo "Clearing old master key";
+ echo "Clearing old Salt master key";
rm -f /etc/salt/pki/minion/minion_master.pub;
systemctl -q restart salt-minion;
} >> "$setup_log" 2>&1
@@ -360,7 +360,7 @@ configure_minion() {
'helix')
echo "master: $HOSTNAME" >> "$minion_config"
;;
- 'master' | 'eval' | 'mastersearch' | 'standalone')
+ 'manager' | 'eval' | 'managersearch' | 'standalone')
printf '%s\n'\
"master: $HOSTNAME"\
"mysql.host: '$MAINIP'"\
@@ -437,9 +437,9 @@ check_requirements() {
fi
}
-copy_master_config() {
+copy_salt_master_config() {
- # Copy the master config template to the proper directory
+ # Copy the Salt master config template to the proper directory
if [ "$setup_type" = 'iso' ]; then
cp /root/SecurityOnion/files/master /etc/salt/master >> "$setup_log" 2>&1
else
@@ -452,7 +452,7 @@ copy_master_config() {
copy_minion_tmp_files() {
case "$install_type" in
- 'MASTER' | 'EVAL' | 'HELIXSENSOR' | 'MASTERSEARCH' | 'STANDALONE')
+ 'MANAGER' | 'EVAL' | 'HELIXSENSOR' | 'MANAGERSEARCH' | 'STANDALONE')
echo "Copying pillar and salt files in $temp_install_dir to $local_salt_dir"
cp -Rv "$temp_install_dir"/pillar/ $local_salt_dir/ >> "$setup_log" 2>&1
if [ -d "$temp_install_dir"/salt ] ; then
@@ -461,12 +461,12 @@ copy_minion_tmp_files() {
;;
*)
{
- echo "scp pillar and salt files in $temp_install_dir to master $local_salt_dir";
+ echo "scp pillar and salt files in $temp_install_dir to manager $local_salt_dir";
ssh -i /root/.ssh/so.key soremote@"$MSRV" mkdir -p /tmp/"$MINION_ID"/pillar;
ssh -i /root/.ssh/so.key soremote@"$MSRV" mkdir -p /tmp/"$MINION_ID"/schedules;
scp -prv -i /root/.ssh/so.key "$temp_install_dir"/pillar/minions/* soremote@"$MSRV":/tmp/"$MINION_ID"/pillar/;
scp -prv -i /root/.ssh/so.key "$temp_install_dir"/salt/patch/os/schedules/* soremote@"$MSRV":/tmp/"$MINION_ID"/schedules;
- ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/salt/master/files/add_minion.sh "$MINION_ID";
+ ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/salt/manager/files/add_minion.sh "$MINION_ID";
} >> "$setup_log" 2>&1
;;
esac
@@ -479,8 +479,8 @@ copy_ssh_key() {
mkdir -p /root/.ssh
ssh-keygen -f /root/.ssh/so.key -t rsa -q -N "" < /dev/zero
chown -R "$SUDO_USER":"$SUDO_USER" /root/.ssh
- echo "Copying the SSH key to the master"
- #Copy the key over to the master
+ echo "Copying the SSH key to the manager"
+ #Copy the key over to the manager
ssh-copy-id -f -i /root/.ssh/so.key soremote@"$MSRV"
}
@@ -703,7 +703,7 @@ docker_install() {
else
case "$install_type" in
- 'MASTER' | 'EVAL')
+ 'MANAGER' | 'EVAL')
apt-get update >> "$setup_log" 2>&1
;;
*)
@@ -733,7 +733,7 @@ docker_registry() {
echo "Setting up Docker Registry" >> "$setup_log" 2>&1
mkdir -p /etc/docker >> "$setup_log" 2>&1
- # Make the host use the master docker registry
+ # Make the host use the manager docker registry
if [ -n "$TURBO" ]; then local proxy="$TURBO"; else local proxy="https://$MSRV"; fi
printf '%s\n'\
"{"\
@@ -832,7 +832,7 @@ firewall_generate_templates() {
cp ../files/firewall/* /opt/so/saltstack/local/salt/firewall/ >> "$setup_log" 2>&1
- for i in analyst beats_endpoint sensor master minion osquery_endpoint search_node wazuh_endpoint; do
+ for i in analyst beats_endpoint sensor manager minion osquery_endpoint search_node wazuh_endpoint; do
$default_salt_dir/salt/common/tools/sbin/so-firewall includehost "$i" 127.0.0.1
done
@@ -846,7 +846,7 @@ fleet_pillar() {
printf '%s\n'\
"fleet:"\
" mainip: $MAINIP"\
- " master: $MSRV"\
+ " manager: $MSRV"\
"" > "$pillar_file"
}
@@ -883,7 +883,7 @@ got_root() {
get_minion_type() {
local minion_type
case "$install_type" in
- 'EVAL' | 'MASTERSEARCH' | 'MASTER' | 'SENSOR' | 'HEAVYNODE' | 'FLEET' | 'STANDALONE')
+ 'EVAL' | 'MANAGERSEARCH' | 'MANAGER' | 'SENSOR' | 'HEAVYNODE' | 'FLEET' | 'STANDALONE')
minion_type=$(echo "$install_type" | tr '[:upper:]' '[:lower:]')
;;
'HELIXSENSOR')
@@ -916,13 +916,13 @@ install_cleanup() {
}
-master_pillar() {
+manager_pillar() {
local pillar_file=$temp_install_dir/pillar/minions/$MINION_ID.sls
- # Create the master pillar
+ # Create the manager pillar
printf '%s\n'\
- "master:"\
+ "manager:"\
" mainip: $MAINIP"\
" mainint: $MNIC"\
" esheap: $ES_HEAP_SIZE"\
@@ -931,7 +931,7 @@ master_pillar() {
" domainstats: 0" >> "$pillar_file"
- if [ "$install_type" = 'EVAL' ] || [ "$install_type" = 'HELIXSENSOR' ] || [ "$install_type" = 'MASTERSEARCH' ] || [ "$install_type" = 'STANDALONE' ]; then
+ if [ "$install_type" = 'EVAL' ] || [ "$install_type" = 'HELIXSENSOR' ] || [ "$install_type" = 'MANAGERSEARCH' ] || [ "$install_type" = 'STANDALONE' ]; then
printf '%s\n'\
" mtu: $MTU" >> "$pillar_file"
fi
@@ -978,19 +978,19 @@ master_pillar() {
cat "$pillar_file" >> "$setup_log" 2>&1
}
-master_static() {
+manager_static() {
local static_pillar="$local_salt_dir/pillar/static.sls"
# Create a static file for global values
printf '%s\n'\
"static:"\
" soversion: $SOVERSION"\
- " hnmaster: $HNMASTER"\
+ " hnmanager: $HNMANAGER"\
" ntpserver: $NTPSERVER"\
" proxy: $PROXY"\
" broversion: $BROVERSION"\
" ids: $NIDS"\
- " masterip: $MAINIP"\
+ " managerip: $MAINIP"\
" hiveuser: hiveadmin"\
" hivepassword: hivechangeme"\
" hivekey: $HIVEKEY"\
@@ -1001,7 +1001,7 @@ master_static() {
" cortexorguser: soadmin"\
" cortexorguserkey: $CORTEXORGUSERKEY"\
" fleet_custom_hostname: "\
- " fleet_master: False"\
+ " fleet_manager: False"\
" fleet_node: False"\
" fleet_packages-timestamp: N/A"\
" fleet_packages-version: 1"\
@@ -1009,7 +1009,7 @@ master_static() {
" fleet_ip: N/A"\
" sensoronikey: $SENSORONIKEY"\
" wazuh: $WAZUH"\
- " masterupdate: $MASTERUPDATES"\
+ " managerupdate: $MANAGERUPDATES"\
"strelka:"\
" enabled: $STRELKA"\
" rules: $STRELKARULES"\
@@ -1074,7 +1074,7 @@ elasticsearch_pillar() {
" replicas: 0"\
"" >> "$pillar_file"
- if [ "$install_type" != 'EVAL' ] && [ "$install_type" != 'HELIXSENSOR' ] && [ "$install_type" != 'MASTERSEARCH' ] && [ "$install_type" != 'STANDALONE' ]; then
+ if [ "$install_type" != 'EVAL' ] && [ "$install_type" != 'HELIXSENSOR' ] && [ "$install_type" != 'MANAGERSEARCH' ] && [ "$install_type" != 'STANDALONE' ]; then
printf '%s\n'\
"logstash_settings:"\
" ls_pipeline_batch_size: $LSPIPELINEBATCH"\
@@ -1162,11 +1162,11 @@ saltify() {
set_progress_str 6 'Installing various dependencies'
yum -y install wget nmap-ncat >> "$setup_log" 2>&1
case "$install_type" in
- 'MASTER' | 'EVAL' | 'MASTERSEARCH' | 'FLEET' | 'HELIXSENSOR' | 'STANDALONE')
+ 'MANAGER' | 'EVAL' | 'MANAGERSEARCH' | 'FLEET' | 'HELIXSENSOR' | 'STANDALONE')
reserve_group_ids >> "$setup_log" 2>&1
yum -y install epel-release >> "$setup_log" 2>&1
yum -y install sqlite argon2 curl mariadb-devel >> "$setup_log" 2>&1
- # Download Ubuntu Keys in case master updates = 1
+ # Download Ubuntu Keys in case manager updates = 1
mkdir -p /opt/so/gpg >> "$setup_log" 2>&1
wget -q --inet4-only -O /opt/so/gpg/SALTSTACK-GPG-KEY.pub https://repo.saltstack.com/apt/ubuntu/16.04/amd64/latest/SALTSTACK-GPG-KEY.pub >> "$setup_log" 2>&1
wget -q --inet4-only -O /opt/so/gpg/docker.pub https://download.docker.com/linux/ubuntu/gpg >> "$setup_log" 2>&1
@@ -1177,7 +1177,7 @@ saltify() {
systemctl enable salt-master >> "$setup_log" 2>&1
;;
*)
- if [ "$MASTERUPDATES" = '1' ]; then
+ if [ "$MANAGERUPDATES" = '1' ]; then
{
# Create the GPG Public Key for the Salt Repo
cp ./public_keys/salt.pem /etc/pki/rpm-gpg/saltstack-signing-key;
@@ -1233,7 +1233,7 @@ saltify() {
'FLEET')
if [ "$OSVER" != 'xenial' ]; then apt-get -y install python3-mysqldb >> "$setup_log" 2>&1; else apt-get -y install python-mysqldb >> "$setup_log" 2>&1; fi
;;
- 'MASTER' | 'EVAL' | 'MASTERSEARCH' | 'STANDALONE') # TODO: should this also be HELIXSENSOR?
+ 'MANAGER' | 'EVAL' | 'MANAGERSEARCH' | 'STANDALONE') # TODO: should this also be HELIXSENSOR?
if [ "$OSVER" != "xenial" ]; then local py_ver_url_path="/py3"; else local py_ver_url_path="/apt"; fi
# Add saltstack repo(s)
@@ -1263,9 +1263,9 @@ saltify() {
apt-mark hold salt-master >> "$setup_log" 2>&1
;;
*)
- # Copy down the gpg keys and install them from the master
+ # Copy down the gpg keys and install them from the manager
mkdir "$temp_install_dir"/gpg >> "$setup_log" 2>&1
- echo "scp the gpg keys and install them from the master" >> "$setup_log" 2>&1
+ echo "scp the gpg keys and install them from the manager" >> "$setup_log" 2>&1
scp -v -i /root/.ssh/so.key soremote@"$MSRV":/opt/so/gpg/* "$temp_install_dir"/gpg >> "$setup_log" 2>&1
echo "Using apt-key add to add SALTSTACK-GPG-KEY.pub and GPG-KEY-WAZUH" >> "$setup_log" 2>&1
apt-key add "$temp_install_dir"/gpg/SALTSTACK-GPG-KEY.pub >> "$setup_log" 2>&1
@@ -1291,7 +1291,7 @@ saltify() {
salt_checkin() {
case "$install_type" in
- 'MASTER' | 'EVAL' | 'HELIXSENSOR' | 'MASTERSEARCH' | 'STANDALONE') # Fix Mine usage
+ 'MANAGER' | 'EVAL' | 'HELIXSENSOR' | 'MANAGERSEARCH' | 'STANDALONE') # Fix Mine usage
{
echo "Building Certificate Authority";
salt-call state.apply ca;
@@ -1359,7 +1359,7 @@ setup_salt_master_dirs() {
cp -R ../salt/* $default_salt_dir/salt/ >> "$setup_log" 2>&1
fi
- echo "Chown the salt dirs on the master for socore" >> "$setup_log" 2>&1
+ echo "Chown the salt dirs on the manager for socore" >> "$setup_log" 2>&1
chown -R socore:socore /opt/so
}
@@ -1414,7 +1414,7 @@ sensor_pillar() {
" brobpf:"\
" pcapbpf:"\
" nidsbpf:"\
- " master: $MSRV"\
+ " manager: $MSRV"\
" mtu: $MTU"\
" uniqueid: $(date '+%s')" >> "$pillar_file"
if [ "$HNSENSOR" != 'inherit' ]; then
@@ -1460,7 +1460,7 @@ set_hostname() {
set_hostname_iso
- if [[ ! $install_type =~ ^(MASTER|EVAL|HELIXSENSOR|MASTERSEARCH|STANDALONE)$ ]]; then
+ if [[ ! $install_type =~ ^(MANAGER|EVAL|HELIXSENSOR|MANAGERSEARCH|STANDALONE)$ ]]; then
if ! getent hosts "$MSRV"; then
echo "$MSRVIP $MSRV" >> /etc/hosts
fi
@@ -1487,13 +1487,13 @@ set_initial_firewall_policy() {
if [ -f $default_salt_dir/salt/common/tools/sbin/so-firewall ]; then chmod +x $default_salt_dir/salt/common/tools/sbin/so-firewall; fi
case "$install_type" in
- 'MASTER')
- $default_salt_dir/salt/common/tools/sbin/so-firewall includehost master "$MAINIP"
+ 'MANAGER')
+ $default_salt_dir/salt/common/tools/sbin/so-firewall includehost manager "$MAINIP"
$default_salt_dir/salt/common/tools/sbin/so-firewall --apply includehost minion "$MAINIP"
- $default_salt_dir/pillar/data/addtotab.sh mastertab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm"
+ $default_salt_dir/pillar/data/addtotab.sh managertab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm"
;;
- 'EVAL' | 'MASTERSEARCH' | 'STANDALONE')
- $default_salt_dir/salt/common/tools/sbin/so-firewall includehost master "$MAINIP"
+ 'EVAL' | 'MANAGERSEARCH' | 'STANDALONE')
+ $default_salt_dir/salt/common/tools/sbin/so-firewall includehost manager "$MAINIP"
$default_salt_dir/salt/common/tools/sbin/so-firewall includehost minion "$MAINIP"
$default_salt_dir/salt/common/tools/sbin/so-firewall includehost sensor "$MAINIP"
$default_salt_dir/salt/common/tools/sbin/so-firewall --apply includehost search_node "$MAINIP"
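
Assuming so-firewall records each included host under the named hostgroup in
the local hostgroups YAML (key nesting assumed, address illustrative), a
standalone install with MAINIP 10.0.0.5 would end up with entries along these
lines for each hostgroup named above:

      manager:
        ips:
          insert:
            - 10.0.0.5
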
@@ -1501,8 +1501,8 @@ set_initial_firewall_policy() {
'EVAL')
$default_salt_dir/pillar/data/addtotab.sh evaltab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm" $INTERFACE True
;;
- 'MASTERSEARCH')
- $default_salt_dir/pillar/data/addtotab.sh mastersearchtab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm"
+ 'MANAGERSEARCH')
+ $default_salt_dir/pillar/data/addtotab.sh managersearchtab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm"
;;
'STANDALONE')
$default_salt_dir/pillar/data/addtotab.sh standalonetab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm" $INTERFACE
@@ -1510,7 +1510,7 @@ set_initial_firewall_policy() {
esac
;;
'HELIXSENSOR')
- $default_salt_dir/salt/common/tools/sbin/so-firewall includehost master "$MAINIP"
+ $default_salt_dir/salt/common/tools/sbin/so-firewall includehost manager "$MAINIP"
$default_salt_dir/salt/common/tools/sbin/so-firewall includehost minion "$MAINIP"
$default_salt_dir/salt/common/tools/sbin/so-firewall --apply includehost sensor "$MAINIP"
;;
@@ -1569,7 +1569,7 @@ set_management_interface() {
set_node_type() {
case "$install_type" in
- 'SEARCHNODE' | 'EVAL' | 'MASTERSEARCH' | 'HEAVYNODE' | 'STANDALONE')
+ 'SEARCHNODE' | 'EVAL' | 'MANAGERSEARCH' | 'HEAVYNODE' | 'STANDALONE')
NODETYPE='search'
;;
'HOTNODE')
@@ -1582,13 +1582,13 @@ set_node_type() {
}
set_updates() {
- if [ "$MASTERUPDATES" = '1' ]; then
+ if [ "$MANAGERUPDATES" = '1' ]; then
if [ "$OS" = 'centos' ]; then
if ! grep -q "$MSRV" /etc/yum.conf; then
echo "proxy=http://$MSRV:3142" >> /etc/yum.conf
fi
else
- # Set it up so the updates roll through the master
+ # Set it up so the updates roll through the manager
printf '%s\n'\
"Acquire::http::Proxy \"http://$MSRV:3142\";"\
"Acquire::https::Proxy \"http://$MSRV:3142\";" > /etc/apt/apt.conf.d/00Proxy
@@ -1609,7 +1609,7 @@ update_sudoers() {
echo "soremote ALL=(ALL) NOPASSWD:/usr/bin/salt-key" | tee -a /etc/sudoers
echo "soremote ALL=(ALL) NOPASSWD:$default_salt_dir/salt/common/tools/sbin/so-firewall" | tee -a /etc/sudoers
echo "soremote ALL=(ALL) NOPASSWD:$default_salt_dir/pillar/data/addtotab.sh" | tee -a /etc/sudoers
- echo "soremote ALL=(ALL) NOPASSWD:$default_salt_dir/salt/master/files/add_minion.sh" | tee -a /etc/sudoers
+ echo "soremote ALL=(ALL) NOPASSWD:$default_salt_dir/salt/manager/files/add_minion.sh" | tee -a /etc/sudoers
else
echo "User soremote already granted sudo privileges" >> "$setup_log" 2>&1
fi
@@ -1625,7 +1625,7 @@ update_packages() {
}
use_turbo_proxy() {
- if [[ ! $install_type =~ ^(MASTER|EVAL|HELIXSENSOR|MASTERSEARCH|STANDALONE)$ ]]; then
+ if [[ ! $install_type =~ ^(MANAGER|EVAL|HELIXSENSOR|MANAGERSEARCH|STANDALONE)$ ]]; then
echo "turbo is not supported on this install type" >> $setup_log 2>&1
return
fi
@@ -1649,7 +1649,7 @@ ls_heapsize() {
fi
case "$install_type" in
- 'MASTERSEARCH' | 'HEAVYNODE' | 'HELIXSENSOR' | 'STANDALONE')
+ 'MANAGERSEARCH' | 'HEAVYNODE' | 'HELIXSENSOR' | 'STANDALONE')
LS_HEAP_SIZE='1000m'
;;
'EVAL')
@@ -1661,7 +1661,7 @@ ls_heapsize() {
esac
export LS_HEAP_SIZE
- if [[ "$install_type" =~ ^(EVAL|MASTERSEARCH|STANDALONE)$ ]]; then
+ if [[ "$install_type" =~ ^(EVAL|MANAGERSEARCH|STANDALONE)$ ]]; then
NODE_LS_HEAP_SIZE=LS_HEAP_SIZE
export NODE_LS_HEAP_SIZE
fi
@@ -1683,7 +1683,7 @@ es_heapsize() {
fi
export ES_HEAP_SIZE
- if [[ "$install_type" =~ ^(EVAL|MASTERSEARCH|STANDALONE)$ ]]; then
+ if [[ "$install_type" =~ ^(EVAL|MANAGERSEARCH|STANDALONE)$ ]]; then
NODE_ES_HEAP_SIZE=ES_HEAP_SIZE
export NODE_ES_HEAP_SIZE
fi
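
The heap-size exports above follow one pattern: compute a base value, then
mirror it into NODE_* variables on combined manager+search installs. A minimal
sketch of the intended propagation (copying by value, i.e. with parameter
expansion, is assumed to be the intent here):

    if [[ "$install_type" =~ ^(EVAL|MANAGERSEARCH|STANDALONE)$ ]]; then
        # Mirror the base heap values into the node-specific variables.
        NODE_LS_HEAP_SIZE=$LS_HEAP_SIZE
        NODE_ES_HEAP_SIZE=$ES_HEAP_SIZE
        export NODE_LS_HEAP_SIZE NODE_ES_HEAP_SIZE
    fi
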
diff --git a/setup/so-setup b/setup/so-setup
index 3f6d42380..8844e87ed 100755
--- a/setup/so-setup
+++ b/setup/so-setup
@@ -129,21 +129,21 @@ whiptail_install_type
if [ "$install_type" = 'EVAL' ]; then
is_node=true
- is_master=true
+ is_manager=true
is_sensor=true
is_eval=true
elif [ "$install_type" = 'STANDALONE' ]; then
- is_master=true
- is_distmaster=true
+ is_manager=true
+ is_distmanager=true
is_node=true
is_sensor=true
-elif [ "$install_type" = 'MASTERSEARCH' ]; then
- is_master=true
- is_distmaster=true
+elif [ "$install_type" = 'MANAGERSEARCH' ]; then
+ is_manager=true
+ is_distmanager=true
is_node=true
-elif [ "$install_type" = 'MASTER' ]; then
- is_master=true
- is_distmaster=true
+elif [ "$install_type" = 'MANAGER' ]; then
+ is_manager=true
+ is_distmanager=true
elif [ "$install_type" = 'SENSOR' ]; then
is_sensor=true
is_minion=true
@@ -169,7 +169,7 @@ elif [[ $is_fleet_standalone ]]; then
check_requirements "dist" "fleet"
elif [[ $is_sensor && ! $is_eval ]]; then
check_requirements "dist" "sensor"
-elif [[ $is_distmaster || $is_minion ]]; then
+elif [[ $is_distmanager || $is_minion ]]; then
check_requirements "dist"
fi
@@ -214,15 +214,15 @@ if [[ $is_helix ]]; then
RULESETUP=ETOPEN
NSMSETUP=BASIC
HNSENSOR=inherit
- MASTERUPDATES=0
+ MANAGERUPDATES=0
fi
-if [[ $is_helix || ( $is_master && $is_node ) ]]; then
+if [[ $is_helix || ( $is_manager && $is_node ) ]]; then
RULESETUP=ETOPEN
NSMSETUP=BASIC
fi
-if [[ $is_master && $is_node ]]; then
+if [[ $is_manager && $is_node ]]; then
LSPIPELINEWORKERS=1
LSPIPELINEBATCH=125
LSINPUTTHREADS=1
@@ -241,16 +241,16 @@ if [[ $is_helix || $is_sensor ]]; then
calculate_useable_cores
fi
-if [[ $is_helix || $is_master ]]; then
- whiptail_homenet_master
+if [[ $is_helix || $is_manager ]]; then
+ whiptail_homenet_manager
fi
-if [[ $is_helix || $is_master || $is_node ]]; then
+if [[ $is_helix || $is_manager || $is_node ]]; then
set_base_heapsizes
fi
-if [[ $is_master && ! $is_eval ]]; then
- whiptail_master_adv
+if [[ $is_manager && ! $is_eval ]]; then
+ whiptail_manager_adv
whiptail_bro_version
whiptail_nids
whiptail_rule_setup
@@ -259,12 +259,12 @@ if [[ $is_master && ! $is_eval ]]; then
whiptail_oinkcode
fi
- if [ "$MASTERADV" = 'ADVANCED' ] && [ "$BROVERSION" != 'SURICATA' ]; then
- whiptail_master_adv_service_brologs
+ if [ "$MANAGERADV" = 'ADVANCED' ] && [ "$BROVERSION" != 'SURICATA' ]; then
+ whiptail_manager_adv_service_brologs
fi
fi
-if [[ $is_master ]]; then
+if [[ $is_manager ]]; then
whiptail_components_adv_warning
whiptail_enable_components
if [[ $STRELKA == 1 ]]; then
@@ -274,10 +274,10 @@ if [[ $is_master ]]; then
get_redirect
fi
-if [[ $is_distmaster || ( $is_sensor || $is_node || $is_fleet_standalone ) && ! $is_eval ]]; then
- whiptail_master_updates
- if [[ $setup_type == 'network' && $MASTERUPDATES == 1 ]]; then
- whiptail_master_updates_warning
+if [[ $is_distmanager || ( $is_sensor || $is_node || $is_fleet_standalone ) && ! $is_eval ]]; then
+ whiptail_manager_updates
+ if [[ $setup_type == 'network' && $MANAGERUPDATES == 1 ]]; then
+ whiptail_manager_updates_warning
fi
fi
@@ -285,7 +285,7 @@ if [[ $is_minion ]]; then
whiptail_management_server
fi
-if [[ $is_distmaster ]]; then
+if [[ $is_distmanager ]]; then
collect_soremote_inputs
fi
@@ -349,20 +349,20 @@ fi
{
set_hostname;
set_version;
- clear_master;
+ clear_manager;
} >> $setup_log 2>&1
-if [[ $is_master ]]; then
+if [[ $is_manager ]]; then
{
generate_passwords;
secrets_pillar;
- add_socore_user_master;
+ add_socore_user_manager;
} >> $setup_log 2>&1
fi
-if [[ $is_master && ! $is_eval ]]; then
- add_soremote_user_master >> $setup_log 2>&1
+if [[ $is_manager && ! $is_eval ]]; then
+ add_soremote_user_manager >> $setup_log 2>&1
fi
set_main_ip >> $setup_log 2>&1
@@ -408,12 +408,12 @@ fi
set_progress_str 9 'Initializing Salt minion'
configure_minion "$minion_type" >> $setup_log 2>&1
- if [[ $is_master || $is_helix ]]; then
+ if [[ $is_manager || $is_helix ]]; then
set_progress_str 10 'Configuring Salt master'
{
create_local_directories;
addtotab_generate_templates;
- copy_master_config;
+ copy_salt_master_config;
setup_salt_master_dirs;
firewall_generate_templates;
} >> $setup_log 2>&1
@@ -421,11 +421,11 @@ fi
set_progress_str 11 'Updating sudoers file for soremote user'
update_sudoers >> $setup_log 2>&1
- set_progress_str 12 'Generating master static pillar'
- master_static >> $setup_log 2>&1
+ set_progress_str 12 'Generating manager static pillar'
+ manager_static >> $setup_log 2>&1
- set_progress_str 13 'Generating master pillar'
- master_pillar >> $setup_log 2>&1
+ set_progress_str 13 'Generating manager pillar'
+ manager_pillar >> $setup_log 2>&1
fi
@@ -446,22 +446,22 @@ fi
fi
if [[ $is_minion ]]; then
- set_progress_str 20 'Accepting Salt key on master'
+ set_progress_str 20 'Accepting Salt key on manager'
accept_salt_key_remote >> $setup_log 2>&1
fi
- if [[ $is_master ]]; then
+ if [[ $is_manager ]]; then
set_progress_str 20 'Accepting Salt key'
salt-key -ya "$MINION_ID" >> $setup_log 2>&1
fi
- set_progress_str 21 'Copying minion pillars to master'
+ set_progress_str 21 'Copying minion pillars to manager'
copy_minion_tmp_files >> $setup_log 2>&1
set_progress_str 22 'Generating CA and checking in'
salt_checkin >> $setup_log 2>&1
- if [[ $is_master || $is_helix ]]; then
+ if [[ $is_manager || $is_helix ]]; then
set_progress_str 25 'Configuring firewall'
set_initial_firewall_policy >> $setup_log 2>&1
@@ -474,14 +474,14 @@ fi
salt-call state.apply -l info registry >> $setup_log 2>&1
docker_seed_registry 2>> "$setup_log" # ~ 60% when finished
- set_progress_str 60 "$(print_salt_state_apply 'master')"
- salt-call state.apply -l info master >> $setup_log 2>&1
+ set_progress_str 60 "$(print_salt_state_apply 'manager')"
+ salt-call state.apply -l info manager >> $setup_log 2>&1
set_progress_str 61 "$(print_salt_state_apply 'idstools')"
salt-call state.apply -l info idstools >> $setup_log 2>&1
- set_progress_str 61 "$(print_salt_state_apply 'suricata.master')"
- salt-call state.apply -l info suricata.master >> $setup_log 2>&1
+ set_progress_str 61 "$(print_salt_state_apply 'suricata.manager')"
+ salt-call state.apply -l info suricata.manager >> $setup_log 2>&1
fi
@@ -499,7 +499,7 @@ fi
set_progress_str 64 "$(print_salt_state_apply 'nginx')"
salt-call state.apply -l info nginx >> $setup_log 2>&1
- if [[ $is_master || $is_node ]]; then
+ if [[ $is_manager || $is_node ]]; then
set_progress_str 64 "$(print_salt_state_apply 'elasticsearch')"
salt-call state.apply -l info elasticsearch >> $setup_log 2>&1
fi
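
Each provisioning step in this file follows the same shape: bump the progress
bar, then apply a single Salt state locally. A hypothetical helper (apply_state
does not exist in the source) that captures the repeated pattern:

    apply_state() {
        # Update the progress UI, then apply one Salt state on this minion.
        local pct=$1 state=$2
        set_progress_str "$pct" "$(print_salt_state_apply "$state")"
        salt-call state.apply -l info "$state" >> "$setup_log" 2>&1
    }
    # e.g. apply_state 64 elasticsearch
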
@@ -520,7 +520,7 @@ fi
salt-call state.apply -l info curator >> $setup_log 2>&1
fi
- if [[ $is_master ]]; then
+ if [[ $is_manager ]]; then
set_progress_str 69 "$(print_salt_state_apply 'soc')"
salt-call state.apply -l info soc >> $setup_log 2>&1
@@ -584,12 +584,12 @@ fi
fi
fi
- if [[ $is_master || $is_helix ]]; then
+ if [[ $is_manager || $is_helix ]]; then
set_progress_str 81 "$(print_salt_state_apply 'utility')"
salt-call state.apply -l info utility >> $setup_log 2>&1
fi
- if [[ ( $is_helix || $is_master || $is_node ) && ! $is_eval ]]; then
+ if [[ ( $is_helix || $is_manager || $is_node ) && ! $is_eval ]]; then
set_progress_str 82 "$(print_salt_state_apply 'logstash')"
salt-call state.apply -l info logstash >> $setup_log 2>&1
@@ -601,7 +601,7 @@ fi
filter_unused_nics >> $setup_log 2>&1
network_setup >> $setup_log 2>&1
- if [[ $is_master ]]; then
+ if [[ $is_manager ]]; then
set_progress_str 87 'Adding user to SOC'
add_web_user >> $setup_log 2>&1
fi
diff --git a/setup/so-whiptail b/setup/so-whiptail
index e165ba351..12fdecf99 100755
--- a/setup/so-whiptail
+++ b/setup/so-whiptail
@@ -405,23 +405,23 @@ whiptail_helix_apikey() {
}
-whiptail_homenet_master() {
+whiptail_homenet_manager() {
[ -n "$TESTING" ] && return
- HNMASTER=$(whiptail --title "Security Onion Setup" --inputbox \
+ HNMANAGER=$(whiptail --title "Security Onion Setup" --inputbox \
"Enter your HOME_NET separated by ," 10 75 10.0.0.0/8,192.168.0.0/16,172.16.0.0/12 3>&1 1>&2 2>&3)
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
- export HNMASTER
+ export HNMANAGER
}
whiptail_homenet_sensor() {
[ -n "$TESTING" ] && return
- # Ask to inherit from master
+ # Ask to inherit from manager
- whiptail --title "Security Onion Setup" --yesno "Do you want to inherit the HOME_NET from the Master?" 8 75
+ whiptail --title "Security Onion Setup" --yesno "Do you want to inherit the HOME_NET from the Manager?" 8 75
local exitstatus=$?
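
The 3>&1 1>&2 2>&3 redirection used throughout these dialogs swaps stdout and
stderr: whiptail draws its interface on stdout and writes the user's answer to
stderr, so the swap lets command substitution capture the answer while the
interface still reaches the terminal. A standalone example:

    # The fd swap routes the answer to stdout (captured) and the UI to the terminal.
    CHOICE=$(whiptail --title "Demo" --inputbox "Enter a value" 10 75 3>&1 1>&2 2>&3)
    echo "you entered: $CHOICE"
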
@@ -459,10 +459,10 @@ whiptail_install_type() {
if [[ $install_type == "DISTRIBUTED" ]]; then
install_type=$(whiptail --title "Security Onion Setup" --radiolist \
"Choose distributed node type:" 13 60 6 \
- "MASTER" "Start a new grid " ON \
+ "MANAGER" "Start a new grid " ON \
"SENSOR" "Create a forward only sensor " OFF \
"SEARCHNODE" "Add a search node with parsing " OFF \
- "MASTERSEARCH" "Master + search node " OFF \
+ "MANAGERSEARCH" "Master + search node " OFF \
"FLEET" "Dedicated Fleet Osquery Node " OFF \
"HEAVYNODE" "Sensor + Search Node " OFF \
3>&1 1>&2 2>&3
@@ -606,20 +606,20 @@ whiptail_management_server() {
whiptail_check_exitstatus $exitstatus
if ! getent hosts "$MSRV"; then
- add_master_hostfile
+ add_manager_hostfile
fi
}
- # Ask if you want to do advanced setup of the Master
+ # Ask if you want to do advanced setup of the Manager
-whiptail_master_adv() {
+whiptail_manager_adv() {
[ -n "$TESTING" ] && return
- MASTERADV=$(whiptail --title "Security Onion Setup" --radiolist \
- "Choose what type of master install:" 20 75 4 \
- "BASIC" "Install master with recommended settings" ON \
- "ADVANCED" "Do additional configuration to the master" OFF 3>&1 1>&2 2>&3 )
+ MANAGERADV=$(whiptail --title "Security Onion Setup" --radiolist \
+ "Choose what type of manager install:" 20 75 4 \
+ "BASIC" "Install manager with recommended settings" ON \
+ "ADVANCED" "Do additional configuration to the manager" OFF 3>&1 1>&2 2>&3 )
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
@@ -627,7 +627,7 @@ whiptail_master_adv() {
}
# Ask which additional components to install
-whiptail_master_adv_service_brologs() {
+whiptail_manager_adv_service_brologs() {
[ -n "$TESTING" ] && return
@@ -792,7 +792,7 @@ whiptail_patch_name_new_schedule() {
[ -n "$TESTING" ] && return
PATCHSCHEDULENAME=$(whiptail --title "Security Onion Setup" --inputbox \
- "What name do you want to give this OS patch schedule? This schedule needs to be named uniquely. Available schedules can be found on the master under /opt/so/salt/patch/os/schedules/.yml" 10 75 3>&1 1>&2 2>&3)
+ "What name do you want to give this OS patch schedule? This schedule needs to be named uniquely. Available schedules can be found on the manager under /opt/so/salt/patch/os/schedules/.yml" 10 75 3>&1 1>&2 2>&3)
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
@@ -800,7 +800,7 @@ whiptail_patch_name_new_schedule() {
while [[ -z "$PATCHSCHEDULENAME" ]]; do
whiptail --title "Security Onion Setup" --msgbox "Please enter a name for this OS patch schedule." 8 75
PATCHSCHEDULENAME=$(whiptail --title "Security Onion Setup" --inputbox \
- "What name do you want to give this OS patch schedule? This schedule needs to be named uniquely. Available schedules can be found on the master under /opt/so/salt/patch/os/schedules/.yml" 10 75 3>&1 1>&2 2>&3)
+ "What name do you want to give this OS patch schedule? This schedule needs to be named uniquely. Available schedules can be found on the manager under /opt/so/salt/patch/os/schedules/.yml" 10 75 3>&1 1>&2 2>&3)
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
done
@@ -851,7 +851,7 @@ whiptail_patch_schedule_import() {
unset PATCHSCHEDULENAME
PATCHSCHEDULENAME=$(whiptail --title "Security Onion Setup" --inputbox \
- "Enter the name of the OS patch schedule you want to inherit. Available schedules can be found on the master under /opt/so/salt/patch/os/schedules/.yml" 10 75 3>&1 1>&2 2>&3)
+ "Enter the name of the OS patch schedule you want to inherit. Available schedules can be found on the manager under /opt/so/salt/patch/os/schedules/.yml" 10 75 3>&1 1>&2 2>&3)
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
@@ -859,7 +859,7 @@ whiptail_patch_schedule_import() {
while [[ -z "$PATCHSCHEDULENAME" ]]; do
whiptail --title "Security Onion Setup" --msgbox "Please enter a name for the OS patch schedule you want to inherit." 8 75
PATCHSCHEDULENAME=$(whiptail --title "Security Onion Setup" --inputbox \
- "Enter the name of the OS patch schedule you want to inherit. Available schedules can be found on the master under /opt/so/salt/patch/os/schedules/.yml" 10 75 3>&1 1>&2 2>&3)
+ "Enter the name of the OS patch schedule you want to inherit. Available schedules can be found on the manager under /opt/so/salt/patch/os/schedules/.yml" 10 75 3>&1 1>&2 2>&3)
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
@@ -945,7 +945,7 @@ whiptail_rule_setup() {
# Get pulled pork info
RULESETUP=$(whiptail --title "Security Onion Setup" --radiolist \
- "Which IDS ruleset would you like to use?\n\nThis master server is responsible for downloading the IDS ruleset from the Internet.\n\nSensors then pull a copy of this ruleset from the master server.\n\nIf you select a commercial ruleset, it is your responsibility to purchase enough licenses for all of your sensors in compliance with your vendor's policies." 20 75 4 \
+ "Which IDS ruleset would you like to use?\n\nThis manager server is responsible for downloading the IDS ruleset from the Internet.\n\nSensors then pull a copy of this ruleset from the manager server.\n\nIf you select a commercial ruleset, it is your responsibility to purchase enough licenses for all of your sensors in compliance with your vendor's policies." 20 75 4 \
"ETOPEN" "Emerging Threats Open" ON \
"ETPRO" "Emerging Threats PRO" OFF \
"TALOSET" "Snort Subscriber (Talos) and ET NoGPL rulesets" OFF \
@@ -1098,34 +1098,34 @@ whiptail_suricata_pins() {
}
-whiptail_master_updates() {
+whiptail_manager_updates() {
[ -n "$TESTING" ] && return
local update_string
update_string=$(whiptail --title "Security Onion Setup" --radiolist \
"How would you like to download OS package updates for your grid?:" 20 75 4 \
- "MASTER" "Master node is proxy for updates." ON \
+ "MANAGER" "Master node is proxy for updates." ON \
"OPEN" "Each node connects to the Internet for updates" OFF 3>&1 1>&2 2>&3 )
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
case "$update_string" in
- 'MASTER')
- MASTERUPDATES='1'
+ 'MANAGER')
+ MANAGERUPDATES='1'
;;
*)
- MASTERUPDATES='0'
+ MANAGERUPDATES='0'
;;
esac
}
-whiptail_master_updates_warning() {
+whiptail_manager_updates_warning() {
[ -n "$TESTING" ] && return
whiptail --title "Security Onion Setup"\
- --msgbox "Updating through the master node requires the master to have internet access, press ENTER to continue"\
+ --msgbox "Updating through the manager node requires the manager to have internet access, press ENTER to continue"\
8 75
local exitstatus=$?
@@ -1138,7 +1138,7 @@ whiptail_node_updates() {
NODEUPDATES=$(whiptail --title "Security Onion Setup" --radiolist \
"How would you like to download OS package updates for your grid?:" 20 75 4 \
- "MASTER" "Master node is proxy for updates." ON \
+ "MANAGER" "Master node is proxy for updates." ON \
"OPEN" "Each node connects to the Internet for updates" OFF 3>&1 1>&2 2>&3 )
local exitstatus=$?
diff --git a/upgrade/so-update-functions b/upgrade/so-update-functions
index 8b7fcd312..a0a4b0288 100644
--- a/upgrade/so-update-functions
+++ b/upgrade/so-update-functions
@@ -32,7 +32,7 @@ fi
HOSTNAME=$(hostname)
# List all the containers
-if [ $MASTERCHECK != 'so-helix' ]; then
+if [ "$MANAGERCHECK" != 'so-helix' ]; then
TRUSTED_CONTAINERS=( \
"so-acng:$BUILD$UPDATEVERSION" \
"so-thehive-cortex:$BUILD$UPDATEVERSION" \
@@ -136,13 +136,13 @@ detect_os() {
}
-master_check() {
- # Check to see if this is a master
- MASTERCHECK=$(cat /etc/salt/grains | grep role | awk '{print $2}')
- if [ $MASTERCHECK == 'so-eval' OR $MASTERCHECK == 'so-master' OR $MASTERCHECK == 'so-mastersearch' ]; then
- echo "This is a master. We can proceed"
+manager_check() {
+ # Check to see if this is a manager
+ MANAGERCHECK=$(cat /etc/salt/grains | grep role | awk '{print $2}')
+ if [[ $MANAGERCHECK == 'so-eval' || $MANAGERCHECK == 'so-manager' || $MANAGERCHECK == 'so-managersearch' ]]; then
+ echo "This is a manager. We can proceed"
else
- echo "Please run soup on the master. The master controls all updates."
+ echo "Please run soup on the manager. The manager controls all updates."
exit
}
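
For clarity, a sketch of the renamed check as a whole: [[ ]] makes the
alternation valid bash, the awk one-liner is an equivalent replacement for the
cat | grep | awk pipeline, and the closing fi is assumed from the full file:

    manager_check() {
        # Check to see if this is a manager by reading the role grain.
        MANAGERCHECK=$(awk '/role/ {print $2}' /etc/salt/grains)
        if [[ $MANAGERCHECK == 'so-eval' || $MANAGERCHECK == 'so-manager' || $MANAGERCHECK == 'so-managersearch' ]]; then
            echo "This is a manager. We can proceed"
        else
            echo "Please run soup on the manager. The manager controls all updates."
            exit
        fi
    }
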
diff --git a/upgrade/soup b/upgrade/soup
index 19fa0203f..068782f04 100644
--- a/upgrade/soup
+++ b/upgrade/soup
@@ -19,7 +19,7 @@ SCRIPTDIR=$(dirname "$0")
source $SCRIPTDIR/so-update-functions
# Update Packages
-master_check
+manager_check
update_all_packages
update_held_packages
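
The entry point is unchanged by the rename: soup is still invoked on the
manager (typically as root), and manager_check aborts early on any other node
type before the package updates run:

    # On the manager:
    sudo soup
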