diff --git a/README.md b/README.md
index b425ee490..1cff4b355 100644
--- a/README.md
+++ b/README.md
@@ -57,7 +57,7 @@
- Fixed an issue where geoip was not properly parsed.
- ATT&CK Navigator is now it's own state.
- Standlone mode is now supported.
-- Mastersearch previously used the same Grafana dashboard as a Search node. It now has its own dashboard that incorporates panels from the Master node and Search node dashboards.
+- Managersearch previously used the same Grafana dashboard as a Search node. It now has its own dashboard that incorporates panels from the Manager node and Search node dashboards.
### Known Issues:
diff --git a/files/firewall/assigned_hostgroups.local.map.yaml b/files/firewall/assigned_hostgroups.local.map.yaml
index 6dff82823..5d9b662b6 100644
--- a/files/firewall/assigned_hostgroups.local.map.yaml
+++ b/files/firewall/assigned_hostgroups.local.map.yaml
@@ -13,8 +13,8 @@ role:
fleet:
heavynode:
helixsensor:
- master:
- mastersearch:
+ manager:
+ managersearch:
standalone:
searchnode:
sensor:
\ No newline at end of file
diff --git a/files/firewall/hostgroups.local.yaml b/files/firewall/hostgroups.local.yaml
index 6426ae207..794105627 100644
--- a/files/firewall/hostgroups.local.yaml
+++ b/files/firewall/hostgroups.local.yaml
@@ -24,7 +24,7 @@ firewall:
ips:
delete:
insert:
- master:
+ manager:
ips:
delete:
insert:
diff --git a/pillar/docker/config.sls b/pillar/docker/config.sls
index f8426b8cb..dd73f3aa9 100644
--- a/pillar/docker/config.sls
+++ b/pillar/docker/config.sls
@@ -1,12 +1,12 @@
-{%- set FLEETMASTER = salt['pillar.get']('static:fleet_master', False) -%}
+{%- set FLEETMANAGER = salt['pillar.get']('static:fleet_manager', False) -%}
{%- set FLEETNODE = salt['pillar.get']('static:fleet_node', False) -%}
-{% set WAZUH = salt['pillar.get']('master:wazuh', '0') %}
-{% set THEHIVE = salt['pillar.get']('master:thehive', '0') %}
-{% set PLAYBOOK = salt['pillar.get']('master:playbook', '0') %}
-{% set FREQSERVER = salt['pillar.get']('master:freq', '0') %}
-{% set DOMAINSTATS = salt['pillar.get']('master:domainstats', '0') %}
+{% set WAZUH = salt['pillar.get']('manager:wazuh', '0') %}
+{% set THEHIVE = salt['pillar.get']('manager:thehive', '0') %}
+{% set PLAYBOOK = salt['pillar.get']('manager:playbook', '0') %}
+{% set FREQSERVER = salt['pillar.get']('manager:freq', '0') %}
+{% set DOMAINSTATS = salt['pillar.get']('manager:domainstats', '0') %}
{% set BROVER = salt['pillar.get']('static:broversion', 'COMMUNITY') %}
-{% set GRAFANA = salt['pillar.get']('master:grafana', '0') %}
+{% set GRAFANA = salt['pillar.get']('manager:grafana', '0') %}
eval:
containers:
@@ -20,7 +20,7 @@ eval:
- so-soc
- so-kratos
- so-idstools
- {% if FLEETMASTER %}
+ {% if FLEETMANAGER %}
- so-mysql
- so-fleet
- so-redis
@@ -83,7 +83,7 @@ hot_node:
- so-logstash
- so-elasticsearch
- so-curator
-master_search:
+manager_search:
containers:
- so-nginx
- so-telegraf
@@ -99,7 +99,7 @@ master_search:
- so-elastalert
- so-filebeat
- so-soctopus
- {% if FLEETMASTER %}
+ {% if FLEETMANAGER %}
- so-mysql
- so-fleet
- so-redis
@@ -122,7 +122,7 @@ master_search:
{% if DOMAINSTATS != '0' %}
- so-domainstats
{% endif %}
-master:
+manager:
containers:
- so-dockerregistry
- so-nginx
@@ -141,7 +141,7 @@ master:
- so-kibana
- so-elastalert
- so-filebeat
- {% if FLEETMASTER %}
+ {% if FLEETMANAGER %}
- so-mysql
- so-fleet
- so-redis
diff --git a/pillar/firewall/ports.sls b/pillar/firewall/ports.sls
index 29f711c13..4f7c06bec 100644
--- a/pillar/firewall/ports.sls
+++ b/pillar/firewall/ports.sls
@@ -17,7 +17,7 @@ firewall:
- 5644
- 9822
udp:
- master:
+ manager:
ports:
tcp:
- 1514
diff --git a/pillar/logstash/eval.sls b/pillar/logstash/eval.sls
index 39a87dc77..fcdd13bb7 100644
--- a/pillar/logstash/eval.sls
+++ b/pillar/logstash/eval.sls
@@ -16,6 +16,14 @@ logstash:
- so/9600_output_ossec.conf.jinja
- so/9700_output_strelka.conf.jinja
templates:
- - so/so-beats-template.json
+ - so/so-beats-template.json.jinja
- so/so-common-template.json
- - so/so-zeek-template.json
+ - so/so-firewall-template.json.jinja
+ - so/so-flow-template.json.jinja
+ - so/so-ids-template.json.jinja
+ - so/so-import-template.json.jinja
+ - so/so-osquery-template.json.jinja
+ - so/so-ossec-template.json.jinja
+ - so/so-strelka-template.json.jinja
+ - so/so-syslog-template.json.jinja
+ - so/so-zeek-template.json.jinja
diff --git a/pillar/logstash/master.sls b/pillar/logstash/manager.sls
similarity index 92%
rename from pillar/logstash/master.sls
rename to pillar/logstash/manager.sls
index 1ff41b43c..9c16d2625 100644
--- a/pillar/logstash/master.sls
+++ b/pillar/logstash/manager.sls
@@ -1,6 +1,6 @@
logstash:
pipelines:
- master:
+ manager:
config:
- so/0009_input_beats.conf
- so/0010_input_hhbeats.conf
diff --git a/pillar/logstash/search.sls b/pillar/logstash/search.sls
index 6b3d0422e..9c069fd20 100644
--- a/pillar/logstash/search.sls
+++ b/pillar/logstash/search.sls
@@ -12,5 +12,14 @@ logstash:
- so/9600_output_ossec.conf.jinja
- so/9700_output_strelka.conf.jinja
templates:
+ - so/so-beats-template.json.jinja
- so/so-common-template.json
- - so/so-zeek-template.json
+ - so/so-firewall-template.json.jinja
+ - so/so-flow-template.json.jinja
+ - so/so-ids-template.json.jinja
+ - so/so-import-template.json.jinja
+ - so/so-osquery-template.json.jinja
+ - so/so-ossec-template.json.jinja
+ - so/so-strelka-template.json.jinja
+ - so/so-syslog-template.json.jinja
+ - so/so-zeek-template.json.jinja
diff --git a/pillar/top.sls b/pillar/top.sls
index a691cf028..6eba800a9 100644
--- a/pillar/top.sls
+++ b/pillar/top.sls
@@ -6,10 +6,10 @@ base:
- match: compound
- zeek
- '*_mastersearch or *_heavynode':
+ '*_managersearch or *_heavynode':
- match: compound
- logstash
- - logstash.master
+ - logstash.manager
- logstash.search
'*_sensor':
@@ -18,16 +18,16 @@ base:
- healthcheck.sensor
- minions.{{ grains.id }}
- '*_master or *_mastersearch':
+ '*_manager or *_managersearch':
- match: compound
- static
- data.*
- secrets
- minions.{{ grains.id }}
- '*_master':
+ '*_manager':
- logstash
- - logstash.master
+ - logstash.manager
'*_eval':
- static
@@ -39,7 +39,7 @@ base:
'*_standalone':
- logstash
- - logstash.master
+ - logstash.manager
- logstash.search
- data.*
- brologs
diff --git a/salt/_modules/telegraf.py b/salt/_modules/telegraf.py
index 6fa33f89a..aa98af039 100644
--- a/salt/_modules/telegraf.py
+++ b/salt/_modules/telegraf.py
@@ -6,7 +6,7 @@ import socket
def send(data):
- mainint = __salt__['pillar.get']('sensor:mainint', __salt__['pillar.get']('master:mainint'))
+ mainint = __salt__['pillar.get']('sensor:mainint', __salt__['pillar.get']('manager:mainint'))
mainip = __salt__['grains.get']('ip_interfaces').get(mainint)[0]
dstport = 8094
diff --git a/salt/ca/files/signing_policies.conf b/salt/ca/files/signing_policies.conf
index e253f8911..b25a9935b 100644
--- a/salt/ca/files/signing_policies.conf
+++ b/salt/ca/files/signing_policies.conf
@@ -26,7 +26,7 @@ x509_signing_policies:
- extendedKeyUsage: serverAuth
- days_valid: 820
- copypath: /etc/pki/issued_certs/
- masterssl:
+ managerssl:
- minions: '*'
- signing_private_key: /etc/pki/ca.key
- signing_cert: /etc/pki/ca.crt
diff --git a/salt/ca/init.sls b/salt/ca/init.sls
index 60d7adb3a..da442cc2a 100644
--- a/salt/ca/init.sls
+++ b/salt/ca/init.sls
@@ -1,4 +1,4 @@
-{% set master = salt['grains.get']('master') %}
+{% set manager = salt['grains.get']('master') %}
/etc/salt/minion.d/signing_policies.conf:
file.managed:
- source: salt://ca/files/signing_policies.conf
@@ -20,7 +20,7 @@ pki_private_key:
/etc/pki/ca.crt:
x509.certificate_managed:
- signing_private_key: /etc/pki/ca.key
- - CN: {{ master }}
+ - CN: {{ manager }}
- C: US
- ST: Utah
- L: Salt Lake City
diff --git a/salt/common/init.sls b/salt/common/init.sls
index 0ecba198d..ef558d3e5 100644
--- a/salt/common/init.sls
+++ b/salt/common/init.sls
@@ -1,3 +1,5 @@
+{% set role = grains.id.split('_') | last %}
+
# Add socore Group
socoregroup:
group.present:
@@ -131,3 +133,15 @@ utilsyncscripts:
- file_mode: 755
- template: jinja
- source: salt://common/tools/sbin
+
+{% if role in ['eval', 'standalone', 'sensor', 'heavynode'] %}
+# Add sensor cleanup
+/usr/sbin/so-sensor-clean:
+ cron.present:
+ - user: root
+ - minute: '*'
+ - hour: '*'
+ - daymonth: '*'
+ - month: '*'
+ - dayweek: '*'
+{% endif %}
diff --git a/salt/common/maps/fleet_master.map.jinja b/salt/common/maps/fleet_manager.map.jinja
similarity index 100%
rename from salt/common/maps/fleet_master.map.jinja
rename to salt/common/maps/fleet_manager.map.jinja
diff --git a/salt/common/maps/master.map.jinja b/salt/common/maps/manager.map.jinja
similarity index 100%
rename from salt/common/maps/master.map.jinja
rename to salt/common/maps/manager.map.jinja
diff --git a/salt/common/maps/mastersearch.map.jinja b/salt/common/maps/managersearch.map.jinja
similarity index 100%
rename from salt/common/maps/mastersearch.map.jinja
rename to salt/common/maps/managersearch.map.jinja
diff --git a/salt/common/maps/so-status.map.jinja b/salt/common/maps/so-status.map.jinja
index f67f4bcd6..f30291f90 100644
--- a/salt/common/maps/so-status.map.jinja
+++ b/salt/common/maps/so-status.map.jinja
@@ -18,14 +18,14 @@
}
},grain='id', merge=salt['pillar.get']('docker')) %}
-{% if role in ['eval', 'mastersearch', 'master', 'standalone'] %}
- {{ append_containers('master', 'grafana', 0) }}
- {{ append_containers('static', 'fleet_master', 0) }}
- {{ append_containers('master', 'wazuh', 0) }}
- {{ append_containers('master', 'thehive', 0) }}
- {{ append_containers('master', 'playbook', 0) }}
- {{ append_containers('master', 'freq', 0) }}
- {{ append_containers('master', 'domainstats', 0) }}
+{% if role in ['eval', 'managersearch', 'manager', 'standalone'] %}
+ {{ append_containers('manager', 'grafana', 0) }}
+ {{ append_containers('static', 'fleet_manager', 0) }}
+ {{ append_containers('manager', 'wazuh', 0) }}
+ {{ append_containers('manager', 'thehive', 0) }}
+ {{ append_containers('manager', 'playbook', 0) }}
+ {{ append_containers('manager', 'freq', 0) }}
+ {{ append_containers('manager', 'domainstats', 0) }}
{% endif %}
{% if role in ['eval', 'heavynode', 'sensor', 'standalone'] %}
@@ -37,7 +37,7 @@
{% endif %}
{% if role == 'searchnode' %}
- {{ append_containers('master', 'wazuh', 0) }}
+ {{ append_containers('manager', 'wazuh', 0) }}
{% endif %}
{% if role == 'sensor' %}
diff --git a/salt/common/tools/sbin/so-bro-logs b/salt/common/tools/sbin/so-bro-logs
index 173d23029..4f55eb7f4 100755
--- a/salt/common/tools/sbin/so-bro-logs
+++ b/salt/common/tools/sbin/so-bro-logs
@@ -11,7 +11,7 @@ bro_logs_enabled() {
}
-whiptail_master_adv_service_brologs() {
+whiptail_manager_adv_service_brologs() {
BLOGS=$(whiptail --title "Security Onion Setup" --checklist "Please Select Logs to Send:" 24 78 12 \
"conn" "Connection Logging" ON \
@@ -54,5 +54,5 @@ whiptail_master_adv_service_brologs() {
"x509" "x.509 Logs" ON 3>&1 1>&2 2>&3 )
}
-whiptail_master_adv_service_brologs
+whiptail_manager_adv_service_brologs
bro_logs_enabled
diff --git a/salt/common/tools/sbin/so-docker-refresh b/salt/common/tools/sbin/so-docker-refresh
index d4796818d..bd9993570 100644
--- a/salt/common/tools/sbin/so-docker-refresh
+++ b/salt/common/tools/sbin/so-docker-refresh
@@ -21,13 +21,13 @@ got_root(){
fi
}
-master_check() {
- # Check to see if this is a master
- MASTERCHECK=$(cat /etc/salt/grains | grep role | awk '{print $2}')
- if [ $MASTERCHECK == 'so-eval' ] || [ $MASTERCHECK == 'so-master' ] || [ $MASTERCHECK == 'so-mastersearch' ] || [ $MASTERCHECK == 'so-standalone' ] || [ $MASTERCHECK == 'so-helix' ]; then
- echo "This is a master. We can proceed"
+manager_check() {
+ # Check to see if this is a manager
+ MANAGERCHECK=$(cat /etc/salt/grains | grep role | awk '{print $2}')
+ if [ $MANAGERCHECK == 'so-eval' ] || [ $MANAGERCHECK == 'so-manager' ] || [ $MANAGERCHECK == 'so-managersearch' ] || [ $MANAGERCHECK == 'so-standalone' ] || [ $MANAGERCHECK == 'so-helix' ]; then
+ echo "This is a manager. We can proceed"
else
- echo "Please run soup on the master. The master controls all updates."
+ echo "Please run soup on the manager. The manager controls all updates."
exit 1
fi
}
@@ -56,13 +56,13 @@ version_check() {
fi
}
got_root
-master_check
+manager_check
version_check
# Use the hostname
HOSTNAME=$(hostname)
# List all the containers
-if [ $MASTERCHECK != 'so-helix' ]; then
+if [ $MANAGERCHECK != 'so-helix' ]; then
TRUSTED_CONTAINERS=( \
"so-acng:$VERSION" \
"so-thehive-cortex:$VERSION" \
diff --git a/salt/common/tools/sbin/so-elastalert-create b/salt/common/tools/sbin/so-elastalert-create
index 0270503bf..683b53ed1 100755
--- a/salt/common/tools/sbin/so-elastalert-create
+++ b/salt/common/tools/sbin/so-elastalert-create
@@ -198,7 +198,7 @@ EOF
read alertoption
if [ $alertoption = "1" ] ; then
- echo "Please enter the email address you want to send the alerts to. Note: Ensure the Master Server is configured for SMTP."
+ echo "Please enter the email address you want to send the alerts to. Note: Ensure the Manager Server is configured for SMTP."
read emailaddress
cat << EOF >> "$rulename.yaml"
# (Required)
diff --git a/salt/common/tools/sbin/so-elastic-clear b/salt/common/tools/sbin/so-elastic-clear
index f7030bc13..04c153f85 100755
--- a/salt/common/tools/sbin/so-elastic-clear
+++ b/salt/common/tools/sbin/so-elastic-clear
@@ -14,7 +14,7 @@
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
-{%- set MASTERIP = salt['pillar.get']('static:masterip', '') -%}
+{%- set MANAGERIP = salt['pillar.get']('static:managerip', '') -%}
. /usr/sbin/so-common
SKIP=0
@@ -50,7 +50,7 @@ done
if [ $SKIP -ne 1 ]; then
# List indices
echo
- curl {{ MASTERIP }}:9200/_cat/indices?v
+ curl {{ MANAGERIP }}:9200/_cat/indices?v
echo
# Inform user we are about to delete all data
echo
@@ -89,10 +89,10 @@ fi
# Delete data
echo "Deleting data..."
-INDXS=$(curl -s -XGET {{ MASTERIP }}:9200/_cat/indices?v | egrep 'logstash|elastalert|so-' | awk '{ print $3 }')
+INDXS=$(curl -s -XGET {{ MANAGERIP }}:9200/_cat/indices?v | egrep 'logstash|elastalert|so-' | awk '{ print $3 }')
for INDX in ${INDXS}
do
- curl -XDELETE "{{ MASTERIP }}:9200/${INDX}" > /dev/null 2>&1
+ curl -XDELETE "{{ MANAGERIP }}:9200/${INDX}" > /dev/null 2>&1
done
#Start Logstash/Filebeat
diff --git a/salt/common/tools/sbin/so-elastic-download b/salt/common/tools/sbin/so-elastic-download
index 4c3406c74..b52d88c45 100755
--- a/salt/common/tools/sbin/so-elastic-download
+++ b/salt/common/tools/sbin/so-elastic-download
@@ -1,5 +1,5 @@
#!/bin/bash
-MASTER=MASTER
+MANAGER=MANAGER
VERSION="HH1.1.4"
TRUSTED_CONTAINERS=( \
"so-nginx:$VERSION" \
@@ -37,7 +37,7 @@ do
echo "Downloading $i"
docker pull --disable-content-trust=false docker.io/soshybridhunter/$i
# Tag it with the new registry destination
- docker tag soshybridhunter/$i $MASTER:5000/soshybridhunter/$i
- docker push $MASTER:5000/soshybridhunter/$i
+ docker tag soshybridhunter/$i $MANAGER:5000/soshybridhunter/$i
+ docker push $MANAGER:5000/soshybridhunter/$i
docker rmi soshybridhunter/$i
done
diff --git a/salt/common/tools/sbin/so-elasticsearch-indices-rw b/salt/common/tools/sbin/so-elasticsearch-indices-rw
index d49fd5f1b..6e9eebe47 100644
--- a/salt/common/tools/sbin/so-elasticsearch-indices-rw
+++ b/salt/common/tools/sbin/so-elasticsearch-indices-rw
@@ -15,7 +15,7 @@
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
-IP={{ salt['grains.get']('ip_interfaces').get(salt['pillar.get']('sensor:mainint', salt['pillar.get']('master:mainint', salt['pillar.get']('elasticsearch:mainint', salt['pillar.get']('host:mainint')))))[0] }}
+IP={{ salt['grains.get']('ip_interfaces').get(salt['pillar.get']('sensor:mainint', salt['pillar.get']('manager:mainint', salt['pillar.get']('elasticsearch:mainint', salt['pillar.get']('host:mainint')))))[0] }}
ESPORT=9200
THEHIVEESPORT=9400
diff --git a/salt/common/tools/sbin/so-elasticsearch-templates b/salt/common/tools/sbin/so-elasticsearch-templates
index 829e2a68d..6b3e19d30 100755
--- a/salt/common/tools/sbin/so-elasticsearch-templates
+++ b/salt/common/tools/sbin/so-elasticsearch-templates
@@ -1,4 +1,4 @@
-{% set MASTERIP = salt['pillar.get']('master:mainip', '') %}
+{% set MANAGERIP = salt['pillar.get']('manager:mainip', '') %}
#!/bin/bash
# Copyright 2014,2015,2016,2017,2018,2019 Security Onion Solutions, LLC
#
@@ -16,7 +16,7 @@
# along with this program. If not, see .
default_salt_dir=/opt/so/saltstack/default
-ELASTICSEARCH_HOST="{{ MASTERIP}}"
+ELASTICSEARCH_HOST="{{ MANAGERIP}}"
ELASTICSEARCH_PORT=9200
#ELASTICSEARCH_AUTH=""
diff --git a/salt/common/tools/sbin/so-import-pcap b/salt/common/tools/sbin/so-import-pcap
index 9e54820e0..02b7ffedc 100755
--- a/salt/common/tools/sbin/so-import-pcap
+++ b/salt/common/tools/sbin/so-import-pcap
@@ -15,28 +15,29 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
-{% set MASTER = salt['grains.get']('master') %}
+{% set MANAGER = salt['grains.get']('master') %}
{% set VERSION = salt['pillar.get']('static:soversion') %}
-{%- set MASTERIP = salt['pillar.get']('static:masterip') -%}
+{%- set MANAGERIP = salt['pillar.get']('static:managerip') -%}
function usage {
cat << EOF
Usage: $0 [pcap-file-2] [pcap-file-N]
-Imports one or more PCAP files for analysis. If available, curator will be automatically stopped.
+Imports one or more PCAP files onto a sensor node. The PCAP traffic will be analyzed and
+made available for review in the Security Onion toolset.
EOF
}
function pcapinfo() {
PCAP=$1
ARGS=$2
- docker run --rm -v $PCAP:/input.pcap --entrypoint capinfos {{ MASTER }}:5000/soshybridhunter/so-pcaptools:{{ VERSION }} /input.pcap $ARGS
+ docker run --rm -v $PCAP:/input.pcap --entrypoint capinfos {{ MANAGER }}:5000/soshybridhunter/so-pcaptools:{{ VERSION }} /input.pcap $ARGS
}
function pcapfix() {
PCAP=$1
PCAP_OUT=$2
- docker run --rm -v $PCAP:/input.pcap -v $PCAP_OUT:$PCAP_OUT --entrypoint pcapfix {{ MASTER }}:5000/soshybridhunter/so-pcaptools:{{ VERSION }} /input.pcap -o $PCAP_OUT > /dev/null 2>&1
+ docker run --rm -v $PCAP:/input.pcap -v $PCAP_OUT:$PCAP_OUT --entrypoint pcapfix {{ MANAGER }}:5000/soshybridhunter/so-pcaptools:{{ VERSION }} /input.pcap -o $PCAP_OUT > /dev/null 2>&1
}
function suricata() {
@@ -57,7 +58,7 @@ function suricata() {
-v ${NSM_PATH}/:/nsm/:rw \
-v $PCAP:/input.pcap:ro \
-v /opt/so/conf/suricata/bpf:/etc/suricata/bpf:ro \
- {{ MASTER }}:5000/soshybridhunter/so-suricata:{{ VERSION }} \
+ {{ MANAGER }}:5000/soshybridhunter/so-suricata:{{ VERSION }} \
--runmode single -k none -r /input.pcap > $LOG_PATH/console.log 2>&1
}
@@ -85,7 +86,7 @@ function zeek() {
-v /opt/so/conf/zeek/bpf:/opt/zeek/etc/bpf:ro \
--entrypoint /opt/zeek/bin/zeek \
-w /nsm/zeek/logs \
- {{ MASTER }}:5000/soshybridhunter/so-zeek:{{ VERSION }} \
+ {{ MANAGER }}:5000/soshybridhunter/so-zeek:{{ VERSION }} \
-C -r /input.pcap local > $NSM_PATH/logs/console.log 2>&1
}
@@ -110,14 +111,6 @@ for i in "$@"; do
fi
done
-if ! [ -d /opt/so/conf/curator ]; then
- echo "Curator is not installed on this node and cannot be stopped automatically."
-else
- echo -n "Stopping curator..."
- so-curator-stop > /dev/null 2>&1
- echo "Done"
-fi
-
# track if we have any valid or invalid pcaps
INVALID_PCAPS="no"
VALID_PCAPS="no"
@@ -206,17 +199,20 @@ if [ "$INVALID_PCAPS" = "yes" ]; then
echo "Please note! One or more pcaps was invalid! You can scroll up to see which ones were invalid."
fi
+START_OLDEST_SLASH=$(echo $START_OLDEST | sed -e 's/-/%2F/g')
+END_NEWEST_SLASH=$(echo $END_NEWEST | sed -e 's/-/%2F/g')
+
if [ "$VALID_PCAPS" = "yes" ]; then
cat << EOF
Import complete!
You can use the following hyperlink to view data in the time range of your import. You can triple-click to quickly highlight the entire hyperlink and you can then copy it into your browser:
-https://{{ MASTERIP }}/kibana/app/kibana#/dashboard/a8411b30-6d03-11ea-b301-3d6c35840645?_g=(refreshInterval:(display:Off,pause:!f,value:0),time:(from:'${START_OLDEST}T00:00:00.000Z',mode:absolute,to:'${END_NEWEST}T00:00:00.000Z'))
+https://{{ MANAGERIP }}/kibana/app/kibana#/dashboard/a8411b30-6d03-11ea-b301-3d6c35840645?_g=(refreshInterval:(display:Off,pause:!f,value:0),time:(from:'${START_OLDEST}T00:00:00.000Z',mode:absolute,to:'${END_NEWEST}T00:00:00.000Z'))
or you can manually set your Time Range to be:
From: $START_OLDEST To: $END_NEWEST
-Please note that it may take 30 seconds or more for events to appear in Kibana.
+Please note that it may take 30 seconds or more for events to appear in Onion Hunt.
EOF
fi
diff --git a/salt/common/tools/sbin/so-kibana-config-export b/salt/common/tools/sbin/so-kibana-config-export
index f64e12a0e..8ee3f59b5 100755
--- a/salt/common/tools/sbin/so-kibana-config-export
+++ b/salt/common/tools/sbin/so-kibana-config-export
@@ -1,9 +1,9 @@
#!/bin/bash
#
-# {%- set FLEET_MASTER = salt['pillar.get']('static:fleet_master', False) -%}
+# {%- set FLEET_MANAGER = salt['pillar.get']('static:fleet_manager', False) -%}
# {%- set FLEET_NODE = salt['pillar.get']('static:fleet_node', False) -%}
# {%- set FLEET_IP = salt['pillar.get']('static:fleet_ip', '') %}
-# {%- set MASTER = salt['pillar.get']('master:url_base', '') %}
+# {%- set MANAGER = salt['pillar.get']('manager:url_base', '') %}
#
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
@@ -20,7 +20,7 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
-KIBANA_HOST={{ MASTER }}
+KIBANA_HOST={{ MANAGER }}
KSO_PORT=5601
OUTFILE="saved_objects.ndjson"
curl -s -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -XPOST $KIBANA_HOST:$KSO_PORT/api/saved_objects/_export -d '{ "type": [ "index-pattern", "config", "visualization", "dashboard", "search" ], "excludeExportDetails": false }' > $OUTFILE
@@ -29,7 +29,7 @@ curl -s -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -XPOST $KIBANA_H
sed -i "s/$KIBANA_HOST/PLACEHOLDER/g" $OUTFILE
# Clean up for Fleet, if applicable
-# {% if FLEET_NODE or FLEET_MASTER %}
+# {% if FLEET_NODE or FLEET_MANAGER %}
# Fleet IP
-sed -i "s/{{ MASTER }}/FLEETPLACEHOLDER/g" $OUTFILE
+sed -i "s/{{ MANAGER }}/FLEETPLACEHOLDER/g" $OUTFILE
# {% endif %}
diff --git a/salt/common/tools/sbin/so-sensor-clean b/salt/common/tools/sbin/so-sensor-clean
new file mode 100644
index 000000000..886b16fcd
--- /dev/null
+++ b/salt/common/tools/sbin/so-sensor-clean
@@ -0,0 +1,121 @@
+#!/bin/bash
+
+# Delete Zeek Logs based on defined CRIT_DISK_USAGE value
+
+# Copyright 2014,2015,2016,2017,2018, 2019 Security Onion Solutions, LLC
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+
+SENSOR_DIR='/nsm'
+CRIT_DISK_USAGE=90
+CUR_USAGE=$(df -P $SENSOR_DIR | tail -1 | awk '{print $5}' | tr -d %)
+LOG="/opt/so/log/sensor_clean.log"
+TODAY=$(date -u "+%Y-%m-%d")
+
+clean () {
+ ## find the oldest Zeek logs directory
+ OLDEST_DIR=$(ls /nsm/zeek/logs/ | grep -v "current" | grep -v "stats" | grep -v "packetloss" | grep -v "zeek_clean" | sort | head -n 1)
+ if [ -z "$OLDEST_DIR" -o "$OLDEST_DIR" == ".." -o "$OLDEST_DIR" == "." ]
+ then
+ echo "$(date) - No old Zeek logs available to clean up in /nsm/zeek/logs/" >> $LOG
+ #exit 0
+ else
+ echo "$(date) - Removing directory: /nsm/zeek/logs/$OLDEST_DIR" >> $LOG
+ rm -rf /nsm/zeek/logs/"$OLDEST_DIR"
+ fi
+
+
+ ## Remarking for now, as we are moving extracted files to /nsm/strelka/processed
+ ## find oldest files in extracted directory and exclude today
+ #OLDEST_EXTRACT=$(find /nsm/zeek/extracted/complete -type f -printf '%T+ %p\n' 2>/dev/null | sort | grep -v $TODAY | head -n 1)
+ #if [ -z "$OLDEST_EXTRACT" -o "$OLDEST_EXTRACT" == ".." -o "$OLDEST_EXTRACT" == "." ]
+ #then
+ # echo "$(date) - No old extracted files available to clean up in /nsm/zeek/extracted/complete" >> $LOG
+ #else
+ # OLDEST_EXTRACT_DATE=`echo $OLDEST_EXTRACT | awk '{print $1}' | cut -d+ -f1`
+ # OLDEST_EXTRACT_FILE=`echo $OLDEST_EXTRACT | awk '{print $2}'`
+ # echo "$(date) - Removing extracted files for $OLDEST_EXTRACT_DATE" >> $LOG
+ # find /nsm/zeek/extracted/complete -type f -printf '%T+ %p\n' | grep $OLDEST_EXTRACT_DATE | awk '{print $2}' |while read FILE
+ # do
+ # echo "$(date) - Removing extracted file: $FILE" >> $LOG
+ # rm -f "$FILE"
+ # done
+ #fi
+
+ ## Clean up Zeek extracted files processed by Strelka
+ STRELKA_FILES='/nsm/strelka/processed'
+ OLDEST_STRELKA=$(find $STRELKA_FILES -type f -printf '%T+ %p\n' | sort -n | head -n 1 )
+ if [ -z "$OLDEST_STRELKA" -o "$OLDEST_STRELKA" == ".." -o "$OLDEST_STRELKA" == "." ]
+ then
+ echo "$(date) - No old files available to clean up in $STRELKA_FILES" >> $LOG
+ else
+ OLDEST_STRELKA_DATE=`echo $OLDEST_STRELKA | awk '{print $1}' | cut -d+ -f1`
+ OLDEST_STRELKA_FILE=`echo $OLDEST_STRELKA | awk '{print $2}'`
+ echo "$(date) - Removing extracted files for $OLDEST_STRELKA_DATE" >> $LOG
+ find $STRELKA_FILES -type f -printf '%T+ %p\n' | grep $OLDEST_STRELKA_DATE | awk '{print $2}' |while read FILE
+ do
+ echo "$(date) - Removing file: $FILE" >> $LOG
+ rm -f "$FILE"
+ done
+ fi
+
+ ## Clean up Suricata log files
+ SURICATA_LOGS='/nsm/suricata'
+ OLDEST_SURICATA=$(find $SURICATA_LOGS -type f -printf '%T+ %p\n' | sort -n | head -n 1)
+ if [ -z "$OLDEST_SURICATA" -o "$OLDEST_SURICATA" == ".." -o "$OLDEST_SURICATA" == "." ]
+ then
+ echo "$(date) - No old files available to clean up in $SURICATA_LOGS" >> $LOG
+ else
+ OLDEST_SURICATA_DATE=`echo $OLDEST_SURICATA | awk '{print $1}' | cut -d+ -f1`
+ OLDEST_SURICATA_FILE=`echo $OLDEST_SURICATA | awk '{print $2}'`
+ echo "$(date) - Removing logs for $OLDEST_SURICATA_DATE" >> $LOG
+ find $SURICATA_LOGS -type f -printf '%T+ %p\n' | grep $OLDEST_SURICATA_DATE | awk '{print $2}' |while read FILE
+ do
+ echo "$(date) - Removing file: $FILE" >> $LOG
+ rm -f "$FILE"
+ done
+ fi
+
+ ## Clean up extracted pcaps from Steno
+ PCAPS='/nsm/pcapout'
+ OLDEST_PCAP=$(find $PCAPS -type f -printf '%T+ %p\n' | sort -n | head -n 1 )
+ if [ -z "$OLDEST_PCAP" -o "$OLDEST_PCAP" == ".." -o "$OLDEST_PCAP" == "." ]
+ then
+ echo "$(date) - No old files available to clean up in $PCAPS" >> $LOG
+ else
+ OLDEST_PCAP_DATE=`echo $OLDEST_PCAP | awk '{print $1}' | cut -d+ -f1`
+ OLDEST_PCAP_FILE=`echo $OLDEST_PCAP | awk '{print $2}'`
+ echo "$(date) - Removing extracted files for $OLDEST_PCAP_DATE" >> $LOG
+ find $PCAPS -type f -printf '%T+ %p\n' | grep $OLDEST_PCAP_DATE | awk '{print $2}' |while read FILE
+ do
+ echo "$(date) - Removing file: $FILE" >> $LOG
+ rm -f "$FILE"
+ done
+ fi
+}
+
+# Check to see if we are already running
+IS_RUNNING=$(ps aux | grep "so-sensor-clean" | grep -v grep | wc -l)
+[ "$IS_RUNNING" -gt 2 ] && echo "$(date) - $IS_RUNNING sensor clean script processes running...exiting." >> $LOG && exit 0
+
+if [ "$CUR_USAGE" -gt "$CRIT_DISK_USAGE" ]; then
+ while [ "$CUR_USAGE" -gt "$CRIT_DISK_USAGE" ];
+ do
+ clean
+ CUR_USAGE=$(df -P $SENSOR_DIR | tail -1 | awk '{print $5}' | tr -d %)
+ done
+else
+ echo "$(date) - Current usage value of $CUR_USAGE not greater than CRIT_DISK_USAGE value of $CRIT_DISK_USAGE..." >> $LOG
+fi
+
diff --git a/salt/curator/files/action/delete.yml b/salt/curator/files/action/delete.yml
index f24f0b781..fb3945c1d 100644
--- a/salt/curator/files/action/delete.yml
+++ b/salt/curator/files/action/delete.yml
@@ -1,8 +1,4 @@
-{%- if grains['role'] in ['so-node', 'so-searchnode', 'so-heavynode'] %}
- {%- set log_size_limit = salt['pillar.get']('elasticsearch:log_size_limit', '') -%}
-{%- elif grains['role'] in ['so-eval', 'so-mastersearch', 'so-standalone'] %}
- {%- set log_size_limit = salt['pillar.get']('master:log_size_limit', '') -%}
-{%- endif %}
+{%- set log_size_limit = salt['pillar.get']('elasticsearch:log_size_limit', '') -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
diff --git a/salt/curator/files/action/so-beats-close.yml b/salt/curator/files/action/so-beats-close.yml
new file mode 100644
index 000000000..dbbcca1c8
--- /dev/null
+++ b/salt/curator/files/action/so-beats-close.yml
@@ -0,0 +1,29 @@
+{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-beats:close', 30) -%}
+---
+# Remember, leave a key empty if there is no value. None will be a string,
+# not a Python "NoneType"
+#
+# Also remember that all examples have 'disable_action' set to True. If you
+# want to use this action as a template, be sure to set this to False after
+# copying it.
+actions:
+ 1:
+ action: close
+ description: >-
+ Close Beats indices older than {{cur_close_days}} days.
+ options:
+ delete_aliases: False
+ timeout_override:
+ continue_if_exception: False
+ disable_action: False
+ filters:
+ - filtertype: pattern
+ kind: regex
+ value: '^(logstash-beats.*|so-beats.*)$'
+ - filtertype: age
+ source: name
+ direction: older
+ timestring: '%Y.%m.%d'
+ unit: days
+ unit_count: {{cur_close_days}}
+ exclude:
diff --git a/salt/curator/files/action/close.yml b/salt/curator/files/action/so-firewall-close.yml
similarity index 58%
rename from salt/curator/files/action/close.yml
rename to salt/curator/files/action/so-firewall-close.yml
index d0bd1d5d1..46f0b39a9 100644
--- a/salt/curator/files/action/close.yml
+++ b/salt/curator/files/action/so-firewall-close.yml
@@ -1,9 +1,4 @@
-{%- if grains['role'] in ['so-node', 'so-searchnode', 'so-heavynode'] %}
- {%- set cur_close_days = salt['pillar.get']('elasticsearch:cur_close_days', '') -%}
-{%- elif grains['role'] in ['so-eval', 'so-mastersearch', 'so-standalone'] %}
- {%- set cur_close_days = salt['pillar.get']('master:cur_close_days', '') -%}
-{%- endif -%}
-
+{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-firewall:close', 30) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
@@ -15,8 +10,7 @@ actions:
1:
action: close
description: >-
- Close indices older than {{cur_close_days}} days (based on index name), for logstash-
- prefixed indices.
+ Close Firewall indices older than {{cur_close_days}} days.
options:
delete_aliases: False
timeout_override:
@@ -25,7 +19,7 @@ actions:
filters:
- filtertype: pattern
kind: regex
- value: '^(logstash-.*|so-.*)$'
+ value: '^(logstash-firewall.*|so-firewall.*)$'
- filtertype: age
source: name
direction: older
diff --git a/salt/curator/files/action/so-ids-close.yml b/salt/curator/files/action/so-ids-close.yml
new file mode 100644
index 000000000..89f08d8d1
--- /dev/null
+++ b/salt/curator/files/action/so-ids-close.yml
@@ -0,0 +1,29 @@
+{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-ids:close', 30) -%}
+---
+# Remember, leave a key empty if there is no value. None will be a string,
+# not a Python "NoneType"
+#
+# Also remember that all examples have 'disable_action' set to True. If you
+# want to use this action as a template, be sure to set this to False after
+# copying it.
+actions:
+ 1:
+ action: close
+ description: >-
+ Close IDS indices older than {{cur_close_days}} days.
+ options:
+ delete_aliases: False
+ timeout_override:
+ continue_if_exception: False
+ disable_action: False
+ filters:
+ - filtertype: pattern
+ kind: regex
+ value: '^(logstash-ids.*|so-ids.*)$'
+ - filtertype: age
+ source: name
+ direction: older
+ timestring: '%Y.%m.%d'
+ unit: days
+ unit_count: {{cur_close_days}}
+ exclude:
diff --git a/salt/curator/files/action/so-import-close.yml b/salt/curator/files/action/so-import-close.yml
new file mode 100644
index 000000000..b9ee6e5da
--- /dev/null
+++ b/salt/curator/files/action/so-import-close.yml
@@ -0,0 +1,29 @@
+{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-import:close', 30) -%}
+---
+# Remember, leave a key empty if there is no value. None will be a string,
+# not a Python "NoneType"
+#
+# Also remember that all examples have 'disable_action' set to True. If you
+# want to use this action as a template, be sure to set this to False after
+# copying it.
+actions:
+ 1:
+ action: close
+ description: >-
+ Close Import indices older than {{cur_close_days}} days.
+ options:
+ delete_aliases: False
+ timeout_override:
+ continue_if_exception: False
+ disable_action: False
+ filters:
+ - filtertype: pattern
+ kind: regex
+ value: '^(logstash-import.*|so-import.*)$'
+ - filtertype: age
+ source: name
+ direction: older
+ timestring: '%Y.%m.%d'
+ unit: days
+ unit_count: {{cur_close_days}}
+ exclude:
diff --git a/salt/curator/files/action/so-osquery-close.yml b/salt/curator/files/action/so-osquery-close.yml
new file mode 100644
index 000000000..152a41afa
--- /dev/null
+++ b/salt/curator/files/action/so-osquery-close.yml
@@ -0,0 +1,29 @@
+{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-osquery:close', 30) -%}
+---
+# Remember, leave a key empty if there is no value. None will be a string,
+# not a Python "NoneType"
+#
+# Also remember that all examples have 'disable_action' set to True. If you
+# want to use this action as a template, be sure to set this to False after
+# copying it.
+actions:
+ 1:
+ action: close
+ description: >-
+ Close osquery indices older than {{cur_close_days}} days.
+ options:
+ delete_aliases: False
+ timeout_override:
+ continue_if_exception: False
+ disable_action: False
+ filters:
+ - filtertype: pattern
+ kind: regex
+ value: '^(logstash-osquery.*|so-osquery.*)$'
+ - filtertype: age
+ source: name
+ direction: older
+ timestring: '%Y.%m.%d'
+ unit: days
+ unit_count: {{cur_close_days}}
+ exclude:
diff --git a/salt/curator/files/action/so-ossec-close.yml b/salt/curator/files/action/so-ossec-close.yml
new file mode 100644
index 000000000..5ee8c91de
--- /dev/null
+++ b/salt/curator/files/action/so-ossec-close.yml
@@ -0,0 +1,29 @@
+{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-ossec:close', 30) -%}
+---
+# Remember, leave a key empty if there is no value. None will be a string,
+# not a Python "NoneType"
+#
+# Also remember that all examples have 'disable_action' set to True. If you
+# want to use this action as a template, be sure to set this to False after
+# copying it.
+actions:
+ 1:
+ action: close
+ description: >-
+ Close ossec indices older than {{cur_close_days}} days.
+ options:
+ delete_aliases: False
+ timeout_override:
+ continue_if_exception: False
+ disable_action: False
+ filters:
+ - filtertype: pattern
+ kind: regex
+ value: '^(logstash-ossec.*|so-ossec.*)$'
+ - filtertype: age
+ source: name
+ direction: older
+ timestring: '%Y.%m.%d'
+ unit: days
+ unit_count: {{cur_close_days}}
+ exclude:
diff --git a/salt/curator/files/action/so-strelka-close.yml b/salt/curator/files/action/so-strelka-close.yml
new file mode 100644
index 000000000..a07ab94e8
--- /dev/null
+++ b/salt/curator/files/action/so-strelka-close.yml
@@ -0,0 +1,29 @@
+{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-strelka:close', 30) -%}
+---
+# Remember, leave a key empty if there is no value. None will be a string,
+# not a Python "NoneType"
+#
+# Also remember that all examples have 'disable_action' set to True. If you
+# want to use this action as a template, be sure to set this to False after
+# copying it.
+actions:
+ 1:
+ action: close
+ description: >-
+ Close Strelka indices older than {{cur_close_days}} days.
+ options:
+ delete_aliases: False
+ timeout_override:
+ continue_if_exception: False
+ disable_action: False
+ filters:
+ - filtertype: pattern
+ kind: regex
+ value: '^(logstash-strelka.*|so-strelka.*)$'
+ - filtertype: age
+ source: name
+ direction: older
+ timestring: '%Y.%m.%d'
+ unit: days
+ unit_count: {{cur_close_days}}
+ exclude:
diff --git a/salt/curator/files/action/so-syslog-close.yml b/salt/curator/files/action/so-syslog-close.yml
new file mode 100644
index 000000000..3aae50566
--- /dev/null
+++ b/salt/curator/files/action/so-syslog-close.yml
@@ -0,0 +1,29 @@
+{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-syslog:close', 30) -%}
+---
+# Remember, leave a key empty if there is no value. None will be a string,
+# not a Python "NoneType"
+#
+# Also remember that all examples have 'disable_action' set to True. If you
+# want to use this action as a template, be sure to set this to False after
+# copying it.
+actions:
+ 1:
+ action: close
+ description: >-
+ Close syslog indices older than {{cur_close_days}} days.
+ options:
+ delete_aliases: False
+ timeout_override:
+ continue_if_exception: False
+ disable_action: False
+ filters:
+ - filtertype: pattern
+ kind: regex
+ value: '^(logstash-syslog.*|so-syslog.*)$'
+ - filtertype: age
+ source: name
+ direction: older
+ timestring: '%Y.%m.%d'
+ unit: days
+ unit_count: {{cur_close_days}}
+ exclude:
diff --git a/salt/curator/files/action/so-zeek-close.yml b/salt/curator/files/action/so-zeek-close.yml
new file mode 100644
index 000000000..ec1ab9eff
--- /dev/null
+++ b/salt/curator/files/action/so-zeek-close.yml
@@ -0,0 +1,29 @@
+{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-zeek:close', 30) -%}
+---
+# Remember, leave a key empty if there is no value. None will be a string,
+# not a Python "NoneType"
+#
+# Also remember that all examples have 'disable_action' set to True. If you
+# want to use this action as a template, be sure to set this to False after
+# copying it.
+actions:
+ 1:
+ action: close
+ description: >-
+ Close Zeek indices older than {{cur_close_days}} days.
+ options:
+ delete_aliases: False
+ timeout_override:
+ continue_if_exception: False
+ disable_action: False
+ filters:
+ - filtertype: pattern
+ kind: regex
+ value: '^(logstash-zeek.*|so-zeek.*)$'
+ - filtertype: age
+ source: name
+ direction: older
+ timestring: '%Y.%m.%d'
+ unit: days
+ unit_count: {{cur_close_days}}
+ exclude:
diff --git a/salt/curator/files/bin/so-curator-closed-delete-delete b/salt/curator/files/bin/so-curator-closed-delete-delete
index 4a6458394..0d894db2f 100755
--- a/salt/curator/files/bin/so-curator-closed-delete-delete
+++ b/salt/curator/files/bin/so-curator-closed-delete-delete
@@ -5,10 +5,10 @@
{%- set ELASTICSEARCH_HOST = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- set ELASTICSEARCH_PORT = salt['pillar.get']('elasticsearch:es_port', '') -%}
{%- set LOG_SIZE_LIMIT = salt['pillar.get']('elasticsearch:log_size_limit', '') -%}
-{%- elif grains['role'] in ['so-eval', 'so-mastersearch', 'so-standalone'] %}
- {%- set ELASTICSEARCH_HOST = salt['pillar.get']('master:mainip', '') -%}
- {%- set ELASTICSEARCH_PORT = salt['pillar.get']('master:es_port', '') -%}
- {%- set LOG_SIZE_LIMIT = salt['pillar.get']('master:log_size_limit', '') -%}
+{%- elif grains['role'] in ['so-eval', 'so-managersearch', 'so-standalone'] %}
+ {%- set ELASTICSEARCH_HOST = salt['pillar.get']('manager:mainip', '') -%}
+ {%- set ELASTICSEARCH_PORT = salt['pillar.get']('manager:es_port', '') -%}
+ {%- set LOG_SIZE_LIMIT = salt['pillar.get']('manager:log_size_limit', '') -%}
{%- endif -%}
# Copyright 2014,2015,2016,2017,2018 Security Onion Solutions, LLC
diff --git a/salt/curator/files/curator.yml b/salt/curator/files/curator.yml
index e9b8a63ba..3b019923e 100644
--- a/salt/curator/files/curator.yml
+++ b/salt/curator/files/curator.yml
@@ -1,7 +1,7 @@
{% if grains['role'] in ['so-node', 'so-heavynode'] %}
{%- set elasticsearch = salt['pillar.get']('elasticsearch:mainip', '') -%}
-{% elif grains['role'] in ['so-eval', 'so-mastersearch', 'so-standalone'] %}
- {%- set elasticsearch = salt['pillar.get']('master:mainip', '') -%}
+{% elif grains['role'] in ['so-eval', 'so-managersearch', 'so-standalone'] %}
+ {%- set elasticsearch = salt['pillar.get']('manager:mainip', '') -%}
{%- endif %}
---
diff --git a/salt/curator/init.sls b/salt/curator/init.sls
index 8d3147242..0896e0c6a 100644
--- a/salt/curator/init.sls
+++ b/salt/curator/init.sls
@@ -1,6 +1,6 @@
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
-{% set MASTER = salt['grains.get']('master') %}
-{% if grains['role'] in ['so-eval', 'so-node', 'so-mastersearch', 'so-heavynode', 'so-standalone'] %}
+{% set MANAGER = salt['grains.get']('master') %}
+{% if grains['role'] in ['so-eval', 'so-node', 'so-managersearch', 'so-heavynode', 'so-standalone'] %}
# Curator
# Create the group
curatorgroup:
@@ -30,18 +30,10 @@ curlogdir:
- user: 934
- group: 939
-curcloseconf:
- file.managed:
- - name: /opt/so/conf/curator/action/close.yml
- - source: salt://curator/files/action/close.yml
- - user: 934
- - group: 939
- - template: jinja
-
-curdelconf:
- file.managed:
- - name: /opt/so/conf/curator/action/delete.yml
- - source: salt://curator/files/action/delete.yml
+actionconfs:
+ file.recurse:
+ - name: /opt/so/conf/curator/action
+ - source: salt://curator/files/action
- user: 934
- group: 939
- template: jinja
@@ -119,7 +111,7 @@ so-curatordeletecron:
so-curator:
docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-curator:{{ VERSION }}
+ - image: {{ MANAGER }}:5000/soshybridhunter/so-curator:{{ VERSION }}
- hostname: curator
- name: so-curator
- user: curator
diff --git a/salt/deprecated-bro/files/local.bro b/salt/deprecated-bro/files/local.bro
index afe4b94ca..30b216548 100644
--- a/salt/deprecated-bro/files/local.bro
+++ b/salt/deprecated-bro/files/local.bro
@@ -127,11 +127,11 @@
@load policy/hassh
# You can load your own intel into:
-# /opt/so/saltstack/bro/policy/intel/ on the master
+# /opt/so/saltstack/bro/policy/intel/ on the manager
@load intel
# Load a custom Bro policy
-# /opt/so/saltstack/bro/policy/custom/ on the master
+# /opt/so/saltstack/bro/policy/custom/ on the manager
#@load custom/somebropolicy.bro
# Write logs in JSON
diff --git a/salt/deprecated-bro/files/local.bro.community b/salt/deprecated-bro/files/local.bro.community
index 2ae12d7f2..76b18587f 100644
--- a/salt/deprecated-bro/files/local.bro.community
+++ b/salt/deprecated-bro/files/local.bro.community
@@ -121,11 +121,11 @@
@load policy/ja3
# You can load your own intel into:
-# /opt/so/saltstack/bro/policy/intel/ on the master
+# /opt/so/saltstack/bro/policy/intel/ on the manager
@load intel
# Load a custom Bro policy
-# /opt/so/saltstack/bro/policy/custom/ on the master
+# /opt/so/saltstack/bro/policy/custom/ on the manager
#@load custom/somebropolicy.bro
# Use JSON
diff --git a/salt/elastalert/files/elastalert_config.yaml b/salt/elastalert/files/elastalert_config.yaml
index 2ecf08ffa..7646e8221 100644
--- a/salt/elastalert/files/elastalert_config.yaml
+++ b/salt/elastalert/files/elastalert_config.yaml
@@ -1,5 +1,5 @@
-{% set esip = salt['pillar.get']('master:mainip', '') %}
-{% set esport = salt['pillar.get']('master:es_port', '') %}
+{% set esip = salt['pillar.get']('manager:mainip', '') %}
+{% set esport = salt['pillar.get']('manager:es_port', '') %}
# This is the folder that contains the rule yaml files
# Any .yaml file will be loaded as a rule
rules_folder: /opt/elastalert/rules/
@@ -86,3 +86,25 @@ alert_time_limit:
index_settings:
shards: 1
replicas: 0
+
+logging:
+ version: 1
+ incremental: false
+ disable_existing_loggers: false
+ formatters:
+ logline:
+ format: '%(asctime)s %(levelname)+8s %(name)+20s %(message)s'
+
+ handlers:
+ file:
+ class : logging.FileHandler
+ formatter: logline
+ level: INFO
+ filename: /var/log/elastalert/elastalert.log
+
+ loggers:
+ '':
+ level: INFO
+ handlers:
+ - file
+ propagate: false
diff --git a/salt/elastalert/files/rules/so/suricata_thehive.yaml b/salt/elastalert/files/rules/so/suricata_thehive.yaml
index 82698b7a8..cd887c9f9 100644
--- a/salt/elastalert/files/rules/so/suricata_thehive.yaml
+++ b/salt/elastalert/files/rules/so/suricata_thehive.yaml
@@ -1,7 +1,7 @@
-{% set es = salt['pillar.get']('static:masterip', '') %}
-{% set hivehost = salt['pillar.get']('static:masterip', '') %}
+{% set es = salt['pillar.get']('static:managerip', '') %}
+{% set hivehost = salt['pillar.get']('static:managerip', '') %}
{% set hivekey = salt['pillar.get']('static:hivekey', '') %}
-{% set MASTER = salt['pillar.get']('master:url_base', '') %}
+{% set MANAGER = salt['pillar.get']('manager:url_base', '') %}
# Elastalert rule to forward Suricata alerts from Security Onion to a specified TheHive instance.
#
@@ -39,7 +39,7 @@ hive_alert_config:
title: '{match[rule][name]}'
type: 'NIDS'
source: 'SecurityOnion'
- description: "`SOC Hunt Pivot:` \n\n \n\n `Kibana Dashboard Pivot:` \n\n \n\n `IPs: `{match[source][ip]}:{match[source][port]} --> {match[destination][ip]}:{match[destination][port]} \n\n `Signature:`{match[rule][rule]}"
+ description: "`SOC Hunt Pivot:` \n\n \n\n `Kibana Dashboard Pivot:` \n\n \n\n `IPs: `{match[source][ip]}:{match[source][port]} --> {match[destination][ip]}:{match[destination][port]} \n\n `Signature:`{match[rule][rule]}"
severity: 2
tags: ['{match[rule][uuid]}','{match[source][ip]}','{match[destination][ip]}']
tlp: 3
diff --git a/salt/elastalert/files/rules/so/wazuh_thehive.yaml b/salt/elastalert/files/rules/so/wazuh_thehive.yaml
index 1e275dce8..ccb79e1e5 100644
--- a/salt/elastalert/files/rules/so/wazuh_thehive.yaml
+++ b/salt/elastalert/files/rules/so/wazuh_thehive.yaml
@@ -1,7 +1,7 @@
-{% set es = salt['pillar.get']('static:masterip', '') %}
-{% set hivehost = salt['pillar.get']('static:masterip', '') %}
+{% set es = salt['pillar.get']('static:managerip', '') %}
+{% set hivehost = salt['pillar.get']('static:managerip', '') %}
{% set hivekey = salt['pillar.get']('static:hivekey', '') %}
-{% set MASTER = salt['pillar.get']('master:url_base', '') %}
+{% set MANAGER = salt['pillar.get']('manager:url_base', '') %}
# Elastalert rule to forward high level Wazuh alerts from Security Onion to a specified TheHive instance.
#
@@ -38,7 +38,7 @@ hive_alert_config:
title: '{match[rule][name]}'
type: 'wazuh'
source: 'SecurityOnion'
- description: "`SOC Hunt Pivot:` \n\n \n\n `Kibana Dashboard Pivot:` \n\n "
+ description: "`SOC Hunt Pivot:` \n\n \n\n `Kibana Dashboard Pivot:` \n\n "
severity: 2
tags: ['{match[rule][id]}','{match[host][name]}']
tlp: 3
diff --git a/salt/elastalert/init.sls b/salt/elastalert/init.sls
index b79acf77f..9bfc8ded4 100644
--- a/salt/elastalert/init.sls
+++ b/salt/elastalert/init.sls
@@ -13,12 +13,12 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
-{% set MASTER = salt['grains.get']('master') %}
+{% set MANAGER = salt['grains.get']('master') %}
-{% if grains['role'] in ['so-eval','so-mastersearch', 'so-master', 'so-standalone'] %}
- {% set esalert = salt['pillar.get']('master:elastalert', '1') %}
- {% set esip = salt['pillar.get']('master:mainip', '') %}
- {% set esport = salt['pillar.get']('master:es_port', '') %}
+{% if grains['role'] in ['so-eval','so-managersearch', 'so-manager', 'so-standalone'] %}
+ {% set esalert = salt['pillar.get']('manager:elastalert', '1') %}
+ {% set esip = salt['pillar.get']('manager:mainip', '') %}
+ {% set esport = salt['pillar.get']('manager:es_port', '') %}
{% elif grains['role'] == 'so-node' %}
{% set esalert = salt['pillar.get']('elasticsearch:elastalert', '0') %}
{% endif %}
@@ -101,7 +101,7 @@ elastaconf:
so-elastalert:
docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-elastalert:{{ VERSION }}
+ - image: {{ MANAGER }}:5000/soshybridhunter/so-elastalert:{{ VERSION }}
- hostname: elastalert
- name: so-elastalert
- user: elastalert
diff --git a/salt/elasticsearch/files/elasticsearch.yml b/salt/elasticsearch/files/elasticsearch.yml
index 81603967b..4d5d5b2e4 100644
--- a/salt/elasticsearch/files/elasticsearch.yml
+++ b/salt/elasticsearch/files/elasticsearch.yml
@@ -1,6 +1,11 @@
-{% if grains['role'] == 'so-master' or grains['role'] == 'so-eval' %}
-{%- set esclustername = salt['pillar.get']('master:esclustername', '') %}
-cluster.name: "{{ esclustername }}"
+{%- set NODE_ROUTE_TYPE = salt['pillar.get']('elasticsearch:node_route_type', 'hot') %}
+{%- if salt['pillar.get']('elasticsearch:hot_warm_enabled') or salt['pillar.get']('elasticsearch:true_cluster') %}
+{%- set ESCLUSTERNAME = salt['pillar.get']('elasticsearch:true_cluster_name', '') %}
+{%- else %}
+{%- set ESCLUSTERNAME = salt['pillar.get']('elasticsearch:esclustername', '') %}
+{%- endif %}
+{%- set NODEIP = salt['pillar.get']('elasticsearch:mainip', '') -%}
+cluster.name: "{{ ESCLUSTERNAME }}"
network.host: 0.0.0.0
# minimum_master_nodes need to be explicitly set when bound on a public IP
@@ -10,20 +15,13 @@ discovery.zen.minimum_master_nodes: 1
# This is a test -- if this is here, then the volume is mounted correctly.
path.logs: /var/log/elasticsearch
action.destructive_requires_name: true
-{%- else %}
-{%- set esclustername = salt['grains.get']('host', '') %}
-{%- set nodeip = salt['pillar.get']('elasticsearch:mainip', '') -%}
-cluster.name: "{{ esclustername }}"
-network.host: 0.0.0.0
-discovery.zen.minimum_master_nodes: 1
-path.logs: /var/log/elasticsearch
-action.destructive_requires_name: true
transport.bind_host: 0.0.0.0
-transport.publish_host: {{ nodeip }}
+transport.publish_host: {{ NODEIP }}
transport.publish_port: 9300
-{%- endif %}
cluster.routing.allocation.disk.threshold_enabled: true
cluster.routing.allocation.disk.watermark.low: 95%
cluster.routing.allocation.disk.watermark.high: 98%
cluster.routing.allocation.disk.watermark.flood_stage: 98%
-script.max_compilations_rate: 1000/1m
\ No newline at end of file
+node.attr.box_type: {{ NODE_ROUTE_TYPE }}
+node.name: {{ ESCLUSTERNAME }}
+script.max_compilations_rate: 1000/1m
diff --git a/salt/elasticsearch/init.sls b/salt/elasticsearch/init.sls
index 7292c055e..14cc38434 100644
--- a/salt/elasticsearch/init.sls
+++ b/salt/elasticsearch/init.sls
@@ -13,7 +13,7 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
-{% set MASTER = salt['grains.get']('master') %}
+{% set MANAGER = salt['grains.get']('master') %}
{% set FEATURES = salt['pillar.get']('elastic:features', False) %}
{% if FEATURES %}
@@ -22,9 +22,9 @@
{% set FEATURES = '' %}
{% endif %}
-{% if grains['role'] in ['so-eval','so-mastersearch', 'so-master', 'so-standalone'] %}
- {% set esclustername = salt['pillar.get']('master:esclustername', '') %}
- {% set esheap = salt['pillar.get']('master:esheap', '') %}
+{% if grains['role'] in ['so-eval','so-managersearch', 'so-manager', 'so-standalone'] %}
+ {% set esclustername = salt['pillar.get']('manager:esclustername', '') %}
+ {% set esheap = salt['pillar.get']('manager:esheap', '') %}
{% elif grains['role'] in ['so-node','so-heavynode'] %}
{% set esclustername = salt['pillar.get']('elasticsearch:esclustername', '') %}
{% set esheap = salt['pillar.get']('elasticsearch:esheap', '') %}
@@ -101,7 +101,7 @@ eslogdir:
so-elasticsearch:
docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-elasticsearch:{{ VERSION }}{{ FEATURES }}
+ - image: {{ MANAGER }}:5000/soshybridhunter/so-elasticsearch:{{ VERSION }}{{ FEATURES }}
- hostname: elasticsearch
- name: so-elasticsearch
- user: elasticsearch
@@ -141,7 +141,7 @@ so-elasticsearch-pipelines:
- file: esyml
- file: so-elasticsearch-pipelines-file
-{% if grains['role'] in ['so-master', 'so-eval', 'so-mastersearch', 'so-standalone'] %}
+{% if grains['role'] in ['so-manager', 'so-eval', 'so-managersearch', 'so-standalone'] %}
so-elasticsearch-templates:
cmd.run:
- name: /usr/sbin/so-elasticsearch-templates
diff --git a/salt/filebeat/etc/filebeat.yml b/salt/filebeat/etc/filebeat.yml
index 6d33c1bdf..6aeac7bba 100644
--- a/salt/filebeat/etc/filebeat.yml
+++ b/salt/filebeat/etc/filebeat.yml
@@ -1,7 +1,7 @@
{%- if grains.role == 'so-heavynode' %}
-{%- set MASTER = salt['pillar.get']('sensor:mainip' '') %}
+{%- set MANAGER = salt['pillar.get']('sensor:mainip', '') %}
{%- else %}
-{%- set MASTER = grains['master'] %}
+{%- set MANAGER = salt['grains.get']('master') %}
{%- endif %}
@@ -9,7 +9,7 @@
{%- set BROVER = salt['pillar.get']('static:broversion', 'COMMUNITY') %}
{%- set WAZUHENABLED = salt['pillar.get']('static:wazuh', '0') %}
{%- set STRELKAENABLED = salt['pillar.get']('strelka:enabled', '0') %}
-{%- set FLEETMASTER = salt['pillar.get']('static:fleet_master', False) -%}
+{%- set FLEETMANAGER = salt['pillar.get']('static:fleet_manager', False) -%}
{%- set FLEETNODE = salt['pillar.get']('static:fleet_node', False) -%}
name: {{ HOSTNAME }}
@@ -126,6 +126,8 @@ filebeat.inputs:
category: network
imported: true
processors:
+ - add_tags:
+ tags: [import]
- dissect:
tokenizer: "/nsm/import/%{import.id}/zeek/logs/%{import.file}"
field: "log.file.path"
@@ -164,6 +166,8 @@ filebeat.inputs:
category: network
imported: true
processors:
+ - add_tags:
+ tags: [import]
- dissect:
tokenizer: "/nsm/import/%{import.id}/suricata/%{import.file}"
field: "log.file.path"
@@ -214,7 +218,7 @@ filebeat.inputs:
{%- endif %}
-{%- if FLEETMASTER or FLEETNODE %}
+{%- if FLEETMANAGER or FLEETNODE %}
- type: log
paths:
@@ -252,7 +256,7 @@ output.{{ type }}:
{%- if grains['role'] == "so-eval" %}
output.elasticsearch:
enabled: true
- hosts: ["{{ MASTER }}:9200"]
+ hosts: ["{{ MANAGER }}:9200"]
pipelines:
- pipeline: "%{[module]}.%{[dataset]}"
indices:
@@ -280,7 +284,7 @@ output.logstash:
enabled: true
# The Logstash hosts
- hosts: ["{{ MASTER }}:5644"]
+ hosts: ["{{ MANAGER }}:5644"]
# Number of workers per Logstash host.
#worker: 1
diff --git a/salt/filebeat/init.sls b/salt/filebeat/init.sls
index 6fc06f582..8a2b868ce 100644
--- a/salt/filebeat/init.sls
+++ b/salt/filebeat/init.sls
@@ -12,8 +12,8 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
-{% set MASTER = salt['grains.get']('master') %}
-{% set MASTERIP = salt['pillar.get']('static:masterip', '') %}
+{% set MANAGER = salt['grains.get']('master') %}
+{% set MANAGERIP = salt['pillar.get']('static:managerip', '') %}
{% set FEATURES = salt['pillar.get']('elastic:features', False) %}
{% if FEATURES %}
{% set FEATURES = "-features" %}
@@ -51,10 +51,10 @@ filebeatconfsync:
OUTPUT: {{ salt['pillar.get']('filebeat:config:output', {}) }}
so-filebeat:
docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-filebeat:{{ VERSION }}{{ FEATURES }}
+ - image: {{ MANAGER }}:5000/soshybridhunter/so-filebeat:{{ VERSION }}{{ FEATURES }}
- hostname: so-filebeat
- user: root
- - extra_hosts: {{ MASTER }}:{{ MASTERIP }}
+ - extra_hosts: {{ MANAGER }}:{{ MANAGERIP }}
- binds:
- /nsm:/nsm:ro
- /opt/so/log/filebeat:/usr/share/filebeat/logs:rw
diff --git a/salt/firewall/assigned_hostgroups.map.yaml b/salt/firewall/assigned_hostgroups.map.yaml
index 07f7d1650..2500c604a 100644
--- a/salt/firewall/assigned_hostgroups.map.yaml
+++ b/salt/firewall/assigned_hostgroups.map.yaml
@@ -6,7 +6,7 @@ role:
chain:
DOCKER-USER:
hostgroups:
- master:
+ manager:
portgroups:
- {{ portgroups.wazuh_agent }}
- {{ portgroups.wazuh_api }}
@@ -85,12 +85,12 @@ role:
- {{ portgroups.all }}
minion:
portgroups:
- - {{ portgroups.salt_master }}
- master:
+ - {{ portgroups.salt_manager }}
+ manager:
chain:
DOCKER-USER:
hostgroups:
- master:
+ manager:
portgroups:
- {{ portgroups.wazuh_agent }}
- {{ portgroups.wazuh_api }}
@@ -166,12 +166,12 @@ role:
- {{ portgroups.all }}
minion:
portgroups:
- - {{ portgroups.salt_master }}
- mastersearch:
+ - {{ portgroups.salt_manager }}
+ managersearch:
chain:
DOCKER-USER:
hostgroups:
- master:
+ manager:
portgroups:
- {{ portgroups.wazuh_agent }}
- {{ portgroups.wazuh_api }}
@@ -247,12 +247,12 @@ role:
- {{ portgroups.all }}
minion:
portgroups:
- - {{ portgroups.salt_master }}
+ - {{ portgroups.salt_manager }}
standalone:
chain:
DOCKER-USER:
hostgroups:
- master:
+ manager:
portgroups:
- {{ portgroups.wazuh_agent }}
- {{ portgroups.wazuh_api }}
@@ -328,12 +328,12 @@ role:
- {{ portgroups.all }}
minion:
portgroups:
- - {{ portgroups.salt_master }}
+ - {{ portgroups.salt_manager }}
helixsensor:
chain:
DOCKER-USER:
hostgroups:
- master:
+ manager:
portgroups:
- {{ portgroups.wazuh_agent }}
- {{ portgroups.playbook }}
@@ -391,12 +391,12 @@ role:
- {{ portgroups.all }}
minion:
portgroups:
- - {{ portgroups.salt_master }}
+ - {{ portgroups.salt_manager }}
searchnode:
chain:
DOCKER-USER:
hostgroups:
- master:
+ manager:
portgroups:
- {{ portgroups.elasticsearch_node }}
dockernet:
diff --git a/salt/firewall/hostgroups.yaml b/salt/firewall/hostgroups.yaml
index bd303001b..5ff6b900b 100644
--- a/salt/firewall/hostgroups.yaml
+++ b/salt/firewall/hostgroups.yaml
@@ -19,4 +19,4 @@ firewall:
ips:
delete:
insert:
- - {{ salt['grains.get']('ip_interfaces').get(salt['pillar.get']('sensor:mainint', salt['pillar.get']('master:mainint', salt['pillar.get']('elasticsearch:mainint', salt['pillar.get']('host:mainint')))))[0] }}
\ No newline at end of file
+ - {{ salt['grains.get']('ip_interfaces').get(salt['pillar.get']('sensor:mainint', salt['pillar.get']('manager:mainint', salt['pillar.get']('elasticsearch:mainint', salt['pillar.get']('host:mainint')))))[0] }}
\ No newline at end of file
diff --git a/salt/firewall/portgroups.yaml b/salt/firewall/portgroups.yaml
index 7f09d1139..b8d86f253 100644
--- a/salt/firewall/portgroups.yaml
+++ b/salt/firewall/portgroups.yaml
@@ -61,7 +61,7 @@ firewall:
redis:
tcp:
- 6379
- salt_master:
+ salt_manager:
tcp:
- 4505
- 4506
diff --git a/salt/fleet/event_gen-packages.sls b/salt/fleet/event_gen-packages.sls
index e353eaf92..1cf7e331a 100644
--- a/salt/fleet/event_gen-packages.sls
+++ b/salt/fleet/event_gen-packages.sls
@@ -1,4 +1,4 @@
-{% set MASTER = salt['grains.get']('master') %}
+{% set MANAGER = salt['grains.get']('master') %}
{% set ENROLLSECRET = salt['pillar.get']('secrets:fleet_enroll-secret') %}
{% set CURRENTPACKAGEVERSION = salt['pillar.get']('static:fleet_packages-version') %}
{% set VERSION = salt['pillar.get']('static:soversion') %}
@@ -19,6 +19,6 @@ so/fleet:
mainip: {{ grains.host }}
enroll-secret: {{ ENROLLSECRET }}
current-package-version: {{ CURRENTPACKAGEVERSION }}
- master: {{ MASTER }}
+ manager: {{ MANAGER }}
version: {{ VERSION }}
\ No newline at end of file
diff --git a/salt/fleet/init.sls b/salt/fleet/init.sls
index 65f32e213..7858ca298 100644
--- a/salt/fleet/init.sls
+++ b/salt/fleet/init.sls
@@ -2,14 +2,14 @@
{%- set FLEETPASS = salt['pillar.get']('secrets:fleet', None) -%}
{%- set FLEETJWT = salt['pillar.get']('secrets:fleet_jwt', None) -%}
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
-{% set MASTER = salt['grains.get']('master') %}
+{% set MANAGER = salt['grains.get']('master') %}
{% set FLEETARCH = salt['grains.get']('role') %}
{% if FLEETARCH == "so-fleet" %}
{% set MAININT = salt['pillar.get']('host:mainint') %}
{% set MAINIP = salt['grains.get']('ip_interfaces').get(MAININT)[0] %}
{% else %}
- {% set MAINIP = salt['pillar.get']('static:masterip') %}
+ {% set MAINIP = salt['pillar.get']('static:managerip') %}
{% endif %}
include:
@@ -105,7 +105,7 @@ fleet_password_none:
so-fleet:
docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-fleet:{{ VERSION }}
+ - image: {{ MANAGER }}:5000/soshybridhunter/so-fleet:{{ VERSION }}
- hostname: so-fleet
- port_bindings:
- 0.0.0.0:8080:8080
diff --git a/salt/fleet/install_package.sls b/salt/fleet/install_package.sls
index 3787d6111..d09de540c 100644
--- a/salt/fleet/install_package.sls
+++ b/salt/fleet/install_package.sls
@@ -1,4 +1,4 @@
-{%- set FLEETMASTER = salt['pillar.get']('static:fleet_master', False) -%}
+{%- set FLEETMANAGER = salt['pillar.get']('static:fleet_manager', False) -%}
{%- set FLEETNODE = salt['pillar.get']('static:fleet_node', False) -%}
{%- set FLEETHOSTNAME = salt['pillar.get']('static:fleet_hostname', False) -%}
{%- set FLEETIP = salt['pillar.get']('static:fleet_ip', False) -%}
diff --git a/salt/grafana/dashboards/master/master.json b/salt/grafana/dashboards/manager/manager.json
similarity index 99%
rename from salt/grafana/dashboards/master/master.json
rename to salt/grafana/dashboards/manager/manager.json
index d1b2bf899..bf2580d34 100644
--- a/salt/grafana/dashboards/master/master.json
+++ b/salt/grafana/dashboards/manager/manager.json
@@ -12,7 +12,7 @@
}
]
},
- "description": "This Dashboard provides a general overview of the Master",
+ "description": "This Dashboard provides a general overview of the Manager",
"editable": true,
"gnetId": 2381,
"graphTooltip": 0,
@@ -4162,7 +4162,7 @@
]
},
"timezone": "browser",
- "title": "Master Node - {{ SERVERNAME }} Overview",
+ "title": "Manager Node - {{ SERVERNAME }} Overview",
"uid": "{{ UID }}",
"version": 3
}
\ No newline at end of file
diff --git a/salt/grafana/dashboards/mastersearch/mastersearch.json b/salt/grafana/dashboards/managersearch/managersearch.json
similarity index 99%
rename from salt/grafana/dashboards/mastersearch/mastersearch.json
rename to salt/grafana/dashboards/managersearch/managersearch.json
index 5ed9f9617..485509477 100644
--- a/salt/grafana/dashboards/mastersearch/mastersearch.json
+++ b/salt/grafana/dashboards/managersearch/managersearch.json
@@ -13,7 +13,7 @@
}
]
},
- "description": "This Dashboard provides a general overview of a MasterSearch Node",
+ "description": "This Dashboard provides a general overview of a ManagerSearch Node",
"editable": true,
"gnetId": 2381,
"graphTooltip": 0,
diff --git a/salt/grafana/etc/dashboards/dashboard.yml b/salt/grafana/etc/dashboards/dashboard.yml
index 9ae71e6a4..72f77f845 100644
--- a/salt/grafana/etc/dashboards/dashboard.yml
+++ b/salt/grafana/etc/dashboards/dashboard.yml
@@ -3,20 +3,20 @@ apiVersion: 1
providers:
{%- if grains['role'] != 'so-eval' %}
-- name: 'Master'
- folder: 'Master'
+- name: 'Manager'
+ folder: 'Manager'
type: file
disableDeletion: false
editable: true
options:
- path: /etc/grafana/grafana_dashboards/master
-- name: 'Master Search'
- folder: 'Master Search'
+ path: /etc/grafana/grafana_dashboards/manager
+- name: 'Manager Search'
+ folder: 'Manager Search'
type: file
disableDeletion: false
editable: true
options:
- path: /etc/grafana/grafana_dashboards/mastersearch
+ path: /etc/grafana/grafana_dashboards/managersearch
- name: 'Sensor Nodes'
folder: 'Sensor Nodes'
type: file
diff --git a/salt/grafana/etc/datasources/influxdb.yaml b/salt/grafana/etc/datasources/influxdb.yaml
index c9f98dc57..c70fd7137 100644
--- a/salt/grafana/etc/datasources/influxdb.yaml
+++ b/salt/grafana/etc/datasources/influxdb.yaml
@@ -1,4 +1,4 @@
-{%- set MASTER = salt['pillar.get']('static:masterip', '') %}
+{%- set MANAGER = salt['pillar.get']('static:managerip', '') %}
apiVersion: 1
deleteDatasources:
@@ -10,7 +10,7 @@ datasources:
type: influxdb
access: proxy
database: telegraf
- url: https://{{ MASTER }}:8086
+ url: https://{{ MANAGER }}:8086
jsonData:
tlsAuth: false
tlsAuthWithCACert: false
diff --git a/salt/grafana/init.sls b/salt/grafana/init.sls
index d3c457944..1f448f6f0 100644
--- a/salt/grafana/init.sls
+++ b/salt/grafana/init.sls
@@ -1,8 +1,8 @@
-{% set GRAFANA = salt['pillar.get']('master:grafana', '0') %}
-{% set MASTER = salt['grains.get']('master') %}
+{% set GRAFANA = salt['pillar.get']('manager:grafana', '0') %}
+{% set MANAGER = salt['grains.get']('master') %}
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
-{% if grains['role'] in ['so-master', 'so-mastersearch', 'so-eval', 'so-standalone'] and GRAFANA == 1 %}
+{% if grains['role'] in ['so-manager', 'so-managersearch', 'so-eval', 'so-standalone'] and GRAFANA == 1 %}
# Grafana all the things
grafanadir:
@@ -28,14 +28,14 @@ grafanadashdir:
grafanadashmdir:
file.directory:
- - name: /opt/so/conf/grafana/grafana_dashboards/master
+ - name: /opt/so/conf/grafana/grafana_dashboards/manager
- user: 939
- group: 939
- makedirs: True
grafanadashmsdir:
file.directory:
- - name: /opt/so/conf/grafana/grafana_dashboards/mastersearch
+ - name: /opt/so/conf/grafana/grafana_dashboards/managersearch
- user: 939
- group: 939
- makedirs: True
@@ -76,17 +76,17 @@ grafanaconf:
- template: jinja
- source: salt://grafana/etc
-{% if salt['pillar.get']('mastertab', False) %}
-{% for SN, SNDATA in salt['pillar.get']('mastertab', {}).items() %}
+{% if salt['pillar.get']('managertab', False) %}
+{% for SN, SNDATA in salt['pillar.get']('managertab', {}).items() %}
{% set NODETYPE = SN.split('_')|last %}
{% set SN = SN | regex_replace('_' ~ NODETYPE, '') %}
-dashboard-master:
+dashboard-manager:
file.managed:
- - name: /opt/so/conf/grafana/grafana_dashboards/master/{{ SN }}-Master.json
+ - name: /opt/so/conf/grafana/grafana_dashboards/manager/{{ SN }}-Manager.json
- user: 939
- group: 939
- template: jinja
- - source: salt://grafana/dashboards/master/master.json
+ - source: salt://grafana/dashboards/manager/manager.json
- defaults:
SERVERNAME: {{ SN }}
MANINT: {{ SNDATA.manint }}
@@ -99,17 +99,17 @@ dashboard-master:
{% endfor %}
{% endif %}
-{% if salt['pillar.get']('mastersearchtab', False) %}
-{% for SN, SNDATA in salt['pillar.get']('mastersearchtab', {}).items() %}
+{% if salt['pillar.get']('managersearchtab', False) %}
+{% for SN, SNDATA in salt['pillar.get']('managersearchtab', {}).items() %}
{% set NODETYPE = SN.split('_')|last %}
{% set SN = SN | regex_replace('_' ~ NODETYPE, '') %}
-dashboard-mastersearch:
+dashboard-managersearch:
file.managed:
- - name: /opt/so/conf/grafana/grafana_dashboards/mastersearch/{{ SN }}-MasterSearch.json
+ - name: /opt/so/conf/grafana/grafana_dashboards/managersearch/{{ SN }}-ManagerSearch.json
- user: 939
- group: 939
- template: jinja
- - source: salt://grafana/dashboards/mastersearch/mastersearch.json
+ - source: salt://grafana/dashboards/managersearch/managersearch.json
- defaults:
SERVERNAME: {{ SN }}
MANINT: {{ SNDATA.manint }}
@@ -216,7 +216,7 @@ dashboard-{{ SN }}:
so-grafana:
docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-grafana:{{ VERSION }}
+ - image: {{ MANAGER }}:5000/soshybridhunter/so-grafana:{{ VERSION }}
- hostname: grafana
- user: socore
- binds:
diff --git a/salt/idstools/init.sls b/salt/idstools/init.sls
index 078cb5b03..68d14d397 100644
--- a/salt/idstools/init.sls
+++ b/salt/idstools/init.sls
@@ -13,7 +13,7 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
-{% set MASTER = salt['grains.get']('master') %}
+{% set MANAGER = salt['grains.get']('master') %}
# IDSTools Setup
idstoolsdir:
file.directory:
@@ -60,7 +60,7 @@ synclocalnidsrules:
so-idstools:
docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-idstools:{{ VERSION }}
+ - image: {{ MANAGER }}:5000/soshybridhunter/so-idstools:{{ VERSION }}
- hostname: so-idstools
- user: socore
- binds:
diff --git a/salt/influxdb/init.sls b/salt/influxdb/init.sls
index 774db2187..6d6bfd328 100644
--- a/salt/influxdb/init.sls
+++ b/salt/influxdb/init.sls
@@ -1,9 +1,9 @@
-{% set GRAFANA = salt['pillar.get']('master:grafana', '0') %}
-{% set MASTER = salt['grains.get']('master') %}
+{% set GRAFANA = salt['pillar.get']('manager:grafana', '0') %}
+{% set MANAGER = salt['grains.get']('master') %}
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
-{% if grains['role'] in ['so-master', 'so-mastersearch', 'so-eval', 'so-standalone'] and GRAFANA == 1 %}
+{% if grains['role'] in ['so-manager', 'so-managersearch', 'so-eval', 'so-standalone'] and GRAFANA == 1 %}
# Influx DB
influxconfdir:
@@ -26,7 +26,7 @@ influxdbconf:
so-influxdb:
docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-influxdb:{{ VERSION }}
+ - image: {{ MANAGER }}:5000/soshybridhunter/so-influxdb:{{ VERSION }}
- hostname: influxdb
- environment:
- INFLUXDB_HTTP_LOG_ENABLED=false
diff --git a/salt/kibana/bin/keepkibanahappy.sh b/salt/kibana/bin/keepkibanahappy.sh
index 28967ee24..e8534ec12 100644
--- a/salt/kibana/bin/keepkibanahappy.sh
+++ b/salt/kibana/bin/keepkibanahappy.sh
@@ -1,4 +1,4 @@
-{%- set ES = salt['pillar.get']('master:mainip', '') -%}
+{%- set ES = salt['pillar.get']('manager:mainip', '') -%}
# Wait for ElasticSearch to come up, so that we can query for version infromation
echo -n "Waiting for ElasticSearch..."
COUNT=0
diff --git a/salt/kibana/bin/so-kibana-config-load b/salt/kibana/bin/so-kibana-config-load
index 81872fd6a..f59d0c10d 100644
--- a/salt/kibana/bin/so-kibana-config-load
+++ b/salt/kibana/bin/so-kibana-config-load
@@ -1,20 +1,20 @@
#!/bin/bash
-# {%- set FLEET_MASTER = salt['pillar.get']('static:fleet_master', False) -%}
+# {%- set FLEET_MANAGER = salt['pillar.get']('static:fleet_manager', False) -%}
# {%- set FLEET_NODE = salt['pillar.get']('static:fleet_node', False) -%}
-# {%- set MASTER = salt['pillar.get']('master:url_base', '') %}
+# {%- set MANAGER = salt['pillar.get']('manager:url_base', '') %}
KIBANA_VERSION="7.6.1"
# Copy template file
cp /opt/so/conf/kibana/saved_objects.ndjson.template /opt/so/conf/kibana/saved_objects.ndjson
-# {% if FLEET_NODE or FLEET_MASTER %}
+# {% if FLEET_NODE or FLEET_MANAGER %}
# Fleet IP
-sed -i "s/FLEETPLACEHOLDER/{{ MASTER }}/g" /opt/so/conf/kibana/saved_objects.ndjson
+sed -i "s/FLEETPLACEHOLDER/{{ MANAGER }}/g" /opt/so/conf/kibana/saved_objects.ndjson
# {% endif %}
-# SOCtopus and Master
-sed -i "s/PLACEHOLDER/{{ MASTER }}/g" /opt/so/conf/kibana/saved_objects.ndjson
+# SOCtopus and Manager
+sed -i "s/PLACEHOLDER/{{ MANAGER }}/g" /opt/so/conf/kibana/saved_objects.ndjson
# Load saved objects
curl -X POST "localhost:5601/api/saved_objects/_import" -H "kbn-xsrf: true" --form file=@/opt/so/conf/kibana/saved_objects.ndjson > /dev/null 2>&1
diff --git a/salt/kibana/etc/kibana.yml b/salt/kibana/etc/kibana.yml
index dd0d6faa9..4d19b251b 100644
--- a/salt/kibana/etc/kibana.yml
+++ b/salt/kibana/etc/kibana.yml
@@ -1,6 +1,6 @@
---
# Default Kibana configuration from kibana-docker.
-{%- set ES = salt['pillar.get']('master:mainip', '') -%}
+{%- set ES = salt['pillar.get']('manager:mainip', '') -%}
server.name: kibana
server.host: "0"
server.basePath: /kibana
diff --git a/salt/kibana/init.sls b/salt/kibana/init.sls
index 6379d6ad0..792f41579 100644
--- a/salt/kibana/init.sls
+++ b/salt/kibana/init.sls
@@ -1,5 +1,5 @@
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
-{% set MASTER = salt['grains.get']('master') %}
+{% set MANAGER = salt['grains.get']('master') %}
{% set FEATURES = salt['pillar.get']('elastic:features', False) %}
{% if FEATURES %}
{% set FEATURES = "-features" %}
@@ -69,13 +69,13 @@ kibanabin:
# Start the kibana docker
so-kibana:
docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-kibana:{{ VERSION }}{{ FEATURES }}
+ - image: {{ MANAGER }}:5000/soshybridhunter/so-kibana:{{ VERSION }}{{ FEATURES }}
- hostname: kibana
- user: kibana
- environment:
- - ELASTICSEARCH_HOST={{ MASTER }}
+ - ELASTICSEARCH_HOST={{ MANAGER }}
- ELASTICSEARCH_PORT=9200
- - MASTER={{ MASTER }}
+ - MANAGER={{ MANAGER }}
- binds:
- /opt/so/conf/kibana/etc:/usr/share/kibana/config:rw
- /opt/so/log/kibana:/var/log/kibana:rw
@@ -94,7 +94,7 @@ kibanadashtemplate:
wait_for_kibana:
module.run:
- http.wait_for_successful_query:
- - url: "http://{{MASTER}}:5601/api/saved_objects/_find?type=config"
+ - url: "http://{{MANAGER}}:5601/api/saved_objects/_find?type=config"
- wait_for: 180
- onchanges:
- file: kibanadashtemplate
diff --git a/salt/logstash/init.sls b/salt/logstash/init.sls
index e2494e57a..784db9525 100644
--- a/salt/logstash/init.sls
+++ b/salt/logstash/init.sls
@@ -13,7 +13,7 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
-{% set MASTER = salt['grains.get']('master') %}
+{% set MANAGER = salt['grains.get']('master') %}
{% set FEATURES = salt['pillar.get']('elastic:features', False) %}
{% if FEATURES %}
@@ -24,13 +24,13 @@
# Logstash Section - Decide which pillar to use
{% set lsheap = salt['pillar.get']('logstash_settings:lsheap', '') %}
-{% if grains['role'] in ['so-eval','so-mastersearch', 'so-master', 'so-standalone'] %}
- {% set freq = salt['pillar.get']('master:freq', '0') %}
- {% set dstats = salt['pillar.get']('master:domainstats', '0') %}
+{% if grains['role'] in ['so-eval','so-managersearch', 'so-manager', 'so-standalone'] %}
+ {% set freq = salt['pillar.get']('manager:freq', '0') %}
+ {% set dstats = salt['pillar.get']('manager:domainstats', '0') %}
{% set nodetype = salt['grains.get']('role', '') %}
{% elif grains['role'] == 'so-helix' %}
- {% set freq = salt['pillar.get']('master:freq', '0') %}
- {% set dstats = salt['pillar.get']('master:domainstats', '0') %}
+ {% set freq = salt['pillar.get']('manager:freq', '0') %}
+ {% set dstats = salt['pillar.get']('manager:domainstats', '0') %}
{% set nodetype = salt['grains.get']('role', '') %}
{% endif %}
@@ -159,7 +159,7 @@ lslogdir:
so-logstash:
docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-logstash:{{ VERSION }}{{ FEATURES }}
+ - image: {{ MANAGER }}:5000/soshybridhunter/so-logstash:{{ VERSION }}{{ FEATURES }}
- hostname: so-logstash
- name: so-logstash
- user: logstash
diff --git a/salt/logstash/pipelines/config/so/0900_input_redis.conf.jinja b/salt/logstash/pipelines/config/so/0900_input_redis.conf.jinja
index 4d6595dd9..2ce204875 100644
--- a/salt/logstash/pipelines/config/so/0900_input_redis.conf.jinja
+++ b/salt/logstash/pipelines/config/so/0900_input_redis.conf.jinja
@@ -1,13 +1,13 @@
{%- if grains.role == 'so-heavynode' %}
-{%- set MASTER = salt['pillar.get']('elasticsearch:mainip', '') %}
+{%- set MANAGER = salt['pillar.get']('elasticsearch:mainip', '') %}
{%- else %}
-{%- set MASTER = salt['pillar.get']('static:masterip', '') %}
+{%- set MANAGER = salt['pillar.get']('static:managerip', '') %}
{% endif -%}
{%- set THREADS = salt['pillar.get']('logstash_settings:ls_input_threads', '') %}
input {
redis {
- host => '{{ MASTER }}'
+ host => '{{ MANAGER }}'
data_type => 'list'
key => 'logstash:unparsed'
type => 'redis-input'
diff --git a/salt/logstash/pipelines/config/so/9000_output_zeek.conf.jinja b/salt/logstash/pipelines/config/so/9000_output_zeek.conf.jinja
index 987614a2c..54a30f272 100644
--- a/salt/logstash/pipelines/config/so/9000_output_zeek.conf.jinja
+++ b/salt/logstash/pipelines/config/so/9000_output_zeek.conf.jinja
@@ -1,23 +1,10 @@
{%- if grains['role'] == 'so-eval' -%}
-{%- set ES = salt['pillar.get']('master:mainip', '') -%}
+{%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %}
-# Author: Justin Henderson
-# SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics
-# Email: justin@hasecuritysolution.com
-# Last Update: 12/9/2016
-
-
-filter {
- if [module] =~ "zeek" {
- mutate {
- ##add_tag => [ "conf_file_9000"]
- }
- }
-}
output {
- if [module] =~ "zeek" {
+ if [module] =~ "zeek" and "import" not in [tags] {
elasticsearch {
pipeline => "%{module}.%{dataset}"
hosts => "{{ ES }}"
diff --git a/salt/logstash/pipelines/config/so/9001_output_switch.conf.jinja b/salt/logstash/pipelines/config/so/9001_output_switch.conf.jinja
deleted file mode 100644
index 8e5e5f200..000000000
--- a/salt/logstash/pipelines/config/so/9001_output_switch.conf.jinja
+++ /dev/null
@@ -1,27 +0,0 @@
-{%- if grains['role'] == 'so-eval' -%}
-{%- set ES = salt['pillar.get']('master:mainip', '') -%}
-{%- else %}
-{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
-{%- endif %}
-# Author: Justin Henderson
-# SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics
-# Email: justin@hasecuritysolution.com
-# Last Update: 12/9/2016
-
-filter {
- if "switch" in [tags] and "test_data" not in [tags] {
- mutate {
- ##add_tag => [ "conf_file_9001"]
- }
- }
-}
-output {
- if "switch" in [tags] and "test_data" not in [tags] {
- #stdout { codec => rubydebug }
- elasticsearch {
- hosts => "{{ ES }}"
- index => "so-switch-%{+YYYY.MM.dd}"
- template => "/so-common-template.json"
- }
- }
-}
diff --git a/salt/logstash/pipelines/config/so/9002_output_import.conf.jinja b/salt/logstash/pipelines/config/so/9002_output_import.conf.jinja
index 9153d5c44..563e5984e 100644
--- a/salt/logstash/pipelines/config/so/9002_output_import.conf.jinja
+++ b/salt/logstash/pipelines/config/so/9002_output_import.conf.jinja
@@ -1,26 +1,16 @@
{%- if grains['role'] == 'so-eval' -%}
-{%- set ES = salt['pillar.get']('master:mainip', '') -%}
+{%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %}
-# Updated by: Doug Burks
-# Last Update: 5/16/2017
-
-filter {
- if "import" in [tags] and "test_data" not in [tags] {
- mutate {
- ##add_tag => [ "conf_file_9002"]
- }
- }
-}
output {
- if "import" in [tags] and "test_data" not in [tags] {
-# stdout { codec => rubydebug }
+ if "import" in [tags] {
elasticsearch {
+ pipeline => "%{module}.%{dataset}"
hosts => "{{ ES }}"
index => "so-import-%{+YYYY.MM.dd}"
- template_name => "so-common"
- template => "/so-common-template.json"
+ template_name => "so-import"
+ template => "/so-import-template.json"
template_overwrite => true
}
}
diff --git a/salt/logstash/pipelines/config/so/9004_output_flow.conf.jinja b/salt/logstash/pipelines/config/so/9004_output_flow.conf.jinja
index 2e1e79f8b..007713811 100644
--- a/salt/logstash/pipelines/config/so/9004_output_flow.conf.jinja
+++ b/salt/logstash/pipelines/config/so/9004_output_flow.conf.jinja
@@ -1,27 +1,16 @@
{%- if grains['role'] == 'so-eval' -%}
-{%- set ES = salt['pillar.get']('master:mainip', '') -%}
+{%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %}
-# Author: Justin Henderson
-# SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics
-# Email: justin@hasecuritysolution.com
-# Last Update: 12/9/2016
-
-filter {
- if [event_type] == "sflow" and "test_data" not in [tags] {
- mutate {
- ##add_tag => [ "conf_file_9004"]
- }
- }
-}
output {
- if [event_type] == "sflow" and "test_data" not in [tags] {
- #stdout { codec => rubydebug }
+ if [event_type] == "sflow" {
elasticsearch {
hosts => "{{ ES }}"
index => "so-flow-%{+YYYY.MM.dd}"
- template => "/so-common-template.json"
+ template_name => "so-flow"
+ template => "/so-flow-template.json"
+ template_overwrite => true
}
}
}
diff --git a/salt/logstash/pipelines/config/so/9026_output_dhcp.conf.jinja b/salt/logstash/pipelines/config/so/9026_output_dhcp.conf.jinja
deleted file mode 100644
index 3da9e83ef..000000000
--- a/salt/logstash/pipelines/config/so/9026_output_dhcp.conf.jinja
+++ /dev/null
@@ -1,26 +0,0 @@
-{%- if grains['role'] == 'so-eval' -%}
-{%- set ES = salt['pillar.get']('master:mainip', '') -%}
-{%- else %}
-{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
-{%- endif %}
-# Author: Justin Henderson
-# SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics
-# Email: justin@hasecuritysolution.com
-# Last Update: 12/9/2016
-
-filter {
- if [event_type] == "dhcp" and "test_data" not in [tags] {
- mutate {
- ##add_tag => [ "conf_file_9026"]
- }
- }
-}
-output {
- if [event_type] == "dhcp" and "test_data" not in [tags] {
- #stdout { codec => rubydebug }
- elasticsearch {
- hosts => "{{ ES }}"
- template => "/so-common-template.json"
- }
- }
-}
diff --git a/salt/logstash/pipelines/config/so/9029_output_esxi.conf.jinja b/salt/logstash/pipelines/config/so/9029_output_esxi.conf.jinja
deleted file mode 100644
index b84ab4ec9..000000000
--- a/salt/logstash/pipelines/config/so/9029_output_esxi.conf.jinja
+++ /dev/null
@@ -1,25 +0,0 @@
-{%- if grains['role'] == 'so-eval' -%}
-{%- set ES = salt['pillar.get']('master:mainip', '') -%}
-{%- else %}
-{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
-{%- endif %}
-# Author: Justin Henderson
-# SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics
-# Email: justin@hasecuritysolution.com
-# Last Update: 12/9/2016
-
-filter {
- if [event_type] == "esxi" and "test_data" not in [tags] {
- mutate {
- ##add_tag => [ "conf_file_9029"]
- }
- }
-}
-output {
- if [event_type] == "esxi" and "test_data" not in [tags] {
- elasticsearch {
- hosts => "{{ ES }}"
- template => "/so-common-template.json"
- }
- }
-}
diff --git a/salt/logstash/pipelines/config/so/9030_output_greensql.conf.jinja b/salt/logstash/pipelines/config/so/9030_output_greensql.conf.jinja
deleted file mode 100644
index d6801530b..000000000
--- a/salt/logstash/pipelines/config/so/9030_output_greensql.conf.jinja
+++ /dev/null
@@ -1,25 +0,0 @@
-{%- if grains['role'] == 'so-eval' -%}
-{%- set ES = salt['pillar.get']('master:mainip', '') -%}
-{%- else %}
-{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
-{%- endif %}
-# Author: Justin Henderson
-# SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics
-# Email: justin@hasecuritysolution.com
-# Last Update: 12/9/2016
-
-filter {
- if [event_type] == "greensql" and "test_data" not in [tags] {
- mutate {
- ##add_tag => [ "conf_file_9030"]
- }
- }
-}
-output {
- if [event_type] == "greensql" and "test_data" not in [tags] {
- elasticsearch {
- hosts => "{{ ES }}"
- template => "/so-common-template.json"
- }
- }
-}
diff --git a/salt/logstash/pipelines/config/so/9031_output_iis.conf.jinja b/salt/logstash/pipelines/config/so/9031_output_iis.conf.jinja
deleted file mode 100644
index 67616110f..000000000
--- a/salt/logstash/pipelines/config/so/9031_output_iis.conf.jinja
+++ /dev/null
@@ -1,26 +0,0 @@
-{%- if grains['role'] == 'so-eval' -%}
-{%- set ES = salt['pillar.get']('master:mainip', '') -%}
-{%- else %}
-{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
-{%- endif %}
-# Author: Justin Henderson
-# SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics
-# Email: justin@hasecuritysolution.com
-# Last Update: 12/9/2016
-
-filter {
- if [event_type] == "iis" and "test_data" not in [tags] {
- mutate {
- ##add_tag => [ "conf_file_9031"]
- }
- }
-}
-output {
- if [event_type] == "iis" and "test_data" not in [tags] {
- #stdout { codec => rubydebug }
- elasticsearch {
- hosts => "{{ ES }}"
- template => "/so-common-template.json"
- }
- }
-}
diff --git a/salt/logstash/pipelines/config/so/9032_output_mcafee.conf.jinja b/salt/logstash/pipelines/config/so/9032_output_mcafee.conf.jinja
deleted file mode 100644
index c6641f671..000000000
--- a/salt/logstash/pipelines/config/so/9032_output_mcafee.conf.jinja
+++ /dev/null
@@ -1,26 +0,0 @@
-{%- if grains['role'] == 'so-eval' -%}
-{%- set ES = salt['pillar.get']('master:mainip', '') -%}
-{%- else %}
-{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
-{%- endif %}
-# Author: Justin Henderson
-# SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics
-# Email: justin@hasecuritysolution.com
-# Last Update: 12/9/2016
-
-filter {
- if [event_type] == "mcafee" and "test_data" not in [tags] {
- mutate {
- ##add_tag => [ "conf_file_9032"]
- }
- }
-}
-output {
- if [event_type] == "mcafee" and "test_data" not in [tags] {
- #stdout { codec => rubydebug }
- elasticsearch {
- hosts => "{{ ES }}"
- template => "/so-common-template.json"
- }
- }
-}
diff --git a/salt/logstash/pipelines/config/so/9033_output_snort.conf.jinja b/salt/logstash/pipelines/config/so/9033_output_snort.conf.jinja
index 0cc7a3b66..065653f01 100644
--- a/salt/logstash/pipelines/config/so/9033_output_snort.conf.jinja
+++ b/salt/logstash/pipelines/config/so/9033_output_snort.conf.jinja
@@ -1,28 +1,15 @@
{%- if grains['role'] == 'so-eval' -%}
-{%- set ES = salt['pillar.get']('master:mainip', '') -%}
+{%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %}
-# Author: Justin Henderson
-# SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics
-# Email: justin@hasecuritysolution.com
-# Last Update: 12/9/2016
-
-filter {
- if [event_type] == "ids" and "test_data" not in [tags] {
- mutate {
- ##add_tag => [ "conf_file_9033"]
- }
- }
-}
output {
- if [event_type] == "ids" and "test_data" not in [tags] {
- #stdout { codec => rubydebug }
+ if [event_type] == "ids" and "import" not in [tags] {
elasticsearch {
hosts => "{{ ES }}"
index => "so-ids-%{+YYYY.MM.dd}"
- template_name => "so-common"
- template => "/so-common-template.json"
+ template_name => "so-ids"
+ template => "/so-ids-template.json"
template_overwrite => true
}
}
diff --git a/salt/logstash/pipelines/config/so/9034_output_syslog.conf.jinja b/salt/logstash/pipelines/config/so/9034_output_syslog.conf.jinja
index 59cae7b65..cd7e44d74 100644
--- a/salt/logstash/pipelines/config/so/9034_output_syslog.conf.jinja
+++ b/salt/logstash/pipelines/config/so/9034_output_syslog.conf.jinja
@@ -1,24 +1,16 @@
{%- if grains['role'] == 'so-eval' -%}
-{%- set ES = salt['pillar.get']('master:mainip', '') -%}
+{%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %}
-
-filter {
- if [module] =~ "syslog" {
- mutate {
- ##add_tag => [ "conf_file_9000"]
- }
- }
-}
output {
if [module] =~ "syslog" {
elasticsearch {
pipeline => "%{module}"
hosts => "{{ ES }}"
index => "so-syslog-%{+YYYY.MM.dd}"
- template_name => "so-common"
- template => "/so-common-template.json"
+ template_name => "so-syslog"
+ template => "/so-syslog-template.json"
template_overwrite => true
}
}
diff --git a/salt/logstash/pipelines/config/so/9100_output_osquery.conf.jinja b/salt/logstash/pipelines/config/so/9100_output_osquery.conf.jinja
index 21ae77095..3b99a7afa 100644
--- a/salt/logstash/pipelines/config/so/9100_output_osquery.conf.jinja
+++ b/salt/logstash/pipelines/config/so/9100_output_osquery.conf.jinja
@@ -1,20 +1,17 @@
{%- if grains['role'] == 'so-eval' -%}
-{%- set ES = salt['pillar.get']('master:mainip', '') -%}
+{%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %}
-# Author: Josh Brower
-# Last Update: 12/29/2018
-# Output to ES for osquery tagged logs
-
-
output {
if [module] =~ "osquery" {
elasticsearch {
pipeline => "%{module}.%{dataset}"
hosts => "{{ ES }}"
index => "so-osquery-%{+YYYY.MM.dd}"
- template => "/so-common-template.json"
+ template_name => "so-osquery"
+ template => "/so-osquery-template.json"
+ template_overwrite => true
}
}
}
diff --git a/salt/logstash/pipelines/config/so/9200_output_firewall.conf.jinja b/salt/logstash/pipelines/config/so/9200_output_firewall.conf.jinja
index 54c75873d..9407fe79e 100644
--- a/salt/logstash/pipelines/config/so/9200_output_firewall.conf.jinja
+++ b/salt/logstash/pipelines/config/so/9200_output_firewall.conf.jinja
@@ -1,28 +1,15 @@
{%- if grains['role'] == 'so-eval' -%}
-{%- set ES = salt['pillar.get']('master:mainip', '') -%}
+{%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %}
-# Author: Justin Henderson
-# SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics
-# Email: justin@hasecuritysolution.com
-# Last Update: 12/9/2016
-
-filter {
- if "firewall" in [tags] and "test_data" not in [tags] {
- mutate {
- ##add_tag => [ "conf_file_9200"]
- }
- }
-}
output {
- if "firewall" in [tags] and "test_data" not in [tags] {
-# stdout { codec => rubydebug }
+ if "firewall" in [tags] {
elasticsearch {
hosts => "{{ ES }}"
index => "so-firewall-%{+YYYY.MM.dd}"
- template_name => "so-common"
- template => "/so-common-template.json"
+ template_name => "so-firewall"
+ template => "/so-firewall-template.json"
template_overwrite => true
}
}
diff --git a/salt/logstash/pipelines/config/so/9300_output_windows.conf.jinja b/salt/logstash/pipelines/config/so/9300_output_windows.conf.jinja
deleted file mode 100644
index cddda5541..000000000
--- a/salt/logstash/pipelines/config/so/9300_output_windows.conf.jinja
+++ /dev/null
@@ -1,27 +0,0 @@
-{%- if grains['role'] == 'so-eval' -%}
-{%- set ES = salt['pillar.get']('master:mainip', '') -%}
-{%- else %}
-{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
-{%- endif %}
-# Author: Justin Henderson
-# SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics
-# Email: justin@hasecuritysolution.com
-# Last Update: 12/9/2016
-
-filter {
- if [event_type] == "windows" and "test_data" not in [tags] {
- mutate {
- ##add_tag => [ "conf_file_9300"]
- }
- }
-}
-output {
- if [event_type] == "windows" and "test_data" not in [tags] {
- #stdout { codec => rubydebug }
- elasticsearch {
- hosts => "{{ ES }}"
- index => "so-windows-%{+YYYY.MM.dd}"
- template => "/so-common-template.json"
- }
- }
-}
diff --git a/salt/logstash/pipelines/config/so/9301_output_dns_windows.conf.jinja b/salt/logstash/pipelines/config/so/9301_output_dns_windows.conf.jinja
deleted file mode 100644
index 84fd1f5f7..000000000
--- a/salt/logstash/pipelines/config/so/9301_output_dns_windows.conf.jinja
+++ /dev/null
@@ -1,27 +0,0 @@
-{%- if grains['role'] == 'so-eval' -%}
-{%- set ES = salt['pillar.get']('master:mainip', '') -%}
-{%- else %}
-{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
-{%- endif %}
-# Author: Justin Henderson
-# SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics
-# Email: justin@hasecuritysolution.com
-# Last Update: 12/9/2016
-
-filter {
- if [event_type] == "dns" and "test_data" not in [tags] {
- mutate {
- ##add_tag => [ "conf_file_9301"]
- }
- }
-}
-output {
- if [event_type] == "dns" and "test_data" not in [tags] {
- #stdout { codec => rubydebug }
- elasticsearch {
- hosts => "{{ ES }}"
- index => "so-%{+YYYY.MM.dd}"
- template => "/so-common-template.json"
- }
- }
-}
diff --git a/salt/logstash/pipelines/config/so/9400_output_suricata.conf.jinja b/salt/logstash/pipelines/config/so/9400_output_suricata.conf.jinja
index 1d36d774d..d3026aa20 100644
--- a/salt/logstash/pipelines/config/so/9400_output_suricata.conf.jinja
+++ b/salt/logstash/pipelines/config/so/9400_output_suricata.conf.jinja
@@ -1,28 +1,16 @@
{%- if grains['role'] == 'so-eval' -%}
-{%- set ES = salt['pillar.get']('master:mainip', '') -%}
+{%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %}
-# Author: Justin Henderson
-# SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics
-# Email: justin@hasecuritysolution.com
-# Last Update: 12/9/2016
-
-filter {
- if [module] == "suricata" {
- mutate {
- ##add_tag => [ "conf_file_9400"]
- }
- }
-}
output {
- if [module] =~ "suricata" {
+ if [module] =~ "suricata" and "import" not in [tags] {
elasticsearch {
pipeline => "%{module}.%{dataset}"
hosts => "{{ ES }}"
index => "so-ids-%{+YYYY.MM.dd}"
- template_name => "so-common"
- template => "/so-common-template.json"
+ template_name => "so-ids"
+ template => "/so-ids-template.json"
}
}
}
diff --git a/salt/logstash/pipelines/config/so/9500_output_beats.conf.jinja b/salt/logstash/pipelines/config/so/9500_output_beats.conf.jinja
index 932a194ab..6874e5e76 100644
--- a/salt/logstash/pipelines/config/so/9500_output_beats.conf.jinja
+++ b/salt/logstash/pipelines/config/so/9500_output_beats.conf.jinja
@@ -1,17 +1,16 @@
{%- if grains['role'] == 'so-eval' -%}
-{%- set ES = salt['pillar.get']('master:mainip', '') -%}
+{%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %}
-
output {
- if "beat-ext" in [tags] {
+ if "beat-ext" in [tags] and "import" not in [tags] {
elasticsearch {
pipeline => "beats.common"
hosts => "{{ ES }}"
index => "so-beats-%{+YYYY.MM.dd}"
- template_name => "so-common"
- template => "/so-common-template.json"
+ template_name => "so-beats"
+ template => "/so-beats-template.json"
template_overwrite => true
}
}
diff --git a/salt/logstash/pipelines/config/so/9600_output_ossec.conf.jinja b/salt/logstash/pipelines/config/so/9600_output_ossec.conf.jinja
index 5a8f9f5ba..77610d9e0 100644
--- a/salt/logstash/pipelines/config/so/9600_output_ossec.conf.jinja
+++ b/salt/logstash/pipelines/config/so/9600_output_ossec.conf.jinja
@@ -1,29 +1,16 @@
{%- if grains['role'] == 'so-eval' -%}
-{%- set ES = salt['pillar.get']('master:mainip', '') -%}
+{%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %}
-# Author: Justin Henderson
-# SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics
-# Updated by: Doug Burks
-# Last Update: 9/19/2018
-
-filter {
- if [module] =~ "ossec" {
- mutate {
- ##add_tag => [ "conf_file_9600"]
- }
- }
-}
-
output {
if [module] =~ "ossec" {
elasticsearch {
pipeline => "%{module}.%{dataset}"
hosts => "{{ ES }}"
index => "so-ossec-%{+YYYY.MM.dd}"
- template_name => "so-common"
- template => "/so-common-template.json"
+ template_name => "so-ossec"
+ template => "/so-ossec-template.json"
template_overwrite => true
}
}
diff --git a/salt/logstash/pipelines/config/so/9700_output_strelka.conf.jinja b/salt/logstash/pipelines/config/so/9700_output_strelka.conf.jinja
index 5116b86ea..b92e2a3d9 100644
--- a/salt/logstash/pipelines/config/so/9700_output_strelka.conf.jinja
+++ b/salt/logstash/pipelines/config/so/9700_output_strelka.conf.jinja
@@ -1,29 +1,16 @@
{%- if grains['role'] == 'so-eval' -%}
-{%- set ES = salt['pillar.get']('master:mainip', '') -%}
+{%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %}
-# Author: Justin Henderson
-# SANS Instructor and author of SANS SEC555: SIEM and Tactical Analytics
-# Email: justin@hasecuritysolution.com
-# Last Update: 12/9/2016
-
-
-filter {
- if [module] =~ "strelka" {
- mutate {
- ##add_tag => [ "conf_file_9000"]
- }
- }
-}
output {
if [event_type] =~ "strelka" {
elasticsearch {
pipeline => "%{module}.%{dataset}"
hosts => "{{ ES }}"
index => "so-strelka-%{+YYYY.MM.dd}"
- template_name => "so-common"
- template => "/so-common-template.json"
+ template_name => "so-strelka"
+ template => "/so-strelka-template.json"
template_overwrite => true
}
}
diff --git a/salt/logstash/pipelines/config/so/9999_output_redis.conf.jinja b/salt/logstash/pipelines/config/so/9999_output_redis.conf.jinja
index afa8d290a..71ec9f639 100644
--- a/salt/logstash/pipelines/config/so/9999_output_redis.conf.jinja
+++ b/salt/logstash/pipelines/config/so/9999_output_redis.conf.jinja
@@ -1,9 +1,8 @@
-{% set MASTER = salt['pillar.get']('static:masterip', '') %}
+{% set MANAGER = salt['pillar.get']('static:managerip', '') %}
{% set BATCH = salt['pillar.get']('logstash_settings:ls_pipeline_batch_size', 125) %}
-
output {
redis {
- host => '{{ MASTER }}'
+ host => '{{ MANAGER }}'
data_type => 'list'
key => 'logstash:unparsed'
congestion_interval => 1
diff --git a/salt/logstash/pipelines/templates/so/so-beats-template.json.jinja b/salt/logstash/pipelines/templates/so/so-beats-template.json.jinja
new file mode 100644
index 000000000..6d2cf7851
--- /dev/null
+++ b/salt/logstash/pipelines/templates/so/so-beats-template.json.jinja
@@ -0,0 +1,13 @@
+{%- set SHARDS = salt['pillar.get']('elasticsearch:index_settings:so-beats:shards', 1) %}
+{%- set REPLICAS = salt['pillar.get']('elasticsearch:replicas', 0) %}
+{%- set REFRESH = salt['pillar.get']('elasticsearch:index_settings:so-beats:refresh', '30s') %}
+{
+ "index_patterns": ["so-beats-*"],
+ "version": 50001,
+ "order": 11,
+ "settings":{
+ "number_of_replicas":{{ REPLICAS }},
+ "number_of_shards":{{ SHARDS }},
+ "index.refresh_interval":"{{ REFRESH }}"
+ }
+}
diff --git a/salt/logstash/pipelines/templates/so/so-common-template.json b/salt/logstash/pipelines/templates/so/so-common-template.json
index 396e26c3c..a4da40765 100644
--- a/salt/logstash/pipelines/templates/so/so-common-template.json
+++ b/salt/logstash/pipelines/templates/so/so-common-template.json
@@ -1,15 +1,16 @@
{
"index_patterns": ["so-ids-*", "so-firewall-*", "so-syslog-*", "so-zeek-*", "so-import-*", "so-ossec-*", "so-strelka-*", "so-beats-*", "so-osquery-*"],
"version":50001,
- "order" : 10,
+ "order":10,
"settings":{
"number_of_replicas":0,
"number_of_shards":1,
- "index.refresh_interval":"30s"
+ "index.refresh_interval":"30s",
+ "index.routing.allocation.require.box_type":"hot"
},
"mappings":{
- "dynamic": false,
- "date_detection": false,
+ "dynamic":false,
+ "date_detection":false,
"properties":{
"@timestamp":{
"type":"date"
@@ -19,7 +20,7 @@
},
"osquery":{
"type":"object",
- "dynamic": true
+ "dynamic":true
},
"geoip":{
"dynamic":true,
diff --git a/salt/logstash/pipelines/templates/so/so-firewall-template.json.jinja b/salt/logstash/pipelines/templates/so/so-firewall-template.json.jinja
new file mode 100644
index 000000000..7bc81fd12
--- /dev/null
+++ b/salt/logstash/pipelines/templates/so/so-firewall-template.json.jinja
@@ -0,0 +1,13 @@
+{%- set SHARDS = salt['pillar.get']('elasticsearch:index_settings:so-firewall:shards', 1) %}
+{%- set REPLICAS = salt['pillar.get']('elasticsearch:replicas', 0) %}
+{%- set REFRESH = salt['pillar.get']('elasticsearch:index_settings:so-firewall:refresh', '30s') %}
+{
+ "index_patterns": ["so-firewall-*"],
+ "version":50001,
+ "order":11,
+ "settings":{
+ "number_of_replicas":{{ REPLICAS }},
+ "number_of_shards":{{ SHARDS }},
+ "index.refresh_interval":"{{ REFRESH }}"
+ }
+}
diff --git a/salt/logstash/pipelines/templates/so/so-flow-template.json.jinja b/salt/logstash/pipelines/templates/so/so-flow-template.json.jinja
new file mode 100644
index 000000000..6c8f2fa9f
--- /dev/null
+++ b/salt/logstash/pipelines/templates/so/so-flow-template.json.jinja
@@ -0,0 +1,13 @@
+{%- set SHARDS = salt['pillar.get']('elasticsearch:index_settings:so-flow:shards', 1) %}
+{%- set REPLICAS = salt['pillar.get']('elasticsearch:replicas', 0) %}
+{%- set REFRESH = salt['pillar.get']('elasticsearch:index_settings:so-flow:refresh', '30s') %}
+{
+ "index_patterns": ["so-flow-*"],
+ "version":50001,
+ "order":11,
+ "settings":{
+ "number_of_replicas":{{ REPLICAS }},
+ "number_of_shards":{{ SHARDS }},
+ "index.refresh_interval":"{{ REFRESH }}"
+ }
+}
diff --git a/salt/logstash/pipelines/templates/so/so-ids-template.json.jinja b/salt/logstash/pipelines/templates/so/so-ids-template.json.jinja
new file mode 100644
index 000000000..abf37319a
--- /dev/null
+++ b/salt/logstash/pipelines/templates/so/so-ids-template.json.jinja
@@ -0,0 +1,13 @@
+{%- set SHARDS = salt['pillar.get']('elasticsearch:index_settings:so-ids:shards', 1) %}
+{%- set REPLICAS = salt['pillar.get']('elasticsearch:replicas', 0) %}
+{%- set REFRESH = salt['pillar.get']('elasticsearch:index_settings:so-ids:refresh', '30s') %}
+{
+ "index_patterns": ["so-ids-*"],
+ "version":50001,
+ "order":11,
+ "settings":{
+ "number_of_replicas":{{ REPLICAS }},
+ "number_of_shards":{{ SHARDS }},
+ "index.refresh_interval":"{{ REFRESH }}"
+ }
+}
diff --git a/salt/logstash/pipelines/templates/so/so-import-template.json.jinja b/salt/logstash/pipelines/templates/so/so-import-template.json.jinja
new file mode 100644
index 000000000..e4d68235d
--- /dev/null
+++ b/salt/logstash/pipelines/templates/so/so-import-template.json.jinja
@@ -0,0 +1,13 @@
+{%- set SHARDS = salt['pillar.get']('elasticsearch:index_settings:so-import:shards', 1) %}
+{%- set REPLICAS = salt['pillar.get']('elasticsearch:replicas', 0) %}
+{%- set REFRESH = salt['pillar.get']('elasticsearch:index_settings:so-import:refresh', '30s') %}
+{
+ "index_patterns": ["so-import-*"],
+ "version":50001,
+ "order":11,
+ "settings":{
+ "number_of_replicas":{{ REPLICAS }},
+ "number_of_shards":{{ SHARDS }},
+ "index.refresh_interval":"{{ REFRESH }}"
+ }
+}
diff --git a/salt/logstash/pipelines/templates/so/so-osquery-template.json.jinja b/salt/logstash/pipelines/templates/so/so-osquery-template.json.jinja
new file mode 100644
index 000000000..47cb3ebab
--- /dev/null
+++ b/salt/logstash/pipelines/templates/so/so-osquery-template.json.jinja
@@ -0,0 +1,13 @@
+{%- set SHARDS = salt['pillar.get']('elasticsearch:index_settings:so-osquery:shards', 1) %}
+{%- set REPLICAS = salt['pillar.get']('elasticsearch:replicas', 0) %}
+{%- set REFRESH = salt['pillar.get']('elasticsearch:index_settings:so-osquery:refresh', '30s') %}
+{
+ "index_patterns": ["so-osquery-*"],
+ "version":50001,
+ "order":11,
+ "settings":{
+ "number_of_replicas":{{ REPLICAS }},
+ "number_of_shards":{{ SHARDS }},
+ "index.refresh_interval":"{{ REFRESH }}"
+ }
+}
diff --git a/salt/logstash/pipelines/templates/so/so-ossec-template.json.jinja b/salt/logstash/pipelines/templates/so/so-ossec-template.json.jinja
new file mode 100644
index 000000000..ce903e228
--- /dev/null
+++ b/salt/logstash/pipelines/templates/so/so-ossec-template.json.jinja
@@ -0,0 +1,13 @@
+{%- set SHARDS = salt['pillar.get']('elasticsearch:index_settings:so-ossec:shards', 1) %}
+{%- set REPLICAS = salt['pillar.get']('elasticsearch:replicas', 0) %}
+{%- set REFRESH = salt['pillar.get']('elasticsearch:index_settings:so-ossec:refresh', '30s') %}
+{
+ "index_patterns": ["so-ossec-*"],
+ "version":50001,
+ "order":11,
+ "settings":{
+ "number_of_replicas":{{ REPLICAS }},
+ "number_of_shards":{{ SHARDS }},
+ "index.refresh_interval":"{{ REFRESH }}"
+ }
+}
diff --git a/salt/logstash/pipelines/templates/so/so-strelka-template.json.jinja b/salt/logstash/pipelines/templates/so/so-strelka-template.json.jinja
new file mode 100644
index 000000000..2f7db541a
--- /dev/null
+++ b/salt/logstash/pipelines/templates/so/so-strelka-template.json.jinja
@@ -0,0 +1,13 @@
+{%- set SHARDS = salt['pillar.get']('elasticsearch:index_settings:so-strelka:shards', 1) %}
+{%- set REPLICAS = salt['pillar.get']('elasticsearch:replicas', 0) %}
+{%- set REFRESH = salt['pillar.get']('elasticsearch:index_settings:so-strelka:refresh', '30s') %}
+{
+ "index_patterns": ["so-strelka-*"],
+ "version":50001,
+ "order":11,
+ "settings":{
+ "number_of_replicas":{{ REPLICAS }},
+ "number_of_shards":{{ SHARDS }},
+ "index.refresh_interval":"{{ REFRESH }}"
+ }
+}
diff --git a/salt/logstash/pipelines/templates/so/so-syslog-template.json.jinja b/salt/logstash/pipelines/templates/so/so-syslog-template.json.jinja
new file mode 100644
index 000000000..47f8d78e6
--- /dev/null
+++ b/salt/logstash/pipelines/templates/so/so-syslog-template.json.jinja
@@ -0,0 +1,13 @@
+{%- set SHARDS = salt['pillar.get']('elasticsearch:index_settings:so-syslog:shards', 1) %}
+{%- set REPLICAS = salt['pillar.get']('elasticsearch:replicas', 0) %}
+{%- set REFRESH = salt['pillar.get']('elasticsearch:index_settings:so-syslog:refresh', '30s') %}
+{
+ "index_patterns": ["so-syslog-*"],
+ "version":50001,
+ "order":11,
+ "settings":{
+ "number_of_replicas":{{ REPLICAS }},
+ "number_of_shards":{{ SHARDS }},
+ "index.refresh_interval":"{{ REFRESH }}"
+ }
+}
diff --git a/salt/logstash/pipelines/templates/so/so-zeek-template.json b/salt/logstash/pipelines/templates/so/so-zeek-template.json
deleted file mode 100644
index 61a95c0e7..000000000
--- a/salt/logstash/pipelines/templates/so/so-zeek-template.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
- "index_patterns": ["so-zeek-*"],
- "version":50001,
- "order" : 11,
- "settings":{
- "number_of_replicas":0,
- "number_of_shards":1,
- "index.refresh_interval":"30s"
- }
-}
diff --git a/salt/logstash/pipelines/templates/so/so-zeek-template.json.jinja b/salt/logstash/pipelines/templates/so/so-zeek-template.json.jinja
new file mode 100644
index 000000000..616607f52
--- /dev/null
+++ b/salt/logstash/pipelines/templates/so/so-zeek-template.json.jinja
@@ -0,0 +1,13 @@
+{%- set SHARDS = salt['pillar.get']('elasticsearch:index_settings:so-zeek:shards', 1) %}
+{%- set REPLICAS = salt['pillar.get']('elasticsearch:replicas', 0) %}
+{%- set REFRESH = salt['pillar.get']('elasticsearch:index_settings:so-zeek:refresh', '30s') %}
+{
+ "index_patterns": ["so-zeek-*"],
+ "version":50001,
+ "order":11,
+ "settings":{
+ "number_of_replicas":{{ REPLICAS }},
+ "number_of_shards":{{ SHARDS }},
+ "index.refresh_interval":"{{ REFRESH }}"
+ }
+}
diff --git a/salt/master/files/acng/acng.conf b/salt/manager/files/acng/acng.conf
similarity index 100%
rename from salt/master/files/acng/acng.conf
rename to salt/manager/files/acng/acng.conf
diff --git a/salt/master/files/add_minion.sh b/salt/manager/files/add_minion.sh
similarity index 100%
rename from salt/master/files/add_minion.sh
rename to salt/manager/files/add_minion.sh
diff --git a/salt/master/files/registry/scripts/so-docker-download b/salt/manager/files/registry/scripts/so-docker-download
similarity index 88%
rename from salt/master/files/registry/scripts/so-docker-download
rename to salt/manager/files/registry/scripts/so-docker-download
index 1213ae72a..dcba7a531 100644
--- a/salt/master/files/registry/scripts/so-docker-download
+++ b/salt/manager/files/registry/scripts/so-docker-download
@@ -1,6 +1,6 @@
#!/bin/bash
-MASTER={{ MASTER }}
+MANAGER={{ MANAGER }}
VERSION="HH1.2.2"
TRUSTED_CONTAINERS=( \
"so-nginx:$VERSION" \
@@ -41,6 +41,6 @@ do
# Pull down the trusted docker image
docker pull --disable-content-trust=false docker.io/soshybridhunter/$i
# Tag it with the new registry destination
- docker tag soshybridhunter/$i $MASTER:5000/soshybridhunter/$i
- docker push $MASTER:5000/soshybridhunter/$i
+ docker tag soshybridhunter/$i $MANAGER:5000/soshybridhunter/$i
+ docker push $MANAGER:5000/soshybridhunter/$i
done
diff --git a/salt/master/init.sls b/salt/manager/init.sls
similarity index 87%
rename from salt/master/init.sls
rename to salt/manager/init.sls
index 3c6b81e5e..e1d8cdb12 100644
--- a/salt/master/init.sls
+++ b/salt/manager/init.sls
@@ -13,8 +13,8 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
-{% set MASTER = salt['grains.get']('master') %}
-{% set masterproxy = salt['pillar.get']('static:masterupdate', '0') %}
+{% set MANAGER = salt['grains.get']('master') %}
+{% set managerproxy = salt['pillar.get']('static:managerupdate', '0') %}
socore_own_saltstack:
file.directory:
@@ -25,7 +25,7 @@ socore_own_saltstack:
- user
- group
-{% if masterproxy == 1 %}
+{% if managerproxy == 1 %}
# Create the directories for apt-cacher-ng
aptcacherconfdir:
@@ -54,12 +54,12 @@ aptcacherlogdir:
acngcopyconf:
file.managed:
- name: /opt/so/conf/aptcacher-ng/etc/acng.conf
- - source: salt://master/files/acng/acng.conf
+ - source: salt://manager/files/acng/acng.conf
# Install the apt-cacher-ng container
so-aptcacherng:
docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-acng:{{ VERSION }}
+ - image: {{ MANAGER }}:5000/soshybridhunter/so-acng:{{ VERSION }}
- hostname: so-acng
- restart_policy: always
- port_bindings:
diff --git a/salt/minio/init.sls b/salt/minio/init.sls
index 7b4dd5673..2d5941301 100644
--- a/salt/minio/init.sls
+++ b/salt/minio/init.sls
@@ -13,8 +13,8 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-{% set access_key = salt['pillar.get']('master:access_key', '') %}
-{% set access_secret = salt['pillar.get']('master:access_secret', '') %}
+{% set access_key = salt['pillar.get']('manager:access_key', '') %}
+{% set access_secret = salt['pillar.get']('manager:access_secret', '') %}
# Minio Setup
minioconfdir:
diff --git a/salt/mysql/init.sls b/salt/mysql/init.sls
index 6bb99d98c..e8120724c 100644
--- a/salt/mysql/init.sls
+++ b/salt/mysql/init.sls
@@ -1,7 +1,7 @@
{%- set MYSQLPASS = salt['pillar.get']('secrets:mysql', None) %}
-{%- set MASTERIP = salt['pillar.get']('static:masterip', '') %}
+{%- set MANAGERIP = salt['pillar.get']('static:managerip', '') %}
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
-{% set MASTER = salt['grains.get']('master') %}
+{% set MANAGER = salt['grains.get']('master') %}
{% set MAINIP = salt['pillar.get']('elasticsearch:mainip') %}
{% set FLEETARCH = salt['grains.get']('role') %}
@@ -9,7 +9,7 @@
{% set MAININT = salt['pillar.get']('host:mainint') %}
{% set MAINIP = salt['grains.get']('ip_interfaces').get(MAININT)[0] %}
{% else %}
- {% set MAINIP = salt['pillar.get']('static:masterip') %}
+ {% set MAINIP = salt['pillar.get']('static:managerip') %}
{% endif %}
# MySQL Setup
@@ -71,7 +71,7 @@ mysql_password_none:
so-mysql:
docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-mysql:{{ VERSION }}
+ - image: {{ MANAGER }}:5000/soshybridhunter/so-mysql:{{ VERSION }}
- hostname: so-mysql
- user: socore
- port_bindings:
diff --git a/salt/nginx/etc/nginx.conf.so-eval b/salt/nginx/etc/nginx.conf.so-eval
index 7e3a9a401..2998a5bf2 100644
--- a/salt/nginx/etc/nginx.conf.so-eval
+++ b/salt/nginx/etc/nginx.conf.so-eval
@@ -1,5 +1,5 @@
-{%- set masterip = salt['pillar.get']('master:mainip', '') %}
-{%- set FLEET_MASTER = salt['pillar.get']('static:fleet_master') %}
+{%- set managerip = salt['pillar.get']('manager:mainip', '') %}
+{%- set FLEET_MANAGER = salt['pillar.get']('static:fleet_manager') %}
{%- set FLEET_NODE = salt['pillar.get']('static:fleet_node') %}
{%- set FLEET_IP = salt['pillar.get']('static:fleet_ip', None) %}
# For more information on configuration, see:
@@ -66,7 +66,7 @@ http {
return 301 https://$host$request_uri;
}
-{% if FLEET_MASTER %}
+{% if FLEET_MANAGER %}
server {
listen 8090 ssl http2 default_server;
server_name _;
@@ -81,7 +81,7 @@ http {
ssl_prefer_server_ciphers on;
location ~ ^/kolide.agent.Api/(RequestEnrollment|RequestConfig|RequestQueries|PublishLogs|PublishResults|CheckHealth)$ {
- grpc_pass grpcs://{{ masterip }}:8080;
+ grpc_pass grpcs://{{ managerip }}:8080;
grpc_set_header Host $host;
grpc_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_buffering off;
@@ -110,7 +110,7 @@ http {
#include /etc/nginx/default.d/*.conf;
location ~* (^/login/|^/js/.*|^/css/.*|^/images/.*) {
- proxy_pass http://{{ masterip }}:9822;
+ proxy_pass http://{{ managerip }}:9822;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -124,7 +124,7 @@ http {
location / {
auth_request /auth/sessions/whoami;
- proxy_pass http://{{ masterip }}:9822/;
+ proxy_pass http://{{ managerip }}:9822/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -138,7 +138,7 @@ http {
location ~ ^/auth/.*?(whoami|login|logout|settings) {
rewrite /auth/(.*) /$1 break;
- proxy_pass http://{{ masterip }}:4433;
+ proxy_pass http://{{ managerip }}:4433;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -185,7 +185,7 @@ http {
location /grafana/ {
auth_request /auth/sessions/whoami;
rewrite /grafana/(.*) /$1 break;
- proxy_pass http://{{ masterip }}:3000/;
+ proxy_pass http://{{ managerip }}:3000/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -198,7 +198,7 @@ http {
location /kibana/ {
auth_request /auth/sessions/whoami;
rewrite /kibana/(.*) /$1 break;
- proxy_pass http://{{ masterip }}:5601/;
+ proxy_pass http://{{ managerip }}:5601/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -209,7 +209,7 @@ http {
}
location /nodered/ {
- proxy_pass http://{{ masterip }}:1880/;
+ proxy_pass http://{{ managerip }}:1880/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -222,7 +222,7 @@ http {
}
location /playbook/ {
- proxy_pass http://{{ masterip }}:3200/playbook/;
+ proxy_pass http://{{ managerip }}:3200/playbook/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -238,7 +238,7 @@ http {
}
{%- else %}
location /fleet/ {
- proxy_pass https://{{ masterip }}:8080;
+ proxy_pass https://{{ managerip }}:8080;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -250,7 +250,7 @@ http {
{%- endif %}
location /thehive/ {
- proxy_pass http://{{ masterip }}:9000/thehive/;
+ proxy_pass http://{{ managerip }}:9000/thehive/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_http_version 1.1; # this is essential for chunked responses to work
@@ -262,7 +262,7 @@ http {
}
location /cortex/ {
- proxy_pass http://{{ masterip }}:9001/cortex/;
+ proxy_pass http://{{ managerip }}:9001/cortex/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_http_version 1.1; # this is essential for chunked responses to work
@@ -274,7 +274,7 @@ http {
}
location /soctopus/ {
- proxy_pass http://{{ masterip }}:7000/;
+ proxy_pass http://{{ managerip }}:7000/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -297,7 +297,7 @@ http {
}
location /sensoroniagents/ {
- proxy_pass http://{{ masterip }}:9822/;
+ proxy_pass http://{{ managerip }}:9822/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
diff --git a/salt/nginx/etc/nginx.conf.so-master b/salt/nginx/etc/nginx.conf.so-manager
similarity index 91%
rename from salt/nginx/etc/nginx.conf.so-master
rename to salt/nginx/etc/nginx.conf.so-manager
index de3a3a6c1..bdb342cac 100644
--- a/salt/nginx/etc/nginx.conf.so-master
+++ b/salt/nginx/etc/nginx.conf.so-manager
@@ -1,5 +1,5 @@
-{%- set masterip = salt['pillar.get']('master:mainip', '') %}
-{%- set FLEET_MASTER = salt['pillar.get']('static:fleet_master') %}
+{%- set managerip = salt['pillar.get']('manager:mainip', '') %}
+{%- set FLEET_MANAGER = salt['pillar.get']('static:fleet_manager') %}
{%- set FLEET_NODE = salt['pillar.get']('static:fleet_node') %}
{%- set FLEET_IP = salt['pillar.get']('static:fleet_ip', None) %}
# For more information on configuration, see:
@@ -66,7 +66,7 @@ http {
return 301 https://$host$request_uri;
}
-{% if FLEET_MASTER %}
+{% if FLEET_MANAGER %}
server {
listen 8090 ssl http2 default_server;
server_name _;
@@ -81,7 +81,7 @@ http {
ssl_prefer_server_ciphers on;
location ~ ^/kolide.agent.Api/(RequestEnrollment|RequestConfig|RequestQueries|PublishLogs|PublishResults|CheckHealth)$ {
- grpc_pass grpcs://{{ masterip }}:8080;
+ grpc_pass grpcs://{{ managerip }}:8080;
grpc_set_header Host $host;
grpc_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_buffering off;
@@ -110,7 +110,7 @@ http {
#include /etc/nginx/default.d/*.conf;
location ~* (^/login/|^/js/.*|^/css/.*|^/images/.*) {
- proxy_pass http://{{ masterip }}:9822;
+ proxy_pass http://{{ managerip }}:9822;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -124,7 +124,7 @@ http {
location / {
auth_request /auth/sessions/whoami;
- proxy_pass http://{{ masterip }}:9822/;
+ proxy_pass http://{{ managerip }}:9822/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -138,7 +138,7 @@ http {
location ~ ^/auth/.*?(whoami|login|logout|settings) {
rewrite /auth/(.*) /$1 break;
- proxy_pass http://{{ masterip }}:4433;
+ proxy_pass http://{{ managerip }}:4433;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -185,7 +185,7 @@ http {
location /grafana/ {
auth_request /auth/sessions/whoami;
rewrite /grafana/(.*) /$1 break;
- proxy_pass http://{{ masterip }}:3000/;
+ proxy_pass http://{{ managerip }}:3000/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -198,7 +198,7 @@ http {
location /kibana/ {
auth_request /auth/sessions/whoami;
rewrite /kibana/(.*) /$1 break;
- proxy_pass http://{{ masterip }}:5601/;
+ proxy_pass http://{{ managerip }}:5601/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -209,7 +209,7 @@ http {
}
location /nodered/ {
- proxy_pass http://{{ masterip }}:1880/;
+ proxy_pass http://{{ managerip }}:1880/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -222,7 +222,7 @@ http {
}
location /playbook/ {
- proxy_pass http://{{ masterip }}:3200/playbook/;
+ proxy_pass http://{{ managerip }}:3200/playbook/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -238,7 +238,7 @@ http {
}
{%- else %}
location /fleet/ {
- proxy_pass https://{{ masterip }}:8080;
+ proxy_pass https://{{ managerip }}:8080;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -250,7 +250,7 @@ http {
{%- endif %}
location /thehive/ {
- proxy_pass http://{{ masterip }}:9000/thehive/;
+ proxy_pass http://{{ managerip }}:9000/thehive/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_http_version 1.1; # this is essential for chunked responses to work
@@ -262,7 +262,7 @@ http {
}
location /cortex/ {
- proxy_pass http://{{ masterip }}:9001/cortex/;
+ proxy_pass http://{{ managerip }}:9001/cortex/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_http_version 1.1; # this is essential for chunked responses to work
@@ -274,7 +274,7 @@ http {
}
location /soctopus/ {
- proxy_pass http://{{ masterip }}:7000/;
+ proxy_pass http://{{ managerip }}:7000/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -297,7 +297,7 @@ http {
}
location /sensoroniagents/ {
- proxy_pass http://{{ masterip }}:9822/;
+ proxy_pass http://{{ managerip }}:9822/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
diff --git a/salt/nginx/etc/nginx.conf.so-mastersearch b/salt/nginx/etc/nginx.conf.so-managersearch
similarity index 91%
rename from salt/nginx/etc/nginx.conf.so-mastersearch
rename to salt/nginx/etc/nginx.conf.so-managersearch
index 952f18cd9..cb7576923 100644
--- a/salt/nginx/etc/nginx.conf.so-mastersearch
+++ b/salt/nginx/etc/nginx.conf.so-managersearch
@@ -1,5 +1,5 @@
-{%- set masterip = salt['pillar.get']('master:mainip', '') %}
-{%- set FLEET_MASTER = salt['pillar.get']('static:fleet_master') %}
+{%- set managerip = salt['pillar.get']('manager:mainip', '') %}
+{%- set FLEET_MANAGER = salt['pillar.get']('static:fleet_manager') %}
{%- set FLEET_NODE = salt['pillar.get']('static:fleet_node') %}
{%- set FLEET_IP = salt['pillar.get']('static:fleet_ip', None) %}
# For more information on configuration, see:
@@ -66,7 +66,7 @@ http {
return 301 https://$host$request_uri;
}
-{% if FLEET_MASTER %}
+{% if FLEET_MANAGER %}
server {
listen 8090 ssl http2 default_server;
server_name _;
@@ -81,7 +81,7 @@ http {
ssl_prefer_server_ciphers on;
location ~ ^/kolide.agent.Api/(RequestEnrollment|RequestConfig|RequestQueries|PublishLogs|PublishResults|CheckHealth)$ {
- grpc_pass grpcs://{{ masterip }}:8080;
+ grpc_pass grpcs://{{ managerip }}:8080;
grpc_set_header Host $host;
grpc_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_buffering off;
@@ -109,7 +109,7 @@ http {
#include /etc/nginx/default.d/*.conf;
location ~* (^/login/|^/js/.*|^/css/.*|^/images/.*) {
- proxy_pass http://{{ masterip }}:9822;
+ proxy_pass http://{{ managerip }}:9822;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -123,7 +123,7 @@ http {
location / {
auth_request /auth/sessions/whoami;
- proxy_pass http://{{ masterip }}:9822/;
+ proxy_pass http://{{ managerip }}:9822/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -137,7 +137,7 @@ http {
location ~ ^/auth/.*?(whoami|login|logout|settings) {
rewrite /auth/(.*) /$1 break;
- proxy_pass http://{{ masterip }}:4433;
+ proxy_pass http://{{ managerip }}:4433;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -184,7 +184,7 @@ http {
location /grafana/ {
auth_request /auth/sessions/whoami;
rewrite /grafana/(.*) /$1 break;
- proxy_pass http://{{ masterip }}:3000/;
+ proxy_pass http://{{ managerip }}:3000/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -197,7 +197,7 @@ http {
location /kibana/ {
auth_request /auth/sessions/whoami;
rewrite /kibana/(.*) /$1 break;
- proxy_pass http://{{ masterip }}:5601/;
+ proxy_pass http://{{ managerip }}:5601/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -208,7 +208,7 @@ http {
}
location /nodered/ {
- proxy_pass http://{{ masterip }}:1880/;
+ proxy_pass http://{{ managerip }}:1880/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -221,7 +221,7 @@ http {
}
location /playbook/ {
- proxy_pass http://{{ masterip }}:3200/playbook/;
+ proxy_pass http://{{ managerip }}:3200/playbook/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -237,7 +237,7 @@ http {
}
{%- else %}
location /fleet/ {
- proxy_pass https://{{ masterip }}:8080;
+ proxy_pass https://{{ managerip }}:8080;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -249,7 +249,7 @@ http {
{%- endif %}
location /thehive/ {
- proxy_pass http://{{ masterip }}:9000/thehive/;
+ proxy_pass http://{{ managerip }}:9000/thehive/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_http_version 1.1; # this is essential for chunked responses to work
@@ -261,7 +261,7 @@ http {
}
location /cortex/ {
- proxy_pass http://{{ masterip }}:9001/cortex/;
+ proxy_pass http://{{ managerip }}:9001/cortex/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_http_version 1.1; # this is essential for chunked responses to work
@@ -273,7 +273,7 @@ http {
}
location /soctopus/ {
- proxy_pass http://{{ masterip }}:7000/;
+ proxy_pass http://{{ managerip }}:7000/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -296,7 +296,7 @@ http {
}
location /sensoroniagents/ {
- proxy_pass http://{{ masterip }}:9822/;
+ proxy_pass http://{{ managerip }}:9822/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
diff --git a/salt/nginx/etc/nginx.conf.so-standalone b/salt/nginx/etc/nginx.conf.so-standalone
index de3a3a6c1..bdb342cac 100644
--- a/salt/nginx/etc/nginx.conf.so-standalone
+++ b/salt/nginx/etc/nginx.conf.so-standalone
@@ -1,5 +1,5 @@
-{%- set masterip = salt['pillar.get']('master:mainip', '') %}
-{%- set FLEET_MASTER = salt['pillar.get']('static:fleet_master') %}
+{%- set managerip = salt['pillar.get']('manager:mainip', '') %}
+{%- set FLEET_MANAGER = salt['pillar.get']('static:fleet_manager') %}
{%- set FLEET_NODE = salt['pillar.get']('static:fleet_node') %}
{%- set FLEET_IP = salt['pillar.get']('static:fleet_ip', None) %}
# For more information on configuration, see:
@@ -66,7 +66,7 @@ http {
return 301 https://$host$request_uri;
}
-{% if FLEET_MASTER %}
+{% if FLEET_MANAGER %}
server {
listen 8090 ssl http2 default_server;
server_name _;
@@ -81,7 +81,7 @@ http {
ssl_prefer_server_ciphers on;
location ~ ^/kolide.agent.Api/(RequestEnrollment|RequestConfig|RequestQueries|PublishLogs|PublishResults|CheckHealth)$ {
- grpc_pass grpcs://{{ masterip }}:8080;
+ grpc_pass grpcs://{{ managerip }}:8080;
grpc_set_header Host $host;
grpc_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_buffering off;
@@ -110,7 +110,7 @@ http {
#include /etc/nginx/default.d/*.conf;
location ~* (^/login/|^/js/.*|^/css/.*|^/images/.*) {
- proxy_pass http://{{ masterip }}:9822;
+ proxy_pass http://{{ managerip }}:9822;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -124,7 +124,7 @@ http {
location / {
auth_request /auth/sessions/whoami;
- proxy_pass http://{{ masterip }}:9822/;
+ proxy_pass http://{{ managerip }}:9822/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -138,7 +138,7 @@ http {
location ~ ^/auth/.*?(whoami|login|logout|settings) {
rewrite /auth/(.*) /$1 break;
- proxy_pass http://{{ masterip }}:4433;
+ proxy_pass http://{{ managerip }}:4433;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -185,7 +185,7 @@ http {
location /grafana/ {
auth_request /auth/sessions/whoami;
rewrite /grafana/(.*) /$1 break;
- proxy_pass http://{{ masterip }}:3000/;
+ proxy_pass http://{{ managerip }}:3000/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -198,7 +198,7 @@ http {
location /kibana/ {
auth_request /auth/sessions/whoami;
rewrite /kibana/(.*) /$1 break;
- proxy_pass http://{{ masterip }}:5601/;
+ proxy_pass http://{{ managerip }}:5601/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -209,7 +209,7 @@ http {
}
location /nodered/ {
- proxy_pass http://{{ masterip }}:1880/;
+ proxy_pass http://{{ managerip }}:1880/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -222,7 +222,7 @@ http {
}
location /playbook/ {
- proxy_pass http://{{ masterip }}:3200/playbook/;
+ proxy_pass http://{{ managerip }}:3200/playbook/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -238,7 +238,7 @@ http {
}
{%- else %}
location /fleet/ {
- proxy_pass https://{{ masterip }}:8080;
+ proxy_pass https://{{ managerip }}:8080;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -250,7 +250,7 @@ http {
{%- endif %}
location /thehive/ {
- proxy_pass http://{{ masterip }}:9000/thehive/;
+ proxy_pass http://{{ managerip }}:9000/thehive/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_http_version 1.1; # this is essential for chunked responses to work
@@ -262,7 +262,7 @@ http {
}
location /cortex/ {
- proxy_pass http://{{ masterip }}:9001/cortex/;
+ proxy_pass http://{{ managerip }}:9001/cortex/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_http_version 1.1; # this is essential for chunked responses to work
@@ -274,7 +274,7 @@ http {
}
location /soctopus/ {
- proxy_pass http://{{ masterip }}:7000/;
+ proxy_pass http://{{ managerip }}:7000/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
@@ -297,7 +297,7 @@ http {
}
location /sensoroniagents/ {
- proxy_pass http://{{ masterip }}:9822/;
+ proxy_pass http://{{ managerip }}:9822/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_set_header Host $host;
diff --git a/salt/nginx/files/navigator_config.json b/salt/nginx/files/navigator_config.json
index 7e132cbf8..0c69995e3 100644
--- a/salt/nginx/files/navigator_config.json
+++ b/salt/nginx/files/navigator_config.json
@@ -1,4 +1,4 @@
-{%- set ip = salt['pillar.get']('static:masterip', '') %}
+{%- set ip = salt['pillar.get']('static:managerip', '') %}
{
"enterprise_attack_url": "https://raw.githubusercontent.com/mitre/cti/master/enterprise-attack/enterprise-attack.json",
diff --git a/salt/nginx/init.sls b/salt/nginx/init.sls
index 73f14a7ed..8bbdced0f 100644
--- a/salt/nginx/init.sls
+++ b/salt/nginx/init.sls
@@ -1,6 +1,6 @@
-{% set FLEETMASTER = salt['pillar.get']('static:fleet_master', False) %}
+{% set FLEETMANAGER = salt['pillar.get']('static:fleet_manager', False) %}
{% set FLEETNODE = salt['pillar.get']('static:fleet_node', False) %}
-{% set MASTER = salt['grains.get']('master') %}
+{% set MANAGER = salt['grains.get']('master') %}
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
# Drop the correct nginx config based on role
@@ -61,15 +61,15 @@ navigatordefaultlayer:
so-nginx:
docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-nginx:{{ VERSION }}
+ - image: {{ MANAGER }}:5000/soshybridhunter/so-nginx:{{ VERSION }}
- hostname: so-nginx
- binds:
- /opt/so/conf/nginx/nginx.conf:/etc/nginx/nginx.conf:ro
- /opt/so/log/nginx/:/var/log/nginx:rw
- /opt/so/tmp/nginx/:/var/lib/nginx:rw
- /opt/so/tmp/nginx/:/run:rw
- - /etc/pki/masterssl.crt:/etc/pki/nginx/server.crt:ro
- - /etc/pki/masterssl.key:/etc/pki/nginx/server.key:ro
+ - /etc/pki/managerssl.crt:/etc/pki/nginx/server.crt:ro
+ - /etc/pki/managerssl.key:/etc/pki/nginx/server.key:ro
- /opt/so/conf/fleet/packages:/opt/socore/html/packages
# ATT&CK Navigator binds
- /opt/so/conf/navigator/navigator_config.json:/opt/socore/html/navigator/assets/config.json:ro
@@ -78,7 +78,7 @@ so-nginx:
- port_bindings:
- 80:80
- 443:443
- {%- if FLEETMASTER or FLEETNODE %}
+ {%- if FLEETMANAGER or FLEETNODE %}
- 8090:8090
{%- endif %}
- watch:
diff --git a/salt/nodered/files/nodered_load_flows b/salt/nodered/files/nodered_load_flows
index 5617b1022..985c1c49a 100644
--- a/salt/nodered/files/nodered_load_flows
+++ b/salt/nodered/files/nodered_load_flows
@@ -1,4 +1,4 @@
-{%- set ip = salt['pillar.get']('static:masterip', '') -%}
+{%- set ip = salt['pillar.get']('static:managerip', '') -%}
#!/bin/bash
default_salt_dir=/opt/so/saltstack/default
diff --git a/salt/nodered/files/so_flows.json b/salt/nodered/files/so_flows.json
index 8ab8cbf81..ad780ceb9 100644
--- a/salt/nodered/files/so_flows.json
+++ b/salt/nodered/files/so_flows.json
@@ -1,4 +1,4 @@
-{%- set MASTERIP = salt['pillar.get']('static:masterip', '') -%}
+{%- set MANAGERIP = salt['pillar.get']('static:managerip', '') -%}
{%- set HIVEKEY = salt['pillar.get']('static:hivekey', '') -%}
{%- set CORTEXKEY = salt['pillar.get']('static:cortexkey', '') -%}
-[{"id":"dca608c3.7d8af8","type":"tab","label":"TheHive - Webhook Events","disabled":false,"info":""},{"id":"4db74fa6.2556d","type":"tls-config","z":"","name":"","cert":"","key":"","ca":"","certname":"","keyname":"","caname":"","servername":"","verifyservercert":false},{"id":"aa6cf50d.a02fc8","type":"http in","z":"dca608c3.7d8af8","name":"TheHive Listener","url":"/thehive","method":"post","upload":false,"swaggerDoc":"","x":120,"y":780,"wires":[["2b92aebb.853dc2","2fce29bb.1b1376","82ad0f08.7a53f"]]},{"id":"2b92aebb.853dc2","type":"debug","z":"dca608c3.7d8af8","name":"","active":true,"tosidebar":true,"console":false,"tostatus":false,"complete":"payload","targetType":"msg","x":470,"y":940,"wires":[]},{"id":"a4ecb84a.805958","type":"switch","z":"dca608c3.7d8af8","name":"Operation","property":"payload.operation","propertyType":"msg","rules":[{"t":"eq","v":"Creation","vt":"str"},{"t":"eq","v":"Update","vt":"str"},{"t":"eq","v":"Delete","vt":"str"}],"checkall":"false","repair":false,"outputs":3,"x":580,"y":780,"wires":[["f1e954fd.3c21d8"],["65928861.c90a48"],["a259a26c.a21"]],"outputLabels":["Creation","Update","Delete"]},{"id":"f1e954fd.3c21d8","type":"switch","z":"dca608c3.7d8af8","name":"Creation","property":"payload.objectType","propertyType":"msg","rules":[{"t":"eq","v":"case","vt":"str"},{"t":"eq","v":"case_artifact","vt":"str"},{"t":"eq","v":"case_task","vt":"str"},{"t":"eq","v":"case_task_log","vt":"str"},{"t":"eq","v":"case_artifact_job","vt":"str"},{"t":"eq","v":"alert","vt":"str"},{"t":"eq","v":"user","vt":"str"}],"checkall":"false","repair":false,"outputs":7,"x":900,"y":480,"wires":[["e88b4cc2.f6afe"],["8c54e39.a1b4f2"],["64203fe8.e0ad5"],["3511de51.889a02"],["14544a8b.b6b2f5"],["44c595a4.45d45c"],["3eb4bedf.6e20a2"]],"inputLabels":["Operation"],"outputLabels":["case","case_artifact","case_task","case_task_log","action","alert","user"],"info":"No webhook data is received for the following events:\n\n- Creation of Dashboard\n- Creation of Case 
Templates\n"},{"id":"65928861.c90a48","type":"switch","z":"dca608c3.7d8af8","name":"Update","property":"payload.objectType","propertyType":"msg","rules":[{"t":"eq","v":"case","vt":"str"},{"t":"eq","v":"case_artifact","vt":"str"},{"t":"eq","v":"case_artifact_job","vt":"str"},{"t":"eq","v":"case_task","vt":"str"},{"t":"eq","v":"case_task_log","vt":"str"},{"t":"eq","v":"alert","vt":"str"},{"t":"eq","v":"user","vt":"str"}],"checkall":"false","repair":false,"outputs":7,"x":900,"y":860,"wires":[["eebe1748.1cd348"],["d703adc0.12fd1"],["2b738415.408d4c"],["6d97371a.406348"],["4ae621e1.9ae6"],["5786cee2.98109"],["54077728.447648"]],"inputLabels":["Operation"],"outputLabels":["case","case_artifact",null,"case_task","case_task_log","alert","user"]},{"id":"a259a26c.a21","type":"switch","z":"dca608c3.7d8af8","name":"Delete","property":"payload.objectType","propertyType":"msg","rules":[{"t":"eq","v":"case","vt":"str"},{"t":"eq","v":"case_artifact","vt":"str"},{"t":"eq","v":"case_task_log","vt":"str"}],"checkall":"false","repair":false,"outputs":3,"x":890,"y":1200,"wires":[["60c8bcfb.eff1f4"],["df708bab.348308"],["e9a8650c.e20cc8"]],"outputLabels":["case","case_artifact",""],"info":"Deleting a case task doesnt actually trigger a delete event. 
It triggers an `update` event where the status = cancelled"},{"id":"54077728.447648","type":"switch","z":"dca608c3.7d8af8","name":"User","property":"payload.object.status","propertyType":"msg","rules":[{"t":"eq","v":"Locked","vt":"str"},{"t":"eq","v":"Ok","vt":"str"}],"checkall":"false","repair":false,"outputs":2,"x":1130,"y":980,"wires":[["9429d6c5.5ac788"],["4e3e091c.d35388"]]},{"id":"9429d6c5.5ac788","type":"function","z":"dca608c3.7d8af8","name":"status: Locked","func":"msg.topic = \"[The Hive] A user account was locked\";\nmsg.from = \"from@example.com\";\nmsg.to = \"to@example.com\";\nreturn msg;","outputs":1,"noerr":0,"x":1380,"y":972,"wires":[[]],"info":"- User account was locked"},{"id":"4e3e091c.d35388","type":"function","z":"dca608c3.7d8af8","name":"status: Ok","func":"msg.topic = \"[The Hive] A user account was changed\";\nmsg.from = \"from@example.com\";\nmsg.to = \"to@example.com\";\nreturn msg;","outputs":1,"noerr":0,"x":1360,"y":1020,"wires":[[]],"info":"- User account was unlocked\n- User description was changed\n- User role was changed\n- User API key was added\n- User API key was revoked\n"},{"id":"485f3be.1ffcfc4","type":"function","z":"dca608c3.7d8af8","name":"status: Open","func":"// Fires when a Case is updated AND status = open\n// This can include things like TLP/PAP changes\n\nreturn msg;","outputs":1,"noerr":0,"x":1370,"y":660,"wires":[[]]},{"id":"eebe1748.1cd348","type":"switch","z":"dca608c3.7d8af8","name":"case","property":"payload.object.status","propertyType":"msg","rules":[{"t":"eq","v":"Open","vt":"str"}],"checkall":"true","repair":false,"outputs":1,"x":1130,"y":740,"wires":[["485f3be.1ffcfc4","e4b7b4bf.2fb828"]],"info":"- A case was modified"},{"id":"8c54e39.a1b4f2","type":"switch","z":"dca608c3.7d8af8","name":"case_artifact: Run 
Analyzer","property":"payload.object.dataType","propertyType":"msg","rules":[{"t":"eq","v":"ip","vt":"str"},{"t":"eq","v":"domain","vt":"str"}],"checkall":"true","repair":false,"outputs":2,"x":1600,"y":340,"wires":[["eb8cfeb7.a7118","a5dd8a8a.065b88"],["eb8cfeb7.a7118","a5dd8a8a.065b88"]],"info":"# References\n\n\n"},{"id":"2fce29bb.1b1376","type":"function","z":"dca608c3.7d8af8","name":"Add headers","func":"msg.thehive_url = 'https://{{ MASTERIP }}/thehive';\nmsg.cortex_url = 'https://{{ MASTERIP }}/cortex';\nmsg.cortex_id = 'CORTEX-SERVER-ID';\nreturn msg;","outputs":1,"noerr":0,"x":350,"y":780,"wires":[["a4ecb84a.805958"]]},{"id":"e4b7b4bf.2fb828","type":"function","z":"dca608c3.7d8af8","name":"status: Resolved","func":"// Fires when a case is closed (resolved)\n\nreturn msg;","outputs":1,"noerr":0,"x":1390,"y":720,"wires":[[]]},{"id":"e88b4cc2.f6afe","type":"function","z":"dca608c3.7d8af8","name":"case","func":"// Fires when a case is created\n// or when a responder is generated against a case\n\nreturn msg;","outputs":1,"noerr":0,"x":1130,"y":320,"wires":[[]]},{"id":"64203fe8.e0ad5","type":"function","z":"dca608c3.7d8af8","name":"case_task","func":"// Fires when a case task is created\nreturn msg;","outputs":1,"noerr":0,"x":1140,"y":400,"wires":[[]]},{"id":"3511de51.889a02","type":"function","z":"dca608c3.7d8af8","name":"case_task_log","func":"// Fires when a case task log is created\n\nreturn msg;","outputs":1,"noerr":0,"x":1163,"y":440,"wires":[[]]},{"id":"14544a8b.b6b2f5","type":"function","z":"dca608c3.7d8af8","name":"case_artifact_job","func":"// Fires when a Responder or Analyzser is Run on an existing observable\n\nreturn msg;","outputs":1,"noerr":0,"x":1173,"y":480,"wires":[[]]},{"id":"2b738415.408d4c","type":"function","z":"dca608c3.7d8af8","name":"case_artifact_job","func":"\nreturn msg;","outputs":1,"noerr":0,"x":1170,"y":820,"wires":[[]]},{"id":"3eb4bedf.6e20a2","type":"function","z":"dca608c3.7d8af8","name":"user","func":"// Fires when a user is 
created\n\nreturn msg;","outputs":1,"noerr":0,"x":1133,"y":560,"wires":[[]]},{"id":"d703adc0.12fd1","type":"function","z":"dca608c3.7d8af8","name":"case_artifact","func":"// Fires when an artifact is updated\nreturn msg;","outputs":1,"noerr":0,"x":1150,"y":780,"wires":[[]]},{"id":"6d97371a.406348","type":"function","z":"dca608c3.7d8af8","name":"case_task","func":"// Fires when a case task is updated\nreturn msg;","outputs":1,"noerr":0,"x":1140,"y":860,"wires":[[]]},{"id":"4ae621e1.9ae6","type":"function","z":"dca608c3.7d8af8","name":"case_task_log","func":"//Fires when a case_task_log is updated\n\nreturn msg;","outputs":1,"noerr":0,"x":1160,"y":900,"wires":[[]]},{"id":"60c8bcfb.eff1f4","type":"function","z":"dca608c3.7d8af8","name":"case","func":"//Fires when a case is deleted\nreturn msg;","outputs":1,"noerr":0,"x":1130,"y":1160,"wires":[[]]},{"id":"df708bab.348308","type":"function","z":"dca608c3.7d8af8","name":"case_artifact","func":"//Fires when a case_artifact is deleted\nreturn msg;","outputs":1,"noerr":0,"x":1150,"y":1200,"wires":[[]]},{"id":"e9a8650c.e20cc8","type":"function","z":"dca608c3.7d8af8","name":"case_task_log","func":"//Fires when a case_task_log is deleted\nreturn msg;","outputs":1,"noerr":0,"x":1160,"y":1240,"wires":[[]]},{"id":"5786cee2.98109","type":"function","z":"dca608c3.7d8af8","name":"alert","func":"//Fires when an alert is updated\nreturn msg;","outputs":1,"noerr":0,"x":1130,"y":940,"wires":[[]]},{"id":"44c595a4.45d45c","type":"change","z":"dca608c3.7d8af8","d":true,"name":"Convert Alert Msg to Artifacts","rules":[{"t":"move","p":"payload.object.artifacts","pt":"msg","to":"payload","tot":"msg"}],"action":"","property":"","from":"","to":"","reg":false,"x":1200,"y":520,"wires":[["6dcca25e.04bd2c"]]},{"id":"6dcca25e.04bd2c","type":"split","z":"dca608c3.7d8af8","name":"Split 
Artifacts","splt":"\\n","spltType":"str","arraySplt":1,"arraySpltType":"len","stream":false,"addname":"","x":1430,"y":520,"wires":[["767c84f2.c9ba2c"]]},{"id":"767c84f2.c9ba2c","type":"switch","z":"dca608c3.7d8af8","name":"alert: Run Analyzer","property":"payload.dataType","propertyType":"msg","rules":[{"t":"eq","v":"ip","vt":"str"},{"t":"eq","v":"domain","vt":"str"}],"checkall":"true","repair":false,"outputs":2,"x":1630,"y":400,"wires":[["eb8cfeb7.a7118","a5dd8a8a.065b88"],["a5dd8a8a.065b88","eb8cfeb7.a7118"]],"info":"# References\n\n\n"},{"id":"82ad0f08.7a53f","type":"http response","z":"dca608c3.7d8af8","name":"Ack Event Receipt","statusCode":"200","headers":{},"x":250,"y":940,"wires":[]},{"id":"a5dd8a8a.065b88","type":"function","z":"dca608c3.7d8af8","name":"Run Analyzer: CERT DNS","func":"msg.analyzer_id = \"4f28afc20d78f98df425e36e561af33f\";\n\nif (msg.payload.objectId) {\n msg.tag = \"case_artifact\"\n msg.artifact_id = msg.payload.objectId\n msg.url = msg.thehive_url + '/api/connector/cortex/job';\n msg.payload = {\n 'cortexId' : msg.cortex_id,\n 'artifactId': msg.artifact_id,\n 'analyzerId': msg.analyzer_id\n };\n}\nelse {\n msg.tag = \"observable\"\n msg.observable = msg.payload.data\n msg.dataType = msg.payload.dataType\n\n msg.url = msg.cortex_url + '/api/analyzer/' + msg.analyzer_id + '/run';\n msg.payload = {\n 'data' : msg.observable,\n 'dataType': msg.dataType \n };\n}\nreturn msg;","outputs":1,"noerr":0,"x":1930,"y":420,"wires":[["f050a09f.b2201"]]},{"id":"eb8cfeb7.a7118","type":"function","z":"dca608c3.7d8af8","name":"Run Analyzer: Urlscan","func":"msg.analyzer_id = \"54e51b62c6c8ddc3cbc3cbdd889a0557\";\n\nif (msg.payload.objectId) {\n msg.tag = \"case_artifact\"\n msg.artifact_id = msg.payload.objectId\n msg.url = msg.thehive_url + '/api/connector/cortex/job';\n msg.payload = {\n 'cortexId' : msg.cortex_id,\n 'artifactId': msg.artifact_id,\n 'analyzerId': msg.analyzer_id\n };\n}\nelse {\n msg.tag = \"observable\"\n msg.observable = 
msg.payload.data\n msg.dataType = msg.payload.dataType\n\n msg.url = msg.cortex_url + '/api/analyzer/' + msg.analyzer_id + '/run';\n msg.payload = {\n 'data' : msg.observable,\n 'dataType': msg.dataType \n };\n}\nreturn msg;","outputs":1,"noerr":0,"x":1920,"y":320,"wires":[["f050a09f.b2201"]]},{"id":"1c448528.3032fb","type":"http request","z":"dca608c3.7d8af8","name":"Submit to Cortex","method":"POST","ret":"obj","paytoqs":false,"url":"","tls":"4db74fa6.2556d","persist":false,"proxy":"","authType":"bearer","credentials": {"user": "", "password": "{{ CORTEXKEY }}"},"x":2450,"y":420,"wires":[["ea6614fb.752a78"]]},{"id":"ea6614fb.752a78","type":"debug","z":"dca608c3.7d8af8","name":"Debug","active":true,"tosidebar":true,"console":false,"tostatus":false,"complete":"true","targetType":"full","x":2670,"y":360,"wires":[]},{"id":"f050a09f.b2201","type":"switch","z":"dca608c3.7d8af8","name":"Cases vs Alerts","property":"tag","propertyType":"msg","rules":[{"t":"eq","v":"case_artifact","vt":"str"},{"t":"eq","v":"observable","vt":"str"}],"checkall":"true","repair":false,"outputs":2,"x":2200,"y":360,"wires":[["f7fca977.a73b28"],["1c448528.3032fb"]],"inputLabels":["Data"],"outputLabels":["Cases","Alerts"]},{"id":"f7fca977.a73b28","type":"http request","z":"dca608c3.7d8af8","name":"Submit to TheHive","method":"POST","ret":"obj","paytoqs":false,"url":"","tls":"4db74fa6.2556d","persist":false,"proxy":"","authType":"bearer","credentials": {"user": "", "password": "{{ HIVEKEY }}"},"x":2450,"y":280,"wires":[["ea6614fb.752a78"]]}]
+[{"id":"dca608c3.7d8af8","type":"tab","label":"TheHive - Webhook Events","disabled":false,"info":""},{"id":"4db74fa6.2556d","type":"tls-config","z":"","name":"","cert":"","key":"","ca":"","certname":"","keyname":"","caname":"","servername":"","verifyservercert":false},{"id":"aa6cf50d.a02fc8","type":"http in","z":"dca608c3.7d8af8","name":"TheHive Listener","url":"/thehive","method":"post","upload":false,"swaggerDoc":"","x":120,"y":780,"wires":[["2b92aebb.853dc2","2fce29bb.1b1376","82ad0f08.7a53f"]]},{"id":"2b92aebb.853dc2","type":"debug","z":"dca608c3.7d8af8","name":"","active":true,"tosidebar":true,"console":false,"tostatus":false,"complete":"payload","targetType":"msg","x":470,"y":940,"wires":[]},{"id":"a4ecb84a.805958","type":"switch","z":"dca608c3.7d8af8","name":"Operation","property":"payload.operation","propertyType":"msg","rules":[{"t":"eq","v":"Creation","vt":"str"},{"t":"eq","v":"Update","vt":"str"},{"t":"eq","v":"Delete","vt":"str"}],"checkall":"false","repair":false,"outputs":3,"x":580,"y":780,"wires":[["f1e954fd.3c21d8"],["65928861.c90a48"],["a259a26c.a21"]],"outputLabels":["Creation","Update","Delete"]},{"id":"f1e954fd.3c21d8","type":"switch","z":"dca608c3.7d8af8","name":"Creation","property":"payload.objectType","propertyType":"msg","rules":[{"t":"eq","v":"case","vt":"str"},{"t":"eq","v":"case_artifact","vt":"str"},{"t":"eq","v":"case_task","vt":"str"},{"t":"eq","v":"case_task_log","vt":"str"},{"t":"eq","v":"case_artifact_job","vt":"str"},{"t":"eq","v":"alert","vt":"str"},{"t":"eq","v":"user","vt":"str"}],"checkall":"false","repair":false,"outputs":7,"x":900,"y":480,"wires":[["e88b4cc2.f6afe"],["8c54e39.a1b4f2"],["64203fe8.e0ad5"],["3511de51.889a02"],["14544a8b.b6b2f5"],["44c595a4.45d45c"],["3eb4bedf.6e20a2"]],"inputLabels":["Operation"],"outputLabels":["case","case_artifact","case_task","case_task_log","action","alert","user"],"info":"No webhook data is received for the following events:\n\n- Creation of Dashboard\n- Creation of Case 
Templates\n"},{"id":"65928861.c90a48","type":"switch","z":"dca608c3.7d8af8","name":"Update","property":"payload.objectType","propertyType":"msg","rules":[{"t":"eq","v":"case","vt":"str"},{"t":"eq","v":"case_artifact","vt":"str"},{"t":"eq","v":"case_artifact_job","vt":"str"},{"t":"eq","v":"case_task","vt":"str"},{"t":"eq","v":"case_task_log","vt":"str"},{"t":"eq","v":"alert","vt":"str"},{"t":"eq","v":"user","vt":"str"}],"checkall":"false","repair":false,"outputs":7,"x":900,"y":860,"wires":[["eebe1748.1cd348"],["d703adc0.12fd1"],["2b738415.408d4c"],["6d97371a.406348"],["4ae621e1.9ae6"],["5786cee2.98109"],["54077728.447648"]],"inputLabels":["Operation"],"outputLabels":["case","case_artifact",null,"case_task","case_task_log","alert","user"]},{"id":"a259a26c.a21","type":"switch","z":"dca608c3.7d8af8","name":"Delete","property":"payload.objectType","propertyType":"msg","rules":[{"t":"eq","v":"case","vt":"str"},{"t":"eq","v":"case_artifact","vt":"str"},{"t":"eq","v":"case_task_log","vt":"str"}],"checkall":"false","repair":false,"outputs":3,"x":890,"y":1200,"wires":[["60c8bcfb.eff1f4"],["df708bab.348308"],["e9a8650c.e20cc8"]],"outputLabels":["case","case_artifact",""],"info":"Deleting a case task doesnt actually trigger a delete event. 
It triggers an `update` event where the status = cancelled"},{"id":"54077728.447648","type":"switch","z":"dca608c3.7d8af8","name":"User","property":"payload.object.status","propertyType":"msg","rules":[{"t":"eq","v":"Locked","vt":"str"},{"t":"eq","v":"Ok","vt":"str"}],"checkall":"false","repair":false,"outputs":2,"x":1130,"y":980,"wires":[["9429d6c5.5ac788"],["4e3e091c.d35388"]]},{"id":"9429d6c5.5ac788","type":"function","z":"dca608c3.7d8af8","name":"status: Locked","func":"msg.topic = \"[The Hive] A user account was locked\";\nmsg.from = \"from@example.com\";\nmsg.to = \"to@example.com\";\nreturn msg;","outputs":1,"noerr":0,"x":1380,"y":972,"wires":[[]],"info":"- User account was locked"},{"id":"4e3e091c.d35388","type":"function","z":"dca608c3.7d8af8","name":"status: Ok","func":"msg.topic = \"[The Hive] A user account was changed\";\nmsg.from = \"from@example.com\";\nmsg.to = \"to@example.com\";\nreturn msg;","outputs":1,"noerr":0,"x":1360,"y":1020,"wires":[[]],"info":"- User account was unlocked\n- User description was changed\n- User role was changed\n- User API key was added\n- User API key was revoked\n"},{"id":"485f3be.1ffcfc4","type":"function","z":"dca608c3.7d8af8","name":"status: Open","func":"// Fires when a Case is updated AND status = open\n// This can include things like TLP/PAP changes\n\nreturn msg;","outputs":1,"noerr":0,"x":1370,"y":660,"wires":[[]]},{"id":"eebe1748.1cd348","type":"switch","z":"dca608c3.7d8af8","name":"case","property":"payload.object.status","propertyType":"msg","rules":[{"t":"eq","v":"Open","vt":"str"}],"checkall":"true","repair":false,"outputs":1,"x":1130,"y":740,"wires":[["485f3be.1ffcfc4","e4b7b4bf.2fb828"]],"info":"- A case was modified"},{"id":"8c54e39.a1b4f2","type":"switch","z":"dca608c3.7d8af8","name":"case_artifact: Run 
Analyzer","property":"payload.object.dataType","propertyType":"msg","rules":[{"t":"eq","v":"ip","vt":"str"},{"t":"eq","v":"domain","vt":"str"}],"checkall":"true","repair":false,"outputs":2,"x":1600,"y":340,"wires":[["eb8cfeb7.a7118","a5dd8a8a.065b88"],["eb8cfeb7.a7118","a5dd8a8a.065b88"]],"info":"# References\n\n\n"},{"id":"2fce29bb.1b1376","type":"function","z":"dca608c3.7d8af8","name":"Add headers","func":"msg.thehive_url = 'https://{{ MANAGERIP }}/thehive';\nmsg.cortex_url = 'https://{{ MANAGERIP }}/cortex';\nmsg.cortex_id = 'CORTEX-SERVER-ID';\nreturn msg;","outputs":1,"noerr":0,"x":350,"y":780,"wires":[["a4ecb84a.805958"]]},{"id":"e4b7b4bf.2fb828","type":"function","z":"dca608c3.7d8af8","name":"status: Resolved","func":"// Fires when a case is closed (resolved)\n\nreturn msg;","outputs":1,"noerr":0,"x":1390,"y":720,"wires":[[]]},{"id":"e88b4cc2.f6afe","type":"function","z":"dca608c3.7d8af8","name":"case","func":"// Fires when a case is created\n// or when a responder is generated against a case\n\nreturn msg;","outputs":1,"noerr":0,"x":1130,"y":320,"wires":[[]]},{"id":"64203fe8.e0ad5","type":"function","z":"dca608c3.7d8af8","name":"case_task","func":"// Fires when a case task is created\nreturn msg;","outputs":1,"noerr":0,"x":1140,"y":400,"wires":[[]]},{"id":"3511de51.889a02","type":"function","z":"dca608c3.7d8af8","name":"case_task_log","func":"// Fires when a case task log is created\n\nreturn msg;","outputs":1,"noerr":0,"x":1163,"y":440,"wires":[[]]},{"id":"14544a8b.b6b2f5","type":"function","z":"dca608c3.7d8af8","name":"case_artifact_job","func":"// Fires when a Responder or Analyzser is Run on an existing observable\n\nreturn msg;","outputs":1,"noerr":0,"x":1173,"y":480,"wires":[[]]},{"id":"2b738415.408d4c","type":"function","z":"dca608c3.7d8af8","name":"case_artifact_job","func":"\nreturn msg;","outputs":1,"noerr":0,"x":1170,"y":820,"wires":[[]]},{"id":"3eb4bedf.6e20a2","type":"function","z":"dca608c3.7d8af8","name":"user","func":"// Fires when a user is 
created\n\nreturn msg;","outputs":1,"noerr":0,"x":1133,"y":560,"wires":[[]]},{"id":"d703adc0.12fd1","type":"function","z":"dca608c3.7d8af8","name":"case_artifact","func":"// Fires when an artifact is updated\nreturn msg;","outputs":1,"noerr":0,"x":1150,"y":780,"wires":[[]]},{"id":"6d97371a.406348","type":"function","z":"dca608c3.7d8af8","name":"case_task","func":"// Fires when a case task is updated\nreturn msg;","outputs":1,"noerr":0,"x":1140,"y":860,"wires":[[]]},{"id":"4ae621e1.9ae6","type":"function","z":"dca608c3.7d8af8","name":"case_task_log","func":"//Fires when a case_task_log is updated\n\nreturn msg;","outputs":1,"noerr":0,"x":1160,"y":900,"wires":[[]]},{"id":"60c8bcfb.eff1f4","type":"function","z":"dca608c3.7d8af8","name":"case","func":"//Fires when a case is deleted\nreturn msg;","outputs":1,"noerr":0,"x":1130,"y":1160,"wires":[[]]},{"id":"df708bab.348308","type":"function","z":"dca608c3.7d8af8","name":"case_artifact","func":"//Fires when a case_artifact is deleted\nreturn msg;","outputs":1,"noerr":0,"x":1150,"y":1200,"wires":[[]]},{"id":"e9a8650c.e20cc8","type":"function","z":"dca608c3.7d8af8","name":"case_task_log","func":"//Fires when a case_task_log is deleted\nreturn msg;","outputs":1,"noerr":0,"x":1160,"y":1240,"wires":[[]]},{"id":"5786cee2.98109","type":"function","z":"dca608c3.7d8af8","name":"alert","func":"//Fires when an alert is updated\nreturn msg;","outputs":1,"noerr":0,"x":1130,"y":940,"wires":[[]]},{"id":"44c595a4.45d45c","type":"change","z":"dca608c3.7d8af8","d":true,"name":"Convert Alert Msg to Artifacts","rules":[{"t":"move","p":"payload.object.artifacts","pt":"msg","to":"payload","tot":"msg"}],"action":"","property":"","from":"","to":"","reg":false,"x":1200,"y":520,"wires":[["6dcca25e.04bd2c"]]},{"id":"6dcca25e.04bd2c","type":"split","z":"dca608c3.7d8af8","name":"Split 
Artifacts","splt":"\\n","spltType":"str","arraySplt":1,"arraySpltType":"len","stream":false,"addname":"","x":1430,"y":520,"wires":[["767c84f2.c9ba2c"]]},{"id":"767c84f2.c9ba2c","type":"switch","z":"dca608c3.7d8af8","name":"alert: Run Analyzer","property":"payload.dataType","propertyType":"msg","rules":[{"t":"eq","v":"ip","vt":"str"},{"t":"eq","v":"domain","vt":"str"}],"checkall":"true","repair":false,"outputs":2,"x":1630,"y":400,"wires":[["eb8cfeb7.a7118","a5dd8a8a.065b88"],["a5dd8a8a.065b88","eb8cfeb7.a7118"]],"info":"# References\n\n\n"},{"id":"82ad0f08.7a53f","type":"http response","z":"dca608c3.7d8af8","name":"Ack Event Receipt","statusCode":"200","headers":{},"x":250,"y":940,"wires":[]},{"id":"a5dd8a8a.065b88","type":"function","z":"dca608c3.7d8af8","name":"Run Analyzer: CERT DNS","func":"msg.analyzer_id = \"4f28afc20d78f98df425e36e561af33f\";\n\nif (msg.payload.objectId) {\n msg.tag = \"case_artifact\"\n msg.artifact_id = msg.payload.objectId\n msg.url = msg.thehive_url + '/api/connector/cortex/job';\n msg.payload = {\n 'cortexId' : msg.cortex_id,\n 'artifactId': msg.artifact_id,\n 'analyzerId': msg.analyzer_id\n };\n}\nelse {\n msg.tag = \"observable\"\n msg.observable = msg.payload.data\n msg.dataType = msg.payload.dataType\n\n msg.url = msg.cortex_url + '/api/analyzer/' + msg.analyzer_id + '/run';\n msg.payload = {\n 'data' : msg.observable,\n 'dataType': msg.dataType \n };\n}\nreturn msg;","outputs":1,"noerr":0,"x":1930,"y":420,"wires":[["f050a09f.b2201"]]},{"id":"eb8cfeb7.a7118","type":"function","z":"dca608c3.7d8af8","name":"Run Analyzer: Urlscan","func":"msg.analyzer_id = \"54e51b62c6c8ddc3cbc3cbdd889a0557\";\n\nif (msg.payload.objectId) {\n msg.tag = \"case_artifact\"\n msg.artifact_id = msg.payload.objectId\n msg.url = msg.thehive_url + '/api/connector/cortex/job';\n msg.payload = {\n 'cortexId' : msg.cortex_id,\n 'artifactId': msg.artifact_id,\n 'analyzerId': msg.analyzer_id\n };\n}\nelse {\n msg.tag = \"observable\"\n msg.observable = 
msg.payload.data\n msg.dataType = msg.payload.dataType\n\n msg.url = msg.cortex_url + '/api/analyzer/' + msg.analyzer_id + '/run';\n msg.payload = {\n 'data' : msg.observable,\n 'dataType': msg.dataType \n };\n}\nreturn msg;","outputs":1,"noerr":0,"x":1920,"y":320,"wires":[["f050a09f.b2201"]]},{"id":"1c448528.3032fb","type":"http request","z":"dca608c3.7d8af8","name":"Submit to Cortex","method":"POST","ret":"obj","paytoqs":false,"url":"","tls":"4db74fa6.2556d","persist":false,"proxy":"","authType":"bearer","credentials": {"user": "", "password": "{{ CORTEXKEY }}"},"x":2450,"y":420,"wires":[["ea6614fb.752a78"]]},{"id":"ea6614fb.752a78","type":"debug","z":"dca608c3.7d8af8","name":"Debug","active":true,"tosidebar":true,"console":false,"tostatus":false,"complete":"true","targetType":"full","x":2670,"y":360,"wires":[]},{"id":"f050a09f.b2201","type":"switch","z":"dca608c3.7d8af8","name":"Cases vs Alerts","property":"tag","propertyType":"msg","rules":[{"t":"eq","v":"case_artifact","vt":"str"},{"t":"eq","v":"observable","vt":"str"}],"checkall":"true","repair":false,"outputs":2,"x":2200,"y":360,"wires":[["f7fca977.a73b28"],["1c448528.3032fb"]],"inputLabels":["Data"],"outputLabels":["Cases","Alerts"]},{"id":"f7fca977.a73b28","type":"http request","z":"dca608c3.7d8af8","name":"Submit to TheHive","method":"POST","ret":"obj","paytoqs":false,"url":"","tls":"4db74fa6.2556d","persist":false,"proxy":"","authType":"bearer","credentials": {"user": "", "password": "{{ HIVEKEY }}"},"x":2450,"y":280,"wires":[["ea6614fb.752a78"]]}]
diff --git a/salt/pcap/files/sensoroni.json b/salt/pcap/files/sensoroni.json
index ed673d969..81fc4569b 100644
--- a/salt/pcap/files/sensoroni.json
+++ b/salt/pcap/files/sensoroni.json
@@ -1,13 +1,14 @@
-{%- set MASTER = grains['master'] -%}
+{%- set MANAGER = salt['grains.get']('master') -%}
{%- set SENSORONIKEY = salt['pillar.get']('static:sensoronikey', '') -%}
{
"logFilename": "/opt/sensoroni/logs/sensoroni.log",
"logLevel":"debug",
"agent": {
"pollIntervalMs": 10000,
- "serverUrl": "https://{{ MASTER }}/sensoroniagents",
+ "serverUrl": "https://{{ MANAGER }}/sensoroniagents",
"verifyCert": false,
"modules": {
+ "importer": {},
"statickeyauth": {
"apiKey": "{{ SENSORONIKEY }}"
},
diff --git a/salt/pcap/init.sls b/salt/pcap/init.sls
index bcf09b765..dc3db3c21 100644
--- a/salt/pcap/init.sls
+++ b/salt/pcap/init.sls
@@ -13,7 +13,7 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
-{% set MASTER = salt['grains.get']('master') %}
+{% set MANAGER = salt['grains.get']('master') %}
{% set INTERFACE = salt['pillar.get']('sensor:interface', 'bond0') %}
{% set BPF_STENO = salt['pillar.get']('steno:bpf', None) %}
{% set BPF_COMPILED = "" %}
@@ -129,7 +129,7 @@ sensoronilog:
so-steno:
docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-steno:{{ VERSION }}
+ - image: {{ MANAGER }}:5000/soshybridhunter/so-steno:{{ VERSION }}
- network_mode: host
- privileged: True
- port_bindings:
@@ -146,7 +146,7 @@ so-steno:
so-sensoroni:
docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-soc:{{ VERSION }}
+ - image: {{ MANAGER }}:5000/soshybridhunter/so-soc:{{ VERSION }}
- network_mode: host
- binds:
- /opt/so/conf/steno/certs:/etc/stenographer/certs:rw
diff --git a/salt/playbook/init.sls b/salt/playbook/init.sls
index eca8bda40..6de1c121d 100644
--- a/salt/playbook/init.sls
+++ b/salt/playbook/init.sls
@@ -1,7 +1,7 @@
-{% set MASTERIP = salt['pillar.get']('master:mainip', '') %}
+{% set MANAGERIP = salt['pillar.get']('manager:mainip', '') %}
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
-{% set MASTER = salt['grains.get']('master') %}
-{% set MAINIP = salt['grains.get']('ip_interfaces').get(salt['pillar.get']('sensor:mainint', salt['pillar.get']('master:mainint', salt['pillar.get']('elasticsearch:mainint', salt['pillar.get']('host:mainint')))))[0] %}
+{% set MANAGER = salt['grains.get']('master') %}
+{% set MAINIP = salt['grains.get']('ip_interfaces').get(salt['pillar.get']('sensor:mainint', salt['pillar.get']('manager:mainint', salt['pillar.get']('elasticsearch:mainint', salt['pillar.get']('host:mainint')))))[0] %}
{%- set MYSQLPASS = salt['pillar.get']('secrets:mysql', None) -%}
{%- set PLAYBOOKPASS = salt['pillar.get']('secrets:playbook', None) -%}
@@ -40,7 +40,7 @@ query_playbookdbuser_grants:
query_updatwebhooks:
mysql_query.run:
- database: playbook
- - query: "update webhooks set url = 'http://{{MASTERIP}}:7000/playbook/webhook' where project_id = 1"
+ - query: "update webhooks set url = 'http://{{MANAGERIP}}:7000/playbook/webhook' where project_id = 1"
- connection_host: {{ MAINIP }}
- connection_port: 3306
- connection_user: root
@@ -53,8 +53,8 @@ query_updatepluginurls:
update settings set value =
"--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess
project: '1'
- convert_url: http://{{MASTERIP}}:7000/playbook/sigmac
- create_url: http://{{MASTERIP}}:7000/playbook/play"
+ convert_url: http://{{MANAGERIP}}:7000/playbook/sigmac
+ create_url: http://{{MANAGERIP}}:7000/playbook/play"
where id = 43
- connection_host: {{ MAINIP }}
- connection_port: 3306
@@ -73,11 +73,11 @@ playbook_password_none:
so-playbook:
docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-playbook:{{ VERSION }}
+ - image: {{ MANAGER }}:5000/soshybridhunter/so-playbook:{{ VERSION }}
- hostname: playbook
- name: so-playbook
- environment:
- - REDMINE_DB_MYSQL={{ MASTERIP }}
+ - REDMINE_DB_MYSQL={{ MANAGERIP }}
- REDMINE_DB_DATABASE=playbook
- REDMINE_DB_USERNAME=playbookdbuser
- REDMINE_DB_PASSWORD={{ PLAYBOOKPASS }}
diff --git a/salt/reactor/fleet.sls b/salt/reactor/fleet.sls
index 4b09abe0a..c29ab85ed 100644
--- a/salt/reactor/fleet.sls
+++ b/salt/reactor/fleet.sls
@@ -13,7 +13,7 @@ def run():
STATICFILE = f"{LOCAL_SALT_DIR}/pillar/static.sls"
SECRETSFILE = f"{LOCAL_SALT_DIR}/pillar/secrets.sls"
- if MINIONID.split('_')[-1] in ['master','eval','fleet','mastersearch','standalone']:
+ if MINIONID.split('_')[-1] in ['manager','eval','fleet','managersearch','standalone']:
if ACTION == 'enablefleet':
logging.info('so/fleet enablefleet reactor')
@@ -27,7 +27,7 @@ def run():
if ROLE == 'so-fleet':
line = re.sub(r'fleet_node: \S*', f"fleet_node: True", line.rstrip())
else:
- line = re.sub(r'fleet_master: \S*', f"fleet_master: True", line.rstrip())
+ line = re.sub(r'fleet_manager: \S*', f"fleet_manager: True", line.rstrip())
print(line)
# Update the enroll secret in the secrets pillar
@@ -50,7 +50,7 @@ def run():
PACKAGEVERSION = data['data']['current-package-version']
PACKAGEHOSTNAME = data['data']['package-hostname']
- MASTER = data['data']['master']
+ MANAGER = data['data']['manager']
VERSION = data['data']['version']
ESECRET = data['data']['enroll-secret']
@@ -59,7 +59,7 @@ def run():
# Run Docker container that will build the packages
gen_packages = subprocess.run(["docker", "run","--rm", "--mount", f"type=bind,source={LOCAL_SALT_DIR}/salt/fleet/packages,target=/output", \
- "--mount", "type=bind,source=/etc/ssl/certs/intca.crt,target=/var/launcher/launcher.crt", f"{ MASTER }:5000/soshybridhunter/so-fleet-launcher:{ VERSION }", \
+ "--mount", "type=bind,source=/etc/ssl/certs/intca.crt,target=/var/launcher/launcher.crt", f"{ MANAGER }:5000/soshybridhunter/so-fleet-launcher:{ VERSION }", \
f"{ESECRET}", f"{PACKAGEHOSTNAME}:8090", f"{PACKAGEVERSION}.1.1"], stdout=subprocess.PIPE, encoding='ascii')
# Update the 'packages-built' timestamp on the webpage (stored in the static pillar)
diff --git a/salt/redis/init.sls b/salt/redis/init.sls
index 5db53957c..4b61c35ef 100644
--- a/salt/redis/init.sls
+++ b/salt/redis/init.sls
@@ -13,7 +13,7 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
-{% set MASTER = salt['grains.get']('master') %}
+{% set MANAGER = salt['grains.get']('master') %}
# Redis Setup
redisconfdir:
@@ -47,7 +47,7 @@ redisconfsync:
so-redis:
docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-redis:{{ VERSION }}
+ - image: {{ MANAGER }}:5000/soshybridhunter/so-redis:{{ VERSION }}
- hostname: so-redis
- user: socore
- port_bindings:
diff --git a/salt/soc/files/kratos/kratos.yaml b/salt/soc/files/kratos/kratos.yaml
index 7939ec35b..2171971bc 100644
--- a/salt/soc/files/kratos/kratos.yaml
+++ b/salt/soc/files/kratos/kratos.yaml
@@ -1,4 +1,4 @@
-{%- set WEBACCESS = salt['pillar.get']('master:url_base', '') -%}
+{%- set WEBACCESS = salt['pillar.get']('manager:url_base', '') -%}
{%- set KRATOSKEY = salt['pillar.get']('kratos:kratoskey', '') -%}
selfservice:
diff --git a/salt/soc/files/soc/soc.json b/salt/soc/files/soc/soc.json
index 693c44aeb..6b6a84d50 100644
--- a/salt/soc/files/soc/soc.json
+++ b/salt/soc/files/soc/soc.json
@@ -1,4 +1,4 @@
-{%- set MASTERIP = salt['pillar.get']('static:masterip', '') -%}
+{%- set MANAGERIP = salt['pillar.get']('static:managerip', '') -%}
{%- set SENSORONIKEY = salt['pillar.get']('static:sensoronikey', '') -%}
{
"logFilename": "/opt/sensoroni/logs/sensoroni-server.log",
@@ -12,10 +12,10 @@
"jobDir": "jobs"
},
"kratos": {
- "hostUrl": "http://{{ MASTERIP }}:4434/"
+ "hostUrl": "http://{{ MANAGERIP }}:4434/"
},
"elastic": {
- "hostUrl": "http://{{ MASTERIP }}:9200",
+ "hostUrl": "http://{{ MANAGERIP }}:9200",
"username": "",
"password": "",
"verifyCert": false
@@ -83,7 +83,7 @@
},
"queries": [
{ "name": "Default Query", "description": "Show all events grouped by the origin host", "query": "* | groupby observer.name"},
- { "name": "Log Type", "description": "Show all events grouped by type", "query": "* | groupby event.module"},
+ { "name": "Log Type", "description": "Show all events grouped by module and dataset", "query": "* | groupby event.module event.dataset"},
{ "name": "Elastalerts", "description": "", "query": "_type:elastalert | groupby rule.name"},
{ "name": "Alerts", "description": "Show all alerts grouped by alert source", "query": "event.dataset: alert | groupby event.module"},
{ "name": "NIDS Alerts", "description": "Show all NIDS alerts grouped by alert name", "query": "event.category: network AND event.dataset: alert | groupby rule.name"},
@@ -107,6 +107,7 @@
{ "name": "DNS", "description": "DNS queries grouped by response code", "query": "event.dataset:dns | groupby dns.response.code_name destination.port"},
{ "name": "DNS", "description": "DNS highest registered domain", "query": "event.dataset:dns | groupby dns.highest_registered_domain.keyword destination.port"},
{ "name": "DNS", "description": "DNS grouped by parent domain", "query": "event.dataset:dns | groupby dns.parent_domain.keyword destination.port"},
+ { "name": "DPD", "description": "Dynamic Protocol Detection errors", "query": "event.dataset:dpd | groupby error.reason"},
{ "name": "Files", "description": "Files grouped by mimetype", "query": "event.dataset:file | groupby file.mime_type source.ip"},
{ "name": "Files", "description": "Files grouped by source", "query": "event.dataset:file | groupby file.source source.ip"},
{ "name": "FTP", "description": "FTP grouped by argument", "query": "event.dataset:ftp | groupby ftp.argument"},
diff --git a/salt/soc/init.sls b/salt/soc/init.sls
index cc2c9dfd6..0490aa13d 100644
--- a/salt/soc/init.sls
+++ b/salt/soc/init.sls
@@ -1,5 +1,5 @@
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
-{% set MASTER = salt['grains.get']('master') %}
+{% set MANAGER = salt['grains.get']('master') %}
socdir:
file.directory:
@@ -33,7 +33,7 @@ socsync:
so-soc:
docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-soc:{{ VERSION }}
+ - image: {{ MANAGER }}:5000/soshybridhunter/so-soc:{{ VERSION }}
- hostname: soc
- name: so-soc
- binds:
@@ -84,7 +84,7 @@ kratossync:
so-kratos:
docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-kratos:{{ VERSION }}
+ - image: {{ MANAGER }}:5000/soshybridhunter/so-kratos:{{ VERSION }}
- hostname: kratos
- name: so-kratos
- binds:
diff --git a/salt/soctopus/files/SOCtopus.conf b/salt/soctopus/files/SOCtopus.conf
index e32aaf8da..477113376 100644
--- a/salt/soctopus/files/SOCtopus.conf
+++ b/salt/soctopus/files/SOCtopus.conf
@@ -1,10 +1,10 @@
-{%- set MASTER = salt['pillar.get']('master:url_base', '') %}
+{%- set MANAGER = salt['pillar.get']('manager:url_base', '') %}
{%- set HIVEKEY = salt['pillar.get']('static:hivekey', '') %}
{%- set CORTEXKEY = salt['pillar.get']('static:cortexorguserkey', '') %}
[es]
-es_url = http://{{MASTER}}:9200
-es_ip = {{MASTER}}
+es_url = http://{{MANAGER}}:9200
+es_ip = {{MANAGER}}
es_user = YOURESUSER
es_pass = YOURESPASS
es_index_pattern = so-*
@@ -12,7 +12,7 @@ es_verifycert = no
[cortex]
auto_analyze_alerts = no
-cortex_url = https://{{MASTER}}/cortex/
+cortex_url = https://{{MANAGER}}/cortex/
cortex_key = {{ CORTEXKEY }}
supported_analyzers = Urlscan_io_Search,CERTatPassiveDNS
@@ -33,7 +33,7 @@ grr_user = YOURGRRUSER
grr_pass = YOURGRRPASS
[hive]
-hive_url = https://{{MASTER}}/thehive/
+hive_url = https://{{MANAGER}}/thehive/
hive_key = {{ HIVEKEY }}
hive_tlp = 3
hive_verifycert = no
@@ -60,8 +60,8 @@ slack_url = YOURSLACKWORKSPACE
slack_webhook = YOURSLACKWEBHOOK
[playbook]
-playbook_url = http://{{MASTER}}:3200/playbook
-playbook_ext_url = https://{{MASTER}}/playbook
+playbook_url = http://{{MANAGER}}:3200/playbook
+playbook_ext_url = https://{{MANAGER}}/playbook
playbook_key = de6639318502476f2fa5aa06f43f51fb389a3d7f
playbook_verifycert = no
playbook_unit_test_index = playbook-testing
diff --git a/salt/soctopus/files/templates/es-generic.template b/salt/soctopus/files/templates/es-generic.template
new file mode 100644
index 000000000..b56050741
--- /dev/null
+++ b/salt/soctopus/files/templates/es-generic.template
@@ -0,0 +1,7 @@
+{% set ES = salt['pillar.get']('static:managerip', '') %}
+
+alert: modules.so.playbook-es.PlaybookESAlerter
+elasticsearch_host: "{{ ES }}:9200"
+play_title: ""
+play_url: "https://{{ ES }}/playbook/issues/6000"
+sigma_level: ""
diff --git a/salt/soctopus/files/templates/generic.template b/salt/soctopus/files/templates/generic.template
index 0bdb6a2ba..7bb5a969d 100644
--- a/salt/soctopus/files/templates/generic.template
+++ b/salt/soctopus/files/templates/generic.template
@@ -1,6 +1,6 @@
-{%- set es = salt['pillar.get']('static:masterip', '') %}
-{%- set hivehost = salt['pillar.get']('static:masterip', '') %}
-{%- set hivekey = salt['pillar.get']('static:hivekey', '') %}
+{% set es = salt['pillar.get']('static:managerip', '') %}
+{% set hivehost = salt['pillar.get']('static:managerip', '') %}
+{% set hivekey = salt['pillar.get']('static:hivekey', '') %}
alert: hivealerter
hive_connection:
diff --git a/salt/soctopus/files/templates/osquery.template b/salt/soctopus/files/templates/osquery.template
index 90bc0743e..4fff9a1d5 100644
--- a/salt/soctopus/files/templates/osquery.template
+++ b/salt/soctopus/files/templates/osquery.template
@@ -1,5 +1,5 @@
-{% set es = salt['pillar.get']('static:masterip', '') %}
-{% set hivehost = salt['pillar.get']('static:masterip', '') %}
+{% set es = salt['pillar.get']('static:managerip', '') %}
+{% set hivehost = salt['pillar.get']('static:managerip', '') %}
{% set hivekey = salt['pillar.get']('static:hivekey', '') %}
alert: hivealerter
diff --git a/salt/soctopus/init.sls b/salt/soctopus/init.sls
index 6c06fecff..11727e149 100644
--- a/salt/soctopus/init.sls
+++ b/salt/soctopus/init.sls
@@ -1,7 +1,7 @@
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
-{% set MASTER = salt['grains.get']('master') %}
-{%- set MASTER_URL = salt['pillar.get']('master:url_base', '') %}
-{%- set MASTER_IP = salt['pillar.get']('static:masterip', '') %}
+{% set MANAGER = salt['grains.get']('master') %}
+{%- set MANAGER_URL = salt['pillar.get']('manager:url_base', '') %}
+{%- set MANAGER_IP = salt['pillar.get']('static:managerip', '') %}
soctopusdir:
file.directory:
@@ -50,7 +50,7 @@ playbookrulessync:
so-soctopus:
docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-soctopus:{{ VERSION }}
+ - image: {{ MANAGER }}:5000/soshybridhunter/so-soctopus:{{ VERSION }}
- hostname: soctopus
- name: so-soctopus
- binds:
@@ -61,4 +61,4 @@ so-soctopus:
- port_bindings:
- 0.0.0.0:7000:7000
- extra_hosts:
- - {{MASTER_URL}}:{{MASTER_IP}}
+ - {{MANAGER_URL}}:{{MANAGER_IP}}
diff --git a/salt/ssl/init.sls b/salt/ssl/init.sls
index db4bc97ea..a0780ecf6 100644
--- a/salt/ssl/init.sls
+++ b/salt/ssl/init.sls
@@ -1,5 +1,5 @@
-{% set master = salt['grains.get']('master') %}
-{% set masterip = salt['pillar.get']('static:masterip', '') %}
+{% set manager = salt['grains.get']('master') %}
+{% set managerip = salt['pillar.get']('static:managerip', '') %}
{% set HOSTNAME = salt['grains.get']('host') %}
{% set global_ca_text = [] %}
{% set global_ca_server = [] %}
@@ -7,13 +7,13 @@
{% set MAINIP = salt['grains.get']('ip_interfaces').get(MAININT)[0] %}
{% set CUSTOM_FLEET_HOSTNAME = salt['pillar.get']('static:fleet_custom_hostname', None) %}
-{% if grains.id.split('_')|last in ['master', 'eval', 'standalone'] %}
+{% if grains.id.split('_')|last in ['manager', 'eval', 'standalone'] %}
{% set trusttheca_text = salt['mine.get'](grains.id, 'x509.get_pem_entries')[grains.id]['/etc/pki/ca.crt']|replace('\n', '') %}
{% set ca_server = grains.id %}
{% else %}
{% set x509dict = salt['mine.get']('*', 'x509.get_pem_entries') %}
{% for host in x509dict %}
- {% if 'master' in host.split('_')|last or host.split('_')|last == 'standalone' %}
+ {% if 'manager' in host.split('_')|last or host.split('_')|last == 'standalone' %}
{% do global_ca_text.append(x509dict[host].get('/etc/pki/ca.crt')|replace('\n', '')) %}
{% do global_ca_server.append(host) %}
{% endif %}
@@ -43,7 +43,7 @@ m2cryptopkgs:
- ca_server: {{ ca_server }}
- signing_policy: influxdb
- public_key: /etc/pki/influxdb.key
- - CN: {{ master }}
+ - CN: {{ manager }}
- days_remaining: 0
- days_valid: 820
- backup: True
@@ -59,7 +59,7 @@ influxkeyperms:
- mode: 640
- group: 939
-{% if grains['role'] in ['so-master', 'so-eval', 'so-helix', 'so-mastersearch', 'so-standalone'] %}
+{% if grains['role'] in ['so-manager', 'so-eval', 'so-helix', 'so-managersearch', 'so-standalone'] %}
# Request a cert and drop it where it needs to go to be distributed
/etc/pki/filebeat.crt:
@@ -70,7 +70,7 @@ influxkeyperms:
{% if grains.role == 'so-heavynode' %}
- CN: {{grains.id}}
{% else %}
- - CN: {{master}}
+ - CN: {{manager}}
{% endif %}
- days_remaining: 0
- days_valid: 820
@@ -119,7 +119,7 @@ fbcrtlink:
- ca_server: {{ ca_server }}
- signing_policy: registry
- public_key: /etc/pki/registry.key
- - CN: {{ master }}
+ - CN: {{ manager }}
- days_remaining: 0
- days_valid: 820
- backup: True
@@ -136,31 +136,31 @@ regkeyperms:
- group: 939
# Create a cert for the reverse proxy
-/etc/pki/masterssl.crt:
+/etc/pki/managerssl.crt:
x509.certificate_managed:
- ca_server: {{ ca_server }}
- - signing_policy: masterssl
- - public_key: /etc/pki/masterssl.key
- - CN: {{ master }}
+ - signing_policy: managerssl
+ - public_key: /etc/pki/managerssl.key
+ - CN: {{ manager }}
- days_remaining: 0
- days_valid: 820
- backup: True
- managed_private_key:
- name: /etc/pki/masterssl.key
+ name: /etc/pki/managerssl.key
bits: 4096
backup: True
msslkeyperms:
file.managed:
- replace: False
- - name: /etc/pki/masterssl.key
+ - name: /etc/pki/managerssl.key
- mode: 640
- group: 939
# Create a private key and cert for OSQuery
/etc/pki/fleet.key:
x509.private_key_managed:
- - CN: {{ master }}
+ - CN: {{ manager }}
- bits: 4096
- days_remaining: 0
- days_valid: 820
@@ -169,8 +169,8 @@ msslkeyperms:
/etc/pki/fleet.crt:
x509.certificate_managed:
- signing_private_key: /etc/pki/fleet.key
- - CN: {{ master }}
- - subjectAltName: DNS:{{ master }},IP:{{ masterip }}
+ - CN: {{ manager }}
+ - subjectAltName: DNS:{{ manager }},IP:{{ managerip }}
- days_remaining: 0
- days_valid: 820
- backup: True
@@ -187,7 +187,7 @@ fleetkeyperms:
- group: 939
{% endif %}
-{% if grains['role'] in ['so-sensor', 'so-master', 'so-node', 'so-eval', 'so-helix', 'so-mastersearch', 'so-heavynode', 'so-fleet', 'so-standalone'] %}
+{% if grains['role'] in ['so-sensor', 'so-manager', 'so-node', 'so-eval', 'so-helix', 'so-managersearch', 'so-heavynode', 'so-fleet', 'so-standalone'] %}
fbcertdir:
file.directory:
@@ -203,7 +203,7 @@ fbcertdir:
{% if grains.role == 'so-heavynode' %}
- CN: {{grains.id}}
{% else %}
- - CN: {{master}}
+ - CN: {{manager}}
{% endif %}
- days_remaining: 0
- days_valid: 820
@@ -238,25 +238,25 @@ chownfilebeatp8:
{% if grains['role'] == 'so-fleet' %}
# Create a cert for the reverse proxy
-/etc/pki/masterssl.crt:
+/etc/pki/managerssl.crt:
x509.certificate_managed:
- ca_server: {{ ca_server }}
- - signing_policy: masterssl
- - public_key: /etc/pki/masterssl.key
+ - signing_policy: managerssl
+ - public_key: /etc/pki/managerssl.key
- CN: {{ HOSTNAME }}
- subjectAltName: DNS:{{ HOSTNAME }}, IP:{{ MAINIP }} {% if CUSTOM_FLEET_HOSTNAME != None %},DNS:{{ CUSTOM_FLEET_HOSTNAME }} {% endif %}
- days_remaining: 0
- days_valid: 820
- backup: True
- managed_private_key:
- name: /etc/pki/masterssl.key
+ name: /etc/pki/managerssl.key
bits: 4096
backup: True
msslkeyperms:
file.managed:
- replace: False
- - name: /etc/pki/masterssl.key
+ - name: /etc/pki/managerssl.key
- mode: 640
- group: 939
diff --git a/salt/strelka/files/backend/backend.yaml b/salt/strelka/files/backend/backend.yaml
index 76a2ae3af..b25e5630d 100644
--- a/salt/strelka/files/backend/backend.yaml
+++ b/salt/strelka/files/backend/backend.yaml
@@ -2,7 +2,7 @@
{%- set mainint = salt['pillar.get']('sensor:mainint') %}
{%- set ip = salt['grains.get']('ip_interfaces:' ~ mainint[0], salt['pillar.get']('sensor:mainip')) %}
{%- else %}
- {%- set ip = salt['pillar.get']('static:masterip') %}
+ {%- set ip = salt['pillar.get']('static:managerip') %}
{%- endif -%}
logging_cfg: '/etc/strelka/logging.yaml'
limits:
diff --git a/salt/strelka/files/filestream/filestream.yaml b/salt/strelka/files/filestream/filestream.yaml
index c45fd8644..539e4314c 100644
--- a/salt/strelka/files/filestream/filestream.yaml
+++ b/salt/strelka/files/filestream/filestream.yaml
@@ -2,7 +2,7 @@
{%- set mainint = salt['pillar.get']('sensor:mainint') %}
{%- set ip = salt['grains.get']('ip_interfaces:' ~ mainint[0], salt['pillar.get']('sensor:mainip')) %}
{%- else %}
- {%- set ip = salt['pillar.get']('static:masterip') %}
+ {%- set ip = salt['pillar.get']('static:managerip') %}
{%- endif -%}
conn:
server: '{{ ip }}:57314'
diff --git a/salt/strelka/files/frontend/frontend.yaml b/salt/strelka/files/frontend/frontend.yaml
index 56df323f9..5d72f1e0d 100644
--- a/salt/strelka/files/frontend/frontend.yaml
+++ b/salt/strelka/files/frontend/frontend.yaml
@@ -2,7 +2,7 @@
{%- set mainint = salt['pillar.get']('sensor:mainint') %}
{%- set ip = salt['grains.get']('ip_interfaces:' ~ mainint[0], salt['pillar.get']('sensor:mainip')) %}
{%- else %}
- {%- set ip = salt['pillar.get']('static:masterip') %}
+ {%- set ip = salt['pillar.get']('static:managerip') %}
{%- endif -%}
server: ":57314"
coordinator:
diff --git a/salt/strelka/files/manager/manager.yaml b/salt/strelka/files/manager/manager.yaml
index 8a5966ac9..db9dd7f91 100644
--- a/salt/strelka/files/manager/manager.yaml
+++ b/salt/strelka/files/manager/manager.yaml
@@ -2,7 +2,7 @@
{%- set mainint = salt['pillar.get']('sensor:mainint') %}
{%- set ip = salt['grains.get']('ip_interfaces:' ~ mainint[0], salt['pillar.get']('sensor:mainip')) %}
{%- else %}
- {%- set ip = salt['pillar.get']('static:masterip') %}
+ {%- set ip = salt['pillar.get']('static:managerip') %}
{%- endif -%}
coordinator:
addr: '{{ ip }}:6380'
diff --git a/salt/strelka/init.sls b/salt/strelka/init.sls
index 4a422b642..b34ee92da 100644
--- a/salt/strelka/init.sls
+++ b/salt/strelka/init.sls
@@ -12,8 +12,8 @@
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
-{%- set MASTER = grains['master'] %}
-{%- set MASTERIP = salt['pillar.get']('static:masterip', '') %}
+{%- set MANAGER = salt['grains.get']('master') %}
+{%- set MANAGERIP = salt['pillar.get']('static:managerip', '') %}
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
{%- set STRELKA_RULES = salt['pillar.get']('strelka:rules', '1') -%}
@@ -79,7 +79,7 @@ strelkastagedir:
strelka_coordinator:
docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-redis:{{ VERSION }}
+ - image: {{ MANAGER }}:5000/soshybridhunter/so-redis:{{ VERSION }}
- name: so-strelka-coordinator
- entrypoint: redis-server --save "" --appendonly no
- port_bindings:
@@ -87,7 +87,7 @@ strelka_coordinator:
strelka_gatekeeper:
docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-redis:{{ VERSION }}
+ - image: {{ MANAGER }}:5000/soshybridhunter/so-redis:{{ VERSION }}
- name: so-strelka-gatekeeper
- entrypoint: redis-server --save "" --appendonly no --maxmemory-policy allkeys-lru
- port_bindings:
@@ -95,7 +95,7 @@ strelka_gatekeeper:
strelka_frontend:
docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-strelka-frontend:{{ VERSION }}
+ - image: {{ MANAGER }}:5000/soshybridhunter/so-strelka-frontend:{{ VERSION }}
- binds:
- /opt/so/conf/strelka/frontend/:/etc/strelka/:ro
- /nsm/strelka/log/:/var/log/strelka/:rw
@@ -107,7 +107,7 @@ strelka_frontend:
strelka_backend:
docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-strelka-backend:{{ VERSION }}
+ - image: {{ MANAGER }}:5000/soshybridhunter/so-strelka-backend:{{ VERSION }}
- binds:
- /opt/so/conf/strelka/backend/:/etc/strelka/:ro
- /opt/so/conf/strelka/rules/:/etc/yara/:ro
@@ -117,7 +117,7 @@ strelka_backend:
strelka_manager:
docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-strelka-manager:{{ VERSION }}
+ - image: {{ MANAGER }}:5000/soshybridhunter/so-strelka-manager:{{ VERSION }}
- binds:
- /opt/so/conf/strelka/manager/:/etc/strelka/:ro
- name: so-strelka-manager
@@ -125,7 +125,7 @@ strelka_manager:
strelka_filestream:
docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-strelka-filestream:{{ VERSION }}
+ - image: {{ MANAGER }}:5000/soshybridhunter/so-strelka-filestream:{{ VERSION }}
- binds:
- /opt/so/conf/strelka/filestream/:/etc/strelka/:ro
- /nsm/strelka:/nsm/strelka
diff --git a/salt/suricata/init.sls b/salt/suricata/init.sls
index 11d178654..0dc16e6b0 100644
--- a/salt/suricata/init.sls
+++ b/salt/suricata/init.sls
@@ -16,7 +16,7 @@
{% set interface = salt['pillar.get']('sensor:interface', 'bond0') %}
{% set BROVER = salt['pillar.get']('static:broversion', '') %}
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
-{% set MASTER = salt['grains.get']('master') %}
+{% set MANAGER = salt['grains.get']('master') %}
{% set BPF_NIDS = salt['pillar.get']('nids:bpf') %}
{% set BPF_STATUS = 0 %}
@@ -132,7 +132,7 @@ suribpf:
so-suricata:
docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-suricata:{{ VERSION }}
+ - image: {{ MANAGER }}:5000/soshybridhunter/so-suricata:{{ VERSION }}
- privileged: True
- environment:
- INTERFACE={{ interface }}
diff --git a/salt/suricata/master.sls b/salt/suricata/manager.sls
similarity index 100%
rename from salt/suricata/master.sls
rename to salt/suricata/manager.sls
diff --git a/salt/suricata/suricata_config.map.jinja b/salt/suricata/suricata_config.map.jinja
index 6260c1ec1..557d4e519 100644
--- a/salt/suricata/suricata_config.map.jinja
+++ b/salt/suricata/suricata_config.map.jinja
@@ -11,7 +11,7 @@ HOME_NET: "[{{salt['pillar.get']('sensor:hnsensor')}}]"
{% endload %}
{% else %}
{% load_yaml as homenet %}
-HOME_NET: "[{{salt['pillar.get']('static:hnmaster', '')}}]"
+HOME_NET: "[{{salt['pillar.get']('static:hnmanager', '')}}]"
{% endload %}
{% endif %}
diff --git a/salt/tcpreplay/init.sls b/salt/tcpreplay/init.sls
index 5a054bf5d..460552bf8 100644
--- a/salt/tcpreplay/init.sls
+++ b/salt/tcpreplay/init.sls
@@ -1,11 +1,11 @@
{% if grains['role'] == 'so-sensor' or grains['role'] == 'so-eval' %}
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
-{% set MASTER = salt['grains.get']('master') %}
+{% set MANAGER = salt['grains.get']('master') %}
so-tcpreplay:
docker_container.running:
- network_mode: "host"
- - image: {{ MASTER }}:5000/soshybridhunter/so-tcpreplay:{{ VERSION }}
+ - image: {{ MANAGER }}:5000/soshybridhunter/so-tcpreplay:{{ VERSION }}
- name: so-tcpreplay
- user: root
- interactive: True
diff --git a/salt/telegraf/etc/telegraf.conf b/salt/telegraf/etc/telegraf.conf
index be99dc849..70ca7837f 100644
--- a/salt/telegraf/etc/telegraf.conf
+++ b/salt/telegraf/etc/telegraf.conf
@@ -13,7 +13,7 @@
# them with $. For strings the variable must be within quotes (ie, "$STR_VAR"),
# for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR)
-{%- set MASTER = grains['master'] %}
+{%- set MANAGER = salt['grains.get']('master') %}
{% set NODEIP = salt['pillar.get']('elasticsearch:mainip', '') %}
{% set HELIX_API_KEY = salt['pillar.get']('fireeye:helix:api_key', '') %}
{% set UNIQUEID = salt['pillar.get']('sensor:uniqueid', '') %}
@@ -98,7 +98,7 @@
## urls will be written to each interval.
# urls = ["unix:///var/run/influxdb.sock"]
# urls = ["udp://127.0.0.1:8089"]
- urls = ["https://{{ MASTER }}:8086"]
+ urls = ["https://{{ MANAGER }}:8086"]
## The target database for metrics; will be created as needed.
@@ -616,13 +616,13 @@
# # Read stats from one or more Elasticsearch servers or clusters
-{% if grains['role'] in ['so-master', 'so-eval', 'so-mastersearch', 'so-standalone'] %}
+{% if grains['role'] in ['so-manager', 'so-eval', 'so-managersearch', 'so-standalone'] %}
[[inputs.elasticsearch]]
# ## specify a list of one or more Elasticsearch servers
# # you can add username and password to your url to use basic authentication:
# # servers = ["http://user:pass@localhost:9200"]
- servers = ["http://{{ MASTER }}:9200"]
+ servers = ["http://{{ MANAGER }}:9200"]
{% elif grains['role'] in ['so-node', 'so-hotnode', 'so-warmnode', 'so-heavynode'] %}
[[inputs.elasticsearch]]
servers = ["http://{{ NODEIP }}:9200"]
@@ -666,7 +666,7 @@
# # Read metrics from one or more commands that can output to stdout
# ## Commands array
-{% if grains['role'] in ['so-master', 'so-mastersearch'] %}
+{% if grains['role'] in ['so-manager', 'so-managersearch'] %}
[[inputs.exec]]
commands = [
"/scripts/redis.sh",
diff --git a/salt/telegraf/init.sls b/salt/telegraf/init.sls
index 9ae0903b9..e75608c6a 100644
--- a/salt/telegraf/init.sls
+++ b/salt/telegraf/init.sls
@@ -1,4 +1,4 @@
-{% set MASTER = salt['grains.get']('master') %}
+{% set MANAGER = salt['grains.get']('master') %}
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
# Add Telegraf to monitor all the things.
@@ -36,7 +36,7 @@ tgrafconf:
so-telegraf:
docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-telegraf:{{ VERSION }}
+ - image: {{ MANAGER }}:5000/soshybridhunter/so-telegraf:{{ VERSION }}
- environment:
- HOST_PROC=/host/proc
- HOST_ETC=/host/etc
@@ -53,7 +53,7 @@ so-telegraf:
- /proc:/host/proc:ro
- /nsm:/host/nsm:ro
- /etc:/host/etc:ro
- {% if grains['role'] == 'so-master' or grains['role'] == 'so-eval' or grains['role'] == 'so-mastersearch' %}
+ {% if grains['role'] == 'so-manager' or grains['role'] == 'so-eval' or grains['role'] == 'so-managersearch' %}
- /etc/pki/ca.crt:/etc/telegraf/ca.crt:ro
{% else %}
- /etc/ssl/certs/intca.crt:/etc/telegraf/ca.crt:ro
diff --git a/salt/thehive/etc/application.conf b/salt/thehive/etc/application.conf
index 8630cb386..f06c3f7c6 100644
--- a/salt/thehive/etc/application.conf
+++ b/salt/thehive/etc/application.conf
@@ -1,4 +1,4 @@
-{%- set MASTERIP = salt['pillar.get']('static:masterip', '') %}
+{%- set MANAGERIP = salt['pillar.get']('static:managerip', '') %}
{%- set CORTEXKEY = salt['pillar.get']('static:cortexorguserkey', '') %}
# Secret Key
@@ -6,7 +6,7 @@
# WARNING: If you deploy your application on several servers, make sure to use the same key.
play.http.secret.key="letsdewdis"
play.http.context=/thehive/
-search.uri = "http://{{ MASTERIP }}:9400"
+search.uri = "http://{{ MANAGERIP }}:9400"
# Elasticsearch
search {
# Name of the index
@@ -14,8 +14,8 @@ search {
# Name of the Elasticsearch cluster
cluster = thehive
# Address of the Elasticsearch instance
- host = ["{{ MASTERIP }}:9500"]
- #search.uri = "http://{{ MASTERIP }}:9500"
+ host = ["{{ MANAGERIP }}:9500"]
+ #search.uri = "http://{{ MANAGERIP }}:9500"
# Scroll keepalive
keepalive = 1m
# Size of the page for scroll
@@ -135,7 +135,7 @@ play.modules.enabled += connectors.cortex.CortexConnector
cortex {
"CORTEX-SERVER-ID" {
- url = "http://{{ MASTERIP }}:9001/cortex/"
+ url = "http://{{ MANAGERIP }}:9001/cortex/"
key = "{{ CORTEXKEY }}"
# # HTTP client configuration (SSL and proxy)
# ws {}
@@ -210,9 +210,9 @@ misp {
}
webhooks {
NodeRedWebHook {
- url = "http://{{ MASTERIP }}:1880/thehive"
+ url = "http://{{ MANAGERIP }}:1880/thehive"
}
#SOCtopusWebHook {
- # url = "http://{{ MASTERIP }}:7000/enrich"
+ # url = "http://{{ MANAGERIP }}:7000/enrich"
#}
}
diff --git a/salt/thehive/etc/cortex-application.conf b/salt/thehive/etc/cortex-application.conf
index 28fbe6791..b9cbe20cc 100644
--- a/salt/thehive/etc/cortex-application.conf
+++ b/salt/thehive/etc/cortex-application.conf
@@ -1,11 +1,11 @@
-{%- set MASTERIP = salt['pillar.get']('static:masterip', '') %}
+{%- set MANAGERIP = salt['pillar.get']('static:managerip', '') %}
# Secret Key
# The secret key is used to secure cryptographic functions.
# WARNING: If you deploy your application on several servers, make sure to use the same key.
play.http.secret.key="letsdewdis"
play.http.context=/cortex/
-search.uri = "http://{{ MASTERIP }}:9400"
+search.uri = "http://{{ MANAGERIP }}:9400"
# Elasticsearch
search {
@@ -14,7 +14,7 @@ search {
# Name of the Elasticsearch cluster
cluster = thehive
# Address of the Elasticsearch instance
- host = ["{{ MASTERIP }}:9500"]
+ host = ["{{ MANAGERIP }}:9500"]
# Scroll keepalive
keepalive = 1m
# Size of the page for scroll
diff --git a/salt/thehive/init.sls b/salt/thehive/init.sls
index 3ca913221..19b9c4eeb 100644
--- a/salt/thehive/init.sls
+++ b/salt/thehive/init.sls
@@ -1,6 +1,6 @@
-{% set MASTERIP = salt['pillar.get']('master:mainip', '') %}
+{% set MANAGERIP = salt['pillar.get']('manager:mainip', '') %}
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
-{% set MASTER = salt['grains.get']('master') %}
+{% set MANAGER = salt['grains.get']('master') %}
thehiveconfdir:
file.directory:
- name: /opt/so/conf/thehive/etc
@@ -71,7 +71,7 @@ thehiveesdata:
so-thehive-es:
docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-thehive-es:{{ VERSION }}
+ - image: {{ MANAGER }}:5000/soshybridhunter/so-thehive-es:{{ VERSION }}
- hostname: so-thehive-es
- name: so-thehive-es
- user: 939
@@ -99,7 +99,7 @@ so-thehive-es:
# Install Cortex
so-cortex:
docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-thehive-cortex:{{ VERSION }}
+ - image: {{ MANAGER }}:5000/soshybridhunter/so-thehive-cortex:{{ VERSION }}
- hostname: so-cortex
- name: so-cortex
- user: 939
@@ -119,9 +119,9 @@ cortexscript:
so-thehive:
docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-thehive:{{ VERSION }}
+ - image: {{ MANAGER }}:5000/soshybridhunter/so-thehive:{{ VERSION }}
- environment:
- - ELASTICSEARCH_HOST={{ MASTERIP }}
+ - ELASTICSEARCH_HOST={{ MANAGERIP }}
- hostname: so-thehive
- name: so-thehive
- user: 939
diff --git a/salt/thehive/scripts/cortex_init b/salt/thehive/scripts/cortex_init
index 922ad45dc..f653bc008 100644
--- a/salt/thehive/scripts/cortex_init
+++ b/salt/thehive/scripts/cortex_init
@@ -1,5 +1,5 @@
#!/bin/bash
-# {%- set MASTERIP = salt['pillar.get']('static:masterip', '') %}
+# {%- set MANAGERIP = salt['pillar.get']('static:managerip', '') %}
# {%- set CORTEXUSER = salt['pillar.get']('static:cortexuser', '') %}
# {%- set CORTEXPASSWORD = salt['pillar.get']('static:cortexpassword', 'cortexchangeme') %}
# {%- set CORTEXKEY = salt['pillar.get']('static:cortexkey', '') %}
@@ -11,7 +11,7 @@ default_salt_dir=/opt/so/saltstack/default
cortex_init(){
sleep 60
- CORTEX_IP="{{MASTERIP}}"
+ CORTEX_IP="{{MANAGERIP}}"
CORTEX_USER="{{CORTEXUSER}}"
CORTEX_PASSWORD="{{CORTEXPASSWORD}}"
CORTEX_KEY="{{CORTEXKEY}}"
@@ -54,7 +54,7 @@ if [ -f /opt/so/state/cortex.txt ]; then
exit 0
else
rm -f garbage_file
- while ! wget -O garbage_file {{MASTERIP}}:9500 2>/dev/null
+ while ! wget -O garbage_file {{MANAGERIP}}:9500 2>/dev/null
do
echo "Waiting for Elasticsearch..."
rm -f garbage_file
diff --git a/salt/thehive/scripts/hive_init b/salt/thehive/scripts/hive_init
index 0163b45f3..bcd911c1e 100755
--- a/salt/thehive/scripts/hive_init
+++ b/salt/thehive/scripts/hive_init
@@ -1,12 +1,12 @@
#!/bin/bash
-# {%- set MASTERIP = salt['pillar.get']('static:masterip', '') %}
+# {%- set MANAGERIP = salt['pillar.get']('static:managerip', '') %}
# {%- set THEHIVEUSER = salt['pillar.get']('static:hiveuser', '') %}
# {%- set THEHIVEPASSWORD = salt['pillar.get']('static:hivepassword', 'hivechangeme') %}
# {%- set THEHIVEKEY = salt['pillar.get']('static:hivekey', '') %}
thehive_init(){
sleep 120
- THEHIVE_IP="{{MASTERIP}}"
+ THEHIVE_IP="{{MANAGERIP}}"
THEHIVE_USER="{{THEHIVEUSER}}"
THEHIVE_PASSWORD="{{THEHIVEPASSWORD}}"
THEHIVE_KEY="{{THEHIVEKEY}}"
@@ -52,7 +52,7 @@ if [ -f /opt/so/state/thehive.txt ]; then
exit 0
else
rm -f garbage_file
- while ! wget -O garbage_file {{MASTERIP}}:9400 2>/dev/null
+ while ! wget -O garbage_file {{MANAGERIP}}:9400 2>/dev/null
do
echo "Waiting for Elasticsearch..."
rm -f garbage_file
diff --git a/salt/top.sls b/salt/top.sls
index fbf9e32ef..a04e75657 100644
--- a/salt/top.sls
+++ b/salt/top.sls
@@ -1,10 +1,10 @@
{%- set BROVER = salt['pillar.get']('static:broversion', '') -%}
{%- set WAZUH = salt['pillar.get']('static:wazuh', '0') -%}
-{%- set THEHIVE = salt['pillar.get']('master:thehive', '0') -%}
-{%- set PLAYBOOK = salt['pillar.get']('master:playbook', '0') -%}
-{%- set FREQSERVER = salt['pillar.get']('master:freq', '0') -%}
-{%- set DOMAINSTATS = salt['pillar.get']('master:domainstats', '0') -%}
-{%- set FLEETMASTER = salt['pillar.get']('static:fleet_master', False) -%}
+{%- set THEHIVE = salt['pillar.get']('manager:thehive', '0') -%}
+{%- set PLAYBOOK = salt['pillar.get']('manager:playbook', '0') -%}
+{%- set FREQSERVER = salt['pillar.get']('manager:freq', '0') -%}
+{%- set DOMAINSTATS = salt['pillar.get']('manager:domainstats', '0') -%}
+{%- set FLEETMANAGER = salt['pillar.get']('static:fleet_manager', False) -%}
{%- set FLEETNODE = salt['pillar.get']('static:fleet_node', False) -%}
{%- set STRELKA = salt['pillar.get']('strelka:enabled', '0') -%}
@@ -30,7 +30,7 @@ base:
- telegraf
- firewall
- idstools
- - suricata.master
+ - suricata.manager
- pcap
- suricata
- zeek
@@ -56,7 +56,7 @@ base:
- strelka
{%- endif %}
- filebeat
- {%- if FLEETMASTER or FLEETNODE %}
+ {%- if FLEETMANAGER or FLEETNODE %}
- fleet.install_package
{%- endif %}
- schedule
@@ -65,7 +65,7 @@ base:
- ca
- ssl
- registry
- - master
+ - manager
- common
- nginx
- telegraf
@@ -74,9 +74,9 @@ base:
- soc
- firewall
- idstools
- - suricata.master
+ - suricata.manager
- healthcheck
- {%- if FLEETMASTER or FLEETNODE or PLAYBOOK != 0 %}
+ {%- if FLEETMANAGER or FLEETNODE or PLAYBOOK != 0 %}
- mysql
{%- endif %}
{%- if WAZUH != 0 %}
@@ -95,7 +95,7 @@ base:
- filebeat
- curator
- elastalert
- {%- if FLEETMASTER or FLEETNODE %}
+ {%- if FLEETMANAGER or FLEETNODE %}
- fleet
- redis
- fleet.install_package
@@ -117,7 +117,7 @@ base:
{%- endif %}
- '*_master':
+ '*_manager':
- ca
- ssl
- registry
@@ -128,11 +128,11 @@ base:
- grafana
- soc
- firewall
- - master
+ - manager
- idstools
- - suricata.master
+ - suricata.manager
- redis
- {%- if FLEETMASTER or FLEETNODE or PLAYBOOK != 0 %}
+ {%- if FLEETMANAGER or FLEETNODE or PLAYBOOK != 0 %}
- mysql
{%- endif %}
{%- if WAZUH != 0 %}
@@ -145,7 +145,7 @@ base:
- filebeat
- utility
- schedule
- {%- if FLEETMASTER or FLEETNODE %}
+ {%- if FLEETMANAGER or FLEETNODE %}
- fleet
- fleet.install_package
{%- endif %}
@@ -167,7 +167,7 @@ base:
- ca
- ssl
- registry
- - master
+ - manager
- common
- nginx
- telegraf
@@ -176,10 +176,10 @@ base:
- soc
- firewall
- idstools
- - suricata.master
+ - suricata.manager
- healthcheck
- redis
- {%- if FLEETMASTER or FLEETNODE or PLAYBOOK != 0 %}
+ {%- if FLEETMANAGER or FLEETNODE or PLAYBOOK != 0 %}
- mysql
{%- endif %}
{%- if WAZUH != 0 %}
@@ -199,7 +199,7 @@ base:
- filebeat
- curator
- elastalert
- {%- if FLEETMASTER or FLEETNODE %}
+ {%- if FLEETMANAGER or FLEETNODE %}
- fleet
- redis
- fleet.install_package
@@ -227,7 +227,7 @@ base:
- common
- firewall
- logstash
- {%- if FLEETMASTER or FLEETNODE %}
+ {%- if FLEETMANAGER or FLEETNODE %}
- fleet.install_package
{%- endif %}
- schedule
@@ -239,7 +239,7 @@ base:
- logstash
- elasticsearch
- curator
- {%- if FLEETMASTER or FLEETNODE %}
+ {%- if FLEETMANAGER or FLEETNODE %}
- fleet.install_package
{%- endif %}
- schedule
@@ -249,7 +249,7 @@ base:
- common
- firewall
- elasticsearch
- {%- if FLEETMASTER or FLEETNODE %}
+ {%- if FLEETMANAGER or FLEETNODE %}
- fleet.install_package
{%- endif %}
- schedule
@@ -268,12 +268,12 @@ base:
- elasticsearch
- curator
- filebeat
- {%- if FLEETMASTER or FLEETNODE %}
+ {%- if FLEETMANAGER or FLEETNODE %}
- fleet.install_package
{%- endif %}
- schedule
- '*_mastersensor':
+ '*_managersensor':
- common
- nginx
- telegraf
@@ -281,13 +281,13 @@ base:
- grafana
- firewall
- sensor
- - master
- {%- if FLEETMASTER or FLEETNODE %}
+ - manager
+ {%- if FLEETMANAGER or FLEETNODE %}
- fleet.install_package
{%- endif %}
- schedule
- '*_mastersearch':
+ '*_managersearch':
- ca
- ssl
- registry
@@ -298,11 +298,11 @@ base:
- grafana
- soc
- firewall
- - master
+ - manager
- idstools
- - suricata.master
+ - suricata.manager
- redis
- {%- if FLEETMASTER or FLEETNODE or PLAYBOOK != 0 %}
+ {%- if FLEETMANAGER or FLEETNODE or PLAYBOOK != 0 %}
- mysql
{%- endif %}
{%- if WAZUH != 0 %}
@@ -316,7 +316,7 @@ base:
- filebeat
- utility
- schedule
- {%- if FLEETMASTER or FLEETNODE %}
+ {%- if FLEETMANAGER or FLEETNODE %}
- fleet
- fleet.install_package
{%- endif %}
@@ -348,7 +348,7 @@ base:
- elasticsearch
- curator
- filebeat
- {%- if FLEETMASTER or FLEETNODE %}
+ {%- if FLEETMANAGER or FLEETNODE %}
- fleet.install_package
{%- endif %}
- pcap
diff --git a/salt/utility/bin/crossthestreams b/salt/utility/bin/crossthestreams
index c8768230e..d21e3c1a4 100644
--- a/salt/utility/bin/crossthestreams
+++ b/salt/utility/bin/crossthestreams
@@ -1,6 +1,6 @@
#!/bin/bash
-{% set ES = salt['pillar.get']('master:mainip', '') %}
-{%- set MASTER = grains['master'] %}
+{% set ES = salt['pillar.get']('manager:mainip', '') %}
+{%- set MANAGER = salt['grains.get']('master') %}
# Wait for ElasticSearch to come up, so that we can query for version infromation
echo -n "Waiting for ElasticSearch..."
@@ -29,7 +29,7 @@ fi
echo "Applying cross cluster search config..."
curl -s -XPUT http://{{ ES }}:9200/_cluster/settings \
-H 'Content-Type: application/json' \
- -d "{\"persistent\": {\"search\": {\"remote\": {\"{{ MASTER }}\": {\"seeds\": [\"127.0.0.1:9300\"]}}}}}"
+ -d "{\"persistent\": {\"search\": {\"remote\": {\"{{ MANAGER }}\": {\"seeds\": [\"127.0.0.1:9300\"]}}}}}"
# Add all the search nodes to cross cluster searching.
diff --git a/salt/utility/bin/eval b/salt/utility/bin/eval
index 7ff0ef886..87692e40f 100644
--- a/salt/utility/bin/eval
+++ b/salt/utility/bin/eval
@@ -1,5 +1,5 @@
#!/bin/bash
-{% set ES = salt['pillar.get']('master:mainip', '') %}
+{% set ES = salt['pillar.get']('manager:mainip', '') %}
# Wait for ElasticSearch to come up, so that we can query for version infromation
echo -n "Waiting for ElasticSearch..."
diff --git a/salt/utility/init.sls b/salt/utility/init.sls
index 87cfe8e87..00899f69a 100644
--- a/salt/utility/init.sls
+++ b/salt/utility/init.sls
@@ -1,5 +1,5 @@
# This state is for checking things
-{% if grains['role'] in ['so-master', 'so-mastersearch', 'so-standalone'] %}
+{% if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone'] %}
# Make sure Cross Cluster is good. Will need some logic once we have hot/warm
crossclusterson:
cmd.script:
diff --git a/salt/wazuh/files/agent/ossec.conf b/salt/wazuh/files/agent/ossec.conf
index ffc7922b0..37971aa93 100644
--- a/salt/wazuh/files/agent/ossec.conf
+++ b/salt/wazuh/files/agent/ossec.conf
@@ -1,5 +1,5 @@
-{%- if grains['role'] in ['so-master', 'so-eval', 'so-mastersearch', 'so-standalone'] %}
- {%- set ip = salt['pillar.get']('static:masterip', '') %}
+{%- if grains['role'] in ['so-manager', 'so-eval', 'so-managersearch', 'so-standalone'] %}
+ {%- set ip = salt['pillar.get']('static:managerip', '') %}
{%- elif grains['role'] == 'so-node' or grains['role'] == 'so-heavynode' %}
{%- set ip = salt['pillar.get']('elasticsearch:mainip', '') %}
{%- elif grains['role'] == 'so-sensor' %}
diff --git a/salt/wazuh/files/agent/wazuh-register-agent b/salt/wazuh/files/agent/wazuh-register-agent
index b38474d8e..f2fd8693f 100755
--- a/salt/wazuh/files/agent/wazuh-register-agent
+++ b/salt/wazuh/files/agent/wazuh-register-agent
@@ -1,5 +1,5 @@
-{%- if grains['role'] in ['so-master', 'so-eval', 'so-mastersearch', 'so-standalone'] %}
- {%- set ip = salt['pillar.get']('static:masterip', '') %}
+{%- if grains['role'] in ['so-manager', 'so-eval', 'so-managersearch', 'so-standalone'] %}
+ {%- set ip = salt['pillar.get']('static:managerip', '') %}
{%- elif grains['role'] == 'so-node' or grains['role'] == 'so-heavynode' %}
{%- set ip = salt['pillar.get']('elasticsearch:mainip', '') %}
{%- elif grains['role'] == 'so-sensor' %}
diff --git a/salt/wazuh/files/wazuh-manager-whitelist b/salt/wazuh/files/wazuh-manager-whitelist
index 66dc13cd9..d39d68e36 100755
--- a/salt/wazuh/files/wazuh-manager-whitelist
+++ b/salt/wazuh/files/wazuh-manager-whitelist
@@ -1,4 +1,4 @@
-{%- set MASTERIP = salt['pillar.get']('static:masterip', '') %}
+{%- set MANAGERIP = salt['pillar.get']('static:managerip', '') %}
{%- set WAZUH_ENABLED = salt['pillar.get']('static:wazuh', '0') %}
#!/bin/bash
local_salt_dir=/opt/so/saltstack/local
@@ -21,12 +21,12 @@ local_salt_dir=/opt/so/saltstack/local
# Check if Wazuh enabled
if [ {{ WAZUH_ENABLED }} ]; then
WAZUH_MGR_CFG="/opt/so/wazuh/etc/ossec.conf"
- if ! grep -q "{{ MASTERIP }}" $WAZUH_MGR_CFG ; then
+ if ! grep -q "{{ MANAGERIP }}" $WAZUH_MGR_CFG ; then
DATE=`date`
sed -i 's/<\/ossec_config>//' $WAZUH_MGR_CFG
sed -i '/^$/N;/^\n$/D' $WAZUH_MGR_CFG
- echo -e "\n \n {{ MASTERIP }}\n \n" >> $WAZUH_MGR_CFG
- echo "Added whitelist entry for {{ MASTERIP }} in $WAZUH_MGR_CFG."
+ echo -e "\n \n {{ MANAGERIP }}\n \n" >> $WAZUH_MGR_CFG
+ echo "Added whitelist entry for {{ MANAGERIP }} in $WAZUH_MGR_CFG."
echo
fi
fi
diff --git a/salt/wazuh/init.sls b/salt/wazuh/init.sls
index 3e0969359..c4ca27d95 100644
--- a/salt/wazuh/init.sls
+++ b/salt/wazuh/init.sls
@@ -1,6 +1,6 @@
{%- set HOSTNAME = salt['grains.get']('host', '') %}
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
-{% set MASTER = salt['grains.get']('master') %}
+{% set MANAGER = salt['grains.get']('master') %}
# Add ossec group
ossecgroup:
group.present:
@@ -83,7 +83,7 @@ wazuhmgrwhitelist:
so-wazuh:
docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-wazuh:{{ VERSION }}
+ - image: {{ MANAGER }}:5000/soshybridhunter/so-wazuh:{{ VERSION }}
- hostname: {{HOSTNAME}}-wazuh-manager
- name: so-wazuh
- detach: True
diff --git a/salt/yum/etc/yum.conf.jinja b/salt/yum/etc/yum.conf.jinja
index a370bbf4f..81f981c1d 100644
--- a/salt/yum/etc/yum.conf.jinja
+++ b/salt/yum/etc/yum.conf.jinja
@@ -11,6 +11,6 @@ installonly_limit={{ salt['pillar.get']('yum:config:installonly_limit', 2) }}
bugtracker_url=http://bugs.centos.org/set_project.php?project_id=23&ref=http://bugs.centos.org/bug_report_page.php?category=yum
distroverpkg=centos-release
-{% if salt['pillar.get']('static:masterupdate', '0') %}
-proxy=http://{{ salt['pillar.get']('yum:config:proxy', salt['config.get']('master')) }}:3142
+{% if salt['pillar.get']('static:managerupdate', '0') %}
+proxy=http://{{ salt['pillar.get']('yum:config:proxy', salt['config.get']('master')) }}:3142
{% endif %}
\ No newline at end of file
diff --git a/salt/zeek/init.sls b/salt/zeek/init.sls
index 246b43c90..4fb7fe458 100644
--- a/salt/zeek/init.sls
+++ b/salt/zeek/init.sls
@@ -1,5 +1,5 @@
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
-{% set MASTER = salt['grains.get']('master') %}
+{% set MANAGER = salt['grains.get']('master') %}
{% set BPF_ZEEK = salt['pillar.get']('zeek:bpf', {}) %}
{% set BPF_STATUS = 0 %}
{% set INTERFACE = salt['pillar.get']('sensor:interface', 'bond0') %}
@@ -86,20 +86,20 @@ nodecfgsync:
- group: 939
- template: jinja
-zeekcleanscript:
- file.managed:
- - name: /usr/local/bin/zeek_clean
- - source: salt://zeek/cron/zeek_clean
- - mode: 755
+#zeekcleanscript:
+# file.managed:
+# - name: /usr/local/bin/zeek_clean
+# - source: salt://zeek/cron/zeek_clean
+# - mode: 755
-/usr/local/bin/zeek_clean:
- cron.present:
- - user: root
- - minute: '*'
- - hour: '*'
- - daymonth: '*'
- - month: '*'
- - dayweek: '*'
+#/usr/local/bin/zeek_clean:
+# cron.present:
+# - user: root
+# - minute: '*'
+# - hour: '*'
+# - daymonth: '*'
+# - month: '*'
+# - dayweek: '*'
plcronscript:
file.managed:
@@ -156,7 +156,7 @@ localzeeksync:
so-zeek:
docker_container.running:
- - image: {{ MASTER }}:5000/soshybridhunter/so-zeek:{{ VERSION }}
+ - image: {{ MANAGER }}:5000/soshybridhunter/so-zeek:{{ VERSION }}
- privileged: True
- binds:
- /nsm/zeek/logs:/nsm/zeek/logs:rw
diff --git a/setup/automation/pm_standalone_defaults b/setup/automation/pm_standalone_defaults
index ac0033f83..156697a28 100644
--- a/setup/automation/pm_standalone_defaults
+++ b/setup/automation/pm_standalone_defaults
@@ -32,7 +32,7 @@ BROVERSION=ZEEK
# EVALADVANCED=BASIC
GRAFANA=1
# HELIXAPIKEY=
-HNMASTER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
+HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
HNSENSOR=inherit
HOSTNAME=standalone
install_type=STANDALONE
@@ -40,8 +40,8 @@ install_type=STANDALONE
# LSINPUTTHREADS=
# LSPIPELINEBATCH=
# LSPIPELINEWORKERS=
-MASTERADV=BASIC
-MASTERUPDATES=1
+MANAGERADV=BASIC
+MANAGERUPDATES=1
# MDNS=
# MGATEWAY=
# MIP=
@@ -55,7 +55,7 @@ NIDS=Suricata
# NODE_LS_HEAP_SIZE=
NODESETUP=NODEBASIC
NSMSETUP=BASIC
-NODEUPDATES=MASTER
+NODEUPDATES=MANAGER
# OINKCODE=
OSQUERY=1
# PATCHSCHEDULEDAYS=
diff --git a/setup/so-functions b/setup/so-functions
index d9ebf0d15..487eb9a40 100755
--- a/setup/so-functions
+++ b/setup/so-functions
@@ -24,7 +24,7 @@ SOVERSION=$(cat ../VERSION)
accept_salt_key_remote() {
systemctl restart salt-minion
- echo "Accept the key remotely on the master" >> "$setup_log" 2>&1
+ echo "Accept the key remotely on the manager" >> "$setup_log" 2>&1
# Delete the key just in case.
ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo salt-key -d "$MINION_ID" -y
salt-call state.apply ca
@@ -43,14 +43,14 @@ add_admin_user() {
}
-add_master_hostfile() {
+add_manager_hostfile() {
[ -n "$TESTING" ] && return
- echo "Checking if I can resolve master. If not add to hosts file" >> "$setup_log" 2>&1
+ echo "Checking if I can resolve manager. If not add to hosts file" >> "$setup_log" 2>&1
# Pop up an input to get the IP address
MSRVIP=$(whiptail --title "Security Onion Setup" --inputbox \
- "Enter your Master Server IP Address" 10 60 X.X.X.X 3>&1 1>&2 2>&3)
+ "Enter your Manager Server IP Address" 10 60 X.X.X.X 3>&1 1>&2 2>&3)
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
@@ -60,7 +60,7 @@ addtotab_generate_templates() {
local addtotab_path=$local_salt_dir/pillar/data
- for i in evaltab mastersearchtab mastertab nodestab sensorstab standalonetab; do
+ for i in evaltab managersearchtab managertab nodestab sensorstab standalonetab; do
printf '%s\n'\
"$i:"\
"" > "$addtotab_path"/$i.sls
@@ -87,11 +87,11 @@ so_add_user() {
fi
}
-add_socore_user_master() {
+add_socore_user_manager() {
so_add_user "socore" "939" "939" "/opt/so" >> "$setup_log" 2>&1
}
-add_soremote_user_master() {
+add_soremote_user_manager() {
so_add_user "soremote" "947" "947" "/home/soremote" "$SOREMOTEPASS1" >> "$setup_log" 2>&1
}
@@ -152,7 +152,7 @@ bro_logs_enabled() {
"brologs:"\
" enabled:" > "$brologs_pillar"
- if [ "$MASTERADV" = 'ADVANCED' ]; then
+ if [ "$MANAGERADV" = 'ADVANCED' ]; then
for BLOG in "${BLOGS[@]}"; do
echo " - $BLOG" | tr -d '"' >> "$brologs_pillar"
done
@@ -265,12 +265,12 @@ check_web_pass() {
check_pass_match "$WEBPASSWD1" "$WEBPASSWD2" "WPMATCH"
}
-clear_master() {
- # Clear out the old master public key in case this is a re-install.
- # This only happens if you re-install the master.
+clear_manager() {
+ # Clear out the old manager public key in case this is a re-install.
+ # This only happens if you re-install the manager.
if [ -f /etc/salt/pki/minion/minion_master.pub ]; then
{
- echo "Clearing old master key";
+ echo "Clearing old Salt master key";
rm -f /etc/salt/pki/minion/minion_master.pub;
systemctl -q restart salt-minion;
} >> "$setup_log" 2>&1
@@ -360,7 +360,7 @@ configure_minion() {
'helix')
echo "master: $HOSTNAME" >> "$minion_config"
;;
- 'master' | 'eval' | 'mastersearch' | 'standalone')
+ 'manager' | 'eval' | 'managersearch' | 'standalone')
printf '%s\n'\
"master: $HOSTNAME"\
"mysql.host: '$MAINIP'"\
@@ -437,9 +437,9 @@ check_requirements() {
fi
}
-copy_master_config() {
+copy_salt_master_config() {
- # Copy the master config template to the proper directory
+ # Copy the Salt master config template to the proper directory
if [ "$setup_type" = 'iso' ]; then
cp /root/SecurityOnion/files/master /etc/salt/master >> "$setup_log" 2>&1
else
@@ -452,7 +452,7 @@ copy_master_config() {
copy_minion_tmp_files() {
case "$install_type" in
- 'MASTER' | 'EVAL' | 'HELIXSENSOR' | 'MASTERSEARCH' | 'STANDALONE')
+ 'MANAGER' | 'EVAL' | 'HELIXSENSOR' | 'MANAGERSEARCH' | 'STANDALONE')
echo "Copying pillar and salt files in $temp_install_dir to $local_salt_dir"
cp -Rv "$temp_install_dir"/pillar/ $local_salt_dir/ >> "$setup_log" 2>&1
if [ -d "$temp_install_dir"/salt ] ; then
@@ -461,12 +461,12 @@ copy_minion_tmp_files() {
;;
*)
{
- echo "scp pillar and salt files in $temp_install_dir to master $local_salt_dir";
+ echo "scp pillar and salt files in $temp_install_dir to manager $local_salt_dir";
ssh -i /root/.ssh/so.key soremote@"$MSRV" mkdir -p /tmp/"$MINION_ID"/pillar;
ssh -i /root/.ssh/so.key soremote@"$MSRV" mkdir -p /tmp/"$MINION_ID"/schedules;
scp -prv -i /root/.ssh/so.key "$temp_install_dir"/pillar/minions/* soremote@"$MSRV":/tmp/"$MINION_ID"/pillar/;
scp -prv -i /root/.ssh/so.key "$temp_install_dir"/salt/patch/os/schedules/* soremote@"$MSRV":/tmp/"$MINION_ID"/schedules;
- ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/salt/master/files/add_minion.sh "$MINION_ID";
+ ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/salt/manager/files/add_minion.sh "$MINION_ID";
} >> "$setup_log" 2>&1
;;
esac
@@ -479,8 +479,8 @@ copy_ssh_key() {
mkdir -p /root/.ssh
ssh-keygen -f /root/.ssh/so.key -t rsa -q -N "" < /dev/zero
chown -R "$SUDO_USER":"$SUDO_USER" /root/.ssh
- echo "Copying the SSH key to the master"
- #Copy the key over to the master
+ echo "Copying the SSH key to the manager"
+ #Copy the key over to the manager
ssh-copy-id -f -i /root/.ssh/so.key soremote@"$MSRV"
}
@@ -703,7 +703,7 @@ docker_install() {
else
case "$install_type" in
- 'MASTER' | 'EVAL')
+ 'MANAGER' | 'EVAL')
apt-get update >> "$setup_log" 2>&1
;;
*)
@@ -733,7 +733,7 @@ docker_registry() {
echo "Setting up Docker Registry" >> "$setup_log" 2>&1
mkdir -p /etc/docker >> "$setup_log" 2>&1
- # Make the host use the master docker registry
+ # Make the host use the manager docker registry
if [ -n "$TURBO" ]; then local proxy="$TURBO"; else local proxy="https://$MSRV"; fi
printf '%s\n'\
"{"\
@@ -832,12 +832,24 @@ firewall_generate_templates() {
cp ../files/firewall/* /opt/so/saltstack/local/salt/firewall/ >> "$setup_log" 2>&1
- for i in analyst beats_endpoint sensor master minion osquery_endpoint search_node wazuh_endpoint; do
+ for i in analyst beats_endpoint sensor manager minion osquery_endpoint search_node wazuh_endpoint; do
$default_salt_dir/salt/common/tools/sbin/so-firewall includehost "$i" 127.0.0.1
done
}
+fleet_pillar() {
+
+ local pillar_file="$temp_install_dir"/pillar/minions/"$MINION_ID".sls
+
+ # Create the fleet pillar
+ printf '%s\n'\
+ "fleet:"\
+ " mainip: $MAINIP"\
+ " manager: $MSRV"\
+ "" > "$pillar_file"
+}
+
generate_passwords(){
# Generate Random Passwords for Things
MYSQLPASS=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1)
@@ -871,7 +883,7 @@ got_root() {
get_minion_type() {
local minion_type
case "$install_type" in
- 'EVAL' | 'MASTERSEARCH' | 'MASTER' | 'SENSOR' | 'HEAVYNODE' | 'FLEET' | 'STANDALONE')
+ 'EVAL' | 'MANAGERSEARCH' | 'MANAGER' | 'SENSOR' | 'HEAVYNODE' | 'FLEET' | 'STANDALONE')
minion_type=$(echo "$install_type" | tr '[:upper:]' '[:lower:]')
;;
'HELIXSENSOR')
@@ -904,13 +916,13 @@ install_cleanup() {
}
-master_pillar() {
+manager_pillar() {
local pillar_file=$temp_install_dir/pillar/minions/$MINION_ID.sls
- # Create the master pillar
+ # Create the manager pillar
printf '%s\n'\
- "master:"\
+ "manager:"\
" mainip: $MAINIP"\
" mainint: $MNIC"\
" esheap: $ES_HEAP_SIZE"\
@@ -919,7 +931,7 @@ master_pillar() {
" domainstats: 0" >> "$pillar_file"
- if [ "$install_type" = 'EVAL' ] || [ "$install_type" = 'HELIXSENSOR' ] || [ "$install_type" = 'MASTERSEARCH' ] || [ "$install_type" = 'STANDALONE' ]; then
+ if [ "$install_type" = 'EVAL' ] || [ "$install_type" = 'HELIXSENSOR' ] || [ "$install_type" = 'MANAGERSEARCH' ] || [ "$install_type" = 'STANDALONE' ]; then
printf '%s\n'\
" mtu: $MTU" >> "$pillar_file"
fi
@@ -949,6 +961,16 @@ master_pillar() {
" playbook: $PLAYBOOK"\
" url_base: $REDIRECTIT"\
""\
+ "elasticsearch:"\
+ " mainip: $MAINIP"\
+ " mainint: $MNIC"\
+ " esheap: $NODE_ES_HEAP_SIZE"\
+ " esclustername: {{ grains.host }}"\
+ " node_type: $NODETYPE"\
+ " es_port: $node_es_port"\
+ " log_size_limit: $log_size_limit"\
+ " node_route_type: hot"\
+ ""\
"logstash_settings:"\
" ls_pipeline_batch_size: 125"\
" ls_input_threads: 1"\
@@ -966,19 +988,19 @@ master_pillar() {
cat "$pillar_file" >> "$setup_log" 2>&1
}
-master_static() {
+manager_static() {
local static_pillar="$local_salt_dir/pillar/static.sls"
# Create a static file for global values
printf '%s\n'\
"static:"\
" soversion: $SOVERSION"\
- " hnmaster: $HNMASTER"\
+ " hnmanager: $HNMANAGER"\
" ntpserver: $NTPSERVER"\
" proxy: $PROXY"\
" broversion: $BROVERSION"\
" ids: $NIDS"\
- " masterip: $MAINIP"\
+ " managerip: $MAINIP"\
" hiveuser: $WEBUSER"\
" hivepassword: $WEBPASSWD1"\
" hivekey: $HIVEKEY"\
@@ -990,7 +1012,7 @@ master_static() {
" cortexorguserkey: $CORTEXORGUSERKEY"\
" grafanapassword: $WEBPASSWD1"\
" fleet_custom_hostname: "\
- " fleet_master: False"\
+ " fleet_manager: False"\
" fleet_node: False"\
" fleet_packages-timestamp: N/A"\
" fleet_packages-version: 1"\
@@ -998,12 +1020,75 @@ master_static() {
" fleet_ip: N/A"\
" sensoronikey: $SENSORONIKEY"\
" wazuh: $WAZUH"\
- " masterupdate: $MASTERUPDATES"\
+ " managerupdate: $MANAGERUPDATES"\
"strelka:"\
" enabled: $STRELKA"\
" rules: $STRELKARULES"\
+ "curator:"\
+ " hot_warm: False"\
"elastic:"\
- " features: False" > "$static_pillar"
+ " features: False"\
+ "elasticsearch:"\
+ " replicas: 0"\
+ " true_cluster: False"\
+ " true_cluster_name: so"\
+ " discovery_nodes: 1"\
+ " hot_warm_enabled: False"\
+ " cluster_routing_allocation_disk.threshold_enabled: true"\
+ " cluster_routing_allocation_disk_watermark_low: 95%"\
+ " cluster_routing_allocation_disk_watermark_high: 98%"\
+ " cluster_routing_allocation_disk_watermark_flood_stage: 98%"\
+ " index_settings:"\
+ " so-beats:"\
+ " shards: 1"\
+ " warm: 7"\
+ " close: 30"\
+ " delete: 365"\
+ " so-firewall:"\
+ " shards: 1"\
+ " warm: 7"\
+ " close: 30"\
+ " delete: 365"\
+ " so-flow:"\
+ " shards: 1"\
+ " warm: 7"\
+ " close: 30"\
+ " delete: 365"\
+ " so-ids:"\
+ " shards: 1"\
+ " warm: 7"\
+ " close: 30"\
+ " delete: 365"\
+ " so-import:"\
+ " shards: 1"\
+ " warm: 7"\
+ " close: 73000"\
+ " delete: 73001"\
+ " so-osquery:"\
+ " shards: 1"\
+ " warm: 7"\
+ " close: 30"\
+ " delete: 365"\
+ " so-ossec:"\
+ " shards: 1"\
+ " warm: 7"\
+ " close: 30"\
+ " delete: 365"\
+ " so-strelka:"\
+ " shards: 1"\
+ " warm: 7"\
+ " close: 30"\
+ " delete: 365"\
+ " so-syslog:"\
+ " shards: 1"\
+ " warm: 7"\
+ " close: 30"\
+ " delete: 365"\
+ " so-zeek:"\
+ " shards: 5"\
+ " warm: 7"\
+ " close: 365"\
+ " delete: 45" > "$static_pillar"
printf '%s\n' '----' >> "$setup_log" 2>&1
cat "$static_pillar" >> "$setup_log" 2>&1
@@ -1055,15 +1140,10 @@ elasticsearch_pillar() {
" node_type: $NODETYPE"\
" es_port: $node_es_port"\
" log_size_limit: $log_size_limit"\
- " cur_close_days: $CURCLOSEDAYS"\
- " route_type: hot"\
- " index_settings:"\
- " so-zeek:"\
- " shards: 5"\
- " replicas: 0"\
+ " node_route_type: hot"\
"" >> "$pillar_file"
- if [ "$install_type" != 'EVAL' ] && [ "$install_type" != 'HELIXSENSOR' ] && [ "$install_type" != 'MASTERSEARCH' ] && [ "$install_type" != 'STANDALONE' ]; then
+ if [ "$install_type" != 'EVAL' ] && [ "$install_type" != 'HELIXSENSOR' ] && [ "$install_type" != 'MANAGERSEARCH' ] && [ "$install_type" != 'STANDALONE' ]; then
printf '%s\n'\
"logstash_settings:"\
" ls_pipeline_batch_size: $LSPIPELINEBATCH"\
@@ -1151,11 +1231,11 @@ saltify() {
set_progress_str 6 'Installing various dependencies'
yum -y install wget nmap-ncat >> "$setup_log" 2>&1
case "$install_type" in
- 'MASTER' | 'EVAL' | 'MASTERSEARCH' | 'FLEET' | 'HELIXSENSOR' | 'STANDALONE')
+ 'MANAGER' | 'EVAL' | 'MANAGERSEARCH' | 'FLEET' | 'HELIXSENSOR' | 'STANDALONE')
reserve_group_ids >> "$setup_log" 2>&1
yum -y install epel-release >> "$setup_log" 2>&1
yum -y install sqlite argon2 curl mariadb-devel >> "$setup_log" 2>&1
- # Download Ubuntu Keys in case master updates = 1
+ # Download Ubuntu Keys in case manager updates = 1
mkdir -p /opt/so/gpg >> "$setup_log" 2>&1
wget -q --inet4-only -O /opt/so/gpg/SALTSTACK-GPG-KEY.pub https://repo.saltstack.com/apt/ubuntu/16.04/amd64/latest/SALTSTACK-GPG-KEY.pub >> "$setup_log" 2>&1
wget -q --inet4-only -O /opt/so/gpg/docker.pub https://download.docker.com/linux/ubuntu/gpg >> "$setup_log" 2>&1
@@ -1166,7 +1246,7 @@ saltify() {
systemctl enable salt-master >> "$setup_log" 2>&1
;;
*)
- if [ "$MASTERUPDATES" = '1' ]; then
+ if [ "$MANAGERUPDATES" = '1' ]; then
{
# Create the GPG Public Key for the Salt Repo
cp ./public_keys/salt.pem /etc/pki/rpm-gpg/saltstack-signing-key;
@@ -1222,7 +1302,7 @@ saltify() {
'FLEET')
if [ "$OSVER" != 'xenial' ]; then apt-get -y install python3-mysqldb >> "$setup_log" 2>&1; else apt-get -y install python-mysqldb >> "$setup_log" 2>&1; fi
;;
- 'MASTER' | 'EVAL' | 'MASTERSEARCH' | 'STANDALONE') # TODO: should this also be HELIXSENSOR?
+ 'MANAGER' | 'EVAL' | 'MANAGERSEARCH' | 'STANDALONE') # TODO: should this also be HELIXSENSOR?
if [ "$OSVER" != "xenial" ]; then local py_ver_url_path="/py3"; else local py_ver_url_path="/apt"; fi
# Add saltstack repo(s)
@@ -1252,9 +1332,9 @@ saltify() {
apt-mark hold salt-master >> "$setup_log" 2>&1
;;
*)
- # Copy down the gpg keys and install them from the master
+ # Copy down the gpg keys and install them from the manager
mkdir "$temp_install_dir"/gpg >> "$setup_log" 2>&1
- echo "scp the gpg keys and install them from the master" >> "$setup_log" 2>&1
+ echo "scp the gpg keys and install them from the manager" >> "$setup_log" 2>&1
scp -v -i /root/.ssh/so.key soremote@"$MSRV":/opt/so/gpg/* "$temp_install_dir"/gpg >> "$setup_log" 2>&1
echo "Using apt-key add to add SALTSTACK-GPG-KEY.pub and GPG-KEY-WAZUH" >> "$setup_log" 2>&1
apt-key add "$temp_install_dir"/gpg/SALTSTACK-GPG-KEY.pub >> "$setup_log" 2>&1
@@ -1280,7 +1360,7 @@ saltify() {
salt_checkin() {
case "$install_type" in
- 'MASTER' | 'EVAL' | 'HELIXSENSOR' | 'MASTERSEARCH' | 'STANDALONE') # Fix Mine usage
+ 'MANAGER' | 'EVAL' | 'HELIXSENSOR' | 'MANAGERSEARCH' | 'STANDALONE') # Fix Mine usage
{
echo "Building Certificate Authority";
salt-call state.apply ca;
@@ -1348,7 +1428,7 @@ setup_salt_master_dirs() {
cp -R ../salt/* $default_salt_dir/salt/ >> "$setup_log" 2>&1
fi
- echo "Chown the salt dirs on the master for socore" >> "$setup_log" 2>&1
+ echo "Chown the salt dirs on the manager for socore" >> "$setup_log" 2>&1
chown -R socore:socore /opt/so
}
@@ -1403,7 +1483,7 @@ sensor_pillar() {
" brobpf:"\
" pcapbpf:"\
" nidsbpf:"\
- " master: $MSRV"\
+ " manager: $MSRV"\
" mtu: $MTU"\
" uniqueid: $(date '+%s')" >> "$pillar_file"
if [ "$HNSENSOR" != 'inherit' ]; then
@@ -1449,7 +1529,7 @@ set_hostname() {
set_hostname_iso
- if [[ ! $install_type =~ ^(MASTER|EVAL|HELIXSENSOR|MASTERSEARCH|STANDALONE)$ ]]; then
+ if [[ ! $install_type =~ ^(MANAGER|EVAL|HELIXSENSOR|MANAGERSEARCH|STANDALONE)$ ]]; then
if ! getent hosts "$MSRV"; then
echo "$MSRVIP $MSRV" >> /etc/hosts
fi
@@ -1476,13 +1556,13 @@ set_initial_firewall_policy() {
if [ -f $default_salt_dir/salt/common/tools/sbin/so-firewall ]; then chmod +x $default_salt_dir/salt/common/tools/sbin/so-firewall; fi
case "$install_type" in
- 'MASTER')
- $default_salt_dir/salt/common/tools/sbin/so-firewall includehost master "$MAINIP"
+ 'MANAGER')
+ $default_salt_dir/salt/common/tools/sbin/so-firewall includehost manager "$MAINIP"
$default_salt_dir/salt/common/tools/sbin/so-firewall --apply includehost minion "$MAINIP"
- $default_salt_dir/pillar/data/addtotab.sh mastertab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm"
+ $default_salt_dir/pillar/data/addtotab.sh managertab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm"
;;
- 'EVAL' | 'MASTERSEARCH' | 'STANDALONE')
- $default_salt_dir/salt/common/tools/sbin/so-firewall includehost master "$MAINIP"
+ 'EVAL' | 'MANAGERSEARCH' | 'STANDALONE')
+ $default_salt_dir/salt/common/tools/sbin/so-firewall includehost manager "$MAINIP"
$default_salt_dir/salt/common/tools/sbin/so-firewall includehost minion "$MAINIP"
$default_salt_dir/salt/common/tools/sbin/so-firewall includehost sensor "$MAINIP"
$default_salt_dir/salt/common/tools/sbin/so-firewall --apply includehost search_node "$MAINIP"
@@ -1490,8 +1570,8 @@ set_initial_firewall_policy() {
'EVAL')
$default_salt_dir/pillar/data/addtotab.sh evaltab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm" $INTERFACE True
;;
- 'MASTERSEARCH')
- $default_salt_dir/pillar/data/addtotab.sh mastersearchtab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm"
+ 'MANAGERSEARCH')
+ $default_salt_dir/pillar/data/addtotab.sh managersearchtab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm"
;;
'STANDALONE')
$default_salt_dir/pillar/data/addtotab.sh standalonetab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm" $INTERFACE
@@ -1499,7 +1579,7 @@ set_initial_firewall_policy() {
esac
;;
'HELIXSENSOR')
- $default_salt_dir/salt/common/tools/sbin/so-firewall includehost master "$MAINIP"
+ $default_salt_dir/salt/common/tools/sbin/so-firewall includehost manager "$MAINIP"
$default_salt_dir/salt/common/tools/sbin/so-firewall includehost minion "$MAINIP"
$default_salt_dir/salt/common/tools/sbin/so-firewall --apply includehost sensor "$MAINIP"
;;
@@ -1558,7 +1638,7 @@ set_management_interface() {
set_node_type() {
case "$install_type" in
- 'SEARCHNODE' | 'EVAL' | 'MASTERSEARCH' | 'HEAVYNODE' | 'STANDALONE')
+ 'SEARCHNODE' | 'EVAL' | 'MANAGERSEARCH' | 'HEAVYNODE' | 'STANDALONE')
NODETYPE='search'
;;
'HOTNODE')
@@ -1571,13 +1651,13 @@ set_node_type() {
}
set_updates() {
- if [ "$MASTERUPDATES" = '1' ]; then
+ if [ "$MANAGERUPDATES" = '1' ]; then
if [ "$OS" = 'centos' ]; then
if ! grep -q "$MSRV" /etc/yum.conf; then
echo "proxy=http://$MSRV:3142" >> /etc/yum.conf
fi
else
- # Set it up so the updates roll through the master
+ # Set it up so the updates roll through the manager
printf '%s\n'\
"Acquire::http::Proxy \"http://$MSRV:3142\";"\
"Acquire::https::Proxy \"http://$MSRV:3142\";" > /etc/apt/apt.conf.d/00Proxy
@@ -1598,7 +1678,7 @@ update_sudoers() {
echo "soremote ALL=(ALL) NOPASSWD:/usr/bin/salt-key" | tee -a /etc/sudoers
echo "soremote ALL=(ALL) NOPASSWD:$default_salt_dir/salt/common/tools/sbin/so-firewall" | tee -a /etc/sudoers
echo "soremote ALL=(ALL) NOPASSWD:$default_salt_dir/pillar/data/addtotab.sh" | tee -a /etc/sudoers
- echo "soremote ALL=(ALL) NOPASSWD:$default_salt_dir/salt/master/files/add_minion.sh" | tee -a /etc/sudoers
+ echo "soremote ALL=(ALL) NOPASSWD:$default_salt_dir/salt/manager/files/add_minion.sh" | tee -a /etc/sudoers
else
echo "User soremote already granted sudo privileges" >> "$setup_log" 2>&1
fi
@@ -1614,7 +1694,7 @@ update_packages() {
}
use_turbo_proxy() {
- if [[ ! $install_type =~ ^(MASTER|EVAL|HELIXSENSOR|MASTERSEARCH|STANDALONE)$ ]]; then
+ if [[ ! $install_type =~ ^(MANAGER|EVAL|HELIXSENSOR|MANAGERSEARCH|STANDALONE)$ ]]; then
echo "turbo is not supported on this install type" >> $setup_log 2>&1
return
fi
@@ -1638,7 +1718,7 @@ ls_heapsize() {
fi
case "$install_type" in
- 'MASTERSEARCH' | 'HEAVYNODE' | 'HELIXSENSOR' | 'STANDALONE')
+ 'MANAGERSEARCH' | 'HEAVYNODE' | 'HELIXSENSOR' | 'STANDALONE')
LS_HEAP_SIZE='1000m'
;;
'EVAL')
@@ -1650,7 +1730,7 @@ ls_heapsize() {
esac
export LS_HEAP_SIZE
- if [[ "$install_type" =~ ^(EVAL|MASTERSEARCH|STANDALONE)$ ]]; then
+ if [[ "$install_type" =~ ^(EVAL|MANAGERSEARCH|STANDALONE)$ ]]; then
NODE_LS_HEAP_SIZE=LS_HEAP_SIZE
export NODE_LS_HEAP_SIZE
fi
@@ -1672,7 +1752,7 @@ es_heapsize() {
fi
export ES_HEAP_SIZE
- if [[ "$install_type" =~ ^(EVAL|MASTERSEARCH|STANDALONE)$ ]]; then
+ if [[ "$install_type" =~ ^(EVAL|MANAGERSEARCH|STANDALONE)$ ]]; then
NODE_ES_HEAP_SIZE=ES_HEAP_SIZE
export NODE_ES_HEAP_SIZE
fi
diff --git a/setup/so-setup b/setup/so-setup
index d9bc73d27..42fa6c33a 100755
--- a/setup/so-setup
+++ b/setup/so-setup
@@ -129,21 +129,21 @@ whiptail_install_type
if [ "$install_type" = 'EVAL' ]; then
is_node=true
- is_master=true
+ is_manager=true
is_sensor=true
is_eval=true
elif [ "$install_type" = 'STANDALONE' ]; then
- is_master=true
- is_distmaster=true
+ is_manager=true
+ is_distmanager=true
is_node=true
is_sensor=true
-elif [ "$install_type" = 'MASTERSEARCH' ]; then
- is_master=true
- is_distmaster=true
+elif [ "$install_type" = 'MANAGERSEARCH' ]; then
+ is_manager=true
+ is_distmanager=true
is_node=true
-elif [ "$install_type" = 'MASTER' ]; then
- is_master=true
- is_distmaster=true
+elif [ "$install_type" = 'MANAGER' ]; then
+ is_manager=true
+ is_distmanager=true
elif [ "$install_type" = 'SENSOR' ]; then
is_sensor=true
is_minion=true
@@ -169,7 +169,7 @@ elif [[ $is_fleet_standalone ]]; then
check_requirements "dist" "fleet"
elif [[ $is_sensor && ! $is_eval ]]; then
check_requirements "dist" "sensor"
-elif [[ $is_distmaster || $is_minion ]]; then
+elif [[ $is_distmanager || $is_minion ]]; then
check_requirements "dist"
fi
@@ -214,15 +214,15 @@ if [[ $is_helix ]]; then
RULESETUP=ETOPEN
NSMSETUP=BASIC
HNSENSOR=inherit
- MASTERUPDATES=0
+ MANAGERUPDATES=0
fi
-if [[ $is_helix || ( $is_master && $is_node ) ]]; then
+if [[ $is_helix || ( $is_manager && $is_node ) ]]; then
RULESETUP=ETOPEN
NSMSETUP=BASIC
fi
-if [[ $is_master && $is_node ]]; then
+if [[ $is_manager && $is_node ]]; then
LSPIPELINEWORKERS=1
LSPIPELINEBATCH=125
LSINPUTTHREADS=1
@@ -241,16 +241,16 @@ if [[ $is_helix || $is_sensor ]]; then
calculate_useable_cores
fi
-if [[ $is_helix || $is_master ]]; then
- whiptail_homenet_master
+if [[ $is_helix || $is_manager ]]; then
+ whiptail_homenet_manager
fi
-if [[ $is_helix || $is_master || $is_node ]]; then
+if [[ $is_helix || $is_manager || $is_node ]]; then
set_base_heapsizes
fi
-if [[ $is_master && ! $is_eval ]]; then
- whiptail_master_adv
+if [[ $is_manager && ! $is_eval ]]; then
+ whiptail_manager_adv
whiptail_bro_version
whiptail_nids
whiptail_rule_setup
@@ -259,12 +259,12 @@ if [[ $is_master && ! $is_eval ]]; then
whiptail_oinkcode
fi
- if [ "$MASTERADV" = 'ADVANCED' ] && [ "$BROVERSION" != 'SURICATA' ]; then
- whiptail_master_adv_service_brologs
+ if [ "$MANAGERADV" = 'ADVANCED' ] && [ "$BROVERSION" != 'SURICATA' ]; then
+ whiptail_manager_adv_service_brologs
fi
fi
-if [[ $is_master ]]; then
+if [[ $is_manager ]]; then
whiptail_components_adv_warning
whiptail_enable_components
if [[ $STRELKA == 1 ]]; then
@@ -274,10 +274,10 @@ if [[ $is_master ]]; then
get_redirect
fi
-if [[ $is_distmaster || ( $is_sensor || $is_node || $is_fleet_standalone ) && ! $is_eval ]]; then
- whiptail_master_updates
- if [[ $setup_type == 'network' && $MASTERUPDATES == 1 ]]; then
- whiptail_master_updates_warning
+if [[ $is_distmanager || ( $is_sensor || $is_node || $is_fleet_standalone ) && ! $is_eval ]]; then
+ whiptail_manager_updates
+ if [[ $setup_type == 'network' && $MANAGERUPDATES == 1 ]]; then
+ whiptail_manager_updates_warning
fi
fi
@@ -285,7 +285,7 @@ if [[ $is_minion ]]; then
whiptail_management_server
fi
-if [[ $is_distmaster ]]; then
+if [[ $is_distmanager ]]; then
collect_soremote_inputs
fi
@@ -351,32 +351,32 @@ fi
{
set_hostname;
set_version;
- clear_master;
+ clear_manager;
} >> $setup_log 2>&1
-if [[ $is_master ]]; then
+if [[ $is_manager ]]; then
{
generate_passwords;
secrets_pillar;
- add_socore_user_master;
+ add_socore_user_manager;
} >> $setup_log 2>&1
fi
-if [[ $is_master && ! $is_eval ]]; then
- add_soremote_user_master >> $setup_log 2>&1
+if [[ $is_manager && ! $is_eval ]]; then
+ add_soremote_user_manager >> $setup_log 2>&1
fi
set_main_ip >> $setup_log 2>&1
+host_pillar >> $setup_log 2>&1
+
if [[ $is_minion ]]; then
set_updates >> $setup_log 2>&1
copy_ssh_key >> $setup_log 2>&1
fi
-if [[ "$OSQUERY" = 1 ]]; then
- host_pillar >> $setup_log 2>&1
-fi
+
# Begin install
{
@@ -410,12 +410,12 @@ fi
set_progress_str 9 'Initializing Salt minion'
configure_minion "$minion_type" >> $setup_log 2>&1
- if [[ $is_master || $is_helix ]]; then
+ if [[ $is_manager || $is_helix ]]; then
set_progress_str 10 'Configuring Salt master'
{
create_local_directories;
addtotab_generate_templates;
- copy_master_config;
+ copy_salt_master_config;
setup_salt_master_dirs;
firewall_generate_templates;
} >> $setup_log 2>&1
@@ -423,11 +423,11 @@ fi
set_progress_str 11 'Updating sudoers file for soremote user'
update_sudoers >> $setup_log 2>&1
- set_progress_str 12 'Generating master static pillar'
- master_static >> $setup_log 2>&1
+ set_progress_str 12 'Generating manager static pillar'
+ manager_static >> $setup_log 2>&1
- set_progress_str 13 'Generating master pillar'
- master_pillar >> $setup_log 2>&1
+ set_progress_str 13 'Generating manager pillar'
+ manager_pillar >> $setup_log 2>&1
fi
@@ -448,22 +448,22 @@ fi
fi
if [[ $is_minion ]]; then
- set_progress_str 20 'Accepting Salt key on master'
+ set_progress_str 20 'Accepting Salt key on manager'
accept_salt_key_remote >> $setup_log 2>&1
fi
- if [[ $is_master ]]; then
+ if [[ $is_manager ]]; then
set_progress_str 20 'Accepting Salt key'
salt-key -ya "$MINION_ID" >> $setup_log 2>&1
fi
- set_progress_str 21 'Copying minion pillars to master'
+ set_progress_str 21 'Copying minion pillars to manager'
copy_minion_tmp_files >> $setup_log 2>&1
set_progress_str 22 'Generating CA and checking in'
salt_checkin >> $setup_log 2>&1
- if [[ $is_master || $is_helix ]]; then
+ if [[ $is_manager || $is_helix ]]; then
set_progress_str 25 'Configuring firewall'
set_initial_firewall_policy >> $setup_log 2>&1
@@ -476,14 +476,14 @@ fi
salt-call state.apply -l info registry >> $setup_log 2>&1
docker_seed_registry 2>> "$setup_log" # ~ 60% when finished
- set_progress_str 60 "$(print_salt_state_apply 'master')"
- salt-call state.apply -l info master >> $setup_log 2>&1
+ set_progress_str 60 "$(print_salt_state_apply 'manager')"
+ salt-call state.apply -l info manager >> $setup_log 2>&1
set_progress_str 61 "$(print_salt_state_apply 'idstools')"
salt-call state.apply -l info idstools >> $setup_log 2>&1
- set_progress_str 61 "$(print_salt_state_apply 'suricata.master')"
- salt-call state.apply -l info suricata.master >> $setup_log 2>&1
+ set_progress_str 61 "$(print_salt_state_apply 'suricata.manager')"
+ salt-call state.apply -l info suricata.manager >> $setup_log 2>&1
fi
@@ -501,7 +501,7 @@ fi
set_progress_str 64 "$(print_salt_state_apply 'nginx')"
salt-call state.apply -l info nginx >> $setup_log 2>&1
- if [[ $is_master || $is_node ]]; then
+ if [[ $is_manager || $is_node ]]; then
set_progress_str 64 "$(print_salt_state_apply 'elasticsearch')"
salt-call state.apply -l info elasticsearch >> $setup_log 2>&1
fi
@@ -522,7 +522,7 @@ fi
salt-call state.apply -l info curator >> $setup_log 2>&1
fi
- if [[ $is_master ]]; then
+ if [[ $is_manager ]]; then
set_progress_str 69 "$(print_salt_state_apply 'soc')"
salt-call state.apply -l info soc >> $setup_log 2>&1
@@ -586,12 +586,12 @@ fi
fi
fi
- if [[ $is_master || $is_helix ]]; then
+ if [[ $is_manager || $is_helix ]]; then
set_progress_str 81 "$(print_salt_state_apply 'utility')"
salt-call state.apply -l info utility >> $setup_log 2>&1
fi
- if [[ ( $is_helix || $is_master || $is_node ) && ! $is_eval ]]; then
+ if [[ ( $is_helix || $is_manager || $is_node ) && ! $is_eval ]]; then
set_progress_str 82 "$(print_salt_state_apply 'logstash')"
salt-call state.apply -l info logstash >> $setup_log 2>&1
@@ -603,7 +603,7 @@ fi
filter_unused_nics >> $setup_log 2>&1
network_setup >> $setup_log 2>&1
- if [[ $is_master ]]; then
+ if [[ $is_manager ]]; then
set_progress_str 87 'Adding user to SOC'
add_web_user >> $setup_log 2>&1
fi
diff --git a/setup/so-whiptail b/setup/so-whiptail
index 3b8b13f79..0e2cab26f 100755
--- a/setup/so-whiptail
+++ b/setup/so-whiptail
@@ -406,24 +406,24 @@ whiptail_helix_apikey() {
}
-whiptail_homenet_master() {
+whiptail_homenet_manager() {
[ -n "$TESTING" ] && return
- HNMASTER=$(whiptail --title "Security Onion Setup" --inputbox \
+ HNMANAGER=$(whiptail --title "Security Onion Setup" --inputbox \
"Enter your HOME_NET separated by ," 10 75 10.0.0.0/8,192.168.0.0/16,172.16.0.0/12 3>&1 1>&2 2>&3)
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
- export HNMASTER
+ export HNMANAGER
}
whiptail_homenet_sensor() {
[ -n "$TESTING" ] && return
- # Ask to inherit from master
- whiptail --title "Security Onion Setup" --yesno "Do you want to inherit the HOME_NET from the Master?" 8 75
+ # Ask to inherit from manager
+ whiptail --title "Security Onion Setup" --yesno "Do you want to inherit the HOME_NET from the Manager?" 8 75
local exitstatus=$?
@@ -458,10 +458,10 @@ whiptail_install_type() {
if [[ $install_type == "DISTRIBUTED" ]]; then
install_type=$(whiptail --title "Security Onion Setup" --radiolist \
"Choose distributed node type:" 13 60 6 \
- "MASTER" "Start a new grid " ON \
+ "MANAGER" "Start a new grid " ON \
"SENSOR" "Create a forward only sensor " OFF \
"SEARCHNODE" "Add a search node with parsing " OFF \
- "MASTERSEARCH" "Master + search node " OFF \
+ "MANAGERSEARCH" "Manager + search node " OFF \
"FLEET" "Dedicated Fleet Osquery Node " OFF \
"HEAVYNODE" "Sensor + Search Node " OFF \
3>&1 1>&2 2>&3
@@ -599,26 +599,26 @@ whiptail_management_server() {
[ -n "$TESTING" ] && return
MSRV=$(whiptail --title "Security Onion Setup" --inputbox \
- "Enter your Master Server hostname. It is CASE SENSITIVE!" 10 75 XXXX 3>&1 1>&2 2>&3)
+ "Enter your Manager Server hostname. It is CASE SENSITIVE!" 10 75 XXXX 3>&1 1>&2 2>&3)
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
if ! getent hosts "$MSRV"; then
- add_master_hostfile
+ add_manager_hostfile
fi
}
-# Ask if you want to do advanced setup of the Master
-whiptail_master_adv() {
+# Ask if you want to do advanced setup of the Manager
+whiptail_manager_adv() {
[ -n "$TESTING" ] && return
- MASTERADV=$(whiptail --title "Security Onion Setup" --radiolist \
- "Choose what type of master install:" 20 75 4 \
- "BASIC" "Install master with recommended settings" ON \
- "ADVANCED" "Do additional configuration to the master" OFF 3>&1 1>&2 2>&3 )
+ MANAGERADV=$(whiptail --title "Security Onion Setup" --radiolist \
+ "Choose which type of manager to install:" 20 75 4 \
+ "BASIC" "Install manager with recommended settings" ON \
+ "ADVANCED" "Do additional configuration to the manager" OFF 3>&1 1>&2 2>&3 )
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
@@ -626,7 +626,7 @@ whiptail_master_adv() {
}
# Ask which additional components to install
-whiptail_master_adv_service_brologs() {
+whiptail_manager_adv_service_brologs() {
[ -n "$TESTING" ] && return
@@ -791,7 +791,7 @@ whiptail_patch_name_new_schedule() {
[ -n "$TESTING" ] && return
PATCHSCHEDULENAME=$(whiptail --title "Security Onion Setup" --inputbox \
- "What name do you want to give this OS patch schedule? This schedule needs to be named uniquely. Available schedules can be found on the master under /opt/so/salt/patch/os/schedules/.yml" 10 75 3>&1 1>&2 2>&3)
+ "What name do you want to give this OS patch schedule? This schedule needs to be named uniquely. Available schedules can be found on the manager under /opt/so/salt/patch/os/schedules/.yml" 10 75 3>&1 1>&2 2>&3)
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
@@ -799,7 +799,7 @@ whiptail_patch_name_new_schedule() {
while [[ -z "$PATCHSCHEDULENAME" ]]; do
whiptail --title "Security Onion Setup" --msgbox "Please enter a name for this OS patch schedule." 8 75
PATCHSCHEDULENAME=$(whiptail --title "Security Onion Setup" --inputbox \
- "What name do you want to give this OS patch schedule? This schedule needs to be named uniquely. Available schedules can be found on the master under /opt/so/salt/patch/os/schedules/.yml" 10 75 3>&1 1>&2 2>&3)
+ "What name do you want to give this OS patch schedule? This schedule needs to be named uniquely. Available schedules can be found on the manager under /opt/so/salt/patch/os/schedules/.yml" 10 75 3>&1 1>&2 2>&3)
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
done
@@ -850,7 +850,7 @@ whiptail_patch_schedule_import() {
unset PATCHSCHEDULENAME
PATCHSCHEDULENAME=$(whiptail --title "Security Onion Setup" --inputbox \
- "Enter the name of the OS patch schedule you want to inherit. Available schedules can be found on the master under /opt/so/salt/patch/os/schedules/.yml" 10 75 3>&1 1>&2 2>&3)
+ "Enter the name of the OS patch schedule you want to inherit. Available schedules can be found on the manager under /opt/so/salt/patch/os/schedules/.yml" 10 75 3>&1 1>&2 2>&3)
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
@@ -858,7 +858,7 @@ whiptail_patch_schedule_import() {
while [[ -z "$PATCHSCHEDULENAME" ]]; do
whiptail --title "Security Onion Setup" --msgbox "Please enter a name for the OS patch schedule you want to inherit." 8 75
PATCHSCHEDULENAME=$(whiptail --title "Security Onion Setup" --inputbox \
- "Enter the name of the OS patch schedule you want to inherit. Available schedules can be found on the master under /opt/so/salt/patch/os/schedules/.yml" 10 75 3>&1 1>&2 2>&3)
+ "Enter the name of the OS patch schedule you want to inherit. Available schedules can be found on the manager under /opt/so/salt/patch/os/schedules/.yml" 10 75 3>&1 1>&2 2>&3)
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
@@ -944,7 +944,7 @@ whiptail_rule_setup() {
# Get pulled pork info
RULESETUP=$(whiptail --title "Security Onion Setup" --radiolist \
- "Which IDS ruleset would you like to use?\n\nThis master server is responsible for downloading the IDS ruleset from the Internet.\n\nSensors then pull a copy of this ruleset from the master server.\n\nIf you select a commercial ruleset, it is your responsibility to purchase enough licenses for all of your sensors in compliance with your vendor's policies." 20 75 4 \
+ "Which IDS ruleset would you like to use?\n\nThis manager server is responsible for downloading the IDS ruleset from the Internet.\n\nSensors then pull a copy of this ruleset from the manager server.\n\nIf you select a commercial ruleset, it is your responsibility to purchase enough licenses for all of your sensors in compliance with your vendor's policies." 20 75 4 \
"ETOPEN" "Emerging Threats Open" ON \
"ETPRO" "Emerging Threats PRO" OFF \
"TALOSET" "Snort Subscriber (Talos) and ET NoGPL rulesets" OFF \
@@ -1133,34 +1133,34 @@ whiptail_suricata_pins() {
}
-whiptail_master_updates() {
+whiptail_manager_updates() {
[ -n "$TESTING" ] && return
local update_string
update_string=$(whiptail --title "Security Onion Setup" --radiolist \
"How would you like to download OS package updates for your grid?:" 20 75 4 \
- "MASTER" "Master node is proxy for updates" ON \
+ "MANAGER" "Manager node is proxy for updates" ON \
"OPEN" "Each node connects to the Internet for updates" OFF 3>&1 1>&2 2>&3 )
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
case "$update_string" in
- 'MASTER')
- MASTERUPDATES='1'
+ 'MANAGER')
+ export MANAGERUPDATES='1'
;;
*)
- MASTERUPDATES='0'
+ export MANAGERUPDATES='0'
;;
esac
}
-whiptail_master_updates_warning() {
+whiptail_manager_updates_warning() {
[ -n "$TESTING" ] && return
whiptail --title "Security Onion Setup"\
- --msgbox "Updating through the master node requires the master to have internet access, press ENTER to continue"\
+ --msgbox "Updating through the manager node requires the manager to have internet access, press ENTER to continue."\
8 75
local exitstatus=$?
@@ -1173,7 +1173,7 @@ whiptail_node_updates() {
NODEUPDATES=$(whiptail --title "Security Onion Setup" --radiolist \
"How would you like to download OS package updates for your grid?:" 20 75 4 \
- "MASTER" "Master node is proxy for updates." ON \
+ "MANAGER" "Manager node is proxy for updates." ON \
"OPEN" "Each node connects to the Internet for updates" OFF 3>&1 1>&2 2>&3 )
local exitstatus=$?
diff --git a/upgrade/so-update-functions b/upgrade/so-update-functions
index 8b7fcd312..a0a4b0288 100644
--- a/upgrade/so-update-functions
+++ b/upgrade/so-update-functions
@@ -32,7 +32,7 @@ fi
HOSTNAME=$(hostname)
# List all the containers
-if [ $MASTERCHECK != 'so-helix' ]; then
+if [ "$MANAGERCHECK" != 'so-helix' ]; then
TRUSTED_CONTAINERS=( \
"so-acng:$BUILD$UPDATEVERSION" \
"so-thehive-cortex:$BUILD$UPDATEVERSION" \
@@ -136,13 +136,13 @@ detect_os() {
}
-master_check() {
- # Check to see if this is a master
- MASTERCHECK=$(cat /etc/salt/grains | grep role | awk '{print $2}')
- if [ $MASTERCHECK == 'so-eval' OR $MASTERCHECK == 'so-master' OR $MASTERCHECK == 'so-mastersearch' ]; then
- echo "This is a master. We can proceed"
+manager_check() {
+ # Check to see if this is a manager
+ MANAGERCHECK=$(grep role /etc/salt/grains | awk '{print $2}')
+ if [ "$MANAGERCHECK" == 'so-eval' ] || [ "$MANAGERCHECK" == 'so-manager' ] || [ "$MANAGERCHECK" == 'so-managersearch' ]; then
+ echo "This is a manager. We can proceed"
else
- echo "Please run soup on the master. The master controls all updates."
+ echo "Please run soup on the manager. The manager controls all updates."
exit
}
diff --git a/upgrade/soup b/upgrade/soup
index 19fa0203f..068782f04 100644
--- a/upgrade/soup
+++ b/upgrade/soup
@@ -19,7 +19,7 @@ SCRIPTDIR=$(dirname "$0")
source $SCRIPTDIR/so-update-functions
# Update Packages
-master_check
+manager_check
update_all_packages
update_held_packages