Mirror of https://github.com/Security-Onion-Solutions/securityonion.git

Merge pull request #6604 from Security-Onion-Solutions/issue/6469
https://github.com/Security-Onion-Solutions/securityonion/issues/6469
@@ -16,6 +16,7 @@ role:
  import:
  manager:
  managersearch:
  receiver:
  standalone:
  searchnode:
  sensor:
@@ -44,6 +44,10 @@ firewall:
    ips:
      delete:
      insert:
  receiver:
    ips:
      delete:
      insert:
  search_node:
    ips:
      delete:
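For illustration only: once setup registers a receiver with so-firewall includehost (shown later in this diff), the new receiver key would carry that host's address under ips. The shape and the IP below are assumed example values, not part of the commit:

firewall:
  receiver:
    ips:
      insert:
        - 10.66.66.20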
@@ -1,4 +1,3 @@
{%- set PIPELINE = salt['pillar.get']('global:pipeline', 'redis') %}
logstash:
  pipelines:
    manager:
pillar/logstash/nodes.sls (new file, 29 lines)
@@ -0,0 +1,29 @@
{% set node_types = {} %}
{% for minionid, ip in salt.saltutil.runner(
    'mine.get',
    tgt='G@role:so-manager or G@role:so-managersearch or G@role:so-standalone or G@role:so-node or G@role:so-heavynode or G@role:so-receiver or G@role:so-helix',
    fun='network.ip_addrs',
    tgt_type='compound') | dictsort()
%}
  {% set hostname = minionid.split('_')[0] %}
  {% set node_type = minionid.split('_')[1] %}
  {% if node_type not in node_types.keys() %}
    {% do node_types.update({node_type: {hostname: ip[0]}}) %}
  {% else %}
    {% if hostname not in node_types[node_type] %}
      {% do node_types[node_type].update({hostname: ip[0]}) %}
    {% else %}
      {% do node_types[node_type][hostname].update(ip[0]) %}
    {% endif %}
  {% endif %}
{% endfor %}

logstash:
  nodes:
{% for node_type, values in node_types.items() %}
    {{node_type}}:
{% for hostname, ip in values.items() %}
      {{hostname}}:
        ip: {{ip}}
{% endfor %}
{% endfor %}
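As a sketch of what this new template renders: assuming two hypothetical minions, manager01_manager at 10.0.0.10 and receiver01_receiver at 10.0.0.20, each publishing its IP through the Salt mine, the resulting pillar would be roughly:

logstash:
  nodes:
    manager:
      manager01:
        ip: 10.0.0.10
    receiver:
      receiver01:
        ip: 10.0.0.20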
pillar/logstash/receiver.sls (new file, 9 lines)
@@ -0,0 +1,9 @@
logstash:
  pipelines:
    receiver:
      config:
        - so/0009_input_beats.conf
        - so/0010_input_hhbeats.conf
        - so/0011_input_endgame.conf
        - so/9999_output_redis.conf.jinja
@@ -1,4 +1,3 @@
{%- set PIPELINE = salt['pillar.get']('global:pipeline', 'minio') %}
logstash:
  pipelines:
    search:
pillar/node_data/ips.sls (new file, 33 lines)
@@ -0,0 +1,33 @@
{% set node_types = {} %}
{% set manage_alived = salt.saltutil.runner('manage.alived', show_ip=True) %}
{% set manager = grains.master %}
{% set manager_type = manager.split('_')|last %}
{% for minionid, ip in salt.saltutil.runner('mine.get', tgt='*', fun='network.ip_addrs', tgt_type='glob') | dictsort() %}
  {% set hostname = minionid.split('_')[0] %}
  {% set node_type = minionid.split('_')[1] %}
  {% set is_alive = False %}
  {% if minionid in manage_alived.keys() %}
    {% if ip[0] == manage_alived[minionid] %}
      {% set is_alive = True %}
    {% endif %}
  {% endif %}
  {% if node_type not in node_types.keys() %}
    {% do node_types.update({node_type: {hostname: {'ip': ip[0], 'alive': is_alive}}}) %}
  {% else %}
    {% if hostname not in node_types[node_type] %}
      {% do node_types[node_type].update({hostname: {'ip': ip[0], 'alive': is_alive}}) %}
    {% else %}
      {% do node_types[node_type][hostname].update({'ip': ip[0], 'alive': is_alive}) %}
    {% endif %}
  {% endif %}
{% endfor %}

node_data:
{% for node_type, host_values in node_types.items() %}
  {{node_type}}:
{% for hostname, details in host_values.items() %}
    {{hostname}}:
      ip: {{details.ip}}
      alive: {{ details.alive }}
{% endfor %}
{% endfor %}
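With the same hypothetical minions, and assuming manage.alived reports receiver01 as up but not manager01, this template would render roughly:

node_data:
  manager:
    manager01:
      ip: 10.0.0.10
      alive: False
  receiver:
    receiver01:
      ip: 10.0.0.20
      alive: True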
@@ -3,6 +3,9 @@ base:
    - patch.needs_restarting
    - logrotate

  '* and not *_eval and not *_import':
    - logstash.nodes

  '*_eval or *_helixsensor or *_heavynode or *_sensor or *_standalone or *_import':
    - match: compound
    - zeek
@@ -104,6 +107,13 @@ base:
    - minions.{{ grains.id }}
    - data.nodestab

  '*_receiver':
    - logstash
    - logstash.receiver
    - elasticsearch.auth
    - global
    - minions.{{ grains.id }}

  '*_import':
    - zeeklogs
    - secrets
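To trace the matching: a hypothetical receiver minion with ID receiver01_receiver matches both '* and not *_eval and not *_import' and '*_receiver', so in addition to the entries under the global '*' match its effective pillar includes would be roughly:

  - logstash.nodes
  - logstash
  - logstash.receiver
  - elasticsearch.auth
  - global
  - minions.receiver01_receiver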
@@ -50,7 +50,6 @@
      'learn'
    ],
    'so-heavynode': [
      'ca',
      'ssl',
      'nginx',
      'telegraf',
@@ -80,7 +79,6 @@
      'docker_clean'
    ],
    'so-fleet': [
      'ca',
      'ssl',
      'nginx',
      'telegraf',
@@ -157,7 +155,6 @@
      'learn'
    ],
    'so-node': [
      'ca',
      'ssl',
      'nginx',
      'telegraf',
@@ -191,7 +188,6 @@
      'learn'
    ],
    'so-sensor': [
      'ca',
      'ssl',
      'telegraf',
      'firewall',
@@ -205,9 +201,16 @@
      'tcpreplay',
      'docker_clean'
    ],
    'so-receiver': [
      'ssl',
      'telegraf',
      'firewall',
      'schedule',
      'docker_clean'
    ],
  }, grain='role') %}

{% if FILEBEAT and grains.role in ['so-helixsensor', 'so-eval', 'so-manager', 'so-standalone', 'so-node', 'so-managersearch', 'so-heavynode', 'so-import'] %}
{% if FILEBEAT and grains.role in ['so-helixsensor', 'so-eval', 'so-manager', 'so-standalone', 'so-node', 'so-managersearch', 'so-heavynode', 'so-import', 'so-receiver'] %}
  {% do allowed_states.append('filebeat') %}
{% endif %}

@@ -215,7 +218,7 @@
  {% do allowed_states.append('mysql') %}
{% endif %}

{% if (FLEETMANAGER or FLEETNODE) and grains.role in ['so-sensor', 'so-eval', 'so-manager', 'so-standalone', 'so-node', 'so-managersearch', 'so-heavynode'] %}
{% if (FLEETMANAGER or FLEETNODE) and grains.role in ['so-sensor', 'so-eval', 'so-manager', 'so-standalone', 'so-node', 'so-managersearch', 'so-heavynode', 'so-receiver'] %}
  {% do allowed_states.append('fleet.install_package') %}
{% endif %}

@@ -235,7 +238,7 @@
  {% do allowed_states.append('strelka') %}
{% endif %}

{% if WAZUH and grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-node', 'so-managersearch', 'so-heavynode'] %}
{% if WAZUH and grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-node', 'so-managersearch', 'so-heavynode', 'so-receiver'] %}
  {% do allowed_states.append('wazuh') %}
{% endif %}

@@ -280,11 +283,11 @@
  {% do allowed_states.append('domainstats') %}
{% endif %}

{% if LOGSTASH and grains.role in ['so-helixsensor', 'so-manager', 'so-standalone', 'so-node', 'so-managersearch', 'so-heavynode'] %}
{% if LOGSTASH and grains.role in ['so-helixsensor', 'so-manager', 'so-standalone', 'so-node', 'so-managersearch', 'so-heavynode', 'so-receiver'] %}
  {% do allowed_states.append('logstash') %}
{% endif %}

{% if REDIS and grains.role in ['so-manager', 'so-standalone', 'so-managersearch', 'so-heavynode'] %}
{% if REDIS and grains.role in ['so-manager', 'so-standalone', 'so-managersearch', 'so-heavynode', 'so-receiver'] %}
  {% do allowed_states.append('redis') %}
{% endif %}
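For context, Security Onion state files typically gate themselves on this map; a minimal sketch of the consuming pattern (not part of this diff) looks like:

{% from 'allowed_states.map.jinja' import allowed_states with context %}
{% if sls in allowed_states %}
... state definitions ...
{% else %}
{{sls}}_state_not_allowed:
  test.fail_without_changes:
    - name: {{sls}}_state_not_allowed
{% endif %}

So adding 'so-receiver' to the lists above is what permits the ssl, logstash, redis, and filebeat states to run on the new role.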
@@ -14,7 +14,8 @@
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
{%- set NODEIP = salt['pillar.get']('elasticsearch:mainip', '') -%}
{% set MAININT = salt['pillar.get']('host:mainint') -%}
{% set NODEIP = salt['grains.get']('ip_interfaces').get(MAININT)[0] -%}

. /usr/sbin/so-common
@@ -14,7 +14,8 @@
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
{%- set NODEIP = salt['pillar.get']('elasticsearch:mainip', '') -%}
{% set MAININT = salt['pillar.get']('host:mainint') -%}
{% set NODEIP = salt['grains.get']('ip_interfaces').get(MAININT)[0] -%}

. /usr/sbin/so-common
@@ -321,7 +321,28 @@ output.logstash:
  enabled: true

  # The Logstash hosts
  hosts: ["{{ MANAGER }}:5644"]
  hosts:
{%- if grains.role in ['so-sensor', 'so-fleet', 'so-node'] %}
  {%- set LOGSTASH = namespace() %}
  {%- set LOGSTASH.count = 0 %}
  {%- set LOGSTASH.loadbalance = false %}
  {%- set node_data = salt['pillar.get']('logstash:nodes') %}
  {%- for node_type, node_details in node_data.items() | sort -%}
    {%- if node_type in ['manager', 'managersearch', 'standalone', 'receiver'] %}
      {%- for hostname in node_data[node_type].keys() %}
        {%- set LOGSTASH.count = LOGSTASH.count + 1 %}
    - "{{ hostname }}:5644" #{{ node_details[hostname].ip }}
      {%- endfor %}
    {%- endif %}
    {%- if LOGSTASH.count > 1 %}
      {%- set LOGSTASH.loadbalance = true %}
    {%- endif %}
  {%- endfor %}

  loadbalance: {{ LOGSTASH.loadbalance | lower }}
{%- else %}
    - "{{ grains.host }}:5644"
{%- endif %}

  # Number of workers per Logstash host.
  worker: {{ FBLSWORKERS }}
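Continuing the earlier hypothetical pillar (one manager, one receiver), a sensor's rendered output.logstash section would come out roughly as:

output.logstash:
  enabled: true

  hosts:
    - "manager01:5644" #10.0.0.10
    - "receiver01:5644" #10.0.0.20

  loadbalance: true

With two or more hosts, the namespace counter flips loadbalance to true, so Filebeat spreads events across managers and receivers instead of pinning to one.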
@@ -17,12 +17,10 @@
{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{% set LOCALHOSTNAME = salt['grains.get']('host') %}
{% set MAININT = salt['pillar.get']('host:mainint') %}
{% set LOCALHOSTIP = salt['grains.get']('ip_interfaces').get(MAININT)[0] %}
{% set MANAGER = salt['grains.get']('master') %}
{% set MANAGERIP = salt['pillar.get']('global:managerip', '') %}
{% from 'filebeat/map.jinja' import THIRDPARTY with context %}
{% from 'filebeat/map.jinja' import SO with context %}
{% from 'filebeat/map.jinja' import FILEBEAT_EXTRA_HOSTS with context %}
{% set ES_INCLUDED_NODES = ['so-eval', 'so-standalone', 'so-managersearch', 'so-node', 'so-heavynode', 'so-import'] %}

include:
@@ -111,7 +109,7 @@ so-filebeat:
    - image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-filebeat:{{ VERSION }}
    - hostname: so-filebeat
    - user: root
    - extra_hosts: {{ MANAGER }}:{{ MANAGERIP }},{{ LOCALHOSTNAME }}:{{ LOCALHOSTIP }}
    - extra_hosts: {{ FILEBEAT_EXTRA_HOSTS }}
    - binds:
      - /nsm:/nsm:ro
      - /opt/so/log/filebeat:/usr/share/filebeat/logs:rw
@@ -4,3 +4,20 @@
{% import_yaml 'filebeat/securityoniondefaults.yaml' as SODEFAULTS %}
{% set SO = SODEFAULTS.securityonion_filebeat %}
{#% set SO = salt['pillar.get']('filebeat:third_party_filebeat', default=SODEFAULTS.third_party_filebeat, merge=True) %#}

{% set role = grains.role %}
{% set FILEBEAT_EXTRA_HOSTS = [] %}
{% set mainint = salt['pillar.get']('host:mainint') %}
{% set localhostip = salt['grains.get']('ip_interfaces').get(mainint)[0] %}
{% if role in ['so-sensor', 'so-fleet', 'so-node'] %}
  {% set node_data = salt['pillar.get']('logstash:nodes') %}
  {% for node_type, node_details in node_data.items() | sort %}
    {% if node_type in ['manager', 'managersearch', 'standalone', 'receiver'] %}
      {% for hostname in node_data[node_type].keys() %}
        {% do FILEBEAT_EXTRA_HOSTS.append({hostname: node_details[hostname].ip}) %}
      {% endfor %}
    {% endif %}
  {% endfor %}
{% endif %}

{% do FILEBEAT_EXTRA_HOSTS.append({grains.host: localhostip}) %}
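Under the same assumed pillar, a sensor named sensor01 at 10.0.0.30 would end up with FILEBEAT_EXTRA_HOSTS evaluating to a list of one-entry maps, roughly:

[{'manager01': '10.0.0.10'}, {'receiver01': '10.0.0.20'}, {'sensor01': '10.0.0.30'}]

which the init.sls above passes straight to the container's extra_hosts so the Logstash hostnames in filebeat.yml resolve inside the container.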
@@ -604,3 +604,52 @@ role:
          minion:
            portgroups:
              - {{ portgroups.salt_manager }}

  receiver:
    chain:
      DOCKER-USER:
        hostgroups:
          sensor:
            portgroups:
              - {{ portgroups.beats_5644 }}
          search_node:
            portgroups:
              - {{ portgroups.redis }}
              - {{ portgroups.beats_5644 }}
          self:
            portgroups:
              - {{ portgroups.redis }}
              - {{ portgroups.syslog }}
              - {{ portgroups.beats_5644 }}
          syslog:
            portgroups:
              - {{ portgroups.syslog }}
          beats_endpoint:
            portgroups:
              - {{ portgroups.beats_5044 }}
          beats_endpoint_ssl:
            portgroups:
              - {{ portgroups.beats_5644 }}
          endgame:
            portgroups:
              - {{ portgroups.endgame }}
          wazuh_agent:
            portgroups:
              - {{ portgroups.wazuh_agent }}
          wazuh_api:
            portgroups:
              - {{ portgroups.wazuh_api }}
          wazuh_authd:
            portgroups:
              - {{ portgroups.wazuh_authd }}
      INPUT:
        hostgroups:
          anywhere:
            portgroups:
              - {{ portgroups.ssh }}
          dockernet:
            portgroups:
              - {{ portgroups.all }}
          localhost:
            portgroups:
              - {{ portgroups.all }}
@@ -36,17 +36,13 @@
{% set DOCKER_OPTIONS = salt['pillar.get']('logstash:docker_options', {}) %}
{% set TEMPLATES = salt['pillar.get']('elasticsearch:templates', {}) %}

{% if grains.role in ['so-heavynode'] %}
  {% set EXTRAHOSTHOSTNAME = salt['grains.get']('host') %}
  {% set EXTRAHOSTIP = salt['pillar.get']('sensor:mainip') %}
{% else %}
  {% set EXTRAHOSTHOSTNAME = MANAGER %}
  {% set EXTRAHOSTIP = MANAGERIP %}
{% endif %}
{% from 'logstash/map.jinja' import REDIS_NODES with context %}

include:
  - ssl
{% if grains.role not in ['so-receiver'] %}
  - elasticsearch
{% endif %}

# Create the logstash group
logstashgroup:
@@ -157,8 +153,7 @@ so-logstash:
    - hostname: so-logstash
    - name: so-logstash
    - user: logstash
    - extra_hosts:
      - {{ EXTRAHOSTHOSTNAME }}:{{ EXTRAHOSTIP }}
    - extra_hosts: {{ REDIS_NODES }}
    - environment:
      - LS_JAVA_OPTS=-Xms{{ lsheap }} -Xmx{{ lsheap }}
    - port_bindings:
@@ -174,18 +169,20 @@ so-logstash:
      - /nsm/logstash:/usr/share/logstash/data:rw
      - /opt/so/log/logstash:/var/log/logstash:rw
      - /sys/fs/cgroup:/sys/fs/cgroup:ro
{% if grains['role'] in ['so-manager', 'so-eval', 'so-helix', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode'] %}
      - /opt/so/conf/logstash/etc/certs:/usr/share/logstash/certs:ro
{% if grains['role'] in ['so-manager', 'so-helix', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-receiver'] %}
      - /etc/pki/filebeat.crt:/usr/share/logstash/filebeat.crt:ro
      - /etc/pki/filebeat.p8:/usr/share/logstash/filebeat.key:ro
{% endif %}
      - /opt/so/conf/logstash/etc/certs:/usr/share/logstash/certs:ro
{% if grains['role'] == 'so-heavynode' %}
      - /etc/ssl/certs/intca.crt:/usr/share/filebeat/ca.crt:ro
{% else %}
{% if grains['role'] in ['so-manager', 'so-helix', 'so-managersearch', 'so-standalone', 'so-import'] %}
      - /etc/pki/ca.crt:/usr/share/filebeat/ca.crt:ro
{% else %}
      - /etc/ssl/certs/intca.crt:/usr/share/filebeat/ca.crt:ro
{% endif %}
{% if grains.role in ['so-manager', 'so-helix', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-node'] %}
      - /opt/so/conf/ca/cacerts:/etc/pki/ca-trust/extracted/java/cacerts:ro
      - /opt/so/conf/ca/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro
{% endif %}
{%- if grains['role'] == 'so-eval' %}
      - /nsm/zeek:/nsm/zeek:ro
      - /nsm/suricata:/suricata:ro
@@ -206,16 +203,18 @@ so-logstash:
      - file: es_template_{{TEMPLATE.split('.')[0] | replace("/","_") }}
{% endfor %}
    - require:
{% if grains['role'] in ['so-manager', 'so-eval', 'so-helix', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode'] %}
{% if grains['role'] in ['so-manager', 'so-helix', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-receiver'] %}
      - x509: etc_filebeat_crt
{% endif %}
{% if grains['role'] == 'so-heavynode' %}
      - x509: trusttheca
{% else %}
{% if grains['role'] in ['so-manager', 'so-helix', 'so-managersearch', 'so-standalone', 'so-import'] %}
      - x509: pki_public_ca_crt
{% else %}
      - x509: trusttheca
{% endif %}
{% if grains.role in ['so-manager', 'so-helix', 'so-managersearch', 'so-standalone', 'so-import'] %}
      - file: cacertz
      - file: capemz
{% endif %}

append_so-logstash_so-status.conf:
  file.append:
salt/logstash/map.jinja (new file, 16 lines)
@@ -0,0 +1,16 @@
{% set role = grains.role %}
{% set REDIS_NODES = [] %}
{% set mainint = salt['pillar.get']('host:mainint') %}
{% set localhostip = salt['grains.get']('ip_interfaces').get(mainint)[0] %}
{% if role in ['so-node', 'so-standalone', 'so-managersearch'] %}
  {% set node_data = salt['pillar.get']('logstash:nodes') %}
  {% for node_type, node_details in node_data.items() | sort %}
    {% if node_type in ['manager', 'managersearch', 'standalone', 'receiver'] %}
      {% for hostname in node_data[node_type].keys() %}
        {% do REDIS_NODES.append({hostname: node_details[hostname].ip}) %}
      {% endfor %}
    {% endif %}
  {% endfor %}
{% else %}
  {% do REDIS_NODES.append({grains.host: localhostip}) %}
{% endif %}
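On a hypothetical search node with the earlier pillar, REDIS_NODES would evaluate to roughly:

[{'manager01': '10.0.0.10'}, {'receiver01': '10.0.0.20'}]

while any other role falls back to a single entry for the local host. This list drives both the so-logstash extra_hosts above and the redis input config below.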
@@ -8,4 +8,4 @@ filter {
    mutate {
      rename => {"@metadata" => "metadata"}
    }
  }
}
@@ -1,19 +1,20 @@
{%- if grains.role in ['so-heavynode'] %}
{%- set HOST = salt['grains.get']('host') %}
{%- else %}
{%- set HOST = salt['grains.get']('master') %}
{%- endif %}
{%- set THREADS = salt['pillar.get']('logstash_settings:ls_input_threads', '') %}
{%- set BATCH = salt['pillar.get']('logstash_settings:ls_pipeline_batch_size', 125) %}
{% set THREADS = salt['pillar.get']('logstash_settings:ls_input_threads', '') -%}
{% set BATCH = salt['pillar.get']('logstash_settings:ls_pipeline_batch_size', 125) -%}
{% from 'logstash/map.jinja' import REDIS_NODES with context -%}

{% for index in range(REDIS_NODES|length) -%}
{% for host in REDIS_NODES[index] -%}
input {
  redis {
    host => '{{ HOST }}'
    port => 9696
    ssl => true
    data_type => 'list'
    key => 'logstash:unparsed'
    type => 'redis-input'
    threads => {{ THREADS }}
    batch_count => {{ BATCH }}
  }
  redis {
    host => '{{ host }}'
    port => 9696
    ssl => true
    data_type => 'list'
    key => 'logstash:unparsed'
    type => 'redis-input'
    threads => {{ THREADS }}
    batch_count => {{ BATCH }}
  }
}
{% endfor %}
{% endfor -%}
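With the two-entry REDIS_NODES sketched above, the new template would emit one input block per queue host, roughly (threads and batch_count are assumed pillar values):

input {
  redis {
    host => 'manager01'
    port => 9696
    ssl => true
    data_type => 'list'
    key => 'logstash:unparsed'
    type => 'redis-input'
    threads => 1
    batch_count => 125
  }
}
input {
  redis {
    host => 'receiver01'
    port => 9696
    ssl => true
    data_type => 'list'
    key => 'logstash:unparsed'
    type => 'redis-input'
    threads => 1
    batch_count => 125
  }
}

so a search node drains the logstash:unparsed list from every manager and receiver rather than only its configured master.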
@@ -1,4 +1,4 @@
{%- if grains.role in ['so-heavynode'] %}
{%- if grains.role in ['so-heavynode', 'so-receiver'] %}
{%- set HOST = salt['grains.get']('host') %}
{%- else %}
{%- set HOST = salt['grains.get']('master') %}
@@ -66,7 +66,11 @@ so-redis:
      - /opt/so/conf/redis/working:/redis:rw
      - /etc/pki/redis.crt:/certs/redis.crt:ro
      - /etc/pki/redis.key:/certs/redis.key:ro
{% if grains['role'] in ['so-manager', 'so-helix', 'so-managersearch', 'so-standalone', 'so-import'] %}
      - /etc/pki/ca.crt:/certs/ca.crt:ro
{% else %}
      - /etc/ssl/certs/intca.crt:/certs/ca.crt:ro
{% endif %}
    - entrypoint: "redis-server /usr/local/etc/redis/redis.conf"
    - watch:
      - file: /opt/so/conf/redis/etc
@@ -74,7 +78,11 @@ so-redis:
      - file: redisconf
      - x509: redis_crt
      - x509: redis_key
{% if grains['role'] in ['so-manager', 'so-helix', 'so-managersearch', 'so-standalone', 'so-import'] %}
      - x509: pki_public_ca_crt
{% else %}
      - x509: trusttheca
{% endif %}

append_so-redis_so-status.conf:
  file.append:
salt/salt/etc/minion.d/mine_functions.conf (new file, 4 lines)
@@ -0,0 +1,4 @@
mine_interval: 35
mine_functions:
  network.ip_addrs:
    - interface: {{ pillar.host.mainint }}
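With this drop-in in place, every minion publishes its main-interface address to the Salt mine on a 35-minute interval; a mine.get across minions then returns a map the pillar templates above can iterate, shaped roughly like (hypothetical IDs and IPs):

manager01_manager:
  - 10.0.0.10
receiver01_receiver:
  - 10.0.0.20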
@@ -76,14 +76,23 @@ salt_minion_service_unit_file:
    - module: systemd_reload
    - listen_in:
      - service: salt_minion_service

{% endif %}

mine_functions:
  file.managed:
    - name: /etc/salt/minion.d/mine_functions.conf
    - source: salt://salt/etc/minion.d/mine_functions.conf
    - template: jinja

# this has to be outside the if statement above since there are <requisite>_in calls to this state
salt_minion_service:
  service.running:
    - name: salt-minion
    - enable: True
    - onlyif: test "{{INSTALLEDSALTVERSION}}" == "{{SALTVERSION}}"
    - watch:
      - file: mine_functions

patch_pkg:
  pkg.installed:
@@ -18,6 +18,8 @@
{% if grains.id.split('_')|last in ['manager', 'eval', 'standalone', 'import', 'helixsensor'] %}
  {% set trusttheca_text = salt['cp.get_file_str']('/etc/pki/ca.crt')|replace('\n', '') %}
  {% set ca_server = grains.id %}
include:
  - ca
{% else %}
  {% set x509dict = salt['mine.get']('*', 'x509.get_pem_entries') %}
  {% for host in x509dict %}
@@ -30,9 +32,6 @@
  {% set ca_server = global_ca_server[0] %}
{% endif %}

include:
  - ca

# Trust the CA
trusttheca:
  x509.pem_managed:
@@ -70,7 +69,7 @@ removeesp12dir:
influxdb_key:
  x509.private_key_managed:
    - name: /etc/pki/influxdb.key
    - CN: {{ manager }}
    - CN: {{ HOSTNAME }}
    - bits: 4096
    - days_remaining: 0
    - days_valid: 820
@@ -92,8 +91,8 @@ influxdb_crt:
    - ca_server: {{ ca_server }}
    - signing_policy: influxdb
    - public_key: /etc/pki/influxdb.key
    - CN: {{ manager }}
    - subjectAltName: DNS:{{ manager }}, IP:{{ managerip }}
    - CN: {{ HOSTNAME }}
    - subjectAltName: DNS:{{ HOSTNAME }}, IP:{{ MAINIP }}
    - days_remaining: 0
    - days_valid: 820
    - backup: True
@@ -115,12 +114,12 @@ influxkeyperms:
    - mode: 640
    - group: 939

{% if grains['role'] in ['so-manager', 'so-eval', 'so-helix', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-fleet'] %}
{% if grains['role'] in ['so-manager', 'so-eval', 'so-helix', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-fleet', 'so-receiver'] %}
# Create a cert for Redis encryption
redis_key:
  x509.private_key_managed:
    - name: /etc/pki/redis.key
    - CN: {{ COMMONNAME }}
    - CN: {{ HOSTNAME }}
    - bits: 4096
    - days_remaining: 0
    - days_valid: 820
@@ -139,9 +138,10 @@ redis_crt:
  x509.certificate_managed:
    - name: /etc/pki/redis.crt
    - ca_server: {{ ca_server }}
    - subjectAltName: DNS:{{ HOSTNAME }}, IP:{{ MAINIP }}
    - signing_policy: registry
    - public_key: /etc/pki/redis.key
    - CN: {{ COMMONNAME }}
    - CN: {{ HOSTNAME }}
    - days_remaining: 0
    - days_valid: 820
    - backup: True
@@ -164,7 +164,7 @@ rediskeyperms:
    - group: 939
{% endif %}

{% if grains['role'] in ['so-manager', 'so-eval', 'so-helix', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode'] %}
{% if grains['role'] in ['so-manager', 'so-eval', 'so-helix', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-receiver'] %}
etc_filebeat_key:
  x509.private_key_managed:
    - name: /etc/pki/filebeat.key
@@ -190,7 +190,8 @@ etc_filebeat_crt:
    - ca_server: {{ ca_server }}
    - signing_policy: filebeat
    - public_key: /etc/pki/filebeat.key
    - CN: {{ COMMONNAME }}
    - CN: {{ HOSTNAME }}
    - subjectAltName: DNS:{{ HOSTNAME }}, IP:{{ MAINIP }}
    - days_remaining: 0
    - days_valid: 820
    - backup: True
@@ -209,7 +210,6 @@ etc_filebeat_crt:
    - onchanges:
      - x509: etc_filebeat_key

fbperms:
  file.managed:
    - replace: False
@@ -225,7 +225,7 @@ chownilogstashfilebeatp8:
    - user: 931
    - group: 939

{% if grains.role != 'so-heavynode' %}
{% if grains.role not in ['so-heavynode', 'so-receiver'] %}
# Create Symlinks to the keys so I can distribute it to all the things
filebeatdir:
  file.directory:
@@ -292,51 +292,8 @@ regkeyperms:
    - mode: 640
    - group: 939

minio_key:
  x509.private_key_managed:
    - name: /etc/pki/minio.key
    - CN: {{ manager }}
    - bits: 4096
    - days_remaining: 0
    - days_valid: 820
    - backup: True
    - new: True
{% if salt['file.file_exists']('/etc/pki/minio.key') -%}
    - prereq:
      - x509: /etc/pki/minio.crt
{%- endif %}
    - timeout: 30
    - retry:
        attempts: 5
        interval: 30

# Create a cert for minio
minio_crt:
  x509.certificate_managed:
    - name: /etc/pki/minio.crt
    - ca_server: {{ ca_server }}
    - signing_policy: registry
    - public_key: /etc/pki/minio.key
    - CN: {{ manager }}
    - days_remaining: 0
    - days_valid: 820
    - backup: True
    - unless:
      # https://github.com/saltstack/salt/issues/52167
      # Will trigger 5 days (432000 sec) from cert expiration
      - 'enddate=$(date -d "$(openssl x509 -in /etc/pki/minio.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]'
    - timeout: 30
    - retry:
        attempts: 5
        interval: 30

miniokeyperms:
  file.managed:
    - replace: False
    - name: /etc/pki/minio.key
    - mode: 640
    - group: 939
{% endif %}
{% if grains.role not in ['so-receiver'] %}
# Create a cert for elasticsearch
/etc/pki/elasticsearch.key:
  x509.private_key_managed:
@@ -360,7 +317,8 @@ miniokeyperms:
    - ca_server: {{ ca_server }}
    - signing_policy: registry
    - public_key: /etc/pki/elasticsearch.key
    - CN: {{ COMMONNAME }}
    - CN: {{ HOSTNAME }}
    - subjectAltName: DNS:{{ HOSTNAME }}, IP:{{ MAINIP }}
    - days_remaining: 0
    - days_valid: 820
    - backup: True
@@ -379,7 +337,7 @@ miniokeyperms:
    - onchanges:
      - x509: /etc/pki/elasticsearch.key

ealstickeyperms:
elastickeyperms:
  file.managed:
    - replace: False
    - name: /etc/pki/elasticsearch.key
@@ -418,7 +376,7 @@ managerssl_crt:
    - ca_server: {{ ca_server }}
    - signing_policy: managerssl
    - public_key: /etc/pki/managerssl.key
    - CN: {{ manager }}
    - CN: {{ HOSTNAME }}
    - subjectAltName: DNS:{{ HOSTNAME }}, IP:{{ MAINIP }} {% if CUSTOM_FLEET_HOSTNAME != None %},DNS:{{ CUSTOM_FLEET_HOSTNAME }} {% endif %}
    - days_remaining: 0
    - days_valid: 820
@@ -439,11 +397,13 @@ msslkeyperms:
    - mode: 640
    - group: 939

{% endif %}

# Create a private key and cert for OSQuery
fleet_key:
  x509.private_key_managed:
    - name: /etc/pki/fleet.key
    - CN: {{ manager }}
    - CN: {{ HOSTNAME }}
    - bits: 4096
    - days_remaining: 0
    - days_valid: 820
@@ -462,8 +422,8 @@ fleet_crt:
  x509.certificate_managed:
    - name: /etc/pki/fleet.crt
    - signing_private_key: /etc/pki/fleet.key
    - CN: {{ manager }}
    - subjectAltName: DNS:{{ manager }},IP:{{ managerip }}{% if CUSTOM_FLEET_HOSTNAME != None %},DNS:{{ CUSTOM_FLEET_HOSTNAME }}{% endif %}
    - CN: {{ HOSTNAME }}
    - subjectAltName: DNS:{{ HOSTNAME }},IP:{{ MAINIP }}{% if CUSTOM_FLEET_HOSTNAME != None %},DNS:{{ CUSTOM_FLEET_HOSTNAME }}{% endif %}
    - days_remaining: 0
    - days_valid: 820
    - backup: True
@@ -484,7 +444,8 @@ fleetkeyperms:
    - group: 939

{% endif %}
{% if grains['role'] in ['so-sensor', 'so-manager', 'so-node', 'so-eval', 'so-helix', 'so-managersearch', 'so-heavynode', 'so-fleet', 'so-standalone', 'so-import'] %}

{% if grains['role'] in ['so-sensor', 'so-manager', 'so-node', 'so-eval', 'so-helix', 'so-managersearch', 'so-heavynode', 'so-fleet', 'so-standalone', 'so-import', 'so-receiver'] %}

fbcertdir:
  file.directory:
@@ -516,7 +477,7 @@ conf_filebeat_crt:
    - ca_server: {{ ca_server }}
    - signing_policy: filebeat
    - public_key: /opt/so/conf/filebeat/etc/pki/filebeat.key
    - CN: {{ COMMONNAME }}
    - CN: {{ HOSTNAME }}
    - subjectAltName: DNS:{{ HOSTNAME }}, IP:{{ MAINIP }}
    - days_remaining: 0
    - days_valid: 820
@@ -675,6 +636,7 @@ fleetkeyperms:
    - signing_policy: registry
    - public_key: /etc/pki/elasticsearch.key
    - CN: {{ HOSTNAME }}
    - subjectAltName: DNS:{{ HOSTNAME }}, IP:{{ MAINIP }}
    - days_remaining: 0
    - days_valid: 820
    - backup: True
salt/top.sls (28 changes)
@@ -63,7 +63,6 @@ base:

  '*_sensor and G@saltversion:{{saltversion}}':
    - match: compound
    - ca
    - ssl
    - sensoroni
    - telegraf
@@ -298,7 +297,6 @@ base:

  '*_searchnode and G@saltversion:{{saltversion}}':
    - match: compound
    - ca
    - ssl
    - sensoroni
    - nginx
@@ -391,7 +389,6 @@ base:

  '*_heavynode and G@saltversion:{{saltversion}}':
    - match: compound
    - ca
    - ssl
    - sensoroni
    - nginx
@@ -433,7 +430,6 @@ base:

  '*_fleet and G@saltversion:{{saltversion}}':
    - match: compound
    - ca
    - ssl
    - sensoroni
    - nginx
@@ -478,3 +474,27 @@ base:
    - docker_clean
    - pipeline.load
    - learn

  '*_receiver and G@saltversion:{{saltversion}}':
    - match: compound
    - ssl
    - sensoroni
    - telegraf
    - firewall
{%- if WAZUH != 0 %}
    - wazuh
{%- endif %}
{%- if LOGSTASH %}
    - logstash
{%- endif %}
{%- if REDIS %}
    - redis
{%- endif %}
{%- if FILEBEAT %}
    - filebeat
{%- endif %}
{%- if FLEETMANAGER or FLEETNODE %}
    - fleet.install_package
{%- endif %}
    - schedule
    - docker_clean
@@ -1,10 +1,6 @@
{%- if grains['role'] in ['so-manager', 'so-eval', 'so-managersearch', 'so-standalone'] %}
{%- set ip = salt['pillar.get']('global:managerip', '') %}
{%- elif grains['role'] == 'so-node' or grains['role'] == 'so-heavynode' %}
{%- set ip = salt['pillar.get']('elasticsearch:mainip', '') %}
{%- elif grains['role'] == 'so-sensor' %}
{%- set ip = salt['pillar.get']('sensor:mainip', '') %}
{%- endif %}
{% set mainint = salt['pillar.get']('host:mainint') -%}
{% set ip = salt['grains.get']('ip_interfaces').get(mainint)[0] -%}

<!--
Wazuh - Agent Configuration
More info at: https://documentation.wazuh.com
@@ -1,10 +1,6 @@
{%- if grains['role'] in ['so-manager', 'so-eval', 'so-managersearch', 'so-standalone'] %}
{%- set ip = salt['pillar.get']('global:managerip', '') %}
{%- elif grains['role'] == 'so-node' or grains['role'] == 'so-heavynode' %}
{%- set ip = salt['pillar.get']('elasticsearch:mainip', '') %}
{%- elif grains['role'] == 'so-sensor' %}
{%- set ip = salt['pillar.get']('sensor:mainip', '') %}
{%- endif %}
{% set mainint = salt['pillar.get']('host:mainint') -%}
{% set ip = salt['grains.get']('ip_interfaces').get(mainint)[0] -%}
#!/bin/bash

###

@@ -77,7 +77,7 @@ accept_salt_key_remote() {
  echo "Accept the key remotely on the manager" >> "$setup_log" 2>&1
  # Delete the key just in case.
  $sshcmd -i /root/.ssh/so.key soremote@"$MSRV" sudo salt-key -d "$MINION_ID" -y
  salt-call state.apply ca >> /dev/null 2>&1
  salt-call state.show_top >> /dev/null 2>&1
  $sshcmd -i /root/.ssh/so.key soremote@"$MSRV" sudo salt-key -a "$MINION_ID" -y
}

@@ -99,7 +99,7 @@ addtotab_generate_templates() {

  local addtotab_path=$local_salt_dir/pillar/data

  for i in evaltab managersearchtab managertab nodestab sensorstab standalonetab; do
  for i in evaltab managersearchtab managertab nodestab sensorstab standalonetab receiverstab; do
    printf '%s\n'\
      "$i:"\
      "" > "$addtotab_path"/$i.sls
@@ -1327,15 +1327,6 @@ elasticsearch_pillar() {
    " log_size_limit: $log_size_limit"\
    " node_route_type: 'hot'"\
    "" >> "$pillar_file"

  printf '%s\n'\
    "logstash_settings:"\
    " ls_pipeline_batch_size: $LSPIPELINEBATCH"\
    " ls_input_threads: $LSINPUTTHREADS"\
    " lsheap: $NODE_LS_HEAP_SIZE"\
    " ls_pipeline_workers: $num_cpu_cores"\
    "" >> "$pillar_file"

}

es_heapsize() {
@@ -1490,7 +1481,7 @@ get_redirect() {
get_minion_type() {
  local minion_type
  case "$install_type" in
    'EVAL' | 'MANAGERSEARCH' | 'MANAGER' | 'SENSOR' | 'HEAVYNODE' | 'FLEET' | 'STANDALONE' | 'IMPORT')
    'EVAL' | 'MANAGERSEARCH' | 'MANAGER' | 'SENSOR' | 'HEAVYNODE' | 'FLEET' | 'STANDALONE' | 'IMPORT' | 'RECEIVER')
      minion_type=$(echo "$install_type" | tr '[:upper:]' '[:lower:]')
      ;;
    'HELIXSENSOR')
@@ -1554,6 +1545,20 @@ import_registry_docker() {
  fi
}

logstash_pillar() {

  local pillar_file=$temp_install_dir/pillar/minions/$MINION_ID.sls

  # Create the logstash pillar
  printf '%s\n'\
    "logstash_settings:"\
    " ls_pipeline_batch_size: $LSPIPELINEBATCH"\
    " ls_input_threads: $LSINPUTTHREADS"\
    " lsheap: $NODE_LS_HEAP_SIZE"\
    " ls_pipeline_workers: $num_cpu_cores"\
    "" >> "$pillar_file"
}

# Set Logstash heap size based on total memory
ls_heapsize() {
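For reference, with assumed answers from the setup prompts (batch size 125, one input thread, a 1000m heap, four cores), the new logstash_pillar function would append roughly this YAML to the minion's pillar file:

logstash_settings:
  ls_pipeline_batch_size: 125
  ls_input_threads: 1
  lsheap: 1000m
  ls_pipeline_workers: 4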
@@ -2345,13 +2350,13 @@ salt_checkin() {
      ;;
    *)
      {
        salt-call state.apply ca;
        #salt-call state.apply ca;
        salt-call state.apply ssl;
      } >> "$setup_log" 2>&1
      ;;
  esac
  {
    salt-call state.apply ca;
    #salt-call state.apply ca;
    salt-call state.apply ssl;
    salt-call saltutil.sync_modules;
  } >> "$setup_log" 2>&1
@@ -2407,11 +2412,6 @@ securityonion_repo() {
  fi
}

set_base_heapsizes() {
  es_heapsize
  ls_heapsize
}

set_network_dev_status_list() {
  readarray -t nmcli_dev_status_list <<< "$(nmcli -t -f DEVICE,STATE -c no dev status)"
  export nmcli_dev_status_list
@@ -2665,7 +2665,7 @@ set_initial_firewall_policy() {
      $default_salt_dir/salt/common/tools/sbin/so-firewall includehost minion "$MAINIP"
      $default_salt_dir/salt/common/tools/sbin/so-firewall --apply includehost sensor "$MAINIP"
      ;;
    'SENSOR' | 'SEARCHNODE' | 'HEAVYNODE' | 'FLEET')
    'SENSOR' | 'SEARCHNODE' | 'HEAVYNODE' | 'FLEET' | 'RECEIVER')
      $sshcmd -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/salt/common/tools/sbin/so-firewall includehost minion "$MAINIP"
      case "$install_type" in
        'SENSOR')
@@ -2685,6 +2685,9 @@ set_initial_firewall_policy() {
        'FLEET')
          $sshcmd -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/salt/common/tools/sbin/so-firewall --apply includehost beats_endpoint_ssl "$MAINIP"
          ;;
        'RECEIVER')
          $sshcmd -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/salt/common/tools/sbin/so-firewall --apply includehost receiver "$MAINIP"
          $sshcmd -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/pillar/data/addtotab.sh receiverstab "$MINION_ID" "$MAINIP"
      esac
      ;;
    'PARSINGNODE')
@@ -255,6 +255,9 @@ elif [ "$install_type" = 'HELIXSENSOR' ]; then
  is_helix=true
elif [ "$install_type" = 'IMPORT' ]; then
  is_import=true
elif [ "$install_type" = 'RECEIVER' ]; then
  is_minion=true
  is_receiver=true
elif [ "$install_type" = 'ANALYST' ]; then
  cd .. || exit 255
  exec bash so-analyst-install
@@ -461,8 +464,14 @@ if [[ $is_helix || $is_manager || $is_import ]]; then
  collect_homenet_mngr
fi

#set base elasticsearch heap size
if [[ $is_helix || $is_manager || $is_node || $is_import ]]; then
  set_base_heapsizes
  es_heapsize
fi

#set base logstash heap size
if [[ $is_helix || $is_manager || $is_node || $is_import || $is_receiver ]]; then
  ls_heapsize
fi

if [[ $is_manager && ! $is_eval ]]; then
@@ -541,17 +550,21 @@ fi

[[ $is_iso ]] && collect_ntp_servers

if [[ $is_node && ! $is_eval ]]; then
if [[ ($is_node || $is_receiver) && ! $is_eval ]]; then
  whiptail_node_advanced
  if [ "$NODESETUP" == 'NODEADVANCED' ]; then
    collect_node_es_heap
    if [[ ! $is_receiver ]]; then
      collect_node_es_heap
      collect_es_space_limit
    fi
    collect_node_ls_heap
    collect_node_ls_pipeline_worker_count
    collect_node_ls_pipeline_batch_size
    collect_node_ls_input
    collect_es_space_limit
  else
    NODE_ES_HEAP_SIZE=$ES_HEAP_SIZE
    if [[ ! $is_receiver ]]; then
      NODE_ES_HEAP_SIZE=$ES_HEAP_SIZE
    fi
    NODE_LS_HEAP_SIZE=$LS_HEAP_SIZE
    LSPIPELINEWORKERS=$num_cpu_cores
    LSPIPELINEBATCH=125
@@ -724,6 +737,12 @@ echo "1" > /root/accept_changes
  fi
fi

if [[ ($is_node || $is_receiver) && !($is_manager || $is_helix) ]]; then
  set_progress_str 19 'Generating logstash pillar'
  logstash_pillar >> $setup_log 2>&1
fi

if [[ $is_minion ]]; then
  set_progress_str 20 'Accepting Salt key on manager'
  retry 20 10 accept_salt_key_remote "going to be accepted" >> $setup_log 2>&1
@@ -781,7 +800,7 @@ echo "1" > /root/accept_changes
set_progress_str 62 "$(print_salt_state_apply 'common')"
salt-call state.apply -l info common >> $setup_log 2>&1

if [[ ! $is_helix ]]; then
if [[ ! $is_helix && ! $is_receiver ]]; then
  set_progress_str 62 "$(print_salt_state_apply 'nginx')"
  salt-call state.apply -l info nginx >> $setup_log 2>&1
fi
@@ -563,19 +563,23 @@ whiptail_end_settings() {

  if [[ $NODESETUP == 'NODEADVANCED' ]]; then
    __append_end_msg "Advanced Node Settings:"
    __append_end_msg " Elasticsearch Heap Size: $NODE_ES_HEAP_SIZE"
    if [[ ! $is_receiver ]]; then
      __append_end_msg " Elasticsearch Heap Size: $NODE_ES_HEAP_SIZE"
      __append_end_msg " Elasticsearch Storage Space: ${log_size_limit}GB"
    fi
    __append_end_msg " Logstash Heap Size: $NODE_LS_HEAP_SIZE"
    __append_end_msg " Logstash Worker Count: $LSPIPELINEWORKERS"
    __append_end_msg " Logstash Batch Size: $LSPIPELINEBATCH"
    __append_end_msg " Logstash Input Threads: $LSINPUTTHREADS"
    __append_end_msg " Elasticsearch Storage Space: ${log_size_limit}GB"
  else
    __append_end_msg "Elasticsearch Heap Size: $NODE_ES_HEAP_SIZE"
    if [[ ! $is_receiver ]]; then
      __append_end_msg "Elasticsearch Heap Size: $NODE_ES_HEAP_SIZE"
      __append_end_msg "Elasticsearch Storage Space: ${log_size_limit}GB"
    fi
    __append_end_msg "Logstash Heap Size: $NODE_LS_HEAP_SIZE"
    __append_end_msg "Logstash Worker Count: $LSPIPELINEWORKERS"
    __append_end_msg "Logstash Batch Size: $LSPIPELINEBATCH"
    __append_end_msg "Logstash Input Threads: $LSINPUTTHREADS"
    __append_end_msg "Elasticsearch Storage Space: ${log_size_limit}GB"
  fi

@@ -796,11 +800,12 @@ whiptail_install_type_dist_existing() {
Note: Heavy nodes (HEAVYNODE) are NOT recommended for most users.
EOM

  install_type=$(whiptail --title "$whiptail_title" --radiolist "$node_msg" 18 58 4 \
  install_type=$(whiptail --title "$whiptail_title" --radiolist "$node_msg" 18 58 5 \
    "SENSOR" "Create a forward only sensor " ON \
    "SEARCHNODE" "Add a search node with parsing " OFF \
    "FLEET" "Dedicated Fleet Osquery Node " OFF \
    "HEAVYNODE" "Sensor + Search Node " OFF \
    "RECEIVER" "Receiver Node " OFF \
    3>&1 1>&2 2>&3
    # "HOTNODE" "Add Hot Node (Uses Elastic Clustering)" OFF \ # TODO
    # "WARMNODE" "Add Warm Node to existing Hot or Search node" OFF \ # TODO