mirror of https://github.com/Security-Onion-Solutions/securityonion.git

MANAGERHYPE setup is now complete!

@@ -27,7 +27,7 @@ base:
    - nginx.adv_nginx
    - node_data.ips

  '*_manager or *_managersearch':
  '*_manager or *_managersearch or *_managerhype':
    - match: compound
{% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/elasticsearch/auth.sls') %}
    - elasticsearch.auth
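
The compound pillar target above can be exercised from the manager before it is relied on; a hedged sketch (which minions answer depends on your grid):

    # List the minions matched by the new compound target.
    sudo salt -C '*_manager or *_managersearch or *_managerhype' test.ping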

@@ -7,271 +7,174 @@

{% import_yaml 'salt/minion.defaults.yaml' as saltversion %}
{% set saltversion = saltversion.salt.minion.version %}

{# this is the list we are returning from this map file, it gets built below #}
{% set allowed_states= [] %}

{# Define common state groups to reduce redundancy #}
{% set base_states = [
  'common',
  'patch.os.schedule',
  'motd',
  'salt.minion-check',
  'sensoroni',
  'salt.lasthighstate',
  'salt.minion'
] %}

{% set ssl_states = [
  'ssl',
  'telegraf',
  'firewall',
  'schedule',
  'docker_clean'
] %}

{% set manager_states = [
  'salt.master',
  'ca',
  'registry',
  'manager',
  'nginx',
  'influxdb',
  'soc',
  'kratos',
  'hydra',
  'elasticfleet',
  'elastic-fleet-package-registry',
  'idstools',
  'suricata.manager',
  'utility'
] %}

{% set sensor_states = [
  'pcap',
  'suricata',
  'healthcheck',
  'tcpreplay'
] %}

{% set kafka_states = [
  'kafka'
] %}

{% set stig_states = [
  'stig'
] %}

{% set elastic_stack_states = [
  'elasticsearch',
  'elasticsearch.auth',
  'kibana',
  'kibana.secrets',
  'elastalert',
  'logstash',
  'redis'
] %}

{# Initialize the allowed_states list #}
{% set allowed_states = [] %}

{% if grains.saltversion | string == saltversion | string %}
  {# Map role-specific states #}
  {% set role_states = {
    'so-eval': (
      ssl_states +
      manager_states +
      sensor_states +
      elastic_stack_states
    ),
    'so-heavynode': (
      ssl_states +
      sensor_states +
      ['elasticagent', 'zeek', 'strelka'] +
      ['elasticsearch', 'logstash', 'redis'] +
      stig_states
    ),
    'so-idh': (
      ssl_states +
      ['idh']
    ),
    'so-import': (
      ssl_states +
      manager_states +
      sensor_states +
      ['zeek'] +
      ['elasticsearch', 'elasticsearch.auth', 'kibana', 'kibana.secrets']
    ),
    'so-manager': (
      ssl_states +
      manager_states +
      ['salt.cloud', 'libvirt.packages', 'libvirt.ssh.users', 'strelka.manager'] +
      stig_states +
      kafka_states +
      elastic_stack_states
    ),
    'so-managerhype': (
      ssl_states +
      manager_states +
      ['salt.cloud', 'strelka.manager', 'hypervisor', 'libvirt'] +
      stig_states +
      kafka_states +
      elastic_stack_states
    ),
    'so-managersearch': (
      ssl_states +
      manager_states +
      ['strelka.manager'] +
      stig_states +
      kafka_states +
      elastic_stack_states
    ),
    'so-searchnode': (
      ssl_states +
      ['kafka.ca', 'kafka.ssl', 'elasticsearch', 'logstash'] +
      stig_states
    ),
    'so-standalone': (
      ssl_states +
      manager_states +
      sensor_states +
      stig_states +
      kafka_states +
      elastic_stack_states +
      ['zeek', 'strelka']
    ),
    'so-sensor': (
      ssl_states +
      sensor_states +
      ['nginx', 'zeek', 'strelka'] +
      stig_states
    ),
    'so-fleet': (
      ssl_states +
      ['logstash', 'nginx', 'healthcheck', 'elasticfleet']
    ),
    'so-receiver': (
      ssl_states +
      kafka_states +
      stig_states +
      ['logstash', 'redis']
    ),
    'so-hypervisor': (
      ssl_states +
      stig_states +
      ['hypervisor', 'libvirt']
    ),
    'so-desktop': (
      ['ssl', 'docker_clean', 'telegraf'] +
      stig_states
    )
  } %}

{% set allowed_states= salt['grains.filter_by']({
  'so-eval': [
    'salt.master',
    'ca',
    'ssl',
    'registry',
    'manager',
    'nginx',
    'telegraf',
    'influxdb',
    'soc',
    'kratos',
    'hydra',
    'elasticfleet',
    'elastic-fleet-package-registry',
    'firewall',
    'idstools',
    'suricata.manager',
    'healthcheck',
    'pcap',
    'suricata',
    'utility',
    'schedule',
    'tcpreplay',
    'docker_clean'
  ],
  'so-heavynode': [
    'ssl',
    'nginx',
    'telegraf',
    'firewall',
    'pcap',
    'suricata',
    'healthcheck',
    'elasticagent',
    'schedule',
    'tcpreplay',
    'docker_clean'
  ],
  'so-idh': [
    'ssl',
    'telegraf',
    'firewall',
    'idh',
    'schedule',
    'docker_clean'
  ],
  'so-import': [
    'salt.master',
    'ca',
    'ssl',
    'registry',
    'manager',
    'nginx',
    'strelka.manager',
    'soc',
    'kratos',
    'hydra',
    'influxdb',
    'telegraf',
    'firewall',
    'idstools',
    'suricata.manager',
    'pcap',
    'utility',
    'suricata',
    'zeek',
    'schedule',
    'tcpreplay',
    'docker_clean',
    'elasticfleet',
    'elastic-fleet-package-registry'
  ],
  'so-manager': [
    'salt.master',
    'salt.cloud',
    'libvirt.packages',
    'libvirt.ssh.users',
    'ca',
    'ssl',
    'registry',
    'manager',
    'nginx',
    'telegraf',
    'influxdb',
    'strelka.manager',
    'soc',
    'kratos',
    'hydra',
    'elasticfleet',
    'elastic-fleet-package-registry',
    'firewall',
    'idstools',
    'suricata.manager',
    'utility',
    'schedule',
    'docker_clean',
    'stig',
    'kafka'
  ],
  'so-managersearch': [
    'salt.master',
    'ca',
    'ssl',
    'registry',
    'nginx',
    'telegraf',
    'influxdb',
    'strelka.manager',
    'soc',
    'kratos',
    'hydra',
    'elastic-fleet-package-registry',
    'elasticfleet',
    'firewall',
    'manager',
    'idstools',
    'suricata.manager',
    'utility',
    'schedule',
    'docker_clean',
    'stig',
    'kafka'
  ],
  'so-searchnode': [
    'ssl',
    'nginx',
    'telegraf',
    'firewall',
    'schedule',
    'docker_clean',
    'stig',
    'kafka.ca',
    'kafka.ssl'
  ],
  'so-standalone': [
    'salt.master',
    'ca',
    'ssl',
    'registry',
    'manager',
    'nginx',
    'telegraf',
    'influxdb',
    'soc',
    'kratos',
    'hydra',
    'elastic-fleet-package-registry',
    'elasticfleet',
    'firewall',
    'idstools',
    'suricata.manager',
    'pcap',
    'suricata',
    'healthcheck',
    'utility',
    'schedule',
    'tcpreplay',
    'docker_clean',
    'stig',
    'kafka'
  ],
  'so-sensor': [
    'ssl',
    'telegraf',
    'firewall',
    'nginx',
    'pcap',
    'suricata',
    'healthcheck',
    'schedule',
    'tcpreplay',
    'docker_clean',
    'stig'
  ],
  'so-fleet': [
    'ssl',
    'telegraf',
    'firewall',
    'logstash',
    'nginx',
    'healthcheck',
    'schedule',
    'elasticfleet',
    'docker_clean'
  ],
  'so-receiver': [
    'ssl',
    'telegraf',
    'firewall',
    'schedule',
    'docker_clean',
    'kafka',
    'stig'
  ],
  'so-hypervisor': [
    'ssl',
    'telegraf',
    'firewall',
    'schedule',
    'docker_clean',
    'stig',
    'hypervisor',
    'libvirt'
  ],
  'so-desktop': [
    'ssl',
    'docker_clean',
    'telegraf',
    'stig'
  ],
}, grain='role') %}

{%- if grains.role in ['so-sensor', 'so-eval', 'so-standalone', 'so-heavynode'] %}
  {% do allowed_states.append('zeek') %}
{%- endif %}

{% if grains.role in ['so-sensor', 'so-eval', 'so-standalone', 'so-heavynode'] %}
  {% do allowed_states.append('strelka') %}
{% endif %}

{% if grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-searchnode', 'so-managersearch', 'so-heavynode', 'so-import'] %}
  {% do allowed_states.append('elasticsearch') %}
{% endif %}

{% if grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-managersearch', 'so-import'] %}
  {% do allowed_states.append('elasticsearch.auth') %}
{% endif %}

{% if grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-managersearch', 'so-import'] %}
  {% do allowed_states.append('kibana') %}
  {% do allowed_states.append('kibana.secrets') %}
{% endif %}

{% if grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-managersearch'] %}
  {% do allowed_states.append('elastalert') %}
{% endif %}

{% if grains.role in ['so-manager', 'so-standalone', 'so-searchnode', 'so-managersearch', 'so-heavynode', 'so-receiver'] %}
  {% do allowed_states.append('logstash') %}
{% endif %}

{% if grains.role in ['so-manager', 'so-standalone', 'so-managersearch', 'so-heavynode', 'so-receiver', 'so-eval'] %}
  {% do allowed_states.append('redis') %}
{% endif %}

{# all nodes on the right salt version can run the following states #}
{% do allowed_states.append('common') %}
{% do allowed_states.append('patch.os.schedule') %}
{% do allowed_states.append('motd') %}
{% do allowed_states.append('salt.minion-check') %}
{% do allowed_states.append('sensoroni') %}
{% do allowed_states.append('salt.lasthighstate') %}

  {# Get states for the current role #}
  {% if grains.role in role_states %}
    {% set allowed_states = role_states[grains.role] %}
  {% endif %}

  {# Add base states that apply to all roles #}
  {% for state in base_states %}
    {% do allowed_states.append(state) %}
  {% endfor %}
{% endif %}

{# Add airgap state if needed #}
{% if ISAIRGAP %}
  {% do allowed_states.append('airgap') %}
{% endif %}

{# all nodes can always run salt.minion state #}
{% do allowed_states.append('salt.minion') %}
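
Two properties of the refactored map are worth calling out: each role's list is built by plain Jinja list concatenation (for example, 'so-idh' resolves to ssl_states + ['idh']), and the whole mapping is gated on the minion's Salt version matching the grid's pinned version, so an out-of-sync minion is only ever allowed salt.minion (plus airgap where applicable). A hedged way to spot version-gated minions from the manager:

    # Compare each minion's installed Salt version against the pinned one.
    sudo salt '*' grains.item saltversion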

@@ -106,7 +106,7 @@ Etc/UTC:
  timezone.system

# Sync curl configuration for Elasticsearch authentication
{% if GLOBALS.role in ['so-eval', 'so-heavynode', 'so-import', 'so-manager', 'so-managersearch', 'so-searchnode', 'so-standalone'] %}
{% if GLOBALS.is_manager or GLOBALS.role in ['so-heavynode', 'so-searchnode'] %}
elastic_curl_config:
  file.managed:
    - name: /opt/so/conf/elasticsearch/curl.config

@@ -166,7 +166,7 @@ eaoptionalintegrationsdir:

{% for minion in node_data %}
{% set role = node_data[minion]["role"] %}
{% if role in [ "eval","fleet","heavynode","import","manager","managersearch","standalone" ] %}
{% if role in [ "eval","fleet","heavynode","import","manager", "managerhype", "managersearch","standalone" ] %}
{% set optional_integrations = ELASTICFLEETMERGED.optional_integrations %}
{% set integration_keys = optional_integrations.keys() %}
fleet_server_integrations_{{ minion }}:

@@ -28,7 +28,7 @@
{% endfor %}
{% endfor %}

{% if grains.id.split('_') | last in ['manager','managersearch','standalone'] %}
{% if grains.id.split('_') | last in ['manager','managerhype','managersearch','standalone'] %}
{% if ELASTICSEARCH_SEED_HOSTS | length > 1 %}
{% do ELASTICSEARCHDEFAULTS.elasticsearch.config.update({'discovery': {'seed_hosts': []}}) %}
{% for NODE in ELASTICSEARCH_SEED_HOSTS %}

@@ -4479,6 +4479,14 @@ elasticsearch:
          - data
          - remote_cluster_client
          - transform
  so-managerhype:
    config:
      node:
        roles:
          - master
          - data
          - remote_cluster_client
          - transform
  so-managersearch:
    config:
      node:
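
Once a managerhype node is up, the roles Elasticsearch actually assigned can be compared against this config; a hedged spot-check using Security Onion's query wrapper:

    # Show each cluster node with its abbreviated role letters (m=master, d=data, ...).
    sudo so-elasticsearch-query '_cat/nodes?v&h=name,node.role'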

@@ -204,7 +204,7 @@ so-elasticsearch-roles-load:
      - docker_container: so-elasticsearch
      - file: elasticsearch_sbin_jinja

{% if grains.role in ['so-eval', 'so-standalone', 'so-managersearch', 'so-heavynode', 'so-manager'] %}
{% if GLOBALS.role in ['so-eval', 'so-standalone', 'so-managersearch', 'so-heavynode', 'so-manager', 'so-managerhype'] %}
{% if ELASTICSEARCHMERGED.index_clean %}
{% set ap = "present" %}
{% else %}

@@ -19,6 +19,7 @@ firewall:
    localhost:
      - 127.0.0.1
    manager: []
    managerhype: []
    managersearch: []
    receiver: []
    searchnode: []

@@ -573,6 +574,215 @@ firewall:
              portgroups: []
            customhostgroup9:
              portgroups: []
    managerhype:
      chain:
        DOCKER-USER:
          hostgroups:
            managerhype:
              portgroups:
                - kibana
                - redis
                - influxdb
                - elasticsearch_rest
                - elasticsearch_node
                - docker_registry
                - elastic_agent_control
                - elastic_agent_data
                - elastic_agent_update
                - localrules
                - sensoroni
            fleet:
              portgroups:
                - elasticsearch_rest
                - docker_registry
                - influxdb
                - sensoroni
                - yum
                - beats_5044
                - beats_5644
                - beats_5056
                - elastic_agent_control
                - elastic_agent_data
                - elastic_agent_update
            idh:
              portgroups:
                - docker_registry
                - influxdb
                - sensoroni
                - yum
                - beats_5044
                - beats_5644
                - elastic_agent_control
                - elastic_agent_data
                - elastic_agent_update
            sensor:
              portgroups:
                - beats_5044
                - beats_5644
                - elastic_agent_control
                - elastic_agent_data
                - elastic_agent_update
                - yum
                - docker_registry
                - influxdb
                - sensoroni
            searchnode:
              portgroups:
                - redis
                - elasticsearch_rest
                - elasticsearch_node
                - beats_5644
                - yum
                - docker_registry
                - influxdb
                - elastic_agent_control
                - elastic_agent_data
                - elastic_agent_update
                - sensoroni
            heavynode:
              portgroups:
                - redis
                - elasticsearch_rest
                - elasticsearch_node
                - beats_5644
                - yum
                - docker_registry
                - influxdb
                - elastic_agent_control
                - elastic_agent_data
                - elastic_agent_update
                - sensoroni
            receiver:
              portgroups:
                - yum
                - docker_registry
                - influxdb
                - elastic_agent_control
                - elastic_agent_data
                - elastic_agent_update
                - sensoroni
            analyst:
              portgroups:
                - nginx
            beats_endpoint:
              portgroups:
                - beats_5044
            beats_endpoint_ssl:
              portgroups:
                - beats_5644
            elasticsearch_rest:
              portgroups:
                - elasticsearch_rest
            elastic_agent_endpoint:
              portgroups:
                - elastic_agent_control
                - elastic_agent_data
                - elastic_agent_update
            endgame:
              portgroups:
                - endgame
            external_suricata:
              portgroups:
                - external_suricata
            desktop:
              portgroups:
                - docker_registry
                - influxdb
                - sensoroni
                - yum
                - elastic_agent_control
                - elastic_agent_data
                - elastic_agent_update
            hypervisor:
              portgroups:
                - yum
                - docker_registry
                - influxdb
                - elastic_agent_control
                - elastic_agent_data
                - elastic_agent_update
                - sensoroni
            customhostgroup0:
              portgroups: []
            customhostgroup1:
              portgroups: []
            customhostgroup2:
              portgroups: []
            customhostgroup3:
              portgroups: []
            customhostgroup4:
              portgroups: []
            customhostgroup5:
              portgroups: []
            customhostgroup6:
              portgroups: []
            customhostgroup7:
              portgroups: []
            customhostgroup8:
              portgroups: []
            customhostgroup9:
              portgroups: []
        INPUT:
          hostgroups:
            anywhere:
              portgroups:
                - ssh
            dockernet:
              portgroups:
                - all
            fleet:
              portgroups:
                - salt_manager
            idh:
              portgroups:
                - salt_manager
            localhost:
              portgroups:
                - all
            sensor:
              portgroups:
                - salt_manager
            searchnode:
              portgroups:
                - salt_manager
            heavynode:
              portgroups:
                - salt_manager
            receiver:
              portgroups:
                - salt_manager
            desktop:
              portgroups:
                - salt_manager
            hypervisor:
              portgroups:
                - salt_manager
            self:
              portgroups:
                - syslog
            syslog:
              portgroups:
                - syslog
            customhostgroup0:
              portgroups: []
            customhostgroup1:
              portgroups: []
            customhostgroup2:
              portgroups: []
            customhostgroup3:
              portgroups: []
            customhostgroup4:
              portgroups: []
            customhostgroup5:
              portgroups: []
            customhostgroup6:
              portgroups: []
            customhostgroup7:
              portgroups: []
            customhostgroup8:
              portgroups: []
            customhostgroup9:
              portgroups: []
    managersearch:
      chain:
        DOCKER-USER:
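
These defaults only take effect once the firewall state next runs on the affected minions; a hedged way to push just that state (the target glob is illustrative):

    # Re-apply the firewall state so the new managerhype hostgroups are rendered.
    sudo salt '*_managerhype' state.apply firewall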

@@ -12,6 +12,8 @@ logstash:
      - search
    manager:
      - manager
    managerhype:
      - manager
    managersearch:
      - manager
      - search

@@ -16,7 +16,7 @@ include:
  - elasticsearch.ca
{% endif %}
{# Kafka ca runs on nodes that can run logstash for Kafka input / output. Only when Kafka is global pipeline #}
{% if GLOBALS.role in ['so-searchnode', 'so-manager', 'so-managersearch', 'so-receiver', 'so-standalone'] and GLOBALS.pipeline == 'KAFKA' %}
{% if GLOBALS.role in ['so-searchnode', 'so-manager', 'so-managerhype', 'so-managersearch', 'so-receiver', 'so-standalone'] and GLOBALS.pipeline == 'KAFKA' %}
  - kafka.ca
  - kafka.ssl
{% endif %}

@@ -65,26 +65,26 @@ so-logstash:
      - /opt/so/log/logstash:/var/log/logstash:rw
      - /sys/fs/cgroup:/sys/fs/cgroup:ro
      - /opt/so/conf/logstash/etc/certs:/usr/share/logstash/certs:ro
{% if GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-receiver'] %}
{% if GLOBALS.role in ['so-manager', 'so-managerhype', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-receiver'] %}
      - /etc/pki/filebeat.crt:/usr/share/logstash/filebeat.crt:ro
      - /etc/pki/filebeat.p8:/usr/share/logstash/filebeat.key:ro
{% endif %}
{% if GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone', 'so-import', 'so-eval','so-fleet', 'so-heavynode', 'so-receiver'] %}
{% if GLOBALS.is_manager or GLOBALS.role in ['so-fleet', 'so-heavynode', 'so-receiver'] %}
      - /etc/pki/elasticfleet-logstash.crt:/usr/share/logstash/elasticfleet-logstash.crt:ro
      - /etc/pki/elasticfleet-logstash.key:/usr/share/logstash/elasticfleet-logstash.key:ro
      - /etc/pki/elasticfleet-lumberjack.crt:/usr/share/logstash/elasticfleet-lumberjack.crt:ro
      - /etc/pki/elasticfleet-lumberjack.key:/usr/share/logstash/elasticfleet-lumberjack.key:ro
{% endif %}
{% if GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone', 'so-import'] %}
{% if GLOBALS.role in ['so-manager', 'so-managerhype', 'so-managersearch', 'so-standalone', 'so-import'] %}
      - /etc/pki/ca.crt:/usr/share/filebeat/ca.crt:ro
{% else %}
      - /etc/pki/tls/certs/intca.crt:/usr/share/filebeat/ca.crt:ro
{% endif %}
{% if GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-searchnode' ] %}
{% if GLOBALS.role in ['so-manager', 'so-managerhype', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-searchnode' ] %}
      - /opt/so/conf/ca/cacerts:/etc/pki/ca-trust/extracted/java/cacerts:ro
      - /opt/so/conf/ca/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro
{% endif %}
{% if GLOBALS.pipeline == "KAFKA" and GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone', 'so-searchnode'] %}
{% if GLOBALS.pipeline == "KAFKA" and GLOBALS.role in ['so-manager', 'so-managerhype', 'so-managersearch', 'so-standalone', 'so-searchnode'] %}
      - /etc/pki/kafka-logstash.p12:/usr/share/logstash/kafka-logstash.p12:ro
      - /opt/so/conf/kafka/kafka-truststore.jks:/etc/pki/kafka-truststore.jks:ro
{% endif %}

@@ -100,7 +100,7 @@ so-logstash:
{% endfor %}
{% endif %}
    - watch:
{% if grains['role'] in ['so-manager', 'so-eval', 'so-managersearch', 'so-standalone', 'so-import', 'so-fleet', 'so-receiver'] %}
{% if GLOBALS.is_manager or GLOBALS.role in ['so-fleet', 'so-receiver'] %}
      - x509: etc_elasticfleet_logstash_key
      - x509: etc_elasticfleet_logstash_crt
{% endif %}

@@ -111,23 +111,23 @@ so-logstash:
      - file: ls_pipeline_{{assigned_pipeline}}_{{CONFIGFILE.split('.')[0] | replace("/","_") }}
{% endfor %}
{% endfor %}
{% if GLOBALS.pipeline == 'KAFKA' and GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone', 'so-searchnode'] %}
{% if GLOBALS.pipeline == 'KAFKA' and GLOBALS.role in ['so-manager', 'so-managerhype', 'so-managersearch', 'so-standalone', 'so-searchnode'] %}
      - file: kafkacertz
{% endif %}
    - require:
{% if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-receiver'] %}
{% if grains['role'] in ['so-manager', 'so-managerhype', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-receiver'] %}
      - x509: etc_filebeat_crt
{% endif %}
{% if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-import'] %}
{% if grains['role'] in ['so-manager', 'so-managerhype', 'so-managersearch', 'so-standalone', 'so-import'] %}
      - x509: pki_public_ca_crt
{% else %}
      - x509: trusttheca
{% endif %}
{% if grains.role in ['so-manager', 'so-managersearch', 'so-standalone', 'so-import'] %}
{% if grains.role in ['so-manager', 'so-managerhype', 'so-managersearch', 'so-standalone', 'so-import'] %}
      - file: cacertz
      - file: capemz
{% endif %}
{% if GLOBALS.pipeline == 'KAFKA' and GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone', 'so-searchnode'] %}
{% if GLOBALS.pipeline == 'KAFKA' and GLOBALS.role in ['so-manager', 'so-managerhype', 'so-managersearch', 'so-standalone', 'so-searchnode'] %}
      - file: kafkacertz
{% endif %}
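
After a highstate, the conditional binds above can be verified against the running container; a hedged spot-check using the container name from this diff:

    # Confirm the Kafka keystore bind is present when the global pipeline is KAFKA.
    sudo docker inspect so-logstash --format '{{ json .Mounts }}' | grep kafka-logstash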

@@ -878,6 +878,23 @@ function createHYPERVISOR() {
  add_telegraf_to_minion || return 1
}

function createMANAGERHYPE() {
  log "INFO" "Creating MANAGERHYPE configuration for minion $MINION_ID"
  add_elasticsearch_to_minion || return 1
  add_logstash_to_minion || return 1
  add_elastalert_to_minion || return 1
  add_kibana_to_minion || return 1
  add_redis_to_minion || return 1
  add_telegraf_to_minion || return 1
  add_influxdb_to_minion || return 1
  add_nginx_to_minion || return 1
  add_soc_to_minion || return 1
  add_registry_to_minion || return 1
  add_kratos_to_minion || return 1
  add_idstools_to_minion || return 1
  add_elastic_fleet_package_registry_to_minion || return 1
}

function createDESKTOP() {
  log "INFO" "Creating DESKTOP configuration for minion $MINION_ID"
  add_desktop_to_minion || return 1

@@ -123,7 +123,7 @@ so-nginx:
      - /opt/so/tmp/nginx/:/run:rw
      - /opt/so/saltstack/local/salt/elasticfleet/files/so_agent-installers/:/opt/socore/html/packages
      - /nsm/elastic-fleet/artifacts/:/opt/socore/html/artifacts
{% if grains.role in ['so-manager', 'so-managersearch', 'so-eval', 'so-standalone', 'so-import'] %}
{% if GLOBALS.is_manager %}
      - /etc/pki/managerssl.crt:/etc/pki/nginx/server.crt:ro
      - /etc/pki/managerssl.key:/etc/pki/nginx/server.key:ro
      # ATT&CK Navigator binds

@@ -156,7 +156,7 @@ so-nginx:
      - file: nginxconfdir
    - require:
      - file: nginxconf
{% if grains.role in ['so-manager', 'so-managersearch', 'so-eval', 'so-standalone', 'so-import'] %}
{% if GLOBALS.is_manager %}
{% if NGINXMERGED.ssl.replace_cert %}
      - file: managerssl_key
      - file: managerssl_crt

@@ -59,7 +59,7 @@ http {

{%- endif %}

{%- if role in ['eval', 'managersearch', 'manager', 'standalone', 'import'] %}
{%- if GLOBALS.is_manager %}

    server {
      listen 80 default_server;

@@ -108,7 +108,7 @@ http {

{%- endif %}

{%- if role in ['eval', 'managersearch', 'manager', 'standalone', 'import'] %}
{%- if GLOBALS.is_manager %}

    server {
      listen 7788;
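
Several hunks in this commit swap hard-coded role lists for GLOBALS.is_manager, which presumably evaluates true for every manager-like role, so-managerhype included, so new manager variants no longer need to be added list by list. A hedged way to inspect the rendered flag on a minion (the map path follows Security Onion's conventional layout):

    # Dump the GLOBALS map as the minion renders it and check is_manager.
    sudo salt-call jinja.load_map salt://vars/globals.map.jinja GLOBALS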

@@ -49,7 +49,7 @@ so_repo:
  pkgrepo.managed:
    - name: securityonion
    - humanname: Security Onion Repo
{% if GLOBALS.role in ['so-eval', 'so-standalone', 'so-import', 'so-manager', 'so-managersearch'] %}
{% if GLOBALS.is_manager %}
    - baseurl: file:///nsm/repo/
{% else %}
    - baseurl: https://{{ GLOBALS.repo_host }}/repo

@@ -17,7 +17,7 @@
{% set COMMONNAME = GLOBALS.manager %}
{% endif %}

{% if grains.id.split('_')|last in ['manager', 'managersearch', 'eval', 'standalone', 'import'] %}
{% if GLOBALS.is_manager %}
include:
  - ca
{% set trusttheca_text = salt['cp.get_file_str']('/etc/pki/ca.crt')|replace('\n', '') %}

@@ -99,7 +99,7 @@ influxkeyperms:
    - mode: 640
    - group: 939

{% if grains['role'] in ['so-manager', 'so-eval', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-fleet', 'so-receiver'] %}
{% if GLOBALS.is_manager or GLOBALS.role in ['so-heavynode', 'so-fleet', 'so-receiver'] %}
# Create a cert for Redis encryption
redis_key:
  x509.private_key_managed:

@@ -139,7 +139,7 @@ rediskeyperms:
    - group: 939
{% endif %}

{% if grains['role'] in ['so-manager', 'so-eval', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-fleet', 'so-receiver'] %}
{% if GLOBALS.is_manager or GLOBALS.role in ['so-heavynode', 'so-fleet', 'so-receiver'] %}

{% if grains['role'] not in [ 'so-heavynode', 'so-receiver'] %}
# Start -- Elastic Fleet Host Cert

@@ -388,7 +388,7 @@ chownelasticfleetagentkey:

{% endif %}

{% if grains['role'] in ['so-manager', 'so-eval', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-receiver'] %}
{% if GLOBALS.is_manager or GLOBALS.role in ['so-heavynode', 'so-receiver'] %}
etc_filebeat_key:
  x509.private_key_managed:
    - name: /etc/pki/filebeat.key

@@ -552,7 +552,7 @@ elasticp12perms:

{% endif %}

{% if grains['role'] in ['so-sensor', 'so-manager', 'so-searchnode', 'so-eval', 'so-managersearch', 'so-heavynode', 'so-fleet', 'so-standalone', 'so-idh', 'so-import', 'so-receiver'] %}
{% if GLOBALS.is_manager or GLOBALS.role in ['so-sensor', 'so-searchnode', 'so-heavynode', 'so-fleet', 'so-idh', 'so-receiver'] %}

fbcertdir:
  file.directory:

@@ -663,7 +663,7 @@ elastickeyperms:
    - group: 930
{%- endif %}

{% if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone'] %}
{% if GLOBALS.role in ['so-manager', 'so-managerhype', 'so-managersearch', 'so-standalone'] %}
elasticfleet_kafka_key:
  x509.private_key_managed:
    - name: /etc/pki/elasticfleet-kafka.key

@@ -48,6 +48,15 @@ telegraf:
      - redis.sh
      - sostatus.sh
      - features.sh
    managerhype:
      - agentstatus.sh
      - influxdbsize.sh
      - lasthighstate.sh
      - os.sh
      - raid.sh
      - redis.sh
      - sostatus.sh
      - features.sh
    managersearch:
      - agentstatus.sh
      - eps.sh

@@ -85,7 +85,7 @@ base:
    - utility
    - elasticfleet

  '*_manager and G@saltversion:{{saltversion}}':
  '*_manager or *_managerhype and G@saltversion:{{saltversion}}':
    - match: compound
    - salt.master
    - ca

@@ -276,17 +276,19 @@ base:
    - elasticfleet.install_agent_grid
    - schedule

  '*_hypervisor and G@saltversion:{{saltversion}}':
  '*_hypervisor or *_managerhype and I@features:hvn and G@saltversion:{{saltversion}}':
    - match: compound
    - ssl
    - sensoroni
    - telegraf
    - firewall
    - elasticfleet.install_agent_grid
    - libvirt
    - libvirt.images
    - hypervisor
    - stig

  '*_hypervisor and I@features:hvn and G@saltversion:{{saltversion}}':
    - elasticfleet.install_agent_grid

  '*_desktop and G@saltversion:{{saltversion}}':
    - ssl
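
The compound targets above determine what a highstate actually runs; a hedged check of what the top file now hands a managerhype minion:

    # List the states the salt top file assigns to matching minions.
    sudo salt '*_managerhype' state.show_top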

@@ -38,6 +38,7 @@
    'so-import',
    'so-manager',
    'so-managersearch',
    'so-managerhype',
    'so-standalone'
  ],
  'sensor_roles': [

@@ -1,6 +1,12 @@
{% import 'vars/init.map.jinja' as INIT %}
{%
  set has_br0 = INIT.GRAINS is defined and INIT.GRAINS.ip_interfaces is defined and 'br0' in INIT.GRAINS.ip_interfaces and INIT.GRAINS.ip_interfaces.br0 is defined and INIT.GRAINS.ip_interfaces.br0|length > 0 %}
{%
  set has_mainint = INIT.PILLAR is defined and INIT.PILLAR.host is defined and INIT.PILLAR.host.mainint is defined %}
{%
  set fallback_ip = INIT.GRAINS.ip_interfaces.get(INIT.PILLAR.host.mainint, ['127.0.0.1'])[0] if has_mainint else '127.0.0.1' %}
{%
  set ROLE_GLOBALS = {
    'node_ip': INIT.GRAINS.ip_interfaces.get('br0')[0]
    'node_ip': INIT.GRAINS.ip_interfaces.get('br0', [fallback_ip])[0] if has_br0 else fallback_ip
  }
%}
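
The has_br0/fallback chain above guards against grids where the hypervisor bridge is absent or has no address yet; a hedged way to see which branch a minion would take (grains.get is stock Salt):

    # Inspect the br0 grain the map keys on; empty output means fallback_ip is used.
    sudo salt-call grains.get ip_interfaces:br0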

salt/vars/managerhype.map.jinja (new file, +8)
@@ -0,0 +1,8 @@
{% from 'vars/manager.map.jinja' import ROLE_GLOBALS as MANAGER_GLOBALS %}
{% from 'vars/hypervisor.map.jinja' import ROLE_GLOBALS as HYPERVISOR_GLOBALS %}

{% set ROLE_GLOBALS = {} %}

{# Merge both role globals #}
{% do salt['defaults.merge'](ROLE_GLOBALS, HYPERVISOR_GLOBALS, merge_lists=False, in_place=True) %}
{% do salt['defaults.merge'](ROLE_GLOBALS, MANAGER_GLOBALS, merge_lists=False, in_place=True) %}
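
Since the manager globals are merged last, manager values win whenever both maps define the same key. A hedged illustration of that precedence with made-up values (defaults.merge is a stock Salt execution module):

    # The second dict is merged into the first, overwriting on conflict --
    # mirroring how MANAGER_GLOBALS overrides HYPERVISOR_GLOBALS above.
    sudo salt-call --local defaults.merge '{"node_ip": "10.10.10.5"}' '{"node_ip": "10.10.10.9"}'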

@@ -512,7 +512,7 @@ configure_minion() {
    'workstation')
      echo "master: '$MSRV'" >> "$minion_config"
      ;;
    'manager' | 'eval' | 'managersearch' | 'standalone' | 'import')
    'manager'* | 'eval' | 'standalone' | 'import')
      cp -f ../salt/ca/files/signing_policies.conf /etc/salt/minion.d/signing_policies.conf
      printf '%s\n'\
        "master: '$HOSTNAME'"\

@@ -609,6 +609,10 @@ check_requirements() {
    req_mem=8
    req_cores=2
    req_nics=1
  elif [[ $is_managerhype || $is_hypervisor ]]; then
    req_mem=64
    req_cores=32
    req_nics=1
  fi

  if [[ $setup_type == 'network' ]] ; then
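
The 64 GB / 32-core floor for managerhype and hypervisor nodes can be sanity-checked on candidate hardware before running setup; a trivial sketch:

    # Compare local resources against the managerhype minimums above.
    echo "cores: $(nproc), mem_gb: $(free -g | awk '/^Mem:/ {print $2}')"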

@@ -1574,6 +1578,10 @@ process_installtype() {
    is_receiver=true
  elif [ "$install_type" = 'DESKTOP' ]; then
    is_desktop=true
  elif [ "$install_type" = 'HYPERVISOR' ]; then
    is_hypervisor=true
  elif [ "$install_type" = 'MANAGERHYPE' ]; then
    is_managerhype=true
  fi

}

@@ -635,6 +635,29 @@ if ! [[ -f $install_opt_file ]]; then
  set_minion_info
  whiptail_end_settings

elif [[ $is_managerhype ]]; then
  info "Setting up as node type managerhype"
  check_elastic_license
  waitforstate=true
  [[ $is_iso ]] && whiptail_airgap
  check_requirements
  networking_needful
  configure_hyper_bridge
  [[ ! $is_airgap ]] && collect_net_method
  collect_dockernet
  [[ ! $is_airgap ]] && detect_cloud
  set_minion_info
  set_default_log_size >> $setup_log 2>&1
  info "Verifying all network devices are managed by Network Manager that should be"
  check_network_manager_conf
  set_network_dev_status_list
  calculate_useable_cores
  collect_webuser_inputs
  get_redirect
  collect_so_allow
  [[ ! $is_airgap ]] && whiptail_accept_telemetry
  whiptail_end_settings

fi

if [[ $waitforstate ]]; then

@@ -654,9 +654,10 @@ whiptail_install_type_dist_new() {
Note: MANAGER is the recommended option for most users. MANAGERSEARCH should only be used in very specific situations.
EOM

  install_type=$(whiptail --title "$whiptail_title" --menu "$mngr_msg" 20 75 2 \
  install_type=$(whiptail --title "$whiptail_title" --menu "$mngr_msg" 20 75 3 \
    "MANAGER" "New grid, requires separate search node(s) " \
    "MANAGERSEARCH" "New grid, separate search node(s) are optional " \
    "MANAGERHYPE" "Manager with hypervisor - Security Onion Pro required " \
    3>&1 1>&2 2>&3
  )

@@ -681,7 +682,7 @@ whiptail_install_type_dist_existing() {
    "HEAVYNODE" "Sensor + Search Node " \
    "IDH" "Intrusion Detection Honeypot Node " \
    "RECEIVER" "Receiver Node " \
    "HYPERVISOR" "Hypervisor Node " \
    "HYPERVISOR" "Hypervisor Node - Security Onion Pro required " \
    3>&1 1>&2 2>&3
    # "HOTNODE" "Add Hot Node (Uses Elastic Clustering)" \ # TODO
    # "WARMNODE" "Add Warm Node to existing Hot or Search node" \ # TODO

@@ -714,8 +715,6 @@ whiptail_install_type_dist_existing() {
    is_receiver=true
  elif [ "$install_type" = 'DESKTOP' ]; then
    is_desktop=true
  elif [ "$install_type" = 'HYPERVISOR' ]; then
    is_hypervisor=true
  fi

  local exitstatus=$?