From 44a5b3b1e50c737663e20d90c7a78804e744e657 Mon Sep 17 00:00:00 2001
From: Josh Patterson
Date: Wed, 12 Mar 2025 21:05:04 -0400
Subject: [PATCH] MANAGERHYPE setup: add combined manager + hypervisor node type

---
 pillar/top.sls                      |   2 +-
 salt/allowed_states.map.jinja       | 423 +++++++++++-----------------
 salt/common/init.sls                |   2 +-
 salt/elasticfleet/config.sls        |   2 +-
 salt/elasticsearch/config.map.jinja |   2 +-
 salt/elasticsearch/defaults.yaml    |   8 +
 salt/elasticsearch/enabled.sls      |   2 +-
 salt/firewall/defaults.yaml         | 210 ++++++++++++++
 salt/logstash/defaults.yaml         |   2 +
 salt/logstash/enabled.sls           |  24 +-
 salt/manager/tools/sbin/so-minion   |  17 ++
 salt/nginx/enabled.sls              |   4 +-
 salt/nginx/etc/nginx.conf           |   4 +-
 salt/repo/client/oracle.sls         |   2 +-
 salt/ssl/init.sls                   |  12 +-
 salt/telegraf/defaults.yaml         |   9 +
 salt/top.sls                        |   8 +-
 salt/vars/globals.map.jinja         |   1 +
 salt/vars/hypervisor.map.jinja      |   8 +-
 salt/vars/managerhype.map.jinja     |   8 +
 setup/so-functions                  |  10 +-
 setup/so-setup                      |  23 ++
 setup/so-whiptail                   |   7 +-
 23 files changed, 493 insertions(+), 297 deletions(-)
 create mode 100644 salt/vars/managerhype.map.jinja

diff --git a/pillar/top.sls b/pillar/top.sls
index bd78b5712..6a2311a64 100644
--- a/pillar/top.sls
+++ b/pillar/top.sls
@@ -27,7 +27,7 @@ base:
     - nginx.adv_nginx
     - node_data.ips
 
-  '*_manager or *_managersearch':
+  '*_manager or *_managersearch or *_managerhype':
     - match: compound
 {% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/elasticsearch/auth.sls') %}
     - elasticsearch.auth
diff --git a/salt/allowed_states.map.jinja b/salt/allowed_states.map.jinja
index bfa7ebe50..af9bcaa5d 100644
--- a/salt/allowed_states.map.jinja
+++ b/salt/allowed_states.map.jinja
@@ -7,271 +7,174 @@
 {% import_yaml 'salt/minion.defaults.yaml' as saltversion %}
 {% set saltversion = saltversion.salt.minion.version %}
 
-{# this is the list we are returning from this map file, it gets built below #}
-{% set allowed_states= [] %}
+{# Define common state groups to reduce redundancy.
+   Note: salt.minion is intentionally NOT in base_states; it is appended
+   unconditionally at the bottom of this file so that a minion on a
+   mismatched salt version can still run the state that upgrades it. #}
+{% set base_states = [
+  'common',
+  'patch.os.schedule',
+  'motd',
+  'salt.minion-check',
+  'sensoroni',
+  'salt.lasthighstate'
+] %}
+
+{% set ssl_states = [
+  'ssl',
+  'telegraf',
+  'firewall',
+  'schedule',
+  'docker_clean'
+] %}
+
+{% set manager_states = [
+  'salt.master',
+  'ca',
+  'registry',
+  'manager',
+  'nginx',
+  'influxdb',
+  'soc',
+  'kratos',
+  'hydra',
+  'elasticfleet',
+  'elastic-fleet-package-registry',
+  'idstools',
+  'suricata.manager',
+  'utility'
+] %}
+
+{% set sensor_states = [
+  'pcap',
+  'suricata',
+  'healthcheck',
+  'tcpreplay'
+] %}
+
+{% set kafka_states = [
+  'kafka'
+] %}
+
+{% set stig_states = [
+  'stig'
+] %}
+
+{% set elastic_stack_states = [
+  'elasticsearch',
+  'elasticsearch.auth',
+  'kibana',
+  'kibana.secrets',
+  'elastalert',
+  'logstash',
+  'redis'
+] %}
+
+{# Initialize the allowed_states list #}
+{% set allowed_states = [] %}
 
 {% if grains.saltversion | string == saltversion | string %}
 
+  {# Map role-specific states #}
+  {% set role_states = {
+    'so-eval': (
+      ssl_states +
+      manager_states +
+      sensor_states +
+      elastic_stack_states +
+      ['zeek', 'strelka']
+    ),
+    'so-heavynode': (
+      ssl_states +
+      sensor_states +
+      ['nginx', 'elasticagent', 'zeek', 'strelka'] +
+      ['elasticsearch', 'logstash', 'redis'] +
+      stig_states
+    ),
+    'so-idh': (
+      ssl_states +
+      ['idh']
+    ),
+    'so-import': (
+      ssl_states +
+      manager_states +
+      sensor_states +
+      ['zeek', 'strelka.manager'] +
+      ['elasticsearch', 'elasticsearch.auth', 'kibana', 'kibana.secrets']
+    ),
+    'so-manager': (
+      ssl_states +
+      manager_states +
+      ['salt.cloud', 'libvirt.packages', 'libvirt.ssh.users',
+       'strelka.manager'] +
+      stig_states +
+      kafka_states +
+      elastic_stack_states
+    ),
+    'so-managerhype': (
+      ssl_states +
+      manager_states +
+      ['salt.cloud', 'strelka.manager', 'hypervisor', 'libvirt'] +
+      stig_states +
+      kafka_states +
+      elastic_stack_states
+    ),
+    'so-managersearch': (
+      ssl_states +
+      manager_states +
+      ['strelka.manager'] +
+      stig_states +
+      kafka_states +
+      elastic_stack_states
+    ),
+    'so-searchnode': (
+      ssl_states +
+      ['nginx', 'kafka.ca', 'kafka.ssl', 'elasticsearch', 'logstash'] +
+      stig_states
+    ),
+    'so-standalone': (
+      ssl_states +
+      manager_states +
+      sensor_states +
+      stig_states +
+      kafka_states +
+      elastic_stack_states +
+      ['zeek', 'strelka']
+    ),
+    'so-sensor': (
+      ssl_states +
+      sensor_states +
+      ['nginx', 'zeek', 'strelka'] +
+      stig_states
+    ),
+    'so-fleet': (
+      ssl_states +
+      ['logstash', 'nginx', 'healthcheck', 'elasticfleet']
+    ),
+    'so-receiver': (
+      ssl_states +
+      kafka_states +
+      stig_states +
+      ['logstash', 'redis']
+    ),
+    'so-hypervisor': (
+      ssl_states +
+      stig_states +
+      ['hypervisor', 'libvirt']
+    ),
+    'so-desktop': (
+      ['ssl', 'docker_clean', 'telegraf'] +
+      stig_states
+    )
+  } %}
 
-  {% set allowed_states= salt['grains.filter_by']({
-    'so-eval': [
-      'salt.master',
-      'ca',
-      'ssl',
-      'registry',
-      'manager',
-      'nginx',
-      'telegraf',
-      'influxdb',
-      'soc',
-      'kratos',
-      'hydra',
-      'elasticfleet',
-      'elastic-fleet-package-registry',
-      'firewall',
-      'idstools',
-      'suricata.manager',
-      'healthcheck',
-      'pcap',
-      'suricata',
-      'utility',
-      'schedule',
-      'tcpreplay',
-      'docker_clean'
-    ],
-    'so-heavynode': [
-      'ssl',
-      'nginx',
-      'telegraf',
-      'firewall',
-      'pcap',
-      'suricata',
-      'healthcheck',
-      'elasticagent',
-      'schedule',
-      'tcpreplay',
-      'docker_clean'
-    ],
-    'so-idh': [
-      'ssl',
-      'telegraf',
-      'firewall',
-      'idh',
-      'schedule',
-      'docker_clean'
-    ],
-    'so-import': [
-      'salt.master',
-      'ca',
-      'ssl',
-      'registry',
-      'manager',
-      'nginx',
-      'strelka.manager',
-      'soc',
-      'kratos',
-      'hydra',
-      'influxdb',
-      'telegraf',
-      'firewall',
-      'idstools',
-      'suricata.manager',
-      'pcap',
-      'utility',
-      'suricata',
-      'zeek',
-      'schedule',
-      'tcpreplay',
-      'docker_clean',
-      'elasticfleet',
-      'elastic-fleet-package-registry'
-    ],
-    'so-manager': [
-      'salt.master',
-      'salt.cloud',
-      'libvirt.packages',
-      'libvirt.ssh.users',
-      'ca',
-      'ssl',
-      'registry',
-      'manager',
-      'nginx',
-      'telegraf',
-      'influxdb',
-      'strelka.manager',
-      'soc',
-      'kratos',
-      'hydra',
-      'elasticfleet',
-      'elastic-fleet-package-registry',
-      'firewall',
-      'idstools',
-      'suricata.manager',
-      'utility',
-      'schedule',
-      'docker_clean',
-      'stig',
-      'kafka'
-    ],
-    'so-managersearch': [
-      'salt.master',
-      'ca',
-      'ssl',
-      'registry',
-      'nginx',
-      'telegraf',
-      'influxdb',
-      'strelka.manager',
-      'soc',
-      'kratos',
-      'hydra',
-      'elastic-fleet-package-registry',
-      'elasticfleet',
-      'firewall',
-      'manager',
-      'idstools',
-      'suricata.manager',
-      'utility',
-      'schedule',
-      'docker_clean',
-      'stig',
-      'kafka'
-    ],
-    'so-searchnode': [
-      'ssl',
-      'nginx',
-      'telegraf',
-      'firewall',
-      'schedule',
-      'docker_clean',
-      'stig',
-      'kafka.ca',
-      'kafka.ssl'
-    ],
-    'so-standalone': [
-      'salt.master',
-      'ca',
-      'ssl',
-      'registry',
-      'manager',
-      'nginx',
-      'telegraf',
-      'influxdb',
-      'soc',
-      'kratos',
-      'hydra',
-      'elastic-fleet-package-registry',
-      'elasticfleet',
-      'firewall',
-      'idstools',
-      'suricata.manager',
-      'pcap',
-      'suricata',
-      'healthcheck',
-      'utility',
-      'schedule',
-      'tcpreplay',
-      'docker_clean',
-      'stig',
-      'kafka'
-    ],
-    'so-sensor': [
-      'ssl',
-      'telegraf',
-      'firewall',
-      'nginx',
-      'pcap',
-      'suricata',
-      'healthcheck',
-      'schedule',
-      'tcpreplay',
-      'docker_clean',
-      'stig'
-    ],
-    'so-fleet': [
-      'ssl',
-      'telegraf',
-      'firewall',
-      'logstash',
-      'nginx',
-      'healthcheck',
-      'schedule',
-      'elasticfleet',
-      'docker_clean'
-    ],
-    'so-receiver': [
-      'ssl',
-      'telegraf',
-      'firewall',
-      'schedule',
-      'docker_clean',
-      'kafka',
-      'stig'
-    ],
-    'so-hypervisor': [
-      'ssl',
-      'telegraf',
-      'firewall',
-      'schedule',
-      'docker_clean',
-      'stig',
-      'hypervisor',
-      'libvirt'
-    ],
-    'so-desktop': [
-      'ssl',
-      'docker_clean',
-      'telegraf',
-      'stig'
-    ],
-  }, grain='role') %}
-
-  {%- if grains.role in ['so-sensor', 'so-eval', 'so-standalone', 'so-heavynode'] %}
-    {% do allowed_states.append('zeek') %}
-  {%- endif %}
-
-  {% if grains.role in ['so-sensor', 'so-eval', 'so-standalone', 'so-heavynode'] %}
-    {% do allowed_states.append('strelka') %}
-  {% endif %}
-
-  {% if grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-searchnode', 'so-managersearch', 'so-heavynode', 'so-import'] %}
-    {% do allowed_states.append('elasticsearch') %}
-  {% endif %}
-
-  {% if grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-managersearch', 'so-import'] %}
-    {% do allowed_states.append('elasticsearch.auth') %}
-  {% endif %}
-
-  {% if grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-managersearch', 'so-import'] %}
-    {% do allowed_states.append('kibana') %}
-    {% do allowed_states.append('kibana.secrets') %}
-  {% endif %}
-
-  {% if grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-managersearch'] %}
-    {% do allowed_states.append('elastalert') %}
-  {% endif %}
-
-  {% if grains.role in ['so-manager', 'so-standalone', 'so-searchnode', 'so-managersearch', 'so-heavynode', 'so-receiver'] %}
-    {% do allowed_states.append('logstash') %}
-  {% endif %}
-
-  {% if grains.role in ['so-manager', 'so-standalone', 'so-managersearch', 'so-heavynode', 'so-receiver', 'so-eval'] %}
-    {% do allowed_states.append('redis') %}
-  {% endif %}
-
-  {# all nodes on the right salt version can run the following states #}
-  {% do allowed_states.append('common') %}
-  {% do allowed_states.append('patch.os.schedule') %}
-  {% do allowed_states.append('motd') %}
-  {% do allowed_states.append('salt.minion-check') %}
-  {% do allowed_states.append('sensoroni') %}
-  {% do allowed_states.append('salt.lasthighstate') %}
+  {# Get states for the current role #}
+  {% if grains.role in role_states %}
+    {% set allowed_states = role_states[grains.role] %}
+  {% endif %}
 
+  {# Add base states that apply to all roles #}
+  {% for state in base_states %}
+    {% do allowed_states.append(state) %}
+  {% endfor %}
 {% endif %}
 
+{# Add airgap state if needed #}
 {% if ISAIRGAP %}
   {% do allowed_states.append('airgap') %}
 {% endif %}
 
 {# all nodes can always run salt.minion state #}
 {% do allowed_states.append('salt.minion') %}
diff --git a/salt/common/init.sls b/salt/common/init.sls
index d4d90cbed..bb7d648a4 100644
--- a/salt/common/init.sls
+++ b/salt/common/init.sls
@@ -106,7 +106,7 @@ Etc/UTC:
   timezone.system
 
 # Sync curl configuration for Elasticsearch authentication
-{% if GLOBALS.role in ['so-eval', 'so-heavynode', 'so-import', 'so-manager', 'so-managersearch', 'so-searchnode', 'so-standalone'] %}
+{% if GLOBALS.is_manager or GLOBALS.role in ['so-heavynode', 'so-searchnode'] %}
 elastic_curl_config:
   file.managed:
     - name: /opt/so/conf/elasticsearch/curl.config
diff --git a/salt/elasticfleet/config.sls b/salt/elasticfleet/config.sls
index ef921b404..f347a3c80 100644
--- a/salt/elasticfleet/config.sls
+++ b/salt/elasticfleet/config.sls
@@ -166,7 +166,7 @@ eaoptionalintegrationsdir:
 
 {% for minion in node_data %}
 {% set role = node_data[minion]["role"] %}
-{% if role in [ "eval","fleet","heavynode","import","manager","managersearch","standalone" ] %}
+{% if role in [ "eval","fleet","heavynode","import","manager","managerhype","managersearch","standalone" ] %}
 {% set optional_integrations = ELASTICFLEETMERGED.optional_integrations %}
 {% set integration_keys = optional_integrations.keys() %}
 fleet_server_integrations_{{ minion }}:
diff --git a/salt/elasticsearch/config.map.jinja b/salt/elasticsearch/config.map.jinja
index f6062e1f2..cfbab8524 100644
--- a/salt/elasticsearch/config.map.jinja
+++ b/salt/elasticsearch/config.map.jinja
@@ -28,7 +28,7 @@
   {% endfor %}
 {% endfor %}
 
-{% if grains.id.split('_') | last in ['manager','managersearch','standalone'] %}
+{% if grains.id.split('_') | last in ['manager','managerhype','managersearch','standalone'] %}
   {% if ELASTICSEARCH_SEED_HOSTS | length > 1 %}
     {% do ELASTICSEARCHDEFAULTS.elasticsearch.config.update({'discovery': {'seed_hosts': []}}) %}
     {% for NODE in ELASTICSEARCH_SEED_HOSTS %}
diff --git a/salt/elasticsearch/defaults.yaml b/salt/elasticsearch/defaults.yaml
index 7b38ed0bb..f55f49197 100644
--- a/salt/elasticsearch/defaults.yaml
+++ b/salt/elasticsearch/defaults.yaml
@@ -4479,6 +4479,14 @@ elasticsearch:
           - data
           - remote_cluster_client
           - transform
+    so-managerhype:
+      config:
+        node:
+          roles:
+            - master
+            - data
+            - remote_cluster_client
+            - transform
     so-managersearch:
       config:
         node:
diff --git a/salt/elasticsearch/enabled.sls b/salt/elasticsearch/enabled.sls
index af162d9e9..4e1eecd0a 100644
--- a/salt/elasticsearch/enabled.sls
+++ b/salt/elasticsearch/enabled.sls
@@ -204,7 +204,7 @@ so-elasticsearch-roles-load:
       - docker_container: so-elasticsearch
       - file: elasticsearch_sbin_jinja
 
-{% if grains.role in ['so-eval', 'so-standalone', 'so-managersearch', 'so-heavynode', 'so-manager'] %}
+{% if GLOBALS.role in ['so-eval', 'so-standalone', 'so-managersearch', 'so-heavynode', 'so-manager', 'so-managerhype'] %}
 {% if ELASTICSEARCHMERGED.index_clean %}
   {% set ap = "present" %}
 {% else %}
diff --git a/salt/firewall/defaults.yaml b/salt/firewall/defaults.yaml
index 9b4e0a430..930ceac25 100644
--- a/salt/firewall/defaults.yaml
+++ b/salt/firewall/defaults.yaml
@@ -19,6 +19,7 @@ firewall:
     localhost:
       - 127.0.0.1
     manager: []
+    managerhype: []
    managersearch: []
    receiver: []
    searchnode: []
@@ -573,6 +574,215 @@ firewall:
         portgroups: []
       customhostgroup9:
         portgroups: []
+    managerhype:
+      chain:
+        DOCKER-USER:
+          hostgroups:
+            managerhype:
+              portgroups:
+                - kibana
+                - redis
+                - influxdb
+                - elasticsearch_rest
+                - elasticsearch_node
+                - docker_registry
+                - elastic_agent_control
+                - elastic_agent_data
+                - elastic_agent_update
+                - localrules
+                - sensoroni
+            fleet:
+              portgroups:
+                - elasticsearch_rest
+                - docker_registry
+                - influxdb
+                - sensoroni
+                - yum
+                - beats_5044
+                - beats_5644
+                - beats_5056
+                - elastic_agent_control
+                - elastic_agent_data
+                - elastic_agent_update
+            idh:
+              portgroups:
+                - docker_registry
+                - influxdb
+                - sensoroni
+                - yum
+                - beats_5044
+                - beats_5644
+                - elastic_agent_control
+                - elastic_agent_data
+                - elastic_agent_update
+            sensor:
+              portgroups:
+                - beats_5044
+                - beats_5644
+                - elastic_agent_control
+                - elastic_agent_data
+                - elastic_agent_update
+                - yum
+                - docker_registry
+                - influxdb
+                - sensoroni
+            searchnode:
+              portgroups:
+                - redis
+                - elasticsearch_rest
+                - elasticsearch_node
+                - beats_5644
+                - yum
+                - docker_registry
+                - influxdb
+                - elastic_agent_control
+                - elastic_agent_data
+                - elastic_agent_update
+                - sensoroni
+            heavynode:
+              portgroups:
+                - redis
+                - elasticsearch_rest
+                - elasticsearch_node
+                - beats_5644
+                - yum
+                - docker_registry
+                - influxdb
+                - elastic_agent_control
+                - elastic_agent_data
+                - elastic_agent_update
+                - sensoroni
+            receiver:
+              portgroups:
+                - yum
+                - docker_registry
+                - influxdb
+                - elastic_agent_control
+                - elastic_agent_data
+                - elastic_agent_update
+                - sensoroni
+            analyst:
+              portgroups:
+                - nginx
+            beats_endpoint:
+              portgroups:
+                - beats_5044
+            beats_endpoint_ssl:
+              portgroups:
+                - beats_5644
+            elasticsearch_rest:
+              portgroups:
+                - elasticsearch_rest
+            elastic_agent_endpoint:
+              portgroups:
+                - elastic_agent_control
+                - elastic_agent_data
+                - elastic_agent_update
+            endgame:
+              portgroups:
+                - endgame
+            external_suricata:
+              portgroups:
+                - external_suricata
+            desktop:
+              portgroups:
+                - docker_registry
+                - influxdb
+                - sensoroni
+                - yum
+                - elastic_agent_control
+                - elastic_agent_data
+                - elastic_agent_update
+            hypervisor:
+              portgroups:
+                - yum
+                - docker_registry
+                - influxdb
+                - elastic_agent_control
+                - elastic_agent_data
+                - elastic_agent_update
+                - sensoroni
+            customhostgroup0:
+              portgroups: []
+            customhostgroup1:
+              portgroups: []
+            customhostgroup2:
+              portgroups: []
+            customhostgroup3:
+              portgroups: []
+            customhostgroup4:
+              portgroups: []
+            customhostgroup5:
+              portgroups: []
+            customhostgroup6:
+              portgroups: []
+            customhostgroup7:
+              portgroups: []
+            customhostgroup8:
+              portgroups: []
+            customhostgroup9:
+              portgroups: []
+        INPUT:
+          hostgroups:
+            anywhere:
+              portgroups:
+                - ssh
+            dockernet:
+              portgroups:
+                - all
+            fleet:
+              portgroups:
+                - salt_manager
+            idh:
+              portgroups:
+                - salt_manager
+            localhost:
+              portgroups:
+                - all
+            sensor:
+              portgroups:
+                - salt_manager
+            searchnode:
+              portgroups:
+                - salt_manager
+            heavynode:
+              portgroups:
+                - salt_manager
+            receiver:
+              portgroups:
+                - salt_manager
+            desktop:
+              portgroups:
+                - salt_manager
+            hypervisor:
+              portgroups:
+                - salt_manager
+            self:
+              portgroups:
+                - syslog
+            syslog:
+              portgroups:
+                - syslog
+            customhostgroup0:
+              portgroups: []
+            customhostgroup1:
+              portgroups: []
+            customhostgroup2:
+              portgroups: []
+            customhostgroup3:
+              portgroups: []
+            customhostgroup4:
+              portgroups: []
+            customhostgroup5:
+              portgroups: []
+            customhostgroup6:
+              portgroups: []
+            customhostgroup7:
+              portgroups: []
+            customhostgroup8:
+              portgroups: []
+            customhostgroup9:
+              portgroups: []
     managersearch:
       chain:
         DOCKER-USER:
diff --git a/salt/logstash/defaults.yaml b/salt/logstash/defaults.yaml
index 9930b7bcf..5af366459 100644
--- a/salt/logstash/defaults.yaml
+++ b/salt/logstash/defaults.yaml
@@ -12,6 +12,8 @@ logstash:
       - search
     manager:
       - manager
+    managerhype:
+      - manager
     managersearch:
       - manager
       - search
diff --git a/salt/logstash/enabled.sls b/salt/logstash/enabled.sls
index 0f44a3767..cd71cd574 100644
--- a/salt/logstash/enabled.sls
+++ b/salt/logstash/enabled.sls
@@ -16,7 +16,7 @@ include:
   - elasticsearch.ca
 {% endif %}
 {# Kafka ca runs on nodes that can run logstash for Kafka input / output. Only when Kafka is global pipeline #}
-{% if GLOBALS.role in ['so-searchnode', 'so-manager', 'so-managersearch', 'so-receiver', 'so-standalone'] and GLOBALS.pipeline == 'KAFKA' %}
+{% if GLOBALS.role in ['so-searchnode', 'so-manager', 'so-managerhype', 'so-managersearch', 'so-receiver', 'so-standalone'] and GLOBALS.pipeline == 'KAFKA' %}
   - kafka.ca
   - kafka.ssl
 {% endif %}
@@ -65,26 +65,26 @@ so-logstash:
       - /opt/so/log/logstash:/var/log/logstash:rw
       - /sys/fs/cgroup:/sys/fs/cgroup:ro
       - /opt/so/conf/logstash/etc/certs:/usr/share/logstash/certs:ro
-      {% if GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-receiver'] %}
+      {% if GLOBALS.role in ['so-manager', 'so-managerhype', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-receiver'] %}
       - /etc/pki/filebeat.crt:/usr/share/logstash/filebeat.crt:ro
       - /etc/pki/filebeat.p8:/usr/share/logstash/filebeat.key:ro
       {% endif %}
-      {% if GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone', 'so-import', 'so-eval','so-fleet', 'so-heavynode', 'so-receiver'] %}
+      {% if GLOBALS.is_manager or GLOBALS.role in ['so-fleet', 'so-heavynode', 'so-receiver'] %}
       - /etc/pki/elasticfleet-logstash.crt:/usr/share/logstash/elasticfleet-logstash.crt:ro
       - /etc/pki/elasticfleet-logstash.key:/usr/share/logstash/elasticfleet-logstash.key:ro
       - /etc/pki/elasticfleet-lumberjack.crt:/usr/share/logstash/elasticfleet-lumberjack.crt:ro
       - /etc/pki/elasticfleet-lumberjack.key:/usr/share/logstash/elasticfleet-lumberjack.key:ro
       {% endif %}
-      {% if GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone', 'so-import'] %}
+      {% if GLOBALS.role in ['so-manager', 'so-managerhype', 'so-managersearch', 'so-standalone', 'so-import'] %}
       - /etc/pki/ca.crt:/usr/share/filebeat/ca.crt:ro
       {% else %}
       - /etc/pki/tls/certs/intca.crt:/usr/share/filebeat/ca.crt:ro
       {% endif %}
-      {% if GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-searchnode' ] %}
+      {% if GLOBALS.role in ['so-manager', 'so-managerhype', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-searchnode' ] %}
       - /opt/so/conf/ca/cacerts:/etc/pki/ca-trust/extracted/java/cacerts:ro
       - /opt/so/conf/ca/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro
       {% endif %}
-      {% if GLOBALS.pipeline == "KAFKA" and GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone', 'so-searchnode'] %}
+      {% if GLOBALS.pipeline == "KAFKA" and GLOBALS.role in ['so-manager', 'so-managerhype', 'so-managersearch', 'so-standalone', 'so-searchnode'] %}
       - /etc/pki/kafka-logstash.p12:/usr/share/logstash/kafka-logstash.p12:ro
       - /opt/so/conf/kafka/kafka-truststore.jks:/etc/pki/kafka-truststore.jks:ro
       {% endif %}
@@ -100,7 +100,7 @@ so-logstash:
       {% endfor %}
     {% endif %}
     - watch:
-      {% if grains['role'] in ['so-manager', 'so-eval', 'so-managersearch', 'so-standalone', 'so-import', 'so-fleet', 'so-receiver'] %}
+      {% if GLOBALS.is_manager or GLOBALS.role in ['so-fleet', 'so-receiver'] %}
       - x509: etc_elasticfleet_logstash_key
       - x509: etc_elasticfleet_logstash_crt
       {% endif %}
@@ -111,23 +111,23 @@ so-logstash:
         - file: ls_pipeline_{{assigned_pipeline}}_{{CONFIGFILE.split('.')[0] | replace("/","_") }}
       {% endfor %}
     {% endfor %}
-      {% if GLOBALS.pipeline == 'KAFKA' and GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone', 'so-searchnode'] %}
+      {% if GLOBALS.pipeline == 'KAFKA' and GLOBALS.role in ['so-manager', 'so-managerhype', 'so-managersearch', 'so-standalone', 'so-searchnode'] %}
      - file: kafkacertz
      {% endif %}
    - require:
-      {% if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-receiver'] %}
+      {% if grains['role'] in ['so-manager', 'so-managerhype', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-receiver'] %}
       - x509: etc_filebeat_crt
       {% endif %}
-      {% if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-import'] %}
+      {% if grains['role'] in ['so-manager', 'so-managerhype', 'so-managersearch', 'so-standalone', 'so-import'] %}
       - x509: pki_public_ca_crt
       {% else %}
       - x509: trusttheca
       {% endif %}
-      {% if grains.role in ['so-manager', 'so-managersearch', 'so-standalone', 'so-import'] %}
+      {% if grains.role in ['so-manager', 'so-managerhype', 'so-managersearch', 'so-standalone', 'so-import'] %}
       - file: cacertz
       - file: capemz
       {% endif %}
-      {% if GLOBALS.pipeline == 'KAFKA' and GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone', 'so-searchnode'] %}
+      {% if GLOBALS.pipeline == 'KAFKA' and GLOBALS.role in ['so-manager', 'so-managerhype', 'so-managersearch', 'so-standalone', 'so-searchnode'] %}
       - file: kafkacertz
       {% endif %}
diff --git a/salt/manager/tools/sbin/so-minion b/salt/manager/tools/sbin/so-minion
index 6de864e4b..222119bde 100755
--- a/salt/manager/tools/sbin/so-minion
+++ b/salt/manager/tools/sbin/so-minion
@@ -878,6 +878,23 @@ function createHYPERVISOR() {
   add_telegraf_to_minion || return 1
 }
 
+function createMANAGERHYPE() {
+  log "INFO" "Creating MANAGERHYPE configuration for minion $MINION_ID"
+  add_elasticsearch_to_minion || return 1
+  add_logstash_to_minion || return 1
+  add_elastalert_to_minion || return 1
+  add_kibana_to_minion || return 1
+  add_redis_to_minion || return 1
+  add_telegraf_to_minion || return 1
+  add_influxdb_to_minion || return 1
+  add_nginx_to_minion || return 1
+  add_soc_to_minion || return 1
+  add_registry_to_minion || return 1
+  add_kratos_to_minion || return 1
+  add_idstools_to_minion || return 1
+  add_elastic_fleet_package_registry_to_minion || return 1
+}
+
 function createDESKTOP() {
   log "INFO" "Creating DESKTOP configuration for minion $MINION_ID"
   add_desktop_to_minion || return 1
diff --git a/salt/nginx/enabled.sls b/salt/nginx/enabled.sls
index e2bcef863..76bf7aba4 100644
--- a/salt/nginx/enabled.sls
+++ b/salt/nginx/enabled.sls
@@ -123,7 +123,7 @@ so-nginx:
       - /opt/so/tmp/nginx/:/run:rw
       - /opt/so/saltstack/local/salt/elasticfleet/files/so_agent-installers/:/opt/socore/html/packages
       - /nsm/elastic-fleet/artifacts/:/opt/socore/html/artifacts
-      {% if grains.role in ['so-manager', 'so-managersearch', 'so-eval', 'so-standalone', 'so-import'] %}
+      {% if GLOBALS.is_manager %}
       - /etc/pki/managerssl.crt:/etc/pki/nginx/server.crt:ro
       - /etc/pki/managerssl.key:/etc/pki/nginx/server.key:ro
       # ATT&CK Navigator binds
@@ -156,7 +156,7 @@ so-nginx:
       - file: nginxconfdir
     - require:
       - file: nginxconf
-{% if grains.role in ['so-manager', 'so-managersearch', 'so-eval', 'so-standalone', 'so-import'] %}
+{% if GLOBALS.is_manager %}
 {% if NGINXMERGED.ssl.replace_cert %}
   - file: managerssl_key
   - file: managerssl_crt
diff --git a/salt/nginx/etc/nginx.conf b/salt/nginx/etc/nginx.conf
index 069e55cdb..0506c1e60 100644
--- a/salt/nginx/etc/nginx.conf
+++ b/salt/nginx/etc/nginx.conf
@@ -59,7 +59,7 @@ http {
 
 {%- endif %}
 
-  {%- if role in ['eval', 'managersearch', 'manager', 'standalone', 'import'] %}
+  {%- if GLOBALS.is_manager %}
 
   server {
     listen 80 default_server;
@@ -108,7 +108,7 @@ http {
 
 {%- endif %}
 
-  {%- if role in ['eval', 'managersearch', 'manager', 'standalone', 'import'] %}
+  {%- if GLOBALS.is_manager %}
 
   server {
     listen 7788;
diff --git a/salt/repo/client/oracle.sls b/salt/repo/client/oracle.sls
index 89d41beae..2f421b700 100644
--- a/salt/repo/client/oracle.sls
+++ b/salt/repo/client/oracle.sls
@@ -49,7 +49,7 @@ so_repo:
   pkgrepo.managed:
     - name: securityonion
     - humanname: Security Onion Repo
-    {% if GLOBALS.role in ['so-eval', 'so-standalone', 'so-import', 'so-manager', 'so-managersearch'] %}
+    {% if GLOBALS.is_manager %}
     - baseurl: file:///nsm/repo/
     {% else %}
     - baseurl: https://{{ GLOBALS.repo_host }}/repo
diff --git a/salt/ssl/init.sls b/salt/ssl/init.sls
index f5be34c40..0cef8c1e3 100644
--- a/salt/ssl/init.sls
+++ b/salt/ssl/init.sls
@@ -17,7 +17,7 @@
   {% set COMMONNAME = GLOBALS.manager %}
 {% endif %}
 
-{% if grains.id.split('_')|last in ['manager', 'managersearch', 'eval', 'standalone', 'import'] %}
+{% if GLOBALS.is_manager %}
 include:
   - ca
 {% set trusttheca_text = salt['cp.get_file_str']('/etc/pki/ca.crt')|replace('\n', '') %}
@@ -99,7 +99,7 @@ influxkeyperms:
     - mode: 640
     - group: 939
 
-{% if grains['role'] in ['so-manager', 'so-eval', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-fleet', 'so-receiver'] %}
+{% if GLOBALS.is_manager or GLOBALS.role in ['so-heavynode', 'so-fleet', 'so-receiver'] %}
 # Create a cert for Redis encryption
 redis_key:
   x509.private_key_managed:
@@ -139,7 +139,7 @@ rediskeyperms:
     - group: 939
 {% endif %}
 
-{% if grains['role'] in ['so-manager', 'so-eval', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-fleet', 'so-receiver'] %}
+{% if GLOBALS.is_manager or GLOBALS.role in ['so-heavynode', 'so-fleet', 'so-receiver'] %}
 
 {% if grains['role'] not in [ 'so-heavynode', 'so-receiver'] %}
 # Start -- Elastic Fleet Host Cert
@@ -388,7 +388,7 @@ chownelasticfleetagentkey:
 
 {% endif %}
 
-{% if grains['role'] in ['so-manager', 'so-eval', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-receiver'] %}
+{% if GLOBALS.is_manager or GLOBALS.role in ['so-heavynode', 'so-receiver'] %}
 etc_filebeat_key:
   x509.private_key_managed:
     - name: /etc/pki/filebeat.key
@@ -552,7 +552,7 @@ elasticp12perms:
 
 {% endif %}
 
-{% if grains['role'] in ['so-sensor', 'so-manager', 'so-searchnode', 'so-eval', 'so-managersearch', 'so-heavynode', 'so-fleet', 'so-standalone', 'so-idh', 'so-import', 'so-receiver'] %}
+{% if GLOBALS.is_manager or GLOBALS.role in ['so-sensor', 'so-searchnode', 'so-heavynode', 'so-fleet', 'so-idh', 'so-receiver'] %}
 
 fbcertdir:
   file.directory:
@@ -663,7 +663,7 @@ elastickeyperms:
     - group: 930
 {%- endif %}
 
-{% if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone'] %}
+{% if GLOBALS.role in ['so-manager', 'so-managerhype', 'so-managersearch', 'so-standalone'] %}
 elasticfleet_kafka_key:
   x509.private_key_managed:
     - name: /etc/pki/elasticfleet-kafka.key
diff --git a/salt/telegraf/defaults.yaml b/salt/telegraf/defaults.yaml
index d32fff179..79ad9008d 100644
--- a/salt/telegraf/defaults.yaml
+++ b/salt/telegraf/defaults.yaml
@@ -48,6 +48,15 @@ telegraf:
       - redis.sh
       - sostatus.sh
      - features.sh
+    managerhype:
+      - agentstatus.sh
+      - influxdbsize.sh
+      - lasthighstate.sh
+      - os.sh
+      - raid.sh
+      - redis.sh
+      - sostatus.sh
+      - features.sh
     managersearch:
       - agentstatus.sh
       - eps.sh
diff --git a/salt/top.sls b/salt/top.sls
index 0d22bd782..260bb9c74 100644
--- a/salt/top.sls
+++ b/salt/top.sls
@@ -85,7 +85,7 @@ base:
     - utility
     - elasticfleet
 
-  '*_manager and G@saltversion:{{saltversion}}':
+  '( *_manager or *_managerhype ) and G@saltversion:{{saltversion}}':
    - match: compound
    - salt.master
    - ca
@@ -276,17 +276,19 @@ base:
     - elasticfleet.install_agent_grid
     - schedule
 
-  '*_hypervisor and G@saltversion:{{saltversion}}':
+  '( *_hypervisor or *_managerhype ) and I@features:hvn and G@saltversion:{{saltversion}}':
     - match: compound
     - ssl
     - sensoroni
     - telegraf
     - firewall
-    - elasticfleet.install_agent_grid
     - libvirt
     - libvirt.images
     - hypervisor
     - stig
+
+  '*_hypervisor and I@features:hvn and G@saltversion:{{saltversion}}':
+    - match: compound
+    - elasticfleet.install_agent_grid
 
   '*_desktop and G@saltversion:{{saltversion}}':
     - ssl
diff --git a/salt/vars/globals.map.jinja b/salt/vars/globals.map.jinja
index 000cfa354..ca75437eb 100644
--- a/salt/vars/globals.map.jinja
+++ b/salt/vars/globals.map.jinja
@@ -38,6 +38,7 @@
         'so-import',
         'so-manager',
         'so-managersearch',
+        'so-managerhype',
         'so-standalone'
       ],
       'sensor_roles': [
diff --git a/salt/vars/hypervisor.map.jinja b/salt/vars/hypervisor.map.jinja
index b8fd4ac1f..0a9dfd3f7 100644
--- a/salt/vars/hypervisor.map.jinja
+++ b/salt/vars/hypervisor.map.jinja
@@ -1,6 +1,12 @@
 {% import 'vars/init.map.jinja' as INIT %}
+{%
+  set has_br0 = INIT.GRAINS is defined and INIT.GRAINS.ip_interfaces is defined and 'br0' in INIT.GRAINS.ip_interfaces and INIT.GRAINS.ip_interfaces.br0 is defined and INIT.GRAINS.ip_interfaces.br0|length > 0 %}
+{%
+  set has_mainint = INIT.PILLAR is defined and INIT.PILLAR.host is defined and INIT.PILLAR.host.mainint is defined %}
+{%
+  set fallback_ip = INIT.GRAINS.ip_interfaces.get(INIT.PILLAR.host.mainint, ['127.0.0.1'])[0] if has_mainint else '127.0.0.1' %}
 {% set ROLE_GLOBALS = {
-    'node_ip': INIT.GRAINS.ip_interfaces.get('br0')[0]
+    'node_ip': INIT.GRAINS.ip_interfaces.get('br0', [fallback_ip])[0] if has_br0 else fallback_ip
 } %}
diff --git a/salt/vars/managerhype.map.jinja b/salt/vars/managerhype.map.jinja
new file mode 100644
index 000000000..5f7118f31
--- /dev/null
+++ b/salt/vars/managerhype.map.jinja
@@ -0,0 +1,8 @@
+{% from 'vars/manager.map.jinja' import ROLE_GLOBALS as MANAGER_GLOBALS %}
+{% from 'vars/hypervisor.map.jinja' import ROLE_GLOBALS as HYPERVISOR_GLOBALS %}
+
+{% set ROLE_GLOBALS = {} %}
+
+{# Merge both role globals; manager values are merged last, so they win on conflict #}
+{% do salt['defaults.merge'](ROLE_GLOBALS, HYPERVISOR_GLOBALS, merge_lists=False, in_place=True) %}
+{% do salt['defaults.merge'](ROLE_GLOBALS, MANAGER_GLOBALS, merge_lists=False, in_place=True) %}
diff --git a/setup/so-functions b/setup/so-functions
index 25362f179..db0741851 100755
--- a/setup/so-functions
+++ b/setup/so-functions
@@ -512,7 +512,7 @@ configure_minion() {
     'workstation')
       echo "master: '$MSRV'" >> "$minion_config"
       ;;
-    'manager' | 'eval' | 'managersearch' | 'standalone' | 'import')
+    'manager'* | 'eval' | 'standalone' | 'import')
       cp -f ../salt/ca/files/signing_policies.conf /etc/salt/minion.d/signing_policies.conf
       printf '%s\n'\
         "master: '$HOSTNAME'"\
@@ -609,6 +609,10 @@ check_requirements() {
     req_mem=8
     req_cores=2
     req_nics=1
+  elif [[ $is_managerhype || $is_hypervisor ]]; then
+    req_mem=64
+    req_cores=32
+    req_nics=1
   fi
 
   if [[ $setup_type == 'network' ]] ; then
@@ -1574,6 +1578,10 @@ process_installtype() {
     is_receiver=true
   elif [ "$install_type" = 'DESKTOP' ]; then
     is_desktop=true
+  elif [ "$install_type" = 'HYPERVISOR' ]; then
+    is_hypervisor=true
+  elif [ "$install_type" = 'MANAGERHYPE' ]; then
+    is_managerhype=true
   fi
 }
 
diff --git a/setup/so-setup b/setup/so-setup
index 0f445850f..6217511fc 100755
--- a/setup/so-setup
+++ b/setup/so-setup
@@ -635,6 +635,29 @@ if ! [[ -f $install_opt_file ]]; then
     set_minion_info
     whiptail_end_settings
 
+  elif [[ $is_managerhype ]]; then
+    info "Setting up as node type managerhype"
+    check_elastic_license
+    waitforstate=true
+    [[ $is_iso ]] && whiptail_airgap
+    check_requirements
+    networking_needful
+    configure_hyper_bridge
+    [[ ! $is_airgap ]] && collect_net_method
+    collect_dockernet
+    [[ ! $is_airgap ]] && detect_cloud
+    set_minion_info
+    set_default_log_size >> $setup_log 2>&1
+    info "Verifying that all network devices that should be managed by NetworkManager are"
+    check_network_manager_conf
+    set_network_dev_status_list
+    calculate_useable_cores
+    collect_webuser_inputs
+    get_redirect
+    collect_so_allow
+    [[ ! $is_airgap ]] && whiptail_accept_telemetry
+    whiptail_end_settings
+
   fi
 
 if [[ $waitforstate ]]; then
diff --git a/setup/so-whiptail b/setup/so-whiptail
index 2ea5dd38b..57bd10b8c 100755
--- a/setup/so-whiptail
+++ b/setup/so-whiptail
@@ -654,9 +654,10 @@ whiptail_install_type_dist_new() {
 
 Note: MANAGER is the recommended option for most users. MANAGERSEARCH should only be used in very specific situations.
 EOM
-  install_type=$(whiptail --title "$whiptail_title" --menu "$mngr_msg" 20 75 2 \
+  install_type=$(whiptail --title "$whiptail_title" --menu "$mngr_msg" 20 75 3 \
     "MANAGER" "New grid, requires separate search node(s) " \
     "MANAGERSEARCH" "New grid, separate search node(s) are optional " \
+    "MANAGERHYPE" "Manager with hypervisor - Security Onion Pro required " \
     3>&1 1>&2 2>&3
   )
 
@@ -681,7 +682,7 @@ whiptail_install_type_dist_existing() {
     "HEAVYNODE" "Sensor + Search Node " \
     "IDH" "Intrusion Detection Honeypot Node " \
     "RECEIVER" "Receiver Node " \
-    "HYPERVISOR" "Hypervisor Node " \
+    "HYPERVISOR" "Hypervisor Node - Security Onion Pro required " \
     3>&1 1>&2 2>&3
 #    "HOTNODE" "Add Hot Node (Uses Elastic Clustering)" \ # TODO
 #    "WARMNODE" "Add Warm Node to existing Hot or Search node" \ # TODO
@@ -714,8 +715,6 @@ whiptail_install_type_dist_existing() {
     is_receiver=true
   elif [ "$install_type" = 'DESKTOP' ]; then
     is_desktop=true
-  elif [ "$install_type" = 'HYPERVISOR' ]; then
-    is_hypervisor=true
   fi
 
   local exitstatus=$?
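
---

Note: once this patch is applied on a manager, the reworked salt/top.sls compound
targets can be smoke-tested with Salt's standard compound matcher (-C). The salt
version below is a placeholder for illustration; substitute the version pinned in
salt/minion.defaults.yaml:

    # Managers and the new managerhype role, gated on a matching salt version;
    # the parentheses ensure the grain guard applies to both role globs.
    salt -C '( *_manager or *_managerhype ) and G@saltversion:3006.6' test.ping

    # Hypervisor-capable nodes, gated on the Pro "hvn" feature flag in pillar.
    salt -C '( *_hypervisor or *_managerhype ) and I@features:hvn' test.ping

Both commands only verify targeting (test.ping); they do not apply any state.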