From b62cc32b1a4a80356f46621084cb0e0892967617 Mon Sep 17 00:00:00 2001 From: Josh Brower Date: Tue, 21 Feb 2023 11:52:37 -0500 Subject: [PATCH 1/2] Initial support for IDH --- salt/allowed_states.map.jinja | 1 - salt/common/tools/sbin/so-firewall-minion | 2 +- salt/common/tools/sbin/so-minion | 16 ++++- salt/docker/defaults.yaml | 2 + salt/firewall/assigned_hostgroups.map.yaml | 8 ++- salt/firewall/containers.map.jinja | 84 ++++++++++++++++++++++ salt/firewall/map.jinja | 7 ++ salt/idh/opencanary_config.map.jinja | 22 ++++-- salt/top.sls | 3 - salt/vars/idh.map.jinja | 1 + setup/so-functions | 29 ++++++++ setup/so-setup | 4 +- setup/so-whiptail | 52 ++++++++++++++ 13 files changed, 216 insertions(+), 15 deletions(-) create mode 100644 salt/vars/idh.map.jinja diff --git a/salt/allowed_states.map.jinja b/salt/allowed_states.map.jinja index 42ec3604a..446892ba4 100644 --- a/salt/allowed_states.map.jinja +++ b/salt/allowed_states.map.jinja @@ -81,7 +81,6 @@ 'ssl', 'telegraf', 'firewall', - 'filebeat', 'idh', 'schedule', 'docker_clean' diff --git a/salt/common/tools/sbin/so-firewall-minion b/salt/common/tools/sbin/so-firewall-minion index acedcffeb..e796035f9 100755 --- a/salt/common/tools/sbin/so-firewall-minion +++ b/salt/common/tools/sbin/so-firewall-minion @@ -69,7 +69,7 @@ fi so-firewall --apply --role=heavynodes --ip="$IP" ;; 'IDH') - so-firewall --apply --role=beats_endpoint_ssl --ip="$IP" + so-firewall --apply --role=sensors --ip="$IP" ;; 'RECEIVER') so-firewall --apply --role=receivers --ip="$IP" diff --git a/salt/common/tools/sbin/so-minion b/salt/common/tools/sbin/so-minion index a51a31ed4..03f26c556 100755 --- a/salt/common/tools/sbin/so-minion +++ b/salt/common/tools/sbin/so-minion @@ -119,6 +119,18 @@ function add_elastic_to_minion() { " " >> $PILLARFILE } +# Add IDH Services info to the minion file +function add_idh_to_minion() { + printf '%s\n'\ + "idh:"\ + " restrict_management_ip: $IDH_MGTRESTRICT"\ + " services:" >> "$PILLARFILE" + IFS=',' read -ra 
IDH_SERVICES_ARRAY <<< "$IDH_SERVICES" + for service in ${IDH_SERVICES_ARRAY[@]}; do + echo " - $service" | tr '[:upper:]' '[:lower:]' | tr -d '"' >> "$PILLARFILE" + done +} + function add_logstash_to_minion() { # Create the logstash advanced pillar printf '%s\n'\ @@ -183,8 +195,8 @@ function createEVAL() { add_sensor_to_minion } -function createIDHNODE() { - echo "Nothing custom needed for IDH nodes" +function createIDH() { + add_idh_to_minion } function createIMPORT() { diff --git a/salt/docker/defaults.yaml b/salt/docker/defaults.yaml index 6886703db..30b2c78e1 100644 --- a/salt/docker/defaults.yaml +++ b/salt/docker/defaults.yaml @@ -102,3 +102,5 @@ docker: final_octet: 44 port_bindings: - 0.0.0.0:8080:8080/tcp + 'so-idh': + final_octet: 45 diff --git a/salt/firewall/assigned_hostgroups.map.yaml b/salt/firewall/assigned_hostgroups.map.yaml index 25dbba1b0..4253d9bc2 100644 --- a/salt/firewall/assigned_hostgroups.map.yaml +++ b/salt/firewall/assigned_hostgroups.map.yaml @@ -2,6 +2,7 @@ {% import_yaml 'firewall/ports/ports.yaml' as portgroups %} {% set portgroups = portgroups.firewall.ports %} {% set TRUE_CLUSTER = salt['pillar.get']('elasticsearch:true_cluster', True) %} +{% from 'idh/opencanary_config.map.jinja' import IDH_PORTGROUPS %} role: eval: @@ -573,7 +574,7 @@ role: portgroups: {% set idh_services = salt['pillar.get']('idh:services', []) %} {% for service in idh_services %} - - {{ portgroups['idh_'~service] }} + - {{ IDH_PORTGROUPS['idh_'~service] }} {% endfor %} dockernet: portgroups: @@ -583,4 +584,7 @@ role: - {{ portgroups.all }} manager: portgroups: - - {{ portgroups.ssh }} + - {{ IDH_PORTGROUPS.openssh }} + standalone: + portgroups: + - {{ IDH_PORTGROUPS.openssh }} diff --git a/salt/firewall/containers.map.jinja b/salt/firewall/containers.map.jinja index 702f2ff63..70a676e89 100644 --- a/salt/firewall/containers.map.jinja +++ b/salt/firewall/containers.map.jinja @@ -1,3 +1,31 @@ +{% from 'vars/globals.map.jinja' import GLOBALS %} + +{% if 
GLOBALS.role == 'so-eval' %} +{% set NODE_CONTAINERS = [ + 'so-curator', + 'so-dockerregistry', + 'so-elasticsearch', + 'so-elastic-fleet', + 'so-elastic-fleet-package-registry', + 'so-grafana', + 'so-influxdb', + 'so-kibana', + 'so-kratos', + 'so-mysql', + 'so-nginx', + 'so-redis', + 'so-soc', + 'so-soctopus', + 'so-strelka-coordinator', + 'so-strelka-gatekeeper', + 'so-strelka-frontend', + 'so-strelka-backend', + 'so-strelka-manager', + 'so-strelka-filestream' +] %} +{% endif %} + +{% if GLOBALS.role == 'so-manager' or GLOBALS.role == 'so-standalone' or GLOBALS.role == 'so-managersearch' %} {% set NODE_CONTAINERS = [ 'so-curator', 'so-dockerregistry', @@ -21,3 +49,59 @@ 'so-strelka-manager', 'so-strelka-filestream' ] %} +{% endif %} + +{% if GLOBALS.role == 'so-searchnode' %} +{% set NODE_CONTAINERS = [ + 'so-elasticsearch', + 'so-filebeat', + 'so-logstash', + 'so-nginx' +] %} +{% endif %} + +{% if GLOBALS.role == 'so-heavynode' %} +{% set NODE_CONTAINERS = [ + 'so-curator', + 'so-elasticsearch', + 'so-filebeat', + 'so-logstash', + 'so-nginx', + 'so-redis', + 'so-strelka-coordinator', + 'so-strelka-gatekeeper', + 'so-strelka-frontend', + 'so-strelka-backend', + 'so-strelka-manager', + 'so-strelka-filestream' +] %} +{% endif %} + +{% if GLOBALS.role == 'so-import' %} +{% set NODE_CONTAINERS = [ + 'so-dockerregistry', + 'so-elasticsearch', + 'so-elastic-fleet', + 'so-elastic-fleet-package-registry', + 'so-filebeat', + 'so-influxdb', + 'so-kibana', + 'so-kratos', + 'so-nginx', + 'so-soc' +] %} +{% endif %} + +{% if GLOBALS.role == 'so-receiver' %} +{% set NODE_CONTAINERS = [ + 'so-filebeat', + 'so-logstash', + 'so-redis', +] %} +{% endif %} + +{% if GLOBALS.role == 'so-idh' %} +{% set NODE_CONTAINERS = [ + 'so-idh', +] %} +{% endif %} diff --git a/salt/firewall/map.jinja b/salt/firewall/map.jinja index ea2d1b4e8..06586ddf2 100644 --- a/salt/firewall/map.jinja +++ b/salt/firewall/map.jinja @@ -1,3 +1,4 @@ +{% from 'vars/globals.map.jinja' import GLOBALS %} {% set 
role = grains.id.split('_') | last %} {% set translated_pillar_assigned_hostgroups = {} %} @@ -9,9 +10,15 @@ {% else %} {% set local_portgroups = {} %} {% endif %} + {% set portgroups = salt['defaults.merge'](default_portgroups, local_portgroups, in_place=False) %} {% set defined_portgroups = portgroups %} +{% if GLOBALS.role == 'so-idh' %} +{% from 'idh/opencanary_config.map.jinja' import IDH_PORTGROUPS %} +{% do salt['defaults.merge'](defined_portgroups, IDH_PORTGROUPS, in_place=True) %} +{% endif %} + {% set local_hostgroups = {'firewall': {'hostgroups': {}}} %} {% set hostgroup_list = salt['cp.list_master'](prefix='firewall/hostgroups') %} diff --git a/salt/idh/opencanary_config.map.jinja b/salt/idh/opencanary_config.map.jinja index c4533682d..cb601c163 100644 --- a/salt/idh/opencanary_config.map.jinja +++ b/salt/idh/opencanary_config.map.jinja @@ -1,11 +1,25 @@ {% set idh_services = salt['pillar.get']('idh:services', []) %} -{% import_yaml "idh/defaults/defaults.yaml" as OPENCANARYCONFIG with context %} +{% set IDH_PORTGROUPS = {} %} + +{% import_yaml "idh/defaults/defaults.yaml" as IDHCONFIG with context %} {% for service in idh_services %} {% import_yaml "idh/defaults/" ~ service ~ ".defaults.yaml" as SERVICECONFIG with context %} - {% do salt['defaults.merge'](OPENCANARYCONFIG, SERVICECONFIG, in_place=True) %} + {% do salt['defaults.merge'](IDHCONFIG, SERVICECONFIG, in_place=True) %} {% endfor %} -{% set OPENCANARYCONFIG = salt['pillar.get']('idh:opencanary:config', default=OPENCANARYCONFIG.idh.opencanary.config, merge=True) %} -{% do OPENCANARYCONFIG.idh.opencanary.config.update({'device.node_id': grains.host}) %} +{% set OPENCANARYCONFIG = salt['pillar.get']('idh:opencanary:config', default=IDHCONFIG.idh.opencanary.config, merge=True) %} +{% set OPENSSH = salt['pillar.get']('idh:openssh', default=IDHCONFIG.idh.openssh, merge=True) %} +{% for service in idh_services %} + {% if service in ["snmp","ntp", "tftp"] %} + {% set proto = 'udp' %} + {% else %} + {% 
set proto = 'tcp' %} + {% endif %} + {% do IDH_PORTGROUPS.update({'idh_' ~ service: {proto: [OPENCANARYCONFIG[service ~ '.port']]}}) %} +{% endfor %} + +{% do IDH_PORTGROUPS.update({'openssh': {'tcp': [OPENSSH.config.port]}}) %} + +{% do OPENCANARYCONFIG.update({'device.node_id': grains.host}) %} diff --git a/salt/top.sls b/salt/top.sls index 3c4401478..529bdd2a4 100644 --- a/salt/top.sls +++ b/salt/top.sls @@ -368,9 +368,6 @@ base: - firewall - schedule - docker_clean - {%- if FILEBEAT %} - - filebeat - {%- endif %} - idh 'J@workstation:gui:enabled:^[Tt][Rr][Uu][Ee]$ and ( G@saltversion:{{saltversion}} and G@os:CentOS )': diff --git a/salt/vars/idh.map.jinja b/salt/vars/idh.map.jinja new file mode 100644 index 000000000..396cefcc9 --- /dev/null +++ b/salt/vars/idh.map.jinja @@ -0,0 +1 @@ +{% set ROLE_GLOBALS = {} %} \ No newline at end of file diff --git a/setup/so-functions b/setup/so-functions index 34bbe0610..8139b60a1 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -339,6 +339,33 @@ collect_hostname_validate() { done } +collect_idh_preferences() { + IDH_MGTRESTRICT='False' + whiptail_idh_preferences + + if [[ "$idh_preferences" != "" ]]; then IDH_MGTRESTRICT='True'; fi +} + +collect_idh_services() { + whiptail_idh_services + + case "$IDH_SERVICES" in + 'Linux Webserver (NAS Skin)') + IDH_SERVICES='"HTTP","FTP","SSH"' + ;; + 'MySQL Server') + IDH_SERVICES='"MYSQL","SSH"' + ;; + 'MSSQL Server') + IDH_SERVICES='"MSSQL","VNC"' + ;; + 'Custom') + whiptail_idh_services_custom + IDH_SERVICES=$(echo "$IDH_SERVICES" | tr '[:blank:]' ',' ) + ;; + esac +} + collect_int_ip_mask() { whiptail_management_interface_ip_mask @@ -1875,6 +1902,8 @@ drop_install_options() { echo "LSHOSTNAME=$HOSTNAME" >> /opt/so/install.txt echo "LSHEAP=$LS_HEAP_SIZE" >> /opt/so/install.txt echo "CPUCORES=$num_cpu_cores" >> /opt/so/install.txt + echo "IDH_MGTRESTRICT=$IDH_MGTRESTRICT" >> /opt/so/install.txt + echo "IDH_SERVICES=$IDH_SERVICES" >> /opt/so/install.txt } remove_package() { 
diff --git a/setup/so-setup b/setup/so-setup index 9ecbed08c..a1b672918 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -453,6 +453,8 @@ if ! [[ -f $install_opt_file ]]; then collect_mngr_hostname add_mngr_ip_to_hosts check_manager_connection + collect_idh_services + collect_idh_preferences set_minion_info whiptail_end_settings @@ -538,8 +540,6 @@ if ! [[ -f $install_opt_file ]]; then export NODETYPE=$install_type export MINION_ID=$MINION_ID export ES_HEAP_SIZE=$ES_HEAP_SIZE - export IDHMGTRESTRICT=$IDHMGTRESTRICT - export idh_services=$idh_services export MNIC=$MNIC export NODE_DESCRIPTION=$NODE_DESCRIPTION export MAINIP=$MAINIP diff --git a/setup/so-whiptail b/setup/so-whiptail index 715baa375..6123ea97d 100755 --- a/setup/so-whiptail +++ b/setup/so-whiptail @@ -466,6 +466,58 @@ whiptail_gauge_post_setup() { fi } + whiptail_idh_preferences() { + + [ -n "$TESTING" ] && return + + idh_preferences=$(whiptail --title "$whiptail_title" --radiolist \ + "\nBy default, the IDH services selected in the previous screen will be bound to all interfaces and IP addresses on this system.\n\nIf you would like to prevent IDH services from being published on this system's management IP, you can select the option below." 20 75 5 \ + "$MAINIP" "Disable IDH services on this management IP " OFF 3>&1 1>&2 2>&3 ) + + local exitstatus=$? + whiptail_check_exitstatus $exitstatus +} + +whiptail_idh_services() { + + [ -n "$TESTING" ] && return + + IDH_SERVICES=$(whiptail --title "$whiptail_title" --radiolist \ + "\nThe IDH node can mimic many different services.\n\nChoose one of the common options along with their default ports (TCP) or select the Custom option to build a customized set of services." 20 75 5 \ + "Linux Webserver (NAS Skin)" "Apache (80), FTP (21), SSH (22)" ON \ + "MySQL Server" "MySQL (3306), SSH (22)" OFF \ + "MSSQL Server" "Microsoft SQL (1433), VNC (5900)" OFF \ + "Custom" "Select a custom set of services" OFF 3>&1 1>&2 2>&3 ) + + local exitstatus=$? 
+ whiptail_check_exitstatus $exitstatus +} + + +whiptail_idh_services_custom() { + + [ -n "$TESTING" ] && return + + IDH_SERVICES=$(whiptail --title "$whiptail_title" --checklist \ + "\nThe IDH node can mimic many different services.\n\nChoose one or more of the following services along with their default ports. Some services have additional configuration options, please consult the documentation for further information." 25 75 8 \ + "FTP" " TCP/21, Additional Configuration Available " OFF \ + "Git" " TCP/9418 " OFF \ + "HTTP" " TCP/80, Additional Configuration Available " OFF \ + "HTTPPROXY" " TCP/8080, Additional Configuration Available " OFF \ + "MSSQL" " TCP/1433 " OFF \ + "MySQL" " TCP/3306, Additional Configuration Available " OFF \ + "NTP" " UDP/123 " OFF \ + "REDIS" " TCP/6379 " OFF \ + "SNMP" " UDP/161 " OFF \ + "SSH" " TCP/22, Additional Configuration Available " OFF \ + "TELNET" " TCP/23, Additional Configuration Available " OFF \ + "TFTP" " UDP/69 " OFF \ + "VNC" " TCP/5900 " OFF 3>&1 1>&2 2>&3 ) + + local exitstatus=$? 
+ whiptail_check_exitstatus $exitstatus +} + whiptail_install_type() { [ -n "$TESTING" ] && return From ecf70847fdf58c84540ff0a748829bb0058a28bf Mon Sep 17 00:00:00 2001 From: weslambert Date: Wed, 22 Feb 2023 16:23:48 -0500 Subject: [PATCH 2/2] Change 'GLOBALS.minion_id' to 'GLOBALS.hostname' for 'analyzerNodeId' value to ensure SOC creates analyzer jobs in the correct directory --- salt/soc/defaults.map.jinja | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/soc/defaults.map.jinja b/salt/soc/defaults.map.jinja index 17253b7fb..742d3d0dc 100644 --- a/salt/soc/defaults.map.jinja +++ b/salt/soc/defaults.map.jinja @@ -27,6 +27,6 @@ {% do SOCDEFAULTS.soc.server.modules.statickeyauth.update({'anonymousCidr': DOCKER.sorange, 'apiKey': pillar.sensoroni.sensoronikey}) %} -{% do SOCDEFAULTS.soc.server.client.case.update({'analyzerNodeId': GLOBALS.minion_id}) %} +{% do SOCDEFAULTS.soc.server.client.case.update({'analyzerNodeId': GLOBALS.hostname}) %} {% set SOCDEFAULTS = SOCDEFAULTS.soc %}