Initial support for IDH

Josh Brower
2023-02-21 11:52:37 -05:00
parent eef81fdd1b
commit b62cc32b1a
13 changed files with 216 additions and 15 deletions

View File

@@ -81,7 +81,6 @@
 'ssl',
 'telegraf',
 'firewall',
-'filebeat',
 'idh',
 'schedule',
 'docker_clean'

View File

@@ -69,7 +69,7 @@ fi
 so-firewall --apply --role=heavynodes --ip="$IP"
 ;;
 'IDH')
-so-firewall --apply --role=beats_endpoint_ssl --ip="$IP"
+so-firewall --apply --role=sensors --ip="$IP"
 ;;
 'RECEIVER')
 so-firewall --apply --role=receivers --ip="$IP"

View File

@@ -119,6 +119,18 @@ function add_elastic_to_minion() {
" " >> $PILLARFILE
}
# Add IDH Services info to the minion file
function add_idh_to_minion() {
printf '%s\n'\
"idh:"\
" restrict_management_ip: $IDH_MGTRESTRICT"\
" services:" >> "$PILLARFILE"
IFS=',' read -ra IDH_SERVICES_ARRAY <<< "$IDH_SERVICES"
for service in ${IDH_SERVICES_ARRAY[@]}; do
echo " - $service" | tr '[:upper:]' '[:lower:]' | tr -d '"' >> "$PILLARFILE"
done
}
 function add_logstash_to_minion() {
 # Create the logstash advanced pillar
 printf '%s\n'\
@@ -183,8 +195,8 @@ function createEVAL() {
 add_sensor_to_minion
 }
-function createIDHNODE() {
-echo "Nothing custom needed for IDH nodes"
+function createIDH() {
+add_idh_to_minion
 }
 function createIMPORT() {
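
For reference, a minimal sketch (not part of the commit) of the pillar block that add_idh_to_minion appends, assuming the function above has been sourced and the installer collected the "Linux Webserver (NAS Skin)" preset with the management-IP restriction enabled; the path and values are illustrative only:

# Hypothetical inputs; in a real install these are collected by the installer prompts further below.
PILLARFILE=/tmp/example_minion.sls
IDH_MGTRESTRICT='True'
IDH_SERVICES='"HTTP","FTP","SSH"'
add_idh_to_minion
# The pillar file now ends with (indentation per the printf/echo calls above):
# idh:
#   restrict_management_ip: True
#   services:
#     - http
#     - ftp
#     - ssh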

View File

@@ -102,3 +102,5 @@ docker:
final_octet: 44
port_bindings:
- 0.0.0.0:8080:8080/tcp
'so-idh':
final_octet: 45

View File

@@ -2,6 +2,7 @@
{% import_yaml 'firewall/ports/ports.yaml' as portgroups %}
{% set portgroups = portgroups.firewall.ports %}
{% set TRUE_CLUSTER = salt['pillar.get']('elasticsearch:true_cluster', True) %}
{% from 'idh/opencanary_config.map.jinja' import IDH_PORTGROUPS %}
role:
eval:
@@ -573,7 +574,7 @@ role:
 portgroups:
 {% set idh_services = salt['pillar.get']('idh:services', []) %}
 {% for service in idh_services %}
-- {{ portgroups['idh_'~service] }}
+- {{ IDH_PORTGROUPS['idh_'~service] }}
 {% endfor %}
 dockernet:
 portgroups:
@@ -583,4 +584,7 @@ role:
 - {{ portgroups.all }}
 manager:
 portgroups:
-- {{ portgroups.ssh }}
+- {{ IDH_PORTGROUPS.openssh }}
standalone:
portgroups:
- {{ IDH_PORTGROUPS.openssh }}

View File

@@ -1,3 +1,31 @@
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% if GLOBALS.role == 'so-eval' %}
{% set NODE_CONTAINERS = [
'so-curator',
'so-dockerregistry',
'so-elasticsearch',
'so-elastic-fleet',
'so-elastic-fleet-package-registry',
'so-grafana',
'so-influxdb',
'so-kibana',
'so-kratos',
'so-mysql',
'so-nginx',
'so-redis',
'so-soc',
'so-soctopus',
'so-strelka-coordinator',
'so-strelka-gatekeeper',
'so-strelka-frontend',
'so-strelka-backend',
'so-strelka-manager',
'so-strelka-filestream'
] %}
{% endif %}
{% if GLOBALS.role == 'so-manager' or GLOBALS.role == 'so-standalone' or GLOBALS.role == 'so-managersearch' %}
{% set NODE_CONTAINERS = [
'so-curator',
'so-dockerregistry',
@@ -21,3 +49,59 @@
'so-strelka-manager',
'so-strelka-filestream'
] %}
{% endif %}
{% if GLOBALS.role == 'so-searchnode' %}
{% set NODE_CONTAINERS = [
'so-elasticsearch',
'so-filebeat',
'so-logstash',
'so-nginx'
] %}
{% endif %}
{% if GLOBALS.role == 'so-heavynode' %}
{% set NODE_CONTAINERS = [
'so-curator',
'so-elasticsearch',
'so-filebeat',
'so-logstash',
'so-nginx',
'so-redis',
'so-strelka-coordinator',
'so-strelka-gatekeeper',
'so-strelka-frontend',
'so-strelka-backend',
'so-strelka-manager',
'so-strelka-filestream'
] %}
{% endif %}
{% if GLOBALS.role == 'so-import' %}
{% set NODE_CONTAINERS = [
'so-dockerregistry',
'so-elasticsearch',
'so-elastic-fleet',
'so-elastic-fleet-package-registry',
'so-filebeat',
'so-influxdb',
'so-kibana',
'so-kratos',
'so-nginx',
'so-soc'
] %}
{% endif %}
{% if GLOBALS.role == 'so-receiver' %}
{% set NODE_CONTAINERS = [
'so-filebeat',
'so-logstash',
'so-redis',
] %}
{% endif %}
{% if GLOBALS.role == 'so-idh' %}
{% set NODE_CONTAINERS = [
'so-idh',
] %}
{% endif %}

View File

@@ -1,3 +1,4 @@
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% set role = grains.id.split('_') | last %}
{% set translated_pillar_assigned_hostgroups = {} %}
@@ -9,9 +10,15 @@
{% else %}
{% set local_portgroups = {} %}
{% endif %}
{% set portgroups = salt['defaults.merge'](default_portgroups, local_portgroups, in_place=False) %}
{% set defined_portgroups = portgroups %}
{% if GLOBALS.role == 'so-idh' %}
{% from 'idh/opencanary_config.map.jinja' import IDH_PORTGROUPS %}
{% do salt['defaults.merge'](defined_portgroups, IDH_PORTGROUPS, in_place=True) %}
{% endif %}
{% set local_hostgroups = {'firewall': {'hostgroups': {}}} %}
{% set hostgroup_list = salt['cp.list_master'](prefix='firewall/hostgroups') %}

View File

@@ -1,11 +1,25 @@
 {% set idh_services = salt['pillar.get']('idh:services', []) %}
-{% import_yaml "idh/defaults/defaults.yaml" as OPENCANARYCONFIG with context %}
+{% set IDH_PORTGROUPS = {} %}
+{% import_yaml "idh/defaults/defaults.yaml" as IDHCONFIG with context %}
 {% for service in idh_services %}
 {% import_yaml "idh/defaults/" ~ service ~ ".defaults.yaml" as SERVICECONFIG with context %}
-{% do salt['defaults.merge'](OPENCANARYCONFIG, SERVICECONFIG, in_place=True) %}
+{% do salt['defaults.merge'](IDHCONFIG, SERVICECONFIG, in_place=True) %}
 {% endfor %}
-{% set OPENCANARYCONFIG = salt['pillar.get']('idh:opencanary:config', default=OPENCANARYCONFIG.idh.opencanary.config, merge=True) %}
-{% do OPENCANARYCONFIG.idh.opencanary.config.update({'device.node_id': grains.host}) %}
+{% set OPENCANARYCONFIG = salt['pillar.get']('idh:opencanary:config', default=IDHCONFIG.idh.opencanary.config, merge=True) %}
+{% set OPENSSH = salt['pillar.get']('idh:openssh', default=IDHCONFIG.idh.openssh, merge=True) %}
{% for service in idh_services %}
{% if service in ["snmp","ntp", "tftp"] %}
{% set proto = 'udp' %}
{% else %}
{% set proto = 'tcp' %}
{% endif %}
{% do IDH_PORTGROUPS.update({'idh_' ~ service: {proto: [OPENCANARYCONFIG[service ~ '.port']]}}) %}
{% endfor %}
{% do IDH_PORTGROUPS.update({'openssh': {'tcp': [OPENSSH.config.port]}}) %}
{% do OPENCANARYCONFIG.update({'device.node_id': grains.host}) %}

View File

@@ -368,9 +368,6 @@ base:
 - firewall
 - schedule
 - docker_clean
-{%- if FILEBEAT %}
-- filebeat
-{%- endif %}
 - idh
'J@workstation:gui:enabled:^[Tt][Rr][Uu][Ee]$ and ( G@saltversion:{{saltversion}} and G@os:CentOS )': 'J@workstation:gui:enabled:^[Tt][Rr][Uu][Ee]$ and ( G@saltversion:{{saltversion}} and G@os:CentOS )':

salt/vars/idh.map.jinja Normal file
View File

@@ -0,0 +1 @@
{% set ROLE_GLOBALS = {} %}

View File

@@ -339,6 +339,33 @@ collect_hostname_validate() {
done
}
collect_idh_preferences() {
IDH_MGTRESTRICT='False'
whiptail_idh_preferences
if [[ "$idh_preferences" != "" ]]; then IDH_MGTRESTRICT='True'; fi
}
collect_idh_services() {
whiptail_idh_services
case "$IDH_SERVICES" in
'Linux Webserver (NAS Skin)')
IDH_SERVICES='"HTTP","FTP","SSH"'
;;
'MySQL Server')
IDH_SERVICES='"MYSQL","SSH"'
;;
'MSSQL Server')
IDH_SERVICES='"MSSQL","VNC"'
;;
'Custom')
whiptail_idh_services_custom
IDH_SERVICES=$(echo "$IDH_SERVICES" | tr '[:blank:]' ',' )
;;
esac
}
collect_int_ip_mask() {
whiptail_management_interface_ip_mask
@@ -1875,6 +1902,8 @@ drop_install_options() {
echo "LSHOSTNAME=$HOSTNAME" >> /opt/so/install.txt
echo "LSHEAP=$LS_HEAP_SIZE" >> /opt/so/install.txt
echo "CPUCORES=$num_cpu_cores" >> /opt/so/install.txt
echo "IDH_MGTRESTRICT=$IDH_MGTRESTRICT" >> /opt/so/install.txt
echo "IDH_SERVICES=$IDH_SERVICES" >> /opt/so/install.txt
}
remove_package() {
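
As an aside on the Custom branch above: whiptail --checklist returns the chosen tags as space-separated quoted tokens, and the tr call rewrites that into the comma-separated form that add_idh_to_minion later splits on. A small sketch with an assumed selection:

# Assumed raw checklist output for an FTP + HTTP + SSH selection:
IDH_SERVICES='"FTP" "HTTP" "SSH"'
IDH_SERVICES=$(echo "$IDH_SERVICES" | tr '[:blank:]' ',')
echo "$IDH_SERVICES"    # -> "FTP","HTTP","SSH"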

View File

@@ -453,6 +453,8 @@ if ! [[ -f $install_opt_file ]]; then
collect_mngr_hostname
add_mngr_ip_to_hosts
check_manager_connection
collect_idh_services
collect_idh_preferences
set_minion_info
whiptail_end_settings
@@ -538,8 +540,6 @@ if ! [[ -f $install_opt_file ]]; then
 export NODETYPE=$install_type
 export MINION_ID=$MINION_ID
 export ES_HEAP_SIZE=$ES_HEAP_SIZE
-export IDHMGTRESTRICT=$IDHMGTRESTRICT
-export idh_services=$idh_services
 export MNIC=$MNIC
 export NODE_DESCRIPTION=$NODE_DESCRIPTION
 export MAINIP=$MAINIP

View File

@@ -466,6 +466,58 @@ whiptail_gauge_post_setup() {
fi
}
whiptail_idh_preferences() {
[ -n "$TESTING" ] && return
idh_preferences=$(whiptail --title "$whiptail_title" --radiolist \
"\nBy default, the IDH services selected in the previous screen will be bound to all interfaces and IP addresses on this system.\n\nIf you would like to prevent IDH services from being published on this system's management IP, you can select the option below." 20 75 5 \
"$MAINIP" "Disable IDH services on this management IP " OFF 3>&1 1>&2 2>&3 )
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
}
whiptail_idh_services() {
[ -n "$TESTING" ] && return
IDH_SERVICES=$(whiptail --title "$whiptail_title" --radiolist \
"\nThe IDH node can mimic many different services.\n\nChoose one of the common options along with their default ports (TCP) or select the Custom option to build a customized set of services." 20 75 5 \
"Linux Webserver (NAS Skin)" "Apache (80), FTP (21), SSH (22)" ON \
"MySQL Server" "MySQL (3306), SSH (22)" OFF \
"MSSQL Server" "Microsoft SQL (1433), VNC (5900)" OFF \
"Custom" "Select a custom set of services" OFF 3>&1 1>&2 2>&3 )
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
}
whiptail_idh_services_custom() {
[ -n "$TESTING" ] && return
IDH_SERVICES=$(whiptail --title "$whiptail_title" --checklist \
"\nThe IDH node can mimic many different services.\n\nChoose one or more of the following services along with their default ports. Some services have additional configuration options, please consult the documentation for further information." 25 75 8 \
"FTP" " TCP/21, Additional Configuration Available " OFF \
"Git" " TCP/9418 " OFF \
"HTTP" " TCP/80, Additional Configuration Available " OFF \
"HTTPPROXY" " TCP/8080, Additional Configuration Available " OFF \
"MSSQL" " TCP/1433 " OFF \
"MySQL" " TCP/3306, Additional Configuration Available " OFF \
"NTP" " UDP/123 " OFF \
"REDIS" " TCP/6379 " OFF \
"SNMP" " UDP/161 " OFF \
"SSH" " TCP/22, Additional Configuration Available " OFF \
"TELNET" " TCP/23, Additional Configuration Available " OFF \
"TFTP" " UDP/69 " OFF \
"VNC" " TCP/5900 " OFF 3>&1 1>&2 2>&3 )
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
}
whiptail_install_type() {
[ -n "$TESTING" ] && return