2.4 firewall changes

m0duspwnens
2022-12-21 15:03:45 -05:00
27 changed files with 270 additions and 169 deletions

View File

@@ -1,12 +1,14 @@
 {%- set DOCKERRANGE = salt['pillar.get']('docker:range', '172.17.0.0/24') %}
 {%- set DOCKERBIND = salt['pillar.get']('docker:bip', '172.17.0.1/24') %}
 {
-  "registry-mirrors": [ "https://:5000" ],
+  "registry-mirrors": [
+    "https://:5000"
+  ],
   "bip": "{{ DOCKERBIND }}",
   "default-address-pools": [
     {
-      "base" : "{{ DOCKERRANGE }}",
-      "size" : 24
+      "base": "{{ DOCKERRANGE }}",
+      "size": 24
     }
   ]
 }

salt/docker/defaults.yaml Normal file
View File

@@ -0,0 +1,80 @@
docker:
  bip: '172.17.0.1'
  range: '172.17.0.0/24'
  sosrange: '172.17.1.0/24'
  sosbip: '172.17.1.1'
  containers:
    'so-dockerregistry':
      final_octet: 20
      ports:
        5000: tcp
    'so-elastic-fleet':
      final_octet: 21
    'so-elasticsearch':
      final_octet: 22
      ports:
        9200: tcp
        9300: tcp
    'so-filebeat':
      final_octet: 23
    'so-grafana':
      final_octet: 24
      ports:
        3000: tcp
    'so-idstools':
      final_octet: 25
    'so-influxdb':
      final_octet: 26
      ports:
        8086: tcp
    'so-kibana':
      final_octet: 27
      ports:
        5601: tcp
    'so-kratos':
      final_octet: 28
      ports:
        4433: tcp
        4434: tcp
    'so-logstash':
      final_octet: 29
    'so-mysql':
      final_octet: 30
      ports:
        3306: tcp
    'so-nginx':
      final_octet: 31
      ports:
        80: tcp
        443: tcp
    'so-playbook':
      final_octet: 32
    'so-redis':
      final_octet: 33
      ports:
        6379: tcp
        9696: tcp
    'so-soc':
      final_octet: 34
      ports:
        9822: tcp
    'so-soctopus':
      final_octet: 35
      ports:
        7000: tcp
    'so-strelka-backend':
      final_octet: 36
    'so-strelka-filestream':
      final_octet: 37
    'so-strelka-frontend':
      final_octet: 38
    'so-strelka-manager':
      final_octet: 39
    'so-strelka-gatekeeper':
      final_octet: 40
    'so-strelka-coordinator':
      final_octet: 41
    'so-elastalert':
      final_octet: 42
    'so-curator':
      final_octet: 43
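Note: these defaults are only a baseline; the next file (docker.map.jinja) pulls the docker pillar over them with merge=True, so a deployment can re-home the SOS range or move a single container without touching this file. An illustrative pillar sketch (values here are hypothetical, not part of this commit):

    # illustrative pillar override, merged over docker/defaults.yaml by docker.map.jinja
    docker:
      sosrange: '172.18.1.0/24'   # hypothetical alternate /24 for sosnet
      sosbip: '172.18.1.1'
      containers:
        'so-elasticsearch':
          final_octet: 50         # moves only this container within the range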

View File

@@ -0,0 +1,8 @@
{% import_yaml 'docker/defaults.yaml' as DOCKERDEFAULTS %}
{% set DOCKER = salt['pillar.get']('docker', DOCKERDEFAULTS.docker, merge=True) %}
{% set RANGESPLIT = DOCKER.sosrange.split('.') %}
{% set FIRSTTHREE = RANGESPLIT[0] ~ '.' ~ RANGESPLIT[1] ~ '.' ~ RANGESPLIT[2] ~ '.' %}
{% for container, vals in DOCKER.containers.items() %}
{% do DOCKER.containers[container].update({'ip': FIRSTTHREE ~ DOCKER.containers[container].final_octet}) %}
{% endfor %}
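Note: with the shipped defaults (sosrange 172.17.1.0/24), this loop leaves DOCKER.containers looking roughly like the sketch below, which is what the container states later in this commit consume via DOCKER.containers['<name>'].ip:

    # rendered result (abridged), assuming no pillar overrides
    containers:
      'so-dockerregistry':
        final_octet: 20
        ip: 172.17.1.20
        ports:
          5000: tcp
      'so-elasticsearch':
        final_octet: 22
        ip: 172.17.1.22
        ports:
          9200: tcp
          9300: tcp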

View File

@@ -3,6 +3,8 @@
 # https://securityonion.net/license; you may not use this file except in compliance with the
 # Elastic License 2.0.
 
+{% from 'docker/docker.map.jinja' import DOCKER %}
+
 dockergroup:
   group.present:
     - name: docker
@@ -50,3 +52,13 @@ dockerreserveports:
     - source: salt://common/files/99-reserved-ports.conf
     - name: /etc/sysctl.d/99-reserved-ports.conf
+
+sos_docker_net:
+  docker_network.present:
+    - name: sosnet
+    - subnet: {{ DOCKER.sosrange }}
+    - gateway: {{ DOCKER.sosbip }}
+    - options:
+        com.docker.network.bridge.name: 'sosbridge'
+        com.docker.network.driver.mtu: '1500'
+        com.docker.network.bridge.enable_ip_masquerade: 'true'
+    - unless: 'docker network ls | grep sosnet'

View File

@@ -4,6 +4,7 @@
 {% from 'allowed_states.map.jinja' import allowed_states %}
 {% if sls in allowed_states %}
 {% from 'vars/globals.map.jinja' import GLOBALS %}
+{% from 'docker/docker.map.jinja' import DOCKER %}
 
 # These values are generated during node install and stored in minion pillar
 {% set SERVICETOKEN = salt['pillar.get']('elasticfleet:server:es_token','') %}
@@ -47,6 +48,9 @@ so-elastic-fleet:
     - hostname: Fleet-{{ GLOBALS.hostname }}
     - detach: True
     - user: 947
+    - networks:
+      - sosnet:
+        - ipv4_address: {{ DOCKER.containers['so-elastic-fleet'].ip }}
     - extra_hosts:
       - {{ GLOBALS.hostname }}:{{ GLOBALS.node_ip }}
     - port_bindings:

View File

@@ -10,6 +10,7 @@ include:
   - ssl
 
 {% from 'vars/globals.map.jinja' import GLOBALS %}
+{% from 'docker/docker.map.jinja' import DOCKER %}
 {% set TEMPLATES = salt['pillar.get']('elasticsearch:templates', {}) %}
 {% set ROLES = salt['pillar.get']('elasticsearch:roles', {}) %}
 {% from 'elasticsearch/config.map.jinja' import ESCONFIG with context %}
@@ -289,6 +290,9 @@ so-elasticsearch:
     - hostname: elasticsearch
     - name: so-elasticsearch
     - user: elasticsearch
+    - networks:
+      - sosnet:
+        - ipv4_address: {{ DOCKER.containers['so-elasticsearch'].ip }}
     - extra_hosts: {{ REDIS_NODES }}
     - environment:
 {% if REDIS_NODES | length == 1 %}

View File

@@ -5,6 +5,7 @@
 {% from 'allowed_states.map.jinja' import allowed_states %}
 {% if sls in allowed_states %}
+{% from 'docker/docker.map.jinja' import DOCKER %}
 {% from 'vars/globals.map.jinja' import GLOBALS %}
 {% from 'filebeat/modules.map.jinja' import MODULESMERGED with context %}
 {% from 'filebeat/modules.map.jinja' import MODULESENABLED with context %}
@@ -97,6 +98,9 @@ so-filebeat:
     - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-filebeat:{{ GLOBALS.so_version }}
     - hostname: so-filebeat
     - user: root
+    - networks:
+      - sosnet:
+        - ipv4_address: {{ DOCKER.containers['so-filebeat'].ip }}
     - extra_hosts: {{ FILEBEAT_EXTRA_HOSTS }}
     - binds:
       - /nsm:/nsm:ro

View File

@@ -0,0 +1,13 @@
{% set NODE_CONTAINERS = [
'so-dockerregistry',
'so-elasticsearch',
'so-grafana',
'so-influxdb',
'so-kibana',
'so-kratos',
'so-mysql',
'so-nginx',
'so-redis',
'so-soc',
'so-soctopus'
] %}

View File

@@ -1,142 +1,23 @@
 {% from 'allowed_states.map.jinja' import allowed_states %}
 {% if sls in allowed_states %}
 
-# Firewall Magic for the grid
-{% from 'firewall/map.jinja' import hostgroups with context %}
-{% from 'firewall/map.jinja' import assigned_hostgroups with context %}
-
 create_sysconfig_iptables:
   file.touch:
     - name: /etc/sysconfig/iptables
     - makedirs: True
     - unless: 'ls /etc/sysconfig/iptables'
 
+iptables_config:
+  file.managed:
+    - name: /etc/sysconfig/iptables
+    - source: salt://firewall/iptables.jinja
+    - template: jinja
+
+iptables_restore:
+  cmd.run:
+    - name: iptables-restore < /etc/sysconfig/iptables
+    - onchanges:
+      - file: iptables_config
+
-# Quick Fix for Docker being difficult
-iptables_fix_docker:
-  iptables.chain_present:
-    - name: DOCKER-USER
-    - table: filter
-
-# Add the Forward Rule since Docker ripped it out
-iptables_fix_fwd:
-  iptables.insert:
-    - table: filter
-    - chain: FORWARD
-    - jump: ACCEPT
-    - position: 1
-    - target: DOCKER-USER
-
-# I like pings
-iptables_allow_pings:
-  iptables.append:
-    - table: filter
-    - chain: INPUT
-    - jump: ACCEPT
-    - proto: icmp
-
-# Create the chain for logging
-iptables_LOGGING_chain:
-  iptables.chain_present:
-    - name: LOGGING
-    - table: filter
-    - family: ipv4
-
-iptables_LOGGING_limit:
-  iptables.append:
-    - table: filter
-    - chain: LOGGING
-    - match: limit
-    - jump: LOG
-    - limit: 2/min
-    - log-level: 4
-    - log-prefix: "IPTables-dropped: "
-
-# Make the input policy send stuff that doesn't match to be logged and dropped
-iptables_log_input_drops:
-  iptables.append:
-    - table: filter
-    - chain: INPUT
-    - jump: LOGGING
-
-# Enable global DOCKER-USER block rule
-enable_docker_user_fw_policy:
-  iptables.insert:
-    - table: filter
-    - chain: DOCKER-USER
-    - jump: LOGGING
-    - in-interface: '!docker0'
-    - out-interface: docker0
-    - position: 1
-
-{% set count = namespace(value=0) %}
-{% for chain, hg in assigned_hostgroups.chain.items() %}
-{% for hostgroup, portgroups in assigned_hostgroups.chain[chain].hostgroups.items() %}
-{% for action in ['insert', 'delete' ] %}
-{% if hostgroups[hostgroup].ips[action] %}
-{% for ip in hostgroups[hostgroup].ips[action] %}
-{% for portgroup in portgroups.portgroups %}
-{% for proto, ports in portgroup.items() %}
-{% for port in ports %}
-{% set count.value = count.value + 1 %}
-
-{{action}}_{{chain}}_{{hostgroup}}_{{ip}}_{{port}}_{{proto}}_{{count.value}}:
-  iptables.{{action}}:
-    - table: filter
-    - chain: {{ chain }}
-    - jump: ACCEPT
-    - proto: {{ proto }}
-    - source: {{ ip }}
-    - dport: {{ port }}
-{% if action == 'insert' %}
-    - position: 1
-{% endif %}
-
-{% endfor %}
-{% endfor %}
-{% endfor %}
-{% endfor %}
-{% endif %}
-{% endfor %}
-{% endfor %}
-{% endfor %}
-
-# Allow related/established sessions
-iptables_allow_established:
-  iptables.insert:
-    - table: filter
-    - chain: INPUT
-    - jump: ACCEPT
-    - position: 1
-    - match: conntrack
-    - ctstate: 'RELATED,ESTABLISHED'
-
-enable_docker_user_established:
-  iptables.insert:
-    - table: filter
-    - chain: DOCKER-USER
-    - jump: ACCEPT
-    - in-interface: '!docker0'
-    - out-interface: docker0
-    - position: 1
-    - match: conntrack
-    - ctstate: 'RELATED,ESTABLISHED'
-
-# Block icmp timestamp response
-block_icmp_timestamp_reply:
-  iptables.append:
-    - table: filter
-    - chain: OUTPUT
-    - jump: DROP
-    - proto: icmp
-    - icmp-type: 'timestamp-reply'
-
-# Make the input policy send stuff that doesn't match to be logged and dropped
-iptables_drop_all_the_things:
-  iptables.append:
-    - table: filter
-    - chain: LOGGING
-    - jump: DROP
-    - save: True
 
 {% else %}
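Note: the per-rule iptables.* states above are replaced by a single rendered ruleset that iptables-restore loads atomically whenever the managed file changes. The actual salt://firewall/iptables.jinja template is not part of this hunk; a hypothetical sketch of what such a template could contain, reusing the chains and defaults from the removed states (the hostgroup/portgroup loop is elided), might be:

    {#- hypothetical sketch of salt://firewall/iptables.jinja, NOT the actual template #}
    *filter
    :INPUT ACCEPT [0:0]
    :FORWARD ACCEPT [0:0]
    :OUTPUT ACCEPT [0:0]
    :DOCKER-USER - [0:0]
    :LOGGING - [0:0]
    -A INPUT -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
    -A INPUT -p icmp -j ACCEPT
    {#- per-hostgroup/portgroup ACCEPT rules would be rendered here #}
    -A INPUT -j LOGGING
    -A FORWARD -j DOCKER-USER
    -A DOCKER-USER ! -i docker0 -o docker0 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
    -A DOCKER-USER ! -i docker0 -o docker0 -j LOGGING
    -A OUTPUT -p icmp --icmp-type timestamp-reply -j DROP
    -A LOGGING -m limit --limit 2/min -j LOG --log-prefix "IPTables-dropped: " --log-level 4
    -A LOGGING -j DROP
    COMMIT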

View File

@@ -1,8 +1,7 @@
 {% from 'allowed_states.map.jinja' import allowed_states %}
 {% if sls in allowed_states %}
 
 {% from 'vars/globals.map.jinja' import GLOBALS %}
+{% from 'docker/docker.map.jinja' import DOCKER %}
-
 {% set GRAFANA = salt['pillar.get']('manager:grafana', '0') %}
 {% set ADMINPASS = salt['pillar.get']('secrets:grafana_admin') %}
@@ -126,6 +125,9 @@ so-grafana:
     - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-grafana:{{ GLOBALS.so_version }}
     - hostname: grafana
     - user: socore
+    - networks:
+      - sosnet:
+        - ipv4_address: {{ DOCKER.containers['so-grafana'].ip }}
     - binds:
       - /nsm/grafana:/var/lib/grafana:rw
       - /opt/so/conf/grafana/etc/grafana.ini:/etc/grafana/grafana.ini:ro

View File

@@ -5,6 +5,7 @@
 {% from 'allowed_states.map.jinja' import allowed_states %}
 {% if sls in allowed_states %}
+{% import_yaml 'docker/defaults.yaml' as DOCKERDEFAULTS %}
 {% from 'vars/globals.map.jinja' import GLOBALS %}
 {% set RESTRICTIDHSERVICES = salt['pillar.get']('idh:restrict_management_ip', False) %}

View File

@@ -4,6 +4,7 @@
 # Elastic License 2.0.
 
 {% from 'allowed_states.map.jinja' import allowed_states %}
 {% if sls in allowed_states %}
+{% from 'docker/docker.map.jinja' import DOCKER %}
 {% from 'vars/globals.map.jinja' import GLOBALS %}
 {% set proxy = salt['pillar.get']('manager:proxy') %}
@@ -31,6 +32,9 @@ so-idstools:
     - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-idstools:{{ GLOBALS.so_version }}
     - hostname: so-idstools
     - user: socore
+    - networks:
+      - sosnet:
+        - ipv4_address: {{ DOCKER.containers['so-idstools'].ip }}
 {% if proxy %}
     - environment:
       - http_proxy={{ proxy }}

View File

@@ -1,5 +1,6 @@
 {% from 'allowed_states.map.jinja' import allowed_states %}
 {% if sls in allowed_states %}
+{% from 'docker/docker.map.jinja' import DOCKER %}
 {% from 'vars/globals.map.jinja' import GLOBALS %}
 {% set GRAFANA = salt['pillar.get']('manager:grafana', '0') %}
@@ -47,6 +48,11 @@ so-influxdb:
   docker_container.running:
     - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-influxdb:{{ GLOBALS.so_version }}
     - hostname: influxdb
+    - networks:
+      - sosnet:
+        - ipv4_address: {{ DOCKER.containers['so-influxdb'].ip }}
+    - environment:
+      - INFLUXDB_HTTP_LOG_ENABLED=false
     - binds:
       - /opt/so/log/influxdb/:/log:rw
       - /opt/so/conf/influxdb/etc/influxdb.conf:/etc/influxdb/influxdb.conf:ro

View File

@@ -5,12 +5,10 @@
 {% from 'allowed_states.map.jinja' import allowed_states %}
 {% if sls in allowed_states %}
+{% from 'docker/docker.map.jinja' import DOCKER %}
 {% from 'vars/globals.map.jinja' import GLOBALS %}
 {% import_yaml 'kibana/defaults.yaml' as default_settings %}
 {% set KIBANA_SETTINGS = salt['grains.filter_by'](default_settings, default='kibana', merge=salt['pillar.get']('kibana', {})) %}
 {% from 'kibana/config.map.jinja' import KIBANACONFIG with context %}
 
 # Add ES Group
@@ -84,6 +82,9 @@ so-kibana:
     - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-kibana:{{ GLOBALS.so_version }}
     - hostname: kibana
     - user: kibana
+    - networks:
+      - sosnet:
+        - ipv4_address: {{ DOCKER.containers['so-kibana'].ip }}
     - environment:
       - ELASTICSEARCH_HOST={{ GLOBALS.manager }}
       - ELASTICSEARCH_PORT=9200

View File

@@ -5,6 +5,7 @@
 {% from 'allowed_states.map.jinja' import allowed_states %}
 {% if sls in allowed_states %}
+{% from 'docker/docker.map.jinja' import DOCKER %}
 {% from 'vars/globals.map.jinja' import GLOBALS %}
 
 # Add Kratos Group
@@ -67,6 +68,9 @@ so-kratos:
     - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-kratos:{{ GLOBALS.so_version }}
     - hostname: kratos
     - name: so-kratos
+    - networks:
+      - sosnet:
+        - ipv4_address: {{ DOCKER.containers['so-kratos'].ip }}
     - binds:
       - /opt/so/conf/kratos/schema.json:/kratos-conf/schema.json:ro
       - /opt/so/conf/kratos/kratos.yaml:/kratos-conf/kratos.yaml:ro

View File

@@ -6,19 +6,19 @@
 {% from 'allowed_states.map.jinja' import allowed_states %}
 {% if sls in allowed_states %}
 
+{% from 'docker/docker.map.jinja' import DOCKER %}
 {% from 'logstash/map.jinja' import REDIS_NODES with context %}
 {% from 'vars/globals.map.jinja' import GLOBALS %}
-
 # Logstash Section - Decide which pillar to use
 {% set lsheap = salt['pillar.get']('logstash_settings:lsheap') %}
 {% if GLOBALS.role in ['so-eval','so-managersearch', 'so-manager', 'so-standalone'] %}
   {% set nodetype = GLOBALS.role %}
 {% endif %}
 
 {% set PIPELINES = salt['pillar.get']('logstash:pipelines', {}) %}
 {% set DOCKER_OPTIONS = salt['pillar.get']('logstash:docker_options', {}) %}
 {% set TEMPLATES = salt['pillar.get']('elasticsearch:templates', {}) %}
 
 include:
   - ssl
@@ -139,6 +139,9 @@ so-logstash:
     - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-logstash:{{ GLOBALS.so_version }}
     - hostname: so-logstash
     - name: so-logstash
+    - networks:
+      - sosnet:
+        - ipv4_address: {{ DOCKER.containers['so-logstash'].ip }}
     - user: logstash
     - extra_hosts: {{ REDIS_NODES }}
     - environment:

View File

@@ -5,8 +5,8 @@
 {% from 'allowed_states.map.jinja' import allowed_states %}
 {% if sls in allowed_states %}
+{% from 'docker/docker.map.jinja' import DOCKER %}
 {% from 'vars/globals.map.jinja' import GLOBALS %}
-
 {%- set MYSQLPASS = salt['pillar.get']('secrets:mysql') %}
 
 # MySQL Setup
@@ -84,6 +84,9 @@ so-mysql:
     - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-mysql:{{ GLOBALS.so_version }}
     - hostname: so-mysql
     - user: socore
+    - networks:
+      - sosnet:
+        - ipv4_address: {{ DOCKER.containers['so-mysql'].ip }}
     - port_bindings:
       - 0.0.0.0:3306:3306
     - environment:

View File

@@ -1,6 +1,7 @@
 {% from 'vars/globals.map.jinja' import GLOBALS %}
 {% from 'allowed_states.map.jinja' import allowed_states %}
 {% if sls in allowed_states %}
+{% from 'docker/docker.map.jinja' import DOCKER %}
 
 include:
   - ssl
@@ -83,6 +84,9 @@ so-nginx:
   docker_container.running:
     - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-nginx:{{ GLOBALS.so_version }}
     - hostname: so-nginx
+    - networks:
+      - sosnet:
+        - ipv4_address: {{ DOCKER.containers['so-nginx'].ip }}
     - binds:
       - /opt/so/conf/nginx/nginx.conf:/etc/nginx/nginx.conf:ro
       - /opt/so/log/nginx/:/var/log/nginx:rw

View File

@@ -5,8 +5,8 @@
 {% from 'allowed_states.map.jinja' import allowed_states %}
 {% if sls in allowed_states %}
+{% from 'docker/docker.map.jinja' import DOCKER %}
 {% from 'vars/globals.map.jinja' import GLOBALS %}
-
 {%- set MYSQLPASS = salt['pillar.get']('secrets:mysql') -%}
 {%- set PLAYBOOKPASS = salt['pillar.get']('secrets:playbook_db') -%}
@@ -80,6 +80,9 @@ so-playbook:
     - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-playbook:{{ GLOBALS.so_version }}
     - hostname: playbook
     - name: so-playbook
+    - networks:
+      - sosnet:
+        - ipv4_address: {{ DOCKER.containers['so-playbook'].ip }}
     - binds:
       - /opt/so/log/playbook:/playbook/log:rw
     - environment:

View File

@@ -5,7 +5,7 @@
 {% from 'allowed_states.map.jinja' import allowed_states %}
 {% if sls in allowed_states %}
+{% from 'docker/docker.map.jinja' import DOCKER %}
 {% from 'vars/globals.map.jinja' import GLOBALS %}
-
 include:
@@ -46,6 +46,9 @@ so-redis:
     - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-redis:{{ GLOBALS.so_version }}
     - hostname: so-redis
     - user: socore
+    - networks:
+      - sosnet:
+        - ipv4_address: {{ DOCKER.containers['so-redis'].ip }}
     - port_bindings:
       - 0.0.0.0:6379:6379
       - 0.0.0.0:9696:9696

View File

@@ -1,5 +1,6 @@
 {% from 'allowed_states.map.jinja' import allowed_states %}
 {% if sls in allowed_states %}
+{% from 'docker/docker.map.jinja' import DOCKER %}
 
 include:
   - ssl
@@ -37,6 +38,9 @@ so-dockerregistry:
   docker_container.running:
     - image: ghcr.io/security-onion-solutions/registry:latest
     - hostname: so-registry
+    - networks:
+      - sosnet:
+        - ipv4_address: {{ DOCKER.containers['so-dockerregistry'].ip }}
     - restart_policy: always
     - port_bindings:
       - 0.0.0.0:5000:5000

View File

@@ -2,6 +2,7 @@
 {% if sls in allowed_states %}
 
 {% from 'vars/globals.map.jinja' import GLOBALS %}
+{% from 'docker/docker.map.jinja' import DOCKER %}
 
 include:
   - manager.sync_es_users
@@ -95,6 +96,9 @@ so-soc:
     - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-soc:{{ GLOBALS.so_version }}
     - hostname: soc
     - name: so-soc
+    - networks:
+      - sosnet:
+        - ipv4_address: {{ DOCKER.containers['so-soc'].ip }}
     - binds:
       - /nsm/soc/jobs:/opt/sensoroni/jobs:rw
       - /opt/so/log/soc/:/opt/sensoroni/logs/:rw

View File

@@ -1,6 +1,6 @@
 {% from 'allowed_states.map.jinja' import allowed_states %}
 {% if sls in allowed_states %}
+{% from 'docker/docker.map.jinja' import DOCKER %}
-
 include:
@@ -63,6 +63,9 @@ so-soctopus:
     - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-soctopus:{{ GLOBALS.so_version }}
     - hostname: soctopus
     - name: so-soctopus
+    - networks:
+      - sosnet:
+        - ipv4_address: {{ DOCKER.containers['so-soctopus'].ip }}
     - binds:
       - /opt/so/conf/soctopus/SOCtopus.conf:/SOCtopus/SOCtopus.conf:ro
       - /opt/so/log/soctopus/:/var/log/SOCtopus/:rw

View File

@@ -5,7 +5,7 @@
 {% from 'allowed_states.map.jinja' import allowed_states %}
 {% if sls in allowed_states %}
+{% from 'docker/docker.map.jinja' import DOCKER %}
 {% from 'vars/globals.map.jinja' import GLOBALS %}
-
 {% set STRELKA_RULES = salt['pillar.get']('strelka:rules', '1') %}
 {% import_yaml 'strelka/defaults.yaml' as strelka_config with context %}
@@ -168,6 +168,9 @@ strelka_coordinator:
   docker_container.running:
     - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-redis:{{ GLOBALS.so_version }}
     - name: so-strelka-coordinator
+    - networks:
+      - sosnet:
+        - ipv4_address: {{ DOCKER.containers['so-strelka-coordinator'].ip }}
     - entrypoint: redis-server --save "" --appendonly no
     - port_bindings:
       - 0.0.0.0:6380:6379
@@ -181,6 +184,9 @@ strelka_gatekeeper:
   docker_container.running:
     - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-redis:{{ GLOBALS.so_version }}
     - name: so-strelka-gatekeeper
+    - networks:
+      - sosnet:
+        - ipv4_address: {{ DOCKER.containers['so-strelka-gatekeeper'].ip }}
     - entrypoint: redis-server --save "" --appendonly no --maxmemory-policy allkeys-lru
     - port_bindings:
       - 0.0.0.0:6381:6379
@@ -198,6 +204,9 @@ strelka_frontend:
       - /nsm/strelka/log/:/var/log/strelka/:rw
     - privileged: True
     - name: so-strelka-frontend
+    - networks:
+      - sosnet:
+        - ipv4_address: {{ DOCKER.containers['so-strelka-frontend'].ip }}
     - command: strelka-frontend
     - port_bindings:
       - 0.0.0.0:57314:57314
@@ -214,6 +223,9 @@ strelka_backend:
      - /opt/so/conf/strelka/backend/:/etc/strelka/:ro
      - /opt/so/conf/strelka/rules/:/etc/yara/:ro
     - name: so-strelka-backend
+    - networks:
+      - sosnet:
+        - ipv4_address: {{ DOCKER.containers['so-strelka-backend'].ip }}
     - command: strelka-backend
     - restart_policy: on-failure
@@ -228,6 +240,9 @@ strelka_manager:
     - binds:
       - /opt/so/conf/strelka/manager/:/etc/strelka/:ro
     - name: so-strelka-manager
+    - networks:
+      - sosnet:
+        - ipv4_address: {{ DOCKER.containers['so-strelka-manager'].ip }}
     - command: strelka-manager
 
 append_so-strelka-manager_so-status.conf:
@@ -242,6 +257,9 @@ strelka_filestream:
       - /opt/so/conf/strelka/filestream/:/etc/strelka/:ro
       - /nsm/strelka:/nsm/strelka
     - name: so-strelka-filestream
+    - networks:
+      - sosnet:
+        - ipv4_address: {{ DOCKER.containers['so-strelka-filestream'].ip }}
     - command: strelka-filestream
 
 append_so-strelka-filestream_so-status.conf:

View File

@@ -254,11 +254,16 @@ collect_dns_domain() {
 collect_dockernet() {
   if ! whiptail_dockernet_check; then
-    whiptail_dockernet_net "172.17.0.0"
+    whiptail_dockernet_sosnet "172.17.1.0"
+    whiptail_dockernet_nososnet "172.17.0.0"
     while ! valid_ip4 "$DOCKERNET"; do
       whiptail_invalid_input
-      whiptail_dockernet_net "$DOCKERNET"
+      whiptail_dockernet_nososnet "$DOCKERNET"
+    done
+    while ! valid_ip4 "$DOCKERNET2"; do
+      whiptail_invalid_input
+      whiptail_dockernet_sosnet "$DOCKERNET2"
     done
   fi
 }
@@ -996,6 +1001,9 @@ docker_registry() {
   if [ -z "$DOCKERNET" ]; then
     DOCKERNET=172.17.0.0
   fi
+  if [ -z "$DOCKERNET2" ]; then
+    DOCKERNET2=172.17.1.0
+  fi
   # Make the host use the manager docker registry
   DNETBIP=$(echo $DOCKERNET | awk -F'.' '{print $1,$2,$3,1}' OFS='.')/24
   if [ -n "$TURBO" ]; then local proxy="$TURBO"; else local proxy="https://$MSRV"; fi
@@ -1402,9 +1410,12 @@ create_global() {
   if [ -z "$DOCKERNET" ]; then
     DOCKERNET=172.17.0.0
+    DOCKERNET2=172.17.1.0
     DOCKERBIP=$(echo $DOCKERNET | awk -F'.' '{print $1,$2,$3,1}' OFS='.')/24
+    DOCKER2BIP=$(echo $DOCKERNET2 | awk -F'.' '{print $1,$2,$3,1}' OFS='.')
   else
     DOCKERBIP=$(echo $DOCKERNET | awk -F'.' '{print $1,$2,$3,1}' OFS='.')/24
+    DOCKER2BIP=$(echo $DOCKERNET2 | awk -F'.' '{print $1,$2,$3,1}' OFS='.')
   fi
 
   if [ -f "$global_pillar_file" ]; then
@@ -1488,6 +1499,8 @@ docker_pillar() {
   touch $adv_docker_pillar_file
   printf '%s\n'\
   "docker:"\
+  " sosrange: '$DOCKERNET2/24'"\
+  " sosbip: '$DOCKER2BIP'"\
   " range: '$DOCKERNET/24'"\
   " bip: '$DOCKERBIP'" > $docker_pillar_file
 }
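Note: with the installer defaults collected above (DOCKERNET=172.17.0.0, DOCKERNET2=172.17.1.0), docker_pillar() now writes a docker pillar along these lines, which is what docker/defaults.yaml and docker.map.jinja consume on the Salt side:

    # resulting docker pillar for the default ranges (path is the installer's $docker_pillar_file)
    docker:
      sosrange: '172.17.1.0/24'
      sosbip: '172.17.1.1'
      range: '172.17.0.0/24'
      bip: '172.17.0.1/24'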
@@ -1910,14 +1923,6 @@ securityonion_repo() {
 repo_sync_local() {
   # Sync the repo from the the SO repo locally.
   # Check for reposync
-  REPOSYNC=$(rpm -qa | grep createrepo | wc -l)
-  if [[ ! "$REPOSYNC" -gt 0 ]]; then
-    # Install reposync
-    info "Installing createrepo"
-    logCmd "yum -y install yum-utils createrepo"
-  else
-    info "We have what we need to sync"
-  fi
   info "Backing up old repos"
   mkdir -p /nsm/repo
   mkdir -p /root/reposync_cache
@@ -1941,6 +1946,15 @@ repo_sync_local() {
   echo "gpgcheck=1" >> /root/repodownload.conf
   echo "gpgkey=https://repo.securityonion.net/file/securityonion-repo/keys/securityonion.pub" >> /root/repodownload.conf
+  REPOSYNC=$(rpm -qa | grep createrepo | wc -l)
+  if [[ ! "$REPOSYNC" -gt 0 ]]; then
+    # Install reposync
+    info "Installing createrepo"
+    logCmd "yum -y install -c /root/repodownload.conf yum-utils createrepo"
+  else
+    info "We have what we need to sync"
+  fi
   logCmd "reposync --norepopath -n -g -l -d -m -c /root/repodownload.conf -r securityonionsync --download-metadata -p /nsm/repo/"

View File

@@ -317,6 +317,7 @@ if ! [[ -f $install_opt_file ]]; then
     check_elastic_license
     check_requirements "manager"
     networking_needful
+    collect_dockernet
     whiptail_airgap
     detect_cloud
     set_minion_info
@@ -336,6 +337,7 @@ if ! [[ -f $install_opt_file ]]; then
     check_elastic_license
     check_requirements "manager"
     networking_needful
+    collect_dockernet
     whiptail_airgap
     detect_cloud
     set_minion_info
@@ -354,6 +356,7 @@ if ! [[ -f $install_opt_file ]]; then
     waitforstate=true
     check_requirements "manager"
     networking_needful
+    collect_dockernet
     whiptail_airgap
     detect_cloud
     set_default_log_size >> $setup_log 2>&1
@@ -370,6 +373,7 @@ if ! [[ -f $install_opt_file ]]; then
     waitforstate=true
     check_requirements "manager"
     networking_needful
+    collect_dockernet
     whiptail_airgap
     detect_cloud
     set_default_log_size >> $setup_log 2>&1

View File

@@ -325,12 +325,24 @@ whiptail_dockernet_check(){
 }
 
-whiptail_dockernet_net() {
+whiptail_dockernet_sosnet() {
+  [ -n "$TESTING" ] && return
+  DOCKERNET2=$(whiptail --title "$whiptail_title" --inputbox \
+  "\nEnter a /24 size network range for SOS containers to use WITHOUT the /24 suffix. This range will be used on ALL nodes." 11 65 "$1" 3>&1 1>&2 2>&3)
+  local exitstatus=$?
+  whiptail_check_exitstatus $exitstatus
+}
+
+whiptail_dockernet_nososnet() {
   [ -n "$TESTING" ] && return
   DOCKERNET=$(whiptail --title "$whiptail_title" --inputbox \
-  "\nEnter a /24 size network range for docker to use WITHOUT the /24 suffix. This range will be used on ALL nodes." 11 65 "$1" 3>&1 1>&2 2>&3)
+  "\nEnter a /24 size network range for NON SOS containers to use WITHOUT the /24 suffix. This range will be used on ALL nodes." 11 65 "$1" 3>&1 1>&2 2>&3)
   local exitstatus=$?
   whiptail_check_exitstatus $exitstatus