Merge pull request #9458 from Security-Onion-Solutions/2.4/firewall

2.4/firewall
This commit is contained in:
Mike Reeves
2023-01-11 08:49:15 -05:00
committed by GitHub
37 changed files with 423 additions and 192 deletions

View File

@@ -1,7 +1,9 @@
{%- set DOCKERRANGE = salt['pillar.get']('docker:range', '172.17.0.0/24') %} {%- set DOCKERRANGE = salt['pillar.get']('docker:range', '172.17.0.0/24') %}
{%- set DOCKERBIND = salt['pillar.get']('docker:bip', '172.17.0.1/24') %} {%- set DOCKERBIND = salt['pillar.get']('docker:bip', '172.17.0.1/24') %}
{ {
"registry-mirrors": [ "https://:5000" ], "registry-mirrors": [
"https://:5000"
],
"bip": "{{ DOCKERBIND }}", "bip": "{{ DOCKERBIND }}",
"default-address-pools": [ "default-address-pools": [
{ {

View File

@@ -49,33 +49,30 @@ fi
case "$ROLE" in case "$ROLE" in
'MANAGER') 'MANAGER')
so-firewall includehost manager "$IP" so-firewall --role=manager --ip="$IP"
so-firewall --apply includehost minion "$IP"
;; ;;
'EVAL' | 'MANAGERSEARCH' | 'STANDALONE' | 'IMPORT') 'EVAL' | 'MANAGERSEARCH' | 'STANDALONE' | 'IMPORT')
so-firewall includehost manager "$IP" so-firewall --role=manager --ip="$IP"
so-firewall includehost minion "$IP" so-firewall --role=sensors --ip="$IP"
so-firewall includehost sensor "$IP" so-firewall --apply --role=searchnodes --ip="$IP"
so-firewall --apply includehost search_node "$IP"
;; ;;
'SENSOR' | 'SEARCHNODE' | 'HEAVYNODE' | 'IDH' | 'RECEIVER') 'SENSOR' | 'SEARCHNODE' | 'HEAVYNODE' | 'IDH' | 'RECEIVER')
so-firewall includehost minion "$IP"
case "$ROLE" in case "$ROLE" in
'SENSOR') 'SENSOR')
so-firewall --apply includehost sensor "$IP" so-firewall --apply --role=sensors --ip="$IP"
;; ;;
'SEARCHNODE') 'SEARCHNODE')
so-firewall --apply includehost search_node "$IP" so-firewall --apply --role=searchnodes --ip="$IP"
;; ;;
'HEAVYNODE') 'HEAVYNODE')
so-firewall includehost sensor "$IP" so-firewall --role=sensors --ip="$IP"
so-firewall --apply includehost heavy_node "$IP" so-firewall --apply --role=heavynodes --ip="$IP"
;; ;;
'IDH') 'IDH')
so-firewall --apply includehost beats_endpoint_ssl "$IP" so-firewall --apply --role=beats_endpoint_ssl --ip="$IP"
;; ;;
'RECEIVER') 'RECEIVER')
so-firewall --apply includehost receiver "$IP" so-firewall --apply --role=receivers --ip="$IP"
;; ;;
esac esac
;; ;;

View File

@@ -206,7 +206,7 @@ function createSTANDALONE() {
} }
function testConnection() { function testConnection() {
retry 15 3 "salt '$MINION_ID' test.ping" 0 retry 15 3 "salt '$MINION_ID' test.ping" True
local ret=$? local ret=$?
if [[ $ret != 0 ]]; then if [[ $ret != 0 ]]; then
echo "The Minion has been accepted but is not online. Try again later" echo "The Minion has been accepted but is not online. Try again later"

View File

@@ -6,6 +6,7 @@
{% from 'allowed_states.map.jinja' import allowed_states %} {% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %} {% if sls in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'docker/docker.map.jinja' import DOCKER %}
{% from "curator/map.jinja" import CURATOROPTIONS %} {% from "curator/map.jinja" import CURATOROPTIONS %}
{% from "curator/map.jinja" import CURATORMERGED %} {% from "curator/map.jinja" import CURATORMERGED %}
{% set REMOVECURATORCRON = False %} {% set REMOVECURATORCRON = False %}
@@ -128,6 +129,9 @@ so-curator:
- hostname: curator - hostname: curator
- name: so-curator - name: so-curator
- user: curator - user: curator
- networks:
- sosbridge:
- ipv4_address: {{ DOCKER.containers['so-curator'].ip }}
- interactive: True - interactive: True
- tty: True - tty: True
- binds: - binds:

94
salt/docker/defaults.yaml Normal file
View File

@@ -0,0 +1,94 @@
# Default Docker networking settings for Security Onion.
# These values apply unless overridden by the 'docker' pillar key
# (merged in docker/docker.map.jinja).
#
# bip / range:       gateway IP and subnet of the stock docker0 bridge.
# sosbip / sosrange: gateway IP and subnet of the dedicated 'sosbridge'
#                    network that Security Onion containers attach to.
# containers:        per-container static addressing. Each container's IP is
#                    built from the first three octets of sosrange plus its
#                    final_octet (see docker/docker.map.jinja). The optional
#                    'ports' mapping (port -> protocol) drives the generated
#                    iptables NAT/forwarding rules (see firewall/iptables.jinja).
docker:
  bip: '172.17.0.1'
  range: '172.17.0.0/24'
  sosrange: '172.17.1.0/24'
  sosbip: '172.17.1.1'
  containers:
    'so-dockerregistry':
      final_octet: 20
      ports:
        5000: tcp
    'so-elastic-fleet':
      final_octet: 21
    'so-elasticsearch':
      final_octet: 22
      ports:
        9200: tcp
        9300: tcp
    'so-filebeat':
      final_octet: 23
    'so-grafana':
      final_octet: 24
      ports:
        3000: tcp
    'so-idstools':
      final_octet: 25
    'so-influxdb':
      final_octet: 26
      ports:
        8086: tcp
    'so-kibana':
      final_octet: 27
      ports:
        5601: tcp
    'so-kratos':
      final_octet: 28
      ports:
        4433: tcp
        4434: tcp
    'so-logstash':
      final_octet: 29
      ports:
        3765: tcp
        5044: tcp
        5055: tcp
        5644: tcp
        6050: tcp
        6051: tcp
        6052: tcp
        6053: tcp
        9600: tcp
    'so-mysql':
      final_octet: 30
      ports:
        3306: tcp
    'so-nginx':
      final_octet: 31
      ports:
        80: tcp
        443: tcp
    'so-playbook':
      final_octet: 32
    'so-redis':
      final_octet: 33
      ports:
        6379: tcp
        9696: tcp
    'so-soc':
      final_octet: 34
      ports:
        9822: tcp
    'so-soctopus':
      final_octet: 35
      ports:
        7000: tcp
    'so-strelka-backend':
      final_octet: 36
    'so-strelka-filestream':
      final_octet: 37
    'so-strelka-frontend':
      final_octet: 38
    'so-strelka-manager':
      final_octet: 39
    'so-strelka-gatekeeper':
      final_octet: 40
    'so-strelka-coordinator':
      final_octet: 41
    'so-elastalert':
      final_octet: 42
    'so-curator':
      final_octet: 43
    'so-elastic-fleet-package-registry':
      final_octet: 44
      ports:
        8080: tcp

View File

@@ -0,0 +1,8 @@
{#- Builds the DOCKER dict used throughout the states: docker/defaults.yaml
    merged with the 'docker' pillar, after which every container entry gains
    a static 'ip' composed of the sosrange /24 prefix and its final_octet. -#}
{% import_yaml 'docker/defaults.yaml' as DOCKERDEFAULTS %}
{% set DOCKER = salt['pillar.get']('docker', DOCKERDEFAULTS.docker, merge=True) %}
{#- First three octets of sosrange (e.g. '172.17.1.0/24' -> '172.17.1.') -#}
{% set SUBNET_PREFIX = DOCKER.sosrange.split('.')[0:3] | join('.') ~ '.' %}
{% for name, settings in DOCKER.containers.items() %}
{% do settings.update({'ip': SUBNET_PREFIX ~ settings.final_octet}) %}
{% endfor %}

View File

@@ -0,0 +1,3 @@
# Systemd drop-in that restarts dockerd with --iptables=false so Docker does
# not manage iptables itself; Security Onion generates the equivalent rules
# from firewall/iptables.jinja instead (installed by the docker state).
[Service]
# An empty ExecStart= clears the ExecStart inherited from the base
# docker.service unit; systemd requires this before a drop-in may set a new
# ExecStart for a non-oneshot service.
ExecStart=
ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock --iptables=false

View File

@@ -3,6 +3,8 @@
# https://securityonion.net/license; you may not use this file except in compliance with the # https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0. # Elastic License 2.0.
{% from 'docker/docker.map.jinja' import DOCKER %}
dockergroup: dockergroup:
group.present: group.present:
- name: docker - name: docker
@@ -18,6 +20,17 @@ dockerheldpackages:
- hold: True - hold: True
- update_holds: True - update_holds: True
#disable docker from managing iptables
iptables_disabled:
file.managed:
- name: /etc/systemd/system/docker.service.d/iptables-disabled.conf
- source: salt://docker/files/iptables-disabled.conf
- makedirs: True
cmd.run:
- name: systemctl daemon-reload
- onchanges:
- file: iptables_disabled
# Make sure etc/docker exists # Make sure etc/docker exists
dockeretc: dockeretc:
file.directory: file.directory:
@@ -50,3 +63,15 @@ dockerreserveports:
- source: salt://common/files/99-reserved-ports.conf - source: salt://common/files/99-reserved-ports.conf
- name: /etc/sysctl.d/99-reserved-ports.conf - name: /etc/sysctl.d/99-reserved-ports.conf
sos_docker_net:
docker_network.present:
- name: sosbridge
- subnet: {{ DOCKER.sosrange }}
- gateway: {{ DOCKER.sosbip }}
- options:
com.docker.network.bridge.name: 'sosbridge'
com.docker.network.driver.mtu: '1500'
com.docker.network.bridge.enable_ip_masquerade: 'true'
com.docker.network.bridge.enable_icc: 'true'
com.docker.network.bridge.host_binding_ipv4: '0.0.0.0'
- unless: 'docker network ls | grep sosbridge'

View File

@@ -5,6 +5,7 @@
{% if sls in allowed_states %} {% if sls in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'docker/docker.map.jinja' import DOCKER %}
{% from 'elastalert/elastalert_config.map.jinja' import ELASTALERT as elastalert_config with context %} {% from 'elastalert/elastalert_config.map.jinja' import ELASTALERT as elastalert_config with context %}
# Create the group # Create the group
@@ -86,6 +87,9 @@ so-elastalert:
- hostname: elastalert - hostname: elastalert
- name: so-elastalert - name: so-elastalert
- user: so-elastalert - user: so-elastalert
- networks:
- sosbridge:
- ipv4_address: {{ DOCKER.containers['so-elastalert'].ip }}
- detach: True - detach: True
- binds: - binds:
- /opt/so/rules/elastalert:/opt/elastalert/rules/:ro - /opt/so/rules/elastalert:/opt/elastalert/rules/:ro

View File

@@ -4,6 +4,7 @@
{% from 'allowed_states.map.jinja' import allowed_states %} {% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %} {% if sls in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'docker/docker.map.jinja' import DOCKER %}
# Add Group # Add Group
elasticsagentprgroup: elasticsagentprgroup:
@@ -27,6 +28,9 @@ so-elastic-fleet-package-registry:
- hostname: Fleet-package-reg-{{ GLOBALS.hostname }} - hostname: Fleet-package-reg-{{ GLOBALS.hostname }}
- detach: True - detach: True
- user: 948 - user: 948
- networks:
- sosbridge:
- ipv4_address: {{ DOCKER.containers['so-elastic-fleet-package-registry'].ip }}
- extra_hosts: - extra_hosts:
- {{ GLOBALS.hostname }}:{{ GLOBALS.node_ip }} - {{ GLOBALS.hostname }}:{{ GLOBALS.node_ip }}
- port_bindings: - port_bindings:

View File

@@ -4,6 +4,7 @@
{% from 'allowed_states.map.jinja' import allowed_states %} {% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %} {% if sls in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'docker/docker.map.jinja' import DOCKER %}
# These values are generated during node install and stored in minion pillar # These values are generated during node install and stored in minion pillar
{% set SERVICETOKEN = salt['pillar.get']('elasticfleet:server:es_token','') %} {% set SERVICETOKEN = salt['pillar.get']('elasticfleet:server:es_token','') %}
@@ -47,6 +48,9 @@ so-elastic-fleet:
- hostname: Fleet-{{ GLOBALS.hostname }} - hostname: Fleet-{{ GLOBALS.hostname }}
- detach: True - detach: True
- user: 947 - user: 947
- networks:
- sosbridge:
- ipv4_address: {{ DOCKER.containers['so-elastic-fleet'].ip }}
- extra_hosts: - extra_hosts:
- {{ GLOBALS.hostname }}:{{ GLOBALS.node_ip }} - {{ GLOBALS.hostname }}:{{ GLOBALS.node_ip }}
- port_bindings: - port_bindings:

View File

@@ -10,6 +10,7 @@ include:
- ssl - ssl
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'docker/docker.map.jinja' import DOCKER %}
{% set TEMPLATES = salt['pillar.get']('elasticsearch:templates', {}) %} {% set TEMPLATES = salt['pillar.get']('elasticsearch:templates', {}) %}
{% set ROLES = salt['pillar.get']('elasticsearch:roles', {}) %} {% set ROLES = salt['pillar.get']('elasticsearch:roles', {}) %}
{% from 'elasticsearch/config.map.jinja' import ESCONFIG with context %} {% from 'elasticsearch/config.map.jinja' import ESCONFIG with context %}
@@ -289,6 +290,9 @@ so-elasticsearch:
- hostname: elasticsearch - hostname: elasticsearch
- name: so-elasticsearch - name: so-elasticsearch
- user: elasticsearch - user: elasticsearch
- networks:
- sosbridge:
- ipv4_address: {{ DOCKER.containers['so-elasticsearch'].ip }}
- extra_hosts: {{ REDIS_NODES }} - extra_hosts: {{ REDIS_NODES }}
- environment: - environment:
{% if REDIS_NODES | length == 1 %} {% if REDIS_NODES | length == 1 %}

View File

@@ -5,6 +5,7 @@
{% from 'allowed_states.map.jinja' import allowed_states %} {% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %} {% if sls in allowed_states %}
{% from 'docker/docker.map.jinja' import DOCKER %}
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'filebeat/modules.map.jinja' import MODULESMERGED with context %} {% from 'filebeat/modules.map.jinja' import MODULESMERGED with context %}
{% from 'filebeat/modules.map.jinja' import MODULESENABLED with context %} {% from 'filebeat/modules.map.jinja' import MODULESENABLED with context %}
@@ -97,6 +98,9 @@ so-filebeat:
- image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-filebeat:{{ GLOBALS.so_version }} - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-filebeat:{{ GLOBALS.so_version }}
- hostname: so-filebeat - hostname: so-filebeat
- user: root - user: root
- networks:
- sosbridge:
- ipv4_address: {{ DOCKER.containers['so-filebeat'].ip }}
- extra_hosts: {{ FILEBEAT_EXTRA_HOSTS }} - extra_hosts: {{ FILEBEAT_EXTRA_HOSTS }}
- binds: - binds:
- /nsm:/nsm:ro - /nsm:/nsm:ro

View File

@@ -0,0 +1,16 @@
{# Containers that may run on manager-class nodes. firewall/iptables.jinja
   iterates this list and, for each container that declares 'ports' in
   docker/defaults.yaml, emits the DNAT/MASQUERADE/forward rules that Docker
   would otherwise manage itself (dockerd runs with --iptables=false). -#}
{% set NODE_CONTAINERS = [
  'so-curator',
  'so-dockerregistry',
  'so-elasticsearch',
  'so-elastic-fleet-package-registry',
  'so-grafana',
  'so-influxdb',
  'so-kibana',
  'so-kratos',
  'so-logstash',
  'so-mysql',
  'so-nginx',
  'so-redis',
  'so-soc',
  'so-soctopus'
] %}

View File

@@ -1,4 +1,4 @@
{%- set DNET = salt['pillar.get']('global:dockernet', '172.17.0.0') %} {% from 'docker/docker.map.jinja' import DOCKER %}
firewall: firewall:
hostgroups: hostgroups:
anywhere: anywhere:
@@ -10,7 +10,7 @@ firewall:
ips: ips:
delete: delete:
insert: insert:
- {{ DNET }}/24 - {{ DOCKER.sosrange }}
localhost: localhost:
ips: ips:
delete: delete:

View File

@@ -1,142 +1,21 @@
{% from 'allowed_states.map.jinja' import allowed_states %} {% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %} {% if sls in allowed_states %}
# Firewall Magic for the grid
{% from 'firewall/map.jinja' import hostgroups with context %}
{% from 'firewall/map.jinja' import assigned_hostgroups with context %}
create_sysconfig_iptables: create_sysconfig_iptables:
file.touch: file.touch:
- name: /etc/sysconfig/iptables - name: /etc/sysconfig/iptables
- makedirs: True - makedirs: True
- unless: 'ls /etc/sysconfig/iptables' - unless: 'ls /etc/sysconfig/iptables'
# Quick Fix for Docker being difficult iptables_config:
iptables_fix_docker: file.managed:
iptables.chain_present: - name: /etc/sysconfig/iptables
- name: DOCKER-USER - source: salt://firewall/iptables.jinja
- table: filter - template: jinja
# Add the Forward Rule since Docker ripped it out iptables_restore:
iptables_fix_fwd: cmd.run:
iptables.insert: - name: iptables-restore < /etc/sysconfig/iptables
- table: filter
- chain: FORWARD
- jump: ACCEPT
- position: 1
- target: DOCKER-USER
# I like pings
iptables_allow_pings:
iptables.append:
- table: filter
- chain: INPUT
- jump: ACCEPT
- proto: icmp
# Create the chain for logging
iptables_LOGGING_chain:
iptables.chain_present:
- name: LOGGING
- table: filter
- family: ipv4
iptables_LOGGING_limit:
iptables.append:
- table: filter
- chain: LOGGING
- match: limit
- jump: LOG
- limit: 2/min
- log-level: 4
- log-prefix: "IPTables-dropped: "
# Make the input policy send stuff that doesn't match to be logged and dropped
iptables_log_input_drops:
iptables.append:
- table: filter
- chain: INPUT
- jump: LOGGING
# Enable global DOCKER-USER block rule
enable_docker_user_fw_policy:
iptables.insert:
- table: filter
- chain: DOCKER-USER
- jump: LOGGING
- in-interface: '!docker0'
- out-interface: docker0
- position: 1
{% set count = namespace(value=0) %}
{% for chain, hg in assigned_hostgroups.chain.items() %}
{% for hostgroup, portgroups in assigned_hostgroups.chain[chain].hostgroups.items() %}
{% for action in ['insert', 'delete' ] %}
{% if hostgroups[hostgroup].ips[action] %}
{% for ip in hostgroups[hostgroup].ips[action] %}
{% for portgroup in portgroups.portgroups %}
{% for proto, ports in portgroup.items() %}
{% for port in ports %}
{% set count.value = count.value + 1 %}
{{action}}_{{chain}}_{{hostgroup}}_{{ip}}_{{port}}_{{proto}}_{{count.value}}:
iptables.{{action}}:
- table: filter
- chain: {{ chain }}
- jump: ACCEPT
- proto: {{ proto }}
- source: {{ ip }}
- dport: {{ port }}
{% if action == 'insert' %}
- position: 1
{% endif %}
{% endfor %}
{% endfor %}
{% endfor %}
{% endfor %}
{% endif %}
{% endfor %}
{% endfor %}
{% endfor %}
# Allow related/established sessions
iptables_allow_established:
iptables.insert:
- table: filter
- chain: INPUT
- jump: ACCEPT
- position: 1
- match: conntrack
- ctstate: 'RELATED,ESTABLISHED'
enable_docker_user_established:
iptables.insert:
- table: filter
- chain: DOCKER-USER
- jump: ACCEPT
- in-interface: '!docker0'
- out-interface: docker0
- position: 1
- match: conntrack
- ctstate: 'RELATED,ESTABLISHED'
# Block icmp timestamp response
block_icmp_timestamp_reply:
iptables.append:
- table: filter
- chain: OUTPUT
- jump: DROP
- proto: icmp
- icmp-type: 'timestamp-reply'
# Make the input policy send stuff that doesn't match to be logged and dropped
iptables_drop_all_the_things:
iptables.append:
- table: filter
- chain: LOGGING
- jump: DROP
- save: True
{% else %} {% else %}

View File

@@ -0,0 +1,90 @@
{# Rendered to /etc/sysconfig/iptables and loaded via iptables-restore.
   dockerd runs with --iptables=false (docker/files/iptables-disabled.conf),
   so this template reproduces the NAT and filter rules Docker would normally
   maintain for the 'sosbridge' network, plus the grid firewall rules derived
   from the firewall pillar (firewall/map.jinja). All added comments use
   Jinja comment tags with trailing whitespace control so the rendered file
   is unchanged. -#}
{% from 'docker/docker.map.jinja' import DOCKER -%}
{% from 'firewall/containers.map.jinja' import NODE_CONTAINERS -%}
{% from 'firewall/map.jinja' import hostgroups with context -%}
{% from 'firewall/map.jinja' import assigned_hostgroups with context -%}
{# ---- NAT table: masquerade sosbridge egress, DNAT published ports ---- -#}
*nat
:PREROUTING ACCEPT [0:0]
:INPUT ACCEPT [0:0]
:OUTPUT ACCEPT [0:0]
:POSTROUTING ACCEPT [0:0]
:DOCKER - [0:0]
-A PREROUTING -m addrtype --dst-type LOCAL -j DOCKER
-A OUTPUT ! -d 127.0.0.0/8 -m addrtype --dst-type LOCAL -j DOCKER
-A POSTROUTING -s {{DOCKER.sosrange}} ! -o sosbridge -j MASQUERADE
{# Hairpin NAT: masquerade a container's connections to its own published
   ports so replies come back through the bridge. -#}
{%- for container in NODE_CONTAINERS %}
{%- if DOCKER.containers[container].ports is defined %}
{%- for port, proto in DOCKER.containers[container].ports.items() %}
-A POSTROUTING -s {{DOCKER.containers[container].ip}}/32 -d {{DOCKER.containers[container].ip}}/32 -p {{proto}} -m {{proto}} --dport {{port}} -j MASQUERADE
{%- endfor %}
{%- endif %}
{%- endfor %}
-A DOCKER -i sosbridge -j RETURN
{# DNAT traffic arriving on any non-bridge interface to the container that
   publishes the port (static IPs from docker/docker.map.jinja). -#}
{%- for container in NODE_CONTAINERS %}
{%- if DOCKER.containers[container].ports is defined %}
{%- for port, proto in DOCKER.containers[container].ports.items() %}
-A DOCKER ! -i sosbridge -p {{proto}} -m {{proto}} --dport {{port}} -j DNAT --to-destination {{DOCKER.containers[container].ip}}:{{port}}
{%- endfor %}
{%- endif %}
{%- endfor %}
COMMIT
{# ---- filter table: host input policy, Docker forwarding, logging ---- -#}
*filter
:INPUT ACCEPT [0:0]
:FORWARD DROP [0:0]
:OUTPUT ACCEPT [0:0]
:DOCKER - [0:0]
:DOCKER-ISOLATION-STAGE-1 - [0:0]
:DOCKER-ISOLATION-STAGE-2 - [0:0]
:DOCKER-USER - [0:0]
:LOGGING - [0:0]
{# Grid firewall ACCEPT rules from the pillar-driven hostgroup/portgroup
   assignments.
   NOTE(review): both the 'insert' and 'delete' actions emit ACCEPT rules
   here, so IPs listed under a hostgroup's 'delete' list are still allowed.
   The previous state-based implementation deleted those rules; since this
   file fully replaces the ruleset, 'delete' entries probably should be
   skipped entirely — confirm intended pillar semantics.
   NOTE(review): count.value is incremented but never used in the rendered
   output — leftover from the state-based version that needed unique IDs. -#}
{%- set count = namespace(value=0) %}
{%- for chain, hg in assigned_hostgroups.chain.items() %}
{%- for hostgroup, portgroups in assigned_hostgroups.chain[chain].hostgroups.items() %}
{%- for action in ['insert', 'delete' ] %}
{%- if hostgroups[hostgroup].ips[action] %}
{%- for ip in hostgroups[hostgroup].ips[action] %}
{%- for portgroup in portgroups.portgroups %}
{%- for proto, ports in portgroup.items() %}
{%- for port in ports %}
{%- set count.value = count.value + 1 %}
-A {{chain}} -s {{ip}} -p {{proto}} -m {{proto}} --dport {{port}} -j ACCEPT
{%- endfor %}
{%- endfor %}
{%- endfor %}
{%- endfor %}
{%- endif %}
{%- endfor %}
{%- endfor %}
{%- endfor %}
{# Host input: allow established/related and ICMP echo; everything else not
   matched above is logged and dropped via the LOGGING chain. -#}
-A INPUT -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A INPUT -m conntrack --ctstate INVALID -j DROP
-A INPUT -p icmp -j ACCEPT
-A INPUT -j LOGGING
{# Forwarding path for sosbridge, mirroring Docker's usual chain layout. -#}
-A FORWARD -j DOCKER-USER
-A FORWARD -j DOCKER-ISOLATION-STAGE-1
-A FORWARD -o sosbridge -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A FORWARD -o sosbridge -j DOCKER
-A FORWARD -i sosbridge ! -o sosbridge -j ACCEPT
-A FORWARD -i sosbridge -o sosbridge -j ACCEPT
-A FORWARD -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
{# icmp-type 14 = timestamp-reply; suppressed to avoid clock disclosure. -#}
-A OUTPUT -p icmp -m icmp --icmp-type 14 -j DROP
{# Accept forwarded traffic to each container's published ports. -#}
{%- for container in NODE_CONTAINERS %}
{%- if DOCKER.containers[container].ports is defined %}
{%- for port, proto in DOCKER.containers[container].ports.items() %}
-A DOCKER -d {{DOCKER.containers[container].ip}}/32 ! -i sosbridge -o sosbridge -p {{proto}} -m {{proto}} --dport {{port}} -j ACCEPT
{%- endfor %}
{%- endif %}
{%- endfor %}
-A DOCKER-ISOLATION-STAGE-1 -i sosbridge ! -o sosbridge -j DOCKER-ISOLATION-STAGE-2
-A DOCKER-ISOLATION-STAGE-1 -j RETURN
-A DOCKER-ISOLATION-STAGE-2 -o sosbridge -j DROP
-A DOCKER-ISOLATION-STAGE-2 -j RETURN
-A DOCKER-USER ! -i sosbridge -o sosbridge -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A DOCKER-USER -j RETURN
{# Rate-limited logging, then drop, for unmatched INPUT traffic. -#}
-A LOGGING -m limit --limit 2/min -j LOG --log-prefix "IPTables-dropped: "
-A LOGGING -j DROP
COMMIT

View File

@@ -1,8 +1,7 @@
{% from 'allowed_states.map.jinja' import allowed_states %} {% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %} {% if sls in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'docker/docker.map.jinja' import DOCKER %}
{% set GRAFANA = salt['pillar.get']('manager:grafana', '0') %} {% set GRAFANA = salt['pillar.get']('manager:grafana', '0') %}
{% set ADMINPASS = salt['pillar.get']('secrets:grafana_admin') %} {% set ADMINPASS = salt['pillar.get']('secrets:grafana_admin') %}
@@ -126,6 +125,9 @@ so-grafana:
- image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-grafana:{{ GLOBALS.so_version }} - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-grafana:{{ GLOBALS.so_version }}
- hostname: grafana - hostname: grafana
- user: socore - user: socore
- networks:
- sosbridge:
- ipv4_address: {{ DOCKER.containers['so-grafana'].ip }}
- binds: - binds:
- /nsm/grafana:/var/lib/grafana:rw - /nsm/grafana:/var/lib/grafana:rw
- /opt/so/conf/grafana/etc/grafana.ini:/etc/grafana/grafana.ini:ro - /opt/so/conf/grafana/etc/grafana.ini:/etc/grafana/grafana.ini:ro

View File

@@ -5,6 +5,7 @@
{% from 'allowed_states.map.jinja' import allowed_states %} {% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %} {% if sls in allowed_states %}
{% import_yaml 'docker/defaults.yaml' as DOCKERDEFAULTS %}
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
{% set RESTRICTIDHSERVICES = salt['pillar.get']('idh:restrict_management_ip', False) %} {% set RESTRICTIDHSERVICES = salt['pillar.get']('idh:restrict_management_ip', False) %}

View File

@@ -4,6 +4,7 @@
# Elastic License 2.0. # Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %} {% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %} {% if sls in allowed_states %}
{% from 'docker/docker.map.jinja' import DOCKER %}
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
{% set proxy = salt['pillar.get']('manager:proxy') %} {% set proxy = salt['pillar.get']('manager:proxy') %}
@@ -31,6 +32,9 @@ so-idstools:
- image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-idstools:{{ GLOBALS.so_version }} - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-idstools:{{ GLOBALS.so_version }}
- hostname: so-idstools - hostname: so-idstools
- user: socore - user: socore
- networks:
- sosbridge:
- ipv4_address: {{ DOCKER.containers['so-idstools'].ip }}
{% if proxy %} {% if proxy %}
- environment: - environment:
- http_proxy={{ proxy }} - http_proxy={{ proxy }}

View File

@@ -1,5 +1,6 @@
{% from 'allowed_states.map.jinja' import allowed_states %} {% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %} {% if sls in allowed_states %}
{% from 'docker/docker.map.jinja' import DOCKER %}
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
{% set GRAFANA = salt['pillar.get']('manager:grafana', '0') %} {% set GRAFANA = salt['pillar.get']('manager:grafana', '0') %}
@@ -47,6 +48,11 @@ so-influxdb:
docker_container.running: docker_container.running:
- image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-influxdb:{{ GLOBALS.so_version }} - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-influxdb:{{ GLOBALS.so_version }}
- hostname: influxdb - hostname: influxdb
- networks:
- sosbridge:
- ipv4_address: {{ DOCKER.containers['so-influxdb'].ip }}
- environment:
- INFLUXDB_HTTP_LOG_ENABLED=false
- binds: - binds:
- /opt/so/log/influxdb/:/log:rw - /opt/so/log/influxdb/:/log:rw
- /opt/so/conf/influxdb/etc/influxdb.conf:/etc/influxdb/influxdb.conf:ro - /opt/so/conf/influxdb/etc/influxdb.conf:/etc/influxdb/influxdb.conf:ro

View File

@@ -5,12 +5,10 @@
{% from 'allowed_states.map.jinja' import allowed_states %} {% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %} {% if sls in allowed_states %}
{% from 'docker/docker.map.jinja' import DOCKER %}
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
{% import_yaml 'kibana/defaults.yaml' as default_settings %} {% import_yaml 'kibana/defaults.yaml' as default_settings %}
{% set KIBANA_SETTINGS = salt['grains.filter_by'](default_settings, default='kibana', merge=salt['pillar.get']('kibana', {})) %} {% set KIBANA_SETTINGS = salt['grains.filter_by'](default_settings, default='kibana', merge=salt['pillar.get']('kibana', {})) %}
{% from 'kibana/config.map.jinja' import KIBANACONFIG with context %} {% from 'kibana/config.map.jinja' import KIBANACONFIG with context %}
# Add ES Group # Add ES Group
@@ -84,6 +82,9 @@ so-kibana:
- image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-kibana:{{ GLOBALS.so_version }} - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-kibana:{{ GLOBALS.so_version }}
- hostname: kibana - hostname: kibana
- user: kibana - user: kibana
- networks:
- sosbridge:
- ipv4_address: {{ DOCKER.containers['so-kibana'].ip }}
- environment: - environment:
- ELASTICSEARCH_HOST={{ GLOBALS.manager }} - ELASTICSEARCH_HOST={{ GLOBALS.manager }}
- ELASTICSEARCH_PORT=9200 - ELASTICSEARCH_PORT=9200

View File

@@ -5,6 +5,7 @@
{% from 'allowed_states.map.jinja' import allowed_states %} {% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %} {% if sls in allowed_states %}
{% from 'docker/docker.map.jinja' import DOCKER %}
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
# Add Kratos Group # Add Kratos Group
@@ -67,6 +68,9 @@ so-kratos:
- image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-kratos:{{ GLOBALS.so_version }} - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-kratos:{{ GLOBALS.so_version }}
- hostname: kratos - hostname: kratos
- name: so-kratos - name: so-kratos
- networks:
- sosbridge:
- ipv4_address: {{ DOCKER.containers['so-kratos'].ip }}
- binds: - binds:
- /opt/so/conf/kratos/schema.json:/kratos-conf/schema.json:ro - /opt/so/conf/kratos/schema.json:/kratos-conf/schema.json:ro
- /opt/so/conf/kratos/kratos.yaml:/kratos-conf/kratos.yaml:ro - /opt/so/conf/kratos/kratos.yaml:/kratos-conf/kratos.yaml:ro

View File

@@ -6,7 +6,7 @@
{% from 'allowed_states.map.jinja' import allowed_states %} {% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %} {% if sls in allowed_states %}
{% from 'docker/docker.map.jinja' import DOCKER %}
{% from 'logstash/map.jinja' import REDIS_NODES with context %} {% from 'logstash/map.jinja' import REDIS_NODES with context %}
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
@@ -139,6 +139,9 @@ so-logstash:
- image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-logstash:{{ GLOBALS.so_version }} - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-logstash:{{ GLOBALS.so_version }}
- hostname: so-logstash - hostname: so-logstash
- name: so-logstash - name: so-logstash
- networks:
- sosbridge:
- ipv4_address: {{ DOCKER.containers['so-logstash'].ip }}
- user: logstash - user: logstash
- extra_hosts: {{ REDIS_NODES }} - extra_hosts: {{ REDIS_NODES }}
- environment: - environment:

View File

@@ -5,8 +5,8 @@
{% from 'allowed_states.map.jinja' import allowed_states %} {% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %} {% if sls in allowed_states %}
{% from 'docker/docker.map.jinja' import DOCKER %}
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
{%- set MYSQLPASS = salt['pillar.get']('secrets:mysql') %} {%- set MYSQLPASS = salt['pillar.get']('secrets:mysql') %}
# MySQL Setup # MySQL Setup
@@ -84,6 +84,9 @@ so-mysql:
- image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-mysql:{{ GLOBALS.so_version }} - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-mysql:{{ GLOBALS.so_version }}
- hostname: so-mysql - hostname: so-mysql
- user: socore - user: socore
- networks:
- sosbridge:
- ipv4_address: {{ DOCKER.containers['so-mysql'].ip }}
- port_bindings: - port_bindings:
- 0.0.0.0:3306:3306 - 0.0.0.0:3306:3306
- environment: - environment:

View File

@@ -1,6 +1,7 @@
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'allowed_states.map.jinja' import allowed_states %} {% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %} {% if sls in allowed_states %}
{% from 'docker/docker.map.jinja' import DOCKER %}
include: include:
- ssl - ssl
@@ -83,6 +84,9 @@ so-nginx:
docker_container.running: docker_container.running:
- image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-nginx:{{ GLOBALS.so_version }} - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-nginx:{{ GLOBALS.so_version }}
- hostname: so-nginx - hostname: so-nginx
- networks:
- sosbridge:
- ipv4_address: {{ DOCKER.containers['so-nginx'].ip }}
- binds: - binds:
- /opt/so/conf/nginx/nginx.conf:/etc/nginx/nginx.conf:ro - /opt/so/conf/nginx/nginx.conf:/etc/nginx/nginx.conf:ro
- /opt/so/log/nginx/:/var/log/nginx:rw - /opt/so/log/nginx/:/var/log/nginx:rw

View File

@@ -5,8 +5,8 @@
{% from 'allowed_states.map.jinja' import allowed_states %} {% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %} {% if sls in allowed_states %}
{% from 'docker/docker.map.jinja' import DOCKER %}
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
{%- set MYSQLPASS = salt['pillar.get']('secrets:mysql') -%} {%- set MYSQLPASS = salt['pillar.get']('secrets:mysql') -%}
{%- set PLAYBOOKPASS = salt['pillar.get']('secrets:playbook_db') -%} {%- set PLAYBOOKPASS = salt['pillar.get']('secrets:playbook_db') -%}
@@ -18,7 +18,7 @@ create_playbookdbuser:
mysql_user.present: mysql_user.present:
- name: playbookdbuser - name: playbookdbuser
- password: {{ PLAYBOOKPASS }} - password: {{ PLAYBOOKPASS }}
- host: "{{ GLOBALS.docker_range.split('/')[0] }}/255.255.255.0" - host: "{{ DOCKER.sosrange.split('/')[0] }}/255.255.255.0"
- connection_host: {{ GLOBALS.manager_ip }} - connection_host: {{ GLOBALS.manager_ip }}
- connection_port: 3306 - connection_port: 3306
- connection_user: root - connection_user: root
@@ -27,7 +27,7 @@ create_playbookdbuser:
query_playbookdbuser_grants: query_playbookdbuser_grants:
mysql_query.run: mysql_query.run:
- database: playbook - database: playbook
- query: "GRANT ALL ON playbook.* TO 'playbookdbuser'@'{{ GLOBALS.docker_range.split('/')[0] }}/255.255.255.0';" - query: "GRANT ALL ON playbook.* TO 'playbookdbuser'@'{{ DOCKER.sosrange.split('/')[0] }}/255.255.255.0';"
- connection_host: {{ GLOBALS.manager_ip }} - connection_host: {{ GLOBALS.manager_ip }}
- connection_port: 3306 - connection_port: 3306
- connection_user: root - connection_user: root
@@ -80,6 +80,9 @@ so-playbook:
- image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-playbook:{{ GLOBALS.so_version }} - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-playbook:{{ GLOBALS.so_version }}
- hostname: playbook - hostname: playbook
- name: so-playbook - name: so-playbook
- networks:
- sosbridge:
- ipv4_address: {{ DOCKER.containers['so-playbook'].ip }}
- binds: - binds:
- /opt/so/log/playbook:/playbook/log:rw - /opt/so/log/playbook:/playbook/log:rw
- environment: - environment:

View File

@@ -5,7 +5,7 @@
{% from 'allowed_states.map.jinja' import allowed_states %} {% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %} {% if sls in allowed_states %}
{% from 'docker/docker.map.jinja' import DOCKER %}
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
include: include:
@@ -46,6 +46,9 @@ so-redis:
- image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-redis:{{ GLOBALS.so_version }} - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-redis:{{ GLOBALS.so_version }}
- hostname: so-redis - hostname: so-redis
- user: socore - user: socore
- networks:
- sosbridge:
- ipv4_address: {{ DOCKER.containers['so-redis'].ip }}
- port_bindings: - port_bindings:
- 0.0.0.0:6379:6379 - 0.0.0.0:6379:6379
- 0.0.0.0:9696:9696 - 0.0.0.0:9696:9696

View File

@@ -1,5 +1,6 @@
{% from 'allowed_states.map.jinja' import allowed_states %} {% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %} {% if sls in allowed_states %}
{% from 'docker/docker.map.jinja' import DOCKER %}
include: include:
- ssl - ssl
@@ -37,6 +38,9 @@ so-dockerregistry:
docker_container.running: docker_container.running:
- image: ghcr.io/security-onion-solutions/registry:latest - image: ghcr.io/security-onion-solutions/registry:latest
- hostname: so-registry - hostname: so-registry
- networks:
- sosbridge:
- ipv4_address: {{ DOCKER.containers['so-dockerregistry'].ip }}
- restart_policy: always - restart_policy: always
- port_bindings: - port_bindings:
- 0.0.0.0:5000:5000 - 0.0.0.0:5000:5000

View File

@@ -1,5 +1,6 @@
{% import_yaml 'soc/defaults.yaml' as SOCDEFAULTS %} {% import_yaml 'soc/defaults.yaml' as SOCDEFAULTS %}
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'docker/docker.map.jinja' import DOCKER -%}
{% for module, application_url in GLOBALS.application_urls.items() %} {% for module, application_url in GLOBALS.application_urls.items() %}
{% do SOCDEFAULTS.soc.server.modules[module].update({'hostUrl': application_url}) %} {% do SOCDEFAULTS.soc.server.modules[module].update({'hostUrl': application_url}) %}
@@ -18,7 +19,7 @@
{% do SOCDEFAULTS.soc.server.modules.influxdb.update({'hostUrl': 'https://' ~ GLOBALS.influxdb_host ~ ':8086'}) %} {% do SOCDEFAULTS.soc.server.modules.influxdb.update({'hostUrl': 'https://' ~ GLOBALS.influxdb_host ~ ':8086'}) %}
{% endif %} {% endif %}
{% do SOCDEFAULTS.soc.server.modules.statickeyauth.update({'anonymousCidr': GLOBALS.docker_range, 'apiKey': pillar.sensoroni.sensoronikey}) %} {% do SOCDEFAULTS.soc.server.modules.statickeyauth.update({'anonymousCidr': DOCKER.sosrange, 'apiKey': pillar.sensoroni.sensoronikey}) %}
{% do SOCDEFAULTS.soc.server.client.case.update({'analyzerNodeId': GLOBALS.minion_id}) %} {% do SOCDEFAULTS.soc.server.client.case.update({'analyzerNodeId': GLOBALS.minion_id}) %}

View File

@@ -2,6 +2,7 @@
{% if sls in allowed_states %} {% if sls in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'docker/docker.map.jinja' import DOCKER %}
include: include:
- manager.sync_es_users - manager.sync_es_users
@@ -95,6 +96,9 @@ so-soc:
- image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-soc:{{ GLOBALS.so_version }} - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-soc:{{ GLOBALS.so_version }}
- hostname: soc - hostname: soc
- name: so-soc - name: so-soc
- networks:
- sosbridge:
- ipv4_address: {{ DOCKER.containers['so-soc'].ip }}
- binds: - binds:
- /nsm/soc/jobs:/opt/sensoroni/jobs:rw - /nsm/soc/jobs:/opt/sensoroni/jobs:rw
- /opt/so/log/soc/:/opt/sensoroni/logs/:rw - /opt/so/log/soc/:/opt/sensoroni/logs/:rw

View File

@@ -1,6 +1,6 @@
{% from 'allowed_states.map.jinja' import allowed_states %} {% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %} {% if sls in allowed_states %}
{% from 'docker/docker.map.jinja' import DOCKER %}
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
include: include:
@@ -63,6 +63,9 @@ so-soctopus:
- image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-soctopus:{{ GLOBALS.so_version }} - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-soctopus:{{ GLOBALS.so_version }}
- hostname: soctopus - hostname: soctopus
- name: so-soctopus - name: so-soctopus
- networks:
- sosbridge:
- ipv4_address: {{ DOCKER.containers['so-soctopus'].ip }}
- binds: - binds:
- /opt/so/conf/soctopus/SOCtopus.conf:/SOCtopus/SOCtopus.conf:ro - /opt/so/conf/soctopus/SOCtopus.conf:/SOCtopus/SOCtopus.conf:ro
- /opt/so/log/soctopus/:/var/log/SOCtopus/:rw - /opt/so/log/soctopus/:/var/log/SOCtopus/:rw

View File

@@ -5,7 +5,7 @@
{% from 'allowed_states.map.jinja' import allowed_states %} {% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %} {% if sls in allowed_states %}
{% from 'docker/docker.map.jinja' import DOCKER %}
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
{% set STRELKA_RULES = salt['pillar.get']('strelka:rules', '1') %} {% set STRELKA_RULES = salt['pillar.get']('strelka:rules', '1') %}
{% import_yaml 'strelka/defaults.yaml' as strelka_config with context %} {% import_yaml 'strelka/defaults.yaml' as strelka_config with context %}
@@ -168,6 +168,9 @@ strelka_coordinator:
docker_container.running: docker_container.running:
- image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-redis:{{ GLOBALS.so_version }} - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-redis:{{ GLOBALS.so_version }}
- name: so-strelka-coordinator - name: so-strelka-coordinator
- networks:
- sosbridge:
- ipv4_address: {{ DOCKER.containers['so-strelka-coordinator'].ip }}
- entrypoint: redis-server --save "" --appendonly no - entrypoint: redis-server --save "" --appendonly no
- port_bindings: - port_bindings:
- 0.0.0.0:6380:6379 - 0.0.0.0:6380:6379
@@ -181,6 +184,9 @@ strelka_gatekeeper:
docker_container.running: docker_container.running:
- image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-redis:{{ GLOBALS.so_version }} - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-redis:{{ GLOBALS.so_version }}
- name: so-strelka-gatekeeper - name: so-strelka-gatekeeper
- networks:
- sosbridge:
- ipv4_address: {{ DOCKER.containers['so-strelka-gatekeeper'].ip }}
- entrypoint: redis-server --save "" --appendonly no --maxmemory-policy allkeys-lru - entrypoint: redis-server --save "" --appendonly no --maxmemory-policy allkeys-lru
- port_bindings: - port_bindings:
- 0.0.0.0:6381:6379 - 0.0.0.0:6381:6379
@@ -198,6 +204,9 @@ strelka_frontend:
- /nsm/strelka/log/:/var/log/strelka/:rw - /nsm/strelka/log/:/var/log/strelka/:rw
- privileged: True - privileged: True
- name: so-strelka-frontend - name: so-strelka-frontend
- networks:
- sosbridge:
- ipv4_address: {{ DOCKER.containers['so-strelka-frontend'].ip }}
- command: strelka-frontend - command: strelka-frontend
- port_bindings: - port_bindings:
- 0.0.0.0:57314:57314 - 0.0.0.0:57314:57314
@@ -214,6 +223,9 @@ strelka_backend:
- /opt/so/conf/strelka/backend/:/etc/strelka/:ro - /opt/so/conf/strelka/backend/:/etc/strelka/:ro
- /opt/so/conf/strelka/rules/:/etc/yara/:ro - /opt/so/conf/strelka/rules/:/etc/yara/:ro
- name: so-strelka-backend - name: so-strelka-backend
- networks:
- sosbridge:
- ipv4_address: {{ DOCKER.containers['so-strelka-backend'].ip }}
- command: strelka-backend - command: strelka-backend
- restart_policy: on-failure - restart_policy: on-failure
@@ -228,6 +240,9 @@ strelka_manager:
- binds: - binds:
- /opt/so/conf/strelka/manager/:/etc/strelka/:ro - /opt/so/conf/strelka/manager/:/etc/strelka/:ro
- name: so-strelka-manager - name: so-strelka-manager
- networks:
- sosbridge:
- ipv4_address: {{ DOCKER.containers['so-strelka-manager'].ip }}
- command: strelka-manager - command: strelka-manager
append_so-strelka-manager_so-status.conf: append_so-strelka-manager_so-status.conf:
@@ -242,6 +257,9 @@ strelka_filestream:
- /opt/so/conf/strelka/filestream/:/etc/strelka/:ro - /opt/so/conf/strelka/filestream/:/etc/strelka/:ro
- /nsm/strelka:/nsm/strelka - /nsm/strelka:/nsm/strelka
- name: so-strelka-filestream - name: so-strelka-filestream
- networks:
- sosbridge:
- ipv4_address: {{ DOCKER.containers['so-strelka-filestream'].ip }}
- command: strelka-filestream - command: strelka-filestream
append_so-strelka-filestream_so-status.conf: append_so-strelka-filestream_so-status.conf:

View File

@@ -24,7 +24,6 @@
'url_base': INIT.PILLAR.global.url_base, 'url_base': INIT.PILLAR.global.url_base,
'so_model': INIT.GRAINS.get('sosmodel',''), 'so_model': INIT.GRAINS.get('sosmodel',''),
'description': INIT.PILLAR.sensoroni.get('node_description',''), 'description': INIT.PILLAR.sensoroni.get('node_description',''),
'docker_range': INIT.PILLAR.docker.range,
'sensoroni_key': INIT.PILLAR.sensoroni.sensoronikey, 'sensoroni_key': INIT.PILLAR.sensoroni.sensoronikey,
'os': INIT.GRAINS.os, 'os': INIT.GRAINS.os,
'application_urls': {}, 'application_urls': {},

View File

@@ -254,11 +254,16 @@ collect_dns_domain() {
collect_dockernet() { collect_dockernet() {
if ! whiptail_dockernet_check; then if ! whiptail_dockernet_check; then
whiptail_dockernet_net "172.17.0.0" whiptail_dockernet_sosnet "172.17.1.0"
whiptail_dockernet_nososnet "172.17.0.0"
while ! valid_ip4 "$DOCKERNET"; do while ! valid_ip4 "$DOCKERNET"; do
whiptail_invalid_input whiptail_invalid_input
whiptail_dockernet_net "$DOCKERNET" whiptail_dockernet_nonsosnet "$DOCKERNET"
done
while ! valid_ip4 "$DOCKERNET2"; do
whiptail_invalid_input
whiptail_dockernet_sosnet "$DOCKERNET2"
done done
fi fi
} }
@@ -614,7 +619,7 @@ configure_minion() {
"log_file: /opt/so/log/salt/minion" >> "$minion_config" "log_file: /opt/so/log/salt/minion" >> "$minion_config"
cp -f ../salt/salt/etc/minion.d/mine_functions.conf.jinja /etc/salt/minion.d/mine_functions.conf cp -f ../salt/salt/etc/minion.d/mine_functions.conf.jinja /etc/salt/minion.d/mine_functions.conf
sed -i "s/{{ GLOBALS.main_interface }}/$MAININT/" /etc/salt/minion.d/mine_functions.conf sed -i "s/{{ GLOBALS.main_interface }}/$MNIC/" /etc/salt/minion.d/mine_functions.conf
{ {
systemctl restart salt-minion; systemctl restart salt-minion;
@@ -996,6 +1001,9 @@ docker_registry() {
if [ -z "$DOCKERNET" ]; then if [ -z "$DOCKERNET" ]; then
DOCKERNET=172.17.0.0 DOCKERNET=172.17.0.0
fi fi
if [ -z "$DOCKERNET2" ]; then
DOCKERNET2=172.17.1.0
fi
# Make the host use the manager docker registry # Make the host use the manager docker registry
DNETBIP=$(echo $DOCKERNET | awk -F'.' '{print $1,$2,$3,1}' OFS='.')/24 DNETBIP=$(echo $DOCKERNET | awk -F'.' '{print $1,$2,$3,1}' OFS='.')/24
if [ -n "$TURBO" ]; then local proxy="$TURBO"; else local proxy="https://$MSRV"; fi if [ -n "$TURBO" ]; then local proxy="$TURBO"; else local proxy="https://$MSRV"; fi
@@ -1410,9 +1418,12 @@ create_global() {
if [ -z "$DOCKERNET" ]; then if [ -z "$DOCKERNET" ]; then
DOCKERNET=172.17.0.0 DOCKERNET=172.17.0.0
DOCKERNET2=172.17.1.0
DOCKERBIP=$(echo $DOCKERNET | awk -F'.' '{print $1,$2,$3,1}' OFS='.')/24 DOCKERBIP=$(echo $DOCKERNET | awk -F'.' '{print $1,$2,$3,1}' OFS='.')/24
DOCKER2BIP=$(echo $DOCKERNET2 | awk -F'.' '{print $1,$2,$3,1}' OFS='.')
else else
DOCKERBIP=$(echo $DOCKERNET | awk -F'.' '{print $1,$2,$3,1}' OFS='.')/24 DOCKERBIP=$(echo $DOCKERNET | awk -F'.' '{print $1,$2,$3,1}' OFS='.')/24
DOCKER2BIP=$(echo $DOCKERNET2 | awk -F'.' '{print $1,$2,$3,1}' OFS='.')
fi fi
if [ -f "$global_pillar_file" ]; then if [ -f "$global_pillar_file" ]; then
@@ -1497,6 +1508,8 @@ docker_pillar() {
touch $adv_docker_pillar_file touch $adv_docker_pillar_file
printf '%s\n'\ printf '%s\n'\
"docker:"\ "docker:"\
" sosrange: '$DOCKERNET2/24'"\
" sosbip: '$DOCKER2BIP'"\
" range: '$DOCKERNET/24'"\ " range: '$DOCKERNET/24'"\
" bip: '$DOCKERBIP'" > $docker_pillar_file " bip: '$DOCKERBIP'" > $docker_pillar_file
} }
@@ -1922,14 +1935,6 @@ securityonion_repo() {
repo_sync_local() { repo_sync_local() {
# Sync the repo from the the SO repo locally. # Sync the repo from the the SO repo locally.
# Check for reposync # Check for reposync
REPOSYNC=$(rpm -qa | grep createrepo | wc -l)
if [[ ! "$REPOSYNC" -gt 0 ]]; then
# Install reposync
info "Installing createrepo"
logCmd "yum -y install yum-utils createrepo"
else
info "We have what we need to sync"
fi
info "Backing up old repos" info "Backing up old repos"
mkdir -p /nsm/repo mkdir -p /nsm/repo
mkdir -p /root/reposync_cache mkdir -p /root/reposync_cache
@@ -1953,6 +1958,15 @@ repo_sync_local() {
echo "gpgcheck=1" >> /root/repodownload.conf echo "gpgcheck=1" >> /root/repodownload.conf
echo "gpgkey=https://repo.securityonion.net/file/securityonion-repo/keys/securityonion.pub" >> /root/repodownload.conf echo "gpgkey=https://repo.securityonion.net/file/securityonion-repo/keys/securityonion.pub" >> /root/repodownload.conf
REPOSYNC=$(rpm -qa | grep createrepo | wc -l)
if [[ ! "$REPOSYNC" -gt 0 ]]; then
# Install reposync
info "Installing createrepo"
logCmd "yum -y install -c /root/repodownload.conf yum-utils createrepo"
else
info "We have what we need to sync"
fi
logCmd "reposync --norepopath -n -g -l -d -m -c /root/repodownload.conf -r securityonionsync --download-metadata -p /nsm/repo/" logCmd "reposync --norepopath -n -g -l -d -m -c /root/repodownload.conf -r securityonionsync --download-metadata -p /nsm/repo/"

View File

@@ -317,6 +317,7 @@ if ! [[ -f $install_opt_file ]]; then
check_elastic_license check_elastic_license
check_requirements "manager" check_requirements "manager"
networking_needful networking_needful
collect_dockernet
whiptail_airgap whiptail_airgap
detect_cloud detect_cloud
set_minion_info set_minion_info
@@ -336,6 +337,7 @@ if ! [[ -f $install_opt_file ]]; then
check_elastic_license check_elastic_license
check_requirements "manager" check_requirements "manager"
networking_needful networking_needful
collect_dockernet
whiptail_airgap whiptail_airgap
detect_cloud detect_cloud
set_minion_info set_minion_info
@@ -354,6 +356,7 @@ if ! [[ -f $install_opt_file ]]; then
waitforstate=true waitforstate=true
check_requirements "manager" check_requirements "manager"
networking_needful networking_needful
collect_dockernet
whiptail_airgap whiptail_airgap
detect_cloud detect_cloud
set_default_log_size >> $setup_log 2>&1 set_default_log_size >> $setup_log 2>&1
@@ -370,6 +373,7 @@ if ! [[ -f $install_opt_file ]]; then
waitforstate=true waitforstate=true
check_requirements "manager" check_requirements "manager"
networking_needful networking_needful
collect_dockernet
whiptail_airgap whiptail_airgap
detect_cloud detect_cloud
set_default_log_size >> $setup_log 2>&1 set_default_log_size >> $setup_log 2>&1
@@ -548,6 +552,7 @@ if ! [[ -f $install_opt_file ]]; then
generate_ca generate_ca
generate_ssl generate_ssl
logCmd "salt-call state.apply -l info firewall"
# create these so the registry state can add so-registry to /opt/so/conf/so-status/so-status.conf # create these so the registry state can add so-registry to /opt/so/conf/so-status/so-status.conf
logCmd "mkdir -p /opt/so/conf/so-status/ " logCmd "mkdir -p /opt/so/conf/so-status/ "
@@ -560,7 +565,6 @@ if ! [[ -f $install_opt_file ]]; then
docker_seed_registry docker_seed_registry
title "Applying the manager state" title "Applying the manager state"
logCmd "salt-call state.apply -l info manager" logCmd "salt-call state.apply -l info manager"
logCmd "salt-call state.apply -l info firewall"
logCmd "salt-call state.highstate -l info" logCmd "salt-call state.highstate -l info"
add_web_user add_web_user
info "Restarting SOC to pick up initial user" info "Restarting SOC to pick up initial user"

View File

@@ -325,12 +325,24 @@ whiptail_dockernet_check(){
} }
whiptail_dockernet_net() { whiptail_dockernet_sosnet() {
[ -n "$TESTING" ] && return
DOCKERNET2=$(whiptail --title "$whiptail_title" --inputbox \
"\nEnter a /24 size network range for SOS containers to use WITHOUT the /24 suffix. This range will be used on ALL nodes." 11 65 "$1" 3>&1 1>&2 2>&3)
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
}
whiptail_dockernet_nososnet() {
[ -n "$TESTING" ] && return [ -n "$TESTING" ] && return
DOCKERNET=$(whiptail --title "$whiptail_title" --inputbox \ DOCKERNET=$(whiptail --title "$whiptail_title" --inputbox \
"\nEnter a /24 size network range for docker to use WITHOUT the /24 suffix. This range will be used on ALL nodes." 11 65 "$1" 3>&1 1>&2 2>&3) "\nEnter a /24 size network range for NON SOS containers to use WITHOUT the /24 suffix. This range will be used on ALL nodes." 11 65 "$1" 3>&1 1>&2 2>&3)
local exitstatus=$? local exitstatus=$?
whiptail_check_exitstatus $exitstatus whiptail_check_exitstatus $exitstatus