Merge branch '2.4/firewall' into 2.4createrepoinstall

Josh Patterson, 2023-01-10 11:38:31 -05:00 (committed via GitHub)
37 changed files with 413 additions and 183 deletions


@@ -1,12 +1,14 @@
{%- set DOCKERRANGE = salt['pillar.get']('docker:range', '172.17.0.0/24') %}
{%- set DOCKERBIND = salt['pillar.get']('docker:bip', '172.17.0.1/24') %}
{
"registry-mirrors": [ "https://:5000" ],
"bip": "{{ DOCKERBIND }}",
"default-address-pools": [
{
"base" : "{{ DOCKERRANGE }}",
"size" : 24
}
]
"registry-mirrors": [
"https://:5000"
],
"bip": "{{ DOCKERBIND }}",
"default-address-pools": [
{
"base": "{{ DOCKERRANGE }}",
"size": 24
}
]
}
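
For reference, with the pillar defaults above (docker:bip 172.17.0.1/24, docker:range 172.17.0.0/24) the template renders a daemon.json that pins Docker's bridge address and default address pool. A quick sanity check on a node, assuming the rendered file lands at the standard /etc/docker/daemon.json path:

    # Parse-check the rendered file before restarting Docker
    python3 -m json.tool /etc/docker/daemon.json
    # Confirm the running daemon picked up the bridge IP
    ip -4 addr show docker0 | grep inet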


@@ -49,33 +49,30 @@ fi
case "$ROLE" in
'MANAGER')
-so-firewall includehost manager "$IP"
-so-firewall --apply includehost minion "$IP"
+so-firewall --role=manager --ip="$IP"
;;
'EVAL' | 'MANAGERSEARCH' | 'STANDALONE' | 'IMPORT')
-so-firewall includehost manager "$IP"
-so-firewall includehost minion "$IP"
-so-firewall includehost sensor "$IP"
-so-firewall --apply includehost search_node "$IP"
+so-firewall --role=manager --ip="$IP"
+so-firewall --role=sensors --ip="$IP"
+so-firewall --apply --role=searchnodes --ip="$IP"
;;
'SENSOR' | 'SEARCHNODE' | 'HEAVYNODE' | 'IDH' | 'RECEIVER')
so-firewall includehost minion "$IP"
case "$ROLE" in
'SENSOR')
-so-firewall --apply includehost sensor "$IP"
+so-firewall --apply --role=sensors --ip="$IP"
;;
'SEARCHNODE')
-so-firewall --apply includehost search_node "$IP"
+so-firewall --apply --role=searchnodes --ip="$IP"
;;
'HEAVYNODE')
-so-firewall includehost sensor "$IP"
-so-firewall --apply includehost heavy_node "$IP"
+so-firewall --role=sensors --ip="$IP"
+so-firewall --apply --role=heavynodes --ip="$IP"
;;
'IDH')
-so-firewall --apply includehost beats_endpoint_ssl "$IP"
+so-firewall --apply --role=beats_endpoint_ssl --ip="$IP"
;;
'RECEIVER')
-so-firewall --apply includehost receiver "$IP"
+so-firewall --apply --role=receivers --ip="$IP"
;;
esac
;;
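
The pattern in this hunk: per-hostgroup includehost calls give way to role flags, with --apply reserved for the last call in each branch so the ruleset is rebuilt only once. A minimal sketch of the new invocation style, assuming so-firewall accepts the flags exactly as shown above:

    IP=10.0.0.42   # hypothetical minion address
    so-firewall --role=sensors --ip="$IP"               # stage a hostgroup change
    so-firewall --apply --role=searchnodes --ip="$IP"   # final call applies the rules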


@@ -206,7 +206,7 @@ function createSTANDALONE() {
}
function testConnection() {
-retry 15 3 "salt '$MINION_ID' test.ping" 0
+retry 15 3 "salt '$MINION_ID' test.ping" True
local ret=$?
if [[ $ret != 0 ]]; then
echo "The Minion has been accepted but is not online. Try again later"


@@ -6,6 +6,7 @@
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'docker/docker.map.jinja' import DOCKER %}
{% from "curator/map.jinja" import CURATOROPTIONS %}
{% from "curator/map.jinja" import CURATORMERGED %}
{% set REMOVECURATORCRON = False %}
@@ -128,6 +129,9 @@ so-curator:
- hostname: curator
- name: so-curator
- user: curator
- networks:
- sosbridge:
- ipv4_address: {{ DOCKER.containers['so-curator'].ip }}
- interactive: True
- tty: True
- binds:

salt/docker/defaults.yaml (new file)

@@ -0,0 +1,84 @@
docker:
bip: '172.17.0.1'
range: '172.17.0.0/24'
sosrange: '172.17.1.0/24'
sosbip: '172.17.1.1'
containers:
'so-dockerregistry':
final_octet: 20
ports:
5000: tcp
'so-elastic-fleet':
final_octet: 21
'so-elasticsearch':
final_octet: 22
ports:
9200: tcp
9300: tcp
'so-filebeat':
final_octet: 23
'so-grafana':
final_octet: 24
ports:
3000: tcp
'so-idstools':
final_octet: 25
'so-influxdb':
final_octet: 26
ports:
8086: tcp
'so-kibana':
final_octet: 27
ports:
5601: tcp
'so-kratos':
final_octet: 28
ports:
4433: tcp
4434: tcp
'so-logstash':
final_octet: 29
'so-mysql':
final_octet: 30
ports:
3306: tcp
'so-nginx':
final_octet: 31
ports:
80: tcp
443: tcp
'so-playbook':
final_octet: 32
'so-redis':
final_octet: 33
ports:
6379: tcp
9696: tcp
'so-soc':
final_octet: 34
ports:
9822: tcp
'so-soctopus':
final_octet: 35
ports:
7000: tcp
'so-strelka-backend':
final_octet: 36
'so-strelka-filestream':
final_octet: 37
'so-strelka-frontend':
final_octet: 38
'so-strelka-manager':
final_octet: 39
'so-strelka-gatekeeper':
final_octet: 40
'so-strelka-coordinator':
final_octet: 41
'so-elastalert':
final_octet: 42
'so-curator':
final_octet: 43
'so-elastic-fleet-package-registry':
final_octet: 44
ports:
8080: tcp
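
Because docker.map.jinja (below) merges this file with the docker pillar using merge=True, a deployment can override a single key, such as sosrange or one container's final_octet, without copying the whole structure. Standard Salt CLI checks of the effective values:

    salt-call pillar.get docker:sosrange             # empty output means the default above applies
    salt-call pillar.get docker:containers:so-nginx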


@@ -0,0 +1,8 @@
{% import_yaml 'docker/defaults.yaml' as DOCKERDEFAULTS %}
{% set DOCKER = salt['pillar.get']('docker', DOCKERDEFAULTS.docker, merge=True) %}
{% set RANGESPLIT = DOCKER.sosrange.split('.') %}
{% set FIRSTTHREE = RANGESPLIT[0] ~ '.' ~ RANGESPLIT[1] ~ '.' ~ RANGESPLIT[2] ~ '.' %}
{% for container, vals in DOCKER.containers.items() %}
{% do DOCKER.containers[container].update({'ip': FIRSTTHREE ~ DOCKER.containers[container].final_octet}) %}
{% endfor %}
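
The map derives each container IP by joining the first three octets of sosrange to the container's final_octet: with the default sosrange of 172.17.1.0/24, so-elasticsearch (final_octet 22) becomes 172.17.1.22. The same arithmetic in shell, plus a way to render the map on a minion with the stock slsutil module (the exact invocation is an assumption):

    sosrange=172.17.1.0/24
    final_octet=22
    echo "${sosrange%.*}.${final_octet}"   # -> 172.17.1.22
    salt-call --local slsutil.renderer salt://docker/docker.map.jinja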


@@ -0,0 +1,3 @@
[Service]
ExecStart=
ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock --iptables=false
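
This is the standard systemd drop-in pattern: the empty ExecStart= clears the packaged command so the next line can redefine it with --iptables=false, handing all firewall management to the salt-rendered ruleset. To verify the override took effect after a daemon-reload:

    systemctl cat docker | grep -B2 'iptables=false'       # drop-in path appears as a comment header
    ps -o args= -C dockerd | tr ' ' '\n' | grep -- --iptables   # flags of the running daemon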


@@ -3,6 +3,8 @@
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'docker/docker.map.jinja' import DOCKER %}
dockergroup:
group.present:
- name: docker
@@ -18,6 +20,17 @@ dockerheldpackages:
- hold: True
- update_holds: True
#disable docker from managing iptables
iptables_disabled:
file.managed:
- name: /etc/systemd/system/docker.service.d/iptables-disabled.conf
- source: salt://docker/files/iptables-disabled.conf
- makedirs: True
cmd.run:
- name: systemctl daemon-reload
- onchanges:
- file: iptables_disabled
# Make sure etc/docker exists
dockeretc:
file.directory:
@@ -50,3 +63,15 @@ dockerreserveports:
- source: salt://common/files/99-reserved-ports.conf
- name: /etc/sysctl.d/99-reserved-ports.conf
sos_docker_net:
docker_network.present:
- name: sosbridge
- subnet: {{ DOCKER.sosrange }}
- gateway: {{ DOCKER.sosbip }}
- options:
com.docker.network.bridge.name: 'sosbridge'
com.docker.network.driver.mtu: '1500'
com.docker.network.bridge.enable_ip_masquerade: 'true'
com.docker.network.bridge.enable_icc: 'true'
com.docker.network.bridge.host_binding_ipv4: '0.0.0.0'
- unless: 'docker network ls | grep sosbridge'
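
sos_docker_net creates the dedicated SOS bridge with a fixed subnet and gateway, and the unless guard skips the state when the network already exists. A roughly equivalent manual command, filled in with the defaults from docker/defaults.yaml:

    docker network create \
      --subnet 172.17.1.0/24 --gateway 172.17.1.1 \
      -o com.docker.network.bridge.name=sosbridge \
      -o com.docker.network.driver.mtu=1500 \
      -o com.docker.network.bridge.enable_ip_masquerade=true \
      -o com.docker.network.bridge.enable_icc=true \
      -o com.docker.network.bridge.host_binding_ipv4=0.0.0.0 \
      sosbridge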


@@ -5,6 +5,7 @@
{% if sls in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'docker/docker.map.jinja' import DOCKER %}
{% from 'elastalert/elastalert_config.map.jinja' import ELASTALERT as elastalert_config with context %}
# Create the group
@@ -86,6 +87,9 @@ so-elastalert:
- hostname: elastalert
- name: so-elastalert
- user: so-elastalert
- networks:
- sosbridge:
- ipv4_address: {{ DOCKER.containers['so-elastalert'].ip }}
- detach: True
- binds:
- /opt/so/rules/elastalert:/opt/elastalert/rules/:ro


@@ -4,6 +4,7 @@
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'docker/docker.map.jinja' import DOCKER %}
# Add Group
elasticsagentprgroup:
@@ -27,6 +28,9 @@ so-elastic-fleet-package-registry:
- hostname: Fleet-package-reg-{{ GLOBALS.hostname }}
- detach: True
- user: 948
- networks:
- sosbridge:
- ipv4_address: {{ DOCKER.containers['so-elastic-fleet-package-registry'].ip }}
- extra_hosts:
- {{ GLOBALS.hostname }}:{{ GLOBALS.node_ip }}
- port_bindings:


@@ -4,6 +4,7 @@
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'docker/docker.map.jinja' import DOCKER %}
# These values are generated during node install and stored in minion pillar
{% set SERVICETOKEN = salt['pillar.get']('elasticfleet:server:es_token','') %}
@@ -47,6 +48,9 @@ so-elastic-fleet:
- hostname: Fleet-{{ GLOBALS.hostname }}
- detach: True
- user: 947
- networks:
- sosbridge:
- ipv4_address: {{ DOCKER.containers['so-elastic-fleet'].ip }}
- extra_hosts:
- {{ GLOBALS.hostname }}:{{ GLOBALS.node_ip }}
- port_bindings:
@@ -77,4 +81,4 @@ append_so-elastic-fleet_so-status.conf:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}


@@ -10,6 +10,7 @@ include:
- ssl
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'docker/docker.map.jinja' import DOCKER %}
{% set TEMPLATES = salt['pillar.get']('elasticsearch:templates', {}) %}
{% set ROLES = salt['pillar.get']('elasticsearch:roles', {}) %}
{% from 'elasticsearch/config.map.jinja' import ESCONFIG with context %}
@@ -289,6 +290,9 @@ so-elasticsearch:
- hostname: elasticsearch
- name: so-elasticsearch
- user: elasticsearch
- networks:
- sosbridge:
- ipv4_address: {{ DOCKER.containers['so-elasticsearch'].ip }}
- extra_hosts: {{ REDIS_NODES }}
- environment:
{% if REDIS_NODES | length == 1 %}


@@ -5,6 +5,7 @@
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %}
{% from 'docker/docker.map.jinja' import DOCKER %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'filebeat/modules.map.jinja' import MODULESMERGED with context %}
{% from 'filebeat/modules.map.jinja' import MODULESENABLED with context %}
@@ -97,6 +98,9 @@ so-filebeat:
- image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-filebeat:{{ GLOBALS.so_version }}
- hostname: so-filebeat
- user: root
- networks:
- sosbridge:
- ipv4_address: {{ DOCKER.containers['so-filebeat'].ip }}
- extra_hosts: {{ FILEBEAT_EXTRA_HOSTS }}
- binds:
- /nsm:/nsm:ro


@@ -0,0 +1,15 @@
{% set NODE_CONTAINERS = [
'so-curator',
'so-dockerregistry',
'so-elasticsearch',
'so-elastic-fleet-package-registry',
'so-grafana',
'so-influxdb',
'so-kibana',
'so-kratos',
'so-mysql',
'so-nginx',
'so-redis',
'so-soc',
'so-soctopus'
] %}


@@ -1,4 +1,4 @@
-{%- set DNET = salt['pillar.get']('global:dockernet', '172.17.0.0') %}
+{% from 'docker/docker.map.jinja' import DOCKER %}
firewall:
hostgroups:
anywhere:
@@ -10,7 +10,7 @@ firewall:
ips:
delete:
insert:
-  - {{ DNET }}/24
+  - {{ DOCKER.sosrange }}
localhost:
ips:
delete:


@@ -1,142 +1,23 @@
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %}
# Firewall Magic for the grid
{% from 'firewall/map.jinja' import hostgroups with context %}
{% from 'firewall/map.jinja' import assigned_hostgroups with context %}
create_sysconfig_iptables:
file.touch:
- name: /etc/sysconfig/iptables
- makedirs: True
- unless: 'ls /etc/sysconfig/iptables'
# Quick Fix for Docker being difficult
iptables_fix_docker:
iptables.chain_present:
- name: DOCKER-USER
- table: filter
iptables_config:
file.managed:
- name: /etc/sysconfig/iptables
- source: salt://firewall/iptables.jinja
- template: jinja
# Add the Forward Rule since Docker ripped it out
iptables_fix_fwd:
iptables.insert:
- table: filter
- chain: FORWARD
- jump: ACCEPT
- position: 1
- target: DOCKER-USER
# I like pings
iptables_allow_pings:
iptables.append:
- table: filter
- chain: INPUT
- jump: ACCEPT
- proto: icmp
# Create the chain for logging
iptables_LOGGING_chain:
iptables.chain_present:
- name: LOGGING
- table: filter
- family: ipv4
iptables_LOGGING_limit:
iptables.append:
- table: filter
- chain: LOGGING
- match: limit
- jump: LOG
- limit: 2/min
- log-level: 4
- log-prefix: "IPTables-dropped: "
# Make the input policy send stuff that doesn't match to be logged and dropped
iptables_log_input_drops:
iptables.append:
- table: filter
- chain: INPUT
- jump: LOGGING
# Enable global DOCKER-USER block rule
enable_docker_user_fw_policy:
iptables.insert:
- table: filter
- chain: DOCKER-USER
- jump: LOGGING
- in-interface: '!docker0'
- out-interface: docker0
- position: 1
{% set count = namespace(value=0) %}
{% for chain, hg in assigned_hostgroups.chain.items() %}
{% for hostgroup, portgroups in assigned_hostgroups.chain[chain].hostgroups.items() %}
{% for action in ['insert', 'delete' ] %}
{% if hostgroups[hostgroup].ips[action] %}
{% for ip in hostgroups[hostgroup].ips[action] %}
{% for portgroup in portgroups.portgroups %}
{% for proto, ports in portgroup.items() %}
{% for port in ports %}
{% set count.value = count.value + 1 %}
{{action}}_{{chain}}_{{hostgroup}}_{{ip}}_{{port}}_{{proto}}_{{count.value}}:
iptables.{{action}}:
- table: filter
- chain: {{ chain }}
- jump: ACCEPT
- proto: {{ proto }}
- source: {{ ip }}
- dport: {{ port }}
{% if action == 'insert' %}
- position: 1
{% endif %}
{% endfor %}
{% endfor %}
{% endfor %}
{% endfor %}
{% endif %}
{% endfor %}
{% endfor %}
{% endfor %}
# Allow related/established sessions
iptables_allow_established:
iptables.insert:
- table: filter
- chain: INPUT
- jump: ACCEPT
- position: 1
- match: conntrack
- ctstate: 'RELATED,ESTABLISHED'
enable_docker_user_established:
iptables.insert:
- table: filter
- chain: DOCKER-USER
- jump: ACCEPT
- in-interface: '!docker0'
- out-interface: docker0
- position: 1
- match: conntrack
- ctstate: 'RELATED,ESTABLISHED'
# Block icmp timestamp response
block_icmp_timestamp_reply:
iptables.append:
- table: filter
- chain: OUTPUT
- jump: DROP
- proto: icmp
- icmp-type: 'timestamp-reply'
# Make the input policy send stuff that doesn't match to be logged and dropped
iptables_drop_all_the_things:
iptables.append:
- table: filter
- chain: LOGGING
- jump: DROP
- save: True
iptables_restore:
cmd.run:
- name: iptables-restore < /etc/sysconfig/iptables
- onchanges:
- file: iptables_config
{% else %}
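
The net effect of this hunk: the old file's individual iptables.* states are removed, and the full ruleset is instead rendered from salt://firewall/iptables.jinja into /etc/sysconfig/iptables and loaded with iptables-restore. A safe way to exercise that path by hand:

    salt-call state.apply firewall test=True            # preview what the template would change
    iptables-restore --test < /etc/sysconfig/iptables   # parse-check the rendered file without loading it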


@@ -0,0 +1,90 @@
{% from 'docker/docker.map.jinja' import DOCKER -%}
{% from 'firewall/containers.map.jinja' import NODE_CONTAINERS -%}
{% from 'firewall/map.jinja' import hostgroups with context -%}
{% from 'firewall/map.jinja' import assigned_hostgroups with context -%}
*nat
:PREROUTING ACCEPT [0:0]
:INPUT ACCEPT [0:0]
:OUTPUT ACCEPT [0:0]
:POSTROUTING ACCEPT [0:0]
:DOCKER - [0:0]
-A PREROUTING -m addrtype --dst-type LOCAL -j DOCKER
-A OUTPUT ! -d 127.0.0.0/8 -m addrtype --dst-type LOCAL -j DOCKER
-A POSTROUTING -s {{DOCKER.sosrange}} ! -o sosbridge -j MASQUERADE
{%- for container in NODE_CONTAINERS %}
{%- if DOCKER.containers[container].ports is defined %}
{%- for port, proto in DOCKER.containers[container].ports.items() %}
-A POSTROUTING -s {{DOCKER.containers[container].ip}}/32 -d {{DOCKER.containers[container].ip}}/32 -p {{proto}} -m {{proto}} --dport {{port}} -j MASQUERADE
{%- endfor %}
{%- endif %}
{%- endfor %}
-A DOCKER -i sosbridge -j RETURN
{%- for container in NODE_CONTAINERS %}
{%- if DOCKER.containers[container].ports is defined %}
{%- for port, proto in DOCKER.containers[container].ports.items() %}
-A DOCKER ! -i sosbridge -p {{proto}} -m {{proto}} --dport {{port}} -j DNAT --to-destination {{DOCKER.containers[container].ip}}:{{port}}
{%- endfor %}
{%- endif %}
{%- endfor %}
COMMIT
*filter
:INPUT ACCEPT [0:0]
:FORWARD DROP [0:0]
:OUTPUT ACCEPT [0:0]
:DOCKER - [0:0]
:DOCKER-ISOLATION-STAGE-1 - [0:0]
:DOCKER-ISOLATION-STAGE-2 - [0:0]
:DOCKER-USER - [0:0]
:LOGGING - [0:0]
{%- set count = namespace(value=0) %}
{%- for chain, hg in assigned_hostgroups.chain.items() %}
{%- for hostgroup, portgroups in assigned_hostgroups.chain[chain].hostgroups.items() %}
{%- for action in ['insert', 'delete' ] %}
{%- if hostgroups[hostgroup].ips[action] %}
{%- for ip in hostgroups[hostgroup].ips[action] %}
{%- for portgroup in portgroups.portgroups %}
{%- for proto, ports in portgroup.items() %}
{%- for port in ports %}
{%- set count.value = count.value + 1 %}
-A {{chain}} -s {{ip}} -p {{proto}} -m {{proto}} --dport {{port}} -j ACCEPT
{%- endfor %}
{%- endfor %}
{%- endfor %}
{%- endfor %}
{%- endif %}
{%- endfor %}
{%- endfor %}
{%- endfor %}
-A INPUT -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A INPUT -m conntrack --ctstate INVALID -j DROP
-A INPUT -p icmp -j ACCEPT
-A INPUT -j LOGGING
-A FORWARD -j DOCKER-USER
-A FORWARD -j DOCKER-ISOLATION-STAGE-1
-A FORWARD -o sosbridge -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A FORWARD -o sosbridge -j DOCKER
-A FORWARD -i sosbridge ! -o sosbridge -j ACCEPT
-A FORWARD -i sosbridge -o sosbridge -j ACCEPT
-A FORWARD -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A OUTPUT -p icmp -m icmp --icmp-type 14 -j DROP
{%- for container in NODE_CONTAINERS %}
{%- if DOCKER.containers[container].ports is defined %}
{%- for port, proto in DOCKER.containers[container].ports.items() %}
-A DOCKER -d {{DOCKER.containers[container].ip}}/32 ! -i sosbridge -o sosbridge -p {{proto}} -m {{proto}} --dport {{port}} -j ACCEPT
{%- endfor %}
{%- endif %}
{%- endfor %}
-A DOCKER-ISOLATION-STAGE-1 -i sosbridge ! -o sosbridge -j DOCKER-ISOLATION-STAGE-2
-A DOCKER-ISOLATION-STAGE-1 -j RETURN
-A DOCKER-ISOLATION-STAGE-2 -o sosbridge -j DROP
-A DOCKER-ISOLATION-STAGE-2 -j RETURN
-A DOCKER-USER ! -i sosbridge -o sosbridge -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A DOCKER-USER -j RETURN
-A LOGGING -m limit --limit 2/min -j LOG --log-prefix "IPTables-dropped: "
-A LOGGING -j DROP
COMMIT
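
Because dockerd now runs with --iptables=false, this template recreates Docker's NAT plumbing itself: a MASQUERADE rule for traffic leaving the sosbridge range, hairpin MASQUERADE entries per published port, and one DNAT per port of each NODE_CONTAINERS entry. Worked example with the defaults: so-nginx has final_octet 31, so its 443/tcp publication should render as a DNAT to 172.17.1.31:443. After a highstate, confirm the kernel loaded it:

    iptables -t nat -S DOCKER | grep 172.17.1.31
    # expected, per the template above:
    # -A DOCKER ! -i sosbridge -p tcp -m tcp --dport 443 -j DNAT --to-destination 172.17.1.31:443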


@@ -1,8 +1,7 @@
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'docker/docker.map.jinja' import DOCKER %}
{% set GRAFANA = salt['pillar.get']('manager:grafana', '0') %}
{% set ADMINPASS = salt['pillar.get']('secrets:grafana_admin') %}
@@ -126,6 +125,9 @@ so-grafana:
- image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-grafana:{{ GLOBALS.so_version }}
- hostname: grafana
- user: socore
- networks:
- sosbridge:
- ipv4_address: {{ DOCKER.containers['so-grafana'].ip }}
- binds:
- /nsm/grafana:/var/lib/grafana:rw
- /opt/so/conf/grafana/etc/grafana.ini:/etc/grafana/grafana.ini:ro


@@ -5,6 +5,7 @@
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %}
{% import_yaml 'docker/defaults.yaml' as DOCKERDEFAULTS %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% set RESTRICTIDHSERVICES = salt['pillar.get']('idh:restrict_management_ip', False) %}


@@ -4,6 +4,7 @@
# Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %}
{% from 'docker/docker.map.jinja' import DOCKER %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% set proxy = salt['pillar.get']('manager:proxy') %}
@@ -31,6 +32,9 @@ so-idstools:
- image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-idstools:{{ GLOBALS.so_version }}
- hostname: so-idstools
- user: socore
- networks:
- sosbridge:
- ipv4_address: {{ DOCKER.containers['so-idstools'].ip }}
{% if proxy %}
- environment:
- http_proxy={{ proxy }}


@@ -1,5 +1,6 @@
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %}
{% from 'docker/docker.map.jinja' import DOCKER %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% set GRAFANA = salt['pillar.get']('manager:grafana', '0') %}
@@ -47,6 +48,11 @@ so-influxdb:
docker_container.running:
- image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-influxdb:{{ GLOBALS.so_version }}
- hostname: influxdb
- networks:
- sosbridge:
- ipv4_address: {{ DOCKER.containers['so-influxdb'].ip }}
- environment:
- INFLUXDB_HTTP_LOG_ENABLED=false
- binds:
- /opt/so/log/influxdb/:/log:rw
- /opt/so/conf/influxdb/etc/influxdb.conf:/etc/influxdb/influxdb.conf:ro


@@ -5,12 +5,10 @@
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %}
{% from 'docker/docker.map.jinja' import DOCKER %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% import_yaml 'kibana/defaults.yaml' as default_settings %}
{% set KIBANA_SETTINGS = salt['grains.filter_by'](default_settings, default='kibana', merge=salt['pillar.get']('kibana', {})) %}
{% from 'kibana/config.map.jinja' import KIBANACONFIG with context %}
# Add ES Group
@@ -84,6 +82,9 @@ so-kibana:
- image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-kibana:{{ GLOBALS.so_version }}
- hostname: kibana
- user: kibana
- networks:
- sosbridge:
- ipv4_address: {{ DOCKER.containers['so-kibana'].ip }}
- environment:
- ELASTICSEARCH_HOST={{ GLOBALS.manager }}
- ELASTICSEARCH_PORT=9200


@@ -5,6 +5,7 @@
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %}
{% from 'docker/docker.map.jinja' import DOCKER %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
# Add Kratos Group
@@ -67,6 +68,9 @@ so-kratos:
- image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-kratos:{{ GLOBALS.so_version }}
- hostname: kratos
- name: so-kratos
- networks:
- sosbridge:
- ipv4_address: {{ DOCKER.containers['so-kratos'].ip }}
- binds:
- /opt/so/conf/kratos/schema.json:/kratos-conf/schema.json:ro
- /opt/so/conf/kratos/kratos.yaml:/kratos-conf/kratos.yaml:ro


@@ -6,19 +6,19 @@
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %}
{% from 'docker/docker.map.jinja' import DOCKER %}
{% from 'logstash/map.jinja' import REDIS_NODES with context %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
# Logstash Section - Decide which pillar to use
{% set lsheap = salt['pillar.get']('logstash_settings:lsheap') %}
{% if GLOBALS.role in ['so-eval','so-managersearch', 'so-manager', 'so-standalone'] %}
{% set nodetype = GLOBALS.role %}
{% endif %}
{% set PIPELINES = salt['pillar.get']('logstash:pipelines', {}) %}
{% set DOCKER_OPTIONS = salt['pillar.get']('logstash:docker_options', {}) %}
{% set TEMPLATES = salt['pillar.get']('elasticsearch:templates', {}) %}
include:
- ssl
@@ -139,6 +139,9 @@ so-logstash:
- image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-logstash:{{ GLOBALS.so_version }}
- hostname: so-logstash
- name: so-logstash
- networks:
- sosbridge:
- ipv4_address: {{ DOCKER.containers['so-logstash'].ip }}
- user: logstash
- extra_hosts: {{ REDIS_NODES }}
- environment:


@@ -5,8 +5,8 @@
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %}
{% from 'docker/docker.map.jinja' import DOCKER %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{%- set MYSQLPASS = salt['pillar.get']('secrets:mysql') %}
# MySQL Setup
@@ -84,6 +84,9 @@ so-mysql:
- image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-mysql:{{ GLOBALS.so_version }}
- hostname: so-mysql
- user: socore
- networks:
- sosbridge:
- ipv4_address: {{ DOCKER.containers['so-mysql'].ip }}
- port_bindings:
- 0.0.0.0:3306:3306
- environment:


@@ -1,6 +1,7 @@
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %}
{% from 'docker/docker.map.jinja' import DOCKER %}
include:
- ssl
@@ -83,6 +84,9 @@ so-nginx:
docker_container.running:
- image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-nginx:{{ GLOBALS.so_version }}
- hostname: so-nginx
- networks:
- sosbridge:
- ipv4_address: {{ DOCKER.containers['so-nginx'].ip }}
- binds:
- /opt/so/conf/nginx/nginx.conf:/etc/nginx/nginx.conf:ro
- /opt/so/log/nginx/:/var/log/nginx:rw


@@ -5,8 +5,8 @@
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %}
{% from 'docker/docker.map.jinja' import DOCKER %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{%- set MYSQLPASS = salt['pillar.get']('secrets:mysql') -%}
{%- set PLAYBOOKPASS = salt['pillar.get']('secrets:playbook_db') -%}
@@ -18,7 +18,7 @@ create_playbookdbuser:
mysql_user.present:
- name: playbookdbuser
- password: {{ PLAYBOOKPASS }}
-  - host: "{{ GLOBALS.docker_range.split('/')[0] }}/255.255.255.0"
+  - host: "{{ DOCKER.sosrange.split('/')[0] }}/255.255.255.0"
- connection_host: {{ GLOBALS.manager_ip }}
- connection_port: 3306
- connection_user: root
@@ -27,7 +27,7 @@ create_playbookdbuser:
query_playbookdbuser_grants:
mysql_query.run:
- database: playbook
-  - query: "GRANT ALL ON playbook.* TO 'playbookdbuser'@'{{ GLOBALS.docker_range.split('/')[0] }}/255.255.255.0';"
+  - query: "GRANT ALL ON playbook.* TO 'playbookdbuser'@'{{ DOCKER.sosrange.split('/')[0] }}/255.255.255.0';"
- connection_host: {{ GLOBALS.manager_ip }}
- connection_port: 3306
- connection_user: root
@@ -80,6 +80,9 @@ so-playbook:
- image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-playbook:{{ GLOBALS.so_version }}
- hostname: playbook
- name: so-playbook
- networks:
- sosbridge:
- ipv4_address: {{ DOCKER.containers['so-playbook'].ip }}
- binds:
- /opt/so/log/playbook:/playbook/log:rw
- environment:


@@ -5,7 +5,7 @@
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %}
{% from 'docker/docker.map.jinja' import DOCKER %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
include:
@@ -46,6 +46,9 @@ so-redis:
- image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-redis:{{ GLOBALS.so_version }}
- hostname: so-redis
- user: socore
- networks:
- sosbridge:
- ipv4_address: {{ DOCKER.containers['so-redis'].ip }}
- port_bindings:
- 0.0.0.0:6379:6379
- 0.0.0.0:9696:9696


@@ -1,5 +1,6 @@
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %}
{% from 'docker/docker.map.jinja' import DOCKER %}
include:
- ssl
@@ -37,6 +38,9 @@ so-dockerregistry:
docker_container.running:
- image: ghcr.io/security-onion-solutions/registry:latest
- hostname: so-registry
- networks:
- sosbridge:
- ipv4_address: {{ DOCKER.containers['so-dockerregistry'].ip }}
- restart_policy: always
- port_bindings:
- 0.0.0.0:5000:5000


@@ -1,5 +1,6 @@
{% import_yaml 'soc/defaults.yaml' as SOCDEFAULTS %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'docker/docker.map.jinja' import DOCKER -%}
{% for module, application_url in GLOBALS.application_urls.items() %}
{% do SOCDEFAULTS.soc.server.modules[module].update({'hostUrl': application_url}) %}
@@ -18,7 +19,7 @@
{% do SOCDEFAULTS.soc.server.modules.influxdb.update({'hostUrl': 'https://' ~ GLOBALS.influxdb_host ~ ':8086'}) %}
{% endif %}
-{% do SOCDEFAULTS.soc.server.modules.statickeyauth.update({'anonymousCidr': GLOBALS.docker_range, 'apiKey': pillar.sensoroni.sensoronikey}) %}
+{% do SOCDEFAULTS.soc.server.modules.statickeyauth.update({'anonymousCidr': DOCKER.sosrange, 'apiKey': pillar.sensoroni.sensoronikey}) %}
{% do SOCDEFAULTS.soc.server.client.case.update({'analyzerNodeId': GLOBALS.minion_id}) %}


@@ -2,6 +2,7 @@
{% if sls in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'docker/docker.map.jinja' import DOCKER %}
include:
- manager.sync_es_users
@@ -95,6 +96,9 @@ so-soc:
- image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-soc:{{ GLOBALS.so_version }}
- hostname: soc
- name: so-soc
- networks:
- sosbridge:
- ipv4_address: {{ DOCKER.containers['so-soc'].ip }}
- binds:
- /nsm/soc/jobs:/opt/sensoroni/jobs:rw
- /opt/so/log/soc/:/opt/sensoroni/logs/:rw


@@ -1,6 +1,6 @@
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %}
{% from 'docker/docker.map.jinja' import DOCKER %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
include:
@@ -63,6 +63,9 @@ so-soctopus:
- image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-soctopus:{{ GLOBALS.so_version }}
- hostname: soctopus
- name: so-soctopus
- networks:
- sosbridge:
- ipv4_address: {{ DOCKER.containers['so-soctopus'].ip }}
- binds:
- /opt/so/conf/soctopus/SOCtopus.conf:/SOCtopus/SOCtopus.conf:ro
- /opt/so/log/soctopus/:/var/log/SOCtopus/:rw


@@ -5,7 +5,7 @@
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %}
{% from 'docker/docker.map.jinja' import DOCKER %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% set STRELKA_RULES = salt['pillar.get']('strelka:rules', '1') %}
{% import_yaml 'strelka/defaults.yaml' as strelka_config with context %}
@@ -168,6 +168,9 @@ strelka_coordinator:
docker_container.running:
- image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-redis:{{ GLOBALS.so_version }}
- name: so-strelka-coordinator
- networks:
- sosbridge:
- ipv4_address: {{ DOCKER.containers['so-strelka-coordinator'].ip }}
- entrypoint: redis-server --save "" --appendonly no
- port_bindings:
- 0.0.0.0:6380:6379
@@ -181,6 +184,9 @@ strelka_gatekeeper:
docker_container.running:
- image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-redis:{{ GLOBALS.so_version }}
- name: so-strelka-gatekeeper
- networks:
- sosbridge:
- ipv4_address: {{ DOCKER.containers['so-strelka-gatekeeper'].ip }}
- entrypoint: redis-server --save "" --appendonly no --maxmemory-policy allkeys-lru
- port_bindings:
- 0.0.0.0:6381:6379
@@ -198,6 +204,9 @@ strelka_frontend:
- /nsm/strelka/log/:/var/log/strelka/:rw
- privileged: True
- name: so-strelka-frontend
- networks:
- sosbridge:
- ipv4_address: {{ DOCKER.containers['so-strelka-frontend'].ip }}
- command: strelka-frontend
- port_bindings:
- 0.0.0.0:57314:57314
@@ -214,6 +223,9 @@ strelka_backend:
- /opt/so/conf/strelka/backend/:/etc/strelka/:ro
- /opt/so/conf/strelka/rules/:/etc/yara/:ro
- name: so-strelka-backend
- networks:
- sosbridge:
- ipv4_address: {{ DOCKER.containers['so-strelka-backend'].ip }}
- command: strelka-backend
- restart_policy: on-failure
@@ -228,6 +240,9 @@ strelka_manager:
- binds:
- /opt/so/conf/strelka/manager/:/etc/strelka/:ro
- name: so-strelka-manager
- networks:
- sosbridge:
- ipv4_address: {{ DOCKER.containers['so-strelka-manager'].ip }}
- command: strelka-manager
append_so-strelka-manager_so-status.conf:
@@ -242,6 +257,9 @@ strelka_filestream:
- /opt/so/conf/strelka/filestream/:/etc/strelka/:ro
- /nsm/strelka:/nsm/strelka
- name: so-strelka-filestream
- networks:
- sosbridge:
- ipv4_address: {{ DOCKER.containers['so-strelka-filestream'].ip }}
- command: strelka-filestream
append_so-strelka-filestream_so-status.conf:


@@ -24,7 +24,6 @@
'url_base': INIT.PILLAR.global.url_base,
'so_model': INIT.GRAINS.get('sosmodel',''),
'description': INIT.PILLAR.sensoroni.get('node_description',''),
-        'docker_range': INIT.PILLAR.docker.range,
'sensoroni_key': INIT.PILLAR.sensoroni.sensoronikey,
'os': INIT.GRAINS.os,
'application_urls': {},

setup/so-functions (file mode changed from Executable to Normal)

@@ -254,11 +254,16 @@ collect_dns_domain() {
collect_dockernet() {
if ! whiptail_dockernet_check; then
-        whiptail_dockernet_net "172.17.0.0"
+        whiptail_dockernet_sosnet "172.17.1.0"
+        whiptail_dockernet_nososnet "172.17.0.0"
while ! valid_ip4 "$DOCKERNET"; do
whiptail_invalid_input
-            whiptail_dockernet_net "$DOCKERNET"
+            whiptail_dockernet_nonsosnet "$DOCKERNET"
done
+        while ! valid_ip4 "$DOCKERNET2"; do
+            whiptail_invalid_input
+            whiptail_dockernet_sosnet "$DOCKERNET2"
+        done
fi
}
@@ -996,6 +1001,9 @@ docker_registry() {
if [ -z "$DOCKERNET" ]; then
DOCKERNET=172.17.0.0
fi
if [ -z "$DOCKERNET2" ]; then
DOCKERNET2=172.17.1.0
fi
# Make the host use the manager docker registry
DNETBIP=$(echo $DOCKERNET | awk -F'.' '{print $1,$2,$3,1}' OFS='.')/24
if [ -n "$TURBO" ]; then local proxy="$TURBO"; else local proxy="https://$MSRV"; fi
@@ -1410,9 +1418,12 @@ create_global() {
if [ -z "$DOCKERNET" ]; then
DOCKERNET=172.17.0.0
+        DOCKERNET2=172.17.1.0
DOCKERBIP=$(echo $DOCKERNET | awk -F'.' '{print $1,$2,$3,1}' OFS='.')/24
+        DOCKER2BIP=$(echo $DOCKERNET2 | awk -F'.' '{print $1,$2,$3,1}' OFS='.')
else
DOCKERBIP=$(echo $DOCKERNET | awk -F'.' '{print $1,$2,$3,1}' OFS='.')/24
+        DOCKER2BIP=$(echo $DOCKERNET2 | awk -F'.' '{print $1,$2,$3,1}' OFS='.')
fi
if [ -f "$global_pillar_file" ]; then
@@ -1497,6 +1508,8 @@ docker_pillar() {
touch $adv_docker_pillar_file
printf '%s\n'\
"docker:"\
" sosrange: '$DOCKERNET2/24'"\
" sosbip: '$DOCKER2BIP'"\
" range: '$DOCKERNET/24'"\
" bip: '$DOCKERBIP'" > $docker_pillar_file
}
@@ -1953,6 +1966,15 @@ repo_sync_local() {
echo "gpgcheck=1" >> /root/repodownload.conf
echo "gpgkey=https://repo.securityonion.net/file/securityonion-repo/keys/securityonion.pub" >> /root/repodownload.conf
REPOSYNC=$(rpm -qa | grep createrepo | wc -l)
if [[ ! "$REPOSYNC" -gt 0 ]]; then
# Install reposync
info "Installing createrepo"
logCmd "yum -y install -c /root/repodownload.conf yum-utils createrepo"
else
info "We have what we need to sync"
fi
logCmd "reposync --norepopath -n -g -l -d -m -c /root/repodownload.conf -r securityonionsync --download-metadata -p /nsm/repo/"


@@ -317,6 +317,7 @@ if ! [[ -f $install_opt_file ]]; then
check_elastic_license
check_requirements "manager"
networking_needful
+collect_dockernet
whiptail_airgap
detect_cloud
set_minion_info
@@ -336,6 +337,7 @@ if ! [[ -f $install_opt_file ]]; then
check_elastic_license
check_requirements "manager"
networking_needful
+collect_dockernet
whiptail_airgap
detect_cloud
set_minion_info
@@ -354,6 +356,7 @@ if ! [[ -f $install_opt_file ]]; then
waitforstate=true
check_requirements "manager"
networking_needful
+collect_dockernet
whiptail_airgap
detect_cloud
set_default_log_size >> $setup_log 2>&1
@@ -370,6 +373,7 @@ if ! [[ -f $install_opt_file ]]; then
waitforstate=true
check_requirements "manager"
networking_needful
+collect_dockernet
whiptail_airgap
detect_cloud
set_default_log_size >> $setup_log 2>&1
@@ -548,6 +552,7 @@ if ! [[ -f $install_opt_file ]]; then
generate_ca
generate_ssl
logCmd "salt-call state.apply -l info firewall"
# create these so the registry state can add so-registry to /opt/so/conf/so-status/so-status.conf
logCmd "mkdir -p /opt/so/conf/so-status/ "
@@ -560,7 +565,6 @@ if ! [[ -f $install_opt_file ]]; then
docker_seed_registry
title "Applying the manager state"
logCmd "salt-call state.apply -l info manager"
logCmd "salt-call state.apply -l info firewall"
logCmd "salt-call state.highstate -l info"
add_web_user
info "Restarting SOC to pick up initial user"


@@ -325,12 +325,24 @@ whiptail_dockernet_check(){
}
-whiptail_dockernet_net() {
+whiptail_dockernet_sosnet() {
   [ -n "$TESTING" ] && return
+  DOCKERNET2=$(whiptail --title "$whiptail_title" --inputbox \
+  "\nEnter a /24 size network range for SOS containers to use WITHOUT the /24 suffix. This range will be used on ALL nodes." 11 65 "$1" 3>&1 1>&2 2>&3)
+  local exitstatus=$?
+  whiptail_check_exitstatus $exitstatus
+}
+whiptail_dockernet_nososnet() {
+  [ -n "$TESTING" ] && return
   DOCKERNET=$(whiptail --title "$whiptail_title" --inputbox \
-  "\nEnter a /24 size network range for docker to use WITHOUT the /24 suffix. This range will be used on ALL nodes." 11 65 "$1" 3>&1 1>&2 2>&3)
+  "\nEnter a /24 size network range for NON SOS containers to use WITHOUT the /24 suffix. This range will be used on ALL nodes." 11 65 "$1" 3>&1 1>&2 2>&3)
local exitstatus=$?
whiptail_check_exitstatus $exitstatus