Merge pull request #10387 from Security-Onion-Solutions/airgaps

Docker Enhancements
Mike Reeves committed 2023-05-26 17:35:23 -04:00 (committed by GitHub)
45 changed files with 698 additions and 634 deletions
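
The changes below wire three per-container options (custom_bind_mounts, extra_hosts, and extra_env) through the Docker pillar defaults, the SOC option annotations, and each container's Salt state. A minimal sketch of how an operator might override the new defaults, assuming they live under docker:containers: in the local pillar (as the DOCKER.containers[...] lookups in the states suggest); the container name, paths, hostnames, and values are illustrative only:

    docker:
      containers:
        'so-nginx':
          custom_bind_mounts:
            - /opt/custom/html:/opt/socore/html/custom:ro
          extra_hosts:
            - mirror.example.com:192.168.1.50
          extra_env:
            - HTTPS_PROXY=http://proxy.example.com:3128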


@@ -28,6 +28,23 @@ so-curator:
- /opt/so/conf/curator/curator.yml:/etc/curator/config/curator.yml:ro - /opt/so/conf/curator/curator.yml:/etc/curator/config/curator.yml:ro
- /opt/so/conf/curator/action/:/etc/curator/action:ro - /opt/so/conf/curator/action/:/etc/curator/action:ro
- /opt/so/log/curator:/var/log/curator:rw - /opt/so/log/curator:/var/log/curator:rw
{% if DOCKER.containers['so-curator'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-curator'].custom_bind_mounts %}
- {{ BIND }}
{% endfor %}
{% endif %}
{% if DOCKER.containers['so-curator'].extra_hosts %}
- extra_hosts:
{% for XTRAHOST in DOCKER.containers['so-curator'].extra_hosts %}
- {{ XTRAHOST }}
{% endfor %}
{% endif %}
{% if DOCKER.containers['so-curator'].extra_env %}
- environment:
{% for XTRAENV in DOCKER.containers['so-curator'].extra_env %}
- {{ XTRAENV }}
{% endfor %}
{% endif %}
- require: - require:
- file: actionconfs - file: actionconfs
- file: curconf - file: curconf


@@ -10,12 +10,14 @@ docker:
- 0.0.0.0:5000:5000 - 0.0.0.0:5000:5000
custom_bind_mounts: [] custom_bind_mounts: []
extra_hosts: [] extra_hosts: []
extra_env: []
'so-elastic-fleet': 'so-elastic-fleet':
final_octet: 21 final_octet: 21
port_bindings: port_bindings:
- 0.0.0.0:8220:8220/tcp - 0.0.0.0:8220:8220/tcp
custom_bind_mounts: [] custom_bind_mounts: []
extra_hosts: [] extra_hosts: []
extra_env: []
'so-elasticsearch': 'so-elasticsearch':
final_octet: 22 final_octet: 22
port_bindings: port_bindings:
@@ -23,22 +25,26 @@ docker:
- 0.0.0.0:9300:9300/tcp - 0.0.0.0:9300:9300/tcp
custom_bind_mounts: [] custom_bind_mounts: []
extra_hosts: [] extra_hosts: []
extra_env: []
'so-idstools': 'so-idstools':
final_octet: 25 final_octet: 25
custom_bind_mounts: [] custom_bind_mounts: []
extra_hosts: [] extra_hosts: []
extra_env: []
'so-influxdb': 'so-influxdb':
final_octet: 26 final_octet: 26
port_bindings: port_bindings:
- 0.0.0.0:8086:8086 - 0.0.0.0:8086:8086
custom_bind_mounts: [] custom_bind_mounts: []
extra_hosts: [] extra_hosts: []
extra_env: []
'so-kibana': 'so-kibana':
final_octet: 27 final_octet: 27
port_bindings: port_bindings:
- 0.0.0.0:5601:5601 - 0.0.0.0:5601:5601
custom_bind_mounts: [] custom_bind_mounts: []
extra_hosts: [] extra_hosts: []
extra_env: []
'so-kratos': 'so-kratos':
final_octet: 28 final_octet: 28
port_bindings: port_bindings:
@@ -46,6 +52,7 @@ docker:
- 0.0.0.0:4434:4434 - 0.0.0.0:4434:4434
custom_bind_mounts: [] custom_bind_mounts: []
extra_hosts: [] extra_hosts: []
extra_env: []
'so-logstash': 'so-logstash':
final_octet: 29 final_octet: 29
port_bindings: port_bindings:
@@ -61,12 +68,14 @@ docker:
- 0.0.0.0:9600:9600 - 0.0.0.0:9600:9600
custom_bind_mounts: [] custom_bind_mounts: []
extra_hosts: [] extra_hosts: []
extra_env: []
'so-mysql': 'so-mysql':
final_octet: 30 final_octet: 30
port_bindings: port_bindings:
- 0.0.0.0:3306:3306 - 0.0.0.0:3306:3306
custom_bind_mounts: [] custom_bind_mounts: []
extra_hosts: [] extra_hosts: []
extra_env: []
'so-nginx': 'so-nginx':
final_octet: 31 final_octet: 31
port_bindings: port_bindings:
@@ -76,12 +85,14 @@ docker:
- 7788:7788 - 7788:7788
custom_bind_mounts: [] custom_bind_mounts: []
extra_hosts: [] extra_hosts: []
extra_env: []
'so-playbook': 'so-playbook':
final_octet: 32 final_octet: 32
port_bindings: port_bindings:
- 0.0.0.0:3000:3000 - 0.0.0.0:3000:3000
custom_bind_mounts: [] custom_bind_mounts: []
extra_hosts: [] extra_hosts: []
extra_env: []
'so-redis': 'so-redis':
final_octet: 33 final_octet: 33
port_bindings: port_bindings:
@@ -89,63 +100,101 @@ docker:
- 0.0.0.0:9696:9696 - 0.0.0.0:9696:9696
custom_bind_mounts: [] custom_bind_mounts: []
extra_hosts: [] extra_hosts: []
extra_env: []
'so-sensoroni':
final_octet: 99
custom_bind_mounts: []
extra_hosts: []
extra_env: []
'so-soc': 'so-soc':
final_octet: 34 final_octet: 34
port_bindings: port_bindings:
- 0.0.0.0:9822:9822 - 0.0.0.0:9822:9822
custom_bind_mounts: [] custom_bind_mounts: []
extra_hosts: [] extra_hosts: []
extra_env: []
'so-soctopus': 'so-soctopus':
final_octet: 35 final_octet: 35
port_bindings: port_bindings:
- 0.0.0.0:7000:7000 - 0.0.0.0:7000:7000
custom_bind_mounts: [] custom_bind_mounts: []
extra_hosts: [] extra_hosts: []
extra_env: []
'so-strelka-backend': 'so-strelka-backend':
final_octet: 36 final_octet: 36
custom_bind_mounts: [] custom_bind_mounts: []
extra_hosts: [] extra_hosts: []
extra_env: []
'so-strelka-filestream': 'so-strelka-filestream':
final_octet: 37 final_octet: 37
custom_bind_mounts: [] custom_bind_mounts: []
extra_hosts: [] extra_hosts: []
extra_env: []
'so-strelka-frontend': 'so-strelka-frontend':
final_octet: 38 final_octet: 38
port_bindings: port_bindings:
- 0.0.0.0:57314:57314 - 0.0.0.0:57314:57314
custom_bind_mounts: [] custom_bind_mounts: []
extra_hosts: [] extra_hosts: []
extra_env: []
'so-strelka-manager': 'so-strelka-manager':
final_octet: 39 final_octet: 39
custom_bind_mounts: [] custom_bind_mounts: []
extra_hosts: [] extra_hosts: []
extra_env: []
'so-strelka-gatekeeper': 'so-strelka-gatekeeper':
final_octet: 40 final_octet: 40
port_bindings: port_bindings:
- 0.0.0.0:6381:6379 - 0.0.0.0:6381:6379
custom_bind_mounts: [] custom_bind_mounts: []
extra_hosts: [] extra_hosts: []
extra_env: []
'so-strelka-coordinator': 'so-strelka-coordinator':
final_octet: 41 final_octet: 41
port_bindings: port_bindings:
- 0.0.0.0:6380:6379 - 0.0.0.0:6380:6379
custom_bind_mounts: [] custom_bind_mounts: []
extra_hosts: [] extra_hosts: []
extra_env: []
'so-elastalert': 'so-elastalert':
final_octet: 42 final_octet: 42
custom_bind_mounts: [] custom_bind_mounts: []
extra_hosts: [] extra_hosts: []
extra_env: []
'so-curator': 'so-curator':
final_octet: 43 final_octet: 43
custom_bind_mounts: [] custom_bind_mounts: []
extra_hosts: [] extra_hosts: []
extra_env: []
'so-elastic-fleet-package-registry': 'so-elastic-fleet-package-registry':
final_octet: 44 final_octet: 44
port_bindings: port_bindings:
- 0.0.0.0:8080:8080/tcp - 0.0.0.0:8080:8080/tcp
custom_bind_mounts: [] custom_bind_mounts: []
extra_hosts: [] extra_hosts: []
extra_env: []
'so-idh': 'so-idh':
final_octet: 45 final_octet: 45
custom_bind_mounts: [] custom_bind_mounts: []
extra_hosts: [] extra_hosts: []
extra_env: []
'so-telegraf':
final_octet: 99
custom_bind_mounts: []
extra_hosts: []
extra_env: []
'so-steno':
final_octet: 99
custom_bind_mounts: []
extra_hosts: []
extra_env: []
'so-suricata':
final_octet: 99
custom_bind_mounts: []
extra_hosts: []
extra_env: []
'so-zeek':
final_octet: 99
custom_bind_mounts: []
extra_hosts: []
extra_env: []


@@ -40,6 +40,12 @@ docker:
helpLink: docker.html helpLink: docker.html
multiline: True multiline: True
forcedType: "[]string" forcedType: "[]string"
extra_env:
description: List of additional ENV entries for the container.
advanced: True
helpLink: docker.html
multiline: True
forcedType: "[]string"
so-dockerregistry: *dockerOptions so-dockerregistry: *dockerOptions
so-elastalert: *dockerOptions so-elastalert: *dockerOptions
so-elastic-fleet-package-registry: *dockerOptions so-elastic-fleet-package-registry: *dockerOptions


@@ -31,8 +31,24 @@ so-elastalert:
- /opt/so/log/elastalert:/var/log/elastalert:rw - /opt/so/log/elastalert:/var/log/elastalert:rw
- /opt/so/conf/elastalert/modules/:/opt/elastalert/modules/:ro - /opt/so/conf/elastalert/modules/:/opt/elastalert/modules/:ro
- /opt/so/conf/elastalert/elastalert_config.yaml:/opt/elastalert/config.yaml:ro - /opt/so/conf/elastalert/elastalert_config.yaml:/opt/elastalert/config.yaml:ro
{% if DOCKER.containers['so-elastalert'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-elastalert'].custom_bind_mounts %}
- {{ BIND }}
{% endfor %}
{% endif %}
- extra_hosts: - extra_hosts:
- {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }} - {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }}
{% if DOCKER.containers['so-elastalert'].extra_hosts %}
{% for XTRAHOST in DOCKER.containers['so-elastalert'].extra_hosts %}
- {{ XTRAHOST }}
{% endfor %}
{% endif %}
{% if DOCKER.containers['so-elastalert'].extra_env %}
- environment:
{% for XTRAENV in DOCKER.containers['so-elastalert'].extra_env %}
- {{ XTRAENV }}
{% endfor %}
{% endif %}
- require: - require:
- cmd: wait_for_elasticsearch - cmd: wait_for_elasticsearch
- file: elastarules - file: elastarules


@@ -24,11 +24,27 @@ so-elastic-fleet-package-registry:
- ipv4_address: {{ DOCKER.containers['so-elastic-fleet-package-registry'].ip }} - ipv4_address: {{ DOCKER.containers['so-elastic-fleet-package-registry'].ip }}
- extra_hosts: - extra_hosts:
- {{ GLOBALS.hostname }}:{{ GLOBALS.node_ip }} - {{ GLOBALS.hostname }}:{{ GLOBALS.node_ip }}
{% if DOCKER.containers['so-elastic-fleet-package-registry'].extra_hosts %}
{% for XTRAHOST in DOCKER.containers['so-elastic-fleet-package-registry'].extra_hosts %}
- {{ XTRAHOST }}
{% endfor %}
{% endif %}
- port_bindings: - port_bindings:
{% for BINDING in DOCKER.containers['so-elastic-fleet-package-registry'].port_bindings %} {% for BINDING in DOCKER.containers['so-elastic-fleet-package-registry'].port_bindings %}
- {{ BINDING }} - {{ BINDING }}
{% endfor %} {% endfor %}
{% if DOCKER.containers['so-elastic-fleet-package-registry'].custom_bind_mounts %}
- binds:
{% for BIND in DOCKER.containers['so-elastic-fleet-package-registry'].custom_bind_mounts %}
- {{ BIND }}
{% endfor %}
{% endif %}
{% if DOCKER.containers['so-elastic-fleet-package-registry'].extra_env %}
- environment:
{% for XTRAENV in DOCKER.containers['so-elastic-fleet-package-registry'].extra_env %}
- {{ XTRAENV }}
{% endfor %}
{% endif %}
delete_so-elastic-fleet-package-registry_so-status.disabled: delete_so-elastic-fleet-package-registry_so-status.disabled:
file.uncomment: file.uncomment:
- name: /opt/so/conf/so-status/so-status.conf - name: /opt/so/conf/so-status/so-status.conf


@@ -28,6 +28,11 @@ so-elastic-fleet:
- extra_hosts: - extra_hosts:
- {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }} - {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }}
- {{ GLOBALS.hostname }}:{{ GLOBALS.node_ip }} - {{ GLOBALS.hostname }}:{{ GLOBALS.node_ip }}
{% if DOCKER.containers['so-elastic-fleet'].extra_hosts %}
{% for XTRAHOST in DOCKER.containers['so-elastic-fleet'].extra_hosts %}
- {{ XTRAHOST }}
{% endfor %}
{% endif %}
- port_bindings: - port_bindings:
{% for BINDING in DOCKER.containers['so-elastic-fleet'].port_bindings %} {% for BINDING in DOCKER.containers['so-elastic-fleet'].port_bindings %}
- {{ BINDING }} - {{ BINDING }}
@@ -35,6 +40,11 @@ so-elastic-fleet:
- binds: - binds:
- /etc/pki:/etc/pki:ro - /etc/pki:/etc/pki:ro
#- /opt/so/conf/elastic-fleet/state:/usr/share/elastic-agent/state:rw #- /opt/so/conf/elastic-fleet/state:/usr/share/elastic-agent/state:rw
{% if DOCKER.containers['so-elastic-fleet'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-elastic-fleet'].custom_bind_mounts %}
- {{ BIND }}
{% endfor %}
{% endif %}
- environment: - environment:
- FLEET_SERVER_ENABLE=true - FLEET_SERVER_ENABLE=true
- FLEET_URL=https://{{ GLOBALS.node_ip }}:8220 - FLEET_URL=https://{{ GLOBALS.node_ip }}:8220
@@ -45,6 +55,11 @@ so-elastic-fleet:
- FLEET_SERVER_CERT=/etc/pki/elasticfleet.crt - FLEET_SERVER_CERT=/etc/pki/elasticfleet.crt
- FLEET_SERVER_CERT_KEY=/etc/pki/elasticfleet.key - FLEET_SERVER_CERT_KEY=/etc/pki/elasticfleet.key
- FLEET_CA=/etc/pki/tls/certs/intca.crt - FLEET_CA=/etc/pki/tls/certs/intca.crt
{% if DOCKER.containers['so-elastic-fleet'].extra_env %}
{% for XTRAENV in DOCKER.containers['so-elastic-fleet'].extra_env %}
- {{ XTRAENV }}
{% endfor %}
{% endif %}
{% endif %} {% endif %}
delete_so-elastic-fleet_so-status.disabled: delete_so-elastic-fleet_so-status.disabled:
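
For context, if a single extra_env entry were set for so-elastic-fleet, the template above would append it after the built-in FLEET_* variables when the state renders. A rough sketch of the rendered result, with placeholder addresses and a hypothetical proxy value rather than anything from this commit:

    so-elastic-fleet:
      docker_container.running:
        - environment:
          - FLEET_SERVER_ENABLE=true
          - FLEET_URL=https://192.0.2.10:8220
          ...
          - FLEET_CA=/etc/pki/tls/certs/intca.crt
          - HTTPS_PROXY=http://proxy.example.com:3128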


@@ -26,6 +26,11 @@ so-elasticsearch:
- sobridge: - sobridge:
- ipv4_address: {{ DOCKER.containers['so-elasticsearch'].ip }} - ipv4_address: {{ DOCKER.containers['so-elasticsearch'].ip }}
- extra_hosts: {{ LOGSTASH_NODES }} - extra_hosts: {{ LOGSTASH_NODES }}
{% if DOCKER.containers['so-elasticsearch'].extra_hosts %}
{% for XTRAHOST in DOCKER.containers['so-elasticsearch'].extra_hosts %}
- {{ XTRAHOST }}
{% endfor %}
{% endif %}
- environment: - environment:
{% if LOGSTASH_NODES | length == 1 %} {% if LOGSTASH_NODES | length == 1 %}
- discovery.type=single-node - discovery.type=single-node
@@ -35,6 +40,11 @@ so-elasticsearch:
- memlock=-1:-1 - memlock=-1:-1
- nofile=65536:65536 - nofile=65536:65536
- nproc=4096 - nproc=4096
{% if DOCKER.containers['so-elasticsearch'].extra_env %}
{% for XTRAENV in DOCKER.containers['so-elasticsearch'].extra_env %}
- {{ XTRAENV }}
{% endfor %}
{% endif %}
- port_bindings: - port_bindings:
{% for BINDING in DOCKER.containers['so-elasticsearch'].port_bindings %} {% for BINDING in DOCKER.containers['so-elasticsearch'].port_bindings %}
- {{ BINDING }} - {{ BINDING }}
@@ -60,6 +70,11 @@ so-elasticsearch:
- {{ repo }}:{{ repo }}:rw - {{ repo }}:{{ repo }}:rw
{% endfor %} {% endfor %}
{% endif %} {% endif %}
{% if DOCKER.containers['so-elasticsearch'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-elasticsearch'].custom_bind_mounts %}
- {{ BIND }}
{% endfor %}
{% endif %}
- watch: - watch:
- file: cacertz - file: cacertz
- file: esyml - file: esyml


@@ -20,6 +20,23 @@ so-idh:
- binds: - binds:
- /nsm/idh:/var/tmp:rw - /nsm/idh:/var/tmp:rw
- /opt/so/conf/idh/opencanary.conf:/etc/opencanaryd/opencanary.conf:ro - /opt/so/conf/idh/opencanary.conf:/etc/opencanaryd/opencanary.conf:ro
{% if DOCKER.containers['so-idh'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-idh'].custom_bind_mounts %}
- {{ BIND }}
{% endfor %}
{% endif %}
{% if DOCKER.containers['so-idh'].extra_hosts %}
- extra_hosts:
{% for XTRAHOST in DOCKER.containers['so-idh'].extra_hosts %}
- {{ XTRAHOST }}
{% endfor %}
{% endif %}
{% if DOCKER.containers['so-idh'].extra_env %}
- environment:
{% for XTRAENV in DOCKER.containers['so-idh'].extra_env %}
- {{ XTRAENV }}
{% endfor %}
{% endif %}
- watch: - watch:
- file: opencanary_config - file: opencanary_config
- require: - require:


@@ -24,14 +24,14 @@ idstools_sbin:
- group: 939 - group: 939
- file_mode: 755 - file_mode: 755
#idstools_sbin_jinja: idstools_sbin_jinja:
# file.recurse: file.recurse:
# - name: /usr/sbin - name: /usr/sbin
# - source: salt://idstools/tools/sbin_jinja - source: salt://idstools/tools/sbin_jinja
# - user: 934 - user: 934
# - group: 939 - group: 939
# - file_mode: 755 - file_mode: 755
# - template: jinja - template: jinja
{% else %} {% else %}


@@ -26,10 +26,33 @@ so-idstools:
- http_proxy={{ proxy }} - http_proxy={{ proxy }}
- https_proxy={{ proxy }} - https_proxy={{ proxy }}
- no_proxy={{ salt['pillar.get']('manager:no_proxy') }} - no_proxy={{ salt['pillar.get']('manager:no_proxy') }}
{% if DOCKER.containers['so-idstools'].extra_env %}
{% for XTRAENV in DOCKER.containers['so-idstools'].extra_env %}
- {{ XTRAENV }}
{% endfor %}
{% endif %}
{% elif DOCKER.containers['so-idstools'].extra_env %}
- environment:
{% for XTRAENV in DOCKER.containers['so-idstools'].extra_env %}
- {{ XTRAENV }}
{% endfor %}
{% endif %} {% endif %}
- binds: - binds:
- /opt/so/conf/idstools/etc:/opt/so/idstools/etc:ro - /opt/so/conf/idstools/etc:/opt/so/idstools/etc:ro
- /opt/so/rules/nids:/opt/so/rules/nids:rw - /opt/so/rules/nids:/opt/so/rules/nids:rw
- /nsm/rules/:/nsm/rules/:rw
{% if DOCKER.containers['so-idstools'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-idstools'].custom_bind_mounts %}
- {{ BIND }}
{% endfor %}
{% endif %}
- extra_hosts:
- {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }}
{% if DOCKER.containers['so-idstools'].extra_hosts %}
{% for XTRAHOST in DOCKER.containers['so-idstools'].extra_hosts %}
- {{ XTRAHOST }}
{% endfor %}
{% endif %}
- watch: - watch:
- file: idstoolsetcsync - file: idstoolsetcsync


@@ -1,35 +1,15 @@
{%- from 'vars/globals.map.jinja' import GLOBALS %} {%- from 'vars/globals.map.jinja' import GLOBALS -%}
{%- from 'idstools/map.jinja' import IDSTOOLSMERGED %} {%- from 'idstools/map.jinja' import IDSTOOLSMERGED -%}
{%- if GLOBALS.airgap is sameas true -%}
--merged=/opt/so/rules/nids/all.rules --merged=/opt/so/rules/nids/all.rules
--local=/opt/so/rules/nids/local.rules --local=/opt/so/rules/nids/local.rules
{%- if GLOBALS.md_engine == "SURICATA" %} {%- if GLOBALS.md_engine == "SURICATA" %}
--local=/opt/so/rules/nids/sorules/extraction.rules --local=/opt/so/rules/nids/sorules/extraction.rules
--local=/opt/so/rules/nids/sorules/filters.rules --local=/opt/so/rules/nids/sorules/filters.rules
{%- endif %} {%- endif %}
--url=http://{{ GLOBALS.manager }}:7788/rules/emerging-all.rules --url=http://{{ GLOBALS.manager }}:7788/suricata/emerging-all.rules
--disable=/opt/so/idstools/etc/disable.conf --disable=/opt/so/idstools/etc/disable.conf
--enable=/opt/so/idstools/etc/enable.conf --enable=/opt/so/idstools/etc/enable.conf
--modify=/opt/so/idstools/etc/modify.conf --modify=/opt/so/idstools/etc/modify.conf
{%- else -%}
--suricata-version=6.0
--merged=/opt/so/rules/nids/all.rules
--local=/opt/so/rules/nids/local.rules
{%- if GLOBALS.md_engine == "SURICATA" %}
--local=/opt/so/rules/nids/sorules/extraction.rules
--local=/opt/so/rules/nids/sorules/filters.rules
{%- endif %}
--disable=/opt/so/idstools/etc/disable.conf
--enable=/opt/so/idstools/etc/enable.conf
--modify=/opt/so/idstools/etc/modify.conf
{%- if IDSTOOLSMERGED.config.ruleset == 'ETOPEN' %}
--etopen
{%- elif IDSTOOLSMERGED.config.ruleset == 'ETPRO' %}
--etpro={{ IDSTOOLSMERGED.config.oinkcode }}
{%- elif IDSTOOLSMERGED.config.ruleset == 'TALOS' %}
--url=https://www.snort.org/rules/snortrules-snapshot-2983.tar.gz?oinkcode={{ IDSTOOLSMERGED.config.oinkcode }}
{%- endif %}
{%- endif %}
{%- if IDSTOOLSMERGED.config.urls | length > 0 %} {%- if IDSTOOLSMERGED.config.urls | length > 0 %}
{%- for URL in IDSTOOLSMERGED.config.urls %} {%- for URL in IDSTOOLSMERGED.config.urls %}
--url={{ URL }} --url={{ URL }}


@@ -1,10 +0,0 @@
#!/bin/bash
. /usr/sbin/so-common
argstr=""
for arg in "$@"; do
argstr="${argstr} \"${arg}\""
done
docker exec so-idstools /bin/bash -c "cd /opt/so/idstools/etc && idstools-rulecat --force ${argstr}"


@@ -0,0 +1,32 @@
#!/bin/bash
. /usr/sbin/so-common
{%- from 'vars/globals.map.jinja' import GLOBALS %}
{%- from 'idstools/map.jinja' import IDSTOOLSMERGED %}
{%- set proxy = salt['pillar.get']('manager:proxy') %}
mkdir -p /nsm/rules/suricata
chown -R socore:socore /nsm/rules/suricata
# Download the rules from the internet
{%- if GLOBALS.airgap != 'True' %}
{%- if proxy %}
export http_proxy={{ proxy }}
export https_proxy={{ proxy }}
export no_proxy={{ salt['pillar.get']('manager:no_proxy') }}
{%- endif %}
{%- if IDSTOOLSMERGED.config.ruleset == 'ETOPEN' %}
docker exec so-idstools idstools-rulecat -v --suricata-version 6.0 -o /nsm/rules/suricata/ --merged=/nsm/rules/suricata/emerging-all.rules --force
{%- elif IDSTOOLSMERGED.config.ruleset == 'ETPRO' %}
docker exec so-idstools idstools-rulecat -v --suricata-version 6.0 -o /nsm/rules/suricata/ --merged=/nsm/rules/suricata/emerging-all.rules --force --etpro={{ IDSTOOLSMERGED.config.oinkcode }}
{%- elif IDSTOOLSMERGED.config.ruleset == 'TALOS' %}
docker exec so-idstools idstools-rulecat -v --suricata-version 6.0 -o /nsm/rules/suricata/ --merged=/nsm/rules/suricata/emerging-all.rules --force --url=https://www.snort.org/rules/snortrules-snapshot-2983.tar.gz?oinkcode={{ IDSTOOLSMERGED.config.oinkcode }}
{%- endif %}
{%- endif %}
argstr=""
for arg in "$@"; do
argstr="${argstr} \"${arg}\""
done
docker exec so-idstools /bin/bash -c "cd /opt/so/idstools/etc && idstools-rulecat --force ${argstr}"


@@ -30,16 +30,32 @@ so-influxdb:
- DOCKER_INFLUXDB_INIT_ORG=Security Onion - DOCKER_INFLUXDB_INIT_ORG=Security Onion
- DOCKER_INFLUXDB_INIT_BUCKET=telegraf/so_short_term - DOCKER_INFLUXDB_INIT_BUCKET=telegraf/so_short_term
- DOCKER_INFLUXDB_INIT_ADMIN_TOKEN={{ TOKEN }} - DOCKER_INFLUXDB_INIT_ADMIN_TOKEN={{ TOKEN }}
{% if DOCKER.containers['so-influxdb'].extra_env %}
{% for XTRAENV in DOCKER.containers['so-influxdb'].extra_env %}
- {{ XTRAENV }}
{% endfor %}
{% endif %}
- binds: - binds:
- /opt/so/log/influxdb/:/log:rw - /opt/so/log/influxdb/:/log:rw
- /opt/so/conf/influxdb/config.yaml:/conf/config.yaml:ro - /opt/so/conf/influxdb/config.yaml:/conf/config.yaml:ro
- /nsm/influxdb:/var/lib/influxdb2:rw - /nsm/influxdb:/var/lib/influxdb2:rw
- /etc/pki/influxdb.crt:/conf/influxdb.crt:ro - /etc/pki/influxdb.crt:/conf/influxdb.crt:ro
- /etc/pki/influxdb.key:/conf/influxdb.key:ro - /etc/pki/influxdb.key:/conf/influxdb.key:ro
{% if DOCKER.containers['so-influxdb'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-influxdb'].custom_bind_mounts %}
- {{ BIND }}
{% endfor %}
{% endif %}
- port_bindings: - port_bindings:
{% for BINDING in DOCKER.containers['so-influxdb'].port_bindings %} {% for BINDING in DOCKER.containers['so-influxdb'].port_bindings %}
- {{ BINDING }} - {{ BINDING }}
{% endfor %} {% endfor %}
{% if DOCKER.containers['so-influxdb'].extra_hosts %}
- extra_hosts:
{% for XTRAHOST in DOCKER.containers['so-influxdb'].extra_hosts %}
- {{ XTRAHOST }}
{% endfor %}
{% endif %}
- watch: - watch:
- file: influxdbconf - file: influxdbconf
- require: - require:


@@ -25,13 +25,28 @@ so-kibana:
- ELASTICSEARCH_HOST={{ GLOBALS.manager }} - ELASTICSEARCH_HOST={{ GLOBALS.manager }}
- ELASTICSEARCH_PORT=9200 - ELASTICSEARCH_PORT=9200
- MANAGER={{ GLOBALS.manager }} - MANAGER={{ GLOBALS.manager }}
{% if DOCKER.containers['so-kibana'].extra_env %}
{% for XTRAENV in DOCKER.containers['so-kibana'].extra_env %}
- {{ XTRAENV }}
{% endfor %}
{% endif %}
- extra_hosts: - extra_hosts:
- {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }} - {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }}
{% if DOCKER.containers['so-kibana'].extra_hosts %}
{% for XTRAHOST in DOCKER.containers['so-kibana'].extra_hosts %}
- {{ XTRAHOST }}
{% endfor %}
{% endif %}
- binds: - binds:
- /opt/so/conf/kibana/etc:/usr/share/kibana/config:rw - /opt/so/conf/kibana/etc:/usr/share/kibana/config:rw
- /opt/so/log/kibana:/var/log/kibana:rw - /opt/so/log/kibana:/var/log/kibana:rw
- /opt/so/conf/kibana/customdashboards:/usr/share/kibana/custdashboards:ro - /opt/so/conf/kibana/customdashboards:/usr/share/kibana/custdashboards:ro
- /sys/fs/cgroup:/sys/fs/cgroup:ro - /sys/fs/cgroup:/sys/fs/cgroup:ro
{% if DOCKER.containers['so-kibana'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-kibana'].custom_bind_mounts %}
- {{ BIND }}
{% endfor %}
{% endif %}
- port_bindings: - port_bindings:
{% for BINDING in DOCKER.containers['so-kibana'].port_bindings %} {% for BINDING in DOCKER.containers['so-kibana'].port_bindings %}
- {{ BINDING }} - {{ BINDING }}


@@ -25,10 +25,27 @@ so-kratos:
- /opt/so/conf/kratos/kratos.yaml:/kratos-conf/kratos.yaml:ro - /opt/so/conf/kratos/kratos.yaml:/kratos-conf/kratos.yaml:ro
- /opt/so/log/kratos/:/kratos-log:rw - /opt/so/log/kratos/:/kratos-log:rw
- /nsm/kratos/db:/kratos-data:rw - /nsm/kratos/db:/kratos-data:rw
{% if DOCKER.containers['so-kratos'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-kratos'].custom_bind_mounts %}
- {{ BIND }}
{% endfor %}
{% endif %}
- port_bindings: - port_bindings:
{% for BINDING in DOCKER.containers['so-kratos'].port_bindings %} {% for BINDING in DOCKER.containers['so-kratos'].port_bindings %}
- {{ BINDING }} - {{ BINDING }}
{% endfor %} {% endfor %}
{% if DOCKER.containers['so-kratos'].extra_hosts %}
- extra_hosts:
{% for XTRAHOST in DOCKER.containers['so-kratos'].extra_hosts %}
- {{ XTRAHOST }}
{% endfor %}
{% endif %}
{% if DOCKER.containers['so-kratos'].extra_env %}
- environment:
{% for XTRAENV in DOCKER.containers['so-kratos'].extra_env %}
- {{ XTRAENV }}
{% endfor %}
{% endif %}
- restart_policy: unless-stopped - restart_policy: unless-stopped
- watch: - watch:
- file: kratosschema - file: kratosschema


@@ -26,8 +26,18 @@ so-logstash:
- ipv4_address: {{ DOCKER.containers['so-logstash'].ip }} - ipv4_address: {{ DOCKER.containers['so-logstash'].ip }}
- user: logstash - user: logstash
- extra_hosts: {{ REDIS_NODES }} - extra_hosts: {{ REDIS_NODES }}
{% if DOCKER.containers['so-logstash'].extra_hosts %}
{% for XTRAHOST in DOCKER.containers['so-logstash'].extra_hosts %}
- {{ XTRAHOST }}
{% endfor %}
{% endif %}
- environment: - environment:
- LS_JAVA_OPTS=-Xms{{ lsheap }} -Xmx{{ lsheap }} - LS_JAVA_OPTS=-Xms{{ lsheap }} -Xmx{{ lsheap }}
{% if DOCKER.containers['so-logstash'].extra_env %}
{% for XTRAENV in DOCKER.containers['so-logstash'].extra_env %}
- {{ XTRAENV }}
{% endfor %}
{% endif %}
- port_bindings: - port_bindings:
{% for BINDING in DOCKER.containers['so-logstash'].port_bindings %} {% for BINDING in DOCKER.containers['so-logstash'].port_bindings %}
- {{ BINDING }} - {{ BINDING }}
@@ -65,6 +75,11 @@ so-logstash:
- /opt/so/log/fleet/:/osquery/logs:ro - /opt/so/log/fleet/:/osquery/logs:ro
- /opt/so/log/strelka:/strelka:ro - /opt/so/log/strelka:/strelka:ro
{% endif %} {% endif %}
{% if DOCKER.containers['so-logstash'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-logstash'].custom_bind_mounts %}
- {{ BIND }}
{% endfor %}
{% endif %}
- watch: - watch:
- file: lsetcsync - file: lsetcsync
{% for assigned_pipeline in LOGSTASH_MERGED.assigned_pipelines.roles[GLOBALS.role.split('-')[1]] %} {% for assigned_pipeline in LOGSTASH_MERGED.assigned_pipelines.roles[GLOBALS.role.split('-')[1]] %}


@@ -15,7 +15,6 @@ POSTVERSION=$INSTALLEDVERSION
INSTALLEDSALTVERSION=$(salt --versions-report | grep Salt: | awk '{print $2}') INSTALLEDSALTVERSION=$(salt --versions-report | grep Salt: | awk '{print $2}')
BATCHSIZE=5 BATCHSIZE=5
SOUP_LOG=/root/soup.log SOUP_LOG=/root/soup.log
INFLUXDB_MIGRATION_LOG=/opt/so/log/influxdb/soup_migration.log
WHATWOULDYOUSAYYAHDOHERE=soup WHATWOULDYOUSAYYAHDOHERE=soup
whiptail_title='Security Onion UPdater' whiptail_title='Security Onion UPdater'
NOTIFYCUSTOMELASTICCONFIG=false NOTIFYCUSTOMELASTICCONFIG=false
@@ -304,11 +303,7 @@ check_log_size_limit() {
check_os_updates() { check_os_updates() {
# Check to see if there are OS updates # Check to see if there are OS updates
NEEDUPDATES="We have detected missing operating system (OS) updates. Do you want to install these OS updates now? This could take a while depending on the size of your grid and how many packages are missing, but it is recommended to keep your system updated." NEEDUPDATES="We have detected missing operating system (OS) updates. Do you want to install these OS updates now? This could take a while depending on the size of your grid and how many packages are missing, but it is recommended to keep your system updated."
if [[ $OS == 'ubuntu' ]]; then OSUPDATES=$(yum -q list updates | wc -l)
OSUPDATES=$(apt list --upgradeable | grep -v "^Listing..." | grep -v "^docker-ce" | grep -v "^wazuh-" | grep -v "^salt-" | wc -l)
else
OSUPDATES=$(yum -q list updates | wc -l)
fi
if [[ "$OSUPDATES" -gt 0 ]]; then if [[ "$OSUPDATES" -gt 0 ]]; then
if [[ -z $UNATTENDED ]]; then if [[ -z $UNATTENDED ]]; then
echo "$NEEDUPDATES" echo "$NEEDUPDATES"
@@ -362,117 +357,12 @@ clone_to_tmp() {
fi fi
} }
elastalert_indices_check() {
# Stop Elastalert to prevent Elastalert indices from being re-created
if grep -q "^so-elastalert$" /opt/so/conf/so-status/so-status.conf ; then
so-elastalert-stop || true
fi
# Wait for ElasticSearch to initialize
echo -n "Waiting for ElasticSearch..."
COUNT=0
ELASTICSEARCH_CONNECTED="no"
while [[ "$COUNT" -le 240 ]]; do
so-elasticsearch-query / -k --output /dev/null
if [ $? -eq 0 ]; then
ELASTICSEARCH_CONNECTED="yes"
echo "connected!"
break
else
((COUNT+=1))
sleep 1
echo -n "."
fi
done
# Unable to connect to Elasticsearch
if [ "$ELASTICSEARCH_CONNECTED" == "no" ]; then
echo
echo -e "Connection attempt timed out. Unable to connect to ElasticSearch. \nPlease try: \n -checking log(s) in /var/log/elasticsearch/\n -running 'sudo docker ps' \n -running 'sudo so-elastic-restart'"
echo
exit 1
fi
# Check Elastalert indices
echo "Deleting Elastalert indices to prevent issues with upgrade to Elastic 8..."
CHECK_COUNT=0
while [[ "$CHECK_COUNT" -le 2 ]]; do
# Delete Elastalert indices
for i in $(so-elasticsearch-query _cat/indices | grep elastalert | awk '{print $3}'); do
so-elasticsearch-query $i -XDELETE;
done
# Check to ensure Elastalert indices are deleted
COUNT=0
ELASTALERT_INDICES_DELETED="no"
while [[ "$COUNT" -le 240 ]]; do
RESPONSE=$(so-elasticsearch-query elastalert*)
if [[ "$RESPONSE" == "{}" ]]; then
ELASTALERT_INDICES_DELETED="yes"
echo "Elastalert indices successfully deleted."
break
else
((COUNT+=1))
sleep 1
echo -n "."
fi
done
((CHECK_COUNT+=1))
done
# If we were unable to delete the Elastalert indices, exit the script
if [ "$ELASTALERT_INDICES_DELETED" == "no" ]; then
echo
echo -e "Unable to connect to delete Elastalert indices. Exiting."
echo
exit 1
fi
}
enable_highstate() { enable_highstate() {
echo "Enabling highstate." echo "Enabling highstate."
salt-call state.enable highstate -l info --local salt-call state.enable highstate -l info --local
echo "" echo ""
} }
es_version_check() {
CHECK_ES=$(echo $INSTALLEDVERSION | awk -F. '{print $3}')
if [ "$CHECK_ES" -lt "110" ]; then
echo "You are currently running Security Onion $INSTALLEDVERSION. You will need to update to version 2.3.130 before updating to 2.3.140 or higher."
echo ""
echo "If your deployment has Internet access, you can use the following command to update to 2.3.130:"
echo "sudo BRANCH=2.3.130-20220607 soup"
echo ""
echo "Otherwise, if your deployment is configured for airgap, you can instead download the 2.3.130 ISO image from https://download.securityonion.net/file/securityonion/securityonion-2.3.130-20220607.iso."
echo ""
echo "*** Once you have updated to 2.3.130, you can then update to 2.3.140 or higher as you would normally. ***"
exit 1
fi
}
es_indices_check() {
echo "Checking for unsupported Elasticsearch indices..."
UNSUPPORTED_INDICES=$(for INDEX in $(so-elasticsearch-indices-list | awk '{print $3}'); do so-elasticsearch-query $INDEX/_settings?human |grep '"created_string":"6' | jq -r 'keys'[0]; done)
if [ -z "$UNSUPPORTED_INDICES" ]; then
echo "No unsupported indices found."
else
echo "The following indices were created with Elasticsearch 6, and are not supported when upgrading to Elasticsearch 8. These indices may need to be deleted, migrated, or re-indexed before proceeding with the upgrade. Please see $DOC_BASE_URL/soup.html#elastic-8 for more details."
echo
echo "$UNSUPPORTED_INDICES"
exit 1
fi
}
generate_and_clean_tarballs() {
local new_version
new_version=$(cat $UPDATE_DIR/VERSION)
[ -d /opt/so/repo ] || mkdir -p /opt/so/repo
tar -czf "/opt/so/repo/$new_version.tar.gz" -C "$UPDATE_DIR" .
find "/opt/so/repo" -type f -not -name "$new_version.tar.gz" -exec rm -rf {} \;
}
highstate() { highstate() {
# Run a highstate. # Run a highstate.
salt-call state.highstate -l info queue=True salt-call state.highstate -l info queue=True
@@ -480,39 +370,26 @@ highstate() {
masterlock() { masterlock() {
echo "Locking Salt Master" echo "Locking Salt Master"
if [[ "$INSTALLEDVERSION" =~ rc.1 ]]; then TOPFILE=/opt/so/saltstack/default/salt/top.sls
TOPFILE=/opt/so/saltstack/default/salt/top.sls BACKUPTOPFILE=/opt/so/saltstack/default/salt/top.sls.backup
BACKUPTOPFILE=/opt/so/saltstack/default/salt/top.sls.backup mv -v $TOPFILE $BACKUPTOPFILE
mv -v $TOPFILE $BACKUPTOPFILE echo "base:" > $TOPFILE
echo "base:" > $TOPFILE echo " $MINIONID:" >> $TOPFILE
echo " $MINIONID:" >> $TOPFILE echo " - ca" >> $TOPFILE
echo " - ca" >> $TOPFILE echo " - ssl" >> $TOPFILE
echo " - ssl" >> $TOPFILE echo " - elasticsearch" >> $TOPFILE
echo " - elasticsearch" >> $TOPFILE
fi
} }
masterunlock() { masterunlock() {
echo "Unlocking Salt Master" echo "Unlocking Salt Master"
if [[ "$INSTALLEDVERSION" =~ rc.1 ]]; then mv -v $BACKUPTOPFILE $TOPFILE
mv -v $BACKUPTOPFILE $TOPFILE
fi
} }
preupgrade_changes() { preupgrade_changes() {
# This function is to add any new pillar items if needed. # This function is to add any new pillar items if needed.
echo "Checking to see if changes are needed." echo "Checking to see if changes are needed."
[[ "$INSTALLEDVERSION" == 2.3.0 || "$INSTALLEDVERSION" == 2.3.1 || "$INSTALLEDVERSION" == 2.3.2 || "$INSTALLEDVERSION" == 2.3.10 ]] && up_to_2.3.20 [[ "$INSTALLEDVERSION" == 2.4.2 ]] && up_to_2.4.3
[[ "$INSTALLEDVERSION" == 2.3.20 || "$INSTALLEDVERSION" == 2.3.21 ]] && up_to_2.3.30
[[ "$INSTALLEDVERSION" == 2.3.30 || "$INSTALLEDVERSION" == 2.3.40 ]] && up_to_2.3.50
[[ "$INSTALLEDVERSION" == 2.3.50 || "$INSTALLEDVERSION" == 2.3.51 || "$INSTALLEDVERSION" == 2.3.52 || "$INSTALLEDVERSION" == 2.3.60 || "$INSTALLEDVERSION" == 2.3.61 || "$INSTALLEDVERSION" == 2.3.70 ]] && up_to_2.3.80
[[ "$INSTALLEDVERSION" == 2.3.80 ]] && up_to_2.3.90
[[ "$INSTALLEDVERSION" == 2.3.90 || "$INSTALLEDVERSION" == 2.3.91 ]] && up_to_2.3.100
[[ "$INSTALLEDVERSION" == 2.3.100 ]] && up_to_2.3.110
[[ "$INSTALLEDVERSION" == 2.3.110 ]] && up_to_2.3.120
[[ "$INSTALLEDVERSION" == 2.3.120 ]] && up_to_2.3.130
[[ "$INSTALLEDVERSION" == 2.3.130 ]] && up_to_2.3.140
true true
} }
@@ -520,100 +397,17 @@ postupgrade_changes() {
# This function is to add any new pillar items if needed. # This function is to add any new pillar items if needed.
echo "Running post upgrade processes." echo "Running post upgrade processes."
[[ "$POSTVERSION" == 2.3.0 || "$POSTVERSION" == 2.3.1 || "$POSTVERSION" == 2.3.2 || "$POSTVERSION" == 2.3.10 || "$POSTVERSION" == 2.3.20 ]] && post_to_2.3.21 [[ "$POSTVERSION" == 2.4.2 ]] && post_to_2.4.3
[[ "$POSTVERSION" == 2.3.21 || "$POSTVERSION" == 2.3.30 ]] && post_to_2.3.40
[[ "$POSTVERSION" == 2.3.40 || "$POSTVERSION" == 2.3.50 || "$POSTVERSION" == 2.3.51 || "$POSTVERSION" == 2.3.52 ]] && post_to_2.3.60
[[ "$POSTVERSION" == 2.3.60 || "$POSTVERSION" == 2.3.61 || "$POSTVERSION" == 2.3.70 || "$POSTVERSION" == 2.3.80 ]] && post_to_2.3.90
[[ "$POSTVERSION" == 2.3.90 || "$POSTVERSION" == 2.3.91 ]] && post_to_2.3.100
[[ "$POSTVERSION" == 2.3.100 ]] && post_to_2.3.110
[[ "$POSTVERSION" == 2.3.110 ]] && post_to_2.3.120
[[ "$POSTVERSION" == 2.3.120 ]] && post_to_2.3.130
[[ "$POSTVERSION" == 2.3.130 ]] && post_to_2.3.140
true true
} }
post_to_2.3.21() { post_to_2.4.3() {
salt-call state.apply playbook.OLD_db_init echo "Nothing to apply"
rm -f /opt/so/rules/elastalert/playbook/*.yaml POSTVERSION=2.4.3
so-playbook-ruleupdate >> /root/soup_playbook_rule_update.log 2>&1 &
POSTVERSION=2.3.21
} }
post_to_2.3.40() {
so-playbook-sigma-refresh >> /root/soup_playbook_sigma_refresh.log 2>&1 &
so-kibana-space-defaults
POSTVERSION=2.3.40
}
post_to_2.3.60() {
for table in identity_recovery_addresses selfservice_recovery_flows selfservice_registration_flows selfservice_verification_flows identities identity_verification_tokens identity_credentials selfservice_settings_flows identity_recovery_tokens continuity_containers identity_credential_identifiers identity_verifiable_addresses courier_messages selfservice_errors sessions selfservice_login_flows
do
echo "Forcing Kratos network migration: $table"
sqlite3 /opt/so/conf/kratos/db/db.sqlite "update $table set nid=(select id from networks limit 1);"
done
POSTVERSION=2.3.60
}
post_to_2.3.90() {
# Create FleetDM service account
FLEET_MANAGER=$(lookup_pillar fleet_manager)
if [[ "$FLEET_MANAGER" == "True" ]]; then
FLEET_SA_EMAIL=$(lookup_pillar_secret fleet_sa_email)
FLEET_SA_PW=$(lookup_pillar_secret fleet_sa_password)
MYSQL_PW=$(lookup_pillar_secret mysql)
FLEET_HASH=$(docker exec so-soctopus python -c "import bcrypt; print(bcrypt.hashpw('$FLEET_SA_PW'.encode('utf-8'), bcrypt.gensalt()).decode('utf-8'));" 2>&1)
MYSQL_OUTPUT=$(docker exec so-mysql mysql -u root --password=$MYSQL_PW fleet -e \
"INSERT INTO users (password,salt,email,name,global_role) VALUES ('$FLEET_HASH','','$FLEET_SA_EMAIL','$FLEET_SA_EMAIL','admin')" 2>&1)
if [[ $? -eq 0 ]]; then
echo "Successfully added service account to Fleet"
else
echo "Unable to add service account to Fleet"
echo "$MYSQL_OUTPUT"
fi
fi
POSTVERSION=2.3.90
}
post_to_2.3.100() {
echo "Post Processing for 2.3.100"
POSTVERSION=2.3.100
}
post_to_2.3.110() {
echo "Post Processing for 2.3.110"
echo "Removing old Elasticsearch index templates"
[ -d /opt/so/saltstack/default/salt/elasticsearch/templates/so ] && rm -rf /opt/so/saltstack/default/salt/elasticsearch/templates/so
echo "Updating Kibana dashboards"
salt-call state.apply kibana.so_savedobjects_defaults queue=True
POSTVERSION=2.3.110
}
post_to_2.3.120() {
echo "Post Processing for 2.3.120"
POSTVERSION=2.3.120
sed -i '/so-thehive-es/d;/so-thehive/d;/so-cortex/d' /opt/so/conf/so-status/so-status.conf
}
post_to_2.3.130() {
echo "Post Processing for 2.3.130"
POSTVERSION=2.3.130
}
post_to_2.3.140() {
echo "Post Processing for 2.3.140"
FORCE_SYNC=true so-user sync
so-kibana-restart
so-kibana-space-defaults
POSTVERSION=2.3.140
}
stop_salt_master() { stop_salt_master() {
# kill all salt jobs across the grid because the hang indefinitely if they are queued and salt-master restarts # kill all salt jobs across the grid because the hang indefinitely if they are queued and salt-master restarts
@@ -656,235 +450,9 @@ stop_salt_minion() {
set -e set -e
} }
up_to_2.3.20(){
DOCKERSTUFFBIP=$(echo $DOCKERSTUFF | awk -F'.' '{print $1,$2,$3,1}' OFS='.')/24
# Remove PCAP from global
sed '/pcap:/d' /opt/so/saltstack/local/pillar/global.sls
sed '/sensor_checkin_interval_ms:/d' /opt/so/saltstack/local/pillar/global.sls
# Add checking interval to glbal up_to_2.4.3() {
echo "sensoroni:" >> /opt/so/saltstack/local/pillar/global.sls echo "Nothing to do for 2.4.3"
echo " node_checkin_interval_ms: 10000" >> /opt/so/saltstack/local/pillar/global.sls
# Update pillar fiels for new sensoroni functionality
for file in /opt/so/saltstack/local/pillar/minions/*; do
echo "sensoroni:" >> $file
echo " node_description:" >> $file
local SOMEADDRESS=$(cat $file | grep mainip | tail -n 1 | awk '{print $2'})
echo " node_address: $SOMEADDRESS" >> $file
done
# Remove old firewall config to reduce confusion
rm -f /opt/so/saltstack/default/pillar/firewall/ports.sls
# Fix daemon.json by managing it
echo "docker:" >> /opt/so/saltstack/local/pillar/global.sls
DOCKERGREP=$(cat /etc/docker/daemon.json | grep base | awk {'print $3'} | cut -f1 -d"," | tr -d '"')
if [ -z "$DOCKERGREP" ]; then
echo " range: '172.17.0.0/24'" >> /opt/so/saltstack/local/pillar/global.sls
echo " bip: '172.17.0.1/24'" >> /opt/so/saltstack/local/pillar/global.sls
else
DOCKERSTUFF="${DOCKERGREP//\"}"
DOCKERSTUFFBIP=$(echo $DOCKERSTUFF | awk -F'.' '{print $1,$2,$3,1}' OFS='.')/24
echo " range: '$DOCKERSTUFF/24'" >> /opt/so/saltstack/local/pillar/global.sls
echo " bip: '$DOCKERSTUFFBIP'" >> /opt/so/saltstack/local/pillar/global.sls
fi
INSTALLEDVERSION=2.3.20
}
up_to_2.3.30() {
# Replace any curly brace scalars with the same scalar in single quotes
readarray -t minion_pillars <<< "$(find /opt/so/saltstack/local/pillar/minions -type f -name '*.sls')"
for pillar in "${minion_pillars[@]}"; do
sed -i -r "s/ (\{\{.*}})$/ '\1'/g" "$pillar"
done
# Change the IMAGEREPO
sed -i "/ imagerepo: 'securityonion'/c\ imagerepo: 'security-onion-solutions'" /opt/so/saltstack/local/pillar/global.sls
sed -i "/ imagerepo: securityonion/c\ imagerepo: 'security-onion-solutions'" /opt/so/saltstack/local/pillar/global.sls
# Strelka rule repo pillar addition
if [[ $is_airgap -eq 0 ]]; then
# Add manager as default Strelka YARA rule repo
sed -i "/^strelka:/a \\ repos: \n - https://$HOSTNAME/repo/rules/strelka" /opt/so/saltstack/local/pillar/global.sls;
else
# Add Github repo for Strelka YARA rules
sed -i "/^strelka:/a \\ repos: \n - https://github.com/Neo23x0/signature-base" /opt/so/saltstack/local/pillar/global.sls;
fi
check_log_size_limit
INSTALLEDVERSION=2.3.30
}
up_to_2.3.50() {
cat <<EOF > /tmp/supersed.txt
/so-zeek:/ {
p;
n;
/shards:/ {
p;
n;
/warm:/ {
p;
n;
/close:/ {
s/close: 365/close: 45/;
p;
n;
/delete:/ {
s/delete: 45/delete: 365/;
p;
d;
}
}
}
}
}
p;
EOF
sed -n -i -f /tmp/supersed.txt /opt/so/saltstack/local/pillar/global.sls
rm /tmp/supersed.txt
INSTALLEDVERSION=2.3.50
}
up_to_2.3.80() {
# Remove watermark settings from global.sls
sed -i '/ cluster_routing_allocation_disk/d' /opt/so/saltstack/local/pillar/global.sls
# Add new indices to the global
sed -i '/ index_settings:/a \\ so-elasticsearch: \n shards: 1 \n warm: 7 \n close: 30 \n delete: 365' /opt/so/saltstack/local/pillar/global.sls
sed -i '/ index_settings:/a \\ so-logstash: \n shards: 1 \n warm: 7 \n close: 30 \n delete: 365' /opt/so/saltstack/local/pillar/global.sls
sed -i '/ index_settings:/a \\ so-kibana: \n shards: 1 \n warm: 7 \n close: 30 \n delete: 365' /opt/so/saltstack/local/pillar/global.sls
sed -i '/ index_settings:/a \\ so-redis: \n shards: 1 \n warm: 7 \n close: 30 \n delete: 365' /opt/so/saltstack/local/pillar/global.sls
# Do some pillar formatting
tc=$(grep -w true_cluster /opt/so/saltstack/local/pillar/global.sls | awk -F: {'print tolower($2)'}| xargs)
if [[ "$tc" == "true" ]]; then
tcname=$(grep -w true_cluster_name /opt/so/saltstack/local/pillar/global.sls | awk -F: {'print $2'})
sed -i "/^elasticsearch:/a \\ config: \n cluster: \n name: $tcname" /opt/so/saltstack/local/pillar/global.sls
sed -i '/ true_cluster_name/d' /opt/so/saltstack/local/pillar/global.sls
sed -i '/ esclustername/d' /opt/so/saltstack/local/pillar/global.sls
for file in /opt/so/saltstack/local/pillar/minions/*.sls; do
if [[ ${file} != *"manager.sls"* ]]; then
noderoutetype=$(grep -w node_route_type $file | awk -F: {'print $2'})
if [ -n "$noderoutetype" ]; then
sed -i "/^elasticsearch:/a \\ config: \n node: \n attr: \n box_type: $noderoutetype" $file
sed -i '/ node_route_type/d' $file
noderoutetype=''
fi
fi
done
fi
# check for local es config to inform user that the config in local is now ignored and those options need to be placed in the pillar
if [ -f "/opt/so/saltstack/local/salt/elasticsearch/files/elasticsearch.yml" ]; then
NOTIFYCUSTOMELASTICCONFIG=true
fi
INSTALLEDVERSION=2.3.80
}
up_to_2.3.90() {
for i in manager managersearch eval standalone; do
echo "Checking for compgen match of /opt/so/saltstack/local/pillar/minions/*_$i.sls"
if compgen -G "/opt/so/saltstack/local/pillar/minions/*_$i.sls"; then
echo "Found compgen match for /opt/so/saltstack/local/pillar/minions/*_$i.sls"
for f in $(compgen -G "/opt/so/saltstack/local/pillar/minions/*_$i.sls"); do
if grep -qozP "^soc:\n.*es_index_patterns: '\*:so-\*,\*:endgame-\*'" "$f"; then
echo "soc:es_index_patterns already present in $f"
else
echo "Appending soc pillar data to $f"
echo "soc:" >> "$f"
sed -i "/^soc:/a \\ es_index_patterns: '*:so-*,*:endgame-*'" "$f"
fi
done
fi
done
# Create Endgame Hostgroup
echo "Adding endgame hostgroup with so-firewall"
if so-firewall addhostgroup endgame 2>&1 | grep -q 'Already exists'; then
echo 'endgame hostgroup already exists'
else
echo 'endgame hostgroup added'
fi
# Force influx to generate a new cert
echo "Moving influxdb.crt and influxdb.key to generate new certs"
mv -vf /etc/pki/influxdb.crt /etc/pki/influxdb.crt.2390upgrade
mv -vf /etc/pki/influxdb.key /etc/pki/influxdb.key.2390upgrade
# remove old common ingest pipeline in default
rm -vf /opt/so/saltstack/default/salt/elasticsearch/files/ingest/common
# if custom common, move from local ingest to local ingest-dynamic
mkdir -vp /opt/so/saltstack/local/salt/elasticsearch/files/ingest-dynamic
if [[ -f "/opt/so/saltstack/local/salt/elasticsearch/files/ingest/common" ]]; then
mv -v /opt/so/saltstack/local/salt/elasticsearch/files/ingest/common /opt/so/saltstack/local/salt/elasticsearch/files/ingest-dynamic/common
# since json file, we need to wrap with raw
sed -i '1s/^/{% raw %}\n/' /opt/so/saltstack/local/salt/elasticsearch/files/ingest-dynamic/common
sed -i -e '$a{% endraw %}\n' /opt/so/saltstack/local/salt/elasticsearch/files/ingest-dynamic/common
fi
# Generate FleetDM Service Account creds if they do not exist
if grep -q "fleet_sa_email" /opt/so/saltstack/local/pillar/secrets.sls; then
echo "FleetDM Service Account credentials already created..."
else
echo "Generating FleetDM Service Account credentials..."
FLEETSAPASS=$(get_random_value)
printf '%s\n'\
" fleet_sa_email: service.account@securityonion.invalid"\
" fleet_sa_password: $FLEETSAPASS"\
>> /opt/so/saltstack/local/pillar/secrets.sls
fi
sed -i -re 's/^(playbook_admin.*|playbook_automation.*)/ \1/g' /opt/so/saltstack/local/pillar/secrets.sls
INSTALLEDVERSION=2.3.90
}
up_to_2.3.100() {
fix_wazuh
echo "Adding receiver hostgroup with so-firewall"
if so-firewall addhostgroup receiver 2>&1 | grep -q 'Already exists'; then
echo 'receiver hostgroup already exists'
else
echo 'receiver hostgroup added'
fi
echo "Adding receiver to assigned_hostgroups.local.map.yaml"
grep -qxF " receiver:" /opt/so/saltstack/local/salt/firewall/assigned_hostgroups.local.map.yaml || sed -i -e '$a\ receiver:' /opt/so/saltstack/local/salt/firewall/assigned_hostgroups.local.map.yaml
INSTALLEDVERSION=2.3.100
}
up_to_2.3.110() {
sed -i 's|shards|index_template:\n template:\n settings:\n index:\n number_of_shards|g' /opt/so/saltstack/local/pillar/global.sls
INSTALLEDVERSION=2.3.110
}
up_to_2.3.120() {
# Stop thehive services since these will be broken in .120
so-thehive-stop
so-thehive-es-stop
so-cortex-stop
INSTALLEDVERSION=2.3.120
}
up_to_2.3.130() {
# Remove file for nav update
rm -f /opt/so/conf/navigator/layers/nav_layer_playbook.json
INSTALLEDVERSION=2.3.130
}
up_to_2.3.140() {
elastalert_indices_check
## ##
INSTALLEDVERSION=2.3.140 INSTALLEDVERSION=2.3.140
} }
@@ -993,24 +561,6 @@ upgrade_salt() {
echo "" echo ""
yum versionlock add "salt-*" yum versionlock add "salt-*"
# Else do Ubuntu things # Else do Ubuntu things
elif [[ $OS == 'ubuntu' ]]; then
echo "Removing apt hold for Salt."
echo ""
apt-mark unhold "salt-common"
apt-mark unhold "salt-master"
apt-mark unhold "salt-minion"
echo "Updating Salt packages."
echo ""
set +e
run_check_net_err \
"sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -r -F -M -x python3 stable \"$NEWSALTVERSION\"" \
"Could not update salt, please check $SOUP_LOG for details."
set -e
echo "Applying apt hold for Salt."
echo ""
apt-mark hold "salt-common"
apt-mark hold "salt-master"
apt-mark hold "salt-minion"
fi fi
echo "Checking if Salt was upgraded." echo "Checking if Salt was upgraded."
@@ -1030,46 +580,6 @@ upgrade_salt() {
} }
update_repo() {
if [[ "$OS" == "centos" ]]; then
echo "Performing repo changes."
# Import GPG Keys
gpg_rpm_import
echo "Disabling fastestmirror."
disable_fastestmirror
echo "Deleting unneeded repo files."
DELREPOS=('CentOS-Base' 'CentOS-CR' 'CentOS-Debuginfo' 'docker-ce' 'CentOS-fasttrack' 'CentOS-Media' 'CentOS-Sources' 'CentOS-Vault' 'CentOS-x86_64-kernel' 'epel' 'epel-testing' 'saltstack' 'salt-latest' 'wazuh')
for DELREPO in "${DELREPOS[@]}"; do
if [[ -f "/etc/yum.repos.d/$DELREPO.repo" ]]; then
echo "Deleting $DELREPO.repo"
rm -f "/etc/yum.repos.d/$DELREPO.repo"
fi
done
if [[ $is_airgap -eq 1 ]]; then
# Copy the new repo file if not airgap
cp $UPDATE_DIR/salt/repo/client/files/centos/securityonion.repo /etc/yum.repos.d/
yum clean all
yum repolist
fi
elif [[ "$OS" == "ubuntu" ]]; then
ubuntu_version=$(grep VERSION_ID /etc/os-release | awk -F '[ "]' '{print $2}')
if grep -q "UBUNTU_CODENAME=bionic" /etc/os-release; then
OSVER=bionic
elif grep -q "UBUNTU_CODENAME=focal" /etc/os-release; then
OSVER=focal
else
echo "We do not support your current version of Ubuntu."
exit 1
fi
rm -f /etc/apt/sources.list.d/salt.list
echo "deb https://repo.securityonion.net/file/securityonion-repo/ubuntu/$ubuntu_version/amd64/salt3004.2/ $OSVER main" > /etc/apt/sources.list.d/saltstack.list
apt-get update
fi
}
verify_latest_update_script() { verify_latest_update_script() {
# Check to see if the update scripts match. If not run the new one. # Check to see if the update scripts match. If not run the new one.
CURRENTSOUP=$(md5sum /usr/sbin/soup | awk '{print $1}') CURRENTSOUP=$(md5sum /usr/sbin/soup | awk '{print $1}')
@@ -1096,51 +606,37 @@ verify_latest_update_script() {
fi fi
} }
apply_hotfix() { # Keeping this block in case we need to do a hotfix that requires salt update
if [[ "$INSTALLEDVERSION" == "2.3.90" ]] ; then #apply_hotfix() {
fix_wazuh # if [[ "$INSTALLEDVERSION" == "2.3.90" ]] ; then
elif [[ "$INSTALLEDVERSION" == "2.3.110" ]] ; then # fix_wazuh
2_3_10_hotfix_1 # elif [[ "$INSTALLEDVERSION" == "2.3.110" ]] ; then
else # 2_3_10_hotfix_1
echo "No actions required. ($INSTALLEDVERSION/$HOTFIXVERSION)" # else
fi # echo "No actions required. ($INSTALLEDVERSION/$HOTFIXVERSION)"
} # fi
#}
fix_wazuh() {
FILE="/nsm/wazuh/etc/ossec.conf"
echo "Detecting if $FILE needs corrected..."
if [ -f "$FILE" ]; then
if head -1 $FILE | grep -q "xml version"; then
echo "$FILE has an XML header; removing"
sed -i 1d $FILE
docker restart so-wazuh # cannot use so-wazuh-restart here because the salt-master service is stopped
else
echo "$FILE does not have an XML header, so no changes are necessary."
fi
else
echo "$FILE does not exist, so no changes are necessary."
fi
}
#upgrade salt to 3004.1 #upgrade salt to 3004.1
2_3_10_hotfix_1() { #2_3_10_hotfix_1() {
systemctl_func "stop" "$cron_service_name" # systemctl_func "stop" "$cron_service_name"
# update mine items prior to stopping salt-minion and salt-master # # update mine items prior to stopping salt-minion and salt-master
update_salt_mine # update_salt_mine
stop_salt_minion # stop_salt_minion
stop_salt_master # stop_salt_master
update_repo # update_repo
# Does salt need upgraded. If so update it. # # Does salt need upgraded. If so update it.
if [[ $UPGRADESALT -eq 1 ]]; then # if [[ $UPGRADESALT -eq 1 ]]; then
echo "Upgrading Salt" # echo "Upgrading Salt"
# Update the repo files so it can actually upgrade # # Update the repo files so it can actually upgrade
upgrade_salt # upgrade_salt
fi # fi
systemctl_func "start" "salt-master" # systemctl_func "start" "salt-master"
systemctl_func "start" "salt-minion" # systemctl_func "start" "salt-minion"
systemctl_func "start" "$cron_service_name" # systemctl_func "start" "$cron_service_name"
} #}
main() { main() {
trap 'check_err $?' EXIT trap 'check_err $?' EXIT
@@ -1198,23 +694,9 @@ main() {
fi fi
echo "Verifying we have the latest soup script." echo "Verifying we have the latest soup script."
verify_latest_update_script verify_latest_update_script
es_version_check echo "Checking for OS updates."
es_indices_check
elastalert_indices_check
echo ""
set_palette
check_elastic_license
echo ""
check_os_updates check_os_updates
echo "Generating new repo archive"
generate_and_clean_tarballs
if [ -f /usr/sbin/so-image-common ]; then
. /usr/sbin/so-image-common
else
add_common
fi
echo "Let's see if we need to update Security Onion." echo "Let's see if we need to update Security Onion."
upgrade_check upgrade_check
upgrade_space upgrade_space
@@ -1224,7 +706,6 @@ main() {
set -e set -e
if [[ $is_airgap -eq 0 ]]; then if [[ $is_airgap -eq 0 ]]; then
update_centos_repo
yum clean all yum clean all
check_os_updates check_os_updates
fi fi


@@ -33,6 +33,11 @@ so-mysql:
- ipv4_address: {{ DOCKER.containers['so-mysql'].ip }} - ipv4_address: {{ DOCKER.containers['so-mysql'].ip }}
- extra_hosts: - extra_hosts:
- {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }} - {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }}
{% if DOCKER.containers['so-mysql'].extra_hosts %}
{% for XTRAHOST in DOCKER.containers['so-mysql'].extra_hosts %}
- {{ XTRAHOST }}
{% endfor %}
{% endif %}
- port_bindings: - port_bindings:
{% for BINDING in DOCKER.containers['so-mysql'].port_bindings %} {% for BINDING in DOCKER.containers['so-mysql'].port_bindings %}
- {{ BINDING }} - {{ BINDING }}
@@ -40,11 +45,21 @@ so-mysql:
- environment: - environment:
- MYSQL_ROOT_HOST={{ GLOBALS.so_docker_bip }} - MYSQL_ROOT_HOST={{ GLOBALS.so_docker_bip }}
- MYSQL_ROOT_PASSWORD=/etc/mypass - MYSQL_ROOT_PASSWORD=/etc/mypass
{% if DOCKER.containers['so-mysql'].extra_env %}
{% for XTRAENV in DOCKER.containers['so-mysql'].extra_env %}
- {{ XTRAENV }}
{% endfor %}
{% endif %}
- binds: - binds:
- /opt/so/conf/mysql/etc/my.cnf:/etc/my.cnf:ro - /opt/so/conf/mysql/etc/my.cnf:/etc/my.cnf:ro
- /opt/so/conf/mysql/etc/mypass:/etc/mypass - /opt/so/conf/mysql/etc/mypass:/etc/mypass
- /nsm/mysql:/var/lib/mysql:rw - /nsm/mysql:/var/lib/mysql:rw
- /opt/so/log/mysql:/var/log/mysql:rw - /opt/so/log/mysql:/var/log/mysql:rw
{% if DOCKER.containers['so-mysql'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-mysql'].custom_bind_mounts %}
- {{ BIND }}
{% endfor %}
{% endif %}
- watch: - watch:
- /opt/so/conf/mysql/etc - /opt/so/conf/mysql/etc
- require: - require:


@@ -12,6 +12,15 @@ include:
- nginx.config - nginx.config
- nginx.sostatus - nginx.sostatus
make-rule-dir-nginx:
file.directory:
- name: /nsm/rules
- user: socore
- group: socore
- recurse:
- user
- group
so-nginx: so-nginx:
docker_container.running: docker_container.running:
- image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-nginx:{{ GLOBALS.so_version }} - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-nginx:{{ GLOBALS.so_version }}
@@ -21,6 +30,11 @@ so-nginx:
- ipv4_address: {{ DOCKER.containers['so-nginx'].ip }} - ipv4_address: {{ DOCKER.containers['so-nginx'].ip }}
- extra_hosts: - extra_hosts:
- {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }} - {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }}
{% if DOCKER.containers['so-nginx'].extra_hosts %}
{% for XTRAHOST in DOCKER.containers['so-nginx'].extra_hosts %}
- {{ XTRAHOST }}
{% endfor %}
{% endif %}
- binds: - binds:
- /opt/so/conf/nginx/nginx.conf:/etc/nginx/nginx.conf:ro - /opt/so/conf/nginx/nginx.conf:/etc/nginx/nginx.conf:ro
- /opt/so/log/nginx/:/var/log/nginx:rw - /opt/so/log/nginx/:/var/log/nginx:rw
@@ -37,7 +51,19 @@ so-nginx:
- /opt/so/conf/navigator/enterprise-attack.json:/opt/socore/html/navigator/assets/enterprise-attack.json:ro - /opt/so/conf/navigator/enterprise-attack.json:/opt/socore/html/navigator/assets/enterprise-attack.json:ro
- /opt/so/conf/navigator/pre-attack.json:/opt/socore/html/navigator/assets/pre-attack.json:ro - /opt/so/conf/navigator/pre-attack.json:/opt/socore/html/navigator/assets/pre-attack.json:ro
- /nsm/repo:/opt/socore/html/repo:ro - /nsm/repo:/opt/socore/html/repo:ro
- /nsm/rules:/nsm/rules:ro
{% endif %} {% endif %}
{% if DOCKER.containers['so-nginx'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-nginx'].custom_bind_mounts %}
- {{ BIND }}
{% endfor %}
{% endif %}
{% if DOCKER.containers['so-nginx'].extra_env %}
- environment:
{% for XTRAENV in DOCKER.containers['so-nginx'].extra_env %}
- {{ XTRAENV }}
{% endfor %}
{% endif %}
- cap_add: NET_BIND_SERVICE - cap_add: NET_BIND_SERVICE
- port_bindings: - port_bindings:
{% for BINDING in DOCKER.containers['so-nginx'].port_bindings %} {% for BINDING in DOCKER.containers['so-nginx'].port_bindings %}


@@ -84,8 +84,8 @@ http {
server { server {
listen 7788; listen 7788;
server_name {{ GLOBALS.url_base }}; server_name {{ GLOBALS.url_base }};
root /opt/socore/html/repo; root /nsm/rules;
location /rules/ { location / {
allow all; allow all;
sendfile on; sendfile on;
sendfile_max_chunk 1m; sendfile_max_chunk 1m;


@@ -5,7 +5,9 @@
{% from 'allowed_states.map.jinja' import allowed_states %} {% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %} {% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'docker/docker.map.jinja' import DOCKER %}
include: include:
- pcap.config - pcap.config
@@ -24,6 +26,23 @@ so-steno:
- /nsm/pcapindex:/nsm/pcapindex:rw - /nsm/pcapindex:/nsm/pcapindex:rw
- /nsm/pcaptmp:/tmp:rw - /nsm/pcaptmp:/tmp:rw
- /opt/so/log/stenographer:/var/log/stenographer:rw - /opt/so/log/stenographer:/var/log/stenographer:rw
{% if DOCKER.containers['so-steno'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-steno'].custom_bind_mounts %}
- {{ BIND }}
{% endfor %}
{% endif %}
{% if DOCKER.containers['so-steno'].extra_hosts %}
- extra_hosts:
{% for XTRAHOST in DOCKER.containers['so-steno'].extra_hosts %}
- {{ XTRAHOST }}
{% endfor %}
{% endif %}
{% if DOCKER.containers['so-steno'].extra_env %}
- environment:
{% for XTRAENV in DOCKER.containers['so-steno'].extra_env %}
- {{ XTRAENV }}
{% endfor %}
{% endif %}
- watch: - watch:
- file: stenoconf - file: stenoconf
- require: - require:


@@ -34,13 +34,28 @@ so-playbook:
- ipv4_address: {{ DOCKER.containers['so-playbook'].ip }} - ipv4_address: {{ DOCKER.containers['so-playbook'].ip }}
- binds: - binds:
- /opt/so/log/playbook:/playbook/log:rw - /opt/so/log/playbook:/playbook/log:rw
{% if DOCKER.containers['so-playbook'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-playbook'].custom_bind_mounts %}
- {{ BIND }}
{% endfor %}
{% endif %}
- extra_hosts: - extra_hosts:
- {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }} - {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }}
{% if DOCKER.containers['so-playbook'].extra_hosts %}
{% for XTRAHOST in DOCKER.containers['so-playbook'].extra_hosts %}
- {{ XTRAHOST }}
{% endfor %}
{% endif %}
- environment: - environment:
- REDMINE_DB_MYSQL={{ GLOBALS.manager }} - REDMINE_DB_MYSQL={{ GLOBALS.manager }}
- REDMINE_DB_DATABASE=playbook - REDMINE_DB_DATABASE=playbook
- REDMINE_DB_USERNAME=playbookdbuser - REDMINE_DB_USERNAME=playbookdbuser
- REDMINE_DB_PASSWORD={{ PLAYBOOKPASS }} - REDMINE_DB_PASSWORD={{ PLAYBOOKPASS }}
{% if DOCKER.containers['so-playbook'].extra_env %}
{% for XTRAENV in DOCKER.containers['so-playbook'].extra_env %}
- {{ XTRAENV }}
{% endfor %}
{% endif %}
- port_bindings: - port_bindings:
{% for BINDING in DOCKER.containers['so-playbook'].port_bindings %} {% for BINDING in DOCKER.containers['so-playbook'].port_bindings %}
- {{ BINDING }} - {{ BINDING }}

View File

@@ -35,6 +35,23 @@ so-redis:
{% else %} {% else %}
- /etc/ssl/certs/intca.crt:/certs/ca.crt:ro - /etc/ssl/certs/intca.crt:/certs/ca.crt:ro
{% endif %} {% endif %}
{% if DOCKER.containers['so-redis'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-redis'].custom_bind_mounts %}
- {{ BIND }}
{% endfor %}
{% endif %}
{% if DOCKER.containers['so-redis'].extra_hosts %}
- extra_hosts:
{% for XTRAHOST in DOCKER.containers['so-redis'].extra_hosts %}
- {{ XTRAHOST }}
{% endfor %}
{% endif %}
{% if DOCKER.containers['so-redis'].extra_env %}
- environment:
{% for XTRAENV in DOCKER.containers['so-redis'].extra_env %}
- {{ XTRAENV }}
{% endfor %}
{% endif %}
- entrypoint: "redis-server /usr/local/etc/redis/redis.conf" - entrypoint: "redis-server /usr/local/etc/redis/redis.conf"
- watch: - watch:
- file: /opt/so/conf/redis/etc - file: /opt/so/conf/redis/etc

View File

@@ -30,9 +30,25 @@ so-dockerregistry:
- /nsm/docker-registry/docker:/var/lib/registry/docker:rw - /nsm/docker-registry/docker:/var/lib/registry/docker:rw
- /etc/pki/registry.crt:/etc/pki/registry.crt:ro - /etc/pki/registry.crt:/etc/pki/registry.crt:ro
- /etc/pki/registry.key:/etc/pki/registry.key:ro - /etc/pki/registry.key:/etc/pki/registry.key:ro
{% if DOCKER.containers['so-dockerregistry'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-dockerregistry'].custom_bind_mounts %}
- {{ BIND }}
{% endfor %}
{% endif %}
{% if DOCKER.containers['so-dockerregistry'].extra_hosts %}
- extra_hosts:
{% for XTRAHOST in DOCKER.containers['so-dockerregistry'].extra_hosts %}
- {{ XTRAHOST }}
{% endfor %}
{% endif %}
- client_timeout: 180 - client_timeout: 180
- environment: - environment:
- HOME=/root - HOME=/root
{% if DOCKER.containers['so-dockerregistry'].extra_env %}
{% for XTRAENV in DOCKER.containers['so-dockerregistry'].extra_env %}
- {{ XTRAENV }}
{% endfor %}
{% endif %}
- retry: - retry:
attempts: 5 attempts: 5
interval: 30 interval: 30

View File

@@ -4,6 +4,8 @@
# Elastic License 2.0. # Elastic License 2.0.
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'docker/docker.map.jinja' import DOCKER %}
include: include:
- sensoroni.config - sensoroni.config
@@ -21,6 +23,23 @@ so-sensoroni:
- /opt/so/conf/sensoroni/sensoroni.json:/opt/sensoroni/sensoroni.json:ro - /opt/so/conf/sensoroni/sensoroni.json:/opt/sensoroni/sensoroni.json:ro
- /opt/so/conf/sensoroni/analyzers:/opt/sensoroni/analyzers:rw - /opt/so/conf/sensoroni/analyzers:/opt/sensoroni/analyzers:rw
- /opt/so/log/sensoroni:/opt/sensoroni/logs:rw - /opt/so/log/sensoroni:/opt/sensoroni/logs:rw
{% if DOCKER.containers['so-sensoroni'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-sensoroni'].custom_bind_mounts %}
- {{ BIND }}
{% endfor %}
{% endif %}
{% if DOCKER.containers['so-sensoroni'].extra_hosts %}
- extra_hosts:
{% for XTRAHOST in DOCKER.containers['so-sensoroni'].extra_hosts %}
- {{ XTRAHOST }}
{% endfor %}
{% endif %}
{% if DOCKER.containers['so-sensoroni'].extra_env %}
- environment:
{% for XTRAENV in DOCKER.containers['so-sensoroni'].extra_env %}
- {{ XTRAENV }}
{% endfor %}
{% endif %}
- watch: - watch:
- file: /opt/so/conf/sensoroni/sensoroni.json - file: /opt/so/conf/sensoroni/sensoroni.json
- require: - require:

View File

@@ -32,11 +32,27 @@ so-soc:
- /opt/so/conf/soc/soc_users_roles:/opt/sensoroni/rbac/users_roles:rw - /opt/so/conf/soc/soc_users_roles:/opt/sensoroni/rbac/users_roles:rw
- /opt/so/conf/soc/salt:/opt/sensoroni/salt:rw - /opt/so/conf/soc/salt:/opt/sensoroni/salt:rw
- /opt/so/saltstack:/opt/so/saltstack:rw - /opt/so/saltstack:/opt/so/saltstack:rw
{% if DOCKER.containers['so-soc'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-soc'].custom_bind_mounts %}
- {{ BIND }}
{% endfor %}
{% endif %}
- extra_hosts: {{ DOCKER_EXTRA_HOSTS }} - extra_hosts: {{ DOCKER_EXTRA_HOSTS }}
{% if DOCKER.containers['so-soc'].extra_hosts %}
{% for XTRAHOST in DOCKER.containers['so-soc'].extra_hosts %}
- {{ XTRAHOST }}
{% endfor %}
{% endif %}
- port_bindings: - port_bindings:
{% for BINDING in DOCKER.containers['so-soc'].port_bindings %} {% for BINDING in DOCKER.containers['so-soc'].port_bindings %}
- {{ BINDING }} - {{ BINDING }}
{% endfor %} {% endfor %}
{% if DOCKER.containers['so-soc'].extra_env %}
- environment:
{% for XTRAENV in DOCKER.containers['so-soc'].extra_env %}
- {{ XTRAENV }}
{% endfor %}
{% endif %}
- watch: - watch:
- file: /opt/so/conf/soc/* - file: /opt/so/conf/soc/*
- require: - require:

View File

@@ -29,6 +29,11 @@ so-soctopus:
{% if GLOBALS.airgap %} {% if GLOBALS.airgap %}
- /nsm/repo/rules/sigma:/soctopus/sigma - /nsm/repo/rules/sigma:/soctopus/sigma
{% endif %} {% endif %}
{% if DOCKER.containers['so-soctopus'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-soctopus'].custom_bind_mounts %}
- {{ BIND }}
{% endfor %}
{% endif %}
- port_bindings: - port_bindings:
{% for BINDING in DOCKER.containers['so-soctopus'].port_bindings %} {% for BINDING in DOCKER.containers['so-soctopus'].port_bindings %}
- {{ BINDING }} - {{ BINDING }}
@@ -36,6 +41,17 @@ so-soctopus:
- extra_hosts: - extra_hosts:
- {{GLOBALS.url_base}}:{{GLOBALS.manager_ip}} - {{GLOBALS.url_base}}:{{GLOBALS.manager_ip}}
- {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }} - {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }}
{% if DOCKER.containers['so-soctopus'].extra_hosts %}
{% for XTRAHOST in DOCKER.containers['so-soctopus'].extra_hosts %}
- {{ XTRAHOST }}
{% endfor %}
{% endif %}
{% if DOCKER.containers['so-soctopus'].extra_env %}
- environment:
{% for XTRAENV in DOCKER.containers['so-soctopus'].extra_env %}
- {{ XTRAENV }}
{% endfor %}
{% endif %}
- require: - require:
- file: soctopusconf - file: soctopusconf
- file: navigatordefaultlayer - file: navigatordefaultlayer

View File

@@ -18,6 +18,11 @@ strelka_backend:
- binds: - binds:
- /opt/so/conf/strelka/backend/:/etc/strelka/:ro - /opt/so/conf/strelka/backend/:/etc/strelka/:ro
- /opt/so/conf/strelka/rules/:/etc/yara/:ro - /opt/so/conf/strelka/rules/:/etc/yara/:ro
{% if DOCKER.containers['so-strelka-backend'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-strelka-backend'].custom_bind_mounts %}
- {{ BIND }}
{% endfor %}
{% endif %}
- name: so-strelka-backend - name: so-strelka-backend
- networks: - networks:
- sobridge: - sobridge:
@@ -25,6 +30,17 @@ strelka_backend:
- command: strelka-backend - command: strelka-backend
- extra_hosts: - extra_hosts:
- {{ GLOBALS.hostname }}:{{ GLOBALS.node_ip }} - {{ GLOBALS.hostname }}:{{ GLOBALS.node_ip }}
{% if DOCKER.containers['so-strelka-backend'].extra_hosts %}
{% for XTRAHOST in DOCKER.containers['so-strelka-backend'].extra_hosts %}
- {{ XTRAHOST }}
{% endfor %}
{% endif %}
{% if DOCKER.containers['so-strelka-backend'].extra_env %}
- environment:
{% for XTRAENV in DOCKER.containers['so-strelka-backend'].extra_env %}
- {{ XTRAENV }}
{% endfor %}
{% endif %}
- restart_policy: on-failure - restart_policy: on-failure
delete_so-strelka-backend_so-status.disabled: delete_so-strelka-backend_so-status.disabled:

View File

@@ -43,14 +43,14 @@ strelka_sbin:
- group: 939 - group: 939
- file_mode: 755 - file_mode: 755
#strelka_sbin_jinja: strelka_sbin_jinja:
# file.recurse: file.recurse:
# - name: /usr/sbin - name: /usr/sbin
# - source: salt://strelka/tools/sbin_jinja - source: salt://strelka/tools/sbin_jinja
# - user: 939 - user: 939
# - group: 939 - group: 939
# - file_mode: 755 - file_mode: 755
# - template: jinja - template: jinja
{% else %} {% else %}

View File

@@ -22,11 +22,27 @@ strelka_coordinator:
- entrypoint: redis-server --save "" --appendonly no - entrypoint: redis-server --save "" --appendonly no
- extra_hosts: - extra_hosts:
- {{ GLOBALS.hostname }}:{{ GLOBALS.node_ip }} - {{ GLOBALS.hostname }}:{{ GLOBALS.node_ip }}
{% if DOCKER.containers['so-strelka-coordinator'].extra_hosts %}
{% for XTRAHOST in DOCKER.containers['so-strelka-coordinator'].extra_hosts %}
- {{ XTRAHOST }}
{% endfor %}
{% endif %}
- port_bindings: - port_bindings:
{% for BINDING in DOCKER.containers['so-strelka-coordinator'].port_bindings %} {% for BINDING in DOCKER.containers['so-strelka-coordinator'].port_bindings %}
- {{ BINDING }} - {{ BINDING }}
{% endfor %} {% endfor %}
{% if DOCKER.containers['so-strelka-coordinator'].extra_env %}
- environment:
{% for XTRAENV in DOCKER.containers['so-strelka-coordinator'].extra_env %}
- {{ XTRAENV }}
{% endfor %}
{% endif %}
{% if DOCKER.containers['so-strelka-coordinator'].custom_bind_mounts %}
- binds:
{% for BIND in DOCKER.containers['so-strelka-coordinator'].custom_bind_mounts %}
- {{ BIND }}
{% endfor %}
{% endif %}
delete_so-strelka-coordinator_so-status.disabled: delete_so-strelka-coordinator_so-status.disabled:
file.uncomment: file.uncomment:
- name: /opt/so/conf/so-status/so-status.conf - name: /opt/so/conf/so-status/so-status.conf

View File

@@ -542,8 +542,7 @@ strelka:
enabled: False enabled: False
rules: rules:
enabled: True enabled: True
repos: repos: []
- https://github.com/Neo23x0/signature-base
excluded: excluded:
- apt_flame2_orchestrator.yar - apt_flame2_orchestrator.yar
- apt_tetris.yar - apt_tetris.yar
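With the default repo list now empty, a remote signature set has to be opted into explicitly. A hedged sketch of a pillar override that restores the previously shipped repo (structure taken from the defaults file above; any Git URL reachable from the manager would work the same way):

strelka:
  rules:
    enabled: True
    repos:
      - https://github.com/Neo23x0/signature-base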

View File

@@ -18,6 +18,11 @@ strelka_filestream:
- binds: - binds:
- /opt/so/conf/strelka/filestream/:/etc/strelka/:ro - /opt/so/conf/strelka/filestream/:/etc/strelka/:ro
- /nsm/strelka:/nsm/strelka - /nsm/strelka:/nsm/strelka
{% if DOCKER.containers['so-strelka-filestream'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-strelka-filestream'].custom_bind_mounts %}
- {{ BIND }}
{% endfor %}
{% endif %}
- name: so-strelka-filestream - name: so-strelka-filestream
- networks: - networks:
- sobridge: - sobridge:
@@ -25,6 +30,17 @@ strelka_filestream:
- command: strelka-filestream - command: strelka-filestream
- extra_hosts: - extra_hosts:
- {{ GLOBALS.hostname }}:{{ GLOBALS.node_ip }} - {{ GLOBALS.hostname }}:{{ GLOBALS.node_ip }}
{% if DOCKER.containers['so-strelka-filestream'].extra_hosts %}
{% for XTRAHOST in DOCKER.containers['so-strelka-filestream'].extra_hosts %}
- {{ XTRAHOST }}
{% endfor %}
{% endif %}
{% if DOCKER.containers['so-strelka-filestream'].extra_env %}
- environment:
{% for XTRAENV in DOCKER.containers['so-strelka-filestream'].extra_env %}
- {{ XTRAENV }}
{% endfor %}
{% endif %}
delete_so-strelka-filestream_so-status.disabled: delete_so-strelka-filestream_so-status.disabled:
file.uncomment: file.uncomment:

View File

@@ -18,6 +18,11 @@ strelka_frontend:
- binds: - binds:
- /opt/so/conf/strelka/frontend/:/etc/strelka/:ro - /opt/so/conf/strelka/frontend/:/etc/strelka/:ro
- /nsm/strelka/log/:/var/log/strelka/:rw - /nsm/strelka/log/:/var/log/strelka/:rw
{% if DOCKER.containers['so-strelka-frontend'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-strelka-frontend'].custom_bind_mounts %}
- {{ BIND }}
{% endfor %}
{% endif %}
- privileged: True - privileged: True
- name: so-strelka-frontend - name: so-strelka-frontend
- networks: - networks:
@@ -26,10 +31,21 @@ strelka_frontend:
- command: strelka-frontend - command: strelka-frontend
- extra_hosts: - extra_hosts:
- {{ GLOBALS.hostname }}:{{ GLOBALS.node_ip }} - {{ GLOBALS.hostname }}:{{ GLOBALS.node_ip }}
{% if DOCKER.containers['so-strelka-frontend'].extra_hosts %}
{% for XTRAHOST in DOCKER.containers['so-strelka-frontend'].extra_hosts %}
- {{ XTRAHOST }}
{% endfor %}
{% endif %}
- port_bindings: - port_bindings:
{% for BINDING in DOCKER.containers['so-strelka-frontend'].port_bindings %} {% for BINDING in DOCKER.containers['so-strelka-frontend'].port_bindings %}
- {{ BINDING }} - {{ BINDING }}
{% endfor %} {% endfor %}
{% if DOCKER.containers['so-strelka-frontend'].extra_env %}
- environment:
{% for XTRAENV in DOCKER.containers['so-strelka-frontend'].extra_env %}
- {{ XTRAENV }}
{% endfor %}
{% endif %}
delete_so-strelka-frontend_so-status.disabled: delete_so-strelka-frontend_so-status.disabled:
file.uncomment: file.uncomment:

View File

@@ -22,10 +22,27 @@ strelka_gatekeeper:
- entrypoint: redis-server --save "" --appendonly no --maxmemory-policy allkeys-lru - entrypoint: redis-server --save "" --appendonly no --maxmemory-policy allkeys-lru
- extra_hosts: - extra_hosts:
- {{ GLOBALS.hostname }}:{{ GLOBALS.node_ip }} - {{ GLOBALS.hostname }}:{{ GLOBALS.node_ip }}
{% if DOCKER.containers['so-strelka-gatekeeper'].extra_hosts %}
{% for XTRAHOST in DOCKER.containers['so-strelka-gatekeeper'].extra_hosts %}
- {{ XTRAHOST }}
{% endfor %}
{% endif %}
- port_bindings: - port_bindings:
{% for BINDING in DOCKER.containers['so-strelka-gatekeeper'].port_bindings %} {% for BINDING in DOCKER.containers['so-strelka-gatekeeper'].port_bindings %}
- {{ BINDING }} - {{ BINDING }}
{% endfor %} {% endfor %}
{% if DOCKER.containers['so-strelka-gatekeeper'].custom_bind_mounts %}
- binds:
{% for BIND in DOCKER.containers['so-strelka-gatekeeper'].custom_bind_mounts %}
- {{ BIND }}
{% endfor %}
{% endif %}
{% if DOCKER.containers['so-strelka-gatekeeper'].extra_env %}
- environment:
{% for XTRAENV in DOCKER.containers['so-strelka-gatekeeper'].extra_env %}
- {{ XTRAENV }}
{% endfor %}
{% endif %}
delete_so-strelka-gatekeeper_so-status.disabled: delete_so-strelka-gatekeeper_so-status.disabled:
file.uncomment: file.uncomment:

View File

@@ -17,6 +17,11 @@ strelka_manager:
- image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-strelka-manager:{{ GLOBALS.so_version }} - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-strelka-manager:{{ GLOBALS.so_version }}
- binds: - binds:
- /opt/so/conf/strelka/manager/:/etc/strelka/:ro - /opt/so/conf/strelka/manager/:/etc/strelka/:ro
{% if DOCKER.containers['so-strelka-manager'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-strelka-manager'].custom_bind_mounts %}
- {{ BIND }}
{% endfor %}
{% endif %}
- name: so-strelka-manager - name: so-strelka-manager
- networks: - networks:
- sobridge: - sobridge:
@@ -24,6 +29,17 @@ strelka_manager:
- command: strelka-manager - command: strelka-manager
- extra_hosts: - extra_hosts:
- {{ GLOBALS.hostname }}:{{ GLOBALS.node_ip }} - {{ GLOBALS.hostname }}:{{ GLOBALS.node_ip }}
{% if DOCKER.containers['so-strelka-manager'].extra_hosts %}
{% for XTRAHOST in DOCKER.containers['so-strelka-manager'].extra_hosts %}
- {{ XTRAHOST }}
{% endfor %}
{% endif %}
{% if DOCKER.containers['so-strelka-manager'].extra_env %}
- environment:
{% for XTRAENV in DOCKER.containers['so-strelka-manager'].extra_env %}
- {{ XTRAENV }}
{% endfor %}
{% endif %}
delete_so-strelka-manager_so-status.disabled: delete_so-strelka-manager_so-status.disabled:
file.uncomment: file.uncomment:

View File

@@ -0,0 +1,21 @@
#!/bin/bash
NOROOT=1
. /usr/sbin/so-common
{%- set proxy = salt['pillar.get']('manager:proxy') %}
# Download the rules from the internet
{%- if proxy %}
export http_proxy={{ proxy }}
export https_proxy={{ proxy }}
export no_proxy="{{ salt['pillar.get']('manager:no_proxy') }}"
{%- endif %}
mkdir -p /tmp/yara
cd /tmp/yara
git clone https://github.com/Security-Onion-Solutions/securityonion-yara.git
mkdir -p /nsm/rules/yara
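# Stage the cloned rules under /nsm/rules/yara; nginx serves /nsm/rules on port 7788,
# which is the URL the strelka pillar's repos entry now points at.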
rsync -shav --progress /tmp/yara/securityonion-yara/yara /nsm/rules/
cd /tmp
rm -rf /tmp/yara

View File

@@ -6,6 +6,8 @@
{% from 'allowed_states.map.jinja' import allowed_states %} {% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %} {% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'docker/docker.map.jinja' import DOCKER %}
include: include:
- suricata.config - suricata.config
@@ -17,6 +19,11 @@ so-suricata:
- privileged: True - privileged: True
- environment: - environment:
- INTERFACE={{ GLOBALS.sensor.interface }} - INTERFACE={{ GLOBALS.sensor.interface }}
{% if DOCKER.containers['so-suricata'].extra_env %}
{% for XTRAENV in DOCKER.containers['so-suricata'].extra_env %}
- {{ XTRAENV }}
{% endfor %}
{% endif %}
- binds: - binds:
- /opt/so/conf/suricata/suricata.yaml:/etc/suricata/suricata.yaml:ro - /opt/so/conf/suricata/suricata.yaml:/etc/suricata/suricata.yaml:ro
- /opt/so/conf/suricata/threshold.conf:/etc/suricata/threshold.conf:ro - /opt/so/conf/suricata/threshold.conf:/etc/suricata/threshold.conf:ro
@@ -25,7 +32,18 @@ so-suricata:
- /nsm/suricata/:/nsm/:rw - /nsm/suricata/:/nsm/:rw
- /nsm/suricata/extracted:/var/log/suricata//filestore:rw - /nsm/suricata/extracted:/var/log/suricata//filestore:rw
- /opt/so/conf/suricata/bpf:/etc/suricata/bpf:ro - /opt/so/conf/suricata/bpf:/etc/suricata/bpf:ro
{% if DOCKER.containers['so-suricata'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-suricata'].custom_bind_mounts %}
- {{ BIND }}
{% endfor %}
{% endif %}
- network_mode: host - network_mode: host
{% if DOCKER.containers['so-suricata'].extra_hosts %}
- extra_hosts:
{% for XTRAHOST in DOCKER.containers['so-suricata'].extra_hosts %}
- {{ XTRAHOST }}
{% endfor %}
{% endif %}
- watch: - watch:
- file: suriconfig - file: suriconfig
- file: surithresholding - file: surithresholding

View File

@@ -6,6 +6,8 @@
{% from 'allowed_states.map.jinja' import allowed_states %} {% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %} {% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'docker/docker.map.jinja' import DOCKER %}
include: include:
- telegraf.config - telegraf.config
@@ -22,6 +24,11 @@ so-telegraf:
- HOST_SYS=/host/sys - HOST_SYS=/host/sys
- HOST_MOUNT_PREFIX=/host - HOST_MOUNT_PREFIX=/host
- GODEBUG=x509ignoreCN=0 - GODEBUG=x509ignoreCN=0
{% if DOCKER.containers['so-telegraf'].extra_env %}
{% for XTRAENV in DOCKER.containers['so-telegraf'].extra_env %}
- {{ XTRAENV }}
{% endfor %}
{% endif %}
- network_mode: host - network_mode: host
- init: True - init: True
- binds: - binds:
@@ -47,6 +54,17 @@ so-telegraf:
- /opt/so/log/suricata:/var/log/suricata:ro - /opt/so/log/suricata:/var/log/suricata:ro
- /opt/so/log/raid:/var/log/raid:ro - /opt/so/log/raid:/var/log/raid:ro
- /opt/so/log/sostatus:/var/log/sostatus:ro - /opt/so/log/sostatus:/var/log/sostatus:ro
{% if DOCKER.containers['so-telegraf'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-telegraf'].custom_bind_mounts %}
- {{ BIND }}
{% endfor %}
{% endif %}
{% if DOCKER.containers['so-telegraf'].extra_hosts %}
- extra_hosts:
{% for XTRAHOST in DOCKER.containers['so-telegraf'].extra_hosts %}
- {{ XTRAHOST }}
{% endfor %}
{% endif %}
- watch: - watch:
- file: tgrafconf - file: tgrafconf
- file: tgrafsyncscripts - file: tgrafsyncscripts

View File

@@ -76,7 +76,10 @@ zeek:
- LogAscii::use_json = T; - LogAscii::use_json = T;
- CaptureLoss::watch_interval = 5 mins; - CaptureLoss::watch_interval = 5 mins;
networks: networks:
HOME_NET: 192.168.0.0/16,10.0.0.0/8,172.16.0.0/12 HOME_NET:
- 192.168.0.0/16
- 10.0.0.0/8
- 172.16.0.0/12
file_extraction: file_extraction:
- application/x-dosexec: exe - application/x-dosexec: exe
- application/pdf: pdf - application/pdf: pdf
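Because HOME_NET is now a native YAML list instead of a comma-separated string, overriding it in a pillar looks like the sketch below (CIDR values are placeholders); the networks file template later in this diff iterates the list directly rather than calling split(','):

zeek:
  networks:
    HOME_NET:
      - 10.10.0.0/16
      - 192.168.50.0/24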

View File

@@ -6,6 +6,8 @@
{% from 'allowed_states.map.jinja' import allowed_states %} {% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %} {% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'docker/docker.map.jinja' import DOCKER %}
include: include:
- zeek.config - zeek.config
@@ -32,7 +34,24 @@ so-zeek:
- /opt/so/conf/zeek/policy/cve-2020-0601:/opt/zeek/share/zeek/policy/cve-2020-0601:ro - /opt/so/conf/zeek/policy/cve-2020-0601:/opt/zeek/share/zeek/policy/cve-2020-0601:ro
- /opt/so/conf/zeek/policy/intel:/opt/zeek/share/zeek/policy/intel:rw - /opt/so/conf/zeek/policy/intel:/opt/zeek/share/zeek/policy/intel:rw
- /opt/so/conf/zeek/bpf:/opt/zeek/etc/bpf:ro - /opt/so/conf/zeek/bpf:/opt/zeek/etc/bpf:ro
{% if DOCKER.containers['so-zeek'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-zeek'].custom_bind_mounts %}
- {{ BIND }}
{% endfor %}
{% endif %}
- network_mode: host - network_mode: host
{% if DOCKER.containers['so-zeek'].extra_hosts %}
- extra_hosts:
{% for XTRAHOST in DOCKER.containers['so-zeek'].extra_hosts %}
- {{ XTRAHOST }}
{% endfor %}
{% endif %}
{% if DOCKER.containers['so-zeek'].extra_env %}
- environment:
{% for XTRAENV in DOCKER.containers['so-zeek'].extra_env %}
- {{ XTRAENV }}
{% endfor %}
{% endif %}
- watch: - watch:
- file: /opt/so/conf/zeek/local.zeek - file: /opt/so/conf/zeek/local.zeek
- file: /opt/so/conf/zeek/node.cfg - file: /opt/so/conf/zeek/node.cfg

View File

@@ -1,5 +1,5 @@
{%- if NETWORKS.HOME_NET %} {%- if NETWORKS.HOME_NET %}
{%- for HN in NETWORKS.HOME_NET.split(',') %} {%- for HN in NETWORKS.HOME_NET %}
{{ HN }} {{ HN }}
{%- endfor %} {%- endfor %}
{%- endif %} {%- endif %}

View File

@@ -21,6 +21,15 @@ zeek:
forcedType: "[]string" forcedType: "[]string"
advanced: True advanced: True
helpLink: zeek.html helpLink: zeek.html
networks:
HOME_NET:
description: List of IP addresses or CIDR blocks that define the HOME_NET.
forcedType: "[]string"
advanced: False
helpLink: zeek.html
multiline: True
regex: ^(([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))?)?$
regexFailureMessage: You must enter a valid IP address or CIDR.
node: node:
lb_procs: lb_procs:
description: Contains the number of CPU cores or workers used by Zeek. This setting should only be applied to individual nodes and will be ignored if CPU affinity is enabled. description: Contains the number of CPU cores or workers used by Zeek. This setting should only be applied to individual nodes and will be ignored if CPU affinity is enabled.

View File

@@ -1542,15 +1542,9 @@ create_strelka_pillar() {
"strelka:"\ "strelka:"\
" enabled: $STRELKA"\ " enabled: $STRELKA"\
" rules: 1" > "$strelka_pillar_file" " rules: 1" > "$strelka_pillar_file"
if [[ $is_airgap ]]; then printf '%s\n'\
printf '%s\n'\ " repos:"\
" repos:"\ " - 'https://$HOSTNAME:7788/yara'" >> "$strelka_pillar_file"
" - 'https://$HOSTNAME/repo/rules/strelka'" >> "$strelka_pillar_file"
else
printf '%s\n'\
" repos:"\
" - 'https://github.com/Neo23x0/signature-base'" >> "$strelka_pillar_file"
fi
} }
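For reference, the pillar fragment this function now writes should come out roughly as follows once the shell variables are expanded (the hostname is a placeholder and $STRELKA expands to the enabled flag):

strelka:
  enabled: 1
  rules: 1
  repos:
    - 'https://manager.example.local:7788/yara'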
backup_pillar() { backup_pillar() {

View File

@@ -644,6 +644,13 @@ if ! [[ -f $install_opt_file ]]; then
logCmd "salt-call state.apply -l info manager" logCmd "salt-call state.apply -l info manager"
logCmd "salt-call state.apply influxdb -l info" logCmd "salt-call state.apply influxdb -l info"
logCmd "salt-call state.highstate -l info" logCmd "salt-call state.highstate -l info"
if [[ ! $is_airgap ]]; then
title "Downloading IDS Rules"
logCmd "so-rule-update"
title "Downloading YARA rules"
logCmd "runuser -l socore 'so-yara-update'"
title "Restarting Strelka to use new rules"
fi
title "Setting up Kibana Default Space" title "Setting up Kibana Default Space"
logCmd "so-kibana-space-defaults" logCmd "so-kibana-space-defaults"
add_web_user add_web_user