diff --git a/salt/curator/enabled.sls b/salt/curator/enabled.sls index b60058692..b2574569f 100644 --- a/salt/curator/enabled.sls +++ b/salt/curator/enabled.sls @@ -28,6 +28,23 @@ so-curator: - /opt/so/conf/curator/curator.yml:/etc/curator/config/curator.yml:ro - /opt/so/conf/curator/action/:/etc/curator/action:ro - /opt/so/log/curator:/var/log/curator:rw + {% if DOCKER.containers['so-curator'].custom_bind_mounts %} + {% for BIND in DOCKER.containers['so-curator'].custom_bind_mounts %} + - {{ BIND }} + {% endfor %} + {% endif %} + {% if DOCKER.containers['so-curator'].extra_hosts %} + - extra_hosts: + {% for XTRAHOST in DOCKER.containers['so-curator'].extra_hosts %} + - {{ XTRAHOST }} + {% endfor %} + {% endif %} + {% if DOCKER.containers['so-curator'].extra_env %} + - environment: + {% for XTRAENV in DOCKER.containers['so-curator'].extra_env %} + - {{ XTRAENV }} + {% endfor %} + {% endif %} - require: - file: actionconfs - file: curconf diff --git a/salt/docker/defaults.yaml b/salt/docker/defaults.yaml index ad3506737..21b94a2ba 100644 --- a/salt/docker/defaults.yaml +++ b/salt/docker/defaults.yaml @@ -10,12 +10,14 @@ docker: - 0.0.0.0:5000:5000 custom_bind_mounts: [] extra_hosts: [] + extra_env: [] 'so-elastic-fleet': final_octet: 21 port_bindings: - 0.0.0.0:8220:8220/tcp custom_bind_mounts: [] extra_hosts: [] + extra_env: [] 'so-elasticsearch': final_octet: 22 port_bindings: @@ -23,22 +25,26 @@ docker: - 0.0.0.0:9300:9300/tcp custom_bind_mounts: [] extra_hosts: [] + extra_env: [] 'so-idstools': final_octet: 25 custom_bind_mounts: [] extra_hosts: [] + extra_env: [] 'so-influxdb': final_octet: 26 port_bindings: - 0.0.0.0:8086:8086 custom_bind_mounts: [] extra_hosts: [] + extra_env: [] 'so-kibana': final_octet: 27 port_bindings: - 0.0.0.0:5601:5601 custom_bind_mounts: [] extra_hosts: [] + extra_env: [] 'so-kratos': final_octet: 28 port_bindings: @@ -46,6 +52,7 @@ docker: - 0.0.0.0:4434:4434 custom_bind_mounts: [] extra_hosts: [] + extra_env: [] 'so-logstash': 
final_octet: 29 port_bindings: @@ -61,12 +68,14 @@ docker: - 0.0.0.0:9600:9600 custom_bind_mounts: [] extra_hosts: [] + extra_env: [] 'so-mysql': final_octet: 30 port_bindings: - 0.0.0.0:3306:3306 custom_bind_mounts: [] extra_hosts: [] + extra_env: [] 'so-nginx': final_octet: 31 port_bindings: @@ -76,12 +85,14 @@ docker: - 7788:7788 custom_bind_mounts: [] extra_hosts: [] + extra_env: [] 'so-playbook': final_octet: 32 port_bindings: - 0.0.0.0:3000:3000 custom_bind_mounts: [] extra_hosts: [] + extra_env: [] 'so-redis': final_octet: 33 port_bindings: @@ -89,63 +100,101 @@ docker: - 0.0.0.0:9696:9696 custom_bind_mounts: [] extra_hosts: [] + extra_env: [] + 'so-sensoroni': + final_octet: 99 + custom_bind_mounts: [] + extra_hosts: [] + extra_env: [] 'so-soc': final_octet: 34 port_bindings: - 0.0.0.0:9822:9822 custom_bind_mounts: [] extra_hosts: [] + extra_env: [] 'so-soctopus': final_octet: 35 port_bindings: - 0.0.0.0:7000:7000 custom_bind_mounts: [] extra_hosts: [] + extra_env: [] 'so-strelka-backend': final_octet: 36 custom_bind_mounts: [] extra_hosts: [] + extra_env: [] 'so-strelka-filestream': final_octet: 37 custom_bind_mounts: [] extra_hosts: [] + extra_env: [] 'so-strelka-frontend': final_octet: 38 port_bindings: - 0.0.0.0:57314:57314 custom_bind_mounts: [] extra_hosts: [] + extra_env: [] 'so-strelka-manager': final_octet: 39 custom_bind_mounts: [] extra_hosts: [] + extra_env: [] 'so-strelka-gatekeeper': final_octet: 40 port_bindings: - 0.0.0.0:6381:6379 custom_bind_mounts: [] extra_hosts: [] + extra_env: [] 'so-strelka-coordinator': final_octet: 41 port_bindings: - 0.0.0.0:6380:6379 custom_bind_mounts: [] extra_hosts: [] + extra_env: [] 'so-elastalert': final_octet: 42 custom_bind_mounts: [] extra_hosts: [] + extra_env: [] 'so-curator': final_octet: 43 custom_bind_mounts: [] extra_hosts: [] + extra_env: [] 'so-elastic-fleet-package-registry': final_octet: 44 port_bindings: - 0.0.0.0:8080:8080/tcp custom_bind_mounts: [] extra_hosts: [] + extra_env: [] 'so-idh': 
final_octet: 45 custom_bind_mounts: [] - extra_hosts: [] \ No newline at end of file + extra_hosts: [] + extra_env: [] + 'so-telegraf': + final_octet: 99 + custom_bind_mounts: [] + extra_hosts: [] + extra_env: [] + 'so-steno': + final_octet: 99 + custom_bind_mounts: [] + extra_hosts: [] + extra_env: [] + 'so-suricata': + final_octet: 99 + custom_bind_mounts: [] + extra_hosts: [] + extra_env: [] + 'so-zeek': + final_octet: 99 + custom_bind_mounts: [] + extra_hosts: [] + extra_env: [] \ No newline at end of file diff --git a/salt/docker/soc_docker.yaml b/salt/docker/soc_docker.yaml index c8d18abde..b6f5ca0ca 100644 --- a/salt/docker/soc_docker.yaml +++ b/salt/docker/soc_docker.yaml @@ -40,6 +40,12 @@ docker: helpLink: docker.html multiline: True forcedType: "[]string" + extra_env: + description: List of additional ENV entries for the container. + advanced: True + helpLink: docker.html + multiline: True + forcedType: "[]string" so-dockerregistry: *dockerOptions so-elastalert: *dockerOptions so-elastic-fleet-package-registry: *dockerOptions diff --git a/salt/elastalert/enabled.sls b/salt/elastalert/enabled.sls index 3e043b46c..e4b3642db 100644 --- a/salt/elastalert/enabled.sls +++ b/salt/elastalert/enabled.sls @@ -31,8 +31,24 @@ so-elastalert: - /opt/so/log/elastalert:/var/log/elastalert:rw - /opt/so/conf/elastalert/modules/:/opt/elastalert/modules/:ro - /opt/so/conf/elastalert/elastalert_config.yaml:/opt/elastalert/config.yaml:ro + {% if DOCKER.containers['so-elastalert'].custom_bind_mounts %} + {% for BIND in DOCKER.containers['so-elastalert'].custom_bind_mounts %} + - {{ BIND }} + {% endfor %} + {% endif %} - extra_hosts: - {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }} + {% if DOCKER.containers['so-elastalert'].extra_hosts %} + {% for XTRAHOST in DOCKER.containers['so-elastalert'].extra_hosts %} + - {{ XTRAHOST }} + {% endfor %} + {% endif %} + {% if DOCKER.containers['so-elastalert'].extra_env %} + - environment: + {% for XTRAENV in 
DOCKER.containers['so-elastalert'].extra_env %} + - {{ XTRAENV }} + {% endfor %} + {% endif %} - require: - cmd: wait_for_elasticsearch - file: elastarules diff --git a/salt/elastic-fleet-package-registry/enabled.sls b/salt/elastic-fleet-package-registry/enabled.sls index 5f663e78f..3cd90ba87 100644 --- a/salt/elastic-fleet-package-registry/enabled.sls +++ b/salt/elastic-fleet-package-registry/enabled.sls @@ -24,11 +24,27 @@ so-elastic-fleet-package-registry: - ipv4_address: {{ DOCKER.containers['so-elastic-fleet-package-registry'].ip }} - extra_hosts: - {{ GLOBALS.hostname }}:{{ GLOBALS.node_ip }} + {% if DOCKER.containers['so-elastic-fleet-package-registry'].extra_hosts %} + {% for XTRAHOST in DOCKER.containers['so-elastic-fleet-package-registry'].extra_hosts %} + - {{ XTRAHOST }} + {% endfor %} + {% endif %} - port_bindings: {% for BINDING in DOCKER.containers['so-elastic-fleet-package-registry'].port_bindings %} - {{ BINDING }} {% endfor %} - + {% if DOCKER.containers['so-elastic-fleet-package-registry'].custom_bind_mounts %} + - binds: + {% for BIND in DOCKER.containers['so-elastic-fleet-package-registry'].custom_bind_mounts %} + - {{ BIND }} + {% endfor %} + {% endif %} + {% if DOCKER.containers['so-elastic-fleet-package-registry'].extra_env %} + - environment: + {% for XTRAENV in DOCKER.containers['so-elastic-fleet-package-registry'].extra_env %} + - {{ XTRAENV }} + {% endfor %} + {% endif %} delete_so-elastic-fleet-package-registry_so-status.disabled: file.uncomment: - name: /opt/so/conf/so-status/so-status.conf diff --git a/salt/elasticfleet/enabled.sls b/salt/elasticfleet/enabled.sls index a3982e760..e93ebd4db 100644 --- a/salt/elasticfleet/enabled.sls +++ b/salt/elasticfleet/enabled.sls @@ -28,6 +28,11 @@ so-elastic-fleet: - extra_hosts: - {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }} - {{ GLOBALS.hostname }}:{{ GLOBALS.node_ip }} + {% if DOCKER.containers['so-elastic-fleet'].extra_hosts %} + {% for XTRAHOST in 
DOCKER.containers['so-elastic-fleet'].extra_hosts %} + - {{ XTRAHOST }} + {% endfor %} + {% endif %} - port_bindings: {% for BINDING in DOCKER.containers['so-elastic-fleet'].port_bindings %} - {{ BINDING }} @@ -35,6 +40,11 @@ so-elastic-fleet: - binds: - /etc/pki:/etc/pki:ro #- /opt/so/conf/elastic-fleet/state:/usr/share/elastic-agent/state:rw + {% if DOCKER.containers['so-elastic-fleet'].custom_bind_mounts %} + {% for BIND in DOCKER.containers['so-elastic-fleet'].custom_bind_mounts %} + - {{ BIND }} + {% endfor %} + {% endif %} - environment: - FLEET_SERVER_ENABLE=true - FLEET_URL=https://{{ GLOBALS.node_ip }}:8220 @@ -45,6 +55,11 @@ so-elastic-fleet: - FLEET_SERVER_CERT=/etc/pki/elasticfleet.crt - FLEET_SERVER_CERT_KEY=/etc/pki/elasticfleet.key - FLEET_CA=/etc/pki/tls/certs/intca.crt + {% if DOCKER.containers['so-elastic-fleet'].extra_env %} + {% for XTRAENV in DOCKER.containers['so-elastic-fleet'].extra_env %} + - {{ XTRAENV }} + {% endfor %} + {% endif %} {% endif %} delete_so-elastic-fleet_so-status.disabled: diff --git a/salt/elasticsearch/enabled.sls b/salt/elasticsearch/enabled.sls index fa51a4124..4777e3bce 100644 --- a/salt/elasticsearch/enabled.sls +++ b/salt/elasticsearch/enabled.sls @@ -26,6 +26,11 @@ so-elasticsearch: - sobridge: - ipv4_address: {{ DOCKER.containers['so-elasticsearch'].ip }} - extra_hosts: {{ LOGSTASH_NODES }} + {% if DOCKER.containers['so-elasticsearch'].extra_hosts %} + {% for XTRAHOST in DOCKER.containers['so-elasticsearch'].extra_hosts %} + - {{ XTRAHOST }} + {% endfor %} + {% endif %} - environment: {% if LOGSTASH_NODES | length == 1 %} - discovery.type=single-node @@ -35,6 +40,11 @@ so-elasticsearch: - memlock=-1:-1 - nofile=65536:65536 - nproc=4096 + {% if DOCKER.containers['so-elasticsearch'].extra_env %} + {% for XTRAENV in DOCKER.containers['so-elasticsearch'].extra_env %} + - {{ XTRAENV }} + {% endfor %} + {% endif %} - port_bindings: {% for BINDING in DOCKER.containers['so-elasticsearch'].port_bindings %} - {{ BINDING }} 
@@ -60,6 +70,11 @@ so-elasticsearch: - {{ repo }}:{{ repo }}:rw {% endfor %} {% endif %} + {% if DOCKER.containers['so-elasticsearch'].custom_bind_mounts %} + {% for BIND in DOCKER.containers['so-elasticsearch'].custom_bind_mounts %} + - {{ BIND }} + {% endfor %} + {% endif %} - watch: - file: cacertz - file: esyml diff --git a/salt/idh/enabled.sls b/salt/idh/enabled.sls index 82bee138b..480e7eedc 100644 --- a/salt/idh/enabled.sls +++ b/salt/idh/enabled.sls @@ -20,6 +20,23 @@ so-idh: - binds: - /nsm/idh:/var/tmp:rw - /opt/so/conf/idh/opencanary.conf:/etc/opencanaryd/opencanary.conf:ro + {% if DOCKER.containers['so-idh'].custom_bind_mounts %} + {% for BIND in DOCKER.containers['so-idh'].custom_bind_mounts %} + - {{ BIND }} + {% endfor %} + {% endif %} + {% if DOCKER.containers['so-idh'].extra_hosts %} + - extra_hosts: + {% for XTRAHOST in DOCKER.containers['so-idh'].extra_hosts %} + - {{ XTRAHOST }} + {% endfor %} + {% endif %} + {% if DOCKER.containers['so-idh'].extra_env %} + - environment: + {% for XTRAENV in DOCKER.containers['so-idh'].extra_env %} + - {{ XTRAENV }} + {% endfor %} + {% endif %} - watch: - file: opencanary_config - require: diff --git a/salt/idstools/config.sls b/salt/idstools/config.sls index 94692ee9f..e162d1139 100644 --- a/salt/idstools/config.sls +++ b/salt/idstools/config.sls @@ -24,14 +24,14 @@ idstools_sbin: - group: 939 - file_mode: 755 -#idstools_sbin_jinja: -# file.recurse: -# - name: /usr/sbin -# - source: salt://idstools/tools/sbin_jinja -# - user: 934 -# - group: 939 -# - file_mode: 755 -# - template: jinja +idstools_sbin_jinja: + file.recurse: + - name: /usr/sbin + - source: salt://idstools/tools/sbin_jinja + - user: 934 + - group: 939 + - file_mode: 755 + - template: jinja {% else %} diff --git a/salt/idstools/enabled.sls b/salt/idstools/enabled.sls index b56d6c2e5..bf5650773 100644 --- a/salt/idstools/enabled.sls +++ b/salt/idstools/enabled.sls @@ -26,10 +26,33 @@ so-idstools: - http_proxy={{ proxy }} - https_proxy={{ proxy }} - 
no_proxy={{ salt['pillar.get']('manager:no_proxy') }} + {% if DOCKER.containers['so-idstools'].extra_env %} + {% for XTRAENV in DOCKER.containers['so-idstools'].extra_env %} + - {{ XTRAENV }} + {% endfor %} + {% endif %} + {% elif DOCKER.containers['so-idstools'].extra_env %} + - environment: + {% for XTRAENV in DOCKER.containers['so-idstools'].extra_env %} + - {{ XTRAENV }} + {% endfor %} {% endif %} - binds: - /opt/so/conf/idstools/etc:/opt/so/idstools/etc:ro - /opt/so/rules/nids:/opt/so/rules/nids:rw + - /nsm/rules/:/nsm/rules/:rw + {% if DOCKER.containers['so-idstools'].custom_bind_mounts %} + {% for BIND in DOCKER.containers['so-idstools'].custom_bind_mounts %} + - {{ BIND }} + {% endfor %} + {% endif %} + - extra_hosts: + - {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }} + {% if DOCKER.containers['so-idstools'].extra_hosts %} + {% for XTRAHOST in DOCKER.containers['so-idstools'].extra_hosts %} + - {{ XTRAHOST }} + {% endfor %} + {% endif %} - watch: - file: idstoolsetcsync diff --git a/salt/idstools/etc/rulecat.conf b/salt/idstools/etc/rulecat.conf index 537c3f9e8..8be3aa1ce 100644 --- a/salt/idstools/etc/rulecat.conf +++ b/salt/idstools/etc/rulecat.conf @@ -1,35 +1,15 @@ -{%- from 'vars/globals.map.jinja' import GLOBALS %} -{%- from 'idstools/map.jinja' import IDSTOOLSMERGED %} -{%- if GLOBALS.airgap is sameas true -%} +{%- from 'vars/globals.map.jinja' import GLOBALS -%} +{%- from 'idstools/map.jinja' import IDSTOOLSMERGED -%} --merged=/opt/so/rules/nids/all.rules --local=/opt/so/rules/nids/local.rules {%- if GLOBALS.md_engine == "SURICATA" %} --local=/opt/so/rules/nids/sorules/extraction.rules --local=/opt/so/rules/nids/sorules/filters.rules {%- endif %} ---url=http://{{ GLOBALS.manager }}:7788/rules/emerging-all.rules +--url=http://{{ GLOBALS.manager }}:7788/suricata/emerging-all.rules --disable=/opt/so/idstools/etc/disable.conf --enable=/opt/so/idstools/etc/enable.conf --modify=/opt/so/idstools/etc/modify.conf -{%- else -%} ---suricata-version=6.0 
---merged=/opt/so/rules/nids/all.rules ---local=/opt/so/rules/nids/local.rules -{%- if GLOBALS.md_engine == "SURICATA" %} ---local=/opt/so/rules/nids/sorules/extraction.rules ---local=/opt/so/rules/nids/sorules/filters.rules -{%- endif %} ---disable=/opt/so/idstools/etc/disable.conf ---enable=/opt/so/idstools/etc/enable.conf ---modify=/opt/so/idstools/etc/modify.conf -{%- if IDSTOOLSMERGED.config.ruleset == 'ETOPEN' %} ---etopen -{%- elif IDSTOOLSMERGED.config.ruleset == 'ETPRO' %} ---etpro={{ IDSTOOLSMERGED.config.oinkcode }} -{%- elif IDSTOOLSMERGED.config.ruleset == 'TALOS' %} ---url=https://www.snort.org/rules/snortrules-snapshot-2983.tar.gz?oinkcode={{ IDSTOOLSMERGED.config.oinkcode }} -{%- endif %} -{%- endif %} {%- if IDSTOOLSMERGED.config.urls | length > 0 %} {%- for URL in IDSTOOLSMERGED.config.urls %} --url={{ URL }} diff --git a/salt/idstools/tools/sbin/so-rule-update b/salt/idstools/tools/sbin/so-rule-update deleted file mode 100755 index a3c2616a4..000000000 --- a/salt/idstools/tools/sbin/so-rule-update +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash - -. /usr/sbin/so-common - -argstr="" -for arg in "$@"; do - argstr="${argstr} \"${arg}\"" -done - -docker exec so-idstools /bin/bash -c "cd /opt/so/idstools/etc && idstools-rulecat --force ${argstr}" diff --git a/salt/idstools/tools/sbin_jinja/so-rule-update b/salt/idstools/tools/sbin_jinja/so-rule-update new file mode 100755 index 000000000..6a5976a1c --- /dev/null +++ b/salt/idstools/tools/sbin_jinja/so-rule-update @@ -0,0 +1,32 @@ +#!/bin/bash +. 
/usr/sbin/so-common + +{%- from 'vars/globals.map.jinja' import GLOBALS %} +{%- from 'idstools/map.jinja' import IDSTOOLSMERGED %} +{%- set proxy = salt['pillar.get']('manager:proxy') %} + +mkdir -p /nsm/rules/suricata +chown -R socore:socore /nsm/rules/suricata +# Download the rules from the internet +{%- if GLOBALS.airgap is not sameas true %} +{%- if proxy %} +export http_proxy={{ proxy }} +export https_proxy={{ proxy }} +export no_proxy={{ salt['pillar.get']('manager:no_proxy') }} +{%- endif %} +{%- if IDSTOOLSMERGED.config.ruleset == 'ETOPEN' %} +docker exec so-idstools idstools-rulecat -v --suricata-version 6.0 -o /nsm/rules/suricata/ --merged=/nsm/rules/suricata/emerging-all.rules --force +{%- elif IDSTOOLSMERGED.config.ruleset == 'ETPRO' %} +docker exec so-idstools idstools-rulecat -v --suricata-version 6.0 -o /nsm/rules/suricata/ --merged=/nsm/rules/suricata/emerging-all.rules --force --etpro={{ IDSTOOLSMERGED.config.oinkcode }} +{%- elif IDSTOOLSMERGED.config.ruleset == 'TALOS' %} +docker exec so-idstools idstools-rulecat -v --suricata-version 6.0 -o /nsm/rules/suricata/ --merged=/nsm/rules/suricata/emerging-all.rules --force --url=https://www.snort.org/rules/snortrules-snapshot-2983.tar.gz?oinkcode={{ IDSTOOLSMERGED.config.oinkcode }} +{%- endif %} +{%- endif %} + + +argstr="" +for arg in "$@"; do + argstr="${argstr} \"${arg}\"" +done + +docker exec so-idstools /bin/bash -c "cd /opt/so/idstools/etc && idstools-rulecat --force ${argstr}" diff --git a/salt/influxdb/enabled.sls b/salt/influxdb/enabled.sls index 209406932..70f4c404f 100644 --- a/salt/influxdb/enabled.sls +++ b/salt/influxdb/enabled.sls @@ -30,16 +30,32 @@ so-influxdb: - DOCKER_INFLUXDB_INIT_ORG=Security Onion - DOCKER_INFLUXDB_INIT_BUCKET=telegraf/so_short_term - DOCKER_INFLUXDB_INIT_ADMIN_TOKEN={{ TOKEN }} + {% if DOCKER.containers['so-influxdb'].extra_env %} + {% for XTRAENV in DOCKER.containers['so-influxdb'].extra_env %} + - {{ XTRAENV }} + {% endfor %} + {% endif %} - binds: - 
/opt/so/log/influxdb/:/log:rw - /opt/so/conf/influxdb/config.yaml:/conf/config.yaml:ro - /nsm/influxdb:/var/lib/influxdb2:rw - /etc/pki/influxdb.crt:/conf/influxdb.crt:ro - /etc/pki/influxdb.key:/conf/influxdb.key:ro + {% if DOCKER.containers['so-influxdb'].custom_bind_mounts %} + {% for BIND in DOCKER.containers['so-influxdb'].custom_bind_mounts %} + - {{ BIND }} + {% endfor %} + {% endif %} - port_bindings: {% for BINDING in DOCKER.containers['so-influxdb'].port_bindings %} - {{ BINDING }} {% endfor %} + {% if DOCKER.containers['so-influxdb'].extra_hosts %} + - extra_hosts: + {% for XTRAHOST in DOCKER.containers['so-influxdb'].extra_hosts %} + - {{ XTRAHOST }} + {% endfor %} + {% endif %} - watch: - file: influxdbconf - require: diff --git a/salt/kibana/enabled.sls b/salt/kibana/enabled.sls index 8f7091a0f..56aac26cc 100644 --- a/salt/kibana/enabled.sls +++ b/salt/kibana/enabled.sls @@ -25,13 +25,28 @@ so-kibana: - ELASTICSEARCH_HOST={{ GLOBALS.manager }} - ELASTICSEARCH_PORT=9200 - MANAGER={{ GLOBALS.manager }} + {% if DOCKER.containers['so-kibana'].extra_env %} + {% for XTRAENV in DOCKER.containers['so-kibana'].extra_env %} + - {{ XTRAENV }} + {% endfor %} + {% endif %} - extra_hosts: - {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }} + {% if DOCKER.containers['so-kibana'].extra_hosts %} + {% for XTRAHOST in DOCKER.containers['so-kibana'].extra_hosts %} + - {{ XTRAHOST }} + {% endfor %} + {% endif %} - binds: - /opt/so/conf/kibana/etc:/usr/share/kibana/config:rw - /opt/so/log/kibana:/var/log/kibana:rw - /opt/so/conf/kibana/customdashboards:/usr/share/kibana/custdashboards:ro - /sys/fs/cgroup:/sys/fs/cgroup:ro + {% if DOCKER.containers['so-kibana'].custom_bind_mounts %} + {% for BIND in DOCKER.containers['so-kibana'].custom_bind_mounts %} + - {{ BIND }} + {% endfor %} + {% endif %} - port_bindings: {% for BINDING in DOCKER.containers['so-kibana'].port_bindings %} - {{ BINDING }} diff --git a/salt/kratos/enabled.sls b/salt/kratos/enabled.sls index 
9358c9349..52d53a4db 100644 --- a/salt/kratos/enabled.sls +++ b/salt/kratos/enabled.sls @@ -25,10 +25,27 @@ so-kratos: - /opt/so/conf/kratos/kratos.yaml:/kratos-conf/kratos.yaml:ro - /opt/so/log/kratos/:/kratos-log:rw - /nsm/kratos/db:/kratos-data:rw + {% if DOCKER.containers['so-kratos'].custom_bind_mounts %} + {% for BIND in DOCKER.containers['so-kratos'].custom_bind_mounts %} + - {{ BIND }} + {% endfor %} + {% endif %} - port_bindings: {% for BINDING in DOCKER.containers['so-kratos'].port_bindings %} - {{ BINDING }} {% endfor %} + {% if DOCKER.containers['so-kratos'].extra_hosts %} + - extra_hosts: + {% for XTRAHOST in DOCKER.containers['so-kratos'].extra_hosts %} + - {{ XTRAHOST }} + {% endfor %} + {% endif %} + {% if DOCKER.containers['so-kratos'].extra_env %} + - environment: + {% for XTRAENV in DOCKER.containers['so-kratos'].extra_env %} + - {{ XTRAENV }} + {% endfor %} + {% endif %} - restart_policy: unless-stopped - watch: - file: kratosschema diff --git a/salt/logstash/enabled.sls b/salt/logstash/enabled.sls index 65905cd6c..c0129c6e1 100644 --- a/salt/logstash/enabled.sls +++ b/salt/logstash/enabled.sls @@ -26,8 +26,18 @@ so-logstash: - ipv4_address: {{ DOCKER.containers['so-logstash'].ip }} - user: logstash - extra_hosts: {{ REDIS_NODES }} + {% if DOCKER.containers['so-logstash'].extra_hosts %} + {% for XTRAHOST in DOCKER.containers['so-logstash'].extra_hosts %} + - {{ XTRAHOST }} + {% endfor %} + {% endif %} - environment: - LS_JAVA_OPTS=-Xms{{ lsheap }} -Xmx{{ lsheap }} + {% if DOCKER.containers['so-logstash'].extra_env %} + {% for XTRAENV in DOCKER.containers['so-logstash'].extra_env %} + - {{ XTRAENV }} + {% endfor %} + {% endif %} - port_bindings: {% for BINDING in DOCKER.containers['so-logstash'].port_bindings %} - {{ BINDING }} @@ -65,6 +75,11 @@ so-logstash: - /opt/so/log/fleet/:/osquery/logs:ro - /opt/so/log/strelka:/strelka:ro {% endif %} + {% if DOCKER.containers['so-logstash'].custom_bind_mounts %} + {% for BIND in 
DOCKER.containers['so-logstash'].custom_bind_mounts %} + - {{ BIND }} + {% endfor %} + {% endif %} - watch: - file: lsetcsync {% for assigned_pipeline in LOGSTASH_MERGED.assigned_pipelines.roles[GLOBALS.role.split('-')[1]] %} diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 4a23d033d..516facae4 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -15,7 +15,6 @@ POSTVERSION=$INSTALLEDVERSION INSTALLEDSALTVERSION=$(salt --versions-report | grep Salt: | awk '{print $2}') BATCHSIZE=5 SOUP_LOG=/root/soup.log -INFLUXDB_MIGRATION_LOG=/opt/so/log/influxdb/soup_migration.log WHATWOULDYOUSAYYAHDOHERE=soup whiptail_title='Security Onion UPdater' NOTIFYCUSTOMELASTICCONFIG=false @@ -304,11 +303,7 @@ check_log_size_limit() { check_os_updates() { # Check to see if there are OS updates NEEDUPDATES="We have detected missing operating system (OS) updates. Do you want to install these OS updates now? This could take a while depending on the size of your grid and how many packages are missing, but it is recommended to keep your system updated." - if [[ $OS == 'ubuntu' ]]; then - OSUPDATES=$(apt list --upgradeable | grep -v "^Listing..." | grep -v "^docker-ce" | grep -v "^wazuh-" | grep -v "^salt-" | wc -l) - else - OSUPDATES=$(yum -q list updates | wc -l) - fi + OSUPDATES=$(yum -q list updates | wc -l) if [[ "$OSUPDATES" -gt 0 ]]; then if [[ -z $UNATTENDED ]]; then echo "$NEEDUPDATES" @@ -362,117 +357,12 @@ clone_to_tmp() { fi } -elastalert_indices_check() { - - # Stop Elastalert to prevent Elastalert indices from being re-created - if grep -q "^so-elastalert$" /opt/so/conf/so-status/so-status.conf ; then - so-elastalert-stop || true - fi - - # Wait for ElasticSearch to initialize - echo -n "Waiting for ElasticSearch..." - COUNT=0 - ELASTICSEARCH_CONNECTED="no" - while [[ "$COUNT" -le 240 ]]; do - so-elasticsearch-query / -k --output /dev/null - if [ $? -eq 0 ]; then - ELASTICSEARCH_CONNECTED="yes" - echo "connected!" 
- break - else - ((COUNT+=1)) - sleep 1 - echo -n "." - fi - done - - # Unable to connect to Elasticsearch - if [ "$ELASTICSEARCH_CONNECTED" == "no" ]; then - echo - echo -e "Connection attempt timed out. Unable to connect to ElasticSearch. \nPlease try: \n -checking log(s) in /var/log/elasticsearch/\n -running 'sudo docker ps' \n -running 'sudo so-elastic-restart'" - echo - exit 1 - fi - - # Check Elastalert indices - echo "Deleting Elastalert indices to prevent issues with upgrade to Elastic 8..." - CHECK_COUNT=0 - while [[ "$CHECK_COUNT" -le 2 ]]; do - # Delete Elastalert indices - for i in $(so-elasticsearch-query _cat/indices | grep elastalert | awk '{print $3}'); do - so-elasticsearch-query $i -XDELETE; - done - - # Check to ensure Elastalert indices are deleted - COUNT=0 - ELASTALERT_INDICES_DELETED="no" - while [[ "$COUNT" -le 240 ]]; do - RESPONSE=$(so-elasticsearch-query elastalert*) - if [[ "$RESPONSE" == "{}" ]]; then - ELASTALERT_INDICES_DELETED="yes" - echo "Elastalert indices successfully deleted." - break - else - ((COUNT+=1)) - sleep 1 - echo -n "." - fi - done - ((CHECK_COUNT+=1)) - done - - # If we were unable to delete the Elastalert indices, exit the script - if [ "$ELASTALERT_INDICES_DELETED" == "no" ]; then - echo - echo -e "Unable to connect to delete Elastalert indices. Exiting." - echo - exit 1 - fi -} - enable_highstate() { echo "Enabling highstate." salt-call state.enable highstate -l info --local echo "" } -es_version_check() { - CHECK_ES=$(echo $INSTALLEDVERSION | awk -F. '{print $3}') - - if [ "$CHECK_ES" -lt "110" ]; then - echo "You are currently running Security Onion $INSTALLEDVERSION. You will need to update to version 2.3.130 before updating to 2.3.140 or higher." 
- echo "" - echo "If your deployment has Internet access, you can use the following command to update to 2.3.130:" - echo "sudo BRANCH=2.3.130-20220607 soup" - echo "" - echo "Otherwise, if your deployment is configured for airgap, you can instead download the 2.3.130 ISO image from https://download.securityonion.net/file/securityonion/securityonion-2.3.130-20220607.iso." - echo "" - echo "*** Once you have updated to 2.3.130, you can then update to 2.3.140 or higher as you would normally. ***" - exit 1 - fi -} - -es_indices_check() { - echo "Checking for unsupported Elasticsearch indices..." - UNSUPPORTED_INDICES=$(for INDEX in $(so-elasticsearch-indices-list | awk '{print $3}'); do so-elasticsearch-query $INDEX/_settings?human |grep '"created_string":"6' | jq -r 'keys'[0]; done) - if [ -z "$UNSUPPORTED_INDICES" ]; then - echo "No unsupported indices found." - else - echo "The following indices were created with Elasticsearch 6, and are not supported when upgrading to Elasticsearch 8. These indices may need to be deleted, migrated, or re-indexed before proceeding with the upgrade. Please see $DOC_BASE_URL/soup.html#elastic-8 for more details." - echo - echo "$UNSUPPORTED_INDICES" - exit 1 - fi -} - -generate_and_clean_tarballs() { - local new_version - new_version=$(cat $UPDATE_DIR/VERSION) - [ -d /opt/so/repo ] || mkdir -p /opt/so/repo - tar -czf "/opt/so/repo/$new_version.tar.gz" -C "$UPDATE_DIR" . - find "/opt/so/repo" -type f -not -name "$new_version.tar.gz" -exec rm -rf {} \; -} - highstate() { # Run a highstate. 
salt-call state.highstate -l info queue=True @@ -480,39 +370,26 @@ highstate() { masterlock() { echo "Locking Salt Master" - if [[ "$INSTALLEDVERSION" =~ rc.1 ]]; then - TOPFILE=/opt/so/saltstack/default/salt/top.sls - BACKUPTOPFILE=/opt/so/saltstack/default/salt/top.sls.backup - mv -v $TOPFILE $BACKUPTOPFILE - echo "base:" > $TOPFILE - echo " $MINIONID:" >> $TOPFILE - echo " - ca" >> $TOPFILE - echo " - ssl" >> $TOPFILE - echo " - elasticsearch" >> $TOPFILE - fi + TOPFILE=/opt/so/saltstack/default/salt/top.sls + BACKUPTOPFILE=/opt/so/saltstack/default/salt/top.sls.backup + mv -v $TOPFILE $BACKUPTOPFILE + echo "base:" > $TOPFILE + echo " $MINIONID:" >> $TOPFILE + echo " - ca" >> $TOPFILE + echo " - ssl" >> $TOPFILE + echo " - elasticsearch" >> $TOPFILE } masterunlock() { echo "Unlocking Salt Master" - if [[ "$INSTALLEDVERSION" =~ rc.1 ]]; then - mv -v $BACKUPTOPFILE $TOPFILE - fi + mv -v $BACKUPTOPFILE $TOPFILE } preupgrade_changes() { # This function is to add any new pillar items if needed. echo "Checking to see if changes are needed." 
- [[ "$INSTALLEDVERSION" == 2.3.0 || "$INSTALLEDVERSION" == 2.3.1 || "$INSTALLEDVERSION" == 2.3.2 || "$INSTALLEDVERSION" == 2.3.10 ]] && up_to_2.3.20 - [[ "$INSTALLEDVERSION" == 2.3.20 || "$INSTALLEDVERSION" == 2.3.21 ]] && up_to_2.3.30 - [[ "$INSTALLEDVERSION" == 2.3.30 || "$INSTALLEDVERSION" == 2.3.40 ]] && up_to_2.3.50 - [[ "$INSTALLEDVERSION" == 2.3.50 || "$INSTALLEDVERSION" == 2.3.51 || "$INSTALLEDVERSION" == 2.3.52 || "$INSTALLEDVERSION" == 2.3.60 || "$INSTALLEDVERSION" == 2.3.61 || "$INSTALLEDVERSION" == 2.3.70 ]] && up_to_2.3.80 - [[ "$INSTALLEDVERSION" == 2.3.80 ]] && up_to_2.3.90 - [[ "$INSTALLEDVERSION" == 2.3.90 || "$INSTALLEDVERSION" == 2.3.91 ]] && up_to_2.3.100 - [[ "$INSTALLEDVERSION" == 2.3.100 ]] && up_to_2.3.110 - [[ "$INSTALLEDVERSION" == 2.3.110 ]] && up_to_2.3.120 - [[ "$INSTALLEDVERSION" == 2.3.120 ]] && up_to_2.3.130 - [[ "$INSTALLEDVERSION" == 2.3.130 ]] && up_to_2.3.140 + [[ "$INSTALLEDVERSION" == 2.4.2 ]] && up_to_2.4.3 true } @@ -520,100 +397,17 @@ postupgrade_changes() { # This function is to add any new pillar items if needed. echo "Running post upgrade processes." 
- [[ "$POSTVERSION" == 2.3.0 || "$POSTVERSION" == 2.3.1 || "$POSTVERSION" == 2.3.2 || "$POSTVERSION" == 2.3.10 || "$POSTVERSION" == 2.3.20 ]] && post_to_2.3.21 - [[ "$POSTVERSION" == 2.3.21 || "$POSTVERSION" == 2.3.30 ]] && post_to_2.3.40 - [[ "$POSTVERSION" == 2.3.40 || "$POSTVERSION" == 2.3.50 || "$POSTVERSION" == 2.3.51 || "$POSTVERSION" == 2.3.52 ]] && post_to_2.3.60 - [[ "$POSTVERSION" == 2.3.60 || "$POSTVERSION" == 2.3.61 || "$POSTVERSION" == 2.3.70 || "$POSTVERSION" == 2.3.80 ]] && post_to_2.3.90 - [[ "$POSTVERSION" == 2.3.90 || "$POSTVERSION" == 2.3.91 ]] && post_to_2.3.100 - [[ "$POSTVERSION" == 2.3.100 ]] && post_to_2.3.110 - [[ "$POSTVERSION" == 2.3.110 ]] && post_to_2.3.120 - [[ "$POSTVERSION" == 2.3.120 ]] && post_to_2.3.130 - [[ "$POSTVERSION" == 2.3.130 ]] && post_to_2.3.140 + [[ "$POSTVERSION" == 2.4.2 ]] && post_to_2.4.3 true } -post_to_2.3.21() { - salt-call state.apply playbook.OLD_db_init - rm -f /opt/so/rules/elastalert/playbook/*.yaml - so-playbook-ruleupdate >> /root/soup_playbook_rule_update.log 2>&1 & - POSTVERSION=2.3.21 +post_to_2.4.3() { + echo "Nothing to apply" + POSTVERSION=2.4.3 } -post_to_2.3.40() { - so-playbook-sigma-refresh >> /root/soup_playbook_sigma_refresh.log 2>&1 & - so-kibana-space-defaults - POSTVERSION=2.3.40 -} - -post_to_2.3.60() { - for table in identity_recovery_addresses selfservice_recovery_flows selfservice_registration_flows selfservice_verification_flows identities identity_verification_tokens identity_credentials selfservice_settings_flows identity_recovery_tokens continuity_containers identity_credential_identifiers identity_verifiable_addresses courier_messages selfservice_errors sessions selfservice_login_flows - do - echo "Forcing Kratos network migration: $table" - sqlite3 /opt/so/conf/kratos/db/db.sqlite "update $table set nid=(select id from networks limit 1);" - done - - POSTVERSION=2.3.60 -} - -post_to_2.3.90() { - # Create FleetDM service account - FLEET_MANAGER=$(lookup_pillar fleet_manager) - if [[ 
"$FLEET_MANAGER" == "True" ]]; then - FLEET_SA_EMAIL=$(lookup_pillar_secret fleet_sa_email) - FLEET_SA_PW=$(lookup_pillar_secret fleet_sa_password) - MYSQL_PW=$(lookup_pillar_secret mysql) - - FLEET_HASH=$(docker exec so-soctopus python -c "import bcrypt; print(bcrypt.hashpw('$FLEET_SA_PW'.encode('utf-8'), bcrypt.gensalt()).decode('utf-8'));" 2>&1) - MYSQL_OUTPUT=$(docker exec so-mysql mysql -u root --password=$MYSQL_PW fleet -e \ - "INSERT INTO users (password,salt,email,name,global_role) VALUES ('$FLEET_HASH','','$FLEET_SA_EMAIL','$FLEET_SA_EMAIL','admin')" 2>&1) - - if [[ $? -eq 0 ]]; then - echo "Successfully added service account to Fleet" - else - echo "Unable to add service account to Fleet" - echo "$MYSQL_OUTPUT" - fi - fi - - POSTVERSION=2.3.90 -} - -post_to_2.3.100() { - echo "Post Processing for 2.3.100" - POSTVERSION=2.3.100 -} - -post_to_2.3.110() { - echo "Post Processing for 2.3.110" - echo "Removing old Elasticsearch index templates" - [ -d /opt/so/saltstack/default/salt/elasticsearch/templates/so ] && rm -rf /opt/so/saltstack/default/salt/elasticsearch/templates/so - echo "Updating Kibana dashboards" - salt-call state.apply kibana.so_savedobjects_defaults queue=True - POSTVERSION=2.3.110 -} - -post_to_2.3.120() { - echo "Post Processing for 2.3.120" - POSTVERSION=2.3.120 - sed -i '/so-thehive-es/d;/so-thehive/d;/so-cortex/d' /opt/so/conf/so-status/so-status.conf -} - -post_to_2.3.130() { - echo "Post Processing for 2.3.130" - POSTVERSION=2.3.130 -} - -post_to_2.3.140() { - echo "Post Processing for 2.3.140" - FORCE_SYNC=true so-user sync - so-kibana-restart - so-kibana-space-defaults - POSTVERSION=2.3.140 -} - - stop_salt_master() { # kill all salt jobs across the grid because the hang indefinitely if they are queued and salt-master restarts @@ -656,235 +450,9 @@ stop_salt_minion() { set -e } -up_to_2.3.20(){ - DOCKERSTUFFBIP=$(echo $DOCKERSTUFF | awk -F'.' 
'{print $1,$2,$3,1}' OFS='.')/24 - # Remove PCAP from global - sed '/pcap:/d' /opt/so/saltstack/local/pillar/global.sls - sed '/sensor_checkin_interval_ms:/d' /opt/so/saltstack/local/pillar/global.sls - # Add checking interval to glbal - echo "sensoroni:" >> /opt/so/saltstack/local/pillar/global.sls - echo " node_checkin_interval_ms: 10000" >> /opt/so/saltstack/local/pillar/global.sls - - # Update pillar fiels for new sensoroni functionality - for file in /opt/so/saltstack/local/pillar/minions/*; do - echo "sensoroni:" >> $file - echo " node_description:" >> $file - local SOMEADDRESS=$(cat $file | grep mainip | tail -n 1 | awk '{print $2'}) - echo " node_address: $SOMEADDRESS" >> $file - done - - # Remove old firewall config to reduce confusion - rm -f /opt/so/saltstack/default/pillar/firewall/ports.sls - - # Fix daemon.json by managing it - echo "docker:" >> /opt/so/saltstack/local/pillar/global.sls - DOCKERGREP=$(cat /etc/docker/daemon.json | grep base | awk {'print $3'} | cut -f1 -d"," | tr -d '"') - if [ -z "$DOCKERGREP" ]; then - echo " range: '172.17.0.0/24'" >> /opt/so/saltstack/local/pillar/global.sls - echo " bip: '172.17.0.1/24'" >> /opt/so/saltstack/local/pillar/global.sls - else - DOCKERSTUFF="${DOCKERGREP//\"}" - DOCKERSTUFFBIP=$(echo $DOCKERSTUFF | awk -F'.' 
'{print $1,$2,$3,1}' OFS='.')/24 - echo " range: '$DOCKERSTUFF/24'" >> /opt/so/saltstack/local/pillar/global.sls - echo " bip: '$DOCKERSTUFFBIP'" >> /opt/so/saltstack/local/pillar/global.sls - - fi - - INSTALLEDVERSION=2.3.20 -} - -up_to_2.3.30() { - # Replace any curly brace scalars with the same scalar in single quotes - readarray -t minion_pillars <<< "$(find /opt/so/saltstack/local/pillar/minions -type f -name '*.sls')" - for pillar in "${minion_pillars[@]}"; do - sed -i -r "s/ (\{\{.*}})$/ '\1'/g" "$pillar" - done - - # Change the IMAGEREPO - sed -i "/ imagerepo: 'securityonion'/c\ imagerepo: 'security-onion-solutions'" /opt/so/saltstack/local/pillar/global.sls - sed -i "/ imagerepo: securityonion/c\ imagerepo: 'security-onion-solutions'" /opt/so/saltstack/local/pillar/global.sls - - # Strelka rule repo pillar addition - if [[ $is_airgap -eq 0 ]]; then - # Add manager as default Strelka YARA rule repo - sed -i "/^strelka:/a \\ repos: \n - https://$HOSTNAME/repo/rules/strelka" /opt/so/saltstack/local/pillar/global.sls; - else - # Add Github repo for Strelka YARA rules - sed -i "/^strelka:/a \\ repos: \n - https://github.com/Neo23x0/signature-base" /opt/so/saltstack/local/pillar/global.sls; - fi - check_log_size_limit - INSTALLEDVERSION=2.3.30 -} - -up_to_2.3.50() { - - cat < /tmp/supersed.txt -/so-zeek:/ { - p; - n; - /shards:/ { - p; - n; - /warm:/ { - p; - n; - /close:/ { - s/close: 365/close: 45/; - p; - n; - /delete:/ { - s/delete: 45/delete: 365/; - p; - d; - } - } - } - } -} -p; -EOF - sed -n -i -f /tmp/supersed.txt /opt/so/saltstack/local/pillar/global.sls - rm /tmp/supersed.txt - INSTALLEDVERSION=2.3.50 -} - -up_to_2.3.80() { - - # Remove watermark settings from global.sls - sed -i '/ cluster_routing_allocation_disk/d' /opt/so/saltstack/local/pillar/global.sls - - # Add new indices to the global - sed -i '/ index_settings:/a \\ so-elasticsearch: \n shards: 1 \n warm: 7 \n close: 30 \n delete: 365' /opt/so/saltstack/local/pillar/global.sls - sed -i '/ 
index_settings:/a \\ so-logstash: \n shards: 1 \n warm: 7 \n close: 30 \n delete: 365' /opt/so/saltstack/local/pillar/global.sls - sed -i '/ index_settings:/a \\ so-kibana: \n shards: 1 \n warm: 7 \n close: 30 \n delete: 365' /opt/so/saltstack/local/pillar/global.sls - sed -i '/ index_settings:/a \\ so-redis: \n shards: 1 \n warm: 7 \n close: 30 \n delete: 365' /opt/so/saltstack/local/pillar/global.sls - - # Do some pillar formatting - tc=$(grep -w true_cluster /opt/so/saltstack/local/pillar/global.sls | awk -F: {'print tolower($2)'}| xargs) - - if [[ "$tc" == "true" ]]; then - tcname=$(grep -w true_cluster_name /opt/so/saltstack/local/pillar/global.sls | awk -F: {'print $2'}) - sed -i "/^elasticsearch:/a \\ config: \n cluster: \n name: $tcname" /opt/so/saltstack/local/pillar/global.sls - sed -i '/ true_cluster_name/d' /opt/so/saltstack/local/pillar/global.sls - sed -i '/ esclustername/d' /opt/so/saltstack/local/pillar/global.sls - - for file in /opt/so/saltstack/local/pillar/minions/*.sls; do - if [[ ${file} != *"manager.sls"* ]]; then - noderoutetype=$(grep -w node_route_type $file | awk -F: {'print $2'}) - if [ -n "$noderoutetype" ]; then - sed -i "/^elasticsearch:/a \\ config: \n node: \n attr: \n box_type: $noderoutetype" $file - sed -i '/ node_route_type/d' $file - noderoutetype='' - fi - fi - done - fi - - # check for local es config to inform user that the config in local is now ignored and those options need to be placed in the pillar - if [ -f "/opt/so/saltstack/local/salt/elasticsearch/files/elasticsearch.yml" ]; then - NOTIFYCUSTOMELASTICCONFIG=true - fi - - INSTALLEDVERSION=2.3.80 -} - -up_to_2.3.90() { - for i in manager managersearch eval standalone; do - echo "Checking for compgen match of /opt/so/saltstack/local/pillar/minions/*_$i.sls" - if compgen -G "/opt/so/saltstack/local/pillar/minions/*_$i.sls"; then - echo "Found compgen match for /opt/so/saltstack/local/pillar/minions/*_$i.sls" - for f in $(compgen -G 
"/opt/so/saltstack/local/pillar/minions/*_$i.sls"); do - if grep -qozP "^soc:\n.*es_index_patterns: '\*:so-\*,\*:endgame-\*'" "$f"; then - echo "soc:es_index_patterns already present in $f" - else - echo "Appending soc pillar data to $f" - echo "soc:" >> "$f" - sed -i "/^soc:/a \\ es_index_patterns: '*:so-*,*:endgame-*'" "$f" - fi - done - fi - done - - # Create Endgame Hostgroup - echo "Adding endgame hostgroup with so-firewall" - if so-firewall addhostgroup endgame 2>&1 | grep -q 'Already exists'; then - echo 'endgame hostgroup already exists' - else - echo 'endgame hostgroup added' - fi - - # Force influx to generate a new cert - echo "Moving influxdb.crt and influxdb.key to generate new certs" - mv -vf /etc/pki/influxdb.crt /etc/pki/influxdb.crt.2390upgrade - mv -vf /etc/pki/influxdb.key /etc/pki/influxdb.key.2390upgrade - - # remove old common ingest pipeline in default - rm -vf /opt/so/saltstack/default/salt/elasticsearch/files/ingest/common - # if custom common, move from local ingest to local ingest-dynamic - mkdir -vp /opt/so/saltstack/local/salt/elasticsearch/files/ingest-dynamic - if [[ -f "/opt/so/saltstack/local/salt/elasticsearch/files/ingest/common" ]]; then - mv -v /opt/so/saltstack/local/salt/elasticsearch/files/ingest/common /opt/so/saltstack/local/salt/elasticsearch/files/ingest-dynamic/common - # since json file, we need to wrap with raw - sed -i '1s/^/{% raw %}\n/' /opt/so/saltstack/local/salt/elasticsearch/files/ingest-dynamic/common - sed -i -e '$a{% endraw %}\n' /opt/so/saltstack/local/salt/elasticsearch/files/ingest-dynamic/common - fi - - # Generate FleetDM Service Account creds if they do not exist - if grep -q "fleet_sa_email" /opt/so/saltstack/local/pillar/secrets.sls; then - echo "FleetDM Service Account credentials already created..." - else - echo "Generating FleetDM Service Account credentials..." 
- FLEETSAPASS=$(get_random_value) - printf '%s\n'\ - " fleet_sa_email: service.account@securityonion.invalid"\ - " fleet_sa_password: $FLEETSAPASS"\ - >> /opt/so/saltstack/local/pillar/secrets.sls - - fi - - sed -i -re 's/^(playbook_admin.*|playbook_automation.*)/ \1/g' /opt/so/saltstack/local/pillar/secrets.sls - - INSTALLEDVERSION=2.3.90 -} - -up_to_2.3.100() { - fix_wazuh - - echo "Adding receiver hostgroup with so-firewall" - if so-firewall addhostgroup receiver 2>&1 | grep -q 'Already exists'; then - echo 'receiver hostgroup already exists' - else - echo 'receiver hostgroup added' - fi - - echo "Adding receiver to assigned_hostgroups.local.map.yaml" - grep -qxF " receiver:" /opt/so/saltstack/local/salt/firewall/assigned_hostgroups.local.map.yaml || sed -i -e '$a\ receiver:' /opt/so/saltstack/local/salt/firewall/assigned_hostgroups.local.map.yaml - - INSTALLEDVERSION=2.3.100 -} - -up_to_2.3.110() { - sed -i 's|shards|index_template:\n template:\n settings:\n index:\n number_of_shards|g' /opt/so/saltstack/local/pillar/global.sls - INSTALLEDVERSION=2.3.110 -} - -up_to_2.3.120() { - # Stop thehive services since these will be broken in .120 - so-thehive-stop - so-thehive-es-stop - so-cortex-stop - INSTALLEDVERSION=2.3.120 -} - -up_to_2.3.130() { - # Remove file for nav update - rm -f /opt/so/conf/navigator/layers/nav_layer_playbook.json - INSTALLEDVERSION=2.3.130 -} - -up_to_2.3.140() { - elastalert_indices_check +up_to_2.4.3() { + echo "Nothing to do for 2.4.3" ## - INSTALLEDVERSION=2.3.140 + INSTALLEDVERSION=2.4.3 } @@ -993,24 +561,6 @@ upgrade_salt() { echo "" yum versionlock add "salt-*" # Else do Ubuntu things - elif [[ $OS == 'ubuntu' ]]; then - echo "Removing apt hold for Salt." - echo "" - apt-mark unhold "salt-common" - apt-mark unhold "salt-master" - apt-mark unhold "salt-minion" - echo "Updating Salt packages." 
- echo "" - set +e - run_check_net_err \ - "sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -r -F -M -x python3 stable \"$NEWSALTVERSION\"" \ - "Could not update salt, please check $SOUP_LOG for details." - set -e - echo "Applying apt hold for Salt." - echo "" - apt-mark hold "salt-common" - apt-mark hold "salt-master" - apt-mark hold "salt-minion" fi echo "Checking if Salt was upgraded." @@ -1030,46 +580,6 @@ upgrade_salt() { } -update_repo() { - if [[ "$OS" == "centos" ]]; then - echo "Performing repo changes." - # Import GPG Keys - gpg_rpm_import - echo "Disabling fastestmirror." - disable_fastestmirror - echo "Deleting unneeded repo files." - DELREPOS=('CentOS-Base' 'CentOS-CR' 'CentOS-Debuginfo' 'docker-ce' 'CentOS-fasttrack' 'CentOS-Media' 'CentOS-Sources' 'CentOS-Vault' 'CentOS-x86_64-kernel' 'epel' 'epel-testing' 'saltstack' 'salt-latest' 'wazuh') - - for DELREPO in "${DELREPOS[@]}"; do - if [[ -f "/etc/yum.repos.d/$DELREPO.repo" ]]; then - echo "Deleting $DELREPO.repo" - rm -f "/etc/yum.repos.d/$DELREPO.repo" - fi - done - if [[ $is_airgap -eq 1 ]]; then - # Copy the new repo file if not airgap - cp $UPDATE_DIR/salt/repo/client/files/centos/securityonion.repo /etc/yum.repos.d/ - yum clean all - yum repolist - fi - elif [[ "$OS" == "ubuntu" ]]; then - ubuntu_version=$(grep VERSION_ID /etc/os-release | awk -F '[ "]' '{print $2}') - - if grep -q "UBUNTU_CODENAME=bionic" /etc/os-release; then - OSVER=bionic - elif grep -q "UBUNTU_CODENAME=focal" /etc/os-release; then - OSVER=focal - else - echo "We do not support your current version of Ubuntu." - exit 1 - fi - - rm -f /etc/apt/sources.list.d/salt.list - echo "deb https://repo.securityonion.net/file/securityonion-repo/ubuntu/$ubuntu_version/amd64/salt3004.2/ $OSVER main" > /etc/apt/sources.list.d/saltstack.list - apt-get update - fi -} - verify_latest_update_script() { # Check to see if the update scripts match. If not run the new one. 
CURRENTSOUP=$(md5sum /usr/sbin/soup | awk '{print $1}') @@ -1096,51 +606,37 @@ verify_latest_update_script() { fi } -apply_hotfix() { - if [[ "$INSTALLEDVERSION" == "2.3.90" ]] ; then - fix_wazuh - elif [[ "$INSTALLEDVERSION" == "2.3.110" ]] ; then - 2_3_10_hotfix_1 - else - echo "No actions required. ($INSTALLEDVERSION/$HOTFIXVERSION)" - fi -} +# Keeping this block in case we need to do a hotfix that requires salt update +#apply_hotfix() { +# if [[ "$INSTALLEDVERSION" == "2.3.90" ]] ; then +# fix_wazuh +# elif [[ "$INSTALLEDVERSION" == "2.3.110" ]] ; then +# 2_3_10_hotfix_1 +# else +# echo "No actions required. ($INSTALLEDVERSION/$HOTFIXVERSION)" +# fi +#} -fix_wazuh() { - FILE="/nsm/wazuh/etc/ossec.conf" - echo "Detecting if $FILE needs corrected..." - if [ -f "$FILE" ]; then - if head -1 $FILE | grep -q "xml version"; then - echo "$FILE has an XML header; removing" - sed -i 1d $FILE - docker restart so-wazuh # cannot use so-wazuh-restart here because the salt-master service is stopped - else - echo "$FILE does not have an XML header, so no changes are necessary." - fi - else - echo "$FILE does not exist, so no changes are necessary." - fi -} #upgrade salt to 3004.1 -2_3_10_hotfix_1() { - systemctl_func "stop" "$cron_service_name" - # update mine items prior to stopping salt-minion and salt-master - update_salt_mine - stop_salt_minion - stop_salt_master - update_repo - # Does salt need upgraded. If so update it. - if [[ $UPGRADESALT -eq 1 ]]; then - echo "Upgrading Salt" - # Update the repo files so it can actually upgrade - upgrade_salt - fi - systemctl_func "start" "salt-master" - systemctl_func "start" "salt-minion" - systemctl_func "start" "$cron_service_name" +#2_3_10_hotfix_1() { +# systemctl_func "stop" "$cron_service_name" +# # update mine items prior to stopping salt-minion and salt-master +# update_salt_mine +# stop_salt_minion +# stop_salt_master +# update_repo +# # Does salt need upgraded. If so update it. 
+# if [[ $UPGRADESALT -eq 1 ]]; then +# echo "Upgrading Salt" +# # Update the repo files so it can actually upgrade +# upgrade_salt +# fi +# systemctl_func "start" "salt-master" +# systemctl_func "start" "salt-minion" +# systemctl_func "start" "$cron_service_name" -} +#} main() { trap 'check_err $?' EXIT @@ -1198,23 +694,9 @@ main() { fi echo "Verifying we have the latest soup script." verify_latest_update_script - es_version_check - es_indices_check - elastalert_indices_check - echo "" - set_palette - check_elastic_license - echo "" + echo "Checking for OS updates." check_os_updates - - echo "Generating new repo archive" - generate_and_clean_tarballs - if [ -f /usr/sbin/so-image-common ]; then - . /usr/sbin/so-image-common - else - add_common - fi - + echo "Let's see if we need to update Security Onion." upgrade_check upgrade_space @@ -1224,7 +706,6 @@ main() { set -e if [[ $is_airgap -eq 0 ]]; then - update_centos_repo yum clean all check_os_updates fi diff --git a/salt/mysql/enabled.sls b/salt/mysql/enabled.sls index 12112121f..f9890c300 100644 --- a/salt/mysql/enabled.sls +++ b/salt/mysql/enabled.sls @@ -33,6 +33,11 @@ so-mysql: - ipv4_address: {{ DOCKER.containers['so-mysql'].ip }} - extra_hosts: - {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }} + {% if DOCKER.containers['so-mysql'].extra_hosts %} + {% for XTRAHOST in DOCKER.containers['so-mysql'].extra_hosts %} + - {{ XTRAHOST }} + {% endfor %} + {% endif %} - port_bindings: {% for BINDING in DOCKER.containers['so-mysql'].port_bindings %} - {{ BINDING }} @@ -40,11 +45,21 @@ so-mysql: - environment: - MYSQL_ROOT_HOST={{ GLOBALS.so_docker_bip }} - MYSQL_ROOT_PASSWORD=/etc/mypass + {% if DOCKER.containers['so-mysql'].extra_env %} + {% for XTRAENV in DOCKER.containers['so-mysql'].extra_env %} + - {{ XTRAENV }} + {% endfor %} + {% endif %} - binds: - /opt/so/conf/mysql/etc/my.cnf:/etc/my.cnf:ro - /opt/so/conf/mysql/etc/mypass:/etc/mypass - /nsm/mysql:/var/lib/mysql:rw - /opt/so/log/mysql:/var/log/mysql:rw + {% if 
DOCKER.containers['so-mysql'].custom_bind_mounts %} + {% for BIND in DOCKER.containers['so-mysql'].custom_bind_mounts %} + - {{ BIND }} + {% endfor %} + {% endif %} - watch: - /opt/so/conf/mysql/etc - require: diff --git a/salt/nginx/enabled.sls b/salt/nginx/enabled.sls index 592388cf6..93c5e4ebc 100644 --- a/salt/nginx/enabled.sls +++ b/salt/nginx/enabled.sls @@ -12,6 +12,15 @@ include: - nginx.config - nginx.sostatus +make-rule-dir-nginx: + file.directory: + - name: /nsm/rules + - user: socore + - group: socore + - recurse: + - user + - group + so-nginx: docker_container.running: - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-nginx:{{ GLOBALS.so_version }} @@ -21,6 +30,11 @@ so-nginx: - ipv4_address: {{ DOCKER.containers['so-nginx'].ip }} - extra_hosts: - {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }} + {% if DOCKER.containers['so-nginx'].extra_hosts %} + {% for XTRAHOST in DOCKER.containers['so-nginx'].extra_hosts %} + - {{ XTRAHOST }} + {% endfor %} + {% endif %} - binds: - /opt/so/conf/nginx/nginx.conf:/etc/nginx/nginx.conf:ro - /opt/so/log/nginx/:/var/log/nginx:rw @@ -37,7 +51,19 @@ so-nginx: - /opt/so/conf/navigator/enterprise-attack.json:/opt/socore/html/navigator/assets/enterprise-attack.json:ro - /opt/so/conf/navigator/pre-attack.json:/opt/socore/html/navigator/assets/pre-attack.json:ro - /nsm/repo:/opt/socore/html/repo:ro + - /nsm/rules:/nsm/rules:ro {% endif %} + {% if DOCKER.containers['so-nginx'].custom_bind_mounts %} + {% for BIND in DOCKER.containers['so-nginx'].custom_bind_mounts %} + - {{ BIND }} + {% endfor %} + {% endif %} + {% if DOCKER.containers['so-nginx'].extra_env %} + - environment: + {% for XTRAENV in DOCKER.containers['so-nginx'].extra_env %} + - {{ XTRAENV }} + {% endfor %} + {% endif %} - cap_add: NET_BIND_SERVICE - port_bindings: {% for BINDING in DOCKER.containers['so-nginx'].port_bindings %} diff --git a/salt/nginx/etc/nginx.conf b/salt/nginx/etc/nginx.conf index 7a8a24a1f..4aeb894dd 100644 --- 
a/salt/nginx/etc/nginx.conf +++ b/salt/nginx/etc/nginx.conf @@ -84,8 +84,8 @@ http { server { listen 7788; server_name {{ GLOBALS.url_base }}; - root /opt/socore/html/repo; - location /rules/ { + root /nsm/rules; + location / { allow all; sendfile on; sendfile_max_chunk 1m; diff --git a/salt/pcap/enabled.sls b/salt/pcap/enabled.sls index b4027065f..b7b030516 100644 --- a/salt/pcap/enabled.sls +++ b/salt/pcap/enabled.sls @@ -5,7 +5,9 @@ {% from 'allowed_states.map.jinja' import allowed_states %} {% if sls.split('.')[0] in allowed_states %} -{% from 'vars/globals.map.jinja' import GLOBALS %} +{% from 'vars/globals.map.jinja' import GLOBALS %} +{% from 'docker/docker.map.jinja' import DOCKER %} + include: - pcap.config @@ -24,6 +26,23 @@ so-steno: - /nsm/pcapindex:/nsm/pcapindex:rw - /nsm/pcaptmp:/tmp:rw - /opt/so/log/stenographer:/var/log/stenographer:rw + {% if DOCKER.containers['so-steno'].custom_bind_mounts %} + {% for BIND in DOCKER.containers['so-steno'].custom_bind_mounts %} + - {{ BIND }} + {% endfor %} + {% endif %} + {% if DOCKER.containers['so-steno'].extra_hosts %} + - extra_hosts: + {% for XTRAHOST in DOCKER.containers['so-steno'].extra_hosts %} + - {{ XTRAHOST }} + {% endfor %} + {% endif %} + {% if DOCKER.containers['so-steno'].extra_env %} + - environment: + {% for XTRAENV in DOCKER.containers['so-steno'].extra_env %} + - {{ XTRAENV }} + {% endfor %} + {% endif %} - watch: - file: stenoconf - require: diff --git a/salt/playbook/enabled.sls b/salt/playbook/enabled.sls index 22da3c0ff..434cb18e4 100644 --- a/salt/playbook/enabled.sls +++ b/salt/playbook/enabled.sls @@ -34,13 +34,28 @@ so-playbook: - ipv4_address: {{ DOCKER.containers['so-playbook'].ip }} - binds: - /opt/so/log/playbook:/playbook/log:rw + {% if DOCKER.containers['so-playbook'].custom_bind_mounts %} + {% for BIND in DOCKER.containers['so-playbook'].custom_bind_mounts %} + - {{ BIND }} + {% endfor %} + {% endif %} - extra_hosts: - {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }} + {% if 
DOCKER.containers['so-playbook'].extra_hosts %} + {% for XTRAHOST in DOCKER.containers['so-playbook'].extra_hosts %} + - {{ XTRAHOST }} + {% endfor %} + {% endif %} - environment: - REDMINE_DB_MYSQL={{ GLOBALS.manager }} - REDMINE_DB_DATABASE=playbook - REDMINE_DB_USERNAME=playbookdbuser - REDMINE_DB_PASSWORD={{ PLAYBOOKPASS }} + {% if DOCKER.containers['so-playbook'].extra_env %} + {% for XTRAENV in DOCKER.containers['so-playbook'].extra_env %} + - {{ XTRAENV }} + {% endfor %} + {% endif %} - port_bindings: {% for BINDING in DOCKER.containers['so-playbook'].port_bindings %} - {{ BINDING }} diff --git a/salt/redis/enabled.sls b/salt/redis/enabled.sls index 26f95e59f..c9ba37094 100644 --- a/salt/redis/enabled.sls +++ b/salt/redis/enabled.sls @@ -35,6 +35,23 @@ so-redis: {% else %} - /etc/ssl/certs/intca.crt:/certs/ca.crt:ro {% endif %} + {% if DOCKER.containers['so-redis'].custom_bind_mounts %} + {% for BIND in DOCKER.containers['so-redis'].custom_bind_mounts %} + - {{ BIND }} + {% endfor %} + {% endif %} + {% if DOCKER.containers['so-redis'].extra_hosts %} + - extra_hosts: + {% for XTRAHOST in DOCKER.containers['so-redis'].extra_hosts %} + - {{ XTRAHOST }} + {% endfor %} + {% endif %} + {% if DOCKER.containers['so-redis'].extra_env %} + - environment: + {% for XTRAENV in DOCKER.containers['so-redis'].extra_env %} + - {{ XTRAENV }} + {% endfor %} + {% endif %} - entrypoint: "redis-server /usr/local/etc/redis/redis.conf" - watch: - file: /opt/so/conf/redis/etc diff --git a/salt/registry/enabled.sls b/salt/registry/enabled.sls index 0ce3ee318..4d9867676 100644 --- a/salt/registry/enabled.sls +++ b/salt/registry/enabled.sls @@ -30,9 +30,25 @@ so-dockerregistry: - /nsm/docker-registry/docker:/var/lib/registry/docker:rw - /etc/pki/registry.crt:/etc/pki/registry.crt:ro - /etc/pki/registry.key:/etc/pki/registry.key:ro + {% if DOCKER.containers['so-dockerregistry'].custom_bind_mounts %} + {% for BIND in DOCKER.containers['so-dockerregistry'].custom_bind_mounts %} + - {{ 
BIND }} + {% endfor %} + {% endif %} + {% if DOCKER.containers['so-dockerregistry'].extra_hosts %} + - extra_hosts: + {% for XTRAHOST in DOCKER.containers['so-dockerregistry'].extra_hosts %} + - {{ XTRAHOST }} + {% endfor %} + {% endif %} - client_timeout: 180 - environment: - HOME=/root + {% if DOCKER.containers['so-dockerregistry'].extra_env %} + {% for XTRAENV in DOCKER.containers['so-dockerregistry'].extra_env %} + - {{ XTRAENV }} + {% endfor %} + {% endif %} - retry: attempts: 5 interval: 30 diff --git a/salt/sensoroni/enabled.sls b/salt/sensoroni/enabled.sls index e506de49d..2111e8f1b 100644 --- a/salt/sensoroni/enabled.sls +++ b/salt/sensoroni/enabled.sls @@ -4,6 +4,8 @@ # Elastic License 2.0. {% from 'vars/globals.map.jinja' import GLOBALS %} +{% from 'docker/docker.map.jinja' import DOCKER %} + include: - sensoroni.config @@ -21,6 +23,23 @@ so-sensoroni: - /opt/so/conf/sensoroni/sensoroni.json:/opt/sensoroni/sensoroni.json:ro - /opt/so/conf/sensoroni/analyzers:/opt/sensoroni/analyzers:rw - /opt/so/log/sensoroni:/opt/sensoroni/logs:rw + {% if DOCKER.containers['so-sensoroni'].custom_bind_mounts %} + {% for BIND in DOCKER.containers['so-sensoroni'].custom_bind_mounts %} + - {{ BIND }} + {% endfor %} + {% endif %} + {% if DOCKER.containers['so-sensoroni'].extra_hosts %} + - extra_hosts: + {% for XTRAHOST in DOCKER.containers['so-sensoroni'].extra_hosts %} + - {{ XTRAHOST }} + {% endfor %} + {% endif %} + {% if DOCKER.containers['so-sensoroni'].extra_env %} + - environment: + {% for XTRAENV in DOCKER.containers['so-sensoroni'].extra_env %} + - {{ XTRAENV }} + {% endfor %} + {% endif %} - watch: - file: /opt/so/conf/sensoroni/sensoroni.json - require: diff --git a/salt/soc/enabled.sls b/salt/soc/enabled.sls index 2e4528080..bc55f2d94 100644 --- a/salt/soc/enabled.sls +++ b/salt/soc/enabled.sls @@ -32,11 +32,27 @@ so-soc: - /opt/so/conf/soc/soc_users_roles:/opt/sensoroni/rbac/users_roles:rw - /opt/so/conf/soc/salt:/opt/sensoroni/salt:rw - 
/opt/so/saltstack:/opt/so/saltstack:rw + {% if DOCKER.containers['so-soc'].custom_bind_mounts %} + {% for BIND in DOCKER.containers['so-soc'].custom_bind_mounts %} + - {{ BIND }} + {% endfor %} + {% endif %} - extra_hosts: {{ DOCKER_EXTRA_HOSTS }} + {% if DOCKER.containers['so-soc'].extra_hosts %} + {% for XTRAHOST in DOCKER.containers['so-soc'].extra_hosts %} + - {{ XTRAHOST }} + {% endfor %} + {% endif %} - port_bindings: {% for BINDING in DOCKER.containers['so-soc'].port_bindings %} - {{ BINDING }} {% endfor %} + {% if DOCKER.containers['so-soc'].extra_env %} + - environment: + {% for XTRAENV in DOCKER.containers['so-soc'].extra_env %} + - {{ XTRAENV }} + {% endfor %} + {% endif %} - watch: - file: /opt/so/conf/soc/* - require: diff --git a/salt/soctopus/enabled.sls b/salt/soctopus/enabled.sls index 9c2ee4de7..2184de581 100644 --- a/salt/soctopus/enabled.sls +++ b/salt/soctopus/enabled.sls @@ -29,6 +29,11 @@ so-soctopus: {% if GLOBALS.airgap %} - /nsm/repo/rules/sigma:/soctopus/sigma {% endif %} + {% if DOCKER.containers['so-soctopus'].custom_bind_mounts %} + {% for BIND in DOCKER.containers['so-soctopus'].custom_bind_mounts %} + - {{ BIND }} + {% endfor %} + {% endif %} - port_bindings: {% for BINDING in DOCKER.containers['so-soctopus'].port_bindings %} - {{ BINDING }} @@ -36,6 +41,17 @@ so-soctopus: - extra_hosts: - {{GLOBALS.url_base}}:{{GLOBALS.manager_ip}} - {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }} + {% if DOCKER.containers['so-soctopus'].extra_hosts %} + {% for XTRAHOST in DOCKER.containers['so-soctopus'].extra_hosts %} + - {{ XTRAHOST }} + {% endfor %} + {% endif %} + {% if DOCKER.containers['so-soctopus'].extra_env %} + - environment: + {% for XTRAENV in DOCKER.containers['so-soctopus'].extra_env %} + - {{ XTRAENV }} + {% endfor %} + {% endif %} - require: - file: soctopusconf - file: navigatordefaultlayer diff --git a/salt/strelka/backend/enabled.sls b/salt/strelka/backend/enabled.sls index 2ba998e30..c2272cb84 100644 --- 
a/salt/strelka/backend/enabled.sls +++ b/salt/strelka/backend/enabled.sls @@ -18,6 +18,11 @@ strelka_backend: - binds: - /opt/so/conf/strelka/backend/:/etc/strelka/:ro - /opt/so/conf/strelka/rules/:/etc/yara/:ro + {% if DOCKER.containers['so-strelka-backend'].custom_bind_mounts %} + {% for BIND in DOCKER.containers['so-strelka-backend'].custom_bind_mounts %} + - {{ BIND }} + {% endfor %} + {% endif %} - name: so-strelka-backend - networks: - sobridge: @@ -25,6 +30,17 @@ strelka_backend: - command: strelka-backend - extra_hosts: - {{ GLOBALS.hostname }}:{{ GLOBALS.node_ip }} + {% if DOCKER.containers['so-strelka-backend'].extra_hosts %} + {% for XTRAHOST in DOCKER.containers['so-strelka-backend'].extra_hosts %} + - {{ XTRAHOST }} + {% endfor %} + {% endif %} + {% if DOCKER.containers['so-strelka-backend'].extra_env %} + - environment: + {% for XTRAENV in DOCKER.containers['so-strelka-backend'].extra_env %} + - {{ XTRAENV }} + {% endfor %} + {% endif %} - restart_policy: on-failure delete_so-strelka-backend_so-status.disabled: diff --git a/salt/strelka/config.sls b/salt/strelka/config.sls index aa51e4b03..53afb0ea3 100644 --- a/salt/strelka/config.sls +++ b/salt/strelka/config.sls @@ -43,14 +43,14 @@ strelka_sbin: - group: 939 - file_mode: 755 -#strelka_sbin_jinja: -# file.recurse: -# - name: /usr/sbin -# - source: salt://strelka/tools/sbin_jinja -# - user: 939 -# - group: 939 -# - file_mode: 755 -# - template: jinja +strelka_sbin_jinja: + file.recurse: + - name: /usr/sbin + - source: salt://strelka/tools/sbin_jinja + - user: 939 + - group: 939 + - file_mode: 755 + - template: jinja {% else %} diff --git a/salt/strelka/coordinator/enabled.sls b/salt/strelka/coordinator/enabled.sls index 9f2627344..7a156bc9a 100644 --- a/salt/strelka/coordinator/enabled.sls +++ b/salt/strelka/coordinator/enabled.sls @@ -22,11 +22,27 @@ strelka_coordinator: - entrypoint: redis-server --save "" --appendonly no - extra_hosts: - {{ GLOBALS.hostname }}:{{ GLOBALS.node_ip }} + {% if 
DOCKER.containers['so-strelka-coordinator'].extra_hosts %} + {% for XTRAHOST in DOCKER.containers['so-strelka-coordinator'].extra_hosts %} + - {{ XTRAHOST }} + {% endfor %} + {% endif %} - port_bindings: {% for BINDING in DOCKER.containers['so-strelka-coordinator'].port_bindings %} - {{ BINDING }} {% endfor %} - + {% if DOCKER.containers['so-strelka-coordinator'].extra_env %} + - environment: + {% for XTRAENV in DOCKER.containers['so-strelka-coordinator'].extra_env %} + - {{ XTRAENV }} + {% endfor %} + {% endif %} + {% if DOCKER.containers['so-strelka-coordinator'].custom_bind_mounts %} + - binds: + {% for BIND in DOCKER.containers['so-strelka-coordinator'].custom_bind_mounts %} + - {{ BIND }} + {% endfor %} + {% endif %} delete_so-strelka-coordinator_so-status.disabled: file.uncomment: - name: /opt/so/conf/so-status/so-status.conf diff --git a/salt/strelka/defaults.yaml b/salt/strelka/defaults.yaml index 2379bd012..d8b238b03 100644 --- a/salt/strelka/defaults.yaml +++ b/salt/strelka/defaults.yaml @@ -542,8 +542,7 @@ strelka: enabled: False rules: enabled: True - repos: - - https://github.com/Neo23x0/signature-base + repos: [] excluded: - apt_flame2_orchestrator.yar - apt_tetris.yar diff --git a/salt/strelka/filestream/enabled.sls b/salt/strelka/filestream/enabled.sls index 6c6ee0b97..f04631eca 100644 --- a/salt/strelka/filestream/enabled.sls +++ b/salt/strelka/filestream/enabled.sls @@ -18,6 +18,11 @@ strelka_filestream: - binds: - /opt/so/conf/strelka/filestream/:/etc/strelka/:ro - /nsm/strelka:/nsm/strelka + {% if DOCKER.containers['so-strelka-filestream'].custom_bind_mounts %} + {% for BIND in DOCKER.containers['so-strelka-filestream'].custom_bind_mounts %} + - {{ BIND }} + {% endfor %} + {% endif %} - name: so-strelka-filestream - networks: - sobridge: @@ -25,6 +30,17 @@ strelka_filestream: - command: strelka-filestream - extra_hosts: - {{ GLOBALS.hostname }}:{{ GLOBALS.node_ip }} + {% if DOCKER.containers['so-strelka-filestream'].extra_hosts %} + {% for 
XTRAHOST in DOCKER.containers['so-strelka-filestream'].extra_hosts %} + - {{ XTRAHOST }} + {% endfor %} + {% endif %} + {% if DOCKER.containers['so-strelka-filestream'].extra_env %} + - environment: + {% for XTRAENV in DOCKER.containers['so-strelka-filestream'].extra_env %} + - {{ XTRAENV }} + {% endfor %} + {% endif %} delete_so-strelka-filestream_so-status.disabled: file.uncomment: diff --git a/salt/strelka/frontend/enabled.sls b/salt/strelka/frontend/enabled.sls index 28b7dc19c..e4ecc7ca5 100644 --- a/salt/strelka/frontend/enabled.sls +++ b/salt/strelka/frontend/enabled.sls @@ -18,6 +18,11 @@ strelka_frontend: - binds: - /opt/so/conf/strelka/frontend/:/etc/strelka/:ro - /nsm/strelka/log/:/var/log/strelka/:rw + {% if DOCKER.containers['so-strelka-frontend'].custom_bind_mounts %} + {% for BIND in DOCKER.containers['so-strelka-frontend'].custom_bind_mounts %} + - {{ BIND }} + {% endfor %} + {% endif %} - privileged: True - name: so-strelka-frontend - networks: @@ -26,10 +31,21 @@ strelka_frontend: - command: strelka-frontend - extra_hosts: - {{ GLOBALS.hostname }}:{{ GLOBALS.node_ip }} + {% if DOCKER.containers['so-strelka-frontend'].extra_hosts %} + {% for XTRAHOST in DOCKER.containers['so-strelka-frontend'].extra_hosts %} + - {{ XTRAHOST }} + {% endfor %} + {% endif %} - port_bindings: {% for BINDING in DOCKER.containers['so-strelka-frontend'].port_bindings %} - {{ BINDING }} {% endfor %} + {% if DOCKER.containers['so-strelka-frontend'].extra_env %} + - environment: + {% for XTRAENV in DOCKER.containers['so-strelka-frontend'].extra_env %} + - {{ XTRAENV }} + {% endfor %} + {% endif %} delete_so-strelka-frontend_so-status.disabled: file.uncomment: diff --git a/salt/strelka/gatekeeper/enabled.sls b/salt/strelka/gatekeeper/enabled.sls index 326fb752f..b309403f4 100644 --- a/salt/strelka/gatekeeper/enabled.sls +++ b/salt/strelka/gatekeeper/enabled.sls @@ -22,10 +22,27 @@ strelka_gatekeeper: - entrypoint: redis-server --save "" --appendonly no --maxmemory-policy 
allkeys-lru - extra_hosts: - {{ GLOBALS.hostname }}:{{ GLOBALS.node_ip }} + {% if DOCKER.containers['so-strelka-gatekeeper'].extra_hosts %} + {% for XTRAHOST in DOCKER.containers['so-strelka-gatekeeper'].extra_hosts %} + - {{ XTRAHOST }} + {% endfor %} + {% endif %} - port_bindings: {% for BINDING in DOCKER.containers['so-strelka-gatekeeper'].port_bindings %} - {{ BINDING }} {% endfor %} + {% if DOCKER.containers['so-strelka-gatekeeper'].custom_bind_mounts %} + - binds: + {% for BIND in DOCKER.containers['so-strelka-gatekeeper'].custom_bind_mounts %} + - {{ BIND }} + {% endfor %} + {% endif %} + {% if DOCKER.containers['so-strelka-gatekeeper'].extra_env %} + - environment: + {% for XTRAENV in DOCKER.containers['so-strelka-gatekeeper'].extra_env %} + - {{ XTRAENV }} + {% endfor %} + {% endif %} delete_so-strelka-gatekeeper_so-status.disabled: file.uncomment: diff --git a/salt/strelka/manager/enabled.sls b/salt/strelka/manager/enabled.sls index 0c78c9dcb..aec44b4b0 100644 --- a/salt/strelka/manager/enabled.sls +++ b/salt/strelka/manager/enabled.sls @@ -17,6 +17,11 @@ strelka_manager: - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-strelka-manager:{{ GLOBALS.so_version }} - binds: - /opt/so/conf/strelka/manager/:/etc/strelka/:ro + {% if DOCKER.containers['so-strelka-manager'].custom_bind_mounts %} + {% for BIND in DOCKER.containers['so-strelka-manager'].custom_bind_mounts %} + - {{ BIND }} + {% endfor %} + {% endif %} - name: so-strelka-manager - networks: - sobridge: @@ -24,6 +29,17 @@ strelka_manager: - command: strelka-manager - extra_hosts: - {{ GLOBALS.hostname }}:{{ GLOBALS.node_ip }} + {% if DOCKER.containers['so-strelka-manager'].extra_hosts %} + {% for XTRAHOST in DOCKER.containers['so-strelka-manager'].extra_hosts %} + - {{ XTRAHOST }} + {% endfor %} + {% endif %} + {% if DOCKER.containers['so-strelka-manager'].extra_env %} + - environment: + {% for XTRAENV in DOCKER.containers['so-strelka-manager'].extra_env %} + - {{ XTRAENV }} + {% 
endfor %} + {% endif %} delete_so-strelka-manager_so-status.disabled: file.uncomment: diff --git a/salt/strelka/tools/sbin_jinja/so-yara-update b/salt/strelka/tools/sbin_jinja/so-yara-update new file mode 100644 index 000000000..9ec6fa41f --- /dev/null +++ b/salt/strelka/tools/sbin_jinja/so-yara-update @@ -0,0 +1,21 @@ +#!/bin/bash +NOROOT=1 +. /usr/sbin/so-common + +{%- set proxy = salt['pillar.get']('manager:proxy') %} + +# Download the rules from the internet +{%- if proxy %} +export http_proxy={{ proxy }} +export https_proxy={{ proxy }} +export no_proxy={{ salt['pillar.get']('manager:no_proxy') }} +{%- endif %} + +mkdir -p /tmp/yara +cd /tmp/yara +git clone https://github.com/Security-Onion-Solutions/securityonion-yara.git +mkdir -p /nsm/rules/yara +rsync -shav --progress /tmp/yara/securityonion-yara/yara /nsm/rules/ +cd /tmp +rm -rf /tmp/yara + diff --git a/salt/suricata/enabled.sls b/salt/suricata/enabled.sls index bfe91d244..ce309e41a 100644 --- a/salt/suricata/enabled.sls +++ b/salt/suricata/enabled.sls @@ -6,6 +6,8 @@ {% from 'allowed_states.map.jinja' import allowed_states %} {% if sls.split('.')[0] in allowed_states %} {% from 'vars/globals.map.jinja' import GLOBALS %} +{% from 'docker/docker.map.jinja' import DOCKER %} + include: - suricata.config @@ -17,6 +19,11 @@ so-suricata: - privileged: True - environment: - INTERFACE={{ GLOBALS.sensor.interface }} + {% if DOCKER.containers['so-suricata'].extra_env %} + {% for XTRAENV in DOCKER.containers['so-suricata'].extra_env %} + - {{ XTRAENV }} + {% endfor %} + {% endif %} - binds: - /opt/so/conf/suricata/suricata.yaml:/etc/suricata/suricata.yaml:ro - /opt/so/conf/suricata/threshold.conf:/etc/suricata/threshold.conf:ro @@ -25,7 +32,18 @@ so-suricata: - /nsm/suricata/:/nsm/:rw - /nsm/suricata/extracted:/var/log/suricata//filestore:rw - /opt/so/conf/suricata/bpf:/etc/suricata/bpf:ro + {% if DOCKER.containers['so-suricata'].custom_bind_mounts %} + {% for BIND in DOCKER.containers['so-suricata'].custom_bind_mounts %} 
+ - {{ BIND }} + {% endfor %} + {% endif %} - network_mode: host + {% if DOCKER.containers['so-suricata'].extra_hosts %} + - extra_hosts: + {% for XTRAHOST in DOCKER.containers['so-suricata'].extra_hosts %} + - {{ XTRAHOST }} + {% endfor %} + {% endif %} - watch: - file: suriconfig - file: surithresholding diff --git a/salt/telegraf/enabled.sls b/salt/telegraf/enabled.sls index 04459d7ff..209c85fb0 100644 --- a/salt/telegraf/enabled.sls +++ b/salt/telegraf/enabled.sls @@ -6,6 +6,8 @@ {% from 'allowed_states.map.jinja' import allowed_states %} {% if sls.split('.')[0] in allowed_states %} {% from 'vars/globals.map.jinja' import GLOBALS %} +{% from 'docker/docker.map.jinja' import DOCKER %} + include: - telegraf.config @@ -22,6 +24,11 @@ so-telegraf: - HOST_SYS=/host/sys - HOST_MOUNT_PREFIX=/host - GODEBUG=x509ignoreCN=0 + {% if DOCKER.containers['so-telegraf'].extra_env %} + {% for XTRAENV in DOCKER.containers['so-telegraf'].extra_env %} + - {{ XTRAENV }} + {% endfor %} + {% endif %} - network_mode: host - init: True - binds: @@ -47,6 +54,17 @@ so-telegraf: - /opt/so/log/suricata:/var/log/suricata:ro - /opt/so/log/raid:/var/log/raid:ro - /opt/so/log/sostatus:/var/log/sostatus:ro + {% if DOCKER.containers['so-telegraf'].custom_bind_mounts %} + {% for BIND in DOCKER.containers['so-telegraf'].custom_bind_mounts %} + - {{ BIND }} + {% endfor %} + {% endif %} + {% if DOCKER.containers['so-telegraf'].extra_hosts %} + - extra_hosts: + {% for XTRAHOST in DOCKER.containers['so-telegraf'].extra_hosts %} + - {{ XTRAHOST }} + {% endfor %} + {% endif %} - watch: - file: tgrafconf - file: tgrafsyncscripts diff --git a/salt/zeek/defaults.yaml b/salt/zeek/defaults.yaml index ca3168b8b..7fa524580 100644 --- a/salt/zeek/defaults.yaml +++ b/salt/zeek/defaults.yaml @@ -76,7 +76,10 @@ zeek: - LogAscii::use_json = T; - CaptureLoss::watch_interval = 5 mins; networks: - HOME_NET: 192.168.0.0/16,10.0.0.0/8,172.16.0.0/12 + HOME_NET: + - 192.168.0.0/16 + - 10.0.0.0/8 + - 172.16.0.0/12 
file_extraction: - application/x-dosexec: exe - application/pdf: pdf diff --git a/salt/zeek/enabled.sls b/salt/zeek/enabled.sls index 611402fbc..7d444ff43 100644 --- a/salt/zeek/enabled.sls +++ b/salt/zeek/enabled.sls @@ -6,6 +6,8 @@ {% from 'allowed_states.map.jinja' import allowed_states %} {% if sls.split('.')[0] in allowed_states %} {% from 'vars/globals.map.jinja' import GLOBALS %} +{% from 'docker/docker.map.jinja' import DOCKER %} + include: - zeek.config @@ -31,8 +33,25 @@ so-zeek: - /opt/so/conf/zeek/policy/custom:/opt/zeek/share/zeek/policy/custom:ro - /opt/so/conf/zeek/policy/cve-2020-0601:/opt/zeek/share/zeek/policy/cve-2020-0601:ro - /opt/so/conf/zeek/policy/intel:/opt/zeek/share/zeek/policy/intel:rw - - /opt/so/conf/zeek/bpf:/opt/zeek/etc/bpf:ro + - /opt/so/conf/zeek/bpf:/opt/zeek/etc/bpf:ro + {% if DOCKER.containers['so-zeek'].custom_bind_mounts %} + {% for BIND in DOCKER.containers['so-zeek'].custom_bind_mounts %} + - {{ BIND }} + {% endfor %} + {% endif %} - network_mode: host + {% if DOCKER.containers['so-zeek'].extra_hosts %} + - extra_hosts: + {% for XTRAHOST in DOCKER.containers['so-zeek'].extra_hosts %} + - {{ XTRAHOST }} + {% endfor %} + {% endif %} + {% if DOCKER.containers['so-zeek'].extra_env %} + - environment: + {% for XTRAENV in DOCKER.containers['so-zeek'].extra_env %} + - {{ XTRAENV }} + {% endfor %} + {% endif %} - watch: - file: /opt/so/conf/zeek/local.zeek - file: /opt/so/conf/zeek/node.cfg diff --git a/salt/zeek/files/networks.cfg.jinja b/salt/zeek/files/networks.cfg.jinja index 5818380ce..1174f2f61 100644 --- a/salt/zeek/files/networks.cfg.jinja +++ b/salt/zeek/files/networks.cfg.jinja @@ -1,5 +1,5 @@ {%- if NETWORKS.HOME_NET %} -{%- for HN in NETWORKS.HOME_NET.split(',') %} +{%- for HN in NETWORKS.HOME_NET %} {{ HN }} {%- endfor %} {%- endif %} diff --git a/salt/zeek/soc_zeek.yaml b/salt/zeek/soc_zeek.yaml index 8410d4e75..fabd7c209 100644 --- a/salt/zeek/soc_zeek.yaml +++ b/salt/zeek/soc_zeek.yaml @@ -21,6 +21,15 @@ zeek: 
forcedType: "[]string" advanced: True helpLink: zeek.html + networks: + HOME_NET: + description: List of IP or CIDR blocks to define as the HOME_NET. + forcedType: "[]string" + advanced: False + helpLink: zeek.html + multiline: True + regex: ^(([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))?)?$ + regexFailureMessage: You must enter a valid IP address or CIDR. node: lb_procs: description: Contains the number of CPU cores or workers used by Zeek. This setting should only be applied to individual nodes and will be ignored if CPU affinity is enabled. diff --git a/setup/so-functions b/setup/so-functions index 1eb65c0fb..247cf6c94 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1542,15 +1542,9 @@ create_strelka_pillar() { "strelka:"\ " enabled: $STRELKA"\ " rules: 1" > "$strelka_pillar_file" - if [[ $is_airgap ]]; then - printf '%s\n'\ - " repos:"\ - " - 'https://$HOSTNAME/repo/rules/strelka'" >> "$strelka_pillar_file" - else - printf '%s\n'\ - " repos:"\ - " - 'https://github.com/Neo23x0/signature-base'" >> "$strelka_pillar_file" - fi + printf '%s\n'\ + " repos:"\ + " - 'https://$HOSTNAME:7788/yara'" >> "$strelka_pillar_file" } backup_pillar() { diff --git a/setup/so-setup b/setup/so-setup index d8f07b36a..d38296562 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -644,6 +644,13 @@ if ! [[ -f $install_opt_file ]]; then logCmd "salt-call state.apply -l info manager" logCmd "salt-call state.apply influxdb -l info" logCmd "salt-call state.highstate -l info" + if [[ ! $is_airgap ]]; then + title "Downloading IDS Rules" + logCmd "so-rule-update" + title "Downloading YARA rules" + logCmd "runuser -l socore -c 'so-yara-update'" + title "Restarting Strelka to use new rules" + fi title "Setting up Kibana Default Space" logCmd "so-kibana-space-defaults" add_web_user