Merge branch '2.4/dev' into kilo

Jason Ertel
2023-05-30 11:51:57 -04:00
50 changed files with 906 additions and 726 deletions

View File

@@ -28,6 +28,23 @@ so-curator:
- /opt/so/conf/curator/curator.yml:/etc/curator/config/curator.yml:ro
- /opt/so/conf/curator/action/:/etc/curator/action:ro
- /opt/so/log/curator:/var/log/curator:rw
{% if DOCKER.containers['so-curator'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-curator'].custom_bind_mounts %}
- {{ BIND }}
{% endfor %}
{% endif %}
{% if DOCKER.containers['so-curator'].extra_hosts %}
- extra_hosts:
{% for XTRAHOST in DOCKER.containers['so-curator'].extra_hosts %}
- {{ XTRAHOST }}
{% endfor %}
{% endif %}
{% if DOCKER.containers['so-curator'].extra_env %}
- environment:
{% for XTRAENV in DOCKER.containers['so-curator'].extra_env %}
- {{ XTRAENV }}
{% endfor %}
{% endif %}
- require:
- file: actionconfs
- file: curconf
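
The three conditional blocks above are driven by per-container pillar settings. As an illustrative sketch only (paths, hostnames, and variables are hypothetical; the nesting mirrors the DOCKER.containers lookups used in the state), a local pillar override for this container might look like:

docker:
  containers:
    so-curator:
      custom_bind_mounts:
        - /opt/custom/curator-scripts:/scripts:ro
      extra_hosts:
        - mirror.example.com:192.0.2.10
      extra_env:
        - HTTPS_PROXY=http://192.0.2.10:3128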

View File

@@ -10,12 +10,14 @@ docker:
- 0.0.0.0:5000:5000
custom_bind_mounts: []
extra_hosts: []
extra_env: []
'so-elastic-fleet':
final_octet: 21
port_bindings:
- 0.0.0.0:8220:8220/tcp
custom_bind_mounts: []
extra_hosts: []
extra_env: []
'so-elasticsearch':
final_octet: 22
port_bindings:
@@ -23,22 +25,26 @@ docker:
- 0.0.0.0:9300:9300/tcp
custom_bind_mounts: []
extra_hosts: []
extra_env: []
'so-idstools':
final_octet: 25
custom_bind_mounts: []
extra_hosts: []
extra_env: []
'so-influxdb':
final_octet: 26
port_bindings:
- 0.0.0.0:8086:8086
custom_bind_mounts: []
extra_hosts: []
extra_env: []
'so-kibana':
final_octet: 27
port_bindings:
- 0.0.0.0:5601:5601
custom_bind_mounts: []
extra_hosts: []
extra_env: []
'so-kratos':
final_octet: 28
port_bindings:
@@ -46,6 +52,7 @@ docker:
- 0.0.0.0:4434:4434
custom_bind_mounts: []
extra_hosts: []
extra_env: []
'so-logstash':
final_octet: 29
port_bindings:
@@ -61,12 +68,14 @@ docker:
- 0.0.0.0:9600:9600
custom_bind_mounts: []
extra_hosts: []
extra_env: []
'so-mysql':
final_octet: 30
port_bindings:
- 0.0.0.0:3306:3306
custom_bind_mounts: []
extra_hosts: []
extra_env: []
'so-nginx':
final_octet: 31
port_bindings:
@@ -76,12 +85,14 @@ docker:
- 7788:7788
custom_bind_mounts: []
extra_hosts: []
extra_env: []
'so-playbook':
final_octet: 32
port_bindings:
- 0.0.0.0:3000:3000
custom_bind_mounts: []
extra_hosts: []
extra_env: []
'so-redis':
final_octet: 33
port_bindings:
@@ -89,63 +100,101 @@ docker:
- 0.0.0.0:9696:9696
custom_bind_mounts: []
extra_hosts: []
extra_env: []
'so-sensoroni':
final_octet: 99
custom_bind_mounts: []
extra_hosts: []
extra_env: []
'so-soc':
final_octet: 34
port_bindings:
- 0.0.0.0:9822:9822
custom_bind_mounts: []
extra_hosts: []
extra_env: []
'so-soctopus':
final_octet: 35
port_bindings:
- 0.0.0.0:7000:7000
custom_bind_mounts: []
extra_hosts: []
extra_env: []
'so-strelka-backend':
final_octet: 36
custom_bind_mounts: []
extra_hosts: []
extra_env: []
'so-strelka-filestream':
final_octet: 37
custom_bind_mounts: []
extra_hosts: []
extra_env: []
'so-strelka-frontend':
final_octet: 38
port_bindings:
- 0.0.0.0:57314:57314
custom_bind_mounts: []
extra_hosts: []
extra_env: []
'so-strelka-manager':
final_octet: 39
custom_bind_mounts: []
extra_hosts: []
extra_env: []
'so-strelka-gatekeeper':
final_octet: 40
port_bindings:
- 0.0.0.0:6381:6379
custom_bind_mounts: []
extra_hosts: []
extra_env: []
'so-strelka-coordinator':
final_octet: 41
port_bindings:
- 0.0.0.0:6380:6379
custom_bind_mounts: []
extra_hosts: []
extra_env: []
'so-elastalert':
final_octet: 42
custom_bind_mounts: []
extra_hosts: []
extra_env: []
'so-curator':
final_octet: 43
custom_bind_mounts: []
extra_hosts: []
extra_env: []
'so-elastic-fleet-package-registry':
final_octet: 44
port_bindings:
- 0.0.0.0:8080:8080/tcp
custom_bind_mounts: []
extra_hosts: []
extra_env: []
'so-idh':
final_octet: 45
custom_bind_mounts: []
extra_hosts: []
extra_env: []
'so-telegraf':
final_octet: 99
custom_bind_mounts: []
extra_hosts: []
extra_env: []
'so-steno':
final_octet: 99
custom_bind_mounts: []
extra_hosts: []
extra_env: []
'so-suricata':
final_octet: 99
custom_bind_mounts: []
extra_hosts: []
extra_env: []
'so-zeek':
final_octet: 99
custom_bind_mounts: []
extra_hosts: []
extra_env: []

View File

@@ -40,6 +40,12 @@ docker:
helpLink: docker.html
multiline: True
forcedType: "[]string"
extra_env:
description: List of additional ENV entries for the container.
advanced: True
helpLink: docker.html
multiline: True
forcedType: "[]string"
so-dockerregistry: *dockerOptions
so-elastalert: *dockerOptions
so-elastic-fleet-package-registry: *dockerOptions
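
The repeated *dockerOptions entries work via standard YAML anchors and aliases: the option annotations (including the new extra_env block) are declared once and every container references the same block. A trimmed, standalone illustration of the mechanism (keys and container names taken from the hunk, structure simplified):

'so-curator': &dockerOptions        # anchor: defines the shared annotation block
  custom_bind_mounts:
    description: List of additional bind mounts for the container.
    forcedType: "[]string"
  extra_env:
    description: List of additional ENV entries for the container.
    forcedType: "[]string"
'so-dockerregistry': *dockerOptions # alias: reuses the block verbatim
'so-elastalert': *dockerOptions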

View File

@@ -31,8 +31,24 @@ so-elastalert:
- /opt/so/log/elastalert:/var/log/elastalert:rw
- /opt/so/conf/elastalert/modules/:/opt/elastalert/modules/:ro
- /opt/so/conf/elastalert/elastalert_config.yaml:/opt/elastalert/config.yaml:ro
{% if DOCKER.containers['so-elastalert'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-elastalert'].custom_bind_mounts %}
- {{ BIND }}
{% endfor %}
{% endif %}
- extra_hosts:
- {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }}
{% if DOCKER.containers['so-elastalert'].extra_hosts %}
{% for XTRAHOST in DOCKER.containers['so-elastalert'].extra_hosts %}
- {{ XTRAHOST }}
{% endfor %}
{% endif %}
{% if DOCKER.containers['so-elastalert'].extra_env %}
- environment:
{% for XTRAENV in DOCKER.containers['so-elastalert'].extra_env %}
- {{ XTRAENV }}
{% endfor %}
{% endif %}
- require:
- cmd: wait_for_elasticsearch
- file: elastarules

View File

@@ -24,11 +24,27 @@ so-elastic-fleet-package-registry:
- ipv4_address: {{ DOCKER.containers['so-elastic-fleet-package-registry'].ip }}
- extra_hosts:
- {{ GLOBALS.hostname }}:{{ GLOBALS.node_ip }}
{% if DOCKER.containers['so-elastic-fleet-package-registry'].extra_hosts %}
{% for XTRAHOST in DOCKER.containers['so-elastic-fleet-package-registry'].extra_hosts %}
- {{ XTRAHOST }}
{% endfor %}
{% endif %}
- port_bindings:
{% for BINDING in DOCKER.containers['so-elastic-fleet-package-registry'].port_bindings %}
- {{ BINDING }}
{% endfor %}
{% if DOCKER.containers['so-elastic-fleet-package-registry'].custom_bind_mounts %}
- binds:
{% for BIND in DOCKER.containers['so-elastic-fleet-package-registry'].custom_bind_mounts %}
- {{ BIND }}
{% endfor %}
{% endif %}
{% if DOCKER.containers['so-elastic-fleet-package-registry'].extra_env %}
- environment:
{% for XTRAENV in DOCKER.containers['so-elastic-fleet-package-registry'].extra_env %}
- {{ XTRAENV }}
{% endfor %}
{% endif %}
delete_so-elastic-fleet-package-registry_so-status.disabled:
file.uncomment:
- name: /opt/so/conf/so-status/so-status.conf

View File

@@ -28,6 +28,11 @@ so-elastic-fleet:
- extra_hosts:
- {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }}
- {{ GLOBALS.hostname }}:{{ GLOBALS.node_ip }}
{% if DOCKER.containers['so-elastic-fleet'].extra_hosts %}
{% for XTRAHOST in DOCKER.containers['so-elastic-fleet'].extra_hosts %}
- {{ XTRAHOST }}
{% endfor %}
{% endif %}
- port_bindings:
{% for BINDING in DOCKER.containers['so-elastic-fleet'].port_bindings %}
- {{ BINDING }}
@@ -35,6 +40,11 @@ so-elastic-fleet:
- binds:
- /etc/pki:/etc/pki:ro
#- /opt/so/conf/elastic-fleet/state:/usr/share/elastic-agent/state:rw
{% if DOCKER.containers['so-elastic-fleet'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-elastic-fleet'].custom_bind_mounts %}
- {{ BIND }}
{% endfor %}
{% endif %}
- environment:
- FLEET_SERVER_ENABLE=true
- FLEET_URL=https://{{ GLOBALS.node_ip }}:8220
@@ -45,6 +55,11 @@ so-elastic-fleet:
- FLEET_SERVER_CERT=/etc/pki/elasticfleet.crt
- FLEET_SERVER_CERT_KEY=/etc/pki/elasticfleet.key
- FLEET_CA=/etc/pki/tls/certs/intca.crt
{% if DOCKER.containers['so-elastic-fleet'].extra_env %}
{% for XTRAENV in DOCKER.containers['so-elastic-fleet'].extra_env %}
- {{ XTRAENV }}
{% endfor %}
{% endif %}
{% endif %}
delete_so-elastic-fleet_so-status.disabled:

View File

@@ -26,6 +26,11 @@ so-elasticsearch:
- sobridge:
- ipv4_address: {{ DOCKER.containers['so-elasticsearch'].ip }}
- extra_hosts: {{ LOGSTASH_NODES }}
{% if DOCKER.containers['so-elasticsearch'].extra_hosts %}
{% for XTRAHOST in DOCKER.containers['so-elasticsearch'].extra_hosts %}
- {{ XTRAHOST }}
{% endfor %}
{% endif %}
- environment:
{% if LOGSTASH_NODES | length == 1 %}
- discovery.type=single-node
@@ -35,6 +40,11 @@ so-elasticsearch:
- memlock=-1:-1
- nofile=65536:65536
- nproc=4096
{% if DOCKER.containers['so-elasticsearch'].extra_env %}
{% for XTRAENV in DOCKER.containers['so-elasticsearch'].extra_env %}
- {{ XTRAENV }}
{% endfor %}
{% endif %}
- port_bindings:
{% for BINDING in DOCKER.containers['so-elasticsearch'].port_bindings %}
- {{ BINDING }}
@@ -60,6 +70,11 @@ so-elasticsearch:
- {{ repo }}:{{ repo }}:rw
{% endfor %}
{% endif %}
{% if DOCKER.containers['so-elasticsearch'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-elasticsearch'].custom_bind_mounts %}
- {{ BIND }}
{% endfor %}
{% endif %}
- watch:
- file: cacertz
- file: esyml

View File

@@ -6,8 +6,13 @@ global:
managerip:
description: The IP address of the grid manager.
global: True
advanced: True
regex: ^(([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))?)?$
regexFailureMessage: You must enter a valid IP address or CIDR.
mdengine:
description: What engine to use for metadata generation. Options are ZEEK and SURICATA.
regex: ^(ZEEK|SURICATA)$
regexFailureMessage: You must enter either ZEEK or SURICATA.
global: True
ids:
description: Which IDS engine to use. Currently only Suricata is supported.
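
For reference, some hypothetical values checked against the managerip pattern above (note the regex validates shape only and does not range-check octets, so e.g. 999.1.1.1 would still pass):

global:
  managerip: 192.0.2.10      # accepted: bare IPv4 address
  # managerip: 192.0.2.0/24  # accepted: CIDR, prefix /0 through /32
  # managerip: 192.0.2.0/33  # rejected: prefix length out of range
  # managerip: so-manager    # rejected: hostnames do not match
  # managerip: ''            # accepted: the outer group is optional, so empty is valid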

View File

@@ -20,6 +20,23 @@ so-idh:
- binds:
- /nsm/idh:/var/tmp:rw
- /opt/so/conf/idh/opencanary.conf:/etc/opencanaryd/opencanary.conf:ro
{% if DOCKER.containers['so-idh'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-idh'].custom_bind_mounts %}
- {{ BIND }}
{% endfor %}
{% endif %}
{% if DOCKER.containers['so-idh'].extra_hosts %}
- extra_hosts:
{% for XTRAHOST in DOCKER.containers['so-idh'].extra_hosts %}
- {{ XTRAHOST }}
{% endfor %}
{% endif %}
{% if DOCKER.containers['so-idh'].extra_env %}
- environment:
{% for XTRAENV in DOCKER.containers['so-idh'].extra_env %}
- {{ XTRAENV }}
{% endfor %}
{% endif %}
- watch:
- file: opencanary_config
- require:

View File

@@ -24,14 +24,14 @@ idstools_sbin:
- group: 939
- file_mode: 755
#idstools_sbin_jinja:
# file.recurse:
# - name: /usr/sbin
# - source: salt://idstools/tools/sbin_jinja
# - user: 934
# - group: 939
# - file_mode: 755
# - template: jinja
idstools_sbin_jinja:
file.recurse:
- name: /usr/sbin
- source: salt://idstools/tools/sbin_jinja
- user: 934
- group: 939
- file_mode: 755
- template: jinja
{% else %}

View File

@@ -26,10 +26,33 @@ so-idstools:
- http_proxy={{ proxy }}
- https_proxy={{ proxy }}
- no_proxy={{ salt['pillar.get']('manager:no_proxy') }}
{% if DOCKER.containers['so-idstools'].extra_env %}
{% for XTRAENV in DOCKER.containers['so-idstools'].extra_env %}
- {{ XTRAENV }}
{% endfor %}
{% endif %}
{% elif DOCKER.containers['so-idstools'].extra_env %}
- environment:
{% for XTRAENV in DOCKER.containers['so-idstools'].extra_env %}
- {{ XTRAENV }}
{% endfor %}
{% endif %}
- binds:
- /opt/so/conf/idstools/etc:/opt/so/idstools/etc:ro
- /opt/so/rules/nids:/opt/so/rules/nids:rw
- /nsm/rules/:/nsm/rules/:rw
{% if DOCKER.containers['so-idstools'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-idstools'].custom_bind_mounts %}
- {{ BIND }}
{% endfor %}
{% endif %}
- extra_hosts:
- {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }}
{% if DOCKER.containers['so-idstools'].extra_hosts %}
{% for XTRAHOST in DOCKER.containers['so-idstools'].extra_hosts %}
- {{ XTRAHOST }}
{% endfor %}
{% endif %}
- watch:
- file: idstoolsetcsync

View File

@@ -1,35 +1,15 @@
{%- from 'vars/globals.map.jinja' import GLOBALS %}
{%- from 'idstools/map.jinja' import IDSTOOLSMERGED %}
{%- if GLOBALS.airgap is sameas true -%}
{%- from 'vars/globals.map.jinja' import GLOBALS -%}
{%- from 'idstools/map.jinja' import IDSTOOLSMERGED -%}
--merged=/opt/so/rules/nids/all.rules
--local=/opt/so/rules/nids/local.rules
{%- if GLOBALS.md_engine == "SURICATA" %}
--local=/opt/so/rules/nids/sorules/extraction.rules
--local=/opt/so/rules/nids/sorules/filters.rules
{%- endif %}
--url=http://{{ GLOBALS.manager }}:7788/rules/emerging-all.rules
--url=http://{{ GLOBALS.manager }}:7788/suricata/emerging-all.rules
--disable=/opt/so/idstools/etc/disable.conf
--enable=/opt/so/idstools/etc/enable.conf
--modify=/opt/so/idstools/etc/modify.conf
{%- else -%}
--suricata-version=6.0
--merged=/opt/so/rules/nids/all.rules
--local=/opt/so/rules/nids/local.rules
{%- if GLOBALS.md_engine == "SURICATA" %}
--local=/opt/so/rules/nids/sorules/extraction.rules
--local=/opt/so/rules/nids/sorules/filters.rules
{%- endif %}
--disable=/opt/so/idstools/etc/disable.conf
--enable=/opt/so/idstools/etc/enable.conf
--modify=/opt/so/idstools/etc/modify.conf
{%- if IDSTOOLSMERGED.config.ruleset == 'ETOPEN' %}
--etopen
{%- elif IDSTOOLSMERGED.config.ruleset == 'ETPRO' %}
--etpro={{ IDSTOOLSMERGED.config.oinkcode }}
{%- elif IDSTOOLSMERGED.config.ruleset == 'TALOS' %}
--url=https://www.snort.org/rules/snortrules-snapshot-2983.tar.gz?oinkcode={{ IDSTOOLSMERGED.config.oinkcode }}
{%- endif %}
{%- endif %}
{%- if IDSTOOLSMERGED.config.urls | length > 0 %}
{%- for URL in IDSTOOLSMERGED.config.urls %}
--url={{ URL }}

View File

@@ -1,10 +0,0 @@
#!/bin/bash
. /usr/sbin/so-common
argstr=""
for arg in "$@"; do
argstr="${argstr} \"${arg}\""
done
docker exec so-idstools /bin/bash -c "cd /opt/so/idstools/etc && idstools-rulecat --force ${argstr}"

View File

@@ -0,0 +1,32 @@
#!/bin/bash
. /usr/sbin/so-common
{%- from 'vars/globals.map.jinja' import GLOBALS %}
{%- from 'idstools/map.jinja' import IDSTOOLSMERGED %}
{%- set proxy = salt['pillar.get']('manager:proxy') %}
mkdir -p /nsm/rules/suricata
chown -R socore:socore /nsm/rules/suricata
# Download the rules from the internet
{%- if not GLOBALS.airgap %}
{%- if proxy %}
export http_proxy={{ proxy }}
export https_proxy={{ proxy }}
export no_proxy={{ salt['pillar.get']('manager:no_proxy') }}
{%- endif %}
{%- if IDSTOOLSMERGED.config.ruleset == 'ETOPEN' %}
docker exec so-idstools idstools-rulecat -v --suricata-version 6.0 -o /nsm/rules/suricata/ --merged=/nsm/rules/suricata/emerging-all.rules --force
{%- elif IDSTOOLSMERGED.config.ruleset == 'ETPRO' %}
docker exec so-idstools idstools-rulecat -v --suricata-version 6.0 -o /nsm/rules/suricata/ --merged=/nsm/rules/suricata/emerging-all.rules --force --etpro={{ IDSTOOLSMERGED.config.oinkcode }}
{%- elif IDSTOOLSMERGED.config.ruleset == 'TALOS' %}
docker exec so-idstools idstools-rulecat -v --suricata-version 6.0 -o /nsm/rules/suricata/ --merged=/nsm/rules/suricata/emerging-all.rules --force --url=https://www.snort.org/rules/snortrules-snapshot-2983.tar.gz?oinkcode={{ IDSTOOLSMERGED.config.oinkcode }}
{%- endif %}
{%- endif %}
argstr=""
for arg in "$@"; do
argstr="${argstr} \"${arg}\""
done
docker exec so-idstools /bin/bash -c "cd /opt/so/idstools/etc && idstools-rulecat --force ${argstr}"

View File

@@ -30,16 +30,32 @@ so-influxdb:
- DOCKER_INFLUXDB_INIT_ORG=Security Onion
- DOCKER_INFLUXDB_INIT_BUCKET=telegraf/so_short_term
- DOCKER_INFLUXDB_INIT_ADMIN_TOKEN={{ TOKEN }}
{% if DOCKER.containers['so-influxdb'].extra_env %}
{% for XTRAENV in DOCKER.containers['so-influxdb'].extra_env %}
- {{ XTRAENV }}
{% endfor %}
{% endif %}
- binds:
- /opt/so/log/influxdb/:/log:rw
- /opt/so/conf/influxdb/config.yaml:/conf/config.yaml:ro
- /nsm/influxdb:/var/lib/influxdb2:rw
- /etc/pki/influxdb.crt:/conf/influxdb.crt:ro
- /etc/pki/influxdb.key:/conf/influxdb.key:ro
{% if DOCKER.containers['so-influxdb'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-influxdb'].custom_bind_mounts %}
- {{ BIND }}
{% endfor %}
{% endif %}
- port_bindings:
{% for BINDING in DOCKER.containers['so-influxdb'].port_bindings %}
- {{ BINDING }}
{% endfor %}
{% if DOCKER.containers['so-influxdb'].extra_hosts %}
- extra_hosts:
{% for XTRAHOST in DOCKER.containers['so-influxdb'].extra_hosts %}
- {{ XTRAHOST }}
{% endfor %}
{% endif %}
- watch:
- file: influxdbconf
- require:

View File

@@ -25,13 +25,28 @@ so-kibana:
- ELASTICSEARCH_HOST={{ GLOBALS.manager }}
- ELASTICSEARCH_PORT=9200
- MANAGER={{ GLOBALS.manager }}
{% if DOCKER.containers['so-kibana'].extra_env %}
{% for XTRAENV in DOCKER.containers['so-kibana'].extra_env %}
- {{ XTRAENV }}
{% endfor %}
{% endif %}
- extra_hosts:
- {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }}
{% if DOCKER.containers['so-kibana'].extra_hosts %}
{% for XTRAHOST in DOCKER.containers['so-kibana'].extra_hosts %}
- {{ XTRAHOST }}
{% endfor %}
{% endif %}
- binds:
- /opt/so/conf/kibana/etc:/usr/share/kibana/config:rw
- /opt/so/log/kibana:/var/log/kibana:rw
- /opt/so/conf/kibana/customdashboards:/usr/share/kibana/custdashboards:ro
- /sys/fs/cgroup:/sys/fs/cgroup:ro
{% if DOCKER.containers['so-kibana'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-kibana'].custom_bind_mounts %}
- {{ BIND }}
{% endfor %}
{% endif %}
- port_bindings:
{% for BINDING in DOCKER.containers['so-kibana'].port_bindings %}
- {{ BINDING }}

View File

@@ -25,10 +25,27 @@ so-kratos:
- /opt/so/conf/kratos/kratos.yaml:/kratos-conf/kratos.yaml:ro
- /opt/so/log/kratos/:/kratos-log:rw
- /nsm/kratos/db:/kratos-data:rw
{% if DOCKER.containers['so-kratos'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-kratos'].custom_bind_mounts %}
- {{ BIND }}
{% endfor %}
{% endif %}
- port_bindings:
{% for BINDING in DOCKER.containers['so-kratos'].port_bindings %}
- {{ BINDING }}
{% endfor %}
{% if DOCKER.containers['so-kratos'].extra_hosts %}
- extra_hosts:
{% for XTRAHOST in DOCKER.containers['so-kratos'].extra_hosts %}
- {{ XTRAHOST }}
{% endfor %}
{% endif %}
{% if DOCKER.containers['so-kratos'].extra_env %}
- environment:
{% for XTRAENV in DOCKER.containers['so-kratos'].extra_env %}
- {{ XTRAENV }}
{% endfor %}
{% endif %}
- restart_policy: unless-stopped
- watch:
- file: kratosschema

View File

@@ -26,8 +26,18 @@ so-logstash:
- ipv4_address: {{ DOCKER.containers['so-logstash'].ip }}
- user: logstash
- extra_hosts: {{ REDIS_NODES }}
{% if DOCKER.containers['so-logstash'].extra_hosts %}
{% for XTRAHOST in DOCKER.containers['so-logstash'].extra_hosts %}
- {{ XTRAHOST }}
{% endfor %}
{% endif %}
- environment:
- LS_JAVA_OPTS=-Xms{{ lsheap }} -Xmx{{ lsheap }}
{% if DOCKER.containers['so-logstash'].extra_env %}
{% for XTRAENV in DOCKER.containers['so-logstash'].extra_env %}
- {{ XTRAENV }}
{% endfor %}
{% endif %}
- port_bindings:
{% for BINDING in DOCKER.containers['so-logstash'].port_bindings %}
- {{ BINDING }}
@@ -65,6 +75,11 @@ so-logstash:
- /opt/so/log/fleet/:/osquery/logs:ro
- /opt/so/log/strelka:/strelka:ro
{% endif %}
{% if DOCKER.containers['so-logstash'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-logstash'].custom_bind_mounts %}
- {{ BIND }}
{% endfor %}
{% endif %}
- watch:
- file: lsetcsync
{% for assigned_pipeline in LOGSTASH_MERGED.assigned_pipelines.roles[GLOBALS.role.split('-')[1]] %}

View File

@@ -15,7 +15,6 @@ POSTVERSION=$INSTALLEDVERSION
INSTALLEDSALTVERSION=$(salt --versions-report | grep Salt: | awk '{print $2}')
BATCHSIZE=5
SOUP_LOG=/root/soup.log
INFLUXDB_MIGRATION_LOG=/opt/so/log/influxdb/soup_migration.log
WHATWOULDYOUSAYYAHDOHERE=soup
whiptail_title='Security Onion UPdater'
NOTIFYCUSTOMELASTICCONFIG=false
@@ -304,11 +303,7 @@ check_log_size_limit() {
check_os_updates() {
# Check to see if there are OS updates
NEEDUPDATES="We have detected missing operating system (OS) updates. Do you want to install these OS updates now? This could take a while depending on the size of your grid and how many packages are missing, but it is recommended to keep your system updated."
if [[ $OS == 'ubuntu' ]]; then
OSUPDATES=$(apt list --upgradeable | grep -v "^Listing..." | grep -v "^docker-ce" | grep -v "^wazuh-" | grep -v "^salt-" | wc -l)
else
OSUPDATES=$(yum -q list updates | wc -l)
fi
if [[ "$OSUPDATES" -gt 0 ]]; then
if [[ -z $UNATTENDED ]]; then
echo "$NEEDUPDATES"
@@ -362,117 +357,12 @@ clone_to_tmp() {
fi
}
elastalert_indices_check() {
# Stop Elastalert to prevent Elastalert indices from being re-created
if grep -q "^so-elastalert$" /opt/so/conf/so-status/so-status.conf ; then
so-elastalert-stop || true
fi
# Wait for ElasticSearch to initialize
echo -n "Waiting for ElasticSearch..."
COUNT=0
ELASTICSEARCH_CONNECTED="no"
while [[ "$COUNT" -le 240 ]]; do
so-elasticsearch-query / -k --output /dev/null
if [ $? -eq 0 ]; then
ELASTICSEARCH_CONNECTED="yes"
echo "connected!"
break
else
((COUNT+=1))
sleep 1
echo -n "."
fi
done
# Unable to connect to Elasticsearch
if [ "$ELASTICSEARCH_CONNECTED" == "no" ]; then
echo
echo -e "Connection attempt timed out. Unable to connect to ElasticSearch. \nPlease try: \n -checking log(s) in /var/log/elasticsearch/\n -running 'sudo docker ps' \n -running 'sudo so-elastic-restart'"
echo
exit 1
fi
# Check Elastalert indices
echo "Deleting Elastalert indices to prevent issues with upgrade to Elastic 8..."
CHECK_COUNT=0
while [[ "$CHECK_COUNT" -le 2 ]]; do
# Delete Elastalert indices
for i in $(so-elasticsearch-query _cat/indices | grep elastalert | awk '{print $3}'); do
so-elasticsearch-query $i -XDELETE;
done
# Check to ensure Elastalert indices are deleted
COUNT=0
ELASTALERT_INDICES_DELETED="no"
while [[ "$COUNT" -le 240 ]]; do
RESPONSE=$(so-elasticsearch-query elastalert*)
if [[ "$RESPONSE" == "{}" ]]; then
ELASTALERT_INDICES_DELETED="yes"
echo "Elastalert indices successfully deleted."
break
else
((COUNT+=1))
sleep 1
echo -n "."
fi
done
((CHECK_COUNT+=1))
done
# If we were unable to delete the Elastalert indices, exit the script
if [ "$ELASTALERT_INDICES_DELETED" == "no" ]; then
echo
echo -e "Unable to connect to delete Elastalert indices. Exiting."
echo
exit 1
fi
}
enable_highstate() {
echo "Enabling highstate."
salt-call state.enable highstate -l info --local
echo ""
}
es_version_check() {
CHECK_ES=$(echo $INSTALLEDVERSION | awk -F. '{print $3}')
if [ "$CHECK_ES" -lt "110" ]; then
echo "You are currently running Security Onion $INSTALLEDVERSION. You will need to update to version 2.3.130 before updating to 2.3.140 or higher."
echo ""
echo "If your deployment has Internet access, you can use the following command to update to 2.3.130:"
echo "sudo BRANCH=2.3.130-20220607 soup"
echo ""
echo "Otherwise, if your deployment is configured for airgap, you can instead download the 2.3.130 ISO image from https://download.securityonion.net/file/securityonion/securityonion-2.3.130-20220607.iso."
echo ""
echo "*** Once you have updated to 2.3.130, you can then update to 2.3.140 or higher as you would normally. ***"
exit 1
fi
}
es_indices_check() {
echo "Checking for unsupported Elasticsearch indices..."
UNSUPPORTED_INDICES=$(for INDEX in $(so-elasticsearch-indices-list | awk '{print $3}'); do so-elasticsearch-query $INDEX/_settings?human |grep '"created_string":"6' | jq -r 'keys'[0]; done)
if [ -z "$UNSUPPORTED_INDICES" ]; then
echo "No unsupported indices found."
else
echo "The following indices were created with Elasticsearch 6, and are not supported when upgrading to Elasticsearch 8. These indices may need to be deleted, migrated, or re-indexed before proceeding with the upgrade. Please see $DOC_BASE_URL/soup.html#elastic-8 for more details."
echo
echo "$UNSUPPORTED_INDICES"
exit 1
fi
}
generate_and_clean_tarballs() {
local new_version
new_version=$(cat $UPDATE_DIR/VERSION)
[ -d /opt/so/repo ] || mkdir -p /opt/so/repo
tar -czf "/opt/so/repo/$new_version.tar.gz" -C "$UPDATE_DIR" .
find "/opt/so/repo" -type f -not -name "$new_version.tar.gz" -exec rm -rf {} \;
}
highstate() {
# Run a highstate.
salt-call state.highstate -l info queue=True
@@ -480,7 +370,6 @@ highstate() {
masterlock() {
echo "Locking Salt Master"
if [[ "$INSTALLEDVERSION" =~ rc.1 ]]; then
TOPFILE=/opt/so/saltstack/default/salt/top.sls
BACKUPTOPFILE=/opt/so/saltstack/default/salt/top.sls.backup
mv -v $TOPFILE $BACKUPTOPFILE
@@ -489,30 +378,18 @@ masterlock() {
echo " - ca" >> $TOPFILE
echo " - ssl" >> $TOPFILE
echo " - elasticsearch" >> $TOPFILE
fi
}
masterunlock() {
echo "Unlocking Salt Master"
if [[ "$INSTALLEDVERSION" =~ rc.1 ]]; then
mv -v $BACKUPTOPFILE $TOPFILE
fi
}
preupgrade_changes() {
# This function is to add any new pillar items if needed.
echo "Checking to see if changes are needed."
[[ "$INSTALLEDVERSION" == 2.3.0 || "$INSTALLEDVERSION" == 2.3.1 || "$INSTALLEDVERSION" == 2.3.2 || "$INSTALLEDVERSION" == 2.3.10 ]] && up_to_2.3.20
[[ "$INSTALLEDVERSION" == 2.3.20 || "$INSTALLEDVERSION" == 2.3.21 ]] && up_to_2.3.30
[[ "$INSTALLEDVERSION" == 2.3.30 || "$INSTALLEDVERSION" == 2.3.40 ]] && up_to_2.3.50
[[ "$INSTALLEDVERSION" == 2.3.50 || "$INSTALLEDVERSION" == 2.3.51 || "$INSTALLEDVERSION" == 2.3.52 || "$INSTALLEDVERSION" == 2.3.60 || "$INSTALLEDVERSION" == 2.3.61 || "$INSTALLEDVERSION" == 2.3.70 ]] && up_to_2.3.80
[[ "$INSTALLEDVERSION" == 2.3.80 ]] && up_to_2.3.90
[[ "$INSTALLEDVERSION" == 2.3.90 || "$INSTALLEDVERSION" == 2.3.91 ]] && up_to_2.3.100
[[ "$INSTALLEDVERSION" == 2.3.100 ]] && up_to_2.3.110
[[ "$INSTALLEDVERSION" == 2.3.110 ]] && up_to_2.3.120
[[ "$INSTALLEDVERSION" == 2.3.120 ]] && up_to_2.3.130
[[ "$INSTALLEDVERSION" == 2.3.130 ]] && up_to_2.3.140
[[ "$INSTALLEDVERSION" == 2.4.2 ]] && up_to_2.4.3
true
}
@@ -520,100 +397,17 @@ postupgrade_changes() {
# This function is to add any new pillar items if needed.
echo "Running post upgrade processes."
[[ "$POSTVERSION" == 2.3.0 || "$POSTVERSION" == 2.3.1 || "$POSTVERSION" == 2.3.2 || "$POSTVERSION" == 2.3.10 || "$POSTVERSION" == 2.3.20 ]] && post_to_2.3.21
[[ "$POSTVERSION" == 2.3.21 || "$POSTVERSION" == 2.3.30 ]] && post_to_2.3.40
[[ "$POSTVERSION" == 2.3.40 || "$POSTVERSION" == 2.3.50 || "$POSTVERSION" == 2.3.51 || "$POSTVERSION" == 2.3.52 ]] && post_to_2.3.60
[[ "$POSTVERSION" == 2.3.60 || "$POSTVERSION" == 2.3.61 || "$POSTVERSION" == 2.3.70 || "$POSTVERSION" == 2.3.80 ]] && post_to_2.3.90
[[ "$POSTVERSION" == 2.3.90 || "$POSTVERSION" == 2.3.91 ]] && post_to_2.3.100
[[ "$POSTVERSION" == 2.3.100 ]] && post_to_2.3.110
[[ "$POSTVERSION" == 2.3.110 ]] && post_to_2.3.120
[[ "$POSTVERSION" == 2.3.120 ]] && post_to_2.3.130
[[ "$POSTVERSION" == 2.3.130 ]] && post_to_2.3.140
[[ "$POSTVERSION" == 2.4.2 ]] && post_to_2.4.3
true
}
post_to_2.3.21() {
salt-call state.apply playbook.OLD_db_init
rm -f /opt/so/rules/elastalert/playbook/*.yaml
so-playbook-ruleupdate >> /root/soup_playbook_rule_update.log 2>&1 &
POSTVERSION=2.3.21
}
post_to_2.4.3() {
echo "Nothing to apply"
POSTVERSION=2.4.3
}
post_to_2.3.40() {
so-playbook-sigma-refresh >> /root/soup_playbook_sigma_refresh.log 2>&1 &
so-kibana-space-defaults
POSTVERSION=2.3.40
}
post_to_2.3.60() {
for table in identity_recovery_addresses selfservice_recovery_flows selfservice_registration_flows selfservice_verification_flows identities identity_verification_tokens identity_credentials selfservice_settings_flows identity_recovery_tokens continuity_containers identity_credential_identifiers identity_verifiable_addresses courier_messages selfservice_errors sessions selfservice_login_flows
do
echo "Forcing Kratos network migration: $table"
sqlite3 /opt/so/conf/kratos/db/db.sqlite "update $table set nid=(select id from networks limit 1);"
done
POSTVERSION=2.3.60
}
post_to_2.3.90() {
# Create FleetDM service account
FLEET_MANAGER=$(lookup_pillar fleet_manager)
if [[ "$FLEET_MANAGER" == "True" ]]; then
FLEET_SA_EMAIL=$(lookup_pillar_secret fleet_sa_email)
FLEET_SA_PW=$(lookup_pillar_secret fleet_sa_password)
MYSQL_PW=$(lookup_pillar_secret mysql)
FLEET_HASH=$(docker exec so-soctopus python -c "import bcrypt; print(bcrypt.hashpw('$FLEET_SA_PW'.encode('utf-8'), bcrypt.gensalt()).decode('utf-8'));" 2>&1)
MYSQL_OUTPUT=$(docker exec so-mysql mysql -u root --password=$MYSQL_PW fleet -e \
"INSERT INTO users (password,salt,email,name,global_role) VALUES ('$FLEET_HASH','','$FLEET_SA_EMAIL','$FLEET_SA_EMAIL','admin')" 2>&1)
if [[ $? -eq 0 ]]; then
echo "Successfully added service account to Fleet"
else
echo "Unable to add service account to Fleet"
echo "$MYSQL_OUTPUT"
fi
fi
POSTVERSION=2.3.90
}
post_to_2.3.100() {
echo "Post Processing for 2.3.100"
POSTVERSION=2.3.100
}
post_to_2.3.110() {
echo "Post Processing for 2.3.110"
echo "Removing old Elasticsearch index templates"
[ -d /opt/so/saltstack/default/salt/elasticsearch/templates/so ] && rm -rf /opt/so/saltstack/default/salt/elasticsearch/templates/so
echo "Updating Kibana dashboards"
salt-call state.apply kibana.so_savedobjects_defaults queue=True
POSTVERSION=2.3.110
}
post_to_2.3.120() {
echo "Post Processing for 2.3.120"
POSTVERSION=2.3.120
sed -i '/so-thehive-es/d;/so-thehive/d;/so-cortex/d' /opt/so/conf/so-status/so-status.conf
}
post_to_2.3.130() {
echo "Post Processing for 2.3.130"
POSTVERSION=2.3.130
}
post_to_2.3.140() {
echo "Post Processing for 2.3.140"
FORCE_SYNC=true so-user sync
so-kibana-restart
so-kibana-space-defaults
POSTVERSION=2.3.140
}
stop_salt_master() {
# kill all salt jobs across the grid because they hang indefinitely if they are queued and salt-master restarts
@@ -656,235 +450,9 @@ stop_salt_minion() {
set -e
}
up_to_2.3.20(){
DOCKERSTUFFBIP=$(echo $DOCKERSTUFF | awk -F'.' '{print $1,$2,$3,1}' OFS='.')/24
# Remove PCAP from global
sed -i '/pcap:/d' /opt/so/saltstack/local/pillar/global.sls
sed -i '/sensor_checkin_interval_ms:/d' /opt/so/saltstack/local/pillar/global.sls
# Add check-in interval to global
echo "sensoroni:" >> /opt/so/saltstack/local/pillar/global.sls
echo " node_checkin_interval_ms: 10000" >> /opt/so/saltstack/local/pillar/global.sls
# Update pillar files for new sensoroni functionality
for file in /opt/so/saltstack/local/pillar/minions/*; do
echo "sensoroni:" >> $file
echo " node_description:" >> $file
local SOMEADDRESS=$(cat $file | grep mainip | tail -n 1 | awk '{print $2}')
echo " node_address: $SOMEADDRESS" >> $file
done
# Remove old firewall config to reduce confusion
rm -f /opt/so/saltstack/default/pillar/firewall/ports.sls
# Fix daemon.json by managing it
echo "docker:" >> /opt/so/saltstack/local/pillar/global.sls
DOCKERGREP=$(cat /etc/docker/daemon.json | grep base | awk '{print $3}' | cut -f1 -d"," | tr -d '"')
if [ -z "$DOCKERGREP" ]; then
echo " range: '172.17.0.0/24'" >> /opt/so/saltstack/local/pillar/global.sls
echo " bip: '172.17.0.1/24'" >> /opt/so/saltstack/local/pillar/global.sls
else
DOCKERSTUFF="${DOCKERGREP//\"}"
DOCKERSTUFFBIP=$(echo $DOCKERSTUFF | awk -F'.' '{print $1,$2,$3,1}' OFS='.')/24
echo " range: '$DOCKERSTUFF/24'" >> /opt/so/saltstack/local/pillar/global.sls
echo " bip: '$DOCKERSTUFFBIP'" >> /opt/so/saltstack/local/pillar/global.sls
fi
INSTALLEDVERSION=2.3.20
}
up_to_2.3.30() {
# Replace any curly brace scalars with the same scalar in single quotes
readarray -t minion_pillars <<< "$(find /opt/so/saltstack/local/pillar/minions -type f -name '*.sls')"
for pillar in "${minion_pillars[@]}"; do
sed -i -r "s/ (\{\{.*}})$/ '\1'/g" "$pillar"
done
# Change the IMAGEREPO
sed -i "/ imagerepo: 'securityonion'/c\ imagerepo: 'security-onion-solutions'" /opt/so/saltstack/local/pillar/global.sls
sed -i "/ imagerepo: securityonion/c\ imagerepo: 'security-onion-solutions'" /opt/so/saltstack/local/pillar/global.sls
# Strelka rule repo pillar addition
if [[ $is_airgap -eq 0 ]]; then
# Add manager as default Strelka YARA rule repo
sed -i "/^strelka:/a \\ repos: \n - https://$HOSTNAME/repo/rules/strelka" /opt/so/saltstack/local/pillar/global.sls;
else
# Add Github repo for Strelka YARA rules
sed -i "/^strelka:/a \\ repos: \n - https://github.com/Neo23x0/signature-base" /opt/so/saltstack/local/pillar/global.sls;
fi
check_log_size_limit
INSTALLEDVERSION=2.3.30
}
up_to_2.3.50() {
cat <<EOF > /tmp/supersed.txt
/so-zeek:/ {
p;
n;
/shards:/ {
p;
n;
/warm:/ {
p;
n;
/close:/ {
s/close: 365/close: 45/;
p;
n;
/delete:/ {
s/delete: 45/delete: 365/;
p;
d;
}
}
}
}
}
p;
EOF
sed -n -i -f /tmp/supersed.txt /opt/so/saltstack/local/pillar/global.sls
rm /tmp/supersed.txt
INSTALLEDVERSION=2.3.50
}
up_to_2.3.80() {
# Remove watermark settings from global.sls
sed -i '/ cluster_routing_allocation_disk/d' /opt/so/saltstack/local/pillar/global.sls
# Add new indices to the global
sed -i '/ index_settings:/a \\ so-elasticsearch: \n shards: 1 \n warm: 7 \n close: 30 \n delete: 365' /opt/so/saltstack/local/pillar/global.sls
sed -i '/ index_settings:/a \\ so-logstash: \n shards: 1 \n warm: 7 \n close: 30 \n delete: 365' /opt/so/saltstack/local/pillar/global.sls
sed -i '/ index_settings:/a \\ so-kibana: \n shards: 1 \n warm: 7 \n close: 30 \n delete: 365' /opt/so/saltstack/local/pillar/global.sls
sed -i '/ index_settings:/a \\ so-redis: \n shards: 1 \n warm: 7 \n close: 30 \n delete: 365' /opt/so/saltstack/local/pillar/global.sls
# Do some pillar formatting
tc=$(grep -w true_cluster /opt/so/saltstack/local/pillar/global.sls | awk -F: '{print tolower($2)}' | xargs)
if [[ "$tc" == "true" ]]; then
tcname=$(grep -w true_cluster_name /opt/so/saltstack/local/pillar/global.sls | awk -F: '{print $2}')
sed -i "/^elasticsearch:/a \\ config: \n cluster: \n name: $tcname" /opt/so/saltstack/local/pillar/global.sls
sed -i '/ true_cluster_name/d' /opt/so/saltstack/local/pillar/global.sls
sed -i '/ esclustername/d' /opt/so/saltstack/local/pillar/global.sls
for file in /opt/so/saltstack/local/pillar/minions/*.sls; do
if [[ ${file} != *"manager.sls"* ]]; then
noderoutetype=$(grep -w node_route_type $file | awk -F: '{print $2}')
if [ -n "$noderoutetype" ]; then
sed -i "/^elasticsearch:/a \\ config: \n node: \n attr: \n box_type: $noderoutetype" $file
sed -i '/ node_route_type/d' $file
noderoutetype=''
fi
fi
done
fi
# check for local es config to inform user that the config in local is now ignored and those options need to be placed in the pillar
if [ -f "/opt/so/saltstack/local/salt/elasticsearch/files/elasticsearch.yml" ]; then
NOTIFYCUSTOMELASTICCONFIG=true
fi
INSTALLEDVERSION=2.3.80
}
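
The index_settings sed lines in up_to_2.3.80 above append four new index blocks to the local global.sls. Unrolling one of those sed replacement strings, the resulting pillar fragment should look roughly like this (a sketch reconstructed from the sed text, assuming index_settings sits under the elasticsearch pillar key as the surrounding steps suggest):

elasticsearch:
  index_settings:
    so-elasticsearch:
      shards: 1
      warm: 7
      close: 30
      delete: 365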
up_to_2.3.90() {
for i in manager managersearch eval standalone; do
echo "Checking for compgen match of /opt/so/saltstack/local/pillar/minions/*_$i.sls"
if compgen -G "/opt/so/saltstack/local/pillar/minions/*_$i.sls"; then
echo "Found compgen match for /opt/so/saltstack/local/pillar/minions/*_$i.sls"
for f in $(compgen -G "/opt/so/saltstack/local/pillar/minions/*_$i.sls"); do
if grep -qozP "^soc:\n.*es_index_patterns: '\*:so-\*,\*:endgame-\*'" "$f"; then
echo "soc:es_index_patterns already present in $f"
else
echo "Appending soc pillar data to $f"
echo "soc:" >> "$f"
sed -i "/^soc:/a \\ es_index_patterns: '*:so-*,*:endgame-*'" "$f"
fi
done
fi
done
# Create Endgame Hostgroup
echo "Adding endgame hostgroup with so-firewall"
if so-firewall addhostgroup endgame 2>&1 | grep -q 'Already exists'; then
echo 'endgame hostgroup already exists'
else
echo 'endgame hostgroup added'
fi
# Force influx to generate a new cert
echo "Moving influxdb.crt and influxdb.key to generate new certs"
mv -vf /etc/pki/influxdb.crt /etc/pki/influxdb.crt.2390upgrade
mv -vf /etc/pki/influxdb.key /etc/pki/influxdb.key.2390upgrade
# remove old common ingest pipeline in default
rm -vf /opt/so/saltstack/default/salt/elasticsearch/files/ingest/common
# if custom common, move from local ingest to local ingest-dynamic
mkdir -vp /opt/so/saltstack/local/salt/elasticsearch/files/ingest-dynamic
if [[ -f "/opt/so/saltstack/local/salt/elasticsearch/files/ingest/common" ]]; then
mv -v /opt/so/saltstack/local/salt/elasticsearch/files/ingest/common /opt/so/saltstack/local/salt/elasticsearch/files/ingest-dynamic/common
# since json file, we need to wrap with raw
sed -i '1s/^/{% raw %}\n/' /opt/so/saltstack/local/salt/elasticsearch/files/ingest-dynamic/common
sed -i -e '$a{% endraw %}\n' /opt/so/saltstack/local/salt/elasticsearch/files/ingest-dynamic/common
fi
# Generate FleetDM Service Account creds if they do not exist
if grep -q "fleet_sa_email" /opt/so/saltstack/local/pillar/secrets.sls; then
echo "FleetDM Service Account credentials already created..."
else
echo "Generating FleetDM Service Account credentials..."
FLEETSAPASS=$(get_random_value)
printf '%s\n'\
" fleet_sa_email: service.account@securityonion.invalid"\
" fleet_sa_password: $FLEETSAPASS"\
>> /opt/so/saltstack/local/pillar/secrets.sls
fi
sed -i -re 's/^(playbook_admin.*|playbook_automation.*)/ \1/g' /opt/so/saltstack/local/pillar/secrets.sls
INSTALLEDVERSION=2.3.90
}
up_to_2.3.100() {
fix_wazuh
echo "Adding receiver hostgroup with so-firewall"
if so-firewall addhostgroup receiver 2>&1 | grep -q 'Already exists'; then
echo 'receiver hostgroup already exists'
else
echo 'receiver hostgroup added'
fi
echo "Adding receiver to assigned_hostgroups.local.map.yaml"
grep -qxF " receiver:" /opt/so/saltstack/local/salt/firewall/assigned_hostgroups.local.map.yaml || sed -i -e '$a\ receiver:' /opt/so/saltstack/local/salt/firewall/assigned_hostgroups.local.map.yaml
INSTALLEDVERSION=2.3.100
}
up_to_2.3.110() {
sed -i 's|shards|index_template:\n template:\n settings:\n index:\n number_of_shards|g' /opt/so/saltstack/local/pillar/global.sls
INSTALLEDVERSION=2.3.110
}
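
The single sed in up_to_2.3.110 rewrites every flat shards key into the nested Elasticsearch index-template form. A before/after sketch of one affected pillar entry (entry name illustrative, indentation inferred from the sed replacement):

# before
so-zeek:
  shards: 1
# after
so-zeek:
  index_template:
    template:
      settings:
        index:
          number_of_shards: 1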
up_to_2.3.120() {
# Stop thehive services since these will be broken in .120
so-thehive-stop
so-thehive-es-stop
so-cortex-stop
INSTALLEDVERSION=2.3.120
}
up_to_2.3.130() {
# Remove file for nav update
rm -f /opt/so/conf/navigator/layers/nav_layer_playbook.json
INSTALLEDVERSION=2.3.130
}
up_to_2.3.140() {
elastalert_indices_check
INSTALLEDVERSION=2.3.140
}
up_to_2.4.3() {
echo "Nothing to do for 2.4.3"
INSTALLEDVERSION=2.4.3
}
@@ -993,24 +561,6 @@ upgrade_salt() {
echo ""
yum versionlock add "salt-*"
# Else do Ubuntu things
elif [[ $OS == 'ubuntu' ]]; then
echo "Removing apt hold for Salt."
echo ""
apt-mark unhold "salt-common"
apt-mark unhold "salt-master"
apt-mark unhold "salt-minion"
echo "Updating Salt packages."
echo ""
set +e
run_check_net_err \
"sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -r -F -M -x python3 stable \"$NEWSALTVERSION\"" \
"Could not update salt, please check $SOUP_LOG for details."
set -e
echo "Applying apt hold for Salt."
echo ""
apt-mark hold "salt-common"
apt-mark hold "salt-master"
apt-mark hold "salt-minion"
fi
echo "Checking if Salt was upgraded."
@@ -1030,46 +580,6 @@ upgrade_salt() {
}
update_repo() {
if [[ "$OS" == "centos" ]]; then
echo "Performing repo changes."
# Import GPG Keys
gpg_rpm_import
echo "Disabling fastestmirror."
disable_fastestmirror
echo "Deleting unneeded repo files."
DELREPOS=('CentOS-Base' 'CentOS-CR' 'CentOS-Debuginfo' 'docker-ce' 'CentOS-fasttrack' 'CentOS-Media' 'CentOS-Sources' 'CentOS-Vault' 'CentOS-x86_64-kernel' 'epel' 'epel-testing' 'saltstack' 'salt-latest' 'wazuh')
for DELREPO in "${DELREPOS[@]}"; do
if [[ -f "/etc/yum.repos.d/$DELREPO.repo" ]]; then
echo "Deleting $DELREPO.repo"
rm -f "/etc/yum.repos.d/$DELREPO.repo"
fi
done
if [[ $is_airgap -eq 1 ]]; then
# Copy the new repo file if not airgap
cp $UPDATE_DIR/salt/repo/client/files/centos/securityonion.repo /etc/yum.repos.d/
yum clean all
yum repolist
fi
elif [[ "$OS" == "ubuntu" ]]; then
ubuntu_version=$(grep VERSION_ID /etc/os-release | awk -F '[ "]' '{print $2}')
if grep -q "UBUNTU_CODENAME=bionic" /etc/os-release; then
OSVER=bionic
elif grep -q "UBUNTU_CODENAME=focal" /etc/os-release; then
OSVER=focal
else
echo "We do not support your current version of Ubuntu."
exit 1
fi
rm -f /etc/apt/sources.list.d/salt.list
echo "deb https://repo.securityonion.net/file/securityonion-repo/ubuntu/$ubuntu_version/amd64/salt3004.2/ $OSVER main" > /etc/apt/sources.list.d/saltstack.list
apt-get update
fi
}
verify_latest_update_script() {
# Check to see if the update scripts match. If not run the new one.
CURRENTSOUP=$(md5sum /usr/sbin/soup | awk '{print $1}')
@@ -1096,51 +606,37 @@ verify_latest_update_script() {
fi
}
apply_hotfix() {
if [[ "$INSTALLEDVERSION" == "2.3.90" ]] ; then
fix_wazuh
elif [[ "$INSTALLEDVERSION" == "2.3.110" ]] ; then
2_3_10_hotfix_1
else
echo "No actions required. ($INSTALLEDVERSION/$HOTFIXVERSION)"
fi
}
# Keeping this block in case we need to do a hotfix that requires salt update
#apply_hotfix() {
# if [[ "$INSTALLEDVERSION" == "2.3.90" ]] ; then
# fix_wazuh
# elif [[ "$INSTALLEDVERSION" == "2.3.110" ]] ; then
# 2_3_10_hotfix_1
# else
# echo "No actions required. ($INSTALLEDVERSION/$HOTFIXVERSION)"
# fi
#}
fix_wazuh() {
FILE="/nsm/wazuh/etc/ossec.conf"
echo "Detecting if $FILE needs corrected..."
if [ -f "$FILE" ]; then
if head -1 $FILE | grep -q "xml version"; then
echo "$FILE has an XML header; removing"
sed -i 1d $FILE
docker restart so-wazuh # cannot use so-wazuh-restart here because the salt-master service is stopped
else
echo "$FILE does not have an XML header, so no changes are necessary."
fi
else
echo "$FILE does not exist, so no changes are necessary."
fi
}
#upgrade salt to 3004.1
2_3_10_hotfix_1() {
systemctl_func "stop" "$cron_service_name"
# update mine items prior to stopping salt-minion and salt-master
update_salt_mine
stop_salt_minion
stop_salt_master
update_repo
# Does salt need upgraded. If so update it.
if [[ $UPGRADESALT -eq 1 ]]; then
echo "Upgrading Salt"
# Update the repo files so it can actually upgrade
upgrade_salt
fi
systemctl_func "start" "salt-master"
systemctl_func "start" "salt-minion"
systemctl_func "start" "$cron_service_name"
#2_3_10_hotfix_1() {
# systemctl_func "stop" "$cron_service_name"
# # update mine items prior to stopping salt-minion and salt-master
# update_salt_mine
# stop_salt_minion
# stop_salt_master
# update_repo
# # Does salt need upgraded. If so update it.
# if [[ $UPGRADESALT -eq 1 ]]; then
# echo "Upgrading Salt"
# # Update the repo files so it can actually upgrade
# upgrade_salt
# fi
# systemctl_func "start" "salt-master"
# systemctl_func "start" "salt-minion"
# systemctl_func "start" "$cron_service_name"
#}
main() {
trap 'check_err $?' EXIT
@@ -1198,23 +694,9 @@ main() {
fi
echo "Verifying we have the latest soup script."
verify_latest_update_script
es_version_check
es_indices_check
elastalert_indices_check
echo ""
set_palette
check_elastic_license
echo ""
echo "Checking for OS updates."
check_os_updates
echo "Generating new repo archive"
generate_and_clean_tarballs
if [ -f /usr/sbin/so-image-common ]; then
. /usr/sbin/so-image-common
else
add_common
fi
echo "Let's see if we need to update Security Onion."
upgrade_check
upgrade_space
@@ -1224,7 +706,6 @@ main() {
set -e
if [[ $is_airgap -eq 0 ]]; then
update_centos_repo
yum clean all
check_os_updates
fi

View File

@@ -33,6 +33,11 @@ so-mysql:
- ipv4_address: {{ DOCKER.containers['so-mysql'].ip }}
- extra_hosts:
- {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }}
{% if DOCKER.containers['so-mysql'].extra_hosts %}
{% for XTRAHOST in DOCKER.containers['so-mysql'].extra_hosts %}
- {{ XTRAHOST }}
{% endfor %}
{% endif %}
- port_bindings:
{% for BINDING in DOCKER.containers['so-mysql'].port_bindings %}
- {{ BINDING }}
@@ -40,11 +45,21 @@ so-mysql:
- environment:
- MYSQL_ROOT_HOST={{ GLOBALS.so_docker_bip }}
- MYSQL_ROOT_PASSWORD=/etc/mypass
{% if DOCKER.containers['so-mysql'].extra_env %}
{% for XTRAENV in DOCKER.containers['so-mysql'].extra_env %}
- {{ XTRAENV }}
{% endfor %}
{% endif %}
- binds:
- /opt/so/conf/mysql/etc/my.cnf:/etc/my.cnf:ro
- /opt/so/conf/mysql/etc/mypass:/etc/mypass
- /nsm/mysql:/var/lib/mysql:rw
- /opt/so/log/mysql:/var/log/mysql:rw
{% if DOCKER.containers['so-mysql'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-mysql'].custom_bind_mounts %}
- {{ BIND }}
{% endfor %}
{% endif %}
- watch:
- /opt/so/conf/mysql/etc
- require:

View File

@@ -12,6 +12,15 @@ include:
- nginx.config
- nginx.sostatus
make-rule-dir-nginx:
file.directory:
- name: /nsm/rules
- user: socore
- group: socore
- recurse:
- user
- group
so-nginx:
docker_container.running:
- image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-nginx:{{ GLOBALS.so_version }}
@@ -21,6 +30,11 @@ so-nginx:
- ipv4_address: {{ DOCKER.containers['so-nginx'].ip }}
- extra_hosts:
- {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }}
{% if DOCKER.containers['so-nginx'].extra_hosts %}
{% for XTRAHOST in DOCKER.containers['so-nginx'].extra_hosts %}
- {{ XTRAHOST }}
{% endfor %}
{% endif %}
- binds:
- /opt/so/conf/nginx/nginx.conf:/etc/nginx/nginx.conf:ro
- /opt/so/log/nginx/:/var/log/nginx:rw
@@ -37,6 +51,18 @@ so-nginx:
- /opt/so/conf/navigator/enterprise-attack.json:/opt/socore/html/navigator/assets/enterprise-attack.json:ro
- /opt/so/conf/navigator/pre-attack.json:/opt/socore/html/navigator/assets/pre-attack.json:ro
- /nsm/repo:/opt/socore/html/repo:ro
- /nsm/rules:/nsm/rules:ro
{% endif %}
{% if DOCKER.containers['so-nginx'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-nginx'].custom_bind_mounts %}
- {{ BIND }}
{% endfor %}
{% endif %}
{% if DOCKER.containers['so-nginx'].extra_env %}
- environment:
{% for XTRAENV in DOCKER.containers['so-nginx'].extra_env %}
- {{ XTRAENV }}
{% endfor %}
{% endif %}
- cap_add: NET_BIND_SERVICE
- port_bindings:

View File

@@ -84,8 +84,8 @@ http {
server {
listen 7788;
server_name {{ GLOBALS.url_base }};
root /opt/socore/html/repo;
location /rules/ {
root /nsm/rules;
location / {
allow all;
sendfile on;
sendfile_max_chunk 1m;

View File

@@ -6,6 +6,8 @@
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'docker/docker.map.jinja' import DOCKER %}
include:
- pcap.config
@@ -24,6 +26,23 @@ so-steno:
- /nsm/pcapindex:/nsm/pcapindex:rw
- /nsm/pcaptmp:/tmp:rw
- /opt/so/log/stenographer:/var/log/stenographer:rw
{% if DOCKER.containers['so-steno'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-steno'].custom_bind_mounts %}
- {{ BIND }}
{% endfor %}
{% endif %}
{% if DOCKER.containers['so-steno'].extra_hosts %}
- extra_hosts:
{% for XTRAHOST in DOCKER.containers['so-steno'].extra_hosts %}
- {{ XTRAHOST }}
{% endfor %}
{% endif %}
{% if DOCKER.containers['so-steno'].extra_env %}
- environment:
{% for XTRAENV in DOCKER.containers['so-steno'].extra_env %}
- {{ XTRAENV }}
{% endfor %}
{% endif %}
- watch:
- file: stenoconf
- require:

View File

@@ -34,13 +34,28 @@ so-playbook:
- ipv4_address: {{ DOCKER.containers['so-playbook'].ip }}
- binds:
- /opt/so/log/playbook:/playbook/log:rw
{% if DOCKER.containers['so-playbook'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-playbook'].custom_bind_mounts %}
- {{ BIND }}
{% endfor %}
{% endif %}
- extra_hosts:
- {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }}
{% if DOCKER.containers['so-playbook'].extra_hosts %}
{% for XTRAHOST in DOCKER.containers['so-playbook'].extra_hosts %}
- {{ XTRAHOST }}
{% endfor %}
{% endif %}
- environment:
- REDMINE_DB_MYSQL={{ GLOBALS.manager }}
- REDMINE_DB_DATABASE=playbook
- REDMINE_DB_USERNAME=playbookdbuser
- REDMINE_DB_PASSWORD={{ PLAYBOOKPASS }}
{% if DOCKER.containers['so-playbook'].extra_env %}
{% for XTRAENV in DOCKER.containers['so-playbook'].extra_env %}
- {{ XTRAENV }}
{% endfor %}
{% endif %}
- port_bindings:
{% for BINDING in DOCKER.containers['so-playbook'].port_bindings %}
- {{ BINDING }}

View File

@@ -35,6 +35,23 @@ so-redis:
{% else %}
- /etc/ssl/certs/intca.crt:/certs/ca.crt:ro
{% endif %}
{% if DOCKER.containers['so-redis'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-redis'].custom_bind_mounts %}
- {{ BIND }}
{% endfor %}
{% endif %}
{% if DOCKER.containers['so-redis'].extra_hosts %}
- extra_hosts:
{% for XTRAHOST in DOCKER.containers['so-redis'].extra_hosts %}
- {{ XTRAHOST }}
{% endfor %}
{% endif %}
{% if DOCKER.containers['so-redis'].extra_env %}
- environment:
{% for XTRAENV in DOCKER.containers['so-redis'].extra_env %}
- {{ XTRAENV }}
{% endfor %}
{% endif %}
- entrypoint: "redis-server /usr/local/etc/redis/redis.conf"
- watch:
- file: /opt/so/conf/redis/etc

View File

@@ -30,9 +30,25 @@ so-dockerregistry:
- /nsm/docker-registry/docker:/var/lib/registry/docker:rw
- /etc/pki/registry.crt:/etc/pki/registry.crt:ro
- /etc/pki/registry.key:/etc/pki/registry.key:ro
{% if DOCKER.containers['so-dockerregistry'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-dockerregistry'].custom_bind_mounts %}
- {{ BIND }}
{% endfor %}
{% endif %}
{% if DOCKER.containers['so-dockerregistry'].extra_hosts %}
- extra_hosts:
{% for XTRAHOST in DOCKER.containers['so-dockerregistry'].extra_hosts %}
- {{ XTRAHOST }}
{% endfor %}
{% endif %}
- client_timeout: 180
- environment:
- HOME=/root
{% if DOCKER.containers['so-dockerregistry'].extra_env %}
{% for XTRAENV in DOCKER.containers['so-dockerregistry'].extra_env %}
- {{ XTRAENV }}
{% endfor %}
{% endif %}
- retry:
attempts: 5
interval: 30

View File

@@ -4,6 +4,8 @@
# Elastic License 2.0.
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'docker/docker.map.jinja' import DOCKER %}
include:
- sensoroni.config
@@ -21,6 +23,23 @@ so-sensoroni:
- /opt/so/conf/sensoroni/sensoroni.json:/opt/sensoroni/sensoroni.json:ro
- /opt/so/conf/sensoroni/analyzers:/opt/sensoroni/analyzers:rw
- /opt/so/log/sensoroni:/opt/sensoroni/logs:rw
{% if DOCKER.containers['so-sensoroni'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-sensoroni'].custom_bind_mounts %}
- {{ BIND }}
{% endfor %}
{% endif %}
{% if DOCKER.containers['so-sensoroni'].extra_hosts %}
- extra_hosts:
{% for XTRAHOST in DOCKER.containers['so-sensoroni'].extra_hosts %}
- {{ XTRAHOST }}
{% endfor %}
{% endif %}
{% if DOCKER.containers['so-sensoroni'].extra_env %}
- environment:
{% for XTRAENV in DOCKER.containers['so-sensoroni'].extra_env %}
- {{ XTRAENV }}
{% endfor %}
{% endif %}
- watch:
- file: /opt/so/conf/sensoroni/sensoroni.json
- require:

View File

@@ -32,11 +32,27 @@ so-soc:
- /opt/so/conf/soc/soc_users_roles:/opt/sensoroni/rbac/users_roles:rw
- /opt/so/conf/soc/salt:/opt/sensoroni/salt:rw
- /opt/so/saltstack:/opt/so/saltstack:rw
{% if DOCKER.containers['so-soc'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-soc'].custom_bind_mounts %}
- {{ BIND }}
{% endfor %}
{% endif %}
- extra_hosts: {{ DOCKER_EXTRA_HOSTS }}
{% if DOCKER.containers['so-soc'].extra_hosts %}
{% for XTRAHOST in DOCKER.containers['so-soc'].extra_hosts %}
- {{ XTRAHOST }}
{% endfor %}
{% endif %}
- port_bindings:
{% for BINDING in DOCKER.containers['so-soc'].port_bindings %}
- {{ BINDING }}
{% endfor %}
{% if DOCKER.containers['so-soc'].extra_env %}
- environment:
{% for XTRAENV in DOCKER.containers['so-soc'].extra_env %}
- {{ XTRAENV }}
{% endfor %}
{% endif %}
- watch:
- file: /opt/so/conf/soc/*
- require:

View File

@@ -29,6 +29,11 @@ so-soctopus:
{% if GLOBALS.airgap %}
- /nsm/repo/rules/sigma:/soctopus/sigma
{% endif %}
{% if DOCKER.containers['so-soctopus'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-soctopus'].custom_bind_mounts %}
- {{ BIND }}
{% endfor %}
{% endif %}
- port_bindings:
{% for BINDING in DOCKER.containers['so-soctopus'].port_bindings %}
- {{ BINDING }}
@@ -36,6 +41,17 @@ so-soctopus:
- extra_hosts:
- {{GLOBALS.url_base}}:{{GLOBALS.manager_ip}}
- {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }}
{% if DOCKER.containers['so-soctopus'].extra_hosts %}
{% for XTRAHOST in DOCKER.containers['so-soctopus'].extra_hosts %}
- {{ XTRAHOST }}
{% endfor %}
{% endif %}
{% if DOCKER.containers['so-soctopus'].extra_env %}
- environment:
{% for XTRAENV in DOCKER.containers['so-soctopus'].extra_env %}
- {{ XTRAENV }}
{% endfor %}
{% endif %}
- require:
- file: soctopusconf
- file: navigatordefaultlayer

View File

@@ -18,6 +18,11 @@ strelka_backend:
- binds:
- /opt/so/conf/strelka/backend/:/etc/strelka/:ro
- /opt/so/conf/strelka/rules/:/etc/yara/:ro
{% if DOCKER.containers['so-strelka-backend'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-strelka-backend'].custom_bind_mounts %}
- {{ BIND }}
{% endfor %}
{% endif %}
- name: so-strelka-backend
- networks:
- sobridge:
@@ -25,6 +30,17 @@ strelka_backend:
- command: strelka-backend
- extra_hosts:
- {{ GLOBALS.hostname }}:{{ GLOBALS.node_ip }}
{% if DOCKER.containers['so-strelka-backend'].extra_hosts %}
{% for XTRAHOST in DOCKER.containers['so-strelka-backend'].extra_hosts %}
- {{ XTRAHOST }}
{% endfor %}
{% endif %}
{% if DOCKER.containers['so-strelka-backend'].extra_env %}
- environment:
{% for XTRAENV in DOCKER.containers['so-strelka-backend'].extra_env %}
- {{ XTRAENV }}
{% endfor %}
{% endif %}
- restart_policy: on-failure
delete_so-strelka-backend_so-status.disabled:

View File

@@ -43,14 +43,14 @@ strelka_sbin:
- group: 939
- file_mode: 755
#strelka_sbin_jinja:
# file.recurse:
# - name: /usr/sbin
# - source: salt://strelka/tools/sbin_jinja
# - user: 939
# - group: 939
# - file_mode: 755
# - template: jinja
strelka_sbin_jinja:
file.recurse:
- name: /usr/sbin
- source: salt://strelka/tools/sbin_jinja
- user: 939
- group: 939
- file_mode: 755
- template: jinja
{% else %}

View File

@@ -22,11 +22,27 @@ strelka_coordinator:
- entrypoint: redis-server --save "" --appendonly no
- extra_hosts:
- {{ GLOBALS.hostname }}:{{ GLOBALS.node_ip }}
{% if DOCKER.containers['so-strelka-coordinator'].extra_hosts %}
{% for XTRAHOST in DOCKER.containers['so-strelka-coordinator'].extra_hosts %}
- {{ XTRAHOST }}
{% endfor %}
{% endif %}
- port_bindings:
{% for BINDING in DOCKER.containers['so-strelka-coordinator'].port_bindings %}
- {{ BINDING }}
{% endfor %}
{% if DOCKER.containers['so-strelka-coordinator'].extra_env %}
- environment:
{% for XTRAENV in DOCKER.containers['so-strelka-coordinator'].extra_env %}
- {{ XTRAENV }}
{% endfor %}
{% endif %}
{% if DOCKER.containers['so-strelka-coordinator'].custom_bind_mounts %}
- binds:
{% for BIND in DOCKER.containers['so-strelka-coordinator'].custom_bind_mounts %}
- {{ BIND }}
{% endfor %}
{% endif %}
delete_so-strelka-coordinator_so-status.disabled:
file.uncomment:
- name: /opt/so/conf/so-status/so-status.conf

View File

@@ -542,8 +542,7 @@ strelka:
enabled: False
rules:
enabled: True
repos:
- https://github.com/Neo23x0/signature-base
repos: []
excluded:
- apt_flame2_orchestrator.yar
- apt_tetris.yar
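
This default change stops shipping the Neo23x0 signature-base repo out of the box (repos now defaults to an empty list). A deployment that still wants that ruleset could re-add it locally; an illustrative override, assuming the same nesting as the defaults above:

strelka:
  rules:
    repos:
      - https://github.com/Neo23x0/signature-base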

View File

@@ -18,6 +18,11 @@ strelka_filestream:
- binds:
- /opt/so/conf/strelka/filestream/:/etc/strelka/:ro
- /nsm/strelka:/nsm/strelka
{% if DOCKER.containers['so-strelka-filestream'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-strelka-filestream'].custom_bind_mounts %}
- {{ BIND }}
{% endfor %}
{% endif %}
- name: so-strelka-filestream
- networks:
- sobridge:
@@ -25,6 +30,17 @@ strelka_filestream:
- command: strelka-filestream
- extra_hosts:
- {{ GLOBALS.hostname }}:{{ GLOBALS.node_ip }}
{% if DOCKER.containers['so-strelka-filestream'].extra_hosts %}
{% for XTRAHOST in DOCKER.containers['so-strelka-filestream'].extra_hosts %}
- {{ XTRAHOST }}
{% endfor %}
{% endif %}
{% if DOCKER.containers['so-strelka-filestream'].extra_env %}
- environment:
{% for XTRAENV in DOCKER.containers['so-strelka-filestream'].extra_env %}
- {{ XTRAENV }}
{% endfor %}
{% endif %}
delete_so-strelka-filestream_so-status.disabled:
file.uncomment:

View File

@@ -18,6 +18,11 @@ strelka_frontend:
- binds:
- /opt/so/conf/strelka/frontend/:/etc/strelka/:ro
- /nsm/strelka/log/:/var/log/strelka/:rw
{% if DOCKER.containers['so-strelka-frontend'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-strelka-frontend'].custom_bind_mounts %}
- {{ BIND }}
{% endfor %}
{% endif %}
- privileged: True
- name: so-strelka-frontend
- networks:
@@ -26,10 +31,21 @@ strelka_frontend:
- command: strelka-frontend
- extra_hosts:
- {{ GLOBALS.hostname }}:{{ GLOBALS.node_ip }}
{% if DOCKER.containers['so-strelka-frontend'].extra_hosts %}
{% for XTRAHOST in DOCKER.containers['so-strelka-frontend'].extra_hosts %}
- {{ XTRAHOST }}
{% endfor %}
{% endif %}
- port_bindings:
{% for BINDING in DOCKER.containers['so-strelka-frontend'].port_bindings %}
- {{ BINDING }}
{% endfor %}
{% if DOCKER.containers['so-strelka-frontend'].extra_env %}
- environment:
{% for XTRAENV in DOCKER.containers['so-strelka-frontend'].extra_env %}
- {{ XTRAENV }}
{% endfor %}
{% endif %}
delete_so-strelka-frontend_so-status.disabled:
file.uncomment:

View File

@@ -22,10 +22,27 @@ strelka_gatekeeper:
- entrypoint: redis-server --save "" --appendonly no --maxmemory-policy allkeys-lru
- extra_hosts:
- {{ GLOBALS.hostname }}:{{ GLOBALS.node_ip }}
{% if DOCKER.containers['so-strelka-gatekeeper'].extra_hosts %}
{% for XTRAHOST in DOCKER.containers['so-strelka-gatekeeper'].extra_hosts %}
- {{ XTRAHOST }}
{% endfor %}
{% endif %}
- port_bindings:
{% for BINDING in DOCKER.containers['so-strelka-gatekeeper'].port_bindings %}
- {{ BINDING }}
{% endfor %}
{% if DOCKER.containers['so-strelka-gatekeeper'].custom_bind_mounts %}
- binds:
{% for BIND in DOCKER.containers['so-strelka-gatekeeper'].custom_bind_mounts %}
- {{ BIND }}
{% endfor %}
{% endif %}
{% if DOCKER.containers['so-strelka-gatekeeper'].extra_env %}
- environment:
{% for XTRAENV in DOCKER.containers['so-strelka-gatekeeper'].extra_env %}
- {{ XTRAENV }}
{% endfor %}
{% endif %}
delete_so-strelka-gatekeeper_so-status.disabled:
file.uncomment:

View File

@@ -17,6 +17,11 @@ strelka_manager:
- image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-strelka-manager:{{ GLOBALS.so_version }}
- binds:
- /opt/so/conf/strelka/manager/:/etc/strelka/:ro
{% if DOCKER.containers['so-strelka-manager'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-strelka-manager'].custom_bind_mounts %}
- {{ BIND }}
{% endfor %}
{% endif %}
- name: so-strelka-manager
- networks:
- sobridge:
@@ -24,6 +29,17 @@ strelka_manager:
- command: strelka-manager
- extra_hosts:
- {{ GLOBALS.hostname }}:{{ GLOBALS.node_ip }}
{% if DOCKER.containers['so-strelka-manager'].extra_hosts %}
{% for XTRAHOST in DOCKER.containers['so-strelka-manager'].extra_hosts %}
- {{ XTRAHOST }}
{% endfor %}
{% endif %}
{% if DOCKER.containers['so-strelka-manager'].extra_env %}
- environment:
{% for XTRAENV in DOCKER.containers['so-strelka-manager'].extra_env %}
- {{ XTRAENV }}
{% endfor %}
{% endif %}
delete_so-strelka-manager_so-status.disabled:
file.uncomment:

View File

@@ -0,0 +1,21 @@
#!/bin/bash
NOROOT=1
. /usr/sbin/so-common
{%- set proxy = salt['pillar.get']('manager:proxy') %}
# Download the rules from the internet
{%- if proxy %}
export http_proxy={{ proxy }}
export https_proxy={{ proxy }}
export no_proxy={{ salt['pillar.get']('manager:no_proxy') }}
{%- endif %}
mkdir -p /tmp/yara
cd /tmp/yara
git clone https://github.com/Security-Onion-Solutions/securityonion-yara.git
mkdir -p /nsm/rules/yara
rsync -shav --progress /tmp/yara/securityonion-yara/yara /nsm/rules/
cd /tmp
rm -rf /tmp/yara

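A sketch of what the rendered script looks like when a proxy is configured (hypothetical pillar values: manager:proxy set to http://192.0.2.10:3128 and manager:no_proxy set to localhost,127.0.0.1):

    export http_proxy=http://192.0.2.10:3128
    export https_proxy=http://192.0.2.10:3128
    export no_proxy=localhost,127.0.0.1

With no manager:proxy pillar set, the exports are omitted entirely and git clones directly.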
View File

@@ -4,10 +4,12 @@ suricata:
threading:
set-cpu-affinity: "no"
cpu-affinity:
- management-cpu-set:
cpu: []
- worker-cpu-set:
cpu: []
management-cpu-set:
cpu:
- 1
worker-cpu-set:
cpu:
- 2-3
mode: exclusive
prio:
default: high
@@ -22,32 +24,61 @@ suricata:
ring-size: 5000
vars:
address-groups:
HOME_NET: "[192.168.0.0/16,10.0.0.0/8,172.16.0.0/12]"
EXTERNAL_NET: "any"
HTTP_SERVERS: "$HOME_NET"
SMTP_SERVERS: "$HOME_NET"
SQL_SERVERS: "$HOME_NET"
DNS_SERVERS: "$HOME_NET"
TELNET_SERVERS: "$HOME_NET"
AIM_SERVERS: "$EXTERNAL_NET"
DC_SERVERS: "$HOME_NET"
DNP3_SERVER: "$HOME_NET"
DNP3_CLIENT: "$HOME_NET"
MODBUS_CLIENT: "$HOME_NET"
MODBUS_SERVER: "$HOME_NET"
ENIP_CLIENT: "$HOME_NET"
ENIP_SERVER: "$HOME_NET"
HOME_NET:
- 192.168.0.0/16
- 10.0.0.0/8
- 172.16.0.0/12
EXTERNAL_NET:
- any
HTTP_SERVERS:
- $HOME_NET
SMTP_SERVERS:
- $HOME_NET
SQL_SERVERS:
- $HOME_NET
DNS_SERVERS:
- $HOME_NET
TELNET_SERVERS:
- $HOME_NET
AIM_SERVERS:
- $EXTERNAL_NET
DC_SERVERS:
- $HOME_NET
DNP3_SERVER:
- $HOME_NET
DNP3_CLIENT:
- $HOME_NET
MODBUS_CLIENT:
- $HOME_NET
MODBUS_SERVER:
- $HOME_NET
ENIP_CLIENT:
- $HOME_NET
ENIP_SERVER:
- $HOME_NET
port-groups:
HTTP_PORTS: "80"
SHELLCODE_PORTS: "!80"
ORACLE_PORTS: "1521"
SSH_PORTS: "22"
DNP3_PORTS: "20000"
MODBUS_PORTS: "502"
FILE_DATA_PORTS: "[$HTTP_PORTS,110,143]"
FTP_PORTS: "21"
VXLAN_PORTS: "4789"
TEREDO_PORTS: "3544"
HTTP_PORTS:
- 80
SHELLCODE_PORTS:
- "!80"
ORACLE_PORTS:
- 1521
SSH_PORTS:
- 22
DNP3_PORTS:
- 20000
MODBUS_PORTS:
- 502
FILE_DATA_PORTS:
- $HTTP_PORTS
- 110
- 143
FTP_PORTS:
- 21
VXLAN_PORTS:
- 4789
TEREDO_PORTS:
- 3544
default-log-dir: /var/log/suricata/
stats:
enabled: "yes"
@@ -66,7 +97,7 @@ suricata:
community-id: true
community-id-seed: 0
types:
- alert:
alert:
payload: "no"
payload-buffer-size: 4kb
payload-printable: "yes"

View File

@@ -6,6 +6,8 @@
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'docker/docker.map.jinja' import DOCKER %}
include:
- suricata.config
@@ -17,6 +19,11 @@ so-suricata:
- privileged: True
- environment:
- INTERFACE={{ GLOBALS.sensor.interface }}
{% if DOCKER.containers['so-suricata'].extra_env %}
{% for XTRAENV in DOCKER.containers['so-suricata'].extra_env %}
- {{ XTRAENV }}
{% endfor %}
{% endif %}
- binds:
- /opt/so/conf/suricata/suricata.yaml:/etc/suricata/suricata.yaml:ro
- /opt/so/conf/suricata/threshold.conf:/etc/suricata/threshold.conf:ro
@@ -25,7 +32,18 @@ so-suricata:
- /nsm/suricata/:/nsm/:rw
- /nsm/suricata/extracted:/var/log/suricata/filestore:rw
- /opt/so/conf/suricata/bpf:/etc/suricata/bpf:ro
{% if DOCKER.containers['so-suricata'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-suricata'].custom_bind_mounts %}
- {{ BIND }}
{% endfor %}
{% endif %}
- network_mode: host
{% if DOCKER.containers['so-suricata'].extra_hosts %}
- extra_hosts:
{% for XTRAHOST in DOCKER.containers['so-suricata'].extra_hosts %}
- {{ XTRAHOST }}
{% endfor %}
{% endif %}
- watch:
- file: suriconfig
- file: surithresholding

View File

@@ -23,6 +23,45 @@
{% do SURICATAMERGED.config.pop('af-packet') %}
{% do SURICATAMERGED.config.update({'af-packet': afpacket}) %}
{# eve-log.types is a list but we convert to dict in defaults to work with ui #}
{# below they are converted back to lists #}
{% load_yaml as evelogtypes %}
{% for le, ld in SURICATAMERGED.config.outputs['eve-log'].types.items() %}
- {{ le }}: {{ ld }}
{% endfor %}
{% endload %}
{% do SURICATAMERGED.config.outputs['eve-log'].pop('types') %}
{% do SURICATAMERGED.config.outputs['eve-log'].update({'types': evelogtypes}) %}
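{# e.g. the dict {'alert': {...}, 'stats': {...}} becomes the list [{'alert': {...}}, {'stats': {...}}] #}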
{# threading.cpu-affinity is a list but we convert to dict in defaults to work with ui #}
{# below they are converted back to lists #}
{% load_yaml as cpuaffinity %}
{% for le, ld in SURICATAMERGED.config.threading['cpu-affinity'].items() %}
- {{ le }}: {{ ld }}
{% endfor %}
{% endload %}
{% do SURICATAMERGED.config.threading.pop('cpu-affinity') %}
{% do SURICATAMERGED.config.threading.update({'cpu-affinity': cpuaffinity}) %}
{# Find the index of eve-log and file-store in suricata_mdengine.suricata.config.outputs #}
{# update outputs eve-log.types and filestore with config for Suricata metadata engine #}
{% if GLOBALS.md_engine == 'SURICATA' %}
{% for li in suricata_mdengine.suricata.config.outputs %}
{% if 'eve-log' in li.keys() %}
{% do surimeta_evelog_index.append(loop.index0) %}
{% endif %}
{% if 'file-store' in li.keys() %}
{% do surimeta_filestore_index.append(loop.index0) %}
{% endif %}
{% endfor %}
{% set surimeta_evelog_index = surimeta_evelog_index[0] %}
{% set surimeta_filestore_index = surimeta_filestore_index[0] %}
{% do SURICATAMERGED.config.outputs['eve-log'].types.extend(suricata_mdengine.suricata.config.outputs[surimeta_evelog_index]['eve-log'].types) %}
{% do SURICATAMERGED.config.outputs['file-store'].update({'enabled':suricata_mdengine.suricata.config.outputs[surimeta_filestore_index]['file-store']['enabled']}) %}
{% endif %}
{# outputs is a list but we convert to dict in defaults to work with ui #}
{# below they are converted back to lists #}
{% load_yaml as outputs %}
{% for le, ld in SURICATAMERGED.config.outputs.items() %}
- {{ le }}: {{ ld }}
@@ -31,31 +70,22 @@
{% do SURICATAMERGED.config.pop('outputs') %}
{% do SURICATAMERGED.config.update({'outputs': outputs}) %}
{# Find the indexes of eve-log and file-store so they can be updated later #}
{% for li in SURICATAMERGED.config.outputs %}
{% if 'eve-log' in li.keys() %}
{% do default_evelog_index.append(loop.index0) %}
{% endif %}
{% if 'file-store' in li.keys() %}
{% do default_filestore_index.append(loop.index0) %}
{% endif %}
{% endfor %}
{% set default_evelog_index = default_evelog_index[0] %}
{% set default_filestore_index = default_filestore_index[0] %}
{# Find the indexes of eve-log and file-store so they can be grabbed later #}
{% for li in suricata_mdengine.suricata.config.outputs %}
{% if 'eve-log' in li.keys() %}
{% do surimeta_evelog_index.append(loop.index0) %}
{% endif %}
{% if 'file-store' in li.keys() %}
{% do surimeta_filestore_index.append(loop.index0) %}
{% endif %}
{% endfor %}
{% set surimeta_evelog_index = surimeta_evelog_index[0] %}
{% set surimeta_filestore_index = surimeta_filestore_index[0] %}
{% if GLOBALS.md_engine == 'SURICATA' %}
{% do SURICATAMERGED.config.outputs[default_evelog_index]['eve-log'].types.extend(suricata_mdengine.suricata.config.outputs[surimeta_evelog_index]['eve-log'].types) %}
{% do SURICATAMERGED.config.outputs[default_filestore_index]['file-store'].update({'enabled':suricata_mdengine.suricata.config.outputs[surimeta_filestore_index]['file-store']['enabled']}) %}
{# change address-groups vars from list to comma separated string #}
{% for k, v in SURICATAMERGED.config.vars['address-groups'].items() %}
{# if address-group value is a list #}
{% if v is iterable and (v is not string and v is not mapping and v | length > 1) %}
{% do SURICATAMERGED.config.vars['address-groups'].update({k: '[' ~ v | join(',') ~ ']'}) %}
{% else %}
{% do SURICATAMERGED.config.vars['address-groups'].update({k: v[0]}) %}
{% endif %}
{% endfor %}
{# change port-groups vars from list to comma separated string #}
{% for k, v in SURICATAMERGED.config.vars['port-groups'].items() %}
{# if port-group value is a list #}
{% if v is iterable and (v is not string and v is not mapping and v | length > 1) %}
{% do SURICATAMERGED.config.vars['port-groups'].update({k: '[' ~ v | join(',') ~ ']'}) %}
{% else %}
{% do SURICATAMERGED.config.vars['port-groups'].update({k: v[0]}) %}
{% endif %}
{% endfor %}

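For illustration, using the shipped defaults: the list form

    HOME_NET:
      - 192.168.0.0/16
      - 10.0.0.0/8
      - 172.16.0.0/12
    EXTERNAL_NET:
      - any

is rendered back to the string form Suricata expects, HOME_NET: "[192.168.0.0/16,10.0.0.0/8,172.16.0.0/12]", while the single-element list collapses to the bare value EXTERNAL_NET: any via the else branch.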
View File

@@ -12,10 +12,54 @@ suricata:
title: SIDS
helpLink: suricata.html
config:
af-packet:
interface:
description: The network interface that Suricata will monitor.
helpLink: suricata.html
cluster-id:
advanced: True
cluster-type:
advanced: True
regex: ^(cluster_flow|cluster_qm)$
defrag:
advanced: True
regex: ^(yes|no)$
use-mmap:
advanced: True
readonly: True
threads:
description: The number of worker threads.
helpLink: suricata.html
forcedType: int
tpacket-v3:
advanced: True
readonly: True
ring-size:
description: Size of the per-thread ring buffer, in packets.
forcedType: int
helpLink: suricata.html
threading:
set-cpu-affinity:
description: Bind (yes) or unbind (no) management and worker threads to a core or range of cores.
regex: ^(yes|no)$
helpLink: suricata.html
cpu-affinity:
management-cpu-set:
cpu:
description: Bind management threads to a core or range of cores. This can be a single core, a list of cores, or a list of core ranges. set-cpu-affinity must be set to 'yes' for this to be used.
forcedType: "[]string"
helpLink: suricata.html
worker-cpu-set:
cpu:
description: Bind worker threads to a core or range of cores. This can be a single core, a list of cores, or a list of core ranges. set-cpu-affinity must be set to 'yes' for this to be used.
forcedType: "[]string"
helpLink: suricata.html
vars:
address-groups:
HOME_NET:
description: List of hosts or networks.
regex: ^(([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))?)?$
regexFailureMessage: You must enter a valid IP address or CIDR.
helpLink: suricata.html
EXTERNAL_NET:
description: List of hosts or networks.
@@ -92,6 +136,8 @@ suricata:
helpLink: suricata.html
outputs:
eve-log:
types:
alert:
xff:
enabled:
description: Enable X-Forward-For support.

View File

@@ -6,6 +6,8 @@
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'docker/docker.map.jinja' import DOCKER %}
include:
- telegraf.config
@@ -22,6 +24,11 @@ so-telegraf:
- HOST_SYS=/host/sys
- HOST_MOUNT_PREFIX=/host
- GODEBUG=x509ignoreCN=0
{% if DOCKER.containers['so-telegraf'].extra_env %}
{% for XTRAENV in DOCKER.containers['so-telegraf'].extra_env %}
- {{ XTRAENV }}
{% endfor %}
{% endif %}
- network_mode: host
- init: True
- binds:
@@ -47,6 +54,17 @@ so-telegraf:
- /opt/so/log/suricata:/var/log/suricata:ro
- /opt/so/log/raid:/var/log/raid:ro
- /opt/so/log/sostatus:/var/log/sostatus:ro
{% if DOCKER.containers['so-telegraf'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-telegraf'].custom_bind_mounts %}
- {{ BIND }}
{% endfor %}
{% endif %}
{% if DOCKER.containers['so-telegraf'].extra_hosts %}
- extra_hosts:
{% for XTRAHOST in DOCKER.containers['so-telegraf'].extra_hosts %}
- {{ XTRAHOST }}
{% endfor %}
{% endif %}
- watch:
- file: tgrafconf
- file: tgrafsyncscripts

View File

@@ -76,7 +76,10 @@ zeek:
- LogAscii::use_json = T;
- CaptureLoss::watch_interval = 5 mins;
networks:
HOME_NET: 192.168.0.0/16,10.0.0.0/8,172.16.0.0/12
HOME_NET:
- 192.168.0.0/16
- 10.0.0.0/8
- 172.16.0.0/12
file_extraction:
- application/x-dosexec: exe
- application/pdf: pdf

View File

@@ -6,6 +6,8 @@
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'docker/docker.map.jinja' import DOCKER %}
include:
- zeek.config
@@ -32,7 +34,24 @@ so-zeek:
- /opt/so/conf/zeek/policy/cve-2020-0601:/opt/zeek/share/zeek/policy/cve-2020-0601:ro
- /opt/so/conf/zeek/policy/intel:/opt/zeek/share/zeek/policy/intel:rw
- /opt/so/conf/zeek/bpf:/opt/zeek/etc/bpf:ro
{% if DOCKER.containers['so-zeek'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-zeek'].custom_bind_mounts %}
- {{ BIND }}
{% endfor %}
{% endif %}
- network_mode: host
{% if DOCKER.containers['so-zeek'].extra_hosts %}
- extra_hosts:
{% for XTRAHOST in DOCKER.containers['so-zeek'].extra_hosts %}
- {{ XTRAHOST }}
{% endfor %}
{% endif %}
{% if DOCKER.containers['so-zeek'].extra_env %}
- environment:
{% for XTRAENV in DOCKER.containers['so-zeek'].extra_env %}
- {{ XTRAENV }}
{% endfor %}
{% endif %}
- watch:
- file: /opt/so/conf/zeek/local.zeek
- file: /opt/so/conf/zeek/node.cfg

View File

@@ -1,5 +1,5 @@
{%- if NETWORKS.HOME_NET %}
{%- for HN in NETWORKS.HOME_NET.split(',') %}
{%- for HN in NETWORKS.HOME_NET %}
{{ HN }}
{%- endfor %}
{%- endif %}

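With NETWORKS.HOME_NET now a list (per the defaults change above: 192.168.0.0/16, 10.0.0.0/8, 172.16.0.0/12), the template renders one network per line:

    192.168.0.0/16
    10.0.0.0/8
    172.16.0.0/12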
View File

@@ -21,6 +21,15 @@ zeek:
forcedType: "[]string"
advanced: True
helpLink: zeek.html
networks:
HOME_NET:
description: List of IP addresses or CIDR blocks that define the HOME_NET.
forcedType: "[]string"
advanced: False
helpLink: zeek.html
multiline: True
regex: ^(([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))?)?$
regexFailureMessage: You must enter a valid IP address or CIDR.
node:
lb_procs:
description: Contains the number of CPU cores or workers used by Zeek. This setting should only be applied to individual nodes and will be ignored if CPU affinity is enabled.

View File

@@ -978,8 +978,8 @@ download_elastic_agent_artifacts() {
logCmd "tar -xf /nsm/elastic-fleet/artifacts/beats/elastic-agent_SO-$SOVERSION.tar.gz -C /nsm/elastic-fleet/artifacts/beats/elastic-agent/"
else
logCmd "mkdir -p /nsm/elastic-fleet/artifacts/beats/elastic-agent/"
logCmd "curl --retry 5 --retry-delay 60 https://repo.securityonion.net/file/so-repo/prod/2.4/elasticagent/elastic-agent_SO-$SOVERSION.tar.gz --output /nsm/elastic-fleet/artifacts/elastic-agent_SO-$SOVERSION.tar.gz"
logCmd "curl --retry 5 --retry-delay 60 https://repo.securityonion.net/file/so-repo/prod/2.4/elasticagent/elastic-agent_SO-$SOVERSION.md5 --output /nsm/elastic-fleet/artifacts/elastic-agent_SO-$SOVERSION.md5"
retry 15 10 "curl --fail --retry 5 --retry-delay 15 -L https://repo.securityonion.net/file/so-repo/prod/2.4/elasticagent/elastic-agent_SO-$SOVERSION.tar.gz --output /nsm/elastic-fleet/artifacts/elastic-agent_SO-$SOVERSION.tar.gz" "" ""
retry 15 10 "curl --fail --retry 5 --retry-delay 15 -L https://repo.securityonion.net/file/so-repo/prod/2.4/elasticagent/elastic-agent_SO-$SOVERSION.md5 --output /nsm/elastic-fleet/artifacts/elastic-agent_SO-$SOVERSION.md5" "" ""
SOURCEHASH=$(md5sum /nsm/elastic-fleet/artifacts/elastic-agent_SO-$SOVERSION.tar.gz | awk '{ print $1 }')
HASH=$(cat /nsm/elastic-fleet/artifacts/elastic-agent_SO-$SOVERSION.md5)
@@ -1542,15 +1542,9 @@ create_strelka_pillar() {
"strelka:"\
" enabled: $STRELKA"\
" rules: 1" > "$strelka_pillar_file"
if [[ $is_airgap ]]; then
printf '%s\n'\
" repos:"\
" - 'https://$HOSTNAME/repo/rules/strelka'" >> "$strelka_pillar_file"
else
printf '%s\n'\
" repos:"\
" - 'https://github.com/Neo23x0/signature-base'" >> "$strelka_pillar_file"
fi
" - 'https://$HOSTNAME:7788/yara'" >> "$strelka_pillar_file"
}
backup_pillar() {

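For reference, on a manager with the hypothetical hostname "manager" and Strelka enabled ($STRELKA=1), the simplified function would now write a pillar along these lines, so both airgap and internet-connected installs pull YARA rules from the manager's own port 7788 endpoint:

    strelka:
      enabled: 1
      rules: 1
      repos:
        - 'https://manager:7788/yara'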
View File

@@ -644,6 +644,16 @@ if ! [[ -f $install_opt_file ]]; then
logCmd "salt-call state.apply -l info manager"
logCmd "salt-call state.apply influxdb -l info"
logCmd "salt-call state.highstate -l info"
if [[ ! $is_airgap ]]; then
title "Downloading IDS Rules"
logCmd "so-rule-update"
title "Restarting Suricata to pick up the new rules"
logCmd "so-suricata-restart"
title "Downloading YARA rules"
logCmd "runuser -l socore -c 'so-yara-update'"
title "Restarting Strelka to use new rules"
logCmd "so-strelka-restart"
fi
title "Setting up Kibana Default Space"
logCmd "so-kibana-space-defaults"
add_web_user

View File

@@ -44,7 +44,8 @@ log_has_errors() {
grep -vE "Exception in callback None" | \
grep -vE "deprecation: ERROR" | \
grep -vE "code: 100" | \
grep -vE "/nsm/repo/rules/sigma/rules*" | \
grep -vE "/nsm/rules/sigma*" | \
grep -vE "/nsm/rules/yara*" | \
grep -vE "Running scope as unit" &> "$error_log"
if [[ $? -eq 0 ]]; then