diff --git a/pillar/firewall/osquery_endpoint.sls b/pillar/firewall/osquery_endpoint.sls
new file mode 100644
index 000000000..cfc6051b8
--- /dev/null
+++ b/pillar/firewall/osquery_endpoint.sls
@@ -0,0 +1,3 @@
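+# IPs allowed to reach the Fleet/osquery endpoint on TCP/8080; consumed by the osquery_endpoint loop in salt/firewall/init.sls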
+osquery_endpoint:
+ - 127.0.0.1
+
diff --git a/pillar/top.sls b/pillar/top.sls
index b9117b19c..bc68aa644 100644
--- a/pillar/top.sls
+++ b/pillar/top.sls
@@ -10,6 +10,7 @@ base:
- static
- firewall.*
- data.*
+ - auth
'G@role:so-eval':
- masters.{{ grains.host }}
@@ -17,6 +18,7 @@ base:
- firewall.*
- data.*
- brologs
+ - auth
'G@role:so-node':
- nodes.{{ grains.host }}
diff --git a/salt/ca/files/signing_policies.conf b/salt/ca/files/signing_policies.conf
index 04724ef70..a6ecdd4c3 100644
--- a/salt/ca/files/signing_policies.conf
+++ b/salt/ca/files/signing_policies.conf
@@ -51,3 +51,16 @@ x509_signing_policies:
- authorityKeyIdentifier: keyid,issuer:always
- days_valid: 3000
- copypath: /etc/pki/issued_certs/
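+ # Signing policy for the Fleet server certificate mounted into so-fleet (see salt/fleet/init.sls)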
+ fleet:
+ - minions: '*'
+ - signing_private_key: /etc/pki/ca.key
+ - signing_cert: /etc/pki/ca.crt
+ - C: US
+ - ST: Utah
+ - L: Salt Lake City
+ - basicConstraints: "critical CA:false"
+ - keyUsage: "critical keyEncipherment"
+ - subjectKeyIdentifier: hash
+ - authorityKeyIdentifier: keyid,issuer:always
+ - days_valid: 3000
+ - copypath: /etc/pki/issued_certs/
diff --git a/salt/common/grafana/etc/grafana.ini b/salt/common/grafana/etc/grafana.ini
index adbbeb316..0327b8bc5 100644
--- a/salt/common/grafana/etc/grafana.ini
+++ b/salt/common/grafana/etc/grafana.ini
@@ -46,7 +46,7 @@
# The full public facing url you use in browser, used for redirects and emails
# If you use reverse proxy and sub path specify full url (with sub path)
-;root_url = http://localhost:3000
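+# Grafana is served under the /grafana/ sub path behind nginx (see salt/common/nginx/nginx.conf.so-*), so redirects and emails use that base URL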
+root_url = %(protocol)s://%(domain)s/grafana/
# Log web requests
;router_logging = false
diff --git a/salt/common/grafana/grafana_dashboards/forward_nodes/sensor.json b/salt/common/grafana/grafana_dashboards/forward_nodes/sensor.json
index af541d0ec..83a1fc9e6 100644
--- a/salt/common/grafana/grafana_dashboards/forward_nodes/sensor.json
+++ b/salt/common/grafana/grafana_dashboards/forward_nodes/sensor.json
@@ -1272,7 +1272,7 @@
"thresholds": "259200,432000",
"title": "{{ SERVERNAME }} - PCAP Retention",
"type": "singlestat",
- "valueFontSize": "80%",
+ "valueFontSize": "70%",
"valueMaps": [
{
"op": "=",
@@ -1280,7 +1280,8 @@
"value": "null"
}
],
- "valueName": "current"
+ "valueName": "current",
+ "decimals": 1
},
{
"cacheTimeout": null,
diff --git a/salt/common/init.sls b/salt/common/init.sls
index f0b4fd8fa..00d7f35d0 100644
--- a/salt/common/init.sls
+++ b/salt/common/init.sls
@@ -102,7 +102,7 @@ nginxtmp:
# Start the core docker
so-core:
docker_container.running:
- - image: soshybridhunter/so-core:HH1.0.3
+ - image: soshybridhunter/so-core:HH1.0.5
- hostname: so-core
- user: socore
- binds:
@@ -372,6 +372,7 @@ so-grafana:
- user: socore
- binds:
- /nsm/grafana:/var/lib/grafana:rw
+ - /opt/so/conf/grafana/etc/grafana.ini:/etc/grafana/grafana.ini:ro
- /opt/so/conf/grafana/etc/datasources:/etc/grafana/provisioning/datasources:rw
- /opt/so/conf/grafana/etc/dashboards:/etc/grafana/provisioning/dashboards:rw
- /opt/so/conf/grafana/grafana_dashboards:/etc/grafana/grafana_dashboards:rw
diff --git a/salt/common/nginx/nginx.conf.so-eval b/salt/common/nginx/nginx.conf.so-eval
index bc8dbf39b..50f48497d 100644
--- a/salt/common/nginx/nginx.conf.so-eval
+++ b/salt/common/nginx/nginx.conf.so-eval
@@ -87,7 +87,20 @@ http {
# try_files $uri $uri.html /index.html;
# }
- location / {
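+ # Grafana, Kibana, and Fleet are each proxied under their own sub path; the rewrites strip the prefix before passing requests to the backend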
+ location /grafana/ {
+ rewrite /grafana/(.*) /$1 break;
+ proxy_pass http://{{ masterip }}:3000/;
+ proxy_read_timeout 90;
+ proxy_connect_timeout 90;
+ proxy_set_header Host $host;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header Proxy "";
+
+ }
+
+ location /kibana/ {
+ rewrite /kibana/(.*) /$1 break;
proxy_pass http://{{ masterip }}:5601/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
@@ -98,6 +111,31 @@ http {
}
+ location /api/ {
+ proxy_pass https://{{ masterip }}:8080/api/;
+ proxy_read_timeout 90;
+ proxy_connect_timeout 90;
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection "Upgrade";
+ proxy_set_header Host $host;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header Proxy "";
+
+ }
+
+ location /fleet/ {
+ rewrite /fleet/(.*) /$1 break;
+ proxy_pass https://{{ masterip }}:8080/;
+ proxy_read_timeout 90;
+ proxy_connect_timeout 90;
+ proxy_set_header Host $host;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header Proxy "";
+
+ }
+
error_page 404 /404.html;
location = /40x.html {
}
diff --git a/salt/common/nginx/nginx.conf.so-master b/salt/common/nginx/nginx.conf.so-master
index bc8dbf39b..50f48497d 100644
--- a/salt/common/nginx/nginx.conf.so-master
+++ b/salt/common/nginx/nginx.conf.so-master
@@ -87,7 +87,20 @@ http {
# try_files $uri $uri.html /index.html;
# }
- location / {
+ location /grafana/ {
+ rewrite /grafana/(.*) /$1 break;
+ proxy_pass http://{{ masterip }}:3000/;
+ proxy_read_timeout 90;
+ proxy_connect_timeout 90;
+ proxy_set_header Host $host;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header Proxy "";
+
+ }
+
+ location /kibana/ {
+ rewrite /kibana/(.*) /$1 break;
proxy_pass http://{{ masterip }}:5601/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
@@ -98,6 +111,31 @@ http {
}
+ location /api/ {
+ proxy_pass https://{{ masterip }}:8080/api/;
+ proxy_read_timeout 90;
+ proxy_connect_timeout 90;
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection "Upgrade";
+ proxy_set_header Host $host;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header Proxy "";
+
+ }
+
+ location /fleet/ {
+ rewrite /fleet/(.*) /$1 break;
+ proxy_pass https://{{ masterip }}:8080/;
+ proxy_read_timeout 90;
+ proxy_connect_timeout 90;
+ proxy_set_header Host $host;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header Proxy "";
+
+ }
+
error_page 404 /404.html;
location = /40x.html {
}
diff --git a/salt/common/nginx/nginx.conf.so-SENSOR b/salt/common/nginx/nginx.conf.so-sensor
similarity index 100%
rename from salt/common/nginx/nginx.conf.so-SENSOR
rename to salt/common/nginx/nginx.conf.so-sensor
diff --git a/salt/common/telegraf/etc/telegraf.conf b/salt/common/telegraf/etc/telegraf.conf
index 3d1bf30dc..cf12f89bf 100644
--- a/salt/common/telegraf/etc/telegraf.conf
+++ b/salt/common/telegraf/etc/telegraf.conf
@@ -28,7 +28,7 @@
# Configuration for telegraf agent
[agent]
## Default data collection interval for all inputs
- interval = "10s"
+ interval = "30s"
## Rounds collection interval to 'interval'
## ie, if interval="10s" then always collect on :00, :10, :20, etc.
round_interval = true
@@ -620,10 +620,11 @@
{% if grains['role'] == 'so-master' %}
[[inputs.exec]]
commands = [
- "/scripts/redis.sh"
+ "/scripts/redis.sh",
+ "/scripts/influxdbsize.sh"
]
data_format = "influx"
-{% elif grains['role'] == 'so-SENSOR' %}
+{% elif grains['role'] == 'so-sensor' %}
[[inputs.exec]]
commands = [
"/scripts/stenoloss.sh",
@@ -642,7 +643,8 @@
"/scripts/suriloss.sh",
"/scripts/checkfiles.sh",
"/scripts/broloss.sh",
- "/scripts/oldpcap.sh"
+ "/scripts/oldpcap.sh",
+ "/scripts/influxdbsize.sh"
]
data_format = "influx"
{% endif %}
diff --git a/salt/common/telegraf/scripts/influxdbsize.sh b/salt/common/telegraf/scripts/influxdbsize.sh
new file mode 100644
index 000000000..a469da8ae
--- /dev/null
+++ b/salt/common/telegraf/scripts/influxdbsize.sh
@@ -0,0 +1,5 @@
+#!/bin/bash
+
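+# Report the size of the InfluxDB data directory (mounted at /host/nsm/influxdb in the telegraf container) in InfluxDB line protocol for the exec input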
+INFLUXSIZE=$(du -s -B1 /host/nsm/influxdb | awk '{print $1}')
+
+echo "influxsize bytes=$INFLUXSIZE"
diff --git a/salt/elasticsearch/files/curator/action/close.yml b/salt/curator/files/action/close.yml
similarity index 65%
rename from salt/elasticsearch/files/curator/action/close.yml
rename to salt/curator/files/action/close.yml
index a9ca54335..ab4d07050 100644
--- a/salt/elasticsearch/files/curator/action/close.yml
+++ b/salt/curator/files/action/close.yml
@@ -1,3 +1,13 @@
+{% if grains['role'] == 'so-node' %}
+
+{%- set cur_close_days = salt['pillar.get']('node:cur_close_days', '') -%}
+
+{% elif grains['role'] == 'so-eval' %}
+
+{%- set cur_close_days = salt['pillar.get']('master:cur_close_days', '') -%}
+
+{%- endif %}
+
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
@@ -9,7 +19,7 @@ actions:
1:
action: close
description: >-
- Close indices older than 2 days (based on index name), for logstash-
+ Close indices older than {{cur_close_days}} days (based on index name), for logstash-
prefixed indices.
options:
delete_aliases: False
@@ -26,5 +36,5 @@ actions:
direction: older
timestring: '%Y.%m.%d'
unit: days
- unit_count: 2
+ unit_count: {{cur_close_days}}
exclude:
diff --git a/salt/elasticsearch/files/curator/action/delete.yml b/salt/curator/files/action/delete.yml
similarity index 62%
rename from salt/elasticsearch/files/curator/action/delete.yml
rename to salt/curator/files/action/delete.yml
index cb55ec0c3..1b3440f8b 100644
--- a/salt/elasticsearch/files/curator/action/delete.yml
+++ b/salt/curator/files/action/delete.yml
@@ -1,3 +1,12 @@
+{% if grains['role'] == 'so-node' %}
+
+{%- set log_size_limit = salt['pillar.get']('node:log_size_limit', '') -%}
+
+{% elif grains['role'] == 'so-eval' %}
+
+{%- set log_size_limit = salt['pillar.get']('master:log_size_limit', '') -%}
+
+{%- endif %}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
@@ -9,7 +18,7 @@ actions:
1:
action: delete_indices
description: >-
- Delete indices when $disk_space value (in GB) is exceeded.
+ Delete indices when disk space used exceeds {{log_size_limit}} GB.
options:
ignore_empty_list: True
disable_action: False
@@ -20,4 +29,4 @@ actions:
- filtertype: space
source: creation_date
use_age: True
- disk_space: 43
+ disk_space: {{log_size_limit}}
diff --git a/salt/curator/files/bin/so-curator-close b/salt/curator/files/bin/so-curator-close
new file mode 100644
index 000000000..dff6bbb39
--- /dev/null
+++ b/salt/curator/files/bin/so-curator-close
@@ -0,0 +1,2 @@
+#!/bin/bash
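+# Free disk space first (delete closed indices if log_size_limit is exceeded), then have Curator close indices older than cur_close_days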
+/usr/sbin/so-curator-closed-delete > /dev/null 2>&1; docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/close.yml > /dev/null 2>&1
diff --git a/salt/curator/files/bin/so-curator-closed-delete b/salt/curator/files/bin/so-curator-closed-delete
new file mode 100755
index 000000000..4382a721d
--- /dev/null
+++ b/salt/curator/files/bin/so-curator-closed-delete
@@ -0,0 +1,41 @@
+#!/bin/bash
+#
+# Copyright 2014,2015,2016,2017,2018 Security Onion Solutions, LLC
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+#. /usr/sbin/so-elastic-common
+#. /etc/nsm/securityonion.conf
+
+# If logrotate script doesn't already exist, create it
+#FILE="/etc/logrotate.d/so-curator-closed-delete"
+#if ! [ -f ${FILE} ]; then
+# cat << EOF > ${FILE}
+#/var/log/nsm/so-curator-closed-delete.log {
+# daily
+# rotate 7
+# copytruncate
+# compress
+# missingok
+# notifempty
+#}
+#EOF
+#fi
+
+# Avoid starting multiple instances
+if pgrep -f "so-curator-closed-delete-delete" >/dev/null; then
+ echo "Script is already running."
+else
+ /usr/sbin/so-curator-closed-delete-delete
+fi
diff --git a/salt/curator/files/bin/so-curator-closed-delete-delete b/salt/curator/files/bin/so-curator-closed-delete-delete
new file mode 100755
index 000000000..8841c843f
--- /dev/null
+++ b/salt/curator/files/bin/so-curator-closed-delete-delete
@@ -0,0 +1,58 @@
+
+{% if grains['role'] == 'so-node' %}
+
+{%- set ELASTICSEARCH_HOST = salt['pillar.get']('node:mainip', '') -%}
+{%- set ELASTICSEARCH_PORT = salt['pillar.get']('node:es_port', '') -%}
+{%- set LOG_SIZE_LIMIT = salt['pillar.get']('node:log_size_limit', '') -%}
+
+{% elif grains['role'] == 'so-eval' %}
+
+{%- set ELASTICSEARCH_HOST = salt['pillar.get']('master:mainip', '') -%}
+{%- set ELASTICSEARCH_PORT = salt['pillar.get']('master:es_port', '') -%}
+{%- set LOG_SIZE_LIMIT = salt['pillar.get']('master:log_size_limit', '') -%}
+
+{%- endif %}
+
+#!/bin/bash
+#
+# Copyright 2014,2015,2016,2017,2018 Security Onion Solutions, LLC
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+#. /usr/sbin/so-elastic-common
+#. /etc/nsm/securityonion.conf
+
+LOG="/opt/so/log/curator/so-curator-closed-delete.log"
+
+# Check for 2 conditions:
+# 1. Are Elasticsearch indices using more disk space than LOG_SIZE_LIMIT?
+# 2. Are there any closed logstash- indices that we can delete?
+# If both conditions are true, keep on looping until one of the conditions is false.
+while [[ $(du -hs --block-size=1GB /nsm/elasticsearch/nodes | awk '{print $1}' ) -gt "{{LOG_SIZE_LIMIT}}" ]] &&
+curl -s {{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices | grep "^ close logstash-" > /dev/null; do
+
+ # We need to determine OLDEST_INDEX.
+ # First, get the list of closed indices that are prefixed with "logstash-".
+ # For example: logstash-ids-YYYY.MM.DD
+ # Then, sort by date by telling sort to use hyphen as delimiter and then sort on the third field.
+ # Finally, select the first entry in that sorted list.
+ OLDEST_INDEX=$(curl -s {{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices | grep "^ close logstash-" | awk '{print $2}' | sort -t- -k3 | head -1)
+
+ # Now that we've determined OLDEST_INDEX, ask Elasticsearch to delete it.
+ curl -XDELETE {{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/${OLDEST_INDEX}
+
+ # Finally, write a log entry that says we deleted it.
+ echo "$(date) - Used disk space exceeds LOG_SIZE_LIMIT ({{LOG_SIZE_LIMIT}} GB) - Index ${OLDEST_INDEX} deleted ..." >> ${LOG}
+
+done
diff --git a/salt/curator/files/bin/so-curator-delete b/salt/curator/files/bin/so-curator-delete
new file mode 100644
index 000000000..166497855
--- /dev/null
+++ b/salt/curator/files/bin/so-curator-delete
@@ -0,0 +1,2 @@
+#!/bin/bash
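+# Run Curator's delete.yml action (delete indices once log_size_limit GB is exceeded) inside the so-curator container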
+docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/delete.yml > /dev/null 2>&1
diff --git a/salt/elasticsearch/files/curator/curator.yml b/salt/curator/files/curator.yml
similarity index 62%
rename from salt/elasticsearch/files/curator/curator.yml
rename to salt/curator/files/curator.yml
index dc8b69309..aa55836b1 100644
--- a/salt/elasticsearch/files/curator/curator.yml
+++ b/salt/curator/files/curator.yml
@@ -1,9 +1,19 @@
+{% if grains['role'] == 'so-node' %}
+
+{%- set elasticsearch = salt['pillar.get']('node:mainip', '') -%}
+
+{% elif grains['role'] == 'so-eval' %}
+
+{%- set elasticsearch = salt['pillar.get']('master:mainip', '') -%}
+
+{%- endif %}
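+{# Curator connects to the Elasticsearch instance local to this role: the node IP on so-node, the master IP on so-eval #}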
+
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
client:
hosts:
- - elasticsearch
+ - {{elasticsearch}}
port: 9200
url_prefix:
use_ssl: False
diff --git a/salt/curator/init.sls b/salt/curator/init.sls
new file mode 100644
index 000000000..adbf4f6b0
--- /dev/null
+++ b/salt/curator/init.sls
@@ -0,0 +1,136 @@
+{% if grains['role'] == 'so-node' or grains['role'] == 'so-eval' %}
+# Curator
+# Create the group
+curatorgroup:
+ group.present:
+ - name: curator
+ - gid: 934
+
+# Add user
+curator:
+ user.present:
+ - uid: 934
+ - gid: 934
+ - home: /opt/so/conf/curator
+ - createhome: False
+
+# Create the log directory
+curactiondir:
+ file.directory:
+ - name: /opt/so/conf/curator/action
+ - user: 934
+ - group: 939
+ - makedirs: True
+
+curlogdir:
+ file.directory:
+ - name: /opt/so/log/curator
+ - user: 934
+ - group: 939
+
+curcloseconf:
+ file.managed:
+ - name: /opt/so/conf/curator/action/close.yml
+ - source: salt://curator/files/action/close.yml
+ - user: 934
+ - group: 939
+ - template: jinja
+
+curdelconf:
+ file.managed:
+ - name: /opt/so/conf/curator/action/delete.yml
+ - source: salt://curator/files/action/delete.yml
+ - user: 934
+ - group: 939
+ - template: jinja
+
+curconf:
+ file.managed:
+ - name: /opt/so/conf/curator/curator.yml
+ - source: salt://curator/files/curator.yml
+ - user: 934
+ - group: 939
+ - template: jinja
+
+curcloseddel:
+ file.managed:
+ - name: /usr/sbin/so-curator-closed-delete
+ - source: salt://curator/files/bin/so-curator-closed-delete
+ - user: 934
+ - group: 939
+ - mode: 755
+
+curcloseddeldel:
+ file.managed:
+ - name: /usr/sbin/so-curator-closed-delete-delete
+ - source: salt://curator/files/bin/so-curator-closed-delete-delete
+ - user: 934
+ - group: 939
+ - mode: 755
+ - template: jinja
+
+curclose:
+ file.managed:
+ - name: /usr/sbin/so-curator-close
+ - source: salt://curator/files/bin/so-curator-close
+ - user: 934
+ - group: 939
+ - mode: 755
+
+curdel:
+ file.managed:
+ - name: /usr/sbin/so-curator-delete
+ - source: salt://curator/files/bin/so-curator-delete
+ - user: 934
+ - group: 939
+ - mode: 755
+
+/usr/sbin/so-curator-closed-delete:
+ cron.present:
+ - user: root
+ - minute: '*'
+ - hour: '*'
+ - daymonth: '*'
+ - month: '*'
+ - dayweek: '*'
+
+/usr/sbin/so-curator-close:
+ cron.present:
+ - user: root
+ - minute: '*'
+ - hour: '*'
+ - daymonth: '*'
+ - month: '*'
+ - dayweek: '*'
+
+/usr/sbin/so-curator-delete:
+ cron.present:
+ - user: root
+ - minute: '*'
+ - hour: '*'
+ - daymonth: '*'
+ - month: '*'
+ - dayweek: '*'
+
+
+so-curator:
+ docker_container.running:
+ - image: soshybridhunter/so-curator:HH1.0.3
+ - hostname: curator
+ - name: so-curator
+ - user: curator
+ - interactive: True
+ - tty: True
+ - binds:
+ - /opt/so/conf/curator/curator.yml:/etc/curator/config/curator.yml:ro
+ - /opt/so/conf/curator/action/:/etc/curator/action:ro
+ - /opt/so/log/curator:/var/log/curator:rw
+# Begin Curator Cron Jobs
+
+# Close
+# Delete
+# Hot Warm
+# Segment Merge
+
+# End Curator Cron Jobs
+{% endif %}
diff --git a/salt/elastalert/init.sls b/salt/elastalert/init.sls
new file mode 100644
index 000000000..28722fe01
--- /dev/null
+++ b/salt/elastalert/init.sls
@@ -0,0 +1,101 @@
+# Copyright 2014,2015,2016,2017,2018 Security Onion Solutions, LLC
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+{% if grains['role'] == 'so-master' %}
+
+{% set esalert = salt['pillar.get']('master:elastalert', '1') %}
+{% set esip = salt['pillar.get']('master:mainip', '') %}
+{% set esport = salt['pillar.get']('master:es_port', '') %}
+
+
+{% elif grains['role'] == 'so-eval' %}
+
+{% set esalert = salt['pillar.get']('master:elastalert', '1') %}
+{% set esip = salt['pillar.get']('master:mainip', '') %}
+{% set esport = salt['pillar.get']('master:es_port', '') %}
+
+
+{% elif grains['role'] == 'so-node' %}
+
+{% set esalert = salt['pillar.get']('node:elastalert', '0') %}
+
+{% endif %}
+
+# Elastalert
+{% if esalert == 1 %}
+
+# Create the group
+elastagroup:
+ group.present:
+ - name: elastalert
+ - gid: 933
+
+# Add user
+elastalert:
+ user.present:
+ - uid: 933
+ - gid: 933
+ - home: /opt/so/conf/elastalert
+ - createhome: False
+
+elastalogdir:
+ file.directory:
+ - name: /opt/so/log/elastalert
+ - user: 933
+ - group: 939
+ - makedirs: True
+
+elastarules:
+ file.directory:
+ - name: /opt/so/rules/elastalert
+ - user: 933
+ - group: 939
+ - makedirs: True
+
+#elastaconfdir:
+# file.directory:
+# - name: /opt/so/conf/elastalert
+# - user: 933
+# - group: 939
+# - makedirs: True
+
+#elastaconf:
+# file.managed:
+# - name: /opt/so/conf/elastalert/config.yaml
+# - source: salt://elastalert/files/config.yaml
+# - user: 933
+# - group: 939
+# - template: jinja
+
+so-elastalert:
+ docker_container.running:
+ - image: soshybridhunter/so-elastalert:HH1.0.3
+ - hostname: elastalert
+ - name: so-elastalert
+ - user: elastalert
+ - detach: True
+ - binds:
+# - /opt/so/conf/elastalert/config.yaml:/etc/elastalert/conf/elastalert_config.yaml:ro
+ - /opt/so/rules/elastalert:/etc/elastalert/rules/:ro
+ - /opt/so/log/elastalert:/var/log/elastalert:rw
+ - environment:
+ - ELASTICSEARCH_HOST: {{ esip }}
+ - ELASTICSEARCH_PORT: {{ esport }}
+ - ELASTALERT_CONFIG: /etc/elastalert/conf/elastalert_config.yaml
+ - ELASTALERT_SUPERVISOR_CONF: /etc/elastalert/conf/elastalert_supervisord.conf
+ - RULES_DIRECTORY: /etc/elastalert/rules/
+ - LOG_DIR: /var/log/elastalert
+
+{% endif %}
diff --git a/salt/elasticsearch/init.sls b/salt/elasticsearch/init.sls
index 75cc7e5f7..27bc883f4 100644
--- a/salt/elasticsearch/init.sls
+++ b/salt/elasticsearch/init.sls
@@ -18,7 +18,6 @@
{% set esheap = salt['pillar.get']('master:esheap', '') %}
{% set freq = salt['pillar.get']('master:freq', '0') %}
{% set dstats = salt['pillar.get']('master:dstats', '0') %}
-{% set esalert = salt['pillar.get']('master:elastalert', '1') %}
{% elif grains['role'] == 'so-eval' %}
@@ -26,7 +25,6 @@
{% set esheap = salt['pillar.get']('master:esheap', '') %}
{% set freq = salt['pillar.get']('master:freq', '0') %}
{% set dstats = salt['pillar.get']('master:dstats', '0') %}
-{% set esalert = salt['pillar.get']('master:elastalert', '1') %}
{% elif grains['role'] == 'so-node' %}
@@ -34,7 +32,6 @@
{% set esheap = salt['pillar.get']('node:esheap', '') %}
{% set freq = salt['pillar.get']('node:freq', '0') %}
{% set dstats = salt['pillar.get']('node:dstats', '0') %}
-{% set esalert = salt['pillar.get']('node:elastalert', '1') %}
{% endif %}
@@ -150,6 +147,7 @@ so-freq:
docker_container.running:
- image: soshybridhunter/so-freqserver:HH1.0.3
- hostname: freqserver
+ - name: so-freqserver
- user: freqserver
- binds:
- /opt/so/log/freq_server:/var/log/freq_server:rw
@@ -185,137 +183,10 @@ so-domainstats:
docker_container.running:
- image: soshybridhunter/so-domainstats:HH1.0.3
- hostname: domainstats
- - name: domainstats
+ - name: so-domainstats
- user: domainstats
- binds:
- /opt/so/log/domainstats:/var/log/domain_stats
{% endif %}
-
-# Curator
-# Create the group
-curatorgroup:
- group.present:
- - name: curator
- - gid: 934
-
-# Add user
-curator:
- user.present:
- - uid: 934
- - gid: 934
- - home: /opt/so/conf/curator
- - createhome: False
-
-# Create the log directory
-curactiondir:
- file.directory:
- - name: /opt/so/conf/curator/action
- - user: 934
- - group: 939
- - makedirs: True
-
-curlogdir:
- file.directory:
- - name: /opt/so/log/curator
- - user: 934
- - group: 939
-
-curclose:
- file.managed:
- - name: /opt/so/conf/curator/action/close.yml
- - source: salt://elasticsearch/files/curator/action/close.yml
- - user: 934
- - group: 939
- - template: jinja
-
-curdel:
- file.managed:
- - name: /opt/so/conf/curator/action/delete.yml
- - source: salt://elasticsearch/files/curator/action/delete.yml
- - user: 934
- - group: 939
- - template: jinja
-
-curconf:
- file.managed:
- - name: /opt/so/conf/curator/curator.yml
- - source: salt://elasticsearch/files/curator/curator.yml
- - user: 934
- - group: 939
- - template: jinja
-
-so-curator:
- docker_container.running:
- - image: soshybridhunter/so-curator:HH1.0.3
- - hostname: curator
- - name: curator
- - user: curator
- - interactive: True
- - tty: True
- - binds:
- - /opt/so/conf/curator/curator.yml:/etc/curator/config/curator.yml:ro
- - /opt/so/conf/curator/action/:/etc/curator/action:ro
- - /opt/so/log/curator:/var/log/curator:rw
-
-
-# Begin Curator Cron Jobs
-
-# Close
-# Delete
-# Hot Warm
-# Segment Merge
-
-# End Curator Cron Jobs
-
-# Elastalert
-{% if esalert == 1 %}
-
-# Create the group
-elastagroup:
- group.present:
- - name: elastalert
- - gid: 933
-
-# Add user
-elastalert:
- user.present:
- - uid: 933
- - gid: 933
- - home: /opt/so/conf/elastalert
- - createhome: False
-
-elastalogdir:
- file.directory:
- - name: /opt/so/log/elastalert
- - user: 933
- - group: 939
- - makedirs: True
-
-elastarules:
- file.directory:
- - name: /opt/so/rules/elastalert
- - user: 933
- - group: 939
- - makedirs: True
-
-elastaconf:
- file.directory:
- - name: /opt/so/conf/elastalert
- - user: 933
- - group: 939
- - makedirs: True
-
-so-elastalert:
- docker_container.running:
- - image: soshybridhunter/so-elastalert:HH1.0.3
- - hostname: elastalert
- - name: elastalert
- - user: elastalert
- - detach: True
- - binds:
- - /etc/elastalert/rules/:/etc/elastalert/rules/:ro
- - /opt/so/log/elastalert:/var/log/elastalert:rw
-
-{% endif %}
diff --git a/salt/filebeat/etc/filebeat.yml b/salt/filebeat/etc/filebeat.yml
index f8fb5acf5..b7ab91e12 100644
--- a/salt/filebeat/etc/filebeat.yml
+++ b/salt/filebeat/etc/filebeat.yml
@@ -1,6 +1,7 @@
{%- set MASTER = grains['master'] %}
{%- set HOSTNAME = salt['grains.get']('host', '') %}
{%- set BROVER = salt['pillar.get']('static:broversion', 'COMMUNITY') %}
+{%- set WAZUHENABLED = salt['pillar.get']('static:wazuh_enabled', '1') %}
name: {{ HOSTNAME }}
@@ -11,6 +12,7 @@ filebeat.modules:
# List of prospectors to fetch data.
filebeat.prospectors:
#------------------------------ Log prospector --------------------------------
+{%- if grains['role'] == 'so-sensor' or grains['role'] == "so-eval" %}
{%- if BROVER != 'SURICATA' %}
{%- for LOGNAME in salt['pillar.get']('brologs:enabled', '') %}
- type: log
@@ -35,7 +37,29 @@ filebeat.prospectors:
fields_under_root: true
clean_removed: false
close_removed: false
+{%- endif %}
+{%- if WAZUHENABLED == '1' %}
+
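+ # Wazuh manager alerts and archives, bind-mounted into the Filebeat container at /wazuh (see salt/filebeat/init.sls)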
+ - type: log
+ paths:
+ - /wazuh/alerts/alerts.json
+ fields:
+ type: ossec
+ fields_under_root: true
+ clean_removed: false
+ close_removed: false
+
+ - type: log
+ paths:
+ - /wazuh/archives/archives.json
+ fields:
+ type: ossec_archive
+ fields_under_root: true
+ clean_removed: false
+ close_removed: false
+
+{%- endif %}
#----------------------------- Logstash output ---------------------------------
output.logstash:
@@ -51,7 +75,6 @@ output.logstash:
# Set gzip compression level.
compression_level: 3
-
# Enable SSL support. SSL is automatically enabled, if any SSL setting is set.
ssl.enabled: true
@@ -75,7 +98,6 @@ output.logstash:
# Client Certificate Key
ssl.key: "/usr/share/filebeat/filebeat.key"
-
# Elasticsearch template settings
#setup.template.settings:
@@ -152,7 +174,7 @@ output.logstash:
# Sets log level. The default log level is info.
# Available log levels are: error, warning, info, debug
-#logging.level: info
+logging.level: debug
# Enable debug output for selected components. To enable all selectors use ["*"]
# Other available selectors are "beat", "publish", "service"
diff --git a/salt/filebeat/init.sls b/salt/filebeat/init.sls
index 251274606..d3a1dfb14 100644
--- a/salt/filebeat/init.sls
+++ b/salt/filebeat/init.sls
@@ -61,8 +61,15 @@ so-filebeat:
- /opt/so/conf/filebeat/etc/filebeat.yml:/usr/share/filebeat/filebeat.yml:ro
- /nsm/bro:/nsm/bro:ro
- /opt/so/log/suricata:/suricata:ro
+ - /opt/so/wazuh/logs/alerts/:/wazuh/alerts:ro
+ - /opt/so/wazuh/logs/archives/:/wazuh/archives:ro
+{%- if grains['role'] == 'so-master' %}
+ - /etc/pki/filebeat.crt:/usr/share/filebeat/filebeat.crt:ro
+ - /etc/pki/filebeat.key:/usr/share/filebeat/filebeat.key:ro
+{%- else %}
- /opt/so/conf/filebeat/etc/pki/filebeat.crt:/usr/share/filebeat/filebeat.crt:ro
- /opt/so/conf/filebeat/etc/pki/filebeat.key:/usr/share/filebeat/filebeat.key:ro
+{%- endif %}
- /etc/ssl/certs/intca.crt:/usr/share/filebeat/intraca.crt:ro
- watch:
- file: /opt/so/conf/filebeat/etc
diff --git a/salt/firewall/init.sls b/salt/firewall/init.sls
index 103172517..18d0c814d 100644
--- a/salt/firewall/init.sls
+++ b/salt/firewall/init.sls
@@ -1,5 +1,11 @@
# Firewall Magic for the grid
-
+{%- if grains['role'] == 'so-master' or grains['role'] == 'so-eval' %}
+{%- set ip = salt['pillar.get']('static:masterip', '') %}
+{%- elif grains['role'] == 'so-node' %}
+{%- set ip = salt['pillar.get']('node:mainip', '') %}
+{%- elif grains['role'] == 'so-sensor' %}
+{%- set ip = salt['pillar.get']('sensor:mainip', '') %}
+{%- endif %}
# Keep localhost in the game
iptables_allow_localhost:
iptables.append:
@@ -86,6 +92,29 @@ enable_docker_user_established:
- match: conntrack
- ctstate: 'RELATED,ESTABLISHED'
+# Add rule(s) for Wazuh manager
+enable_wazuh_manager_1514_tcp_{{ip}}:
+ iptables.insert:
+ - table: filter
+ - chain: DOCKER-USER
+ - jump: ACCEPT
+ - proto: tcp
+ - source: {{ ip }}
+ - dport: 1514
+ - position: 1
+ - save: True
+
+enable_wazuh_manager_1514_udp_{{ip}}:
+ iptables.insert:
+ - table: filter
+ - chain: DOCKER-USER
+ - jump: ACCEPT
+ - proto: udp
+ - source: {{ ip }}
+ - dport: 1514
+ - position: 1
+ - save: True
+
# Rules if you are a Master
{% if grains['role'] == 'so-master' or grains['role'] == 'so-eval' %}
#This should be more granular
@@ -166,6 +195,17 @@ enable_masternode_influxdb_8086_{{ip}}:
- position: 1
- save: True
+enable_masternode_mysql_3306_{{ip}}:
+ iptables.insert:
+ - table: filter
+ - chain: DOCKER-USER
+ - jump: ACCEPT
+ - proto: tcp
+ - source: {{ ip }}
+ - dport: 3306
+ - position: 1
+ - save: True
+
{% endfor %}
# Make it so all the minions can talk to salt and update etc.
@@ -299,6 +339,22 @@ enable_standard_beats_5044_{{ip}}:
{% endfor %}
+# Allow OSQuery Endpoints to send their traffic
+{% for ip in pillar.get('osquery_endpoint') %}
+
+enable_standard_osquery_8080_{{ip}}:
+ iptables.insert:
+ - table: filter
+ - chain: DOCKER-USER
+ - jump: ACCEPT
+ - proto: tcp
+ - source: {{ ip }}
+ - dport: 8080
+ - position: 1
+ - save: True
+
+{% endfor %}
+
# Allow Analysts
{% for ip in pillar.get('analyst') %}
@@ -346,6 +402,17 @@ enable_standard_analyst_5601_{{ip}}:
- dport: 5601
- position: 1
- save: True
+#THIS IS TEMPORARY
+enable_standard_analyst_8080_{{ip}}:
+ iptables.insert:
+ - table: filter
+ - chain: DOCKER-USER
+ - jump: ACCEPT
+ - proto: tcp
+ - source: {{ ip }}
+ - dport: 8080
+ - position: 1
+ - save: True
{% endfor %}
diff --git a/salt/fleet/init.sls b/salt/fleet/init.sls
index abb1828c8..c5d77a7ec 100644
--- a/salt/fleet/init.sls
+++ b/salt/fleet/init.sls
@@ -1,3 +1,7 @@
+{%- set MYSQLPASS = salt['pillar.get']('auth:mysql', 'iwonttellyou') %}
+{%- set FLEETPASS = salt['pillar.get']('auth:fleet', 'bazinga') -%}
+{%- set MASTERIP = salt['pillar.get']('static:masterip', '') -%}
+
# Fleet Setup
fleetcdir:
file.directory:
@@ -5,3 +9,53 @@ fleetcdir:
- user: 939
- group: 939
- makedirs: True
+
+fleetlogdir:
+ file.directory:
+ - name: /opt/so/log/fleet
+ - user: 939
+ - group: 939
+ - makedirs: True
+
+fleetdb:
+ mysql_database.present:
+ - name: fleet
+
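+# Fleet connects from inside a container, so allow the fleet DB user from the default Docker bridge network (172.17.0.0/16) instead of a single host IP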
+fleetdbuser:
+ mysql_user.present:
+ - host: 172.17.0.0/255.255.0.0
+ - password: {{ FLEETPASS }}
+ - connection_user: root
+ - connection_pass: {{ MYSQLPASS }}
+
+fleetdbpriv:
+ mysql_grants.present:
+ - grant: all privileges
+ - database: fleet.*
+ - user: fleetdbuser
+ - host: 172.17.0.0/255.255.0.0
+
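+# Fleet web UI and osquery TLS endpoint, published on TCP/8080 (opened for analysts and osquery_endpoint IPs in salt/firewall/init.sls)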
+so-fleet:
+ docker_container.running:
+ - image: soshybridhunter/so-fleet:HH1.0.5
+ - hostname: so-fleet
+ - port_bindings:
+ - 0.0.0.0:8080:8080
+ - environment:
+ - KOLIDE_MYSQL_ADDRESS={{ MASTERIP }}:3306
+ - KOLIDE_MYSQL_DATABASE=fleet
+ - KOLIDE_MYSQL_USERNAME=fleetdbuser
+ - KOLIDE_MYSQL_PASSWORD={{ FLEETPASS }}
+ - KOLIDE_REDIS_ADDRESS={{ MASTERIP }}:6379
+ - KOLIDE_SERVER_CERT=/ssl/server.cert
+ - KOLIDE_SERVER_KEY=/ssl/server.key
+ - KOLIDE_LOGGING_JSON=true
+ - KOLIDE_AUTH_JWT_KEY=thisisatest
+ - KOLIDE_OSQUERY_STATUS_LOG_FILE=/var/log/osquery/status.log
+ - KOLIDE_OSQUERY_RESULT_LOG_FILE=/var/log/osquery/result.log
+ - binds:
+ - /etc/pki/fleet.key:/ssl/server.key:ro
+ - /etc/pki/fleet.crt:/ssl/server.cert:ro
+ - /opt/so/log/fleet:/var/log/osquery
+ - watch:
+ - file: /opt/so/conf/fleet/etc
diff --git a/salt/kibana/etc/kibana.yml b/salt/kibana/etc/kibana.yml
index c29218784..bbbfeb575 100644
--- a/salt/kibana/etc/kibana.yml
+++ b/salt/kibana/etc/kibana.yml
@@ -3,6 +3,7 @@
{%- set ES = salt['pillar.get']('master:mainip', '') -%}
server.name: kibana
server.host: "0"
+server.basePath: /kibana
elasticsearch.url: http://{{ ES }}:9200
#elasticsearch.username: elastic
#elasticsearch.password: changeme
diff --git a/salt/kibana/init.sls b/salt/kibana/init.sls
index b4b641862..4bd800297 100644
--- a/salt/kibana/init.sls
+++ b/salt/kibana/init.sls
@@ -59,7 +59,7 @@ synckibanacustom:
# Start the kibana docker
so-kibana:
docker_container.running:
- - image: soshybridhunter/so-kibana:HH1.0.3
+ - image: soshybridhunter/so-kibana:HH1.0.5
- hostname: kibana
- user: kibana
- environment:
diff --git a/salt/logstash/conf/conf.enabled.txt.so-eval b/salt/logstash/conf/conf.enabled.txt.so-eval
index e5ce9c803..71e50525f 100644
--- a/salt/logstash/conf/conf.enabled.txt.so-eval
+++ b/salt/logstash/conf/conf.enabled.txt.so-eval
@@ -12,8 +12,8 @@
/usr/share/logstash/pipeline.so/0002_input_windows_json.conf
/usr/share/logstash/pipeline.so/0003_input_syslog.conf
/usr/share/logstash/pipeline.so/0005_input_suricata.conf
+/usr/share/logstash/pipeline.dynamic/0006_input_beats.conf
/usr/share/logstash/pipeline.so/0007_input_import.conf
-/usr/share/logstash/pipeline.so/0008_input_eval.conf
/usr/share/logstash/pipeline.so/1000_preprocess_log_elapsed.conf
/usr/share/logstash/pipeline.so/1001_preprocess_syslogng.conf
/usr/share/logstash/pipeline.so/1002_preprocess_json.conf
diff --git a/salt/logstash/conf/conf.enabled.txt.so-eval.old b/salt/logstash/conf/conf.enabled.txt.so-eval.old
new file mode 100644
index 000000000..e5ce9c803
--- /dev/null
+++ b/salt/logstash/conf/conf.enabled.txt.so-eval.old
@@ -0,0 +1,109 @@
+# This is where can specify which LogStash configs get loaded.
+#
+# The custom folder on the master gets automatically synced to each logstash
+# node.
+#
+# To enable a custom configuration see the following example and uncomment:
+# /usr/share/logstash/pipeline.custom/1234_input_custom.conf
+##
+# All of the defaults are loaded.
+/usr/share/logstash/pipeline.so/0000_input_syslogng.conf
+/usr/share/logstash/pipeline.so/0001_input_json.conf
+/usr/share/logstash/pipeline.so/0002_input_windows_json.conf
+/usr/share/logstash/pipeline.so/0003_input_syslog.conf
+/usr/share/logstash/pipeline.so/0005_input_suricata.conf
+/usr/share/logstash/pipeline.so/0007_input_import.conf
+/usr/share/logstash/pipeline.so/0008_input_eval.conf
+/usr/share/logstash/pipeline.so/1000_preprocess_log_elapsed.conf
+/usr/share/logstash/pipeline.so/1001_preprocess_syslogng.conf
+/usr/share/logstash/pipeline.so/1002_preprocess_json.conf
+/usr/share/logstash/pipeline.so/1003_preprocess_bro.conf
+/usr/share/logstash/pipeline.so/1004_preprocess_syslog_types.conf
+/usr/share/logstash/pipeline.so/1026_preprocess_dhcp.conf
+/usr/share/logstash/pipeline.so/1029_preprocess_esxi.conf
+/usr/share/logstash/pipeline.so/1030_preprocess_greensql.conf
+/usr/share/logstash/pipeline.so/1031_preprocess_iis.conf
+/usr/share/logstash/pipeline.so/1032_preprocess_mcafee.conf
+/usr/share/logstash/pipeline.so/1033_preprocess_snort.conf
+/usr/share/logstash/pipeline.so/1034_preprocess_syslog.conf
+/usr/share/logstash/pipeline.so/1100_preprocess_bro_conn.conf
+/usr/share/logstash/pipeline.so/1101_preprocess_bro_dhcp.conf
+/usr/share/logstash/pipeline.so/1102_preprocess_bro_dns.conf
+/usr/share/logstash/pipeline.so/1103_preprocess_bro_dpd.conf
+/usr/share/logstash/pipeline.so/1104_preprocess_bro_files.conf
+/usr/share/logstash/pipeline.so/1105_preprocess_bro_ftp.conf
+/usr/share/logstash/pipeline.so/1106_preprocess_bro_http.conf
+/usr/share/logstash/pipeline.so/1107_preprocess_bro_irc.conf
+/usr/share/logstash/pipeline.so/1108_preprocess_bro_kerberos.conf
+/usr/share/logstash/pipeline.so/1109_preprocess_bro_notice.conf
+/usr/share/logstash/pipeline.so/1110_preprocess_bro_rdp.conf
+/usr/share/logstash/pipeline.so/1111_preprocess_bro_signatures.conf
+/usr/share/logstash/pipeline.so/1112_preprocess_bro_smtp.conf
+/usr/share/logstash/pipeline.so/1113_preprocess_bro_snmp.conf
+/usr/share/logstash/pipeline.so/1114_preprocess_bro_software.conf
+/usr/share/logstash/pipeline.so/1115_preprocess_bro_ssh.conf
+/usr/share/logstash/pipeline.so/1116_preprocess_bro_ssl.conf
+/usr/share/logstash/pipeline.so/1117_preprocess_bro_syslog.conf
+/usr/share/logstash/pipeline.so/1118_preprocess_bro_tunnel.conf
+/usr/share/logstash/pipeline.so/1119_preprocess_bro_weird.conf
+/usr/share/logstash/pipeline.so/1121_preprocess_bro_mysql.conf
+/usr/share/logstash/pipeline.so/1122_preprocess_bro_socks.conf
+/usr/share/logstash/pipeline.so/1123_preprocess_bro_x509.conf
+/usr/share/logstash/pipeline.so/1124_preprocess_bro_intel.conf
+/usr/share/logstash/pipeline.so/1125_preprocess_bro_modbus.conf
+/usr/share/logstash/pipeline.so/1126_preprocess_bro_sip.conf
+/usr/share/logstash/pipeline.so/1127_preprocess_bro_radius.conf
+/usr/share/logstash/pipeline.so/1128_preprocess_bro_pe.conf
+/usr/share/logstash/pipeline.so/1129_preprocess_bro_rfb.conf
+/usr/share/logstash/pipeline.so/1130_preprocess_bro_dnp3.conf
+/usr/share/logstash/pipeline.so/1131_preprocess_bro_smb_files.conf
+/usr/share/logstash/pipeline.so/1132_preprocess_bro_smb_mapping.conf
+/usr/share/logstash/pipeline.so/1133_preprocess_bro_ntlm.conf
+/usr/share/logstash/pipeline.so/1134_preprocess_bro_dce_rpc.conf
+/usr/share/logstash/pipeline.so/1998_test_data.conf
+/usr/share/logstash/pipeline.so/2000_network_flow.conf
+/usr/share/logstash/pipeline.so/6000_bro.conf
+/usr/share/logstash/pipeline.so/6001_bro_import.conf
+/usr/share/logstash/pipeline.so/6002_syslog.conf
+/usr/share/logstash/pipeline.so/6101_switch_brocade.conf
+/usr/share/logstash/pipeline.so/6200_firewall_fortinet.conf
+/usr/share/logstash/pipeline.so/6201_firewall_pfsense.conf
+/usr/share/logstash/pipeline.so/6300_windows.conf
+/usr/share/logstash/pipeline.so/6301_dns_windows.conf
+/usr/share/logstash/pipeline.so/6400_suricata.conf
+/usr/share/logstash/pipeline.so/6500_ossec.conf
+/usr/share/logstash/pipeline.so/6501_ossec_sysmon.conf
+/usr/share/logstash/pipeline.so/6502_ossec_autoruns.conf
+/usr/share/logstash/pipeline.so/6600_winlogbeat_sysmon.conf
+/usr/share/logstash/pipeline.so/6700_winlogbeat.conf
+/usr/share/logstash/pipeline.so/8000_postprocess_bro_cleanup.conf
+/usr/share/logstash/pipeline.so/8001_postprocess_common_ip_augmentation.conf
+#/usr/share/logstash/pipeline.so/8006_postprocess_dns.conf
+#/usr/share/logstash/pipeline.so/8007_postprocess_dns_top1m_tagging.conf
+/usr/share/logstash/pipeline.so/8007_postprocess_http.conf
+#/usr/share/logstash/pipeline.so/8008_postprocess_dns_whois_age.conf
+/usr/share/logstash/pipeline.so/8200_postprocess_tagging.conf
+#/usr/share/logstash/pipeline.so/8502_postprocess_freq_analysis_bro_dns.conf
+#/usr/share/logstash/pipeline.so/8503_postprocess_freq_analysis_bro_http.conf
+#/usr/share/logstash/pipeline.so/8504_postprocess_freq_analysis_bro_ssl.conf
+#/usr/share/logstash/pipeline.so/8505_postprocess_freq_analysis_bro_x509.conf
+/usr/share/logstash/pipeline.so/8998_postprocess_log_elapsed.conf
+/usr/share/logstash/pipeline.so/8999_postprocess_rename_type.conf
+/usr/share/logstash/pipeline.dynamic/9000_output_bro.conf
+/usr/share/logstash/pipeline.dynamic/9001_output_switch.conf
+/usr/share/logstash/pipeline.dynamic/9002_output_import.conf
+/usr/share/logstash/pipeline.dynamic/9004_output_flow.conf
+/usr/share/logstash/pipeline.dynamic/9026_output_dhcp.conf
+/usr/share/logstash/pipeline.dynamic/9029_output_esxi.conf
+/usr/share/logstash/pipeline.dynamic/9030_output_greensql.conf
+/usr/share/logstash/pipeline.dynamic/9031_output_iis.conf
+/usr/share/logstash/pipeline.dynamic/9032_output_mcafee.conf
+/usr/share/logstash/pipeline.dynamic/9033_output_snort.conf
+/usr/share/logstash/pipeline.dynamic/9034_output_syslog.conf
+/usr/share/logstash/pipeline.dynamic/9200_output_firewall.conf
+/usr/share/logstash/pipeline.dynamic/9300_output_windows.conf
+/usr/share/logstash/pipeline.dynamic/9301_output_dns_windows.conf
+/usr/share/logstash/pipeline.dynamic/9400_output_suricata.conf
+/usr/share/logstash/pipeline.dynamic/9500_output_beats.conf
+/usr/share/logstash/pipeline.dynamic/9600_output_ossec.conf
+/usr/share/logstash/pipeline.dynamic/9998_output_test_data.conf
diff --git a/salt/logstash/files/dynamic/0006_input_beats.conf b/salt/logstash/files/dynamic/0006_input_beats.conf
index d3615b126..3d0306dd4 100644
--- a/salt/logstash/files/dynamic/0006_input_beats.conf
+++ b/salt/logstash/files/dynamic/0006_input_beats.conf
@@ -9,20 +9,21 @@ input {
}
}
filter {
- if "ids" in [tags] {
+ if [type] == "ids" or [type] =~ "bro" {
mutate {
rename => { "host" => "beat_host" }
remove_tag => ["beat"]
add_field => { "sensor_name" => "%{[beat][name]}" }
add_field => { "syslog-host_from" => "%{[beat][name]}" }
+ remove_field => [ "beat", "prospector", "input", "offset" ]
}
}
- if "bro" in [tags] {
+ if [type] =~ "ossec" {
mutate {
rename => { "host" => "beat_host" }
remove_tag => ["beat"]
- add_field => { "sensor_name" => "%{[beat][name]}" }
add_field => { "syslog-host_from" => "%{[beat][name]}" }
+ remove_field => [ "beat", "prospector", "input", "offset" ]
}
}
}
diff --git a/salt/logstash/init.sls b/salt/logstash/init.sls
index 6901a857f..e816c888e 100644
--- a/salt/logstash/init.sls
+++ b/salt/logstash/init.sls
@@ -149,7 +149,7 @@ lslogdir:
so-logstash:
docker_container.running:
- - image: soshybridhunter/so-logstash:HH1.0.4
+ - image: soshybridhunter/so-logstash:HH1.0.5
- hostname: so-logstash
- name: so-logstash
- user: logstash
diff --git a/salt/master/files/acng/acng.conf b/salt/master/files/acng/acng.conf
index fd4c84991..a37d898af 100644
--- a/salt/master/files/acng/acng.conf
+++ b/salt/master/files/acng/acng.conf
@@ -79,7 +79,7 @@ RedirMax: 6
VfileUseRangeOps: 0
# PassThroughPattern: private-ppa\.launchpad\.net:443$
# PassThroughPattern: .* # this would allow CONNECT to everything
-PassThroughPattern: (download\.docker\.com:443|mirrors\.fedoraproject\.org:443|repo\.saltstack\.com:443|yum\.dockerproject\.org:443|download\.docker\.com:443|registry\.npmjs\.org:443|registry\.yarnpkg\.com:443)$ # yarn/npm pkg, cant to http :/
+PassThroughPattern: (download\.docker\.com:443|mirrors\.fedoraproject\.org:443|packages\.wazuh\.com:443|repo\.saltstack\.com:443|yum\.dockerproject\.org:443|download\.docker\.com:443|registry\.npmjs\.org:443|registry\.yarnpkg\.com:443)$ # yarn/npm pkg, cant to http :/
# ResponseFreezeDetectTime: 500
# ReuseConnections: 1
# PipelineDepth: 255
diff --git a/salt/master/init.sls b/salt/master/init.sls
index 8dd1fcae5..35f6c5254 100644
--- a/salt/master/init.sls
+++ b/salt/master/init.sls
@@ -49,7 +49,7 @@ acngcopyconf:
# Install the apt-cacher-ng container
so-aptcacherng:
docker_container.running:
- - image: soshybridhunter/so-acng:HH1.0.3
+ - image: soshybridhunter/so-acng:HH1.0.5
- hostname: so-acng
- port_bindings:
- 0.0.0.0:3142:3142
diff --git a/salt/mysql/etc/my.cnf b/salt/mysql/etc/my.cnf
index e1bf4e117..e37f690dc 100644
--- a/salt/mysql/etc/my.cnf
+++ b/salt/mysql/etc/my.cnf
@@ -22,7 +22,7 @@ skip-name-resolve
datadir=/var/lib/mysql
socket=/var/lib/mysql/mysql.sock
secure-file-priv=/var/lib/mysql-files
-user=939
+user=socore
# Disabling symbolic-links is recommended to prevent assorted security risks
symbolic-links=0
diff --git a/salt/mysql/etc/mypass b/salt/mysql/etc/mypass
index 723d4722f..85ae1772b 100644
--- a/salt/mysql/etc/mypass
+++ b/salt/mysql/etc/mypass
@@ -1,2 +1,2 @@
-{%- set MYSQLPASS = salt['pillar.get']('master:mysqlpass', 'iwonttellyou') %}
+{%- set MYSQLPASS = salt['pillar.get']('auth:mysql', 'iwonttellyou') -%}
{{ MYSQLPASS }}
diff --git a/salt/mysql/init.sls b/salt/mysql/init.sls
index d5a48eff1..af80030ee 100644
--- a/salt/mysql/init.sls
+++ b/salt/mysql/init.sls
@@ -1,5 +1,5 @@
-{%- set MYSQLPASS = salt['pillar.get']('master:mysqlpass', 'iwonttellyou') %}
-{%- set FLEETPASS = salt['pillar.get']('master:fleetpass', 'bazinga') %}
+{%- set MYSQLPASS = salt['pillar.get']('auth:mysql', 'iwonttellyou') %}
+{%- set FLEETPASS = salt['pillar.get']('auth:fleet', 'bazinga') %}
{%- set MASTERIP = salt['pillar.get']('static:masterip', '') %}
# MySQL Setup
mysqlpkgs:
@@ -19,7 +19,14 @@ mysqletcdir:
- group: 939
- makedirs: True
-lsetcsync:
+mysqlpiddir:
+ file.directory:
+ - name: /opt/so/conf/mysql/pid
+ - user: 939
+ - group: 939
+ - makedirs: True
+
+mysqletcsync:
file.recurse:
- name: /opt/so/conf/mysql/etc
- source: salt://mysql/etc
@@ -43,7 +50,7 @@ mysqldatadir:
so-mysql:
docker_container.running:
- - image: mysql/mysql-server:5.7
+ - image: soshybridhunter/so-mysql:HH1.0.5
- hostname: so-mysql
- user: socore
- port_bindings:
@@ -52,26 +59,9 @@ so-mysql:
- MYSQL_ROOT_HOST={{ MASTERIP }}
- MYSQL_ROOT_PASSWORD=/etc/mypass
- binds:
- - /opt/so/conf/etc/my.cnf:/etc/my.cnf:ro
- - /opt/so/conf/etc/mypass:/etc/mypass
+ - /opt/so/conf/mysql/etc/my.cnf:/etc/my.cnf:ro
+ - /opt/so/conf/mysql/etc/mypass:/etc/mypass
- /nsm/mysql:/var/lib/mysql:rw
- /opt/so/log/mysql:/var/log/mysql:rw
- watch:
- /opt/so/conf/mysql/etc
-
-fleetdb:
- mysql_database.present:
- - name: fleet
-
-fleetdbuser:
- mysql_user.present:
- - host: {{ MASTERIP }}
- - password: {{ FLEETPASS }}
- - connection_user: root
- - connection_pass: {{ MYSQLPASS }}
-
-fleetdbpriv:
- mysql_grants.present:
- - grant: all privileges
- - database: fleet.*
- - user: fleet
diff --git a/salt/pulledpork/etc/disablesid.conf b/salt/pulledpork/etc/disablesid.conf
deleted file mode 100644
index 7e2381aa3..000000000
--- a/salt/pulledpork/etc/disablesid.conf
+++ /dev/null
@@ -1,38 +0,0 @@
-# example disablesid.conf V3.1
-
-# Example of modifying state for individual rules
-# 1:1034,1:9837,1:1270,1:3390,1:710,1:1249,3:13010
-
-# Example of modifying state for rule ranges
-# 1:220-1:3264,3:13010-3:13013
-
-# Comments are allowed in this file, and can also be on the same line
-# As the modify state syntax, as long as it is a trailing comment
-# 1:1011 # I Disabled this rule because I could!
-
-# Example of modifying state for MS and cve rules, note the use of the :
-# in cve. This will modify MS09-008, cve 2009-0233, bugtraq 21301,
-# and all MS00 and all cve 2000 related sids! These support regular expression
-# matching only after you have specified what you are looking for, i.e.
-# MS00- or cve:, the first section CANNOT contain a regular
-# expression (MS\d{2}-\d+) will NOT work, use the pcre: keyword (below)
-# for this.
-# MS09-008,cve:2009-0233,bugtraq:21301,MS00-\d+,cve:2000-\d+
-
-# Example of using the pcre: keyword to modify rulestate. the pcre keyword
-# allows for full use of regular expression syntax, you do not need to designate
-# with / and all pcre searches are treated as case insensitive. For more information
-# about regular expression syntax: http://www.regular-expressions.info/
-# The following example modifies state for all MS07 through MS10
-# pcre:MS(0[7-9]|10)-\d+
-
-# Example of modifying state for specific categories entirely (see README.CATEGORIES)
-# VRT-web-iis,ET-shellcode,ET-emergingthreats-smtp,Custom-shellcode,Custom-emergingthreats-smtp
-
-# Any of the above values can be on a single line or multiple lines, when
-# on a single line they simply need to be separated by a ,
-# 1:9837,1:220-1:3264,3:13010-3:13013,pcre:MS(0[0-7])-\d+,MS09-008,cve:2009-0233
-
-# The modifications in this file are for sample/example purposes only and
-# should not actively be used, you need to modify this file to fit your
-# environment.
diff --git a/salt/pulledpork/etc/dropsid.conf b/salt/pulledpork/etc/dropsid.conf
deleted file mode 100644
index 27a41e57e..000000000
--- a/salt/pulledpork/etc/dropsid.conf
+++ /dev/null
@@ -1,42 +0,0 @@
-# example dropsid.conf V3.1
-#
-# Note: This file is used to specify what rules you wish to be set to have
-# an action of drop rather than alert. This means that you are running
-# snort inline (more info about inline deployments at snort.org).
-
-# Example of modifying state for individual rules
-# 1:1034,1:9837,1:1270,1:3390,1:710,1:1249,3:13010
-
-# Example of modifying state for rule ranges
-# 1:220-1:3264,3:13010-3:13013
-
-# Comments are allowed in this file, and can also be on the same line
-# As the modify state syntax, as long as it is a trailing comment
-# 1:1011 # I Disabled this rule because I could!
-
-# Example of modifying state for MS and cve rules, note the use of the :
-# in cve. This will modify MS09-008, cve 2009-0233, bugtraq 21301,
-# and all MS00 and all cve 2000 related sids! These support regular expression
-# matching only after you have specified what you are looking for, i.e.
-# MS00- or cve:, the first section CANNOT contain a regular
-# expression (MS\d{2}-\d+) will NOT work, use the pcre: keyword (below)
-# for this.
-# MS09-008,cve:2009-0233,bugtraq:21301,MS00-\d+,cve:2000-\d+
-
-# Example of using the pcre: keyword to modify rulestate. the pcre keyword
-# allows for full use of regular expression syntax, you do not need to designate
-# with / and all pcre searches are treated as case insensitive. For more information
-# about regular expression syntax: http://www.regular-expressions.info/
-# The following example modifies state for all MS07 through MS10
-# pcre:MS(0[7-9]|10)-\d+
-
-# Example of modifying state for specific categories entirely (see README.CATEGORIES)
-# VRT-web-iis,ET-shellcode,ET-emergingthreats-smtp,Custom-shellcode,Custom-emergingthreats-smtp
-
-# Any of the above values can be on a single line or multiple lines, when
-# on a single line they simply need to be separated by a ,
-# 1:9837,1:220-1:3264,3:13010-3:13013,pcre:MS(0[0-7])-\d+,MS09-008,cve:2009-0233
-
-# The modifications in this file are for sample/example purposes only and
-# should not actively be used, you need to modify this file to fit your
-# environment.
diff --git a/salt/pulledpork/etc/enablesid.conf b/salt/pulledpork/etc/enablesid.conf
deleted file mode 100644
index 261f605e4..000000000
--- a/salt/pulledpork/etc/enablesid.conf
+++ /dev/null
@@ -1,48 +0,0 @@
-# example enablesid.conf v3.1
-
-# SPECIAL NOTE, if you use the -R flag, the rule(s) specified in this file
-# will be set back to their ORIGINAL state as it was read when they were
-# originally extracted from the source tarball!
-
-# Example of modifying state for individual rules
-# 1:1034,1:9837,1:1270,1:3390,1:710,1:1249,3:13010
-
-# Example of modifying state for rule ranges
-# 1:220-1:3264,3:13010-3:13013
-
-# Comments are allowed in this file, and can also be on the same line
-# As the modify state syntax, as long as it is a trailing comment
-# 1:1011 # I Disabled this rule because I could!
-
-# Example of modifying state for MS and cve rules, note the use of the :
-# in cve. This will modify MS09-008, cve 2009-0233, bugtraq 21301,
-# and all MS00 and all cve 2000 related sids! These support regular expression
-# matching only after you have specified what you are looking for, i.e.
-# MS00- or cve:, the first section CANNOT contain a regular
-# expression (MS\d{2}-\d+) will NOT work, use the pcre: keyword (below)
-# for this.
-# MS09-008,cve:2009-0233,bugtraq:21301,MS00-\d+,cve:2000-\d+
-
-# Example of using the pcre: keyword to modify rulestate. the pcre keyword
-# allows for full use of regular expression syntax, you do not need to designate
-# with / and all pcre searches are treated as case insensitive. For more information
-# about regular expression syntax: http://www.regular-expressions.info/
-# The following example modifies state for all MS07 through MS10
-# pcre:MS(0[7-9]|10)-\d+
-
-# FOR TESTING ONLY:
-# The following will enable ALL signatures for which Pulledpork has been configured
-# to download
-# pcre:.
-
-# Example of modifying state for specific categories entirely (see README.CATEGORIES)
-# VRT-web-iis,ET-shellcode,ET-emergingthreats-smtp,Custom-shellcode,Custom-emergingthreats-smtp
-
-# Any of the above values can be on a single line or multiple lines, when
-# on a single line they simply need to be separated by a ,
-# 1:9837,1:220-1:3264,3:13010-3:13013,pcre:MS(0[0-7])-\d+,MS09-008,cve:2009-0233
-
-# The modifications in this file are for sample/example purposes only and
-# should not actively be used, you need to modify this file to fit your
-# environment.
-
diff --git a/salt/pulledpork/etc/modifysid.conf b/salt/pulledpork/etc/modifysid.conf
deleted file mode 100644
index 50ee97601..000000000
--- a/salt/pulledpork/etc/modifysid.conf
+++ /dev/null
@@ -1,40 +0,0 @@
-# example modifysid.conf v1.1 2/18/2011 Alan Ptak
-#
-# Change history:
-# -----------------------------------------------
-# v1.1 2/18/2011 Alan Ptak
-# - Inserted comments around example elements that would otherwise modify rules
-#
-# v1.0 7/25/2010 JJC
-# - original release
-# -----------------------------------------------
-#
-# formatting is simple
-# "what I'm replacing" "what I'm replacing it with"
-#
-# Note that this will only work with GID:1 rules, simply because modifying
-# GID:3 stub rules would not actually affect the rule, thusly it will remain
-# non modifyable!
-#
-# If you are attempting to change rulestate (enable,drop,disable) from here
-# then you are doing it wrong, it is much more efficient to do so from within
-# the respective rulestate modification configuration files, please see doc/
-# and the README file!
-
-# the following applies to sid 10010 only and represents what would normally
-# be s/to_client/from_server/
-# 10010 "to_client" "from_server"
-
-# the following would replace HTTP_PORTS with HTTPS_PORTS for ALL GID:1
-# rules
-# "HTTP_PORTS" "HTTPS_PORTS"
-
-# multiple sids can be specified as noted below:
-# 302,429,1821 "\$EXTERNAL_NET" "$HOME_NET"
-
-# example of modification of a rule to make snortsam BLOCK the rule:
-# note that one rule changes from alert to BLOCK and that the other
-# modifies the msg:" field value so that when the alert occurs it is noted
-# that it is a SNORTSAM block rule!
-# 17803 "\(msg:"" "\(msg:"SNORTSAM ";
-# 17803 "^\s*alert" "BLOCK";
diff --git a/salt/pulledpork/etc/pulledpork.conf b/salt/pulledpork/etc/pulledpork.conf
deleted file mode 100644
index daa5fcb17..000000000
--- a/salt/pulledpork/etc/pulledpork.conf
+++ /dev/null
@@ -1,214 +0,0 @@
-# Config file for pulledpork
-# Be sure to read through the entire configuration file
-# If you specify any of these items on the command line, it WILL take
-# precedence over any value that you specify in this file!
-
-#######
-####### The below section defines what your oinkcode is (required for
-####### VRT rules), defines a temp path (must be writable) and also
-####### defines what version of rules that you are getting (for your
-####### snort version and subscription etc...)
-#######
-
-# You can specify one or as many rule_urls as you like, they
-# must appear as http://what.site.com/|rulesfile.tar.gz|1234567. You can specify
-# each on an individual line, or you can specify them in a , separated list
-# i.e. rule_url=http://x.y.z/|a.tar.gz|123,http://z.y.z/|b.tar.gz|456
-# note that the url, rule file, and oinkcode itself are separated by a pipe |
-# i.e. url|tarball|123456789,
-#rule_url=https://www.snort.org/reg-rules/|snortrules-snapshot.tar.gz|
-# NEW Community ruleset:
-#rule_url=https://snort.org/downloads/community/|community-rules.tar.gz|Community
-# NEW For IP Blacklisting! Note the format is urltofile|IPBLACKLIST|
-# This format MUST be followed to let pulledpork know that this is a blacklist
-#rule_url=http://talosintelligence.com/feeds/ip-filter.blf|IPBLACKLIST|open
-# URL for rule documentation! (slow to process)
-#rule_url=https://snort.org/downloads/community/|opensource.tar.gz|Opensource
-# THE FOLLOWING URL is for emergingthreats downloads, note the tarball name change!
-# and open-nogpl, to avoid conflicts.
-rule_url=https://rules.emergingthreats.net/open/suricata-4.0/|emerging.rules.tar.gz|open
-# THE FOLLOWING URL is for etpro downloads, note the tarball name change!
-# and the et oinkcode requirement!
-#rule_url=https://rules.emergingthreatspro.com/|etpro.rules.tar.gz|
-# NOTE above that the VRT snortrules-snapshot does not contain the version
-# portion of the tarball name, this is because PP now automatically populates
-# this value for you, if, however you put the version information in, PP will
-# NOT populate this value but will use your value!
-
-# Specify rule categories to ignore from the tarball in a comma separated list
-# with no spaces. There are four ways to do this:
-# 1) Specify the category name with no suffix at all to ignore the category
-# regardless of what rule-type it is, ie: netbios
-# 2) Specify the category name with a '.rules' suffix to ignore only gid 1
-# rulefiles located in the /rules directory of the tarball, ie: policy.rules
-# 3) Specify the category name with a '.preproc' suffix to ignore only
-# preprocessor rules located in the /preproc_rules directory of the tarball,
-# ie: sensitive-data.preproc
-# 4) Specify the category name with a '.so' suffix to ignore only shared-object
-# rules located in the /so_rules directory of the tarball, ie: netbios.so
-# The example below ignores dos rules wherever they may appear, sensitive-
-# data preprocessor rules, p2p so-rules (while including gid 1 p2p rules),
-# and netbios gid-1 rules (while including netbios so-rules):
-# ignore = dos,sensitive-data.preproc,p2p.so,netbios.rules
-# These defaults are reasonable for the VRT ruleset with Snort 2.9.0.x.
-ignore=deleted.rules,experimental.rules,local.rules
-# IMPORTANT, if you are NOT yet using 2.8.6 then you MUST comment out the
-# previous ignore line and uncomment the following!
-# ignore=deleted,experimental,local,decoder,preprocessor,sensitive-data
-
-# What is our temp path, be sure this path has a bit of space for rule
-# extraction and manipulation, no trailing slash
-temp_path=/tmp
-
-#######
-####### The below section is for rule processing. This section is
-####### required if you are not specifying the configuration using
-####### runtime switches. Note that runtime switches do SUPERSEED
-####### any values that you have specified here!
-#######
-
-# What path you want the .rules file containing all of the processed
-# rules? (this value has changed as of 0.4.0, previously we copied
-# all of the rules, now we are creating a single large rules file
-# but still keeping a separate file for your so_rules!
-rule_path=/opt/so/rules/nids/downloaded.rules
-
-# What path you want the .rules files to be written to, this is UNIQUE
-# from the rule_path and cannot be used in conjunction, this is to be used with the
-# -k runtime flag, this can be set at runtime using the -K flag or specified
-# here. If specified here, the -k option must also be passed at runtime, however
-# specifying -K at runtime forces the -k option to also be set
-# out_path=/usr/local/etc/snort/rules/
-
-# If you are running any rules in your local.rules file, we need to
-# know about them to properly build a sid-msg.map that will contain your
-# local.rules metadata (msg) information. You can specify other rules
-# files that are local to your system here by adding a comma and more paths...
-# remember that the FULL path must be specified for EACH value.
-# local_rules=/path/to/these.rules,/path/to/those.rules
-local_rules=/opt/so/rules/nids/local.rules,/opt/so/rules/nids/decoder-events.rules,/opt/so/rules/nids/stream-events.rules,/opt/so/rules/nids/http-events.rules,/opt/so/rules/nids/smtp-events.rules
-
-# Where should I put the sid-msg.map file?
-sid_msg=/opt/so/rules/nids/sid-msg.map
-
-# New for by2 and more advanced msg mapping. Valid options are 1 or 2
-# specify version 2 if you are running barnyard2.2+. Otherwise use 1
-sid_msg_version=1
-
-# Where do you want me to put the sid changelog? This is a changelog
-# that pulledpork maintains of all new sids that are imported
-sid_changelog=/var/log/nsm/sid_changes.log
-# this value is optional
-
-#######
-####### The below section is for so_rule processing only. If you don't
-####### need to use them.. then comment this section out!
-####### Alternately, if you are not using pulledpork to process
-####### so_rules, you can specify -T at runtime to bypass this altogether
-#######
-
-# What path you want the .so files to actually go to *i.e. where is it
-# defined in your snort.conf, needs a trailing slash
-sorule_path=/usr/local/lib/snort_dynamicrules/
-
-# Path to the snort binary, we need this to generate the stub files
-snort_path=/usr/bin/snort
-
-# We need to know where your snort.conf file lives so that we can
-# generate the stub files
-config_path=/etc/nsm/templates/snort/snort.conf
-
-##### Deprecated - The stubs are now categorically written to the single rule file!
-# sostub_path=/usr/local/etc/snort/rules/so_rules.rules
-
-# Define your distro, this is for the precompiled shared object libs!
-# Valid Distro Types:
-# Debian-6-0, Ubuntu-10-4
-# Ubuntu-12-04, Centos-5-4
-# FC-12, FC-14, RHEL-5-5, RHEL-6-0
-# FreeBSD-8-1, FreeBSD-9-0, FreeBSD-10-0
-# OpenBSD-5-2, OpenBSD-5-3
-# OpenSUSE-11-4, OpenSUSE-12-1
-# Slackware-13-1
-distro=Centos-5-4
-
-####### This next section is optional, but probably pretty useful to you.
-####### Please read thoroughly!
-
-# If you are using IP Reputation and getting some public lists, you will probably
-# want to tell pulledpork where your blacklist file lives, PP automagically will
-# de-dupe any duplicate IPs from different sources.
-black_list=/usr/local/etc/snort/rules/iplists/default.blacklist
-
-# IP Reputation does NOT require a full snort HUP, it introduces a concept whereby
-# the IP list can be reloaded while snort is running through the use of a control
-# socket. Please be sure that you built snort with the following optins:
-# -enable-shared-rep and --enable-control-socket. Be sure to read about how to
-# configure these! The following option tells pulledpork where to place the version
-# file for use with control socket ip list reloads!
-# This should be the same path where your black_list lives!
-IPRVersion=/usr/local/etc/snort/rules/iplists
-
-# The following option tells snort where the snort_control tool is located.
-snort_control=/usr/local/bin/snort_control
-
-# What do you want to backup and archive? This is a comma separated list
-# of file or directory values. If a directory is specified, PP will recurse
-# through said directory and all subdirectories to archive all files.
-# The following example backs up all snort config files, rules, pulledpork
-# config files, and snort shared object binary rules.
-# backup=/usr/local/etc/snort,/usr/local/etc/pulledpork,/usr/local/lib/snort_dynamicrules/
-
-# what path and filename should we use for the backup tarball?
-# note that an epoch time value and the .tgz extension is automatically added
-# to the backup_file name on completeion i.e. the written file is:
-# pp_backup.1295886020.tgz
-# backup_file=/tmp/pp_backup
-
-# Where do you want the signature docs to be copied, if this is commented
-# out then they will not be copied / extracted. Note that extracting them
-# will add considerable runtime to pulledpork.
-# docs=/path/to/base/www
-
-# The following option, state_order, allows you to more finely control the order
-# that pulledpork performs the modify operations, specifically the enablesid
-# disablesid and dropsid functions. An example use case here would be to
-# disable an entire category and later enable only a rule or two out of it.
-# the valid values are disable, drop, and enable.
-# state_order=disable,drop,enable
-
-
-# Define the path to the pid files of any running process that you want to
-# HUP after PP has completed its run.
-# pid_path=/var/run/snort.pid,/var/run/barnyard.pid,/var/run/barnyard2.pid
-# and so on...
-# pid_path=/var/run/snort_eth0.pid
-
-# This defines the version of snort that you are using, for use ONLY if the
-# proper snort binary is not on the system that you are fetching the rules with
-# This value MUST contain all 4 minor version
-# numbers. ET rules are now also dependant on this, verify supported ET versions
-# prior to simply throwing rubbish in this variable kthx!
-#
-# Suricata users - set this to 'suricata-3.x.x' to process rule files
-# for suricata, this mimics the -S flag on the command line.
-# snort_version=2.9.0.0
-
-# Here you can specify what rule modification files to run automatically.
-# simply uncomment and specify the apt path.
-enablesid=/opt/so/pulledpork/etc/enablesid.conf
-dropsid=/opt/so/pulledpork/dropsid.conf
-disablesid=/opt/so/pulledpork/disablesid.conf
-modifysid=/opt/so/pulledpork/modifysid.conf
-
-# What is the base ruleset that you want to use, please uncomment to use
-# and see the README.RULESETS for a description of the options.
-# Note that setting this value will disable all ET rulesets if you are
-# Running such rulesets
-# ips_policy=security
-
-####### Remember, a number of these values are optional.. if you don't
-####### need to process so_rules, simply comment out the so_rule section
-####### you can also specify -T at runtime to process only GID 1 rules.
-
-version=0.7.3
diff --git a/salt/pulledpork/init.sls b/salt/pulledpork/init.sls
deleted file mode 100644
index e1ae2728f..000000000
--- a/salt/pulledpork/init.sls
+++ /dev/null
@@ -1,55 +0,0 @@
-# Copyright 2014,2015,2016,2017,2018 Security Onion Solutions, LLC
-
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see .
-
-# PulledProk Setup
-ppdir:
- file.directory:
- - name: /opt/so/pulledpork/etc
- - user: 939
- - group: 939
- - makedirs: True
-
-ppetcsync:
- file.recurse:
- - name: /opt/so/pulledpork/etc
- - source: salt://pulledpork/etc
- - user: 939
- - group: 939
- - template: jinja
-
-rulesdir:
- file.directory:
- - name: /opt/so/rules/nids
- - user: 939
- - group: 939
- - makedirs: True
-
-ruleslink:
- file.symlink:
- - name: /opt/so/saltstack/salt/pulledpork/rules
- - target: /opt/so/rules/nids
-
-toosmooth/so-pulledpork:test2:
- docker_image.present
-
-so-pulledpork:
- docker_container.running:
- - image: toosmooth/so-pulledpork:test2
- - hostname: so-pulledpork
- - user: socore
- - binds:
- - /opt/so/pulledpork/etc:/opt/pulledpork/etc:ro
- - /opt/so/rules/nids:/opt/so/rules/nids:rw
- - network_mode: so-elastic-net
diff --git a/salt/redis/init.sls b/salt/redis/init.sls
index 81c47da93..cd982a137 100644
--- a/salt/redis/init.sls
+++ b/salt/redis/init.sls
@@ -49,7 +49,7 @@ toosmooth/so-redis:test2:
so-redis:
docker_container.running:
- - image: soshybridhunter/so-redis:HH1.0.3
+ - image: soshybridhunter/so-redis:HH1.0.5
- hostname: so-redis
- user: socore
- port_bindings:
diff --git a/salt/somaster/init.sls b/salt/somaster/init.sls
deleted file mode 100644
index 3545ef846..000000000
--- a/salt/somaster/init.sls
+++ /dev/null
@@ -1,24 +0,0 @@
-# Copyright 2014,2015,2016,2017,2018 Security Onion Solutions, LLC
-
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see .
-
-# Add Redis docker if REDIS is enabled
-# Add REDIS user
-
-# Sync updated logstash config for REDIS
-
-# Add ES user
-
-
-# Add ES Docker
diff --git a/salt/ssl/init.sls b/salt/ssl/init.sls
index 8a8d9309a..a2d2b613f 100644
--- a/salt/ssl/init.sls
+++ b/salt/ssl/init.sls
@@ -23,7 +23,8 @@ m2cryptopkgs:
- signing_policy: influxdb
- public_key: /etc/pki/influxdb.key
- CN: {{ master }}
- - days_remaining: 3000
+ - days_remaining: 0
+ - days_valid: 3650
- backup: True
- managed_private_key:
name: /etc/pki/influxdb.key
@@ -39,7 +40,8 @@ m2cryptopkgs:
- signing_policy: filebeat
- public_key: /etc/pki/filebeat.key
- CN: {{ master }}
- - days_remaining: 3000
+ - days_remaining: 0
+ - days_valid: 3650
- backup: True
- managed_private_key:
name: /etc/pki/filebeat.key
@@ -71,7 +73,8 @@ fbcrtlink:
- signing_policy: registry
- public_key: /etc/pki/registry.key
- CN: {{ master }}
- - days_remaining: 3000
+ - days_remaining: 0
+ - days_valid: 3650
- backup: True
- managed_private_key:
name: /etc/pki/registry.key
@@ -85,15 +88,37 @@ fbcrtlink:
- signing_policy: masterssl
- public_key: /etc/pki/masterssl.key
- CN: {{ master }}
- - days_remaining: 3000
+ - days_remaining: 0
+ - days_valid: 3650
- backup: True
- managed_private_key:
name: /etc/pki/masterssl.key
bits: 4096
backup: True
+# Create a private key and cert for Fleet (osquery management)
+/etc/pki/fleet.key:
+ x509.private_key_managed:
+ - CN: {{ master }}
+ - bits: 4096
+ - days_remaining: 0
+ - days_valid: 3650
+ - backup: True
+
+/etc/pki/fleet.crt:
+ x509.certificate_managed:
+ - signing_private_key: /etc/pki/fleet.key
+ - CN: {{ master }}
+ - days_remaining: 0
+ - days_valid: 3650
+ - backup: True
+ - managed_private_key:
+ name: /etc/pki/fleet.key
+ bits: 4096
+ backup: True
+
{% endif %}
-{% if grains['role'] == 'so-SENSOR' or grains['role'] == 'so-eval' %}
+{% if grains['role'] == 'so-sensor' or grains['role'] == 'so-node' or grains['role'] == 'so-eval' %}
fbcertdir:
file.directory:
@@ -107,7 +132,8 @@ fbcertdir:
- signing_policy: filebeat
- public_key: /opt/so/conf/filebeat/etc/pki/filebeat.key
- CN: {{ master }}
- - days_remaining: 3000
+ - days_remaining: 0
+ - days_valid: 3650
- backup: True
- managed_private_key:
name: /opt/so/conf/filebeat/etc/pki/filebeat.key
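The pattern above (days_remaining: 0 together with days_valid: 3650) issues ten-year certificates and keeps Salt from regenerating them based on remaining lifetime. A quick sanity check of the new Fleet key/cert pair after a highstate could look like this (a sketch; openssl is assumed to be available on the master, and the paths are the ones declared in the state):

    # Show subject and validity window of the Fleet certificate
    openssl x509 -in /etc/pki/fleet.crt -noout -subject -dates

    # Confirm the certificate matches the managed private key (hashes should be identical)
    openssl rsa -in /etc/pki/fleet.key -noout -modulus | sha256sum
    openssl x509 -in /etc/pki/fleet.crt -noout -modulus | sha256sum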
diff --git a/salt/top.sls b/salt/top.sls
index 1ab76733e..a319209ca 100644
--- a/salt/top.sls
+++ b/salt/top.sls
@@ -10,6 +10,7 @@ base:
{%- if BROVER != 'SURICATA' %}
- bro
{%- endif %}
+ - wazuh
- filebeat
- schedule
@@ -20,13 +21,18 @@ base:
- firewall
- master
- idstools
- - redis
+ - mysql
- elasticsearch
- logstash
- kibana
- pcap
- suricata
- bro
+ - curator
+ - elastalert
+ - fleet
+ - wazuh
+ - filebeat
- utility
- schedule
@@ -39,11 +45,16 @@ base:
- master
- idstools
- redis
+ - mysql
- elasticsearch
- logstash
- kibana
+ - elastalert
+ - wazuh
+ - filebeat
- utility
- schedule
+ - fleet
# Storage node logic
@@ -60,6 +71,7 @@ base:
- firewall
- logstash
- elasticsearch
+ - curator
- schedule
'G@role:so-node and I@node:node_type:warm':
@@ -77,6 +89,9 @@ base:
- firewall
- logstash
- elasticsearch
+ - curator
+ - wazuh
+ - filebeat
- schedule
'G@role:mastersensor':
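With wazuh, mysql, fleet, curator, elastalert, and filebeat now assigned per role in the top file, it can be useful to confirm what a given minion will actually receive before running a full highstate (a sketch; the minion target is a placeholder):

    # List the states the new top file matches for the master
    sudo salt 'so-master*' state.show_top

    # Apply only the newly added Wazuh state as a smoke test
    sudo salt 'so-master*' state.apply wazuh test=True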
diff --git a/salt/wazuh/files/agent/ossec.conf b/salt/wazuh/files/agent/ossec.conf
new file mode 100644
index 000000000..5b02910f9
--- /dev/null
+++ b/salt/wazuh/files/agent/ossec.conf
@@ -0,0 +1,203 @@
+{%- if grains['role'] == 'so-master' or grains['role'] == 'so-eval' %}
+{%- set ip = salt['pillar.get']('static:masterip', '') %}
+{%- elif grains['role'] == 'so-node' %}
+{%- set ip = salt['pillar.get']('node:mainip', '') %}
+{%- elif grains['role'] == 'so-sensor' %}
+{%- set ip = salt['pillar.get']('sensor:mainip', '') %}
+{%- endif %}
+
+<ossec_config>
+  <client>
+    <server>
+      <address>{{ip}}</address>
+      <port>1514</port>
+      <protocol>udp</protocol>
+    </server>
+{%- if grains['os'] == 'Ubuntu' %}
+    <config-profile>ubuntu, ubuntu16, ubuntu16.04</config-profile>
+{%- else %}
+    <config-profile>centos, centos7</config-profile>
+{%- endif %}
+    <notify_time>10</notify_time>
+    <time-reconnect>60</time-reconnect>
+    <auto_restart>yes</auto_restart>
+    <crypto_method>aes</crypto_method>
+  </client>
+
+  <!-- Agent buffer options -->
+  <client_buffer>
+    <disabled>no</disabled>
+    <queue_size>5000</queue_size>
+    <events_per_second>500</events_per_second>
+  </client_buffer>
+
+  <!-- Policy monitoring -->
+  <rootcheck>
+    <disabled>no</disabled>
+    <check_unixaudit>yes</check_unixaudit>
+    <check_files>yes</check_files>
+    <check_trojans>yes</check_trojans>
+    <check_dev>yes</check_dev>
+    <check_sys>yes</check_sys>
+    <check_pids>yes</check_pids>
+    <check_ports>yes</check_ports>
+    <check_if>yes</check_if>
+
+    <!-- Frequency that rootcheck is executed - every 12 hours -->
+    <frequency>43200</frequency>
+
+    <rootkit_files>/var/ossec/etc/shared/rootkit_files.txt</rootkit_files>
+    <rootkit_trojans>/var/ossec/etc/shared/rootkit_trojans.txt</rootkit_trojans>
+
+    <system_audit>/var/ossec/etc/shared/system_audit_rcl.txt</system_audit>
+    <system_audit>/var/ossec/etc/shared/system_audit_ssh.txt</system_audit>
+
+    <skip_nfs>yes</skip_nfs>
+  </rootcheck>
+
+  <wodle name="open-scap">
+    <disabled>yes</disabled>
+    <timeout>1800</timeout>
+    <interval>1d</interval>
+    <scan-on-start>yes</scan-on-start>
+  </wodle>
+
+  <wodle name="cis-cat">
+    <disabled>yes</disabled>
+    <timeout>1800</timeout>
+    <interval>1d</interval>
+    <scan-on-start>yes</scan-on-start>
+
+    <java_path>wodles/java</java_path>
+    <ciscat_path>wodles/ciscat</ciscat_path>
+  </wodle>
+
+  <!-- Osquery integration -->
+  <wodle name="osquery">
+    <disabled>yes</disabled>
+    <run_daemon>yes</run_daemon>
+    <log_path>/var/log/osquery/osqueryd.results.log</log_path>
+    <config_path>/etc/osquery/osquery.conf</config_path>
+    <add_labels>yes</add_labels>
+  </wodle>
+
+  <!-- System inventory -->
+  <wodle name="syscollector">
+    <disabled>no</disabled>
+    <interval>1h</interval>
+    <scan_on_start>yes</scan_on_start>
+    <hardware>yes</hardware>
+    <os>yes</os>
+    <network>yes</network>
+    <packages>yes</packages>
+    <ports all="no">yes</ports>
+    <processes>yes</processes>
+  </wodle>
+
+  <!-- File integrity monitoring -->
+  <syscheck>
+    <disabled>no</disabled>
+
+    <!-- Frequency that syscheck is executed - default every 12 hours -->
+    <frequency>43200</frequency>
+
+    <scan_on_start>yes</scan_on_start>
+
+    <!-- Directories to check (perform all possible verifications) -->
+    <directories check_all="yes">/etc,/usr/bin,/usr/sbin</directories>
+    <directories check_all="yes">/bin,/sbin,/boot</directories>
+
+    <!-- Files/directories to ignore -->
+    <ignore>/etc/mtab</ignore>
+    <ignore>/etc/hosts.deny</ignore>
+    <ignore>/etc/mail/statistics</ignore>
+    <ignore>/etc/random-seed</ignore>
+    <ignore>/etc/random.seed</ignore>
+    <ignore>/etc/adjtime</ignore>
+    <ignore>/etc/httpd/logs</ignore>
+    <ignore>/etc/utmpx</ignore>
+    <ignore>/etc/wtmpx</ignore>
+    <ignore>/etc/cups/certs</ignore>
+    <ignore>/etc/dumpdates</ignore>
+    <ignore>/etc/svc/volatile</ignore>
+    <ignore>/sys/kernel/security</ignore>
+    <ignore>/sys/kernel/debug</ignore>
+
+    <!-- Check the file, but never compute the diff -->
+    <nodiff>/etc/ssl/private.key</nodiff>
+
+    <skip_nfs>yes</skip_nfs>
+
+    <!-- Remove not monitored files -->
+    <remove_old_diff>yes</remove_old_diff>
+
+    <!-- Allow the system to restart Auditd after installing the plugin -->
+    <restart_audit>yes</restart_audit>
+  </syscheck>
+
+  <!-- Log analysis - command output -->
+  <localfile>
+    <log_format>command</log_format>
+    <command>df -P</command>
+    <frequency>360</frequency>
+  </localfile>
+
+  <localfile>
+    <log_format>full_command</log_format>
+    <command>netstat -tulpn | sed 's/\([[:alnum:]]\+\)\ \+[[:digit:]]\+\ \+[[:digit:]]\+\ \+\(.*\):\([[:digit:]]*\)\ \+\([0-9\.\:\*]\+\).\+\ \([[:digit:]]*\/[[:alnum:]\-]*\).*/\1 \2 == \3 == \4 \5/' | sort -k 4 -g | sed 's/ == \(.*\) ==/:\1/' | sed 1,2d</command>
+    <alias>netstat listening ports</alias>
+    <frequency>360</frequency>
+  </localfile>
+
+  <localfile>
+    <log_format>full_command</log_format>
+    <command>last -n 20</command>
+    <frequency>360</frequency>
+  </localfile>
+
+  <!-- Active response and agent upgrade verification -->
+  <active-response>
+    <disabled>no</disabled>
+    <ca_store>/var/ossec/etc/wpk_root.pem</ca_store>
+    <ca_verification>yes</ca_verification>
+  </active-response>
+
+  <!-- Format of internal logs -->
+  <logging>
+    <log_format>plain</log_format>
+  </logging>
+
+</ossec_config>
+
+<ossec_config>
+  <localfile>
+    <log_format>syslog</log_format>
+    <location>/var/ossec/logs/active-responses.log</location>
+  </localfile>
+
+  <localfile>
+    <log_format>syslog</log_format>
+    <location>/var/log/auth.log</location>
+  </localfile>
+
+  <localfile>
+    <log_format>syslog</log_format>
+    <location>/var/log/syslog</location>
+  </localfile>
+
+  <localfile>
+    <log_format>syslog</log_format>
+    <location>/var/log/dpkg.log</location>
+  </localfile>
+
+  <localfile>
+    <log_format>syslog</log_format>
+    <location>/var/log/kern.log</location>
+  </localfile>
+</ossec_config>
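The agent configuration above points every minion's Wazuh agent at the manager IP resolved from pillar and ships events over UDP 1514. After the state runs, the rendered settings and agent status can be checked on a minion roughly like this (a sketch; the paths are the Wazuh package defaults used above):

    # Confirm the rendered manager address and protocol
    grep -A4 '<server>' /var/ossec/etc/ossec.conf

    # Check that the agent daemons are running
    /var/ossec/bin/ossec-control status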
diff --git a/salt/wazuh/files/agent/wazuh-register-agent b/salt/wazuh/files/agent/wazuh-register-agent
new file mode 100755
index 000000000..4197a5334
--- /dev/null
+++ b/salt/wazuh/files/agent/wazuh-register-agent
@@ -0,0 +1,139 @@
+{%- if grains['role'] == 'so-master' or grains['role'] == 'so-eval' %}
+{%- set ip = salt['pillar.get']('static:masterip', '') %}
+{%- elif grains['role'] == 'so-node' %}
+{%- set ip = salt['pillar.get']('node:mainip', '') %}
+{%- elif grains['role'] == 'so-sensor' %}
+{%- set ip = salt['pillar.get']('sensor:mainip', '') %}
+{%- endif %}
+#!/bin/bash
+
+###
+# Shell script for registering agents automatically with the API
+# Copyright (C) 2017 Wazuh, Inc. All rights reserved.
+# Wazuh.com
+#
+# This program is a free software; you can redistribute it
+# and/or modify it under the terms of the GNU General Public
+# License (version 2) as published by the FSF - Free Software
+# Foundation.
+###
+#
+# 12/11/2018
+# This script has been modified by Security Onion Solutions
+# - Added Agent IP variable and option
+###
+
+# Connection variables
+API_IP="localhost"
+API_PORT="55000"
+PROTOCOL="https"
+USER="foo"
+PASSWORD="bar"
+AGENT_NAME=$(hostname)
+AGENT_IP="{{ip}}"
+
+display_help() {
+cat << EOF
+Usage: $0 [-h] [-f] [-s] [-i AGENT_IP]
+EOF
+}
+
+register_agent() {
+ # Add the agent through the manager API and capture the JSON response
+ echo ""
+ echo "Adding agent:"
+ API_RESULT=$(curl -s -u $USER:"$PASSWORD" -k -X POST -d 'name='$AGENT_NAME'&ip='$AGENT_IP $PROTOCOL://$API_IP:$API_PORT/agents 2>&1)
+
+ if [ "$?" != "0" ]; then
+ echo -e $API_RESULT | sed -rn 's/.*"message":"(.+)".*/\1/p'
+ exit 0
+ fi
+ # Get agent id and agent key
+ AGENT_ID=$(echo $API_RESULT | cut -d':' -f 4 | cut -d ',' -f 1)
+ AGENT_KEY=$(echo $API_RESULT | cut -d':' -f 5 | cut -d '}' -f 1)
+
+ echo "Agent '$AGENT_NAME' with ID '$AGENT_ID' added."
+ echo "Key for agent '$AGENT_ID' received."
+
+ # Importing key
+ echo ""
+ echo "Importing authentication key:"
+ echo "y" | /var/ossec/bin/manage_agents -i $AGENT_KEY
+
+ # Restarting agent
+ echo ""
+ echo "Restarting:"
+ echo ""
+ /var/ossec/bin/ossec-control restart
+
+ exit 0
+}
+
+remove_agent() {
+ echo "Found: $AGENT_ID"
+ echo "Removing previous registration for '$AGENT_NAME' using ID: $AGENT_ID ..."
+ # curl -u foo:bar -k -X DELETE "https://127.0.0.1:55000/agents/001
+ REMOVE_AGENT=$(curl -s -u $USER:"$PASSWORD" -k -X DELETE $PROTOCOL://$API_IP:$API_PORT/agents/$AGENT_ID)
+ echo -e $REMOVE_AGENT
+}
+
+get_agent_id() {
+ echo ""
+ echo "Checking for Agent ID..."
+ AGENT_ID=$(curl -s -u $USER:"$PASSWORD" -k -X GET $PROTOCOL://$API_IP:$API_PORT/agents/name/$AGENT_NAME | rev | cut -d: -f1 | rev | grep -o '".*"' | tr -d '"')
+}
+
+# MAIN
+# ENTRY POINT
+
+while getopts ':hfsi:' OPTION; do
+ case "$OPTION" in
+ h)
+ display_help
+ exit 0
+ ;;
+ f|--force)
+ FORCE=true
+ ;;
+ i|--ip)
+ AGENT_IP=${OPTARG}
+ ;;
+ s|--silent)
+ SILENT=true
+ ;;
+ esac
+done
+# reset $1, $2 .... as normal argument after the flag
+shift $(($OPTIND - 1))
+
+# if no arguments are passed in after the flags, we assign the hostname value to the AGENT_NAME
+#AGENT_NAME=${1:-$(hostname)}
+
+#get_agent_id
+
+# check the return value. If we get an integer back then the agent is already registered. Anything else -> agent is not registered
+# if ! [ "$AGENT_ID" -eq "$AGENT_ID" ] 2> /dev/null ; then
+# echo "Starting registration process ..."
+# :
+# elif [[ "$FORCE" = true && "$SILENT" = "true" ]] ; then
+# remove_agent > /dev/null 2>&1
+# else
+# if [[ "$FORCE" = true ]] ; then
+# remove_agent
+# fi
+# fi
+
+# Default action -> try to register the agent
+sleep 10s
+register_agent
+#remove_agent
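The registration script drives the Wazuh manager API on port 55000 with HTTP basic auth; the equivalent manual calls look roughly like the following (a sketch; foo/bar are the placeholder credentials hard-coded above, and sensor01/10.0.0.5 are hypothetical agent values):

    # Register an agent by name and IP (the endpoint register_agent posts to)
    curl -s -u foo:bar -k -X POST -d 'name=sensor01&ip=10.0.0.5' https://localhost:55000/agents

    # Look up an existing agent ID by name, as get_agent_id() does
    curl -s -u foo:bar -k -X GET https://localhost:55000/agents/name/sensor01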
diff --git a/salt/wazuh/files/filebeat.yml b/salt/wazuh/files/filebeat.yml
deleted file mode 100644
index 7f076793c..000000000
--- a/salt/wazuh/files/filebeat.yml
+++ /dev/null
@@ -1,16 +0,0 @@
-filebeat:
- prospectors:
- - input_type: log
- paths:
- - "/var/ossec/data/logs/alerts/alerts.json"
- document_type: wazuh-alerts
- json.message_key: log
- json.keys_under_root: true
- json.overwrite_keys: true
-
-output:
- logstash:
- # The Logstash hosts
- hosts: ["logstash:5000"]
-# ssl:
-# certificate_authorities: ["/etc/filebeat/logstash.crt"]
diff --git a/salt/wazuh/init.sls b/salt/wazuh/init.sls
index d034cab5c..ac05f1984 100644
--- a/salt/wazuh/init.sls
+++ b/salt/wazuh/init.sls
@@ -1,91 +1,77 @@
-# Create a state directory
+{%- set HOSTNAME = salt['grains.get']('host', '') %}
-statedir:
- file.directory:
- - name: /opt/so/state
+# Add ossec group
+ossecgroup:
+ group.present:
+ - name: ossec
+ - gid: 945
-salttmp:
- file.directory:
- - name: /opt/so/tmp
+# Add ossecm user
+ossecm:
+ user.present:
+ - uid: 943
+ - gid: 945
+ - home: /opt/so/wazuh
+ - createhome: False
-# Install packages needed for the sensor
+# Add ossecr user
+ossecr:
+ user.present:
+ - uid: 944
+ - gid: 945
+ - home: /opt/so/wazuh
+ - createhome: False
-sensorpkgs:
- pkg.installed:
- - skip_suggestions: True
- - pkgs:
- - docker-ce
- - python-docker
+# Add ossec user
+ossec:
+ user.present:
+ - uid: 945
+ - gid: 945
+ - home: /opt/so/wazuh
+ - createhome: False
-# Always keep these packages up to date
+# Install the Wazuh agent package
+wazuhpkgs:
+ pkg.installed:
+ - skip_suggestions: False
+ - pkgs:
+ - wazuh-agent
-alwaysupdated:
- pkg.latest:
- - pkgs:
- - openssl
- - openssh-server
- - bash
- - skip_suggestions: True
-
-# Set time to UTC
-
-Etc/UTC:
- timezone.system
-
-# Set up docker network
-dockernet:
- docker_network.present:
- - name: so-elastic-net
- - driver: bridge
-
-# Snag the so-core docker
-toosmooth/so-core:test2:
- docker_image.present
-
-# Drop the correct nginx config based on role
-
-nginxconfdir:
- file.directory:
- - name: /opt/so/conf/nginx
- - user: 939
- - group: 939
- - makedirs: True
-
-nginxconf:
+# Add Wazuh agent conf
+wazuhagentconf:
file.managed:
- - name: /opt/so/conf/nginx/nginx.conf
- - user: 939
- - group: 939
+ - name: /var/ossec/etc/ossec.conf
+ - source: salt://wazuh/files/agent/ossec.conf
+ - user: 0
+ - group: 945
- template: jinja
- - source: salt://common/nginx/nginx.conf.{{ grains.role }}
-nginxlogdir:
- file.directory:
- - name: /opt/so/log/nginx/
- - user: 939
- - group: 939
+# Add Wazuh agent registration script
+wazuhagentregister:
+ file.managed:
+ - name: /usr/sbin/wazuh-register-agent
+ - source: salt://wazuh/files/agent/wazuh-register-agent
+ - user: 0
+ - group: 0
+ - mode: 755
+ - template: jinja
-nginxtmp:
- file.directory:
- - name: /opt/so/tmp/nginx/tmp
- - user: 939
- - group: 939
- - makedirs: True
-
-# Start the core docker
-so-core:
+so-wazuh:
docker_container.running:
- - image: toosmooth/so-core:test2
- - hostname: so-core
- - user: socore
- - binds:
- - /opt/so:/opt/so:rw
- - /opt/so/conf/nginx/nginx.conf:/etc/nginx/nginx.conf:ro
- - /opt/so/log/nginx/:/var/log/nginx:rw
- - /opt/so/tmp/nginx/:/var/lib/nginx:rw
- - /opt/so/tmp/nginx/:/run:rw
- - network_mode: so-elastic-net
- - cap_add: NET_BIND_SERVICE
+ - image: soshybridhunter/so-wazuh:HH1.0.5
+ - hostname: {{HOSTNAME}}-wazuh-manager
+ - name: so-wazuh
+ - detach: True
- port_bindings:
- - 80:80
- - 443:443
+ - 0.0.0.0:1514:1514/udp
+ - 0.0.0.0:1514:1514/tcp
+ - 0.0.0.0:55000:55000
+ - binds:
+ - /opt/so/wazuh/:/var/ossec/data/:rw
+
+# Register the agent
+registertheagent:
+ cmd.run:
+ - name: /usr/sbin/wazuh-register-agent
+ - cwd: /
+ #- stateful: True
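Since registertheagent runs on every highstate and the manager container may still be starting, it can help to verify the container and re-run the registration by hand when troubleshooting (a sketch; assumes Docker and the paths installed by this state):

    # Verify the Wazuh manager container and its published ports
    sudo docker ps --filter name=so-wazuh
    sudo docker port so-wazuh

    # Re-run agent registration manually if the first attempt raced the manager startup
    sudo /usr/sbin/wazuh-register-agent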
diff --git a/so-setup-network.sh b/so-setup-network.sh
index aef893040..de978e94a 100644
--- a/so-setup-network.sh
+++ b/so-setup-network.sh
@@ -22,6 +22,7 @@ NICS=$(ip link | awk -F: '$0 !~ "lo|vir|veth|br|docker|wl|^[^0-9]"{print $2 " \"
CPUCORES=$(cat /proc/cpuinfo | grep processor | wc -l)
LISTCORES=$(cat /proc/cpuinfo | grep processor | awk '{print $3 " \"" "core" "\""}')
RANDOMUID=$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 16 | head -n 1)
+NODE_ES_PORT="9200"
# End Global Variable Section
@@ -49,7 +50,9 @@ add_master_hostfile() {
"Enter your Master Server IP Address" 10 60 X.X.X.X 3>&1 1>&2 2>&3)
# Add the master to the host file if it doesn't resolve
- echo "$MSRVIP $MSRV" >> /etc/hosts
+ if ! grep -q $MSRVIP /etc/hosts; then
+ echo "$MSRVIP $MSRV" >> /etc/hosts
+ fi
}
add_socore_user_master() {
@@ -74,6 +77,19 @@ add_socore_user_notmaster() {
}
+# Create an auth pillar so that passwords survive re-install
+auth_pillar(){
+
+ if [ ! -f /opt/so/saltstack/pillar/auth.sls ]; then
+ echo "Creating Auth Pillar"
+ mkdir -p /opt/so/saltstack/pillar
+ echo "auth:" >> /opt/so/saltstack/pillar/auth.sls
+ echo " mysql: $MYSQLPASS" >> /opt/so/saltstack/pillar/auth.sls
+ echo " fleet: $FLEETPASS" >> /opt/so/saltstack/pillar/auth.sls
+ fi
+
+}
+
# Enable Bro Logs
bro_logs_enabled() {
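auth_pillar() above writes the generated passwords to a pillar file exactly once, so a re-install reuses the existing secrets instead of minting new ones. Checking the result from the master could look like this (a sketch; the key names come from the echo lines in auth_pillar):

    # Inspect the persisted secrets
    cat /opt/so/saltstack/pillar/auth.sls

    # Confirm minions matched to the auth pillar can read it
    sudo salt 'so-master*' pillar.get auth:mysql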
@@ -154,8 +170,9 @@ chown_salt_master() {
clear_master() {
# Clear out the old master public key in case this is a re-install.
# This only happens if you re-install the master.
- if [ -f /etc/salt/pki/minion/minion_master.pub]; then
+ if [ -f /etc/salt/pki/minion/minion_master.pub ]; then
rm /etc/salt/pki/minion/minion_master.pub
+ service salt-minion restart
fi
}
@@ -170,6 +187,15 @@ configure_minion() {
if [ $TYPE == 'master' ] || [ $TYPE == 'eval' ]; then
echo "master: $HOSTNAME" > /etc/salt/minion
echo "id: $HOSTNAME" >> /etc/salt/minion
+ echo "mysql.host: '$MAINIP'" >> /etc/salt/minion
+ echo "mysql.port: 3306" >> /etc/salt/minion
+ echo "mysql.user: 'root'" >> /etc/salt/minion
+ if [ ! -f /opt/so/saltstack/pillar/auth.sls ]; then
+ echo "mysql.pass: '$MYSQLPASS'" >> /etc/salt/minion
+ else
+ OLDPASS=$(cat /opt/so/saltstack/pillar/auth.sls | grep mysql | awk {'print $2'})
+ echo "mysql.pass: '$OLDPASS'" >> /etc/salt/minion
+ fi
else
echo "master: $MSRV" > /etc/salt/minion
echo "id: $HOSTNAME" >> /etc/salt/minion
@@ -250,7 +276,9 @@ create_bond() {
# Need to add 17.04 support still
apt-get -y install ifenslave
- echo "bonding" >> /etc/modules
+ if ! grep -q bonding /etc/modules; then
+ echo "bonding" >> /etc/modules
+ fi
modprobe bonding
local LBACK=$(awk '/auto lo/,/^$/' /etc/network/interfaces)
@@ -329,6 +357,10 @@ docker_install() {
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
yum -y update
yum -y install docker-ce docker-python python-docker
+ docker_registry
+ echo "Restarting Docker"
+ systemctl restart docker
+ systemctl enable docker
else
if [ $INSTALLTYPE == 'MASTERONLY' ] || [ $INSTALLTYPE == 'EVALMODE' ]; then
@@ -384,10 +416,31 @@ filter_nics() {
FNICS=$(ip link | grep -vw $MNIC | awk -F: '$0 !~ "lo|vir|veth|br|docker|wl|^[^0-9]"{print $2 " \"" "Interface" "\"" " OFF"}')
}
+
+generate_passwords(){
+ # Generate random passwords for the MySQL root and Fleet accounts
+ MYSQLPASS=$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 20 | head -n 1)
+ FLEETPASS=$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 20 | head -n 1)
+}
+
get_filesystem_nsm(){
FSNSM=$(df /nsm | awk '$3 ~ /[0-9]+/ { print $2 * 1000 }')
}
+get_log_size_limit() {
+
+ DISK_DIR="/"
+ if [ -d /nsm ]; then
+ DISK_DIR="/nsm"
+ fi
+ DISK_SIZE_K=`df $DISK_DIR |grep -v "^Filesystem" | awk '{print $2}'`
+ PERCENTAGE=85
+ DISK_SIZE=DISK_SIZE_K*1000
+ PERCENTAGE_DISK_SPACE=`echo $(($DISK_SIZE*$PERCENTAGE/100))`
+ LOG_SIZE_LIMIT=$(($PERCENTAGE_DISK_SPACE/1000000000))
+
+}
+
get_filesystem_root(){
FSROOT=$(df / | awk '$3 ~ /[0-9]+/ { print $2 * 1000 }')
}
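get_log_size_limit() above sizes Elasticsearch storage at 85% of the disk backing /nsm (or / when /nsm does not exist), working from df's 1K blocks. For a hypothetical 1 TB volume the shell arithmetic comes out roughly like this (a sketch; the block count is illustrative):

    DISK_SIZE_K=976754000                        # df: ~976M 1K blocks on a 1 TB filesystem
    DISK_SIZE=$((DISK_SIZE_K*1000))              # 976754000000
    PERCENTAGE_DISK_SPACE=$((DISK_SIZE*85/100))  # 830240900000
    LOG_SIZE_LIMIT=$((PERCENTAGE_DISK_SPACE/1000000000))
    echo $LOG_SIZE_LIMIT                         # 830, the GB value written to the pillar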
@@ -435,6 +488,7 @@ install_master() {
mkdir -p /opt/so/gpg
wget --inet4-only -O /opt/so/gpg/SALTSTACK-GPG-KEY.pub https://repo.saltstack.com/apt/ubuntu/16.04/amd64/latest/SALTSTACK-GPG-KEY.pub
wget --inet4-only -O /opt/so/gpg/docker.pub https://download.docker.com/linux/ubuntu/gpg
+ wget --inet4-only -O /opt/so/gpg/GPG-KEY-WAZUH https://packages.wazuh.com/key/GPG-KEY-WAZUH
else
apt-get install -y salt-master
@@ -484,6 +538,11 @@ master_pillar() {
echo " oinkcode: $OINKCODE" >> /opt/so/saltstack/pillar/masters/$HOSTNAME.sls
#echo " access_key: $ACCESS_KEY" >> /opt/so/saltstack/pillar/masters/$HOSTNAME.sls
#echo " access_secret: $ACCESS_SECRET" >> /opt/so/saltstack/pillar/masters/$HOSTNAME.sls
+ echo " es_port: $NODE_ES_PORT" >> /opt/so/saltstack/pillar/masters/$HOSTNAME.sls
+ echo " log_size_limit: $LOG_SIZE_LIMIT" >> /opt/so/saltstack/pillar/masters/$HOSTNAME.sls
+ echo " cur_close_days: $CURCLOSEDAYS" >> /opt/so/saltstack/pillar/masters/$HOSTNAME.sls
+ #echo " mysqlpass: $MYSQLPASS" >> /opt/so/saltstack/pillar/masters/$HOSTNAME.sls
+ #echo " fleetpass: $FLEETPASS" >> /opt/so/saltstack/pillar/masters/$HOSTNAME.sls
}
@@ -492,14 +551,14 @@ master_static() {
# Create a static file for global values
touch /opt/so/saltstack/pillar/static.sls
- echo "static:" >> /opt/so/saltstack/pillar/static.sls
+ echo "static:" > /opt/so/saltstack/pillar/static.sls
echo " hnmaster: $HNMASTER" >> /opt/so/saltstack/pillar/static.sls
echo " ntpserver: $NTPSERVER" >> /opt/so/saltstack/pillar/static.sls
echo " proxy: $PROXY" >> /opt/so/saltstack/pillar/static.sls
echo " broversion: $BROVERSION" >> /opt/so/saltstack/pillar/static.sls
echo " ids: $NIDS" >> /opt/so/saltstack/pillar/static.sls
echo " masterip: $MAINIP" >> /opt/so/saltstack/pillar/static.sls
- if [ $MASTERUPDATES == 'MASTER' ]; then
+ if [[ $MASTERUPDATES == 'MASTER' ]]; then
echo " masterupdate: 1" >> /opt/so/saltstack/pillar/static.sls
else
echo " masterupdate: 0" >> /opt/so/saltstack/pillar/static.sls
@@ -531,6 +590,9 @@ node_pillar() {
echo " ls_batch_count: $LSINPUTBATCHCOUNT" >> $TMP/$HOSTNAME.sls
echo " es_shard_count: $SHARDCOUNT" >> $TMP/$HOSTNAME.sls
echo " node_type: $NODETYPE" >> $TMP/$HOSTNAME.sls
+ echo " es_port: $NODE_ES_PORT" >> $TMP/$HOSTNAME.sls
+ echo " log_size_limit: $LOG_SIZE_LIMIT" >> $TMP/$HOSTNAME.sls
+ echo " cur_close_days: $CURCLOSEDAYS" >> $TMP/$HOSTNAME.sls
}
@@ -542,6 +604,15 @@ saltify() {
if [ $INSTALLTYPE == 'MASTERONLY' ] || [ $INSTALLTYPE == 'EVALMODE' ]; then
yum -y install https://repo.saltstack.com/yum/redhat/salt-repo-latest-2.el7.noarch.rpm
+ cat > /etc/yum.repos.d/wazuh.repo <<\EOF
+[wazuh_repo]
+gpgcheck=1
+gpgkey=https://packages.wazuh.com/key/GPG-KEY-WAZUH
+enabled=1
+name=Wazuh repository
+baseurl=https://packages.wazuh.com/3.x/yum/
+protect=1
+EOF
else
@@ -580,6 +651,62 @@ saltify() {
echo "=dtMN" >> /etc/pki/rpm-gpg/saltstack-signing-key
echo "-----END PGP PUBLIC KEY BLOCK-----" >> /etc/pki/rpm-gpg/saltstack-signing-key
+ # Add the Wazuh Key
+ cat > /etc/pki/rpm-gpg/GPG-KEY-WAZUH <<\EOF
+-----BEGIN PGP PUBLIC KEY BLOCK-----
+Version: GnuPG v1
+
+mQINBFeeyYwBEACyf4VwV8c2++J5BmCl6ofLCtSIW3UoVrF4F+P19k/0ngnSfjWb
+8pSWB11HjZ3Mr4YQeiD7yY06UZkrCXk+KXDlUjMK3VOY7oNPkqzNaP6+8bDwj4UA
+hADMkaXBvWooGizhCoBtDb1bSbHKcAnQ3PTdiuaqF5bcyKk8hv939CHulL2xH+BP
+mmTBi+PM83pwvR+VRTOT7QSzf29lW1jD79v4rtXHJs4KCz/amT/nUm/tBpv3q0sT
+9M9rH7MTQPdqvzMl122JcZST75GzFJFl0XdSHd5PAh2mV8qYak5NYNnwA41UQVIa
++xqhSu44liSeZWUfRdhrQ/Nb01KV8lLAs11Sz787xkdF4ad25V/Rtg/s4UXt35K3
+klGOBwDnzPgHK/OK2PescI5Ve1z4x1C2bkGze+gk/3IcfGJwKZDfKzTtqkZ0MgpN
+7RGghjkH4wpFmuswFFZRyV+s7jXYpxAesElDSmPJ0O07O4lQXQMROE+a2OCcm0eF
+3+Cr6qxGtOp1oYMOVH0vOLYTpwOkAM12/qm7/fYuVPBQtVpTojjV5GDl2uGq7p0o
+h9hyWnLeNRbAha0px6rXcF9wLwU5n7mH75mq5clps3sP1q1/VtP/Fr84Lm7OGke4
+9eD+tPNCdRx78RNWzhkdQxHk/b22LCn1v6p1Q0qBco9vw6eawEkz1qwAjQARAQAB
+tDFXYXp1aC5jb20gKFdhenVoIFNpZ25pbmcgS2V5KSA8c3VwcG9ydEB3YXp1aC5j
+b20+iQI9BBMBCAAnBQJXnsmMAhsDBQkFo5qABQsJCAcDBRUKCQgLBRYCAwEAAh4B
+AheAAAoJEJaz7l8pERFFHEsQAIaslejcW2NgjgOZuvn1Bht4JFMbCIPOekg4Z5yF
+binRz0wmA7JNaawDHTBYa6L+A2Xneu/LmuRjFRMesqopUukVeGQgHBXbGMzY46eI
+rqq/xgvgWzHSbWweiOX0nn+exbEAM5IyW+efkWNz0e8xM1LcxdYZxkVOqFqkp3Wv
+J9QUKw6z9ifUOx++G8UO307O3hT2f+x4MUoGZeOF4q1fNy/VyBS2lMg2HF7GWy2y
+kjbSe0p2VOFGEZLuu2f5tpPNth9UJiTliZKmgSk/zbKYmSjiVY2eDqNJ4qjuqes0
+vhpUaBjA+DgkEWUrUVXG5yfQDzTiYIF84LknjSJBYSLZ4ABsMjNO+GApiFPcih+B
+Xc9Kx7E9RNsNTDqvx40y+xmxDOzVIssXeKqwO8r5IdG3K7dkt2Vkc/7oHOpcKwE5
+8uASMPiqqMo+t1RVa6Spckp3Zz8REILbotnnVwDIwo2HmgASirMGUcttEJzubaIa
+Mv43GKs8RUH9s5NenC02lfZG7D8WQCz5ZH7yEWrt5bCaQRNDXjhsYE17SZ/ToHi3
+OpWu050ECWOHdxlXNG3dOWIdFDdBJM7UfUNSSOe2Y5RLsWfwvMFGbfpdlgJcMSDV
+X+ienkrtXhBteTu0dwPu6HZTFOjSftvtAo0VIqGQrKMvKelkkdNGdDFLQw2mUDcw
+EQj6uQINBFeeyYwBEADD1Y3zW5OrnYZ6ghTd5PXDAMB8Z1ienmnb2IUzLM+i0yE2
+TpKSP/XYCTBhFa390rYgFO2lbLDVsiz7Txd94nHrdWXGEQfwrbxsvdlLLWk7iN8l
+Fb4B60OfRi3yoR96a/kIPNa0x26+n79LtDuWZ/DTq5JSHztdd9F1sr3h8i5zYmtv
+luj99ZorpwYejbBVUm0+gP0ioaXM37uO56UFVQk3po9GaS+GtLnlgoE5volgNYyO
+rkeIua4uZVsifREkHCKoLJip6P7S3kTyfrpiSLhouEZ7kV1lbMbFgvHXyjm+/AIx
+HIBy+H+e+HNt5gZzTKUJsuBjx44+4jYsOR67EjOdtPOpgiuJXhedzShEO6rbu/O4
+wM1rX45ZXDYa2FGblHCQ/VaS0ttFtztk91xwlWvjTR8vGvp5tIfCi+1GixPRQpbN
+Y/oq8Kv4A7vB3JlJscJCljvRgaX0gTBzlaF6Gq0FdcWEl5F1zvsWCSc/Fv5WrUPY
+5mG0m69YUTeVO6cZS1aiu9Qh3QAT/7NbUuGXIaAxKnu+kkjLSz+nTTlOyvbG7BVF
+a6sDmv48Wqicebkc/rCtO4g8lO7KoA2xC/K/6PAxDrLkVyw8WPsAendmezNfHU+V
+32pvWoQoQqu8ysoaEYc/j9fN4H3mEBCN3QUJYCugmHP0pu7VtpWwwMUqcGeUVwAR
+AQABiQIlBBgBCAAPBQJXnsmMAhsMBQkFo5qAAAoJEJaz7l8pERFFz8IP/jfBxJSB
+iOw+uML+C4aeYxuHSdxmSsrJclYjkw7Asha/fm4Kkve00YAW8TGxwH2kgS72ooNJ
+1Q7hUxNbVyrJjQDSMkRKwghmrPnUM3UyHmE0dq+G2NhaPdFo8rKifLOPgwaWAfSV
+wgMTK86o0kqRbGpXgVIG5eRwv2FcxM3xGfy7sub07J2VEz7Ba6rYQ3NTbPK42AtV
++wRJDXcgS7y6ios4XQtSbIB5f6GI56zVlwfRd3hovV9ZAIJQ6DKM31wD6Kt/pRun
+DjwMZu0/82JMoqmxX/00sNdDT1S13guCfl1WhBu7y1ja9MUX5OpUzyEKg5sxme+L
+iY2Rhs6CjmbTm8ER4Uj8ydKyVTy8zbumbB6T8IwCAbEMtPxm6pKh/tgLpoJ+Bj0y
+AsGjmhV7R6PKZSDXg7/qQI98iC6DtWc9ibC/QuHLcvm3hz40mBgXAemPJygpxGst
+mVtU7O3oHw9cIUpkbMuVqSxgPFmSSq5vEYkka1CYeg8bOz6aCTuO5J0GDlLrpjtx
+6lyImbZAF/8zKnW19aq5lshT2qJlTQlZRwwDZX5rONhA6T8IEUnUyD4rAIQFwfJ+
+gsXa4ojD/tA9NLdiNeyEcNfyX3FZwXWCtVLXflzdRN293FKamcdnMjVRjkCnp7iu
+7eO7nMgcRoWddeU+2aJFqCoQtKCp/5EKhFey
+=UIVm
+-----END PGP PUBLIC KEY BLOCK-----
+EOF
+
# Proxy is hating on me.. Lets just set it manually
echo "[salt-latest]" > /etc/yum.repos.d/salt-latest.repo
echo "name=SaltStack Latest Release Channel for RHEL/Centos \$releasever" >> /etc/yum.repos.d/salt-latest.repo
@@ -588,8 +715,27 @@ saltify() {
echo "enabled=1" >> /etc/yum.repos.d/salt-latest.repo
echo "gpgcheck=1" >> /etc/yum.repos.d/salt-latest.repo
echo "gpgkey=file:///etc/pki/rpm-gpg/saltstack-signing-key" >> /etc/yum.repos.d/salt-latest.repo
+
+ cat > /etc/yum.repos.d/wazuh.repo <<\EOF
+[wazuh_repo]
+gpgcheck=1
+gpgkey=file:///etc/pki/rpm-gpg/GPG-KEY-WAZUH
+enabled=1
+name=Wazuh repository
+baseurl=https://packages.wazuh.com/3.x/yum/
+protect=1
+EOF
else
yum -y install https://repo.saltstack.com/yum/redhat/salt-repo-latest-2.el7.noarch.rpm
+cat > /etc/yum.repos.d/wazuh.repo <<\EOF
+[wazuh_repo]
+gpgcheck=1
+gpgkey=file:///etc/pki/rpm-gpg/GPG-KEY-WAZUH
+enabled=1
+name=Wazuh repository
+baseurl=https://packages.wazuh.com/3.x/yum/
+protect=1
+EOF
fi
fi
@@ -632,6 +778,13 @@ saltify() {
mkdir -p /opt/so/gpg
wget --inet4-only -O /opt/so/gpg/SALTSTACK-GPG-KEY.pub https://repo.saltstack.com/apt/ubuntu/$UVER/amd64/latest/SALTSTACK-GPG-KEY.pub
wget --inet4-only -O /opt/so/gpg/docker.pub https://download.docker.com/linux/ubuntu/gpg
+ wget --inet4-only -O /opt/so/gpg/GPG-KEY-WAZUH https://packages.wazuh.com/key/GPG-KEY-WAZUH
+
+ # Get key and install wazuh
+ curl -s https://packages.wazuh.com/key/GPG-KEY-WAZUH | apt-key add -
+ # Add repo
+ echo "deb https://packages.wazuh.com/3.x/apt/ stable main" | tee /etc/apt/sources.list.d/wazuh.list
+
# Initialize the new repos
apt-get update >>~/sosetup.log 2>&1
apt-get -y install salt-minion python-m2crypto >>~/sosetup.log 2>&1
@@ -642,7 +795,9 @@ saltify() {
mkdir $TMP/gpg
scp socore@$MSRV:/opt/so/gpg/* $TMP/gpg
apt-key add $TMP/gpg/SALTSTACK-GPG-KEY.pub
+ apt-key add $TMP/gpg/GPG-KEY-WAZUH
echo "deb http://repo.saltstack.com/apt/ubuntu/$UVER/amd64/latest xenial main" > /etc/apt/sources.list.d/saltstack.list
+ echo "deb https://packages.wazuh.com/3.x/apt/ stable main" | tee /etc/apt/sources.list.d/wazuh.list
# Initialize the new repos
apt-get update >>~/sosetup.log 2>&1
apt-get -y install salt-minion python-m2crypto >>~/sosetup.log 2>&1
@@ -815,7 +970,9 @@ set_updates() {
echo "MASTERUPDATES is $MASTERUPDATES"
if [ $MASTERUPDATES == 'MASTER' ]; then
if [ $OS == 'centos' ]; then
+ if ! grep -q $MSRV /etc/yum.conf; then
echo "proxy=http://$MSRV:3142" >> /etc/yum.conf
+ fi
else
@@ -923,6 +1080,16 @@ whiptail_check_exitstatus() {
}
+whiptail_cur_close_days() {
+
+ CURCLOSEDAYS=$(whiptail --title "Security Onion Setup" --inputbox \
+ "Please specify the threshold (in days) at which Elasticsearch indices will be closed" 10 60 $CURCLOSEDAYS 3>&1 1>&2 2>&3)
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+}
+
whiptail_homenet_master() {
# Ask for the HOME_NET on the master
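whiptail_cur_close_days() feeds the chosen threshold into the master and node pillars as cur_close_days. Once a node has checked in, the value can be confirmed from the master (a sketch; the target is a placeholder and the node: top-level pillar key is assumed from the node pillar layout):

    sudo salt 'so-node*' saltutil.refresh_pillar
    sudo salt 'so-node*' pillar.get node:cur_close_days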
@@ -970,6 +1137,18 @@ whiptail_install_type() {
}
+whiptail_log_size_limit() {
+
+ LOG_SIZE_LIMIT=$(whiptail --title "Security Onion Setup" --inputbox \
+ "Please specify the amount of disk space (in GB) you would like to allocate for Elasticsearch data storage. \
+ By default, this is set to 85% of the disk space allotted for /nsm." 10 60 $LOG_SIZE_LIMIT 3>&1 1>&2 2>&3)
+
+ local exitstatus=$?
+ whiptail_check_exitstatus $exitstatus
+
+}
+
+
whiptail_management_nic() {
MNIC=$(whiptail --title "NIC Setup" --radiolist "Please select your management NIC" 20 78 12 ${NICS[@]} 3>&1 1>&2 2>&3 )
@@ -1347,6 +1526,8 @@ if (whiptail_you_sure); then
# Last Chance to back out
whiptail_make_changes
+ generate_passwords
+ auth_pillar
clear_master
mkdir -p /nsm
get_filesystem_root
@@ -1455,7 +1636,7 @@ if (whiptail_you_sure); then
sensor_pillar
saltify
docker_install
- configure_minion SENSOR
+ configure_minion sensor
copy_minion_pillar sensors
salt_firstcheckin
# Accept the Salt Key
@@ -1499,11 +1680,15 @@ if (whiptail_you_sure); then
NSMSETUP=BASIC
NIDS=Suricata
BROVERSION=ZEEK
+ CURCLOSEDAYS=30
whiptail_make_changes
+ generate_passwords
+ auth_pillar
clear_master
mkdir -p /nsm
get_filesystem_root
get_filesystem_nsm
+ get_log_size_limit
get_main_ip
# Add the user so we can sit back and relax
echo ""
@@ -1544,6 +1729,8 @@ if (whiptail_you_sure); then
whiptail_management_server
whiptail_master_updates
set_updates
+ get_log_size_limit
+ CURCLOSEDAYS=30
es_heapsize
ls_heapsize
whiptail_node_advanced
@@ -1554,6 +1741,8 @@ if (whiptail_you_sure); then
whiptail_node_ls_pipline_batchsize
whiptail_node_ls_input_threads
whiptail_node_ls_input_batch_count
+ whiptail_cur_close_days
+ whiptail_log_size_limit
else
NODE_ES_HEAP_SIZE=$ES_HEAP_SIZE
NODE_LS_HEAP_SIZE=$LS_HEAP_SIZE