Merge remote-tracking branch 'remotes/origin/dev' into issue/1049

m0duspwnens committed 2020-08-07 10:27:11 -04:00
92 changed files with 569 additions and 297 deletions

View File

@@ -1,11 +1,11 @@
{%- set FLEETMANAGER = salt['pillar.get']('static:fleet_manager', False) -%}
{%- set FLEETNODE = salt['pillar.get']('static:fleet_node', False) -%}
{%- set FLEETMANAGER = salt['pillar.get']('global:fleet_manager', False) -%}
{%- set FLEETNODE = salt['pillar.get']('global:fleet_node', False) -%}
{% set WAZUH = salt['pillar.get']('manager:wazuh', '0') %}
{% set THEHIVE = salt['pillar.get']('manager:thehive', '0') %}
{% set PLAYBOOK = salt['pillar.get']('manager:playbook', '0') %}
{% set FREQSERVER = salt['pillar.get']('manager:freq', '0') %}
{% set DOMAINSTATS = salt['pillar.get']('manager:domainstats', '0') %}
{% set ZEEKVER = salt['pillar.get']('static:zeekversion', 'COMMUNITY') %}
{% set ZEEKVER = salt['pillar.get']('global:zeekversion', 'COMMUNITY') %}
{% set GRAFANA = salt['pillar.get']('manager:grafana', '0') %}
eval:
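
Throughout this commit, pillar lookups move from the static: namespace to global:, matching the rename of the local pillar file from static.sls to global.sls in the scripts further down. A minimal sketch of what the renamed pillar might contain, with key names taken from lookups in this diff and purely hypothetical placeholder values:

global:
  soversion: 2.0.3          # placeholder; read by pillar.get('global:soversion')
  imagerepo: <imagerepo>    # pillar.get('global:imagerepo')
  managerip: 192.0.2.10     # pillar.get('global:managerip')
  fleet_manager: False      # pillar.get('global:fleet_manager')
  fleet_node: False         # pillar.get('global:fleet_node')
  zeekversion: COMMUNITY    # pillar.get('global:zeekversion')
  pipeline: minio           # new key; selects the logstash transport in the pipeline files below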

View File

@@ -1,7 +1,12 @@
{%- set PIPELINE = salt['pillar.get']('global:pipeline', 'minio') %}
logstash:
pipelines:
manager:
config:
- so/0009_input_beats.conf
- so/0010_input_hhbeats.conf
{%- if PIPELINE == "minio"%}
- so/9998_output_minio.conf.jinja
{%- else %}
- so/9999_output_redis.conf.jinja
{%- endif %}
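
With global:pipeline left at its default of 'minio', the manager pipeline template above renders to the following (sketch):

logstash:
  pipelines:
    manager:
      config:
        - so/0009_input_beats.conf
        - so/0010_input_hhbeats.conf
        - so/9998_output_minio.conf.jinja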

View File

@@ -1,8 +1,13 @@
{%- set PIPELINE = salt['pillar.get']('global:pipeline', 'minio') %}
logstash:
pipelines:
search:
config:
{%- if PIPELINE == "minio"%}
- so/0899_input_minio.conf.jinja
{%- else %}
- so/0900_input_redis.conf.jinja
{%- endif %}
- so/9000_output_zeek.conf.jinja
- so/9002_output_import.conf.jinja
- so/9034_output_syslog.conf.jinja
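
Setting the pipeline key to anything other than 'minio' keeps the previous redis transport on both sides; a minimal override sketch, assuming the key is placed in the local global.sls pillar:

global:
  pipeline: redis   # manager emits via so/9999_output_redis.conf.jinja,
                    # search nodes consume via so/0900_input_redis.conf.jinja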

View File

@@ -14,14 +14,14 @@ base:
- elasticsearch.search
'*_sensor':
- static
- global
- zeeklogs
- healthcheck.sensor
- minions.{{ grains.id }}
'*_manager or *_managersearch':
- match: compound
- static
- global
- data.*
- secrets
- minions.{{ grains.id }}
@@ -36,7 +36,7 @@ base:
- secrets
- healthcheck.eval
- elasticsearch.eval
- static
- global
- minions.{{ grains.id }}
'*_standalone':
@@ -48,20 +48,20 @@ base:
- zeeklogs
- secrets
- healthcheck.standalone
- static
- global
- minions.{{ grains.id }}
'*_node':
- static
- global
- minions.{{ grains.id }}
'*_heavynode':
- static
- global
- zeeklogs
- minions.{{ grains.id }}
'*_helix':
- static
- global
- fireeye
- zeeklogs
- logstash
@@ -69,13 +69,13 @@ base:
- minions.{{ grains.id }}
'*_fleet':
- static
- global
- data.*
- secrets
- minions.{{ grains.id }}
'*_searchnode':
- static
- global
- logstash
- logstash.search
- elasticsearch.search

View File

@@ -20,7 +20,7 @@
{% if role in ['eval', 'managersearch', 'manager', 'standalone'] %}
{{ append_containers('manager', 'grafana', 0) }}
{{ append_containers('static', 'fleet_manager', 0) }}
{{ append_containers('global', 'fleet_manager', 0) }}
{{ append_containers('manager', 'wazuh', 0) }}
{{ append_containers('manager', 'thehive', 0) }}
{{ append_containers('manager', 'playbook', 0) }}
@@ -29,11 +29,11 @@
{% endif %}
{% if role in ['eval', 'heavynode', 'sensor', 'standalone'] %}
{{ append_containers('static', 'strelka', 0) }}
{{ append_containers('global', 'strelka', 0) }}
{% endif %}
{% if role in ['heavynode', 'standalone'] %}
{{ append_containers('static', 'zeekversion', 'SURICATA') }}
{{ append_containers('global', 'zeekversion', 'SURICATA') }}
{% endif %}
{% if role == 'searchnode' %}
@@ -41,5 +41,5 @@
{% endif %}
{% if role == 'sensor' %}
{{ append_containers('static', 'zeekversion', 'SURICATA') }}
{{ append_containers('global', 'zeekversion', 'SURICATA') }}
{% endif %}

View File

@@ -76,6 +76,7 @@ if [ $MANAGERCHECK != 'so-helix' ]; then
"so-kibana:$VERSION" \
"so-kratos:$VERSION" \
"so-logstash:$VERSION" \
"so-minio:$VERSION" \
"so-mysql:$VERSION" \
"so-nginx:$VERSION" \
"so-pcaptools:$VERSION" \

View File

@@ -14,7 +14,7 @@
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
{%- set MANAGERIP = salt['pillar.get']('static:managerip', '') -%}
{%- set MANAGERIP = salt['pillar.get']('global:managerip', '') -%}
. /usr/sbin/so-common
SKIP=0

View File

@@ -29,9 +29,9 @@ manager_check() {
}
manager_check
VERSION=$(grep soversion $local_salt_dir/pillar/static.sls | cut -d':' -f2|sed 's/ //g')
# Modify static.sls to enable Features
sed -i 's/features: False/features: True/' $local_salt_dir/pillar/static.sls
VERSION=$(grep soversion $local_salt_dir/pillar/global.sls | cut -d':' -f2|sed 's/ //g')
# Modify global.sls to enable Features
sed -i 's/features: False/features: True/' $local_salt_dir/pillar/global.sls
SUFFIX="-features"
TRUSTED_CONTAINERS=( \
"so-elasticsearch:$VERSION$SUFFIX" \

View File

@@ -16,9 +16,9 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
{% set MANAGER = salt['grains.get']('master') %}
{% set VERSION = salt['pillar.get']('static:soversion') %}
{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %}
{%- set MANAGERIP = salt['pillar.get']('static:managerip') -%}
{% set VERSION = salt['pillar.get']('global:soversion') %}
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{%- set MANAGERIP = salt['pillar.get']('global:managerip') -%}
. /usr/sbin/so-common

View File

@@ -1,8 +1,8 @@
#!/bin/bash
#
# {%- set FLEET_MANAGER = salt['pillar.get']('static:fleet_manager', False) -%}
# {%- set FLEET_NODE = salt['pillar.get']('static:fleet_node', False) -%}
# {%- set FLEET_IP = salt['pillar.get']('static:fleet_ip', '') %}
# {%- set FLEET_MANAGER = salt['pillar.get']('global:fleet_manager', False) -%}
# {%- set FLEET_NODE = salt['pillar.get']('global:fleet_node', False) -%}
# {%- set FLEET_IP = salt['pillar.get']('global:fleet_ip', '') %}
# {%- set MANAGER = salt['pillar.get']('manager:url_base', '') %}
#
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC

View File

@@ -111,6 +111,7 @@ update_dockers() {
"so-kibana" \
"so-kratos" \
"so-logstash" \
"so-minio" \
"so-mysql" \
"so-nginx" \
"so-pcaptools" \
@@ -159,7 +160,7 @@ update_version() {
# Update the version to the latest
echo "Updating the Security Onion version file."
echo $NEWVERSION > /etc/soversion
sed -i "s/$INSTALLEDVERSION/$NEWVERSION/g" /opt/so/saltstack/local/pillar/static.sls
sed -i "s/$INSTALLEDVERSION/$NEWVERSION/g" /opt/so/saltstack/local/pillar/global.sls
}
upgrade_check() {

View File

@@ -1,5 +1,5 @@
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %}
{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{% set MANAGER = salt['grains.get']('master') %}
{% if grains['role'] in ['so-eval', 'so-node', 'so-managersearch', 'so-heavynode', 'so-standalone'] %}
# Curator

View File

@@ -1,4 +1,4 @@
{%- set FLEETSETUP = salt['pillar.get']('static:fleetsetup', '0') -%}
{%- set FLEETSETUP = salt['pillar.get']('global:fleetsetup', '0') -%}
{%- if FLEETSETUP != 0 %}
launcherpkg:

View File

@@ -13,7 +13,7 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %}
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
# Create the group
dstatsgroup:

View File

@@ -21,7 +21,7 @@ run_every:
# ElastAlert will buffer results from the most recent
# period of time, in case some log sources are not in real time
buffer_time:
minutes: 1
minutes: 5
# The maximum time between queries for ElastAlert to start at the most recently
# run query. When ElastAlert starts, for each rule, it will search elastalert_metadata
@@ -38,7 +38,7 @@ es_host: {{ esip }}
es_port: {{ esport }}
# Sets timeout for connecting to and reading from es_host
es_conn_timeout: 60
es_conn_timeout: 55
# The maximum number of documents that will be downloaded from Elasticsearch in
# a single query. The default is 10,000, and if you expect to get near this number,

View File

@@ -16,7 +16,7 @@ class PlaybookESAlerter(Alerter):
today = strftime("%Y.%m.%d", gmtime())
timestamp = strftime("%Y-%m-%d"'T'"%H:%M:%S", gmtime())
headers = {"Content-Type": "application/json"}
payload = {"rule.name": self.rule['play_title'],"event.severity": self.rule['event.severity'],"kibana_pivot": self.rule['kibana_pivot'],"soc_pivot": self.rule['soc_pivot'],"event.module": self.rule['event.module'],"event.dataset": self.rule['event.dataset'],"play_url": self.rule['play_url'],"sigma_level": self.rule['sigma_level'],"rule.category": self.rule['rule.category'],"data": match, "@timestamp": timestamp}
payload = {"rule.name": self.rule['play_title'],"event.severity": self.rule['event.severity'],"kibana_pivot": self.rule['kibana_pivot'],"soc_pivot": self.rule['soc_pivot'],"event.module": self.rule['event.module'],"event.dataset": self.rule['event.dataset'],"play_url": self.rule['play_url'],"sigma_level": self.rule['sigma_level'],"rule.category": self.rule['rule.category'],"alert_data": match, "@timestamp": timestamp}
url = f"http://{self.rule['elasticsearch_host']}/so-playbook-alerts-{today}/_doc/"
requests.post(url, data=json.dumps(payload), headers=headers, verify=False)

View File

@@ -1,6 +1,6 @@
{% set es = salt['pillar.get']('static:managerip', '') %}
{% set hivehost = salt['pillar.get']('static:managerip', '') %}
{% set hivekey = salt['pillar.get']('static:hivekey', '') %}
{% set es = salt['pillar.get']('global:managerip', '') %}
{% set hivehost = salt['pillar.get']('global:managerip', '') %}
{% set hivekey = salt['pillar.get']('global:hivekey', '') %}
{% set MANAGER = salt['pillar.get']('manager:url_base', '') %}
# Elastalert rule to forward Suricata alerts from Security Onion to a specified TheHive instance.
@@ -8,14 +8,10 @@
es_host: {{es}}
es_port: 9200
name: Suricata-Alert
type: frequency
type: any
index: "*:so-ids-*"
num_events: 1
timeframe:
minutes: 10
buffer_time:
minutes: 10
allow_buffer_time_overlap: true
minutes: 5
query_key: ["rule.uuid","source.ip","destination.ip"]
realert:
days: 1

View File

@@ -1,6 +1,6 @@
{% set es = salt['pillar.get']('static:managerip', '') %}
{% set hivehost = salt['pillar.get']('static:managerip', '') %}
{% set hivekey = salt['pillar.get']('static:hivekey', '') %}
{% set es = salt['pillar.get']('global:managerip', '') %}
{% set hivehost = salt['pillar.get']('global:managerip', '') %}
{% set hivekey = salt['pillar.get']('global:hivekey', '') %}
{% set MANAGER = salt['pillar.get']('manager:url_base', '') %}
# Elastalert rule to forward high level Wazuh alerts from Security Onion to a specified TheHive instance.
@@ -8,14 +8,10 @@
es_host: {{es}}
es_port: 9200
name: Wazuh-Alert
type: frequency
type: any
index: "*:so-ossec-*"
num_events: 1
timeframe:
minutes: 10
buffer_time:
minutes: 10
allow_buffer_time_overlap: true
minutes: 5
realert:
days: 1
filter:

View File

@@ -12,8 +12,8 @@
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %}
{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{% set MANAGER = salt['grains.get']('master') %}
{% if grains['role'] in ['so-eval','so-managersearch', 'so-manager', 'so-standalone'] %}

View File

@@ -12,8 +12,8 @@
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %}
{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{% set MANAGER = salt['grains.get']('master') %}
{% set FEATURES = salt['pillar.get']('elastic:features', False) %}

View File

@@ -6,11 +6,11 @@
{%- set HOSTNAME = salt['grains.get']('host', '') %}
{%- set ZEEKVER = salt['pillar.get']('static:zeekversion', 'COMMUNITY') %}
{%- set WAZUHENABLED = salt['pillar.get']('static:wazuh', '0') %}
{%- set ZEEKVER = salt['pillar.get']('global:zeekversion', 'COMMUNITY') %}
{%- set WAZUHENABLED = salt['pillar.get']('global:wazuh', '0') %}
{%- set STRELKAENABLED = salt['pillar.get']('strelka:enabled', '0') %}
{%- set FLEETMANAGER = salt['pillar.get']('static:fleet_manager', False) -%}
{%- set FLEETNODE = salt['pillar.get']('static:fleet_node', False) -%}
{%- set FLEETMANAGER = salt['pillar.get']('global:fleet_manager', False) -%}
{%- set FLEETNODE = salt['pillar.get']('global:fleet_node', False) -%}
name: {{ HOSTNAME }}

View File

@@ -11,10 +11,10 @@
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %}
{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{% set MANAGER = salt['grains.get']('master') %}
{% set MANAGERIP = salt['pillar.get']('static:managerip', '') %}
{% set MANAGERIP = salt['pillar.get']('global:managerip', '') %}
{% set FEATURES = salt['pillar.get']('elastic:features', False) %}
{% if FEATURES %}
{% set FEATURES = "-features" %}

View File

@@ -15,6 +15,7 @@ role:
- {{ portgroups.mysql }}
- {{ portgroups.kibana }}
- {{ portgroups.redis }}
- {{ portgroups.minio }}
- {{ portgroups.influxdb }}
- {{ portgroups.fleet_api }}
- {{ portgroups.cortex }}
@@ -38,6 +39,7 @@ role:
search_node:
portgroups:
- {{ portgroups.redis }}
- {{ portgroups.minio }}
- {{ portgroups.elasticsearch_node }}
self:
portgroups:
@@ -99,6 +101,7 @@ role:
- {{ portgroups.mysql }}
- {{ portgroups.kibana }}
- {{ portgroups.redis }}
- {{ portgroups.minio }}
- {{ portgroups.influxdb }}
- {{ portgroups.fleet_api }}
- {{ portgroups.cortex }}
@@ -122,6 +125,7 @@ role:
search_node:
portgroups:
- {{ portgroups.redis }}
- {{ portgroups.minio }}
- {{ portgroups.elasticsearch_node }}
self:
portgroups:
@@ -180,6 +184,7 @@ role:
- {{ portgroups.mysql }}
- {{ portgroups.kibana }}
- {{ portgroups.redis }}
- {{ portgroups.minio }}
- {{ portgroups.influxdb }}
- {{ portgroups.fleet_api }}
- {{ portgroups.cortex }}
@@ -203,6 +208,7 @@ role:
search_node:
portgroups:
- {{ portgroups.redis }}
- {{ portgroups.minio }}
- {{ portgroups.elasticsearch_node }}
self:
portgroups:
@@ -261,6 +267,7 @@ role:
- {{ portgroups.mysql }}
- {{ portgroups.kibana }}
- {{ portgroups.redis }}
- {{ portgroups.minio }}
- {{ portgroups.influxdb }}
- {{ portgroups.fleet_api }}
- {{ portgroups.cortex }}
@@ -284,6 +291,7 @@ role:
search_node:
portgroups:
- {{ portgroups.redis }}
- {{ portgroups.minio }}
- {{ portgroups.elasticsearch_node }}
self:
portgroups:

View File

@@ -45,6 +45,9 @@ firewall:
kibana:
tcp:
- 5601
minio:
tcp:
- 9595
mysql:
tcp:
- 3306

View File

@@ -1,10 +1,10 @@
{% set MANAGER = salt['grains.get']('master') %}
{% set ENROLLSECRET = salt['pillar.get']('secrets:fleet_enroll-secret') %}
{% set CURRENTPACKAGEVERSION = salt['pillar.get']('static:fleet_packages-version') %}
{% set VERSION = salt['pillar.get']('static:soversion') %}
{% set CUSTOM_FLEET_HOSTNAME = salt['pillar.get']('static:fleet_custom_hostname', None) %}
{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %}
{%- set FLEETNODE = salt['pillar.get']('static:fleet_node') -%}
{% set CURRENTPACKAGEVERSION = salt['pillar.get']('global:fleet_packages-version') %}
{% set VERSION = salt['pillar.get']('global:soversion') %}
{% set CUSTOM_FLEET_HOSTNAME = salt['pillar.get']('global:fleet_custom_hostname', None) %}
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{%- set FLEETNODE = salt['pillar.get']('global:fleet_node') -%}
{% if CUSTOM_FLEET_HOSTNAME != None and CUSTOM_FLEET_HOSTNAME != '' %}
{% set HOSTNAME = CUSTOM_FLEET_HOSTNAME %}

View File

@@ -1,4 +1,4 @@
{% set CUSTOM_FLEET_HOSTNAME = salt['pillar.get']('static:fleet_custom_hostname', None) %}
{% set CUSTOM_FLEET_HOSTNAME = salt['pillar.get']('global:fleet_custom_hostname', None) %}
so/fleet:
event.send:

View File

@@ -1,8 +1,8 @@
{%- set MYSQLPASS = salt['pillar.get']('secrets:mysql', None) -%}
{%- set FLEETPASS = salt['pillar.get']('secrets:fleet', None) -%}
{%- set FLEETJWT = salt['pillar.get']('secrets:fleet_jwt', None) -%}
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %}
{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{% set MANAGER = salt['grains.get']('master') %}
{% set FLEETARCH = salt['grains.get']('role') %}
@@ -10,7 +10,7 @@
{% set MAININT = salt['pillar.get']('host:mainint') %}
{% set MAINIP = salt['grains.get']('ip_interfaces').get(MAININT)[0] %}
{% else %}
{% set MAINIP = salt['pillar.get']('static:managerip') %}
{% set MAINIP = salt['pillar.get']('global:managerip') %}
{% endif %}
include:

View File

@@ -1,8 +1,8 @@
{%- set FLEETMANAGER = salt['pillar.get']('static:fleet_manager', False) -%}
{%- set FLEETNODE = salt['pillar.get']('static:fleet_node', False) -%}
{%- set FLEETHOSTNAME = salt['pillar.get']('static:fleet_hostname', False) -%}
{%- set FLEETIP = salt['pillar.get']('static:fleet_ip', False) -%}
{% set CUSTOM_FLEET_HOSTNAME = salt['pillar.get']('static:fleet_custom_hostname', None) %}
{%- set FLEETMANAGER = salt['pillar.get']('global:fleet_manager', False) -%}
{%- set FLEETNODE = salt['pillar.get']('global:fleet_node', False) -%}
{%- set FLEETHOSTNAME = salt['pillar.get']('global:fleet_hostname', False) -%}
{%- set FLEETIP = salt['pillar.get']('global:fleet_ip', False) -%}
{% set CUSTOM_FLEET_HOSTNAME = salt['pillar.get']('global:fleet_custom_hostname', None) %}
{% if CUSTOM_FLEET_HOSTNAME != (None and '') %}

View File

@@ -13,7 +13,7 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %}
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
# Create the user
fservergroup:

View File

@@ -1,4 +1,4 @@
{%- set MANAGER = salt['pillar.get']('static:managerip', '') %}
{%- set MANAGER = salt['pillar.get']('global:managerip', '') %}
apiVersion: 1
deleteDatasources:

View File

@@ -1,7 +1,7 @@
{% set GRAFANA = salt['pillar.get']('manager:grafana', '0') %}
{% set MANAGER = salt['grains.get']('master') %}
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %}
{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{% if grains['role'] in ['so-manager', 'so-managersearch', 'so-eval', 'so-standalone'] and GRAFANA == 1 %}

View File

@@ -12,8 +12,8 @@
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %}
{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{% set MANAGER = salt['grains.get']('master') %}
# IDSTools Setup
idstoolsdir:

View File

@@ -1,7 +1,7 @@
{% set GRAFANA = salt['pillar.get']('manager:grafana', '0') %}
{% set MANAGER = salt['grains.get']('master') %}
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %}
{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{% if grains['role'] in ['so-manager', 'so-managersearch', 'so-eval', 'so-standalone'] and GRAFANA == 1 %}

View File

@@ -1,6 +1,6 @@
#!/bin/bash
# {%- set FLEET_MANAGER = salt['pillar.get']('static:fleet_manager', False) -%}
# {%- set FLEET_NODE = salt['pillar.get']('static:fleet_node', False) -%}
# {%- set FLEET_MANAGER = salt['pillar.get']('global:fleet_manager', False) -%}
# {%- set FLEET_NODE = salt['pillar.get']('global:fleet_node', False) -%}
# {%- set MANAGER = salt['pillar.get']('manager:url_base', '') %}
KIBANA_VERSION="7.6.1"

View File

@@ -1,5 +1,5 @@
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %}
{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{% set MANAGER = salt['grains.get']('master') %}
{% set FEATURES = salt['pillar.get']('elastic:features', False) %}
{% if FEATURES %}

View File

@@ -12,8 +12,8 @@
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %}
{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{% set MANAGER = salt['grains.get']('master') %}
{% set FEATURES = salt['pillar.get']('elastic:features', False) %}
@@ -127,7 +127,7 @@ importdir:
# Create the logstash data directory
nsmlsdir:
file.directory:
- name: /nsm/logstash
- name: /nsm/logstash/tmp
- user: 931
- group: 939
- makedirs: True
@@ -148,6 +148,7 @@ so-logstash:
- user: logstash
- environment:
- LS_JAVA_OPTS=-Xms{{ lsheap }} -Xmx{{ lsheap }}
- SSL_CERT_FILE=/etc/ssl/certs/ca.crt
- port_bindings:
{% for BINDING in DOCKER_OPTIONS.port_bindings %}
- {{ BINDING }}
@@ -166,6 +167,7 @@ so-logstash:
- /etc/pki/filebeat.crt:/usr/share/logstash/filebeat.crt:ro
- /etc/pki/filebeat.p8:/usr/share/logstash/filebeat.key:ro
- /etc/pki/ca.crt:/usr/share/filebeat/ca.crt:ro
- /etc/ssl/certs/intca.crt:/etc/ssl/certs/ca.crt:ro
{%- if grains['role'] == 'so-eval' %}
- /nsm/zeek:/nsm/zeek:ro
- /nsm/suricata:/suricata:ro

View File

@@ -0,0 +1,23 @@
{%- if grains.role == 'so-heavynode' %}
{%- set MANAGER = salt['grains.get']('host') %}
{%- else %}
{%- set MANAGER = salt['grains.get']('master') %}
{% endif -%}
{%- set THREADS = salt['pillar.get']('logstash_settings:ls_input_threads', '') %}
{%- set access_key = salt['pillar.get']('minio:access_key', '') %}
{%- set access_secret = salt['pillar.get']('minio:access_secret', '') %}
{%- set INTERVAL = salt['pillar.get']('s3_settings:interval', 5) %}
input {
s3 {
access_key_id => "{{ access_key }}"
secret_access_key => "{{ access_secret }}"
endpoint => "https://{{ MANAGER }}:9595"
bucket => "logstash"
delete => true
interval => {{ INTERVAL }}
codec => json
additional_settings => {
"force_path_style" => true
}
}
}

View File

@@ -1,7 +1,7 @@
{%- if grains.role == 'so-heavynode' %}
{%- set MANAGER = salt['pillar.get']('elasticsearch:mainip', '') %}
{%- else %}
{%- set MANAGER = salt['pillar.get']('static:managerip', '') %}
{%- set MANAGER = salt['pillar.get']('global:managerip', '') %}
{% endif -%}
{%- set THREADS = salt['pillar.get']('logstash_settings:ls_input_threads', '') %}

View File

@@ -0,0 +1,24 @@
{%- set MANAGER = salt['grains.get']('master') %}
{%- set access_key = salt['pillar.get']('minio:access_key', '') %}
{%- set access_secret = salt['pillar.get']('minio:access_secret', '') %}
{%- set SIZE_FILE = salt['pillar.get']('s3_settings:size_file', 2048) %}
{%- set TIME_FILE = salt['pillar.get']('s3_settings:time_file', 1) %}
{%- set UPLOAD_QUEUE_SIZE = salt['pillar.get']('s3_settings:upload_queue_size', 4) %}
{%- set ENCODING = salt['pillar.get']('s3_settings:encoding', 'gzip') %}
output {
s3 {
access_key_id => "{{ access_key }}"
secret_access_key => "{{ access_secret}}"
endpoint => "https://{{ MANAGER }}:9595"
bucket => "logstash"
size_file => {{ SIZE_FILE }}
time_file => {{ TIME_FILE }}
codec => json
encoding => {{ ENCODING }}
upload_queue_size => {{ UPLOAD_QUEUE_SIZE }}
temporary_directory => "/usr/share/logstash/data/tmp"
additional_settings => {
"force_path_style" => true
}
}
}
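
The new minio input and output templates read their credentials and tuning from the pillar; the sketch below lists the keys involved, using the defaults shown in the templates (the credential values are placeholders):

minio:
  access_key: <generated-access-key>        # also consumed by the so-minio container state below
  access_secret: <generated-access-secret>
s3_settings:
  interval: 5             # s3 input poll interval (template default)
  size_file: 2048         # s3 output rotation size (template default)
  time_file: 1            # s3 output time-based rotation (template default)
  upload_queue_size: 4
  encoding: gzip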

View File

@@ -1,4 +1,4 @@
{% set MANAGER = salt['pillar.get']('static:managerip', '') %}
{% set MANAGER = salt['pillar.get']('global:managerip', '') %}
{% set BATCH = salt['pillar.get']('logstash_settings:ls_pipeline_batch_size', 125) %}
output {
redis {

View File

@@ -12,10 +12,10 @@
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %}
{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{% set MANAGER = salt['grains.get']('master') %}
{% set managerproxy = salt['pillar.get']('static:managerupdate', '0') %}
{% set managerproxy = salt['pillar.get']('global:managerupdate', '0') %}
socore_own_saltstack:
file.directory:

View File

@@ -13,47 +13,47 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
{% set access_key = salt['pillar.get']('manager:access_key', '') %}
{% set access_secret = salt['pillar.get']('manager:access_secret', '') %}
{% set access_key = salt['pillar.get']('minio:access_key', '') %}
{% set access_secret = salt['pillar.get']('minio:access_secret', '') %}
{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{% set MANAGER = salt['grains.get']('master') %}
# Minio Setup
minioconfdir:
file.directory:
- name: /opt/so/conf/minio/etc
- name: /opt/so/conf/minio/etc/certs
- user: 939
- group: 939
- makedirs: True
miniodatadir:
file.directory:
- name: /nsm/minio/data
- name: /nsm/minio/data/
- user: 939
- group: 939
- makedirs: True
#redisconfsync:
# file.recurse:
# - name: /opt/so/conf/redis/etc
# - source: salt://redis/etc
# - user: 939
# - group: 939
# - template: jinja
logstashbucket:
file.directory:
- name: /nsm/minio/data/logstash
- user: 939
- group: 939
- makedirs: True
minio/minio:
docker_image.present
minio:
so-minio:
docker_container.running:
- image: minio/minio
- image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-minio:{{ VERSION }}
- hostname: so-minio
- user: socore
- port_bindings:
- 0.0.0.0:9000:9000
- 0.0.0.0:9595:9595
- environment:
- MINIO_ACCESS_KEY: {{ access_key }}
- MINIO_SECRET_KEY: {{ access_secret }}
- binds:
- /nsm/minio/data:/data:rw
- /opt/so/conf/minio/etc:/root/.minio:rw
- entrypoint: "/usr/bin/docker-entrypoint.sh server /data"
- network_mode: so-elastic-net
- /opt/so/conf/minio/etc:/.minio:rw
- /etc/pki/minio.key:/.minio/certs/private.key:ro
- /etc/pki/minio.crt:/.minio/certs/public.crt:ro
- entrypoint: "/usr/bin/docker-entrypoint.sh server --certs-dir /.minio/certs --address :9595 /data"

View File

@@ -1,7 +1,7 @@
{%- set MYSQLPASS = salt['pillar.get']('secrets:mysql', None) %}
{%- set MANAGERIP = salt['pillar.get']('static:managerip', '') %}
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %}
{%- set MANAGERIP = salt['pillar.get']('global:managerip', '') %}
{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{% set MANAGER = salt['grains.get']('master') %}
{% set MAINIP = salt['pillar.get']('elasticsearch:mainip') %}
{% set FLEETARCH = salt['grains.get']('role') %}
@@ -10,7 +10,7 @@
{% set MAININT = salt['pillar.get']('host:mainint') %}
{% set MAINIP = salt['grains.get']('ip_interfaces').get(MAININT)[0] %}
{% else %}
{% set MAINIP = salt['pillar.get']('static:managerip') %}
{% set MAINIP = salt['pillar.get']('global:managerip') %}
{% endif %}
# MySQL Setup

View File

@@ -1,7 +1,7 @@
{%- set managerip = salt['pillar.get']('manager:mainip', '') %}
{%- set FLEET_MANAGER = salt['pillar.get']('static:fleet_manager') %}
{%- set FLEET_NODE = salt['pillar.get']('static:fleet_node') %}
{%- set FLEET_IP = salt['pillar.get']('static:fleet_ip', None) %}
{%- set FLEET_MANAGER = salt['pillar.get']('global:fleet_manager') %}
{%- set FLEET_NODE = salt['pillar.get']('global:fleet_node') %}
{%- set FLEET_IP = salt['pillar.get']('global:fleet_ip', None) %}
# For more information on configuration, see:
# * Official English Documentation: http://nginx.org/en/docs/
# * Official Russian Documentation: http://nginx.org/ru/docs/

View File

@@ -1,7 +1,7 @@
{%- set managerip = salt['pillar.get']('manager:mainip', '') %}
{%- set FLEET_MANAGER = salt['pillar.get']('static:fleet_manager') %}
{%- set FLEET_NODE = salt['pillar.get']('static:fleet_node') %}
{%- set FLEET_IP = salt['pillar.get']('static:fleet_ip', None) %}
{%- set FLEET_MANAGER = salt['pillar.get']('global:fleet_manager') %}
{%- set FLEET_NODE = salt['pillar.get']('global:fleet_node') %}
{%- set FLEET_IP = salt['pillar.get']('global:fleet_ip', None) %}
# For more information on configuration, see:
# * Official English Documentation: http://nginx.org/en/docs/
# * Official Russian Documentation: http://nginx.org/ru/docs/

View File

@@ -1,7 +1,7 @@
{%- set managerip = salt['pillar.get']('manager:mainip', '') %}
{%- set FLEET_MANAGER = salt['pillar.get']('static:fleet_manager') %}
{%- set FLEET_NODE = salt['pillar.get']('static:fleet_node') %}
{%- set FLEET_IP = salt['pillar.get']('static:fleet_ip', None) %}
{%- set FLEET_MANAGER = salt['pillar.get']('global:fleet_manager') %}
{%- set FLEET_NODE = salt['pillar.get']('global:fleet_node') %}
{%- set FLEET_IP = salt['pillar.get']('global:fleet_ip', None) %}
# For more information on configuration, see:
# * Official English Documentation: http://nginx.org/en/docs/
# * Official Russian Documentation: http://nginx.org/ru/docs/

View File

@@ -1,7 +1,7 @@
{%- set managerip = salt['pillar.get']('manager:mainip', '') %}
{%- set FLEET_MANAGER = salt['pillar.get']('static:fleet_manager') %}
{%- set FLEET_NODE = salt['pillar.get']('static:fleet_node') %}
{%- set FLEET_IP = salt['pillar.get']('static:fleet_ip', None) %}
{%- set FLEET_MANAGER = salt['pillar.get']('global:fleet_manager') %}
{%- set FLEET_NODE = salt['pillar.get']('global:fleet_node') %}
{%- set FLEET_IP = salt['pillar.get']('global:fleet_ip', None) %}
# For more information on configuration, see:
# * Official English Documentation: http://nginx.org/en/docs/
# * Official Russian Documentation: http://nginx.org/ru/docs/

View File

@@ -1,4 +1,4 @@
{%- set ip = salt['pillar.get']('static:managerip', '') %}
{%- set ip = salt['pillar.get']('global:managerip', '') %}
{
"enterprise_attack_url": "https://raw.githubusercontent.com/mitre/cti/master/enterprise-attack/enterprise-attack.json",

View File

@@ -1,8 +1,8 @@
{% set FLEETMANAGER = salt['pillar.get']('static:fleet_manager', False) %}
{% set FLEETNODE = salt['pillar.get']('static:fleet_node', False) %}
{% set FLEETMANAGER = salt['pillar.get']('global:fleet_manager', False) %}
{% set FLEETNODE = salt['pillar.get']('global:fleet_node', False) %}
{% set MANAGER = salt['grains.get']('master') %}
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %}
{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
# Drop the correct nginx config based on role
nginxconfdir:

View File

@@ -1,4 +1,4 @@
{%- set ip = salt['pillar.get']('static:managerip', '') -%}
{%- set ip = salt['pillar.get']('global:managerip', '') -%}
#!/bin/bash
default_salt_dir=/opt/so/saltstack/default

File diff suppressed because one or more lines are too long

View File

@@ -13,7 +13,7 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %}
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
# Create the nodered group
noderedgroup:

View File

@@ -1,5 +1,5 @@
{%- set MANAGER = salt['grains.get']('master') -%}
{%- set SENSORONIKEY = salt['pillar.get']('static:sensoronikey', '') -%}
{%- set SENSORONIKEY = salt['pillar.get']('global:sensoronikey', '') -%}
{%- set CHECKININTERVALMS = salt['pillar.get']('pcap:sensor_checkin_interval_ms', 10000) -%}
{
"logFilename": "/opt/sensoroni/logs/sensoroni.log",

View File

@@ -12,8 +12,8 @@
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %}
{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{% set MANAGER = salt['grains.get']('master') %}
{% set INTERFACE = salt['pillar.get']('sensor:interface', 'bond0') %}
{% set BPF_STENO = salt['pillar.get']('steno:bpf', None) %}

View File

@@ -1,6 +1,6 @@
{% set MANAGERIP = salt['pillar.get']('manager:mainip', '') %}
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %}
{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{% set MANAGER = salt['grains.get']('master') %}
{% set MAINIP = salt['grains.get']('ip_interfaces').get(salt['pillar.get']('sensor:mainint', salt['pillar.get']('manager:mainint', salt['pillar.get']('elasticsearch:mainint', salt['pillar.get']('host:mainint')))))[0] %}
{%- set MYSQLPASS = salt['pillar.get']('secrets:mysql', None) -%}

View File

@@ -10,7 +10,7 @@ def run():
MINIONID = data['id']
ACTION = data['data']['action']
LOCAL_SALT_DIR = "/opt/so/saltstack/local"
STATICFILE = f"{LOCAL_SALT_DIR}/pillar/static.sls"
STATICFILE = f"{LOCAL_SALT_DIR}/pillar/global.sls"
SECRETSFILE = f"{LOCAL_SALT_DIR}/pillar/secrets.sls"
if MINIONID.split('_')[-1] in ['manager','eval','fleet','managersearch','standalone']:

View File

@@ -12,8 +12,8 @@
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %}
{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{% set MANAGER = salt['grains.get']('master') %}
# Redis Setup

View File

@@ -1,6 +1,10 @@
{
"title": "Security Onion 2.0.2 RC1 is here!",
"title": "Security Onion 2.0.3 RC1 is here!",
"changes": [
{ "summary": "Resolved an issue with large drives and the ISO install." },
{ "summary": "Modified ISO installation to use Logical Volume Management (LVM) for disk partitioning." },
{ "summary": "Updated Elastic Stack components to version 7.8.1." },
{ "summary": "Updated Zeek to version 3.0.8." },
{ "summary": "Fixed standalone pcap interval issue." },
{ "summary": "<a target='so-github' href='https://github.com/Security-Onion-Solutions/securityonion/issues/1067'>Security Fix 1067:</a> variables.txt from ISO install stays on disk for 10 days." },
{ "summary": "<a target='so-github' href='https://github.com/Security-Onion-Solutions/securityonion/issues/1068'>Security Fix 1068:</a> Remove user values from static.sls." },

View File

@@ -1,5 +1,5 @@
{%- set MANAGERIP = salt['pillar.get']('static:managerip', '') -%}
{%- set SENSORONIKEY = salt['pillar.get']('static:sensoronikey', '') -%}
{%- set MANAGERIP = salt['pillar.get']('global:managerip', '') -%}
{%- set SENSORONIKEY = salt['pillar.get']('global:sensoronikey', '') -%}
{
"logFilename": "/opt/sensoroni/logs/sensoroni-server.log",
"server": {
@@ -33,53 +33,47 @@
"mostRecentlyUsedLimit": 5,
"eventFields": {
"default": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "log.id.uid", "network.community_id", "event.dataset" ],
"bro_conn": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "protocol", "service", "log.id.uid" ],
"bro_dce_rpc": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "endpoint", "named_pipe", "operation", "log.id.uid" ],
"bro_dhcp": ["soc_timestamp", "source.ip", "destination.ip", "domain_name", "hostname", "message_types", "log.id.uid" ],
"bro_dnp3": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "fc_reply", "log.id.uid" ],
"bro_dns": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "protocol", "query", "query_type_name", "rcode_name", "log.id.uid" ],
"bro_dpd": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "log.id.uid" ],
"bro_files": ["soc_timestamp", "source.ip", "destination.ip", "log.id.flog.id.uid", "mimetype", "source", "log.id.uid" ],
"bro_ftp": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "ftp_argument", "ftp_command", "reply_code", "log.id.uid", "username" ],
"bro_http": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "method", "virtual_host", "status_code", "status_message", "log.id.uid" ],
"bro_intel": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "indicator", "indicator_type", "seen_where", "log.id.uid" ],
"bro_irc": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "irc_command", "log.id.uid", "value" ],
"bro_kerberos": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "client", "service", "request_type", "log.id.uid" ],
"bro_modbus": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "function", "log.id.uid" ],
"bro_mysql": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "mysql_argument", "mysql_command", "mysql_success", "response", "log.id.uid" ],
"bro_notice": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "note", "msg", "log.id.uid" ],
"bro_ntlm": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "hostname", "ntlm_success", "server_dns_computer_name", "server_nb_computer_name", "server_tree_name", "log.id.uid" ],
"bro_pe": ["soc_timestamp", "is_64bit", "is_exe", "machine", "os", "subsystem", "log.id.flog.id.uid" ],
"bro_radius": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "log.id.uid", "username", "framed_addr", "reply_msg", "result" ],
"bro_rdp": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "client_build", "client_name", "cookie", "encryption_level", "encryption_method", "keyboard_layout", "result", "security_protocol", "log.id.uid" ],
"bro_rfb": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "authentication_method", "auth", "share_flag", "desktop_name", "log.id.uid" ],
"bro_signatures" : ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "note", "signature_id", "event_message", "sub_message", "signature_count", "host_count", "log.id.uid" ],
"bro_sip": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "method", "uri", "request_from", "request_to", "response_from", "response_to", "call_id", "subject", "user_agent", "status_code", "log.id.uid" ],
"bro_smb_files" : ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "log.id.flog.id.uid", "action", "path", "name", "size", "prev_name", "log.id.uid" ],
"bro_smb_mapping" : ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "path", "service", "share_type", "log.id.uid" ],
"bro_smtp": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "helo", "mail_from", "recipient_to", "from", "to", "cc", "reply_to", "subject", "useragent", "log.id.uid" ],
"bro_snmp": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "community", "version", "log.id.uid" ],
"bro_socks": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "log.id.uid" ],
"bro_software": ["soc_timestamp", "source.ip", "name", "software_type" ],
"bro_ssh": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "version", "hassh", "direction", "client", "server", "log.id.uid" ],
"bro_ssl": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "cipher", "curve", "server_name", "log.id.uid", "validation_status", "version" ],
"bro_syslog": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "facility", "protocol", "severity", "syslog-priority", "log.id.uid" ],
"bro_tunnels": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "tunnel_type", "action", "log.id.uid" ],
"bro_weird": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "name", "log.id.uid" ],
"bro_x509": ["soc_timestamp", "certificate_common_name", "certificate_country_code", "certificate_key_length", "issuer_organization", "log.id.id" ],
"cron" : ["soc_timestamp", "message" ],
"anacron": ["soc_timestamp", "message" ],
"bluetoothd": ["soc_timestamp", "message" ],
"firewall": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "protocol", "direction", "interface", "action", "reason" ],
"ntpd" : ["soc_timestamp", "message" ],
"ossec": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "alert_level", "classification", "description", "username", "escalated_user", "location", "process" ],
"pulseaudio": ["soc_timestamp", "message" ],
"snort": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "sid", "alert", "category", "classification", "severity" ],
"su" : ["soc_timestamp", "message" ],
"sudo" : ["soc_timestamp", "message" ],
"systemd": ["soc_timestamp", "message" ],
"sysmon": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "host.name", "event.dataset", "parent_image_path", "source_name", "task", "user.name" ],
"wineventlog": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "host.name", "event.code", "event.dataset", "source_name", "task" ]
"::conn": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "network.transport", "network.protocol", "log.id.uid" ],
"::dce_rpc": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "dce_rpc.endpoint", "dce_rpc.named_pipe", "dce_rpc.operation", "log.id.uid" ],
"::dhcp": ["soc_timestamp", "source.ip", "destination.ip", "host.domain", "host.hostname", "dhcp.message_types", "log.id.uid" ],
"::dnp3": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "dnp3.fc_reply", "log.id.uid" ],
"::dns": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "network.transport", "dns.query.name", "dns.query.type_name", "dns.response.code_name", "log.id.uid" ],
"::dpd": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "network.protocol", "observer.analyser", "error.reason", "log.id.uid" ],
"::files": ["soc_timestamp", "source.ip", "destination.ip", "file.name", "file.mime_type", "file.source", "file.bytes.total", "log.id.fuid", "log.id.uid" ],
"::ftp": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "ftp.user", "ftp.command", "ftp.argument", "ftp.reply_code", "file.size", "log.id.uid" ],
"::http": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "http.method", "http.virtual_host", "http.status_code", "http.status_message", "http.request.body.length", "http.response.body.length", "log.id.uid" ],
"::intel": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "intel.indicator", "intel.indicator_type", "intel.seen_where", "log.id.uid" ],
"::irc": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "irc.username", "irc.nickname", "irc.command.type", "irc.command.value", "irc.command.info", "log.id.uid" ],
"::kerberos": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "kerberos.client", "kerberos.service", "kerberos.request_type", "log.id.uid" ],
"::modbus": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "modbus.function", "log.id.uid" ],
"::mysql": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "mysql.command", "mysql.argument", "mysql.success", "mysql.response", "log.id.uid" ],
"::notice": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "notice.note", "notice.message", "log.id.fuid", "log.id.uid" ],
"::ntlm": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "ntlm.name", "ntlm.success", "ntlm.server.dns.name", "ntlm.server.nb.name", "ntlm.server.tree.name", "log.id.uid" ],
"::pe": ["soc_timestamp", "file.is_64bit", "file.is_exe", "file.machine", "file.os", "file.subsystem", "log.id.fuid" ],
"::radius": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "log.id.uid", "username", "radius.framed_address", "radius.reply_message", "radius.result" ],
"::rdp": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "rdp.client_build", "client_name", "rdp.cookie", "rdp.encryption_level", "rdp.encryption_method", "rdp.keyboard_layout", "rdp.result", "rdp.security_protocol", "log.id.uid" ],
"::rfb": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "rfb.authentication.method", "rfb.authentication.success", "rfb.share_flag", "rfb.desktop.name", "log.id.uid" ],
"::signatures" : ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "note", "signature_id", "event_message", "sub_message", "signature_count", "host.count", "log.id.uid" ],
"::sip": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "sip.method", "sip.uri", "sip.request.from", "sip.request.to", "sip.response.from", "sip.response.to", "sip.call_id", "sip.subject", "sip.user_agent", "sip.status_code", "log.id.uid" ],
"::smb_files" : ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "log.id.fuid", "file.action", "file.path", "file.name", "file.size", "file.prev_name", "log.id.uid" ],
"::smb_mapping" : ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "smb.path", "smb.service", "smb.share_type", "log.id.uid" ],
"::smtp": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "smtp.helo", "smtp.mail_from", "smtp.recipient_to", "smtp.from", "smtp.to", "smtp.cc", "smtp.reply_to", "smtp.subject", "smtp.useragent", "log.id.uid" ],
"::snmp": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "snmp.community", "snmp.version", "log.id.uid" ],
"::socks": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "socks.name", "socks.request.host", "socks.request.port", "socks.status", "log.id.uid" ],
"::software": ["soc_timestamp", "source.ip", "software.name", "software.type" ],
"::ssh": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "ssh.version", "ssh.hassh_version", "ssh.direction", "ssh.client", "ssh.server", "log.id.uid" ],
"::ssl": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "ssl.cipher", "ssl.curve", "ssl.certificate.subject", "ssl.validation_status", "ssl.version", "log.id.uid" ],
"::syslog": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "syslog.facility", "network.protocol", "syslog.severity", "log.id.uid" ],
"::tunnels": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "tunnel_type", "action", "log.id.uid" ],
"::weird": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "weird.name", "log.id.uid" ],
"::x509": ["soc_timestamp", "x509.certificate.subject", "x509.certificate.key.type", "x509.certificate.key.length", "x509.certificate.issuer", "log.id.id" ],
":firewall:": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "network.transport", "direction", "interface", "action", "reason" ],
":osquery:": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "source.hostname", "event.dataset", "process.executable", "user.name" ],
":ossec:": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "rule.name", "rule.level", "rule.category", "process.name", "user.name", "user.escalated", "location", "process.name" ],
":strelka:file": ["soc_timestamp", "scan.exiftool.OriginalFileName", "file.size", "hash.md5", "scan.exiftool.CompanyName", "scan.exiftool.Description", "scan.exiftool.Directory", "scan.exiftool.FileType", "scan.exiftool.FileOS", "log.id.fuid" ],
":suricata:": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "rule.gid", "rule.name", "rule.category", "rule.rev", "event.severity", "event.severity_label" ],
":sysmon:": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "source.hostname", "event.dataset", "process.executable", "user.name" ],
":windows_eventlog:": ["soc_timestamp", "user.name" ]
},
"queries": [
{ "name": "Default Query", "description": "Show all events grouped by the origin host", "query": "* | groupby observer.name"},

View File

@@ -1,5 +1,5 @@
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %}
{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{% set MANAGER = salt['grains.get']('master') %}
socdir:

View File

@@ -1,6 +1,6 @@
{%- set MANAGER = salt['pillar.get']('manager:url_base', '') %}
{%- set HIVEKEY = salt['pillar.get']('static:hivekey', '') %}
{%- set CORTEXKEY = salt['pillar.get']('static:cortexorguserkey', '') %}
{%- set HIVEKEY = salt['pillar.get']('global:hivekey', '') %}
{%- set CORTEXKEY = salt['pillar.get']('global:cortexorguserkey', '') %}
[es]
es_url = http://{{MANAGER}}:9200

View File

@@ -1,4 +1,4 @@
{% set ES = salt['pillar.get']('static:managerip', '') %}
{% set ES = salt['pillar.get']('global:managerip', '') %}
alert: modules.so.playbook-es.PlaybookESAlerter
elasticsearch_host: "{{ ES }}:9200"

View File

@@ -1,6 +1,6 @@
{% set es = salt['pillar.get']('static:managerip', '') %}
{% set hivehost = salt['pillar.get']('static:managerip', '') %}
{% set hivekey = salt['pillar.get']('static:hivekey', '') %}
{% set es = salt['pillar.get']('global:managerip', '') %}
{% set hivehost = salt['pillar.get']('global:managerip', '') %}
{% set hivekey = salt['pillar.get']('global:hivekey', '') %}
alert: hivealerter
hive_connection:

View File

@@ -1,6 +1,6 @@
{% set es = salt['pillar.get']('static:managerip', '') %}
{% set hivehost = salt['pillar.get']('static:managerip', '') %}
{% set hivekey = salt['pillar.get']('static:hivekey', '') %}
{% set es = salt['pillar.get']('global:managerip', '') %}
{% set hivehost = salt['pillar.get']('global:managerip', '') %}
{% set hivekey = salt['pillar.get']('global:hivekey', '') %}
alert: hivealerter
hive_connection:

View File

@@ -1,8 +1,8 @@
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %}
{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{% set MANAGER = salt['grains.get']('master') %}
{%- set MANAGER_URL = salt['pillar.get']('manager:url_base', '') %}
{%- set MANAGER_IP = salt['pillar.get']('static:managerip', '') %}
{%- set MANAGER_IP = salt['pillar.get']('global:managerip', '') %}
soctopusdir:
file.directory:

View File

@@ -1,11 +1,11 @@
{% set manager = salt['grains.get']('master') %}
{% set managerip = salt['pillar.get']('static:managerip', '') %}
{% set managerip = salt['pillar.get']('global:managerip', '') %}
{% set HOSTNAME = salt['grains.get']('host') %}
{% set global_ca_text = [] %}
{% set global_ca_server = [] %}
{% set MAININT = salt['pillar.get']('host:mainint') %}
{% set MAINIP = salt['grains.get']('ip_interfaces').get(MAININT)[0] %}
{% set CUSTOM_FLEET_HOSTNAME = salt['pillar.get']('static:fleet_custom_hostname', None) %}
{% set CUSTOM_FLEET_HOSTNAME = salt['pillar.get']('global:fleet_custom_hostname', None) %}
{% if grains.id.split('_')|last in ['manager', 'eval', 'standalone'] %}
{% set trusttheca_text = salt['mine.get'](grains.id, 'x509.get_pem_entries')[grains.id]['/etc/pki/ca.crt']|replace('\n', '') %}
@@ -181,6 +181,41 @@ regkeyperms:
- mode: 640
- group: 939
/etc/pki/minio.key:
x509.private_key_managed:
- CN: {{ manager }}
- bits: 4096
- days_remaining: 0
- days_valid: 820
- backup: True
- new: True
{% if salt['file.file_exists']('/etc/pki/minio.key') -%}
- prereq:
- x509: /etc/pki/minio.crt
{%- endif %}
# Create a cert for the docker registry
/etc/pki/minio.crt:
x509.certificate_managed:
- ca_server: {{ ca_server }}
- signing_policy: registry
- public_key: /etc/pki/minio.key
- CN: {{ manager }}
- days_remaining: 0
- days_valid: 820
- backup: True
- unless:
# https://github.com/saltstack/salt/issues/52167
# Will trigger 5 days (432000 sec) from cert expiration
- 'enddate=$(date -d "$(openssl x509 -in /etc/pki/minio.crt -enddate -noout | cut -d= -f2)" +%s) ; now=$(date +%s) ; expire_date=$(( now + 432000)); [ $enddate -gt $expire_date ]'
miniokeyperms:
file.managed:
- replace: False
- name: /etc/pki/minio.key
- mode: 640
- group: 939
/etc/pki/managerssl.key:
x509.private_key_managed:
- CN: {{ manager }}
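
The minio.key and minio.crt managed in this hunk are the files the so-minio container state earlier in this diff mounts into the container; for reference, the relevant binds from that state are:

  - /etc/pki/minio.key:/.minio/certs/private.key:ro
  - /etc/pki/minio.crt:/.minio/certs/public.crt:ro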

View File

@@ -2,7 +2,7 @@
{%- set mainint = salt['pillar.get']('sensor:mainint') %}
{%- set ip = salt['grains.get']('ip_interfaces:' ~ mainint[0], salt['pillar.get']('sensor:mainip')) %}
{%- else %}
{%- set ip = salt['pillar.get']('static:managerip') %}
{%- set ip = salt['pillar.get']('global:managerip') %}
{%- endif -%}
logging_cfg: '/etc/strelka/logging.yaml'
limits:

View File

@@ -2,7 +2,7 @@
{%- set mainint = salt['pillar.get']('sensor:mainint') %}
{%- set ip = salt['grains.get']('ip_interfaces:' ~ mainint[0], salt['pillar.get']('sensor:mainip')) %}
{%- else %}
{%- set ip = salt['pillar.get']('static:managerip') %}
{%- set ip = salt['pillar.get']('global:managerip') %}
{%- endif -%}
conn:
server: '{{ ip }}:57314'

View File

@@ -2,7 +2,7 @@
{%- set mainint = salt['pillar.get']('sensor:mainint') %}
{%- set ip = salt['grains.get']('ip_interfaces:' ~ mainint[0], salt['pillar.get']('sensor:mainip')) %}
{%- else %}
{%- set ip = salt['pillar.get']('static:managerip') %}
{%- set ip = salt['pillar.get']('global:managerip') %}
{%- endif -%}
server: ":57314"
coordinator:

View File

@@ -2,7 +2,7 @@
{%- set mainint = salt['pillar.get']('sensor:mainint') %}
{%- set ip = salt['grains.get']('ip_interfaces:' ~ mainint[0], salt['pillar.get']('sensor:mainip')) %}
{%- else %}
{%- set ip = salt['pillar.get']('static:managerip') %}
{%- set ip = salt['pillar.get']('global:managerip') %}
{%- endif -%}
coordinator:
addr: '{{ ip }}:6380'

View File

@@ -13,9 +13,9 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
{%- set MANAGER = salt['grains.get']('master') %}
{%- set MANAGERIP = salt['pillar.get']('static:managerip', '') %}
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %}
{%- set MANAGERIP = salt['pillar.get']('global:managerip', '') %}
{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{%- set STRELKA_RULES = salt['pillar.get']('strelka:rules', '1') -%}
# Strelka config

View File

@@ -14,9 +14,9 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
{% set interface = salt['pillar.get']('sensor:interface', 'bond0') %}
{% set ZEEKVER = salt['pillar.get']('static:zeekversion', '') %}
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %}
{% set ZEEKVER = salt['pillar.get']('global:zeekversion', '') %}
{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{% set MANAGER = salt['grains.get']('master') %}
{% set BPF_NIDS = salt['pillar.get']('nids:bpf') %}
{% set BPF_STATUS = 0 %}

View File

@@ -11,7 +11,7 @@ HOME_NET: "[{{salt['pillar.get']('sensor:hnsensor')}}]"
{% endload %}
{% else %}
{% load_yaml as homenet %}
HOME_NET: "[{{salt['pillar.get']('static:hnmanager', '')}}]"
HOME_NET: "[{{salt['pillar.get']('global:hnmanager', '')}}]"
{% endload %}
{% endif %}
@@ -44,7 +44,7 @@ HOME_NET: "[{{salt['pillar.get']('static:hnmanager', '')}}]"
{% endfor %}
{% set surimeta_evelog_index = surimeta_evelog_index[0] %}
{% if salt['pillar.get']('static:zeekversion', 'ZEEK') == 'SURICATA' %}
{% if salt['pillar.get']('global:zeekversion', 'ZEEK') == 'SURICATA' %}
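{# If zeekversion is SURICATA, merge the metadata eve-log types into Suricata's default eve-log output. #}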
{% do suricata_defaults.suricata.config.outputs[default_evelog_index]['eve-log'].types.extend(suricata_meta.suricata.config.outputs[surimeta_evelog_index]['eve-log'].types) %}
{% endif %}

View File

@@ -1,6 +1,6 @@
{% if grains['role'] == 'so-sensor' or grains['role'] == 'so-eval' %}
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %}
{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{% set MANAGER = salt['grains.get']('master') %}
so-tcpreplay:

View File

@@ -1,6 +1,6 @@
{% set MANAGER = salt['grains.get']('master') %}
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %}
{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
# Add Telegraf to monitor all the things.
tgraflogdir:

View File

@@ -1,6 +1,6 @@
{%- set MANAGERIP = salt['pillar.get']('static:managerip', '') %}
{%- set CORTEXKEY = salt['pillar.get']('static:cortexorguserkey', '') %}
{%- set HIVEPLAYSECRET = salt['pillar.get']('static:hiveplaysecret', '') %}
{%- set MANAGERIP = salt['pillar.get']('global:managerip', '') %}
{%- set CORTEXKEY = salt['pillar.get']('global:cortexorguserkey', '') %}
{%- set HIVEPLAYSECRET = salt['pillar.get']('global:hiveplaysecret', '') %}
# Secret Key
# The secret key is used to secure cryptographic functions.

View File

@@ -1,5 +1,5 @@
{%- set MANAGERIP = salt['pillar.get']('static:managerip', '') %}
{%- set CORTEXPLAYSECRET = salt['pillar.get']('static:cortexplaysecret', '') %}
{%- set MANAGERIP = salt['pillar.get']('global:managerip', '') %}
{%- set CORTEXPLAYSECRET = salt['pillar.get']('global:cortexplaysecret', '') %}
# Secret Key
# The secret key is used to secure cryptographic functions.

View File

@@ -1,6 +1,6 @@
{% set MANAGERIP = salt['pillar.get']('manager:mainip', '') %}
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %}
{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{% set MANAGER = salt['grains.get']('master') %}
thehiveconfdir:
file.directory:

View File

@@ -1,18 +1,18 @@
#!/bin/bash
# {%- set MANAGERIP = salt['pillar.get']('static:managerip', '') %}
# {%- set CORTEXUSER = salt['pillar.get']('static:cortexuser', 'cortexadmin') %}
# {%- set CORTEXPASSWORD = salt['pillar.get']('static:cortexpassword', 'cortexchangeme') %}
# {%- set CORTEXKEY = salt['pillar.get']('static:cortexkey', '') %}
# {%- set CORTEXORGNAME = salt['pillar.get']('static:cortexorgname', '') %}
# {%- set CORTEXORGUSER = salt['pillar.get']('static:cortexorguser', 'soadmin') %}
# {%- set CORTEXORGUSERKEY = salt['pillar.get']('static:cortexorguserkey', '') %}
# {%- set MANAGERIP = salt['pillar.get']('global:managerip', '') %}
# {%- set CORTEXUSER = salt['pillar.get']('global:cortexuser', 'cortexadmin') %}
# {%- set CORTEXPASSWORD = salt['pillar.get']('global:cortexpassword', 'cortexchangeme') %}
# {%- set CORTEXKEY = salt['pillar.get']('global:cortexkey', '') %}
# {%- set CORTEXORGNAME = salt['pillar.get']('global:cortexorgname', '') %}
# {%- set CORTEXORGUSER = salt['pillar.get']('global:cortexorguser', 'soadmin') %}
# {%- set CORTEXORGUSERKEY = salt['pillar.get']('global:cortexorguserkey', '') %}
default_salt_dir=/opt/so/saltstack/default
cortex_clean(){
sed -i '/^ cortexuser:/d' /opt/so/saltstack/local/pillar/static.sls
sed -i '/^ cortexpassword:/d' /opt/so/saltstack/local/pillar/static.sls
sed -i '/^ cortexorguser:/d' /opt/so/saltstack/local/pillar/static.sls
sed -i '/^ cortexuser:/d' /opt/so/saltstack/local/pillar/global.sls
sed -i '/^ cortexpassword:/d' /opt/so/saltstack/local/pillar/global.sls
sed -i '/^ cortexorguser:/d' /opt/so/saltstack/local/pillar/global.sls
}
cortex_init(){

View File

@@ -1,12 +1,12 @@
#!/bin/bash
# {%- set MANAGERIP = salt['pillar.get']('static:managerip', '') %}
# {%- set THEHIVEUSER = salt['pillar.get']('static:hiveuser', 'hiveadmin') %}
# {%- set THEHIVEPASSWORD = salt['pillar.get']('static:hivepassword', 'hivechangeme') %}
# {%- set THEHIVEKEY = salt['pillar.get']('static:hivekey', '') %}
# {%- set MANAGERIP = salt['pillar.get']('global:managerip', '') %}
# {%- set THEHIVEUSER = salt['pillar.get']('global:hiveuser', 'hiveadmin') %}
# {%- set THEHIVEPASSWORD = salt['pillar.get']('global:hivepassword', 'hivechangeme') %}
# {%- set THEHIVEKEY = salt['pillar.get']('global:hivekey', '') %}
thehive_clean(){
sed -i '/^ hiveuser:/d' /opt/so/saltstack/local/pillar/static.sls
sed -i '/^ hivepassword:/d' /opt/so/saltstack/local/pillar/static.sls
sed -i '/^ hiveuser:/d' /opt/so/saltstack/local/pillar/global.sls
sed -i '/^ hivepassword:/d' /opt/so/saltstack/local/pillar/global.sls
}
thehive_init(){

View File

@@ -1,11 +1,11 @@
{%- set ZEEKVER = salt['pillar.get']('static:zeekversion', '') -%}
{%- set WAZUH = salt['pillar.get']('static:wazuh', '0') -%}
{%- set ZEEKVER = salt['pillar.get']('global:zeekversion', '') -%}
{%- set WAZUH = salt['pillar.get']('global:wazuh', '0') -%}
{%- set THEHIVE = salt['pillar.get']('manager:thehive', '0') -%}
{%- set PLAYBOOK = salt['pillar.get']('manager:playbook', '0') -%}
{%- set FREQSERVER = salt['pillar.get']('manager:freq', '0') -%}
{%- set DOMAINSTATS = salt['pillar.get']('manager:domainstats', '0') -%}
{%- set FLEETMANAGER = salt['pillar.get']('static:fleet_manager', False) -%}
{%- set FLEETNODE = salt['pillar.get']('static:fleet_node', False) -%}
{%- set FLEETMANAGER = salt['pillar.get']('global:fleet_manager', False) -%}
{%- set FLEETNODE = salt['pillar.get']('global:fleet_node', False) -%}
{%- set STRELKA = salt['pillar.get']('strelka:enabled', '0') -%}
{% import_yaml 'salt/minion.defaults.yaml' as salt %}
{% set saltversion = salt.salt.minion.version %}
@@ -142,7 +142,6 @@ base:
- manager
- idstools
- suricata.manager
- redis
{%- if FLEETMANAGER or FLEETNODE or PLAYBOOK != 0 %}
- mysql
{%- endif %}
@@ -150,6 +149,8 @@ base:
- wazuh
{%- endif %}
- logstash
- minio
- redis
- kibana
- elastalert
- filebeat
@@ -158,6 +159,7 @@ base:
{%- if FLEETMANAGER or FLEETNODE %}
- fleet
- fleet.install_package
- redis
{%- endif %}
- soctopus
{%- if THEHIVE != 0 %}
@@ -189,7 +191,6 @@ base:
- idstools
- suricata.manager
- healthcheck
- redis
{%- if FLEETMANAGER or FLEETNODE or PLAYBOOK != 0 %}
- mysql
{%- endif %}
@@ -197,6 +198,7 @@ base:
- wazuh
{%- endif %}
- logstash
- minio
- kibana
- pcap
- suricata
@@ -312,7 +314,7 @@ base:
- manager
- idstools
- suricata.manager
- redis
- minio
{%- if FLEETMANAGER or FLEETNODE or PLAYBOOK != 0 %}
- mysql
{%- endif %}
@@ -328,6 +330,7 @@ base:
- schedule
{%- if FLEETMANAGER or FLEETNODE %}
- fleet
- redis
- fleet.install_package
{%- endif %}
- soctopus
@@ -351,7 +354,7 @@ base:
- common
- telegraf
- firewall
- redis
- minio
{%- if WAZUH != 0 %}
- wazuh
{%- endif %}
@@ -360,6 +363,7 @@ base:
- filebeat
{%- if FLEETMANAGER or FLEETNODE %}
- fleet.install_package
- redis
{%- endif %}
- pcap
- suricata
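# Illustrative check (not part of this change): after adjusting the top file, the
# rendered state list for a minion can be confirmed with:
#   salt-call state.show_top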

View File

@@ -1,5 +1,5 @@
{%- if grains['role'] in ['so-manager', 'so-eval', 'so-managersearch', 'so-standalone'] %}
{%- set ip = salt['pillar.get']('static:managerip', '') %}
{%- set ip = salt['pillar.get']('global:managerip', '') %}
{%- elif grains['role'] == 'so-node' or grains['role'] == 'so-heavynode' %}
{%- set ip = salt['pillar.get']('elasticsearch:mainip', '') %}
{%- elif grains['role'] == 'so-sensor' %}

View File

@@ -1,5 +1,5 @@
{%- if grains['role'] in ['so-manager', 'so-eval', 'so-managersearch', 'so-standalone'] %}
{%- set ip = salt['pillar.get']('static:managerip', '') %}
{%- set ip = salt['pillar.get']('global:managerip', '') %}
{%- elif grains['role'] == 'so-node' or grains['role'] == 'so-heavynode' %}
{%- set ip = salt['pillar.get']('elasticsearch:mainip', '') %}
{%- elif grains['role'] == 'so-sensor' %}

View File

@@ -1,5 +1,5 @@
{%- set MANAGERIP = salt['pillar.get']('static:managerip', '') %}
{%- set WAZUH_ENABLED = salt['pillar.get']('static:wazuh', '0') %}
{%- set MANAGERIP = salt['pillar.get']('global:managerip', '') %}
{%- set WAZUH_ENABLED = salt['pillar.get']('global:wazuh', '0') %}
#!/bin/bash
local_salt_dir=/opt/so/saltstack/local

View File

@@ -1,6 +1,6 @@
{%- set HOSTNAME = salt['grains.get']('host', '') %}
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %}
{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{% set MANAGER = salt['grains.get']('master') %}
# Add ossec group
ossecgroup:
@@ -46,6 +46,15 @@ wazuhpkgs:
- hold: True
- update_holds: True
wazuhvarossecdir:
file.directory:
- name: /var/ossec
- user: ossec
- group: ossec
- recurse:
- user
- group
# Add Wazuh agent conf
wazuhagentconf:
file.managed:

View File

@@ -11,6 +11,6 @@ installonly_limit={{ salt['pillar.get']('yum:config:installonly_limit', 2) }}
bugtracker_url=http://bugs.centos.org/set_project.php?project_id=23&ref=http://bugs.centos.org/bug_report_page.php?category=yum
distroverpkg=centos-release
{% if salt['pillar.get']('static:managerupdate', '0') %}
{% if salt['pillar.get']('global:managerupdate', '0') %}
proxy=http://{{ salt['pillar.get']('yum:config:proxy', salt['config.get']('master')) }}:3142
{% endif %}

View File

@@ -1,5 +1,5 @@
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('static:imagerepo') %}
{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{% set MANAGER = salt['grains.get']('master') %}
{% set BPF_ZEEK = salt['pillar.get']('zeek:bpf', {}) %}
{% set BPF_STATUS = 0 %}

View File

@@ -0,0 +1,77 @@
#!/bin/bash
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
TESTING=true
address_type=DHCP
ADMINUSER=onionuser
ADMINPASS1=onionuser
ADMINPASS2=onionuser
ALLOW_CIDR=0.0.0.0/0
ALLOW_ROLE=a
BASICZEEK=7
BASICSURI=7
# BLOGS=
BNICS=ens6
ZEEKVERSION=ZEEK
# CURCLOSEDAYS=
# EVALADVANCED=BASIC
GRAFANA=1
# HELIXAPIKEY=
HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
HNSENSOR=inherit
HOSTNAME=eval-aws
install_type=EVAL
# LSINPUTBATCHCOUNT=
# LSINPUTTHREADS=
# LSPIPELINEBATCH=
# LSPIPELINEWORKERS=
MANAGERADV=BASIC
MANAGERUPDATES=1
# MDNS=
# MGATEWAY=
# MIP=
# MMASK=
MNIC=ens5
# MSEARCH=
# MSRV=
# MTU=
NIDS=Suricata
# NODE_ES_HEAP_SIZE=
# NODE_LS_HEAP_SIZE=
NODESETUP=NODEBASIC
NSMSETUP=BASIC
NODEUPDATES=MANAGER
# OINKCODE=
OSQUERY=1
# PATCHSCHEDULEDAYS=
# PATCHSCHEDULEHOURS=
PATCHSCHEDULENAME=auto
PLAYBOOK=1
# REDIRECTHOST=
REDIRECTINFO=HOSTNAME
RULESETUP=ETOPEN
# SHARDCOUNT=
SKIP_REBOOT=0
SOREMOTEPASS1=onionuser
SOREMOTEPASS2=onionuser
STRELKA=1
THEHIVE=1
WAZUH=1
WEBUSER=onionuser@somewhere.invalid
WEBPASSWD1=0n10nus3r
WEBPASSWD2=0n10nus3r

View File

@@ -0,0 +1,77 @@
#!/bin/bash
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
TESTING=true
address_type=DHCP
ADMINUSER=onionuser
ADMINPASS1=onionuser
ADMINPASS2=onionuser
ALLOW_CIDR=0.0.0.0/0
ALLOW_ROLE=a
BASICZEEK=7
BASICSURI=7
# BLOGS=
BNICS=ens6
ZEEKVERSION=ZEEK
# CURCLOSEDAYS=
# EVALADVANCED=BASIC
GRAFANA=1
# HELIXAPIKEY=
HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
HNSENSOR=inherit
HOSTNAME=manager-aws
install_type=MANAGER
# LSINPUTBATCHCOUNT=
# LSINPUTTHREADS=
# LSPIPELINEBATCH=
# LSPIPELINEWORKERS=
MANAGERADV=BASIC
MANAGERUPDATES=1
# MDNS=
# MGATEWAY=
# MIP=
# MMASK=
MNIC=ens5
# MSEARCH=
# MSRV=
# MTU=
NIDS=Suricata
# NODE_ES_HEAP_SIZE=
# NODE_LS_HEAP_SIZE=
NODESETUP=NODEBASIC
NSMSETUP=BASIC
NODEUPDATES=MANAGER
# OINKCODE=
OSQUERY=1
# PATCHSCHEDULEDAYS=
# PATCHSCHEDULEHOURS=
PATCHSCHEDULENAME=auto
PLAYBOOK=1
# REDIRECTHOST=
REDIRECTINFO=HOSTNAME
RULESETUP=ETOPEN
# SHARDCOUNT=
SKIP_REBOOT=0
SOREMOTEPASS1=onionuser
SOREMOTEPASS2=onionuser
STRELKA=1
THEHIVE=1
WAZUH=1
WEBUSER=onionuser@somewhere.invalid
WEBPASSWD1=0n10nus3r
WEBPASSWD2=0n10nus3r

View File

@@ -34,7 +34,7 @@ GRAFANA=1
# HELIXAPIKEY=
HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
HNSENSOR=inherit
HOSTNAME=standalone
HOSTNAME=standalone-aws
install_type=STANDALONE
# LSINPUTBATCHCOUNT=
# LSINPUTTHREADS=

View File

@@ -27,7 +27,7 @@ accept_salt_key_remote() {
echo "Accept the key remotely on the manager" >> "$setup_log" 2>&1
# Delete the key just in case.
ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo salt-key -d "$MINION_ID" -y
salt-call state.apply ca
salt-call state.apply ca >> /dev/null 2>&1
ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo salt-key -a "$MINION_ID" -y
}
@@ -743,6 +743,7 @@ docker_seed_registry() {
"so-grafana:$VERSION" \
"so-influxdb:$VERSION" \
"so-kibana:$VERSION" \
"so-minio:$VERSION" \
"so-mysql:$VERSION" \
"so-pcaptools:$VERSION" \
"so-playbook:$VERSION" \
@@ -960,8 +961,8 @@ manager_pillar() {
cat "$pillar_file" >> "$setup_log" 2>&1
}
manager_static() {
local static_pillar="$local_salt_dir/pillar/static.sls"
manager_global() {
local global_pillar="$local_salt_dir/pillar/global.sls"
if [ -z "$SENSOR_CHECKIN_INTERVAL_MS" ]; then
SENSOR_CHECKIN_INTERVAL_MS=10000
@@ -970,9 +971,9 @@ manager_static() {
fi
fi
# Create a static file for global values
# Create a global pillar file for global values
printf '%s\n'\
"static:"\
"global:"\
" soversion: $SOVERSION"\
" hnmanager: $HNMANAGER"\
" ntpserver: $NTPSERVER"\
@@ -1002,6 +1003,7 @@ manager_static() {
" wazuh: $WAZUH"\
" managerupdate: $MANAGERUPDATES"\
" imagerepo: $IMAGEREPO"\
" pipeline: minio"\
"pcap:"\
" sensor_checkin_interval_ms: $SENSOR_CHECKIN_INTERVAL_MS"\
"strelka:"\
@@ -1071,10 +1073,19 @@ manager_static() {
" shards: 5"\
" warm: 7"\
" close: 365"\
" delete: 45" > "$static_pillar"
" delete: 45"\
"minio:"\
" access_key: $ACCESS_KEY"\
" access_secret: $ACCESS_SECRET"\
"s3_settings:"\
" size_file: 2048"\
" time_file: 1"\
" upload_queue_size: 4"\
" encoding: gzip"\
" interval: 5" > "$global_pillar"
printf '%s\n' '----' >> "$setup_log" 2>&1
cat "$static_pillar" >> "$setup_log" 2>&1
cat "$global_pillar" >> "$setup_log" 2>&1
}
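# For reference, the tail of the printf above appends a fragment like this to the
# global pillar (sketch; $ACCESS_KEY/$ACCESS_SECRET are filled in by setup):
#   minio:
#     access_key: <ACCESS_KEY>
#     access_secret: <ACCESS_SECRET>
#   s3_settings:
#     size_file: 2048
#     time_file: 1
#     upload_queue_size: 4
#     encoding: gzip
#     interval: 5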
minio_generate_keys() {
@@ -1474,10 +1485,6 @@ sensor_pillar() {
if [ "$HNSENSOR" != 'inherit' ]; then
echo " hnsensor: $HNSENSOR" >> "$pillar_file"
fi
printf '%s\n'\
" access_key: $ACCESS_KEY"\
" access_secret: $ACCESS_SECRET"\
"" >> "$pillar_file"
printf '%s\n' '----' >> "$setup_log" 2>&1
cat "$pillar_file" >> "$setup_log" 2>&1
@@ -1553,13 +1560,13 @@ set_initial_firewall_policy() {
$default_salt_dir/salt/common/tools/sbin/so-firewall --apply includehost search_node "$MAINIP"
case "$install_type" in
'EVAL')
$default_salt_dir/pillar/data/addtotab.sh evaltab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm" $INTERFACE True
$default_salt_dir/pillar/data/addtotab.sh evaltab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm" "$INTERFACE" True
;;
'MANAGERSEARCH')
$default_salt_dir/pillar/data/addtotab.sh managersearchtab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm"
;;
'STANDALONE')
$default_salt_dir/pillar/data/addtotab.sh standalonetab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm" $INTERFACE
$default_salt_dir/pillar/data/addtotab.sh standalonetab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm" "$INTERFACE"
;;
esac
;;
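# Why the added quotes matter (illustrative): with an unquoted empty $INTERFACE the
# argument vanishes and later positional parameters shift left, e.g.
#   set -- a b $empty  c   # 3 arguments
#   set -- a b "$empty" c  # 4 arguments; the empty value is preserved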
@@ -1573,7 +1580,7 @@ set_initial_firewall_policy() {
case "$install_type" in
'SENSOR')
ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/salt/common/tools/sbin/so-firewall --apply includehost sensor "$MAINIP"
ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/pillar/data/addtotab.sh sensorstab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm" $INTERFACE
ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/pillar/data/addtotab.sh sensorstab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm" "$INTERFACE"
;;
'SEARCHNODE')
ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/salt/common/tools/sbin/so-firewall --apply includehost search_node "$MAINIP"
@@ -1582,7 +1589,7 @@ set_initial_firewall_policy() {
'HEAVYNODE')
ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/salt/common/tools/sbin/so-firewall includehost sensor "$MAINIP"
ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/salt/common/tools/sbin/so-firewall --apply includehost search_node "$MAINIP"
ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/pillar/data/addtotab.sh sensorstab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm" $INTERFACE
ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/pillar/data/addtotab.sh sensorstab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm" "$INTERFACE"
ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/pillar/data/addtotab.sh nodestab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm"
;;
'FLEET')

View File

@@ -463,8 +463,9 @@ fi
set_progress_str 11 'Updating sudoers file for soremote user'
update_sudoers >> $setup_log 2>&1
set_progress_str 12 'Generating manager static pillar'
manager_static >> $setup_log 2>&1
set_progress_str 12 'Generating manager global pillar'
minio_generate_keys
manager_global >> $setup_log 2>&1
set_progress_str 13 'Generating manager pillar'
manager_pillar >> $setup_log 2>&1
@@ -619,7 +620,7 @@ fi
if [[ $is_fleet_standalone && $FLEETCUSTOMHOSTNAME != '' ]]; then
set_progress_str 77 "$(print_salt_state_apply 'fleet.event_update-custom-hostname')"
pillar_override="{\"static\":{\"fleet_custom_hostname\": \"$FLEETCUSTOMHOSTNAME\"}}"
pillar_override="{\"global\":{\"fleet_custom_hostname\": \"$FLEETCUSTOMHOSTNAME\"}}"
salt-call state.apply -l info fleet.event_update-custom-hostname pillar="$pillar_override" >> $setup_log 2>&1
fi