Merge branch 'dev' into feature/soctopus-conf

This commit is contained in:
William Wernert
2020-04-01 13:35:44 -04:00
26 changed files with 1039 additions and 623 deletions

View File

@@ -61,3 +61,5 @@ peer:
reactor: reactor:
- 'so/fleet': - 'so/fleet':
- salt://reactor/fleet.sls - salt://reactor/fleet.sls
- 'salt/beacon/*/zeek/':
- salt://reactor/zeek.sls

View File

@@ -1,5 +1,5 @@
healthcheck: healthcheck:
enabled: False enabled: False
schedule: 10 schedule: 60
checks: checks:
- zeek - zeek

View File

@@ -1,5 +1,5 @@
healthcheck: healthcheck:
enabled: False enabled: False
schedule: 10 schedule: 60
checks: checks:
- zeek - zeek

33
salt/_beacons/zeek.py Normal file
View File

@@ -0,0 +1,33 @@
import logging
def status():
    """Return the raw output of ``zeekctl status`` from the so-zeek container.

    Runs zeekctl as the ``zeek`` user inside the container via the Salt
    ``docker.run`` execution module and returns its stdout unmodified.
    ``__salt__`` is injected by the Salt loader at runtime.
    """
    cmd = "runuser -l zeek -c '/opt/zeek/bin/zeekctl status'"
    retval = __salt__['docker.run']('so-zeek', cmd)
    # Log prefix fixed: this file is the zeek beacon, not the zeekctl module.
    logging.debug('zeek_beacon: zeekctl.status retval: %s', retval)
    return retval
def beacon(config):
    """Salt beacon entry point: emit a ``zeek_restart`` event when zeek is unhealthy.

    When the healthcheck pillar is enabled, parses ``zeekctl status`` output
    and flags a restart if any token indicates a stopped/crashed/errored
    worker. Also pushes the flag to telegraf for dashboarding.

    :param config: beacon config from the minion (unused here, required by
        the Salt beacon interface).
    :return: list of event dicts (Salt beacon contract); empty when the
        healthcheck is disabled.
    """
    retval = []
    is_enabled = __salt__['healthcheck.is_enabled']()
    logging.debug('zeek_beacon: healthcheck_is_enabled: %s', is_enabled)
    if is_enabled:
        # status() output is lowercased and split on single spaces, so we
        # match whole tokens; 'error:' covers zeekctl's "Error: ..." form.
        zeekstatus = status().lower().split(' ')
        logging.debug('zeek_beacon: zeekctl.status: %s', zeekstatus)
        zeek_restart = any(
            token in zeekstatus
            for token in ('stopped', 'crashed', 'error', 'error:')
        )
        __salt__['telegraf.send']('healthcheck zeek_restart=%s' % str(zeek_restart))
        retval.append({'zeek_restart': zeek_restart})
    logging.info('zeek_beacon: retval: %s', retval)
    return retval

View File

@@ -3,50 +3,60 @@
import logging import logging
import sys import sys
allowed_functions = ['zeek'] allowed_functions = ['is_enabled', 'zeek']
states_to_apply = [] states_to_apply = []
def apply_states(states=''): def apply_states(states=''):
calling_func = sys._getframe().f_back.f_code.co_name calling_func = sys._getframe().f_back.f_code.co_name
logging.debug('healthcheck module: apply_states function caller: %s' % calling_func) logging.debug('healthcheck_module: apply_states function caller: %s' % calling_func)
if not states: if not states:
states = ','.join(states_to_apply) states = ','.join(states_to_apply)
if states: if states:
logging.info('healthcheck module: apply_states states: %s' % str(states)) logging.info('healthcheck_module: apply_states states: %s' % str(states))
__salt__['state.apply'](states) __salt__['state.apply'](states)
def docker_restart(container): def docker_stop(container):
try: try:
stopdocker = __salt__['docker.rm'](container, 'stop=True') stopdocker = __salt__['docker.rm'](container, 'stop=True')
except Exception as e: except Exception as e:
logging.error('healthcheck module: %s' % e) logging.error('healthcheck_module: %s' % e)
def is_enabled():
if __salt__['pillar.get']('healthcheck:enabled', 'False'):
retval = True
else:
retval = False
return retval
def run(checks=''): def run(checks=''):
retval = [] retval = []
calling_func = sys._getframe().f_back.f_code.co_name calling_func = sys._getframe().f_back.f_code.co_name
logging.debug('healthcheck module: run function caller: %s' % calling_func) logging.debug('healthcheck_module: run function caller: %s' % calling_func)
if checks: if checks:
checks = checks.split(',') checks = checks.split(',')
else: else:
checks = __salt__['pillar.get']('healthcheck:checks', {}) checks = __salt__['pillar.get']('healthcheck:checks', {})
logging.debug('healthcheck module: run checks to be run: %s' % str(checks)) logging.debug('healthcheck_module: run checks to be run: %s' % str(checks))
for check in checks: for check in checks:
if check in allowed_functions: if check in allowed_functions:
retval.append(check) retval.append(check)
check = getattr(sys.modules[__name__], check) check = getattr(sys.modules[__name__], check)
check() check()
else: else:
logging.warning('healthcheck module: attempted to run function %s' % check) logging.warning('healthcheck_module: attempted to run function %s' % check)
# If you want to apply states at the end of the run, # If you want to apply states at the end of the run,
# be sure to append the state name to states_to_apply[] # be sure to append the state name to states_to_apply[]
@@ -58,19 +68,23 @@ def run(checks=''):
def zeek(): def zeek():
calling_func = sys._getframe().f_back.f_code.co_name calling_func = sys._getframe().f_back.f_code.co_name
logging.debug('healthcheck module: zeek function caller: %s' % calling_func) logging.info('healthcheck_module: zeek function caller: %s' % calling_func)
retval = []
retcode = __salt__['zeekctl.status'](verbose=False) retcode = __salt__['zeekctl.status'](verbose=False)
logging.debug('zeekctl.status retcode: %i' % retcode) logging.info('healthcheck_module: zeekctl.status retcode: %i' % retcode)
if retcode: if retcode:
docker_restart('so-zeek') zeek_restart = True
states_to_apply.append('zeek') if calling_func != 'beacon':
zeek_restarted = True docker_stop('so-zeek')
states_to_apply.append('zeek')
else: else:
zeek_restarted = False zeek_restart = False
if calling_func == 'execute': if calling_func == 'execute' and zeek_restart:
apply_states() apply_states()
retval.append({'zeek_restart': zeek_restart})
__salt__['telegraf.send']('healthcheck zeek_restarted=%s' % str(zeek_restarted)) __salt__['telegraf.send']('healthcheck zeek_restart=%s' % str(zeek_restart))
return 'zeek_restarted: %s' % str(zeek_restarted) return retval

View File

@@ -1,5 +1,7 @@
#!py #!py
import logging
def capstats(interval=10): def capstats(interval=10):
@@ -140,7 +142,7 @@ def status(verbose=True):
retval = __salt__['docker.run']('so-zeek', cmd) retval = __salt__['docker.run']('so-zeek', cmd)
if not verbose: if not verbose:
retval = __context__['retcode'] retval = __context__['retcode']
logging.info('zeekctl_module: zeekctl.status_NOTVERBOSE retval: %s' % retval)
return retval return retval

View File

@@ -516,7 +516,7 @@
} }
], ],
"thresholds": "5,10", "thresholds": "5,10",
"title": "{{ SERVERNAME }} - Zeek Packet Loss", "title": "{{ SERVERNAME }} - Zeek Packet Loss",
"type": "singlestat", "type": "singlestat",
"valueFontSize": "80%", "valueFontSize": "80%",
"valueMaps": [ "valueMaps": [
@@ -772,6 +772,130 @@
], ],
"valueName": "current" "valueName": "current"
}, },
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": "InfluxDB",
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 5,
"w": 8,
"x": 0,
"y": 29
},
"hiddenSeries": false,
"id": 37,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"nullPointMode": "connected",
"options": {
"dataLinks": []
},
"percentage": false,
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"groupBy": [
{
"params": [
"$__interval"
],
"type": "time"
},
{
"params": [
"null"
],
"type": "fill"
}
],
"measurement": "healthcheck",
"orderByTime": "ASC",
"policy": "autogen",
"refId": "A",
"resultFormat": "time_series",
"select": [
[
{
"params": [
"zeek_restart"
],
"type": "field"
},
{
"params": [],
"type": "last"
}
]
],
"tags": [
{
"key": "host",
"operator": "=",
"value": "{{ SERVERNAME }}"
}
]
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Zeek Restarts",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{ {
"aliasColors": { "aliasColors": {
"Interrupt": "#70DBED", "Interrupt": "#70DBED",

View File

@@ -2169,6 +2169,130 @@
], ],
"valueName": "current" "valueName": "current"
}, },
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": "InfluxDB",
"fill": 1,
"fillGradient": 0,
"gridPos": {
"h": 5,
"w": 8,
"x": 0,
"y": 29
},
"hiddenSeries": false,
"id": 37,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"nullPointMode": "connected",
"options": {
"dataLinks": []
},
"percentage": false,
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"groupBy": [
{
"params": [
"$__interval"
],
"type": "time"
},
{
"params": [
"null"
],
"type": "fill"
}
],
"measurement": "healthcheck",
"orderByTime": "ASC",
"policy": "autogen",
"refId": "A",
"resultFormat": "time_series",
"select": [
[
{
"params": [
"zeek_restart"
],
"type": "field"
},
{
"params": [],
"type": "last"
}
]
],
"tags": [
{
"key": "host",
"operator": "=",
"value": "{{ SERVERNAME }}"
}
]
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Zeek Restarts",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
},
{ {
"aliasColors": { "aliasColors": {
"Buffered": "#6ED0E0", "Buffered": "#6ED0E0",

View File

@@ -83,10 +83,6 @@ docker:
service.running: service.running:
- enable: True - enable: True
salt-minion:
service.running:
- enable: True
# Drop the correct nginx config based on role # Drop the correct nginx config based on role
nginxconfdir: nginxconfdir:

View File

@@ -0,0 +1,26 @@
{
"description" : "osquery",
"processors" : [
{ "json": { "field": "message", "target_field": "message2", "ignore_failure": true } },
{
"script": {
"lang": "painless",
"source": "def dict = ['result': new HashMap()]; for (entry in ctx['message2'].entrySet()) { dict['result'][entry.getKey()] = entry.getValue(); } ctx['osquery'] = dict; "
}
},
{ "rename": { "field": "osquery.result.hostIdentifier", "target_field": "osquery.result.host_identifier", "ignore_missing": true } },
{ "rename": { "field": "osquery.result.calendarTime", "target_field": "osquery.result.calendar_time", "ignore_missing": true } },
{ "rename": { "field": "osquery.result.unixTime", "target_field": "osquery.result.unix_time", "ignore_missing": true } },
{ "json": { "field": "message", "target_field": "message3", "ignore_failure": true } },
{ "rename": { "field": "message3.columns.username", "target_field": "user.name", "ignore_missing": true } },
{ "rename": { "field": "message3.columns.uid", "target_field": "user.uid", "ignore_missing": true } },
{ "rename": { "field": "message3.columns.gid", "target_field": "user.gid", "ignore_missing": true } },
{ "rename": { "field": "message3.columns.shell", "target_field": "user.shell", "ignore_missing": true } },
{ "rename": { "field": "message3.columns.cmdline", "target_field": "process.command_line", "ignore_missing": true } },
{ "rename": { "field": "message3.columns.pid", "target_field": "process.pid", "ignore_missing": true } },
{ "rename": { "field": "message3.columns.parent", "target_field": "process.ppid", "ignore_missing": true } },
{ "rename": { "field": "message3.columns.cwd", "target_field": "process.working_directory", "ignore_missing": true } },
{ "remove": { "field": [ "message3"], "ignore_failure": false } },
{ "pipeline": { "name": "common" } }
]
}

View File

@@ -2,6 +2,7 @@
"description" : "zeek.weird", "description" : "zeek.weird",
"processors" : [ "processors" : [
{ "remove": { "field": ["host"], "ignore_failure": true } }, { "remove": { "field": ["host"], "ignore_failure": true } },
{ "json": { "field": "message", "target_field": "message2", "ignore_failure": true } },
{ "rename": { "field": "message2.name", "target_field": "weird.name", "ignore_missing": true } }, { "rename": { "field": "message2.name", "target_field": "weird.name", "ignore_missing": true } },
{ "rename": { "field": "message2.addl", "target_field": "weird.additional_info", "ignore_missing": true } }, { "rename": { "field": "message2.addl", "target_field": "weird.additional_info", "ignore_missing": true } },
{ "rename": { "field": "message2.notice", "target_field": "weird.notice", "ignore_missing": true } }, { "rename": { "field": "message2.notice", "target_field": "weird.notice", "ignore_missing": true } },

View File

@@ -146,7 +146,9 @@ filebeat.inputs:
paths: paths:
- /nsm/osquery/fleet/result.log - /nsm/osquery/fleet/result.log
fields: fields:
type: osquery module: osquery
dataset: query_result
category: host
processors: processors:
- drop_fields: - drop_fields:
@@ -162,9 +164,10 @@ filebeat.inputs:
- type: log - type: log
paths: paths:
- /opt/so/log/strelka/strelka.log - /nsm/strelka/log/strelka.log
fields: fields:
module: strelka module: strelka
category: file
dataset: file dataset: file
processors: processors:
@@ -192,6 +195,12 @@ output.elasticsearch:
- index: "so-ossec-%{+yyyy.MM.dd}" - index: "so-ossec-%{+yyyy.MM.dd}"
when.contains: when.contains:
module: "ossec" module: "ossec"
- index: "so-osquery-%{+yyyy.MM.dd}"
when.contains:
module: "osquery"
- index: "so-strelka-%{+yyyy.MM.dd}"
when.contains:
module: "strelka"
#output.logstash: #output.logstash:
# Boolean flag to enable or disable the output module. # Boolean flag to enable or disable the output module.

View File

@@ -56,10 +56,11 @@ so-filebeat:
- /opt/so/log/filebeat:/usr/share/filebeat/logs:rw - /opt/so/log/filebeat:/usr/share/filebeat/logs:rw
- /opt/so/conf/filebeat/etc/filebeat.yml:/usr/share/filebeat/filebeat.yml:ro - /opt/so/conf/filebeat/etc/filebeat.yml:/usr/share/filebeat/filebeat.yml:ro
- /nsm/zeek:/nsm/zeek:ro - /nsm/zeek:/nsm/zeek:ro
- /nsm/strelka/log:/nsm/strelka/log:ro
- /opt/so/log/suricata:/suricata:ro - /opt/so/log/suricata:/suricata:ro
- /opt/so/wazuh/logs/alerts:/wazuh/alerts:ro - /opt/so/wazuh/logs/alerts:/wazuh/alerts:ro
- /opt/so/wazuh/logs/archives:/wazuh/archives:ro - /opt/so/wazuh/logs/archives:/wazuh/archives:ro
- /nsm/osquery/fleet/:/osquery/logs:ro - /nsm/osquery/fleet/:/nsm/osquery/fleet:ro
- /opt/so/conf/filebeat/etc/pki/filebeat.crt:/usr/share/filebeat/filebeat.crt:ro - /opt/so/conf/filebeat/etc/pki/filebeat.crt:/usr/share/filebeat/filebeat.crt:ro
- /opt/so/conf/filebeat/etc/pki/filebeat.key:/usr/share/filebeat/filebeat.key:ro - /opt/so/conf/filebeat/etc/pki/filebeat.key:/usr/share/filebeat/filebeat.key:ro
- /etc/ssl/certs/intca.crt:/usr/share/filebeat/intraca.crt:ro - /etc/ssl/certs/intca.crt:/usr/share/filebeat/intraca.crt:ro

View File

@@ -5,11 +5,11 @@ spec:
decorators: decorators:
always: always:
- SELECT codename FROM os_version; - SELECT codename FROM os_version;
- SELECT uuid AS LiveQuery FROM system_info; - SELECT uuid AS live_query FROM system_info;
- SELECT address AS EndpointIP1 FROM interface_addresses where address not - SELECT address AS endpoint_ip1 FROM interface_addresses where address not
like '%:%' and address not like '127%' and address not like '169%' order by like '%:%' and address not like '127%' and address not like '169%' order by
interface desc limit 1; interface desc limit 1;
- SELECT address AS EndpointIP2 FROM interface_addresses where address not - SELECT address AS endpoint_ip2 FROM interface_addresses where address not
like '%:%' and address not like '127%' and address not like '169%' order by like '%:%' and address not like '127%' and address not like '169%' order by
interface asc limit 1; interface asc limit 1;
- SELECT hardware_serial FROM system_info; - SELECT hardware_serial FROM system_info;

View File

@@ -1,3 +1,6 @@
### This state isn't used for anything. It was written to handle healthcheck scheduling,
### but we handle that with beacons now.
{% set CHECKS = salt['pillar.get']('healthcheck:checks', {}) %} {% set CHECKS = salt['pillar.get']('healthcheck:checks', {}) %}
{% set ENABLED = salt['pillar.get']('healthcheck:enabled', False) %} {% set ENABLED = salt['pillar.get']('healthcheck:enabled', False) %}
{% set SCHEDULE = salt['pillar.get']('healthcheck:schedule', 30) %} {% set SCHEDULE = salt['pillar.get']('healthcheck:schedule', 30) %}

File diff suppressed because one or more lines are too long

View File

@@ -1,5 +1,5 @@
{ {
"index_patterns": ["so-ids-*", "so-firewall-*", "so-syslog-*", "so-zeek-*", "so-import-*", "so-ossec-*", "so-strelka-*", "so-beats-*"], "index_patterns": ["so-ids-*", "so-firewall-*", "so-syslog-*", "so-zeek-*", "so-import-*", "so-ossec-*", "so-strelka-*", "so-beats-*", "so-osquery-*"],
"version":50001, "version":50001,
"order" : 10, "order" : 10,
"settings":{ "settings":{
@@ -252,6 +252,10 @@
"type":"object", "type":"object",
"dynamic": true "dynamic": true
}, },
"request":{
"type":"object",
"dynamic": true
},
"rfb":{ "rfb":{
"type":"object", "type":"object",
"dynamic": true "dynamic": true
@@ -260,6 +264,10 @@
"type":"object", "type":"object",
"dynamic": true "dynamic": true
}, },
"scan":{
"type":"object",
"dynamic": true
},
"server":{ "server":{
"type":"object", "type":"object",
"dynamic": true "dynamic": true

18
salt/reactor/zeek.sls Normal file
View File

@@ -0,0 +1,18 @@
#!py
import logging
import salt.client
local = salt.client.LocalClient()
def run():
    """Reactor for the zeek beacon's ``zeek_restart`` event.

    ``data`` is injected into the reactor rendering context by Salt and
    carries the beacon payload. When the payload flags a restart, stop the
    so-zeek container on the originating minion and re-apply the zeek state
    to bring it back up. Returns an empty low state: all work is done
    imperatively through the LocalClient.
    """
    minionid = data['id']
    zeek_restart = data['zeek_restart']
    logging.info('zeek_reactor: zeek_need_restarted:%s on:%s' % (zeek_restart, minionid))
    if zeek_restart:
        local.cmd(minionid, 'healthcheck.docker_stop', ['so-zeek'])
        local.cmd(minionid, 'state.apply', ['zeek'])
    return {}

25
salt/salt/beacons.sls Normal file
View File

@@ -0,0 +1,25 @@
# Manage the minion-side beacon configuration for healthchecks.
# If healthcheck checks are defined AND the healthcheck is enabled in
# pillar, render /etc/salt/minion.d/beacons.conf from the jinja template;
# otherwise remove the file. Either way, a change restarts salt-minion
# (salt_minion_service comes from the included `salt` state).
{% set CHECKS = salt['pillar.get']('healthcheck:checks', {}) %}
{% set ENABLED = salt['pillar.get']('healthcheck:enabled', False) %}
{% set SCHEDULE = salt['pillar.get']('healthcheck:schedule', 30) %}
include:
- salt
{% if CHECKS and ENABLED %}
salt_beacons:
file.managed:
- name: /etc/salt/minion.d/beacons.conf
- source: salt://salt/files/beacons.conf.jinja
- template: jinja
- defaults:
CHECKS: {{ CHECKS }}
SCHEDULE: {{ SCHEDULE }}
- watch_in:
- service: salt_minion_service
{% else %}
# No checks configured or healthcheck disabled: ensure no stale beacon config.
salt_beacons:
file.absent:
- name: /etc/salt/minion.d/beacons.conf
- watch_in:
- service: salt_minion_service
{% endif %}

View File

@@ -0,0 +1,8 @@
{#- Render the minion `beacons` config: one beacon stanza per healthcheck
    check from pillar, each disabled during state runs and polled at the
    pillar-defined schedule interval (seconds). Renders nothing when no
    checks are configured. -#}
{% if CHECKS -%}
beacons:
{%- for check in CHECKS %}
{{ check }}:
- disable_during_state_run: True
- interval: {{ SCHEDULE }}
{%- endfor %}
{%- endif %}

4
salt/salt/init.sls Normal file
View File

@@ -0,0 +1,4 @@
# Keep the salt-minion service running and enabled at boot; other states
# (e.g. salt.beacons) watch_in this ID to restart the minion on config change.
salt_minion_service:
service.running:
- name: salt-minion
- enable: True

View File

@@ -23,14 +23,6 @@ strelkaconfdir:
- group: 939 - group: 939
- makedirs: True - makedirs: True
# Strelka logs
strelkalogdir:
file.directory:
- name: /opt/so/log/strelka
- user: 939
- group: 939
- makedirs: True
# Sync dynamic config to conf dir # Sync dynamic config to conf dir
strelkasync: strelkasync:
file.recurse: file.recurse:
@@ -47,6 +39,13 @@ strelkadatadir:
- group: 939 - group: 939
- makedirs: True - makedirs: True
strelkalogdir:
file.directory:
- name: /nsm/strelka/log
- user: 939
- group: 939
- makedirs: True
strelkastagedir: strelkastagedir:
file.directory: file.directory:
- name: /nsm/strelka/processed - name: /nsm/strelka/processed
@@ -75,7 +74,7 @@ strelka_frontend:
- image: soshybridhunter/so-strelka-frontend:HH1.2.1 - image: soshybridhunter/so-strelka-frontend:HH1.2.1
- binds: - binds:
- /opt/so/conf/strelka/frontend/:/etc/strelka/:ro - /opt/so/conf/strelka/frontend/:/etc/strelka/:ro
- /opt/so/log/strelka/:/var/log/strelka/:rw - /nsm/strelka/log/:/var/log/strelka/:rw
- privileged: True - privileged: True
- name: so-strelka-frontend - name: so-strelka-frontend
- command: strelka-frontend - command: strelka-frontend
@@ -85,7 +84,6 @@ strelka_frontend:
strelka_backend: strelka_backend:
docker_container.running: docker_container.running:
- image: soshybridhunter/so-strelka-backend:HH1.2.1 - image: soshybridhunter/so-strelka-backend:HH1.2.1
- restart_policy: unless-stopped
- binds: - binds:
- /opt/so/conf/strelka/backend/:/etc/strelka/:ro - /opt/so/conf/strelka/backend/:/etc/strelka/:ro
- /opt/so/conf/strelka/backend/yara:/etc/yara/:ro - /opt/so/conf/strelka/backend/yara:/etc/yara/:ro

View File

@@ -6,12 +6,14 @@
{%- set DOMAINSTATS = salt['pillar.get']('master:domainstats', '0') -%} {%- set DOMAINSTATS = salt['pillar.get']('master:domainstats', '0') -%}
{%- set FLEETMASTER = salt['pillar.get']('static:fleet_master', False) -%} {%- set FLEETMASTER = salt['pillar.get']('static:fleet_master', False) -%}
{%- set FLEETNODE = salt['pillar.get']('static:fleet_node', False) -%} {%- set FLEETNODE = salt['pillar.get']('static:fleet_node', False) -%}
{%- set STRELKA = salt['pillar.get']('master:strelka', '1') -%}
base: base:
'*': '*':
- patch.os.schedule - patch.os.schedule
- motd - motd
- salt
'*_helix': '*_helix':
- ca - ca
@@ -35,7 +37,7 @@ base:
- firewall - firewall
- pcap - pcap
- suricata - suricata
- healthcheck - salt.beacons
{%- if BROVER != 'SURICATA' %} {%- if BROVER != 'SURICATA' %}
- zeek - zeek
{%- endif %} {%- endif %}
@@ -55,7 +57,8 @@ base:
- soc - soc
- firewall - firewall
- idstools - idstools
- healthcheck - auth
- salt.beacons
{%- if FLEETMASTER or FLEETNODE %} {%- if FLEETMASTER or FLEETNODE %}
- mysql - mysql
{%- endif %} {%- endif %}
@@ -63,11 +66,14 @@ base:
- wazuh - wazuh
{%- endif %} {%- endif %}
- elasticsearch - elasticsearch
- filebeat
- kibana - kibana
- pcap - pcap
- suricata - suricata
- zeek - zeek
{%- if STRELKA %}
- strelka
{%- endif %}
- filebeat
- curator - curator
- elastalert - elastalert
{%- if FLEETMASTER or FLEETNODE %} {%- if FLEETMASTER or FLEETNODE %}

View File

@@ -843,6 +843,7 @@ master_pillar() {
echo " wazuh: $WAZUH" >> $PILLARFILE echo " wazuh: $WAZUH" >> $PILLARFILE
echo " thehive: $THEHIVE" >> $PILLARFILE echo " thehive: $THEHIVE" >> $PILLARFILE
echo " playbook: $PLAYBOOK" >> $PILLARFILE echo " playbook: $PLAYBOOK" >> $PILLARFILE
echo " strelka: $STRELKA" >> $PILLARFILE
echo "" >> $PILLARFILE echo "" >> $PILLARFILE
echo "kratos:" >> $PILLARFILE echo "kratos:" >> $PILLARFILE
if [[ $REDIRECTINFO == 'OTHER' ]]; then if [[ $REDIRECTINFO == 'OTHER' ]]; then
@@ -993,6 +994,7 @@ process_components() {
WAZUH=0 WAZUH=0
THEHIVE=0 THEHIVE=0
PLAYBOOK=0 PLAYBOOK=0
STRELKA=0
IFS=$' ' IFS=$' '
for item in $(echo "$CLEAN"); do for item in $(echo "$CLEAN"); do
@@ -1004,6 +1006,7 @@ process_components() {
reserve_group_ids() { reserve_group_ids() {
# This is a hack to fix CentOS from taking group IDs that we need # This is a hack to fix CentOS from taking group IDs that we need
groupadd -g 928 kratos
groupadd -g 930 elasticsearch groupadd -g 930 elasticsearch
groupadd -g 931 logstash groupadd -g 931 logstash
groupadd -g 932 kibana groupadd -g 932 kibana

View File

@@ -716,6 +716,10 @@ if (whiptail_you_sure) ; then
echo -e "XXX\n93\nInstalling Playbook... \nXXX" echo -e "XXX\n93\nInstalling Playbook... \nXXX"
salt-call state.apply playbook >> $SETUPLOG 2>&1 salt-call state.apply playbook >> $SETUPLOG 2>&1
fi fi
if [[ $STRELKA == '1' ]]; then
echo -e "XXX\n95\nInstalling Strelka... \nXXX"
salt-call state.apply strelka >> $SETUPLOG 2>&1
fi
echo -e "XXX\n95\nSetting checkin to run on boot... \nXXX" echo -e "XXX\n95\nSetting checkin to run on boot... \nXXX"
checkin_at_boot >> $SETUPLOG 2>&1 checkin_at_boot >> $SETUPLOG 2>&1
echo -e "XX\n97\nFinishing touches... \nXXX" echo -e "XX\n97\nFinishing touches... \nXXX"

View File

@@ -214,7 +214,8 @@ whiptail_enable_components() {
"OSQUERY" "Enable Fleet with osquery" ON \ "OSQUERY" "Enable Fleet with osquery" ON \
"WAZUH" "Enable Wazuh" ON \ "WAZUH" "Enable Wazuh" ON \
"THEHIVE" "Enable TheHive" ON \ "THEHIVE" "Enable TheHive" ON \
"PLAYBOOK" "Enable Playbook" ON 3>&1 1>&2 2>&3 ) "PLAYBOOK" "Enable Playbook" ON \
"STRELKA" "Enable Strelka" ON 3>&1 1>&2 2>&3 )
local exitstatus=$? local exitstatus=$?
whiptail_check_exitstatus $exitstatus whiptail_check_exitstatus $exitstatus