Merge branch 'dev' into kilo

Jason Ertel
2021-08-12 15:01:12 -04:00
25 changed files with 601 additions and 25 deletions

HOTFIX

@@ -0,0 +1 @@


@@ -13,3 +13,4 @@ logstash:
 - so/9500_output_beats.conf.jinja
 - so/9600_output_ossec.conf.jinja
 - so/9700_output_strelka.conf.jinja
+- so/9800_output_logscan.conf.jinja


@@ -45,7 +45,8 @@
 'schedule',
 'soctopus',
 'tcpreplay',
-'docker_clean'
+'docker_clean',
+'learn'
 ],
 'so-heavynode': [
 'ca',
@@ -108,7 +109,8 @@
 'zeek',
 'schedule',
 'tcpreplay',
-'docker_clean'
+'docker_clean',
+'learn'
 ],
 'so-manager': [
 'salt.master',
@@ -127,7 +129,8 @@
 'utility',
 'schedule',
 'soctopus',
-'docker_clean'
+'docker_clean',
+'learn'
 ],
 'so-managersearch': [
 'salt.master',
@@ -146,7 +149,8 @@
 'utility',
 'schedule',
 'soctopus',
-'docker_clean'
+'docker_clean',
+'learn'
 ],
 'so-node': [
 'ca',
@@ -178,7 +182,8 @@
 'schedule',
 'soctopus',
 'tcpreplay',
-'docker_clean'
+'docker_clean',
+'learn'
 ],
 'so-sensor': [
 'ca',
@@ -237,7 +242,7 @@
 {% do allowed_states.append('kibana') %}
 {% endif %}
-{% if CURATOR and grains.role in ['so-eval', 'so-standalone', 'so-node', 'so-managersearch', 'so-heavynode'] %}
+{% if grains.role in ['so-eval', 'so-standalone', 'so-node', 'so-managersearch', 'so-heavynode', 'so-manager'] %}
 {% do allowed_states.append('curator') %}
 {% endif %}
@@ -296,4 +301,4 @@
 {% endif %}
 {# all nodes can always run salt.minion state #}
-{% do allowed_states.append('salt.minion') %}
\ No newline at end of file
+{% do allowed_states.append('salt.minion') %}
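For context on what this map controls: allowed_states.map.jinja computes a per-role list of Salt state names, and individual states gate themselves on membership in that list (salt/learn/init.sls below uses exactly this check). A rough Python model of the gate, with a trimmed, illustrative role map:

# Rough Python model of the allowed_states gate; the role map is trimmed to
# the 'learn' change above and is illustrative only.
allowed_states_by_role = {
    'so-manager': ['salt.master', 'learn', 'salt.minion'],
    'so-sensor': ['ca', 'salt.minion'],
}

def state_allowed(role: str, sls: str) -> bool:
    # A state renders its real content only when its own name (sls) is in
    # the list computed for the minion's role.
    return sls in allowed_states_by_role.get(role, [])

print(state_allowed('so-manager', 'learn'))  # True
print(state_allowed('so-sensor', 'learn'))   # False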


@@ -22,6 +22,7 @@
 /opt/so/log/salt/so-salt-minion-check
 /opt/so/log/salt/minion
 /opt/so/log/salt/master
+/opt/so/log/logscan/*.log
 {
 {{ logrotate_conf | indent(width=4) }}
 }


@@ -0,0 +1,17 @@
# this script is used to delete the default Grafana dashboard folders that existed prior to Grafana dashboard and Salt management changes in 2.3.70

folders=$(curl -X GET http://admin:{{salt['pillar.get']('secrets:grafana_admin')}}@localhost:3000/api/folders | jq -r '.[] | @base64')
delfolder=("Manager" "Manager Search" "Sensor Nodes" "Search Nodes" "Standalone" "Eval Mode")

for row in $folders; do
    title=$(echo ${row} | base64 --decode | jq -r '.title')
    uid=$(echo ${row} | base64 --decode | jq -r '.uid')
    if [[ " ${delfolder[@]} " =~ " ${title} " ]]; then
        curl -X DELETE http://admin:{{salt['pillar.get']('secrets:grafana_admin')}}@localhost:3000/api/folders/$uid
    fi
done

echo "so-grafana-dashboard-folder-delete has been run to delete default Grafana dashboard folders that existed prior to 2.3.70" > /opt/so/state/so-grafana-dashboard-folder-delete-complete
exit 0
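The script iterates Grafana folders through jq's @base64 round-trip so that titles containing spaces survive the shell loop. A rough Python equivalent of the same cleanup, assuming only the two Grafana HTTP API endpoints used above (GET /api/folders and DELETE /api/folders/{uid}); the requests usage and placeholder credentials are illustrative:

import requests

GRAFANA = 'http://localhost:3000'
AUTH = ('admin', 'PLACEHOLDER')  # the real template renders the pillar secret here
DELETE_TITLES = {'Manager', 'Manager Search', 'Sensor Nodes', 'Search Nodes', 'Standalone', 'Eval Mode'}

# GET /api/folders returns a JSON array of objects with 'title' and 'uid'.
for folder in requests.get(f'{GRAFANA}/api/folders', auth=AUTH).json():
    if folder['title'] in DELETE_TITLES:
        requests.delete(f'{GRAFANA}/api/folders/{folder["uid"]}', auth=AUTH)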


@@ -37,6 +37,7 @@ container_list() {
 "so-idstools"
 "so-kibana"
 "so-kratos"
+"so-logscan"
 "so-nginx"
 "so-pcaptools"
 "so-soc"
@@ -58,6 +59,7 @@ container_list() {
 "so-influxdb"
 "so-kibana"
 "so-kratos"
+"so-logscan"
 "so-logstash"
 "so-mysql"
 "so-nginx"


@@ -0,0 +1,265 @@
#!/usr/bin/env python3
# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from typing import List
import signal
import sys
import os
import re
import subprocess
import argparse
import textwrap
import yaml
import multiprocessing

minion_pillar_dir = '/opt/so/saltstack/local/pillar/minions'
so_status_conf = '/opt/so/conf/so-status/so-status.conf'

salt_proc: subprocess.CompletedProcess = None


# Temp store of modules, will likely be broken out into salt
def get_learn_modules():
    return {
        'logscan': { 'cpu_period': get_cpu_period(fraction=0.25), 'enabled': False, 'description': 'Scan log files against pre-trained models to alert on anomalies.' }
    }


def get_cpu_period(fraction: float):
    multiplier = 10000
    num_cores = multiprocessing.cpu_count()
    if num_cores <= 2:
        fraction = 1.
    num_used_cores = int(num_cores * fraction)
    cpu_period = num_used_cores * multiplier
    return cpu_period


def sigint_handler(*_):
    print('Exiting gracefully on Ctrl-C')
    if salt_proc is not None: salt_proc.send_signal(signal.SIGINT)
    sys.exit(0)


def find_minion_pillar() -> str:
    regex = '^.*_(manager|managersearch|standalone|import|eval)\.sls$'
    result = []
    for root, _, files in os.walk(minion_pillar_dir):
        for f_minion_id in files:
            if re.search(regex, f_minion_id):
                result.append(os.path.join(root, f_minion_id))
    if len(result) == 0:
        print('Could not find manager-type pillar (eval, standalone, manager, managersearch, import). Are you running this script on the manager?', file=sys.stderr)
        sys.exit(3)
    elif len(result) > 1:
        res_str = ', '.join(f'\"{result}\"')
        print('(This should not happen, the system is in an error state if you see this message.)\n', file=sys.stderr)
        print('More than one manager-type pillar exists, minion id\'s listed below:', file=sys.stderr)
        print(f'  {res_str}', file=sys.stderr)
        sys.exit(3)
    else:
        return result[0]


def read_pillar(pillar: str):
    try:
        with open(pillar, 'r') as pillar_file:
            loaded_yaml = yaml.safe_load(pillar_file.read())
            if loaded_yaml is None:
                print(f'Could not parse {pillar}', file=sys.stderr)
                sys.exit(3)
            return loaded_yaml
    except:
        print(f'Could not open {pillar}', file=sys.stderr)
        sys.exit(3)


def write_pillar(pillar: str, content: dict):
    try:
        with open(pillar, 'w') as pillar_file:
            yaml.dump(content, pillar_file, default_flow_style=False)
    except:
        print(f'Could not open {pillar}', file=sys.stderr)
        sys.exit(3)


def mod_so_status(action: str, item: str):
    with open(so_status_conf, 'a+') as conf:
        conf.seek(0)
        containers = conf.readlines()
        if f'so-{item}\n' in containers:
            if action == 'remove': containers.remove(f'so-{item}\n')
            if action == 'add': pass
        else:
            if action == 'remove': pass
            if action == 'add': containers.append(f'so-{item}\n')
        [containers.remove(c_name) for c_name in containers if c_name == '\n']  # remove extra newlines
        conf.seek(0)
        conf.truncate(0)
        conf.writelines(containers)


def create_pillar_if_not_exist(pillar: str, content: dict):
    pillar_dict = content
    if pillar_dict.get('learn', {}).get('modules') is None:
        pillar_dict['learn'] = {}
        pillar_dict['learn']['modules'] = get_learn_modules()
        content.update()
        write_pillar(pillar, content)
    return content


def apply(module_list: List):
    return_code = 0
    for module in module_list:
        salt_cmd = ['salt-call', 'state.apply', '-l', 'quiet', f'learn.{module}', 'queue=True']
        print(f'  Applying salt state for {module} module...')
        salt_proc = subprocess.run(salt_cmd, stdout=subprocess.DEVNULL)
        if salt_proc.returncode != 0:
            print(f'  [ERROR] Failed to apply salt state for {module} module.')
            return_code = salt_proc.returncode
    return return_code


def check_apply(args: dict):
    if args.apply:
        print('Configuration updated. Applying changes:')
        return apply(args.modules)
    else:
        message = 'Configuration updated. Would you like to apply your changes now? (y/N) '
        answer = input(message)
        while answer.lower() not in [ 'y', 'n', '' ]:
            answer = input(message)
        if answer.lower() in [ 'n', '' ]:
            return 0
        else:
            print('Applying changes:')
            return apply(args.modules)


def enable_disable_modules(args, enable: bool):
    pillar_modules = args.pillar_dict.get('learn', {}).get('modules')
    pillar_mod_names = args.pillar_dict.get('learn', {}).get('modules').keys()
    action_str = 'add' if enable else 'remove'
    if 'all' in args.modules:
        for module, details in pillar_modules.items():
            details['enabled'] = enable
            mod_so_status(action_str, module)
        args.pillar_dict.update()
        write_pillar(args.pillar, args.pillar_dict)
    else:
        write_needed = False
        for module in args.modules:
            if module in pillar_mod_names:
                if pillar_modules[module]['enabled'] == enable:
                    state_str = 'enabled' if enable else 'disabled'
                    print(f'{module} module already {state_str}.', file=sys.stderr)
                else:
                    pillar_modules[module]['enabled'] = enable
                    mod_so_status(action_str, module)
                    write_needed = True
        if write_needed:
            args.pillar_dict.update()
            write_pillar(args.pillar, args.pillar_dict)
    cmd_ret = check_apply(args)
    return cmd_ret


def enable_modules(args):
    enable_disable_modules(args, enable=True)


def disable_modules(args):
    enable_disable_modules(args, enable=False)


def list_modules(*_):
    print('Available ML modules:')
    for module, details in get_learn_modules().items():
        print(f'  - { module } : {details["description"]}')
    return 0


def main():
    beta_str = 'BETA - SUBJECT TO CHANGE\n'
    apply_help = 'After ACTION the chosen modules, apply any necessary salt states.'
    enable_apply_help = apply_help.replace('ACTION', 'enabling')
    disable_apply_help = apply_help.replace('ACTION', 'disabling')

    signal.signal(signal.SIGINT, sigint_handler)

    if os.geteuid() != 0:
        print('You must run this script as root', file=sys.stderr)
        sys.exit(1)

    main_parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter)
    subcommand_desc = textwrap.dedent(
        """\
        enable     Enable one or more ML modules.
        disable    Disable one or more ML modules.
        list       List all available ML modules.
        """
    )
    subparsers = main_parser.add_subparsers(title='commands', description=subcommand_desc, metavar='', dest='command')

    module_help_str = 'One or more ML modules, which can be listed using \'so-learn list\'. Use the keyword \'all\' to apply the action to all available modules.'

    enable = subparsers.add_parser('enable')
    enable.set_defaults(func=enable_modules)
    enable.add_argument('modules', metavar='ML_MODULE', nargs='+', help=module_help_str)
    enable.add_argument('--apply', action='store_const', const=True, required=False, help=enable_apply_help)

    disable = subparsers.add_parser('disable')
    disable.set_defaults(func=disable_modules)
    disable.add_argument('modules', metavar='ML_MODULE', nargs='+', help=module_help_str)
    disable.add_argument('--apply', action='store_const', const=True, required=False, help=disable_apply_help)

    list = subparsers.add_parser('list')
    list.set_defaults(func=list_modules)

    args = main_parser.parse_args(sys.argv[1:])
    args.pillar = find_minion_pillar()
    args.pillar_dict = create_pillar_if_not_exist(args.pillar, read_pillar(args.pillar))

    if hasattr(args, 'func'):
        exit_code = args.func(args)
    else:
        if args.command is None:
            print(beta_str)
            main_parser.print_help()
            sys.exit(0)
    sys.exit(exit_code)


if __name__ == '__main__':
    main()
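The cpu_period value that so-learn stores for logscan comes from get_cpu_period(fraction=0.25) above. A quick trace of the arithmetic (core counts are examples; note the whole-core truncation via int()):

# int(num_cores * fraction) * 10000, with fraction forced to 1.0 on <= 2 cores:
#   2 cores: int(2 * 1.0)  * 10000 -> 20000
#   4 cores: int(4 * 0.25) * 10000 -> 10000
#   8 cores: int(8 * 0.25) * 10000 -> 20000
# 20000 is also the pillar fallback used by salt/learn/logscan.sls below.
for cores, fraction in [(2, 1.0), (4, 0.25), (8, 0.25)]:
    print(cores, 'cores ->', int(cores * fraction) * 10000)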


@@ -31,7 +31,7 @@ if [[ $# -lt 1 ]]; then
 echo "Usage: $0 <pcap-sample(s)>"
 echo
 echo "All PCAPs must be placed in the /opt/so/samples directory unless replaying"
-echo "a sample pcap that is included in the so-tcpreplay image. Those PCAP sampes"
+echo "a sample pcap that is included in the so-tcpreplay image. Those PCAP samples"
 echo "are located in the /opt/samples directory inside of the image."
 echo
 echo "Customer provided PCAP example:"


@@ -4,7 +4,7 @@
 {%- if grains['role'] in ['so-node', 'so-heavynode'] %}
 {%- set ELASTICSEARCH_HOST = salt['pillar.get']('elasticsearch:mainip', '') -%}
 {%- set ELASTICSEARCH_PORT = salt['pillar.get']('elasticsearch:es_port', '') -%}
-{%- elif grains['role'] in ['so-eval', 'so-managersearch', 'so-standalone'] %}
+{%- elif grains['role'] in ['so-eval', 'so-managersearch', 'so-standalone', 'so-manager'] %}
 {%- set ELASTICSEARCH_HOST = salt['pillar.get']('manager:mainip', '') -%}
 {%- set ELASTICSEARCH_PORT = salt['pillar.get']('manager:es_port', '') -%}
 {%- endif -%}


@@ -1,6 +1,6 @@
 {% if grains['role'] in ['so-node', 'so-heavynode'] %}
 {%- set elasticsearch = salt['pillar.get']('elasticsearch:mainip', '') -%}
-{% elif grains['role'] in ['so-eval', 'so-managersearch', 'so-standalone'] %}
+{% elif grains['role'] in ['so-eval', 'so-managersearch', 'so-standalone', 'so-manager'] %}
 {%- set elasticsearch = salt['pillar.get']('manager:mainip', '') -%}
 {%- endif %}
 {%- if salt['pillar.get']('elasticsearch:auth:enabled') is sameas true %}


@@ -4,8 +4,9 @@
 {% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
 {% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
 {% set MANAGER = salt['grains.get']('master') %}
-{% if grains['role'] in ['so-eval', 'so-node', 'so-managersearch', 'so-heavynode', 'so-standalone'] %}
+{% if grains['role'] in ['so-eval', 'so-node', 'so-managersearch', 'so-heavynode', 'so-standalone', 'so-manager'] %}
 {% from 'elasticsearch/auth.map.jinja' import ELASTICAUTH with context %}
+{% from "curator/map.jinja" import CURATOROPTIONS with context %}
 # Curator
 # Create the group
 curatorgroup:
@@ -118,8 +119,10 @@ so-curatordeletecron:
 - dayweek: '*'
 so-curator:
-docker_container.running:
+docker_container.{{ CURATOROPTIONS.status }}:
+{% if CURATOROPTIONS.status == 'running' %}
 - image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-curator:{{ VERSION }}
+- start: {{ CURATOROPTIONS.start }}
 - hostname: curator
 - name: so-curator
 - user: curator
@@ -129,11 +132,31 @@ so-curator:
 - /opt/so/conf/curator/curator.yml:/etc/curator/config/curator.yml:ro
 - /opt/so/conf/curator/action/:/etc/curator/action:ro
 - /opt/so/log/curator:/var/log/curator:rw
 - require:
 - file: actionconfs
 - file: curconf
 - file: curlogdir
+{% else %}
+- force: True
+{% endif %}
+append_so-curator_so-status.conf:
+file.append:
+- name: /opt/so/conf/so-status/so-status.conf
+- text: so-curator
+- unless: grep -q so-curator /opt/so/conf/so-status/so-status.conf
+{% if not CURATOROPTIONS.start %}
+so-curator_so-status.disabled:
+file.comment:
+- name: /opt/so/conf/so-status/so-status.conf
+- regex: ^so-curator$
+{% else %}
+delete_so-curator_so-status.disabled:
+file.uncomment:
+- name: /opt/so/conf/so-status/so-status.conf
+- regex: ^so-curator$
+{% endif %}
 # Begin Curator Cron Jobs

salt/curator/map.jinja

@@ -0,0 +1,12 @@
{% set CURATOROPTIONS = {} %}
{% set ENABLED = salt['pillar.get']('curator:enabled', True) %}
{% set TRUECLUSTER = salt['pillar.get']('elasticsearch:true_cluster', False) %}
# Don't start the Curator container if Curator is disabled via pillar, if this is a searchnode with true clustering enabled, or if this is a manager without true clustering
{% if not ENABLED or (TRUECLUSTER and grains.id.split('_')|last == 'searchnode') or (not TRUECLUSTER and grains.id.split('_')|last == 'manager') %}
{% do CURATOROPTIONS.update({'start': False}) %}
{% do CURATOROPTIONS.update({'status': 'absent'}) %}
{% else %}
{% do CURATOROPTIONS.update({'start': True}) %}
{% do CURATOROPTIONS.update({'status': 'running'}) %}
{% endif %}
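The same decision, sketched in Python for readability; node_type stands in for the grains.id.split('_')|last expression above:

def curator_options(enabled: bool, true_cluster: bool, node_type: str) -> dict:
    # Mirrors the Jinja above: skip Curator when it is disabled via pillar,
    # on a searchnode that belongs to a true ES cluster, or on a manager
    # when true clustering is not enabled.
    if not enabled or (true_cluster and node_type == 'searchnode') \
            or (not true_cluster and node_type == 'manager'):
        return {'start': False, 'status': 'absent'}
    return {'start': True, 'status': 'running'}

print(curator_options(True, False, 'searchnode'))  # {'start': True, 'status': 'running'}
print(curator_options(True, True, 'searchnode'))   # {'start': False, 'status': 'absent'}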


@@ -0,0 +1,29 @@
{
  "description": "logscan",
  "processors": [
    { "set": { "field": "event.severity", "value": 2 } },
    { "json": { "field": "message", "add_to_root": true, "ignore_failure": true } },
    { "rename": { "field": "@timestamp", "target_field": "event.ingested", "ignore_missing": true } },
    { "date": { "field": "timestamp", "target_field": "event.created", "formats": [ "ISO8601", "UNIX" ], "ignore_failure": true } },
    { "date": { "field": "start_time", "target_field": "@timestamp", "formats": [ "ISO8601", "UNIX" ], "ignore_failure": true } },
    { "date": { "field": "start_time", "target_field": "event.start", "formats": [ "ISO8601", "UNIX" ], "ignore_failure": true } },
    { "date": { "field": "end_time", "target_field": "event.end", "formats": [ "ISO8601", "UNIX" ], "ignore_failure": true } },
    { "remove": { "field": "start_time", "ignore_missing": true } },
    { "remove": { "field": "end_time", "ignore_missing": true } },
    { "rename": { "field": "source_ip", "target_field": "source.ip", "ignore_missing": true } },
    { "rename": { "field": "top_source_ips", "target_field": "logscan.source.ips", "ignore_missing": true } },
    { "append": { "if": "ctx.source != null", "field": "logscan.source.ips", "value": "{{{source.ip}}}", "ignore_failure": true } },
    { "set": { "if": "ctx.model == 'k1'", "field": "rule.name", "value": "LOGSCAN K1 MODEL THRESHOLD" } },
    { "set": { "if": "ctx.model == 'k1'", "field": "rule.description", "value": "High number of logins from single IP in 1 minute window" } },
    { "set": { "if": "ctx.model == 'k5'", "field": "rule.name", "value": "LOGSCAN K5 MODEL THRESHOLD" } },
    { "set": { "if": "ctx.model == 'k5'", "field": "rule.description", "value": "High ratio of login failures from single IP in 5 minute window" } },
    { "set": { "if": "ctx.model == 'k60'", "field": "rule.name", "value": "LOGSCAN K60 MODEL THRESHOLD" } },
    { "set": { "if": "ctx.model == 'k60'", "field": "rule.description", "value": "Large number of login failures in 1 hour window" } },
    { "rename": { "field": "model", "target_field": "logscan.model" } },
    { "rename": { "field": "num_attempts", "target_field": "logscan.attempts.total.amount", "ignore_missing": true } },
    { "rename": { "field": "num_failed", "target_field": "logscan.attempts.failed.amount", "ignore_missing": true } },
    { "script": { "lang": "painless", "source": "ctx.logscan.attempts.succeeded.amount = ctx.logscan.attempts.total.amount - ctx.logscan.attempts.failed.amount", "ignore_failure": true } },
    { "rename": { "field": "avg_failure_interval", "target_field": "logscan.attempts.failed.avg_interval", "ignore_missing": true } },
    { "pipeline": { "name": "common" } }
  ]
}
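To make the derived fields concrete, a rough Python rendering of what this pipeline computes for one alert; the input field names come from the processors above, but the values are invented for illustration:

# Hypothetical logscan alert as it would arrive from the log shipper:
alert = {'model': 'k5', 'num_attempts': 120, 'num_failed': 90}

logscan = {
    'model': alert['model'],
    'attempts': {
        'total': {'amount': alert['num_attempts']},
        'failed': {'amount': alert['num_failed']},
    },
}
# Mirrors the painless script: succeeded = total - failed.
logscan['attempts']['succeeded'] = {'amount': alert['num_attempts'] - alert['num_failed']}
print(logscan['attempts']['succeeded']['amount'])  # 30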


@@ -13,6 +13,7 @@
 { "rename": { "field": "message2.fileinfo.size", "target_field": "file.size", "ignore_missing": true } },
 { "rename": { "field": "message2.fileinfo.state", "target_field": "file.state", "ignore_missing": true } },
 { "rename": { "field": "message2.fileinfo.stored", "target_field": "file.saved", "ignore_missing": true } },
+{ "rename": { "field": "message2.fileinfo.sha256", "target_field": "hash.sha256", "ignore_missing": true } },
 { "set": { "if": "ctx.network?.protocol != null", "field": "file.source", "value": "{{network.protocol}}" } },
 { "pipeline": { "name": "common" } }
 ]


@@ -313,6 +313,10 @@
 "type":"object",
 "dynamic": true
 },
+"logscan": {
+"type": "object",
+"dynamic": true
+},
 "manager":{
 "type":"object",
 "dynamic": true


@@ -112,6 +112,21 @@ filebeat.inputs:
 fields: ["source", "prospector", "input", "offset", "beat"]
 fields_under_root: true
+{%- if grains['role'] in ['so-eval', 'so-standalone', 'so-manager', 'so-managersearch', 'so-import'] %}
+- type: log
+paths:
+- /logs/logscan/alerts.log
+fields:
+module: logscan
+dataset: alert
+processors:
+- drop_fields:
+fields: ["source", "prospector", "input", "offset", "beat"]
+fields_under_root: true
+clean_removed: true
+close_removed: false
+{%- endif %}
 {%- if grains['role'] in ['so-eval', 'so-standalone', 'so-sensor', 'so-helix', 'so-heavynode', 'so-import'] %}
 {%- if ZEEKVER != 'SURICATA' %}
 {%- for LOGNAME in salt['pillar.get']('zeeklogs:enabled', '') %}
@@ -294,6 +309,9 @@ output.elasticsearch:
 - index: "so-strelka"
 when.contains:
 module: "strelka"
+- index: "so-logscan"
+when.contains:
+module: "logscan"
 setup.template.enabled: false
 {%- else %}


@@ -44,6 +44,12 @@ grafanadashdir:
 - group: 939
 - makedirs: True
+{% for type in ['eval','manager','managersearch','search_nodes','sensor_nodes','standalone'] %}
+remove_dashboard_dir_{{type}}:
+file.absent:
+- name: /opt/so/conf/grafana/grafana_dashboards/{{type}}
+{% endfor %}
 grafana-dashboard-config:
 file.managed:
 - name: /opt/so/conf/grafana/etc/dashboards/dashboard.yml
@@ -82,6 +88,11 @@ grafana-config-files:
 - source: salt://grafana/etc/files
 - makedirs: True
+so-grafana-dashboard-folder-delete:
+cmd.run:
+- name: /usr/sbin/so-grafana-dashboard-folder-delete
+- unless: ls /opt/so/state/so-grafana-dashboard-folder-delete-complete
 {% for dashboard in DASHBOARDS %}
 {{dashboard}}-dashboard:
 file.managed:


@@ -0,0 +1,7 @@
[global]
ts_format = iso8601
scan_interval = 30s
log_level = info

[kratos]
log_path = kratos/kratos.log

salt/learn/init.sls

@@ -0,0 +1,19 @@
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %}

{% set module_dict = salt['pillar.get']('learn:modules', {} ) %}
{% if module_dict.items()|length != 0 %}

include:
{% for module, _ in module_dict.items() %}
  - 'learn.{{ module }}'
{% endfor %}

{% endif %}

{% else %}

{{sls}}_state_not_allowed:
  test.fail_without_changes:
    - name: {{sls}}_state_not_allowed

{% endif %}
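A minimal sketch of what the include list renders to, using plain jinja2 and stubbing the pillar lookup that Salt normally provides (assumes the jinja2 package; the real state additionally consults allowed_states):

from jinja2 import Template

tpl = Template("""include:
{% for module in modules %}  - 'learn.{{ module }}'
{% endfor %}""")

# With pillar learn:modules containing only logscan:
print(tpl.render(modules={'logscan': {'enabled': True}}))
# include:
#   - 'learn.logscan'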

salt/learn/logscan.sls

@@ -0,0 +1,56 @@
{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{% set MANAGER = salt['grains.get']('master') %}
{% set logscan_cpu_period = salt['pillar.get']('learn:modules:logscan:cpu_period', 20000) %}
{% set enabled = salt['pillar.get']('learn:modules:logscan:enabled', False) %}

{% if enabled %}
{% set container_action = 'running' %}
{% else %}
{% set container_action = 'absent' %}
{% endif %}

logscan_data_dir:
  file.directory:
    - name: /nsm/logscan/data
    - user: 939
    - group: 939
    - makedirs: True

logscan_conf_dir:
  file.directory:
    - name: /opt/so/conf/logscan
    - user: 939
    - group: 939
    - makedirs: True

logscan_conf:
  file.managed:
    - name: /opt/so/conf/logscan/logscan.conf
    - source: salt://learn/files/logscan.conf
    - user: 939
    - group: 939
    - mode: 600

logscan_log_dir:
  file.directory:
    - name: /opt/so/log/logscan
    - user: 939
    - group: 939

so-logscan:
  docker_container.{{ container_action }}:
{% if container_action == 'running' %}
    - image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-logscan:{{ VERSION }}
    - hostname: logscan
    - name: so-logscan
    - binds:
      - /nsm/logscan/data:/logscan/data:rw
      - /opt/so/conf/logscan/logscan.conf:/logscan/logscan.conf:ro
      - /opt/so/log/logscan:/logscan/output:rw
      - /opt/so/log:/logscan/logs:ro
    - cpu_period: {{ logscan_cpu_period }}
{% else %}
    - force: true
{% endif %}


@@ -0,0 +1,27 @@
{%- if grains['role'] == 'so-eval' -%}
{%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %}
{%- set ES_USER = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:user', '') %}
{%- set ES_PASS = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:pass', '') %}

output {
  if [module] =~ "logscan" {
    elasticsearch {
      id => "logscan_pipeline"
      pipeline => "logscan.alert"
      hosts => "{{ ES }}"
      {% if salt['pillar.get']('elasticsearch:auth:enabled') is sameas true %}
      user => "{{ ES_USER }}"
      password => "{{ ES_PASS }}"
      {% endif %}
      index => "so-logscan"
      template_name => "so-common"
      template => "/templates/so-common-template.json"
      template_overwrite => true
      ssl => true
      ssl_certificate_verification => false
    }
  }
}


@@ -7,7 +7,7 @@ suricata:
 dir: /nsm/extracted
 #write-fileinfo: "yes"
 #force-filestore: "yes"
-#stream-depth: 0
+stream-depth: 0
 #max-open-files: 1000
 #force-hash: [sha1, md5]
 xff:


@@ -11,7 +11,6 @@
 {% set FILEBEAT = salt['pillar.get']('filebeat:enabled', True) %}
 {% set KIBANA = salt['pillar.get']('kibana:enabled', True) %}
 {% set LOGSTASH = salt['pillar.get']('logstash:enabled', True) %}
-{% set CURATOR = salt['pillar.get']('curator:enabled', True) %}
 {% set REDIS = salt['pillar.get']('redis:enabled', True) %}
 {% set STRELKA = salt['pillar.get']('strelka:enabled', '0') %}
 {% import_yaml 'salt/minion.defaults.yaml' as saltversion %}
@@ -127,9 +126,7 @@ base:
 {%- if FILEBEAT %}
 - filebeat
 {%- endif %}
-{%- if CURATOR %}
 - curator
-{%- endif %}
 {%- if ELASTALERT %}
 - elastalert
 {%- endif %}
@@ -156,6 +153,7 @@ base:
 {%- endif %}
 - docker_clean
 - pipeline.load
+- learn

 '*_manager and G@saltversion:{{saltversion}}':
 - match: compound
@@ -191,6 +189,7 @@ base:
 {%- if KIBANA %}
 - kibana
 {%- endif %}
+- curator
 {%- if ELASTALERT %}
 - elastalert
 {%- endif %}
@@ -218,6 +217,7 @@ base:
 {%- endif %}
 - docker_clean
 - pipeline.load
+- learn

 '*_standalone and G@saltversion:{{saltversion}}':
 - match: compound
@@ -265,9 +265,7 @@ base:
 {%- if FILEBEAT %}
 - filebeat
 {%- endif %}
-{%- if CURATOR %}
 - curator
-{%- endif %}
 {%- if ELASTALERT %}
 - elastalert
 {%- endif %}
@@ -292,6 +290,7 @@ base:
 {%- endif %}
 - docker_clean
 - pipeline.load
+- learn

 '*_searchnode and G@saltversion:{{saltversion}}':
 - match: compound
@@ -310,9 +309,7 @@ base:
 {%- if LOGSTASH %}
 - logstash
 {%- endif %}
-{%- if CURATOR %}
 - curator
-{%- endif %}
 {%- if FILEBEAT %}
 - filebeat
 {%- endif %}
@@ -354,9 +351,7 @@ base:
 {%- if REDIS %}
 - redis
 {%- endif %}
-{%- if CURATOR %}
 - curator
-{%- endif %}
 {%- if KIBANA %}
 - kibana
 {%- endif %}
@@ -366,7 +361,6 @@ base:
 {%- if FILEBEAT %}
 - filebeat
 {%- endif %}
-- utility
 - schedule
 {%- if FLEETMANAGER or FLEETNODE %}
@@ -388,6 +382,7 @@ base:
 {%- endif %}
 - docker_clean
 - pipeline.load
+- learn

 '*_heavynode and G@saltversion:{{saltversion}}':
 - match: compound
@@ -409,9 +404,7 @@ base:
 {%- if REDIS %}
 - redis
 {%- endif %}
-{%- if CURATOR %}
 - curator
-{%- endif %}
 {%- if FILEBEAT %}
 - filebeat
 {%- endif %}
@@ -478,3 +471,4 @@ base:
 - schedule
 - docker_clean
 - pipeline.load
+- learn


@@ -0,0 +1,77 @@
#!/bin/bash
# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
TESTING=true
address_type=DHCP
ADMINUSER=onionuser
ADMINPASS1=onionuser
ADMINPASS2=onionuser
ALLOW_CIDR=0.0.0.0/0
ALLOW_ROLE=a
BASICZEEK=2
BASICSURI=2
# BLOGS=
BNICS=eth1
ZEEKVERSION=ZEEK
# CURCLOSEDAYS=
# EVALADVANCED=BASIC
GRAFANA=1
# HELIXAPIKEY=
HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
HNSENSOR=inherit
HOSTNAME=standalone
install_type=STANDALONE
LEARN_LOGSCAN_ENABLE=true
# LSINPUTBATCHCOUNT=
# LSINPUTTHREADS=
# LSPIPELINEBATCH=
# LSPIPELINEWORKERS=
MANAGERADV=BASIC
# MDNS=
# MGATEWAY=
# MIP=
# MMASK=
MNIC=eth0
# MSEARCH=
# MSRV=
# MTU=
NIDS=Suricata
# NODE_ES_HEAP_SIZE=
# NODE_LS_HEAP_SIZE=
NODESETUP=NODEBASIC
NSMSETUP=BASIC
NODEUPDATES=MANAGER
# OINKCODE=
OSQUERY=1
# PATCHSCHEDULEDAYS=
# PATCHSCHEDULEHOURS=
PATCHSCHEDULENAME=auto
PLAYBOOK=1
# REDIRECTHOST=
REDIRECTINFO=IP
RULESETUP=ETOPEN
# SHARDCOUNT=
# SKIP_REBOOT=
SOREMOTEPASS1=onionuser
SOREMOTEPASS2=onionuser
STRELKA=1
THEHIVE=1
WAZUH=1
WEBUSER=onionuser@somewhere.invalid
WEBPASSWD1=0n10nus3r
WEBPASSWD2=0n10nus3r


@@ -962,6 +962,12 @@ else
 set_progress_str 99 'Waiting for TheHive to start up'
 check_hive_init >> $setup_log 2>&1
 fi
+if [[ -n $LEARN_LOGSCAN_ENABLE ]]; then
+set_progress_str 99 'Enabling logscan'
+so-learn enable logscan --apply >> $setup_log 2>&1
+fi
 } | whiptail_gauge_post_setup "Running post-installation steps..."
 whiptail_setup_complete