Merge remote-tracking branch 'remotes/origin/dev' into issue/2806

This commit is contained in:
m0duspwnens
2021-08-13 16:19:57 -04:00
24 changed files with 665 additions and 35 deletions

1
HOTFIX
View File

@@ -0,0 +1 @@

View File

@@ -13,3 +13,4 @@ logstash:
- so/9500_output_beats.conf.jinja - so/9500_output_beats.conf.jinja
- so/9600_output_ossec.conf.jinja - so/9600_output_ossec.conf.jinja
- so/9700_output_strelka.conf.jinja - so/9700_output_strelka.conf.jinja
- so/9800_output_logscan.conf.jinja

View File

@@ -45,7 +45,8 @@
'schedule', 'schedule',
'soctopus', 'soctopus',
'tcpreplay', 'tcpreplay',
'docker_clean' 'docker_clean',
'learn'
], ],
'so-heavynode': [ 'so-heavynode': [
'ca', 'ca',
@@ -108,7 +109,8 @@
'zeek', 'zeek',
'schedule', 'schedule',
'tcpreplay', 'tcpreplay',
'docker_clean' 'docker_clean',
'learn'
], ],
'so-manager': [ 'so-manager': [
'salt.master', 'salt.master',
@@ -127,7 +129,8 @@
'utility', 'utility',
'schedule', 'schedule',
'soctopus', 'soctopus',
'docker_clean' 'docker_clean',
'learn'
], ],
'so-managersearch': [ 'so-managersearch': [
'salt.master', 'salt.master',
@@ -146,7 +149,8 @@
'utility', 'utility',
'schedule', 'schedule',
'soctopus', 'soctopus',
'docker_clean' 'docker_clean',
'learn'
], ],
'so-node': [ 'so-node': [
'ca', 'ca',
@@ -178,7 +182,8 @@
'schedule', 'schedule',
'soctopus', 'soctopus',
'tcpreplay', 'tcpreplay',
'docker_clean' 'docker_clean',
'learn'
], ],
'so-sensor': [ 'so-sensor': [
'ca', 'ca',

View File

@@ -22,6 +22,7 @@
/opt/so/log/salt/so-salt-minion-check /opt/so/log/salt/so-salt-minion-check
/opt/so/log/salt/minion /opt/so/log/salt/minion
/opt/so/log/salt/master /opt/so/log/salt/master
/opt/so/log/logscan/*.log
{ {
{{ logrotate_conf | indent(width=4) }} {{ logrotate_conf | indent(width=4) }}
} }

View File

@@ -0,0 +1,17 @@
# this script is used to delete the default Grafana dashboard folders that existed prior to Grafana dashboard and Salt management changes in 2.3.70
# Fetch every Grafana folder over the HTTP API; each record is base64-encoded
# so it survives word-splitting in the for loop below even if titles contain spaces.
folders=$(curl -X GET http://admin:{{salt['pillar.get']('secrets:grafana_admin')}}@localhost:3000/api/folders | jq -r '.[] | @base64')
# Legacy default folder titles (pre-2.3.70) that should be removed.
delfolder=("Manager" "Manager Search" "Sensor Nodes" "Search Nodes" "Standalone" "Eval Mode")
for row in $folders; do
title=$(echo ${row} | base64 --decode | jq -r '.title')
uid=$(echo ${row} | base64 --decode | jq -r '.uid')
# Delete only folders whose title exactly matches one of the legacy defaults.
if [[ " ${delfolder[@]} " =~ " ${title} " ]]; then
curl -X DELETE http://admin:{{salt['pillar.get']('secrets:grafana_admin')}}@localhost:3000/api/folders/$uid
fi
done
# Drop a state-marker file so the salt 'unless' guard skips this script on future highstates.
echo "so-grafana-dashboard-folder-delete has been run to delete default Grafana dashboard folders that existed prior to 2.3.70" > /opt/so/state/so-grafana-dashboard-folder-delete-complete
exit 0

View File

@@ -0,0 +1,58 @@
#!/bin/bash
#
# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

. /usr/sbin/so-common
. /usr/sbin/so-image-common

# Print usage text and exit with a non-zero status.
usage() {
	read -r -d '' message <<- EOM
		usage: so-image-pull [-h] IMAGE [IMAGE ...]

		positional arguments:
		  IMAGE       One or more 'so-' prefixed images to download and verify.

		optional arguments:
		  -h, --help  Show this help message and exit.
	EOM
	echo "$message"
	exit 1
}

# Filter --quiet/-q out of the positional arguments, remembering that it was seen.
for arg; do
	shift
	[[ "$arg" = "--quiet" || "$arg" = "-q" ]] && quiet=true && continue
	set -- "$@" "$arg"
done

# Fix: the usage text and the loop below both support multiple images, but the
# original guard ($# -gt 1) rejected more than one. Accept any non-zero count.
if [[ $# -eq 0 ]] || [[ $1 == '-h' || $1 == '--help' ]]; then
	usage
fi

TRUSTED_CONTAINERS=("$@")

set_version

for image in "${TRUSTED_CONTAINERS[@]}"; do
	# Pull only when no locally-registered (":5000") image exists at this version.
	if ! docker images | grep "$image" | grep ":5000" | grep -q "$VERSION"; then
		if [[ $quiet == true ]]; then
			update_docker_containers "$image" "" "" "/dev/null"
		else
			update_docker_containers "$image" "" "" ""
		fi
	else
		echo "$image:$VERSION image exists."
	fi
done

0
salt/common/tools/sbin/so-influxdb-drop-autogen Normal file → Executable file
View File

303
salt/common/tools/sbin/so-learn Executable file
View File

@@ -0,0 +1,303 @@
#!/usr/bin/env python3
# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from itertools import chain
from typing import List
import signal
import sys
import os
import re
import subprocess
import argparse
import textwrap
import yaml
import multiprocessing
import docker
import pty
# Directory on the manager holding the per-minion pillar .sls files.
minion_pillar_dir = '/opt/so/saltstack/local/pillar/minions'
# Container-name list maintained for so-status; mod_so_status() adds/removes entries here.
so_status_conf = '/opt/so/conf/so-status/so-status.conf'
# Last spawned child process, intended for sigint_handler to forward Ctrl-C to.
proc: subprocess.CompletedProcess = None
# Temp store of modules, will likely be broken out into salt
def get_learn_modules():
    """Return the catalog of available ML modules with their default settings.

    Temporary in-script store; per the original note, likely to be broken out
    into salt pillar data later.
    """
    catalog = {
        'logscan': {
            'cpu_period': get_cpu_period(fraction=0.25),
            'enabled': False,
            'description': 'Scan log files against pre-trained models to alert on anomalies.',
        },
    }
    return catalog
def get_cpu_period(fraction: float) -> int:
    """Compute a docker cpu_period quota for the requested fraction of host cores.

    Hosts with two or fewer cores always receive the full core count so small
    machines are not starved.

    Args:
        fraction: Fraction of available cores to allot (ignored on <=2 cores).

    Returns:
        Core count (truncated toward zero) multiplied by 10000.
    """
    multiplier = 10000
    total_cores = multiprocessing.cpu_count()
    if total_cores <= 2:
        fraction = 1.
    return int(total_cores * fraction) * multiplier
def sigint_handler(*_):
    # Ctrl-C handler: forward SIGINT to any tracked child process, then exit 1.
    # NOTE(review): `proc` is only ever assigned as a *local* inside salt_call()
    # and pull_image() (no `global proc`), so the module-level `proc` read here
    # appears to always be None — confirm intent.
    print('Exiting gracefully on Ctrl-C')
    if proc is not None: proc.send_signal(signal.SIGINT)
    sys.exit(1)
def find_minion_pillar() -> str:
    """Locate the single manager-type minion pillar file.

    Walks ``minion_pillar_dir`` for a pillar whose minion id ends in one of
    the manager roles (manager, managersearch, standalone, import, eval).

    Returns:
        Absolute path to the matching pillar .sls file.

    Exits:
        Status 3 when no match is found (not a manager) or when more than one
        is found (inconsistent system state).
    """
    # Raw string so \. is a regex escape rather than a (deprecated) string escape.
    regex = r'^.*_(manager|managersearch|standalone|import|eval)\.sls$'
    result = []
    for root, _, files in os.walk(minion_pillar_dir):
        for f_minion_id in files:
            if re.search(regex, f_minion_id):
                result.append(os.path.join(root, f_minion_id))
    if len(result) == 0:
        print('Could not find manager-type pillar (eval, standalone, manager, managersearch, import). Are you running this script on the manager?', file=sys.stderr)
        sys.exit(3)
    elif len(result) > 1:
        # Fix: quote each path individually. The original joined a single
        # f-string of the whole list, which inserts ', ' between every
        # *character* of that string.
        res_str = ', '.join(f'"{path}"' for path in result)
        print('(This should not happen, the system is in an error state if you see this message.)\n', file=sys.stderr)
        print('More than one manager-type pillar exists, minion id\'s listed below:', file=sys.stderr)
        print(f'  {res_str}', file=sys.stderr)
        sys.exit(3)
    else:
        return result[0]
def read_pillar(pillar: str):
    """Load and parse a YAML pillar file.

    Args:
        pillar: Path to the pillar .sls file.

    Returns:
        The parsed YAML as a dict.

    Exits:
        Status 3 when the file cannot be opened, cannot be parsed, or is empty.
    """
    # Fix: the original bare `except:` also caught the SystemExit raised by the
    # inner sys.exit(3), misreporting a parse failure as "Could not open".
    # The empty-file check now lives outside the try, and the handlers are
    # narrowed so each failure mode prints the correct message.
    try:
        with open(pillar, 'r') as pillar_file:
            loaded_yaml = yaml.safe_load(pillar_file.read())
    except OSError:
        print(f'Could not open {pillar}', file=sys.stderr)
        sys.exit(3)
    except yaml.YAMLError:
        print(f'Could not parse {pillar}', file=sys.stderr)
        sys.exit(3)
    if loaded_yaml is None:
        print(f'Could not parse {pillar}', file=sys.stderr)
        sys.exit(3)
    return loaded_yaml
def write_pillar(pillar: str, content: dict):
    """Serialize ``content`` to the pillar file as block-style YAML.

    Args:
        pillar: Path to the pillar .sls file to overwrite.
        content: Pillar data to dump.

    Exits:
        Status 3 when the file cannot be written or the data cannot be dumped.
    """
    try:
        with open(pillar, 'w') as pillar_file:
            yaml.dump(content, pillar_file, default_flow_style=False)
    except (OSError, yaml.YAMLError):
        # Narrowed from a bare except so SystemExit/KeyboardInterrupt propagate.
        print(f'Could not open {pillar}', file=sys.stderr)
        sys.exit(3)
def mod_so_status(action: str, item: str):
    """Add or remove the 'so-<item>' line in the so-status container list.

    Args:
        action: 'add' appends the entry if missing; 'remove' deletes it if
            present; any other value only strips blank lines.
        item: Module name; stored as 'so-<item>'.
    """
    entry = f'so-{item}\n'
    # a+ creates the file if it does not exist yet.
    with open(so_status_conf, 'a+') as conf:
        conf.seek(0)
        containers = conf.readlines()
        if action == 'remove' and entry in containers:
            containers.remove(entry)
        elif action == 'add' and entry not in containers:
            containers.append(entry)
        # Fix: filter blank lines instead of calling list.remove() while
        # iterating, which skipped every second consecutive blank line.
        containers = [name for name in containers if name != '\n']
        conf.seek(0)
        conf.truncate(0)
        conf.writelines(containers)
def create_pillar_if_not_exist(pillar: str, content: dict):
    """Ensure learn:modules exists in the pillar, seeding defaults when missing.

    Args:
        pillar: Path to the pillar file (written only when seeding occurs).
        content: Parsed pillar data; mutated in place.

    Returns:
        The (possibly updated) pillar content dict.
    """
    # `or {}` also handles an explicit `learn:` null value, which would have
    # crashed the original's chained .get(). Reassigning under 'learn'
    # preserves any sibling keys instead of clobbering the whole mapping;
    # the no-op content.update() was dropped.
    learn = content.get('learn') or {}
    if learn.get('modules') is None:
        learn['modules'] = get_learn_modules()
        content['learn'] = learn
        write_pillar(pillar, content)
    return content
def salt_call(module: str):
    """Apply the learn.<module> salt state, discarding salt-call's output.

    Args:
        module: ML module name; applied as state 'learn.<module>'.

    Returns:
        The salt-call process exit code (0 on success).
    """
    cmd = ['salt-call', 'state.apply', '-l', 'quiet', f'learn.{module}', 'queue=True']
    print(f' Applying salt state for {module} module...')
    completed = subprocess.run(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
    if completed.returncode != 0:
        print(f' [ERROR] Failed to apply salt state for {module} module.')
    return completed.returncode
def pull_image(module: str):
    """Pull the so-<module> docker image via so-image-pull when not present.

    Checks the local docker image tags for a ':5000' (local registry) tag
    matching the module's container name before pulling.

    Args:
        module: ML module name; image is 'so-<module>'.

    Returns:
        0 when the image already exists or the pull succeeded, otherwise the
        so-image-pull exit code.
    """
    container_basename = f'so-{module}'
    client = docker.from_env()
    # Flatten every image's RepoTags into one list of tag strings.
    tags = []
    for image in client.images.list(filters={'dangling': False}):
        tags.extend(image.attrs.get('RepoTags'))
    registry_tags = [tag for tag in tags
                     if container_basename in tag and ':5000' in tag]
    if registry_tags:
        return 0
    print(f'Pulling and verifying missing image for {module} (may take several minutes) ...')
    completed = subprocess.run(['so-image-pull', '--quiet', container_basename],
                               stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
    if completed.returncode != 0:
        print(f'[ERROR] Failed to pull image so-{module}, skipping state.')
    return completed.returncode
def apply(module_list: List):
    """Apply the salt state for each module in order.

    Args:
        module_list: Module names to apply.

    Returns:
        0 when every state applied cleanly, otherwise the most recent
        non-zero salt_call exit code.
    """
    final_rc = 0
    for module in module_list:
        rc = salt_call(module)
        # Keep the last failure; successes never reset an earlier failure code.
        if rc != 0:
            final_rc = rc
    return final_rc
def check_apply(args: dict):
    """Apply pending changes now (--apply) or after prompting the user.

    Args:
        args: Parsed CLI namespace with .apply and .modules.

    Returns:
        The apply() exit code, or 0 when the user declines.
    """
    if args.apply:
        print('Configuration updated. Applying changes:')
        return apply(args.modules)
    message = 'Configuration updated. Would you like to apply your changes now? (y/N) '
    # Re-prompt until the answer is y, n, or empty (empty defaults to no).
    answer = input(message)
    while answer.lower() not in ('y', 'n', ''):
        answer = input(message)
    if answer.lower() != 'y':
        return 0
    print('Applying changes:')
    return apply(args.modules)
def enable_disable_modules(args, enable: bool):
    """Toggle the requested learn modules on or off.

    Updates the 'enabled' flag in the pillar, adds/removes the container in
    so-status.conf, pulls missing images when enabling, writes the pillar
    back, and finally offers to apply the salt states via check_apply().

    Args:
        args: Parsed CLI namespace; uses .modules, .pillar, .pillar_dict, .apply.
        enable: True to enable, False to disable.

    Returns:
        The check_apply() exit code.
    """
    pillar_modules = args.pillar_dict.get('learn', {}).get('modules')
    pillar_mod_names = args.pillar_dict.get('learn', {}).get('modules').keys()
    action_str = 'add' if enable else 'remove'
    if 'all' in args.modules:
        # Keyword 'all': flip every configured module unconditionally.
        for module, details in pillar_modules.items():
            details['enabled'] = enable
            mod_so_status(action_str, module)
            if enable: pull_image(module)
        args.pillar_dict.update()
        write_pillar(args.pillar, args.pillar_dict)
    else:
        write_needed = False
        for module in args.modules:
            if module in pillar_mod_names:
                if pillar_modules[module]['enabled'] == enable:
                    # Already in the requested state; report and skip.
                    state_str = 'enabled' if enable else 'disabled'
                    print(f'{module} module already {state_str}.', file=sys.stderr)
                else:
                    # When enabling, skip the module if its image cannot be pulled.
                    if enable and pull_image(module) != 0:
                        continue
                    pillar_modules[module]['enabled'] = enable
                    mod_so_status(action_str, module)
                    write_needed = True
        # Only rewrite the pillar when at least one module actually changed.
        if write_needed:
            args.pillar_dict.update()
            write_pillar(args.pillar, args.pillar_dict)
    # NOTE(review): check_apply() runs even when nothing changed — confirm
    # whether prompting in that case is intended.
    cmd_ret = check_apply(args)
    return cmd_ret
def enable_modules(args):
    """CLI handler for 'enable'; returns the exit code for sys.exit().

    Fix: the original dropped the return value, so main()'s
    sys.exit(exit_code) always exited 0 even when enabling failed.
    """
    return enable_disable_modules(args, enable=True)

def disable_modules(args):
    """CLI handler for 'disable'; returns the exit code for sys.exit().

    Fix: return value was previously dropped (see enable_modules).
    """
    return enable_disable_modules(args, enable=False)
def list_modules(*_):
    """Print every available ML module with its description.

    Returns:
        0 (always succeeds).
    """
    print('Available ML modules:')
    modules = get_learn_modules()
    for name in modules:
        description = modules[name]["description"]
        print(f' - { name } : {description}')
    return 0
def main():
    """Entry point: parse CLI arguments, ensure the learn pillar, dispatch.

    Exits:
        1 when not run as root; 0 for help/no-op; otherwise the handler's
        exit code.
    """
    beta_str = 'BETA - SUBJECT TO CHANGE\n'
    apply_help = 'After ACTION the chosen modules, apply any necessary salt states.'
    enable_apply_help = apply_help.replace('ACTION', 'enabling')
    disable_apply_help = apply_help.replace('ACTION', 'disabling')
    signal.signal(signal.SIGINT, sigint_handler)
    # Pillar and so-status edits require root.
    if os.geteuid() != 0:
        print('You must run this script as root', file=sys.stderr)
        sys.exit(1)
    main_parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter)
    subcommand_desc = textwrap.dedent(
        """\
        enable    Enable one or more ML modules.
        disable   Disable one or more ML modules.
        list      List all available ML modules.
        """
    )
    subparsers = main_parser.add_subparsers(title='commands', description=subcommand_desc, metavar='', dest='command')
    module_help_str = 'One or more ML modules, which can be listed using \'so-learn list\'. Use the keyword \'all\' to apply the action to all available modules.'
    enable = subparsers.add_parser('enable')
    enable.set_defaults(func=enable_modules)
    enable.add_argument('modules', metavar='ML_MODULE', nargs='+', help=module_help_str)
    enable.add_argument('--apply', action='store_const', const=True, required=False, help=enable_apply_help)
    disable = subparsers.add_parser('disable')
    disable.set_defaults(func=disable_modules)
    disable.add_argument('modules', metavar='ML_MODULE', nargs='+', help=module_help_str)
    disable.add_argument('--apply', action='store_const', const=True, required=False, help=disable_apply_help)
    # Fix: renamed from 'list' so the builtin is not shadowed.
    list_parser = subparsers.add_parser('list')
    list_parser.set_defaults(func=list_modules)
    args = main_parser.parse_args(sys.argv[1:])
    # Must run on the manager: locate its pillar and seed learn:modules if absent.
    args.pillar = find_minion_pillar()
    args.pillar_dict = create_pillar_if_not_exist(args.pillar, read_pillar(args.pillar))
    if hasattr(args, 'func'):
        # Fix: exit directly with the handler's code; the original could hit
        # sys.exit(exit_code) with exit_code unbound on the no-func path.
        sys.exit(args.func(args))
    # No subcommand given: show the beta banner and usage, then exit cleanly.
    print(beta_str)
    main_parser.print_help()
    sys.exit(0)

if __name__ == '__main__':
    main()

View File

@@ -31,7 +31,7 @@ if [[ $# -lt 1 ]]; then
echo "Usage: $0 <pcap-sample(s)>" echo "Usage: $0 <pcap-sample(s)>"
echo echo
echo "All PCAPs must be placed in the /opt/so/samples directory unless replaying" echo "All PCAPs must be placed in the /opt/so/samples directory unless replaying"
echo "a sample pcap that is included in the so-tcpreplay image. Those PCAP sampes" echo "a sample pcap that is included in the so-tcpreplay image. Those PCAP samples"
echo "are located in the /opt/samples directory inside of the image." echo "are located in the /opt/samples directory inside of the image."
echo echo
echo "Customer provided PCAP example:" echo "Customer provided PCAP example:"

View File

@@ -306,7 +306,7 @@ function updateStatus() {
[[ $? != 0 ]] && fail "Unable to unlock credential record" [[ $? != 0 ]] && fail "Unable to unlock credential record"
fi fi
updatedJson=$(echo "$response" | jq ".traits.status = \"$status\" | del(.verifiable_addresses) | del(.id) | del(.schema_url)") updatedJson=$(echo "$response" | jq ".traits.status = \"$status\" | del(.verifiable_addresses) | del(.id) | del(.schema_url) | del(.created_at) | del(.updated_at)")
response=$(curl -Ss -XPUT -L ${kratosUrl}/identities/$identityId -d "$updatedJson") response=$(curl -Ss -XPUT -L ${kratosUrl}/identities/$identityId -d "$updatedJson")
[[ $? != 0 ]] && fail "Unable to mark user as locked" [[ $? != 0 ]] && fail "Unable to mark user as locked"

View File

@@ -0,0 +1,29 @@
{
"description": "logscan",
"processors": [
{ "set": { "field": "event.severity", "value": 2 } },
{ "json": { "field": "message", "add_to_root": true, "ignore_failure": true } },
{ "rename": { "field": "@timestamp", "target_field": "event.ingested", "ignore_missing": true } },
{ "date": { "field": "timestamp", "target_field": "event.created", "formats": [ "ISO8601", "UNIX" ], "ignore_failure": true } },
{ "date": { "field": "start_time", "target_field": "@timestamp", "formats": [ "ISO8601", "UNIX" ], "ignore_failure": true } },
{ "date": { "field": "start_time", "target_field": "event.start", "formats": [ "ISO8601", "UNIX" ], "ignore_failure": true } },
{ "date": { "field": "end_time", "target_field": "event.end", "formats": [ "ISO8601", "UNIX" ], "ignore_failure": true } },
{ "remove": { "field": "start_time", "ignore_missing": true } },
{ "remove": { "field": "end_time", "ignore_missing": true } },
{ "rename": { "field": "source_ip", "target_field": "source.ip", "ignore_missing": true } },
{ "rename": { "field": "top_source_ips", "target_field": "logscan.source.ips", "ignore_missing": true } },
{ "append": { "if": "ctx.source != null", "field": "logscan.source.ips", "value": "{{{source.ip}}}", "ignore_failure": true } },
{ "set": { "if": "ctx.model == 'k1'", "field": "rule.name", "value": "LOGSCAN K1 MODEL THRESHOLD" } },
{ "set": { "if": "ctx.model == 'k1'", "field": "rule.description", "value": "High number of logins from single IP in 1 minute window" } },
{ "set": { "if": "ctx.model == 'k5'", "field": "rule.name", "value": "LOGSCAN K5 MODEL THRESHOLD" } },
{ "set": { "if": "ctx.model == 'k5'", "field": "rule.description", "value": "High ratio of login failures from single IP in 5 minute window" } },
{ "set": { "if": "ctx.model == 'k60'", "field": "rule.name", "value": "LOGSCAN K60 MODEL THRESHOLD" } },
{ "set": { "if": "ctx.model == 'k60'", "field": "rule.description", "value": "Large number of login failures in 1 hour window" } },
{ "rename": { "field": "model", "target_field": "logscan.model" } },
{ "rename": { "field": "num_attempts", "target_field": "logscan.attempts.total.amount", "ignore_missing": true } },
{ "rename": { "field": "num_failed", "target_field": "logscan.attempts.failed.amount", "ignore_missing": true } },
{ "script": { "lang": "painless", "source": "ctx.logscan.attempts.succeeded.amount = ctx.logscan.attempts.total.amount - ctx.logscan.attempts.failed.amount" , "ignore_failure": true} },
{ "rename": { "field": "avg_failure_interval", "target_field": "logscan.attempts.failed.avg_interval", "ignore_missing": true } },
{ "pipeline": { "name": "common" } }
]
}

View File

@@ -13,6 +13,7 @@
{ "rename": { "field": "message2.fileinfo.size", "target_field": "file.size", "ignore_missing": true } }, { "rename": { "field": "message2.fileinfo.size", "target_field": "file.size", "ignore_missing": true } },
{ "rename": { "field": "message2.fileinfo.state", "target_field": "file.state", "ignore_missing": true } }, { "rename": { "field": "message2.fileinfo.state", "target_field": "file.state", "ignore_missing": true } },
{ "rename": { "field": "message2.fileinfo.stored", "target_field": "file.saved", "ignore_missing": true } }, { "rename": { "field": "message2.fileinfo.stored", "target_field": "file.saved", "ignore_missing": true } },
{ "rename": { "field": "message2.fileinfo.sha256", "target_field": "hash.sha256", "ignore_missing": true } },
{ "set": { "if": "ctx.network?.protocol != null", "field": "file.source", "value": "{{network.protocol}}" } }, { "set": { "if": "ctx.network?.protocol != null", "field": "file.source", "value": "{{network.protocol}}" } },
{ "pipeline": { "name": "common" } } { "pipeline": { "name": "common" } }
] ]

View File

@@ -313,6 +313,10 @@
"type":"object", "type":"object",
"dynamic": true "dynamic": true
}, },
"logscan": {
"type": "object",
"dynamic": true
},
"manager":{ "manager":{
"type":"object", "type":"object",
"dynamic": true "dynamic": true

View File

@@ -112,6 +112,21 @@ filebeat.inputs:
fields: ["source", "prospector", "input", "offset", "beat"] fields: ["source", "prospector", "input", "offset", "beat"]
fields_under_root: true fields_under_root: true
{%- if grains['role'] in ['so-eval', 'so-standalone', 'so-manager', 'so-managersearch', 'so-import'] %}
- type: log
paths:
- /logs/logscan/alerts.log
fields:
module: logscan
dataset: alert
processors:
- drop_fields:
fields: ["source", "prospector", "input", "offset", "beat"]
fields_under_root: true
clean_removed: true
close_removed: false
{%- endif %}
{%- if grains['role'] in ['so-eval', 'so-standalone', 'so-sensor', 'so-helix', 'so-heavynode', 'so-import'] %} {%- if grains['role'] in ['so-eval', 'so-standalone', 'so-sensor', 'so-helix', 'so-heavynode', 'so-import'] %}
{%- if ZEEKVER != 'SURICATA' %} {%- if ZEEKVER != 'SURICATA' %}
{%- for LOGNAME in salt['pillar.get']('zeeklogs:enabled', '') %} {%- for LOGNAME in salt['pillar.get']('zeeklogs:enabled', '') %}
@@ -294,6 +309,9 @@ output.elasticsearch:
- index: "so-strelka" - index: "so-strelka"
when.contains: when.contains:
module: "strelka" module: "strelka"
- index: "so-logscan"
when.contains:
module: "logscan"
setup.template.enabled: false setup.template.enabled: false
{%- else %} {%- else %}

View File

@@ -44,6 +44,12 @@ grafanadashdir:
- group: 939 - group: 939
- makedirs: True - makedirs: True
{% for type in ['eval','manager','managersearch','search_nodes','sensor_nodes','standalone'] %}
remove_dashboard_dir_{{type}}:
file.absent:
- name: /opt/so/conf/grafana/grafana_dashboards/{{type}}
{% endfor %}
grafana-dashboard-config: grafana-dashboard-config:
file.managed: file.managed:
- name: /opt/so/conf/grafana/etc/dashboards/dashboard.yml - name: /opt/so/conf/grafana/etc/dashboards/dashboard.yml
@@ -82,6 +88,11 @@ grafana-config-files:
- source: salt://grafana/etc/files - source: salt://grafana/etc/files
- makedirs: True - makedirs: True
so-grafana-dashboard-folder-delete:
cmd.run:
- name: /usr/sbin/so-grafana-dashboard-folder-delete
- unless: ls /opt/so/state/so-grafana-dashboard-folder-delete-complete
{% for dashboard in DASHBOARDS %} {% for dashboard in DASHBOARDS %}
{{dashboard}}-dashboard: {{dashboard}}-dashboard:
file.managed: file.managed:

View File

@@ -0,0 +1,7 @@
[global]
ts_format = iso8601
scan_interval = 30s
log_level = info
[kratos]
log_path = kratos/kratos.log

19
salt/learn/init.sls Normal file
View File

@@ -0,0 +1,19 @@
{# learn/init.sls: include a learn.<module> state for every module configured
   under the learn:modules pillar key. #}
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %}

{% set module_dict = salt['pillar.get']('learn:modules', {} ) %}

{# Only emit an include block when at least one module is configured. #}
{% if module_dict.items()|length != 0 %}
include:
{% for module, _ in module_dict.items() %}
  - 'learn.{{ module }}'
{% endfor %}
{% endif %}

{% else %}

{# Fail visibly when this state is applied to a role it is not allowed on. #}
{{sls}}_state_not_allowed:
  test.fail_without_changes:
    - name: {{sls}}_state_not_allowed
{% endif %}

56
salt/learn/logscan.sls Normal file
View File

@@ -0,0 +1,56 @@
{# learn/logscan.sls: manage the so-logscan container. It is kept running when
   learn:modules:logscan:enabled is true, otherwise forcibly removed. #}
{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{% set MANAGER = salt['grains.get']('master') %}
{% set logscan_cpu_period = salt['pillar.get']('learn:modules:logscan:cpu_period', 20000) %}
{% set enabled = salt['pillar.get']('learn:modules:logscan:enabled', False) %}

{# Map the enabled flag onto the docker_container state function to invoke. #}
{% if enabled %}
{% set container_action = 'running' %}
{% else %}
{% set container_action = 'absent'%}
{% endif %}

{# Directories owned by UID/GID 939 — presumably the in-container service
   account; verify against the so-logscan image. #}
logscan_data_dir:
  file.directory:
    - name: /nsm/logscan/data
    - user: 939
    - group: 939
    - makedirs: True

logscan_conf_dir:
  file.directory:
    - name: /opt/so/conf/logscan
    - user: 939
    - group: 939
    - makedirs: True

logscan_conf:
  file.managed:
    - name: /opt/so/conf/logscan/logscan.conf
    - source: salt://learn/files/logscan.conf
    - user: 939
    - group: 939
    - mode: 600

logscan_log_dir:
  file.directory:
    - name: /opt/so/log/logscan
    - user: 939
    - group: 939

{# Run the container with config read-only, data/output writable, and host
   logs mounted read-only for scanning; cpu_period caps its CPU usage. #}
so-logscan:
  docker_container.{{ container_action }}:
{% if container_action == 'running' %}
    - image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-logscan:{{ VERSION }}
    - hostname: logscan
    - name: so-logscan
    - binds:
      - /nsm/logscan/data:/logscan/data:rw
      - /opt/so/conf/logscan/logscan.conf:/logscan/logscan.conf:ro
      - /opt/so/log/logscan:/logscan/output:rw
      - /opt/so/log:/logscan/logs:ro
    - cpu_period: {{ logscan_cpu_period }}
{% else %}
    - force: true
{% endif %}

View File

@@ -0,0 +1,27 @@
{# Logstash output: route events from the logscan module to Elasticsearch
   through the logscan.alert ingest pipeline into the so-logscan index. #}
{# On eval installs Elasticsearch listens on the manager IP; otherwise use the
   dedicated elasticsearch IP from the pillar. #}
{%- if grains['role'] == 'so-eval' -%}
{%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- else %}
{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
{%- endif %}
{%- set ES_USER = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:user', '') %}
{%- set ES_PASS = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:pass', '') %}

output {
  if [module] =~ "logscan" {
    elasticsearch {
      id => "logscan_pipeline"
      pipeline => "logscan.alert"
      hosts => "{{ ES }}"
{% if salt['pillar.get']('elasticsearch:auth:enabled') is sameas true %}
      user => "{{ ES_USER }}"
      password => "{{ ES_PASS }}"
{% endif %}
      index => "so-logscan"
      template_name => "so-common"
      template => "/templates/so-common-template.json"
      template_overwrite => true
      ssl => true
      # NOTE(review): certificate verification disabled — presumably an
      # internal self-signed CA; confirm.
      ssl_certificate_verification => false
    }
  }
}

View File

@@ -7,7 +7,7 @@ suricata:
dir: /nsm/extracted dir: /nsm/extracted
#write-fileinfo: "yes" #write-fileinfo: "yes"
#force-filestore: "yes" #force-filestore: "yes"
#stream-depth: 0 stream-depth: 0
#max-open-files: 1000 #max-open-files: 1000
#force-hash: [sha1, md5] #force-hash: [sha1, md5]
xff: xff:

View File

@@ -153,6 +153,7 @@ base:
{%- endif %} {%- endif %}
- docker_clean - docker_clean
- pipeline.load - pipeline.load
- learn
'*_manager and G@saltversion:{{saltversion}}': '*_manager and G@saltversion:{{saltversion}}':
- match: compound - match: compound
@@ -216,6 +217,7 @@ base:
{%- endif %} {%- endif %}
- docker_clean - docker_clean
- pipeline.load - pipeline.load
- learn
'*_standalone and G@saltversion:{{saltversion}}': '*_standalone and G@saltversion:{{saltversion}}':
- match: compound - match: compound
@@ -288,6 +290,7 @@ base:
{%- endif %} {%- endif %}
- docker_clean - docker_clean
- pipeline.load - pipeline.load
- learn
'*_searchnode and G@saltversion:{{saltversion}}': '*_searchnode and G@saltversion:{{saltversion}}':
- match: compound - match: compound
@@ -358,7 +361,6 @@ base:
{%- if FILEBEAT %} {%- if FILEBEAT %}
- filebeat - filebeat
{%- endif %} {%- endif %}
- utility - utility
- schedule - schedule
{%- if FLEETMANAGER or FLEETNODE %} {%- if FLEETMANAGER or FLEETNODE %}
@@ -380,6 +382,7 @@ base:
{%- endif %} {%- endif %}
- docker_clean - docker_clean
- pipeline.load - pipeline.load
- learn
'*_heavynode and G@saltversion:{{saltversion}}': '*_heavynode and G@saltversion:{{saltversion}}':
- match: compound - match: compound
@@ -468,3 +471,4 @@ base:
- schedule - schedule
- docker_clean - docker_clean
- pipeline.load - pipeline.load
- learn

View File

@@ -0,0 +1,77 @@
#!/bin/bash
# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Answer file for unattended setup used by automated testing: each variable
# below pre-answers a setup prompt so installation runs non-interactively.
TESTING=true
address_type=DHCP
ADMINUSER=onionuser
ADMINPASS1=onionuser
ADMINPASS2=onionuser
ALLOW_CIDR=0.0.0.0/0
ALLOW_ROLE=a
BASICZEEK=2
BASICSURI=2
# BLOGS=
BNICS=eth1
ZEEKVERSION=ZEEK
# CURCLOSEDAYS=
# EVALADVANCED=BASIC
GRAFANA=1
# HELIXAPIKEY=
HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
HNSENSOR=inherit
HOSTNAME=standalone
install_type=STANDALONE
# Enables the logscan ML module via 'so-learn enable logscan --apply' during post-setup.
LEARN_LOGSCAN_ENABLE=true
# LSINPUTBATCHCOUNT=
# LSINPUTTHREADS=
# LSPIPELINEBATCH=
# LSPIPELINEWORKERS=
MANAGERADV=BASIC
# MDNS=
# MGATEWAY=
# MIP=
# MMASK=
MNIC=eth0
# MSEARCH=
# MSRV=
# MTU=
NIDS=Suricata
# NODE_ES_HEAP_SIZE=
# NODE_LS_HEAP_SIZE=
NODESETUP=NODEBASIC
NSMSETUP=BASIC
NODEUPDATES=MANAGER
# OINKCODE=
OSQUERY=1
# PATCHSCHEDULEDAYS=
# PATCHSCHEDULEHOURS=
PATCHSCHEDULENAME=auto
PLAYBOOK=1
# REDIRECTHOST=
REDIRECTINFO=IP
RULESETUP=ETOPEN
# SHARDCOUNT=
# SKIP_REBOOT=
SOREMOTEPASS1=onionuser
SOREMOTEPASS2=onionuser
STRELKA=1
THEHIVE=1
WAZUH=1
WEBUSER=onionuser@somewhere.invalid
WEBPASSWD1=0n10nus3r
WEBPASSWD2=0n10nus3r

View File

@@ -962,6 +962,12 @@ else
set_progress_str 99 'Waiting for TheHive to start up' set_progress_str 99 'Waiting for TheHive to start up'
check_hive_init >> $setup_log 2>&1 check_hive_init >> $setup_log 2>&1
fi fi
if [[ -n $LEARN_LOGSCAN_ENABLE ]]; then
set_progress_str 99 'Enabling logscan'
so-learn enable logscan --apply >> $setup_log 2>&1
fi
} | whiptail_gauge_post_setup "Running post-installation steps..." } | whiptail_gauge_post_setup "Running post-installation steps..."
whiptail_setup_complete whiptail_setup_complete

View File

@@ -959,33 +959,18 @@ whiptail_management_interface_gateway() {
whiptail_management_interface_ip_mask() { whiptail_management_interface_ip_mask() {
[ -n "$TESTING" ] && return [ -n "$TESTING" ] && return
manager_ip_mask=$(whiptail --title "$whiptail_title" --inputbox \ local msg
"Enter your IPv4 address with CIDR mask (e.g. 192.168.1.2/24):" 10 60 "$1" 3>&1 1>&2 2>&3) read -r -d '' msg <<- EOM
What IPv4 address would you like to assign to this Security Onion installation?
Please enter the IPv4 address with CIDR mask
(e.g. 192.168.1.2/24):
EOM
manager_ip_mask=$(whiptail --title "$whiptail_title" --inputbox "$msg" 12 60 "$1" 3>&1 1>&2 2>&3)
local exitstatus=$? local exitstatus=$?
whiptail_check_exitstatus $exitstatus # whiptail_check_exitstatus $exitstatus
}
whiptail_management_interface_ip() {
[ -n "$TESTING" ] && return
MIP=$(whiptail --title "$whiptail_title" --inputbox \
"Enter your IP address:" 10 60 X.X.X.X 3>&1 1>&2 2>&3)
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
}
whiptail_management_interface_mask() {
[ -n "$TESTING" ] && return
MMASK=$(whiptail --title "$whiptail_title" --inputbox \
"Enter the bit mask for your subnet:" 10 60 24 3>&1 1>&2 2>&3)
local exitstatus=$?
whiptail_check_exitstatus $exitstatus
} }
whiptail_management_nic() { whiptail_management_nic() {
@@ -1734,7 +1719,7 @@ whiptail_so_allow_yesno() {
[ -n "$TESTING" ] && return [ -n "$TESTING" ] && return
whiptail --title "$whiptail_title" \ whiptail --title "$whiptail_title" \
--yesno "Do you want to run so-allow to allow access to the web tools?" \ --yesno "Do you want to run so-allow to allow other machines to access this Security Onion installation via the web interface?" \
8 75 8 75
} }