diff --git a/HOTFIX b/HOTFIX
index e69de29bb..8b1378917 100644
--- a/HOTFIX
+++ b/HOTFIX
@@ -0,0 +1 @@
+
diff --git a/pillar/logstash/search.sls b/pillar/logstash/search.sls
index 10fab2ed1..55b2070ce 100644
--- a/pillar/logstash/search.sls
+++ b/pillar/logstash/search.sls
@@ -13,3 +13,4 @@ logstash:
- so/9500_output_beats.conf.jinja
- so/9600_output_ossec.conf.jinja
- so/9700_output_strelka.conf.jinja
+ - so/9800_output_logscan.conf.jinja
diff --git a/salt/allowed_states.map.jinja b/salt/allowed_states.map.jinja
index 0c9de34b0..00b3e6007 100644
--- a/salt/allowed_states.map.jinja
+++ b/salt/allowed_states.map.jinja
@@ -45,7 +45,8 @@
'schedule',
'soctopus',
'tcpreplay',
- 'docker_clean'
+ 'docker_clean',
+ 'learn'
],
'so-heavynode': [
'ca',
@@ -108,7 +109,8 @@
'zeek',
'schedule',
'tcpreplay',
- 'docker_clean'
+ 'docker_clean',
+ 'learn'
],
'so-manager': [
'salt.master',
@@ -127,7 +129,8 @@
'utility',
'schedule',
'soctopus',
- 'docker_clean'
+ 'docker_clean',
+ 'learn'
],
'so-managersearch': [
'salt.master',
@@ -146,7 +149,8 @@
'utility',
'schedule',
'soctopus',
- 'docker_clean'
+ 'docker_clean',
+ 'learn'
],
'so-node': [
'ca',
@@ -178,7 +182,8 @@
'schedule',
'soctopus',
'tcpreplay',
- 'docker_clean'
+ 'docker_clean',
+ 'learn'
],
'so-sensor': [
'ca',
diff --git a/salt/common/files/log-rotate.conf b/salt/common/files/log-rotate.conf
index 061b76271..35c6fd724 100644
--- a/salt/common/files/log-rotate.conf
+++ b/salt/common/files/log-rotate.conf
@@ -22,6 +22,7 @@
/opt/so/log/salt/so-salt-minion-check
/opt/so/log/salt/minion
/opt/so/log/salt/master
+/opt/so/log/logscan/*.log
{
{{ logrotate_conf | indent(width=4) }}
}
diff --git a/salt/common/tools/sbin/so-grafana-dashboard-folder-delete b/salt/common/tools/sbin/so-grafana-dashboard-folder-delete
new file mode 100755
index 000000000..f3338de84
--- /dev/null
+++ b/salt/common/tools/sbin/so-grafana-dashboard-folder-delete
@@ -0,0 +1,19 @@
+#!/bin/bash
+# this script is used to delete the default Grafana dashboard folders that existed prior to Grafana dashboard and Salt management changes in 2.3.70
+
+folders=$(curl -X GET http://admin:{{salt['pillar.get']('secrets:grafana_admin')}}@localhost:3000/api/folders | jq -r '.[] | @base64')
+delfolder=("Manager" "Manager Search" "Sensor Nodes" "Search Nodes" "Standalone" "Eval Mode")
+
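+# each entry in $folders is a base64-encoded folder object; decode it to read the title and uid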
+for row in $folders; do
+ title=$(echo ${row} | base64 --decode | jq -r '.title')
+ uid=$(echo ${row} | base64 --decode | jq -r '.uid')
+
+  if printf '%s\n' "${delfolder[@]}" | grep -Fxq -- "$title"; then # exact match, so "Search" cannot match "Search Nodes"
+ curl -X DELETE http://admin:{{salt['pillar.get']('secrets:grafana_admin')}}@localhost:3000/api/folders/$uid
+ fi
+done
+
+echo "so-grafana-dashboard-folder-delete has been run to delete default Grafana dashboard folders that existed prior to 2.3.70" > /opt/so/state/so-grafana-dashboard-folder-delete-complete
+
+exit 0
diff --git a/salt/common/tools/sbin/so-image-pull b/salt/common/tools/sbin/so-image-pull
new file mode 100755
index 000000000..cf312acec
--- /dev/null
+++ b/salt/common/tools/sbin/so-image-pull
@@ -0,0 +1,60 @@
+#!/bin/bash
+#
+# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+. /usr/sbin/so-common
+. /usr/sbin/so-image-common
+
+usage() {
+ read -r -d '' message <<- EOM
+ usage: so-image-pull [-h] IMAGE [IMAGE ...]
+
+ positional arguments:
+ IMAGE One or more 'so-' prefixed images to download and verify.
+
+ optional arguments:
+ -h, --help Show this help message and exit.
+ EOM
+ echo "$message"
+ exit 1
+}
+
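+# strip --quiet/-q from the argument list; the remaining positional args are the images to pull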
+for arg; do
+ shift
+ [[ "$arg" = "--quiet" || "$arg" = "-q" ]] && quiet=true && continue
+ set -- "$@" "$arg"
+done
+
+if [[ $# -eq 0 ]] || [[ $1 == '-h' || $1 == '--help' ]]; then
+ usage
+fi
+
+TRUSTED_CONTAINERS=("$@")
+set_version
+
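+# only pull if the local registry (:5000) copy of this image at the current version is missing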
+for image in "${TRUSTED_CONTAINERS[@]}"; do
+ if ! docker images | grep "$image" | grep ":5000" | grep -q "$VERSION"; then
+ if [[ $quiet == true ]]; then
+ update_docker_containers "$image" "" "" "/dev/null"
+ else
+ update_docker_containers "$image" "" "" ""
+ fi
+ else
+ echo "$image:$VERSION image exists."
+ fi
+done
diff --git a/salt/common/tools/sbin/so-influxdb-drop-autogen b/salt/common/tools/sbin/so-influxdb-drop-autogen
old mode 100644
new mode 100755
diff --git a/salt/common/tools/sbin/so-learn b/salt/common/tools/sbin/so-learn
new file mode 100755
index 000000000..273f1b8f4
--- /dev/null
+++ b/salt/common/tools/sbin/so-learn
@@ -0,0 +1,312 @@
+#!/usr/bin/env python3
+
+# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
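+# Example usage (subcommands as defined by the argument parser in main()):
+#   so-learn list
+#   so-learn enable logscan --apply
+#   so-learn disable all
+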
+from itertools import chain
+from typing import List
+
+import signal
+import sys
+import os
+import re
+import subprocess
+import argparse
+import textwrap
+import yaml
+import multiprocessing
+import docker
+
+minion_pillar_dir = '/opt/so/saltstack/local/pillar/minions'
+so_status_conf = '/opt/so/conf/so-status/so-status.conf'
+proc: subprocess.Popen = None  # current child process, if any; see sigint_handler
+
+# Temp store of modules, will likely be broken out into salt
+def get_learn_modules():
+ return {
+ 'logscan': { 'cpu_period': get_cpu_period(fraction=0.25), 'enabled': False, 'description': 'Scan log files against pre-trained models to alert on anomalies.' }
+ }
+
+
+def get_cpu_period(fraction: float):
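+    # Scale the Docker CFS scheduling period by the fraction of cores the
+    # container may use; hosts with two or fewer cores get the full allocation.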
+ multiplier = 10000
+
+ num_cores = multiprocessing.cpu_count()
+ if num_cores <= 2:
+ fraction = 1.
+
+ num_used_cores = int(num_cores * fraction)
+ cpu_period = num_used_cores * multiplier
+ return cpu_period
+
+
+def sigint_handler(*_):
+ print('Exiting gracefully on Ctrl-C')
+ if proc is not None: proc.send_signal(signal.SIGINT)
+ sys.exit(1)
+
+
+def find_minion_pillar() -> str:
+    regex = r'^.*_(manager|managersearch|standalone|import|eval)\.sls$'
+
+ result = []
+ for root, _, files in os.walk(minion_pillar_dir):
+ for f_minion_id in files:
+ if re.search(regex, f_minion_id):
+ result.append(os.path.join(root, f_minion_id))
+
+ if len(result) == 0:
+ print('Could not find manager-type pillar (eval, standalone, manager, managersearch, import). Are you running this script on the manager?', file=sys.stderr)
+ sys.exit(3)
+ elif len(result) > 1:
+        res_str = ', '.join(f'"{r}"' for r in result)
+        print('(This should not happen; the system is in an error state if you see this message.)\n', file=sys.stderr)
+        print('More than one manager-type pillar exists; minion ids are listed below:', file=sys.stderr)
+ print(f' {res_str}', file=sys.stderr)
+ sys.exit(3)
+ else:
+ return result[0]
+
+
+def read_pillar(pillar: str):
+ try:
+ with open(pillar, 'r') as pillar_file:
+ loaded_yaml = yaml.safe_load(pillar_file.read())
+ if loaded_yaml is None:
+ print(f'Could not parse {pillar}', file=sys.stderr)
+ sys.exit(3)
+ return loaded_yaml
+    except Exception:  # a bare except would also swallow the SystemExit raised above
+ print(f'Could not open {pillar}', file=sys.stderr)
+ sys.exit(3)
+
+
+def write_pillar(pillar: str, content: dict):
+ try:
+ with open(pillar, 'w') as pillar_file:
+ yaml.dump(content, pillar_file, default_flow_style=False)
+ except:
+    except Exception:
+ sys.exit(3)
+
+
+def mod_so_status(action: str, item: str):
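+    # 'a+' creates so-status.conf if it does not exist yet; seek(0) rewinds so existing entries can be read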
+ with open(so_status_conf, 'a+') as conf:
+ conf.seek(0)
+ containers = conf.readlines()
+
+ if f'so-{item}\n' in containers:
+ if action == 'remove': containers.remove(f'so-{item}\n')
+ if action == 'add': pass
+ else:
+ if action == 'remove': pass
+ if action == 'add': containers.append(f'so-{item}\n')
+
+        containers = [c_name for c_name in containers if c_name != '\n']  # remove extra newlines
+
+ conf.seek(0)
+ conf.truncate(0)
+ conf.writelines(containers)
+
+
+def create_pillar_if_not_exist(pillar:str, content: dict):
+ pillar_dict = content
+
+ if pillar_dict.get('learn', {}).get('modules') is None:
+        pillar_dict.setdefault('learn', {})  # keep any existing learn settings intact
+ pillar_dict['learn']['modules'] = get_learn_modules()
+        # pillar_dict aliases content, so the new defaults are written out below
+ write_pillar(pillar, content)
+
+ return content
+
+
+def salt_call(module: str):
+    global proc  # expose the child process to sigint_handler so Ctrl-C is forwarded
+    salt_cmd = ['salt-call', 'state.apply', '-l', 'quiet', f'learn.{module}', 'queue=True']
+
+    print(f'  Applying salt state for {module} module...')
+    proc = subprocess.Popen(salt_cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
+    return_code = proc.wait()
+    proc = None
+ if return_code != 0:
+ print(f' [ERROR] Failed to apply salt state for {module} module.')
+
+ return return_code
+
+
+def pull_image(module: str):
+ container_basename = f'so-{module}'
+
+ client = docker.from_env()
+ image_list = client.images.list(filters={ 'dangling': False })
+ tag_list = list(chain.from_iterable(list(map(lambda x: x.attrs.get('RepoTags'), image_list))))
+ basename_match = list(filter(lambda x: f'{container_basename}' in x, tag_list))
+ local_registry_match = list(filter(lambda x: ':5000' in x, basename_match))
+
+ if len(local_registry_match) == 0:
+ print(f'Pulling and verifying missing image for {module} (may take several minutes) ...')
+ pull_command = ['so-image-pull', '--quiet', container_basename]
+
+ proc = subprocess.run(pull_command, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
+ return_code = proc.returncode
+ if return_code != 0:
+ print(f'[ERROR] Failed to pull image so-{module}, skipping state.')
+ else:
+ return_code = 0
+ return return_code
+
+
+def apply(module_list: List):
+ return_code = 0
+ for module in module_list:
+ salt_ret = salt_call(module)
+ # Only update return_code if the command returned a non-zero return
+ if salt_ret != 0:
+ return_code = salt_ret
+
+ return return_code
+
+
+def check_apply(args: argparse.Namespace):
+ if args.apply:
+ print('Configuration updated. Applying changes:')
+ return apply(args.modules)
+ else:
+ message = 'Configuration updated. Would you like to apply your changes now? (y/N) '
+ answer = input(message)
+ while answer.lower() not in [ 'y', 'n', '' ]:
+ answer = input(message)
+ if answer.lower() in [ 'n', '' ]:
+ return 0
+ else:
+ print('Applying changes:')
+ return apply(args.modules)
+
+
+def enable_disable_modules(args, enable: bool):
+ pillar_modules = args.pillar_dict.get('learn', {}).get('modules')
+ pillar_mod_names = args.pillar_dict.get('learn', {}).get('modules').keys()
+
+ action_str = 'add' if enable else 'remove'
+
+ if 'all' in args.modules:
+ for module, details in pillar_modules.items():
+ details['enabled'] = enable
+ mod_so_status(action_str, module)
+ if enable: pull_image(module)
+        # every module was toggled above; write the updated pillar back out
+ write_pillar(args.pillar, args.pillar_dict)
+ else:
+ write_needed = False
+ for module in args.modules:
+ if module in pillar_mod_names:
+ if pillar_modules[module]['enabled'] == enable:
+ state_str = 'enabled' if enable else 'disabled'
+ print(f'{module} module already {state_str}.', file=sys.stderr)
+ else:
+ if enable and pull_image(module) != 0:
+ continue
+ pillar_modules[module]['enabled'] = enable
+ mod_so_status(action_str, module)
+ write_needed = True
+ if write_needed:
+            # at least one module changed; persist the updated pillar
+ write_pillar(args.pillar, args.pillar_dict)
+
+ cmd_ret = check_apply(args)
+ return cmd_ret
+
+
+def enable_modules(args):
+    return enable_disable_modules(args, enable=True)
+
+
+def disable_modules(args):
+    return enable_disable_modules(args, enable=False)
+
+
+def list_modules(*_):
+ print('Available ML modules:')
+ for module, details in get_learn_modules().items():
+ print(f' - { module } : {details["description"]}')
+ return 0
+
+
+def main():
+ beta_str = 'BETA - SUBJECT TO CHANGE\n'
+
+ apply_help='After ACTION the chosen modules, apply any necessary salt states.'
+ enable_apply_help = apply_help.replace('ACTION', 'enabling')
+ disable_apply_help = apply_help.replace('ACTION', 'disabling')
+
+ signal.signal(signal.SIGINT, sigint_handler)
+
+ if os.geteuid() != 0:
+ print('You must run this script as root', file=sys.stderr)
+ sys.exit(1)
+
+ main_parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter)
+
+ subcommand_desc = textwrap.dedent(
+ """\
+ enable Enable one or more ML modules.
+ disable Disable one or more ML modules.
+ list List all available ML modules.
+ """
+ )
+
+ subparsers = main_parser.add_subparsers(title='commands', description=subcommand_desc, metavar='', dest='command')
+
+ module_help_str = 'One or more ML modules, which can be listed using \'so-learn list\'. Use the keyword \'all\' to apply the action to all available modules.'
+
+ enable = subparsers.add_parser('enable')
+ enable.set_defaults(func=enable_modules)
+ enable.add_argument('modules', metavar='ML_MODULE', nargs='+', help=module_help_str)
+ enable.add_argument('--apply', action='store_const', const=True, required=False, help=enable_apply_help)
+
+ disable = subparsers.add_parser('disable')
+ disable.set_defaults(func=disable_modules)
+ disable.add_argument('modules', metavar='ML_MODULE', nargs='+', help=module_help_str)
+ disable.add_argument('--apply', action='store_const', const=True, required=False, help=disable_apply_help)
+
+    list_parser = subparsers.add_parser('list')
+    list_parser.set_defaults(func=list_modules)
+
+ args = main_parser.parse_args(sys.argv[1:])
+ args.pillar = find_minion_pillar()
+ args.pillar_dict = create_pillar_if_not_exist(args.pillar, read_pillar(args.pillar))
+
+ if hasattr(args, 'func'):
+ exit_code = args.func(args)
+ else:
+ if args.command is None:
+ print(beta_str)
+ main_parser.print_help()
+ sys.exit(0)
+
+ sys.exit(exit_code)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/salt/common/tools/sbin/so-tcpreplay b/salt/common/tools/sbin/so-tcpreplay
index 3c1ce8fb9..e85474c67 100755
--- a/salt/common/tools/sbin/so-tcpreplay
+++ b/salt/common/tools/sbin/so-tcpreplay
@@ -31,7 +31,7 @@ if [[ $# -lt 1 ]]; then
 echo "Usage: $0 <pcap-file>"
echo
echo "All PCAPs must be placed in the /opt/so/samples directory unless replaying"
- echo "a sample pcap that is included in the so-tcpreplay image. Those PCAP sampes"
+ echo "a sample pcap that is included in the so-tcpreplay image. Those PCAP samples"
echo "are located in the /opt/samples directory inside of the image."
echo
echo "Customer provided PCAP example:"
diff --git a/salt/common/tools/sbin/so-user b/salt/common/tools/sbin/so-user
index f0c064d03..742c3ca5d 100755
--- a/salt/common/tools/sbin/so-user
+++ b/salt/common/tools/sbin/so-user
@@ -306,7 +306,7 @@ function updateStatus() {
[[ $? != 0 ]] && fail "Unable to unlock credential record"
fi
- updatedJson=$(echo "$response" | jq ".traits.status = \"$status\" | del(.verifiable_addresses) | del(.id) | del(.schema_url)")
+ updatedJson=$(echo "$response" | jq ".traits.status = \"$status\" | del(.verifiable_addresses) | del(.id) | del(.schema_url) | del(.created_at) | del(.updated_at)")
response=$(curl -Ss -XPUT -L ${kratosUrl}/identities/$identityId -d "$updatedJson")
[[ $? != 0 ]] && fail "Unable to mark user as locked"
diff --git a/salt/elasticsearch/files/ingest/logscan.alert b/salt/elasticsearch/files/ingest/logscan.alert
new file mode 100644
index 000000000..7473060a7
--- /dev/null
+++ b/salt/elasticsearch/files/ingest/logscan.alert
@@ -0,0 +1,29 @@
+{
+ "description": "logscan",
+ "processors": [
+ { "set": { "field": "event.severity", "value": 2 } },
+ { "json": { "field": "message", "add_to_root": true, "ignore_failure": true } },
+ { "rename": { "field": "@timestamp", "target_field": "event.ingested", "ignore_missing": true } },
+ { "date": { "field": "timestamp", "target_field": "event.created", "formats": [ "ISO8601", "UNIX" ], "ignore_failure": true } },
+ { "date": { "field": "start_time", "target_field": "@timestamp", "formats": [ "ISO8601", "UNIX" ], "ignore_failure": true } },
+ { "date": { "field": "start_time", "target_field": "event.start", "formats": [ "ISO8601", "UNIX" ], "ignore_failure": true } },
+ { "date": { "field": "end_time", "target_field": "event.end", "formats": [ "ISO8601", "UNIX" ], "ignore_failure": true } },
+ { "remove": { "field": "start_time", "ignore_missing": true } },
+ { "remove": { "field": "end_time", "ignore_missing": true } },
+ { "rename": { "field": "source_ip", "target_field": "source.ip", "ignore_missing": true } },
+ { "rename": { "field": "top_source_ips", "target_field": "logscan.source.ips", "ignore_missing": true } },
+ { "append": { "if": "ctx.source != null", "field": "logscan.source.ips", "value": "{{{source.ip}}}", "ignore_failure": true } },
+ { "set": { "if": "ctx.model == 'k1'", "field": "rule.name", "value": "LOGSCAN K1 MODEL THRESHOLD" } },
+ { "set": { "if": "ctx.model == 'k1'", "field": "rule.description", "value": "High number of logins from single IP in 1 minute window" } },
+ { "set": { "if": "ctx.model == 'k5'", "field": "rule.name", "value": "LOGSCAN K5 MODEL THRESHOLD" } },
+ { "set": { "if": "ctx.model == 'k5'", "field": "rule.description", "value": "High ratio of login failures from single IP in 5 minute window" } },
+ { "set": { "if": "ctx.model == 'k60'", "field": "rule.name", "value": "LOGSCAN K60 MODEL THRESHOLD" } },
+ { "set": { "if": "ctx.model == 'k60'", "field": "rule.description", "value": "Large number of login failures in 1 hour window" } },
+ { "rename": { "field": "model", "target_field": "logscan.model" } },
+ { "rename": { "field": "num_attempts", "target_field": "logscan.attempts.total.amount", "ignore_missing": true } },
+ { "rename": { "field": "num_failed", "target_field": "logscan.attempts.failed.amount", "ignore_missing": true } },
+ { "script": { "lang": "painless", "source": "ctx.logscan.attempts.succeeded.amount = ctx.logscan.attempts.total.amount - ctx.logscan.attempts.failed.amount" , "ignore_failure": true} },
+ { "rename": { "field": "avg_failure_interval", "target_field": "logscan.attempts.failed.avg_interval", "ignore_missing": true } },
+ { "pipeline": { "name": "common" } }
+ ]
+}
diff --git a/salt/elasticsearch/files/ingest/suricata.fileinfo b/salt/elasticsearch/files/ingest/suricata.fileinfo
index d5147fb40..fe9e4b109 100644
--- a/salt/elasticsearch/files/ingest/suricata.fileinfo
+++ b/salt/elasticsearch/files/ingest/suricata.fileinfo
@@ -13,6 +13,7 @@
{ "rename": { "field": "message2.fileinfo.size", "target_field": "file.size", "ignore_missing": true } },
{ "rename": { "field": "message2.fileinfo.state", "target_field": "file.state", "ignore_missing": true } },
{ "rename": { "field": "message2.fileinfo.stored", "target_field": "file.saved", "ignore_missing": true } },
+ { "rename": { "field": "message2.fileinfo.sha256", "target_field": "hash.sha256", "ignore_missing": true } },
{ "set": { "if": "ctx.network?.protocol != null", "field": "file.source", "value": "{{network.protocol}}" } },
{ "pipeline": { "name": "common" } }
]
diff --git a/salt/elasticsearch/templates/so/so-common-template.json b/salt/elasticsearch/templates/so/so-common-template.json
index 8afac271c..777bf3f53 100644
--- a/salt/elasticsearch/templates/so/so-common-template.json
+++ b/salt/elasticsearch/templates/so/so-common-template.json
@@ -313,6 +313,10 @@
"type":"object",
"dynamic": true
},
+ "logscan": {
+ "type": "object",
+ "dynamic": true
+ },
"manager":{
"type":"object",
"dynamic": true
diff --git a/salt/filebeat/etc/filebeat.yml b/salt/filebeat/etc/filebeat.yml
index f904ccfa6..0c27e3c1b 100644
--- a/salt/filebeat/etc/filebeat.yml
+++ b/salt/filebeat/etc/filebeat.yml
@@ -112,6 +112,21 @@ filebeat.inputs:
fields: ["source", "prospector", "input", "offset", "beat"]
fields_under_root: true
+{%- if grains['role'] in ['so-eval', 'so-standalone', 'so-manager', 'so-managersearch', 'so-import'] %}
+- type: log
+ paths:
+ - /logs/logscan/alerts.log
+ fields:
+ module: logscan
+ dataset: alert
+ processors:
+ - drop_fields:
+ fields: ["source", "prospector", "input", "offset", "beat"]
+ fields_under_root: true
+ clean_removed: true
+ close_removed: false
+{%- endif %}
+
{%- if grains['role'] in ['so-eval', 'so-standalone', 'so-sensor', 'so-helix', 'so-heavynode', 'so-import'] %}
{%- if ZEEKVER != 'SURICATA' %}
{%- for LOGNAME in salt['pillar.get']('zeeklogs:enabled', '') %}
@@ -294,6 +309,9 @@ output.elasticsearch:
- index: "so-strelka"
when.contains:
module: "strelka"
+ - index: "so-logscan"
+ when.contains:
+ module: "logscan"
setup.template.enabled: false
{%- else %}
diff --git a/salt/grafana/init.sls b/salt/grafana/init.sls
index dbb659217..1c1bdfb2e 100644
--- a/salt/grafana/init.sls
+++ b/salt/grafana/init.sls
@@ -44,6 +44,12 @@ grafanadashdir:
- group: 939
- makedirs: True
+{% for type in ['eval','manager','managersearch','search_nodes','sensor_nodes','standalone'] %}
+remove_dashboard_dir_{{type}}:
+ file.absent:
+ - name: /opt/so/conf/grafana/grafana_dashboards/{{type}}
+{% endfor %}
+
grafana-dashboard-config:
file.managed:
- name: /opt/so/conf/grafana/etc/dashboards/dashboard.yml
@@ -82,6 +88,11 @@ grafana-config-files:
- source: salt://grafana/etc/files
- makedirs: True
+so-grafana-dashboard-folder-delete:
+ cmd.run:
+ - name: /usr/sbin/so-grafana-dashboard-folder-delete
+ - unless: ls /opt/so/state/so-grafana-dashboard-folder-delete-complete
+
{% for dashboard in DASHBOARDS %}
{{dashboard}}-dashboard:
file.managed:
diff --git a/salt/learn/files/logscan.conf b/salt/learn/files/logscan.conf
new file mode 100644
index 000000000..d7aa30734
--- /dev/null
+++ b/salt/learn/files/logscan.conf
@@ -0,0 +1,7 @@
+[global]
+ts_format = iso8601
+scan_interval = 30s
+log_level = info
+
+[kratos]
+log_path = kratos/kratos.log
diff --git a/salt/learn/init.sls b/salt/learn/init.sls
new file mode 100644
index 000000000..fb5b89802
--- /dev/null
+++ b/salt/learn/init.sls
@@ -0,0 +1,20 @@
+{% from 'allowed_states.map.jinja' import allowed_states %}
+{% if sls in allowed_states %}
+
+{% set module_dict = salt['pillar.get']('learn:modules', {} ) %}
+
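+{# build the include list from the learn:modules pillar, one entry per module (e.g. learn.logscan) #}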
+{% if module_dict.items()|length != 0 %}
+include:
+{% for module, _ in module_dict.items() %}
+ - 'learn.{{ module }}'
+{% endfor %}
+{% endif %}
+
+{% else %}
+
+{{sls}}_state_not_allowed:
+ test.fail_without_changes:
+ - name: {{sls}}_state_not_allowed
+
+{% endif %}
diff --git a/salt/learn/logscan.sls b/salt/learn/logscan.sls
new file mode 100644
index 000000000..cc8bb2996
--- /dev/null
+++ b/salt/learn/logscan.sls
@@ -0,0 +1,57 @@
+{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
+{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
+{% set MANAGER = salt['grains.get']('master') %}
+{% set logscan_cpu_period = salt['pillar.get']('learn:modules:logscan:cpu_period', 20000) %}
+{% set enabled = salt['pillar.get']('learn:modules:logscan:enabled', False) %}
+
+{% if enabled %}
+ {% set container_action = 'running' %}
+{% else %}
+ {% set container_action = 'absent'%}
+{% endif %}
+
+
+logscan_data_dir:
+ file.directory:
+ - name: /nsm/logscan/data
+ - user: 939
+ - group: 939
+ - makedirs: True
+
+logscan_conf_dir:
+ file.directory:
+ - name: /opt/so/conf/logscan
+ - user: 939
+ - group: 939
+ - makedirs: True
+
+logscan_conf:
+ file.managed:
+ - name: /opt/so/conf/logscan/logscan.conf
+ - source: salt://learn/files/logscan.conf
+ - user: 939
+ - group: 939
+ - mode: 600
+
+logscan_log_dir:
+ file.directory:
+ - name: /opt/so/log/logscan
+ - user: 939
+ - group: 939
+
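+{# run the container when the module is enabled in the pillar; otherwise ensure it is removed #}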
+so-logscan:
+ docker_container.{{ container_action }}:
+ {% if container_action == 'running' %}
+ - image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-logscan:{{ VERSION }}
+ - hostname: logscan
+ - name: so-logscan
+ - binds:
+ - /nsm/logscan/data:/logscan/data:rw
+ - /opt/so/conf/logscan/logscan.conf:/logscan/logscan.conf:ro
+ - /opt/so/log/logscan:/logscan/output:rw
+ - /opt/so/log:/logscan/logs:ro
+ - cpu_period: {{ logscan_cpu_period }}
+ {% else %}
+ - force: true
+ {% endif %}
diff --git a/salt/logstash/pipelines/config/so/9800_output_logscan.conf.jinja b/salt/logstash/pipelines/config/so/9800_output_logscan.conf.jinja
new file mode 100644
index 000000000..86944d155
--- /dev/null
+++ b/salt/logstash/pipelines/config/so/9800_output_logscan.conf.jinja
@@ -0,0 +1,28 @@
+{%- if grains['role'] == 'so-eval' -%}
+{%- set ES = salt['pillar.get']('manager:mainip', '') -%}
+{%- else %}
+{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%}
+{%- endif %}
+{%- set ES_USER = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:user', '') %}
+{%- set ES_PASS = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:pass', '') %}
+
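+# route logscan events through the logscan.alert ingest pipeline into the so-logscan index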
+output {
+ if [module] =~ "logscan" {
+ elasticsearch {
+ id => "logscan_pipeline"
+ pipeline => "logscan.alert"
+ hosts => "{{ ES }}"
+ {% if salt['pillar.get']('elasticsearch:auth:enabled') is sameas true %}
+ user => "{{ ES_USER }}"
+ password => "{{ ES_PASS }}"
+ {% endif %}
+ index => "so-logscan"
+ template_name => "so-common"
+ template => "/templates/so-common-template.json"
+ template_overwrite => true
+ ssl => true
+ ssl_certificate_verification => false
+ }
+ }
+}
diff --git a/salt/suricata/suricata_meta.yaml b/salt/suricata/suricata_meta.yaml
index 90b220000..1c3855501 100644
--- a/salt/suricata/suricata_meta.yaml
+++ b/salt/suricata/suricata_meta.yaml
@@ -7,7 +7,7 @@ suricata:
dir: /nsm/extracted
#write-fileinfo: "yes"
#force-filestore: "yes"
- #stream-depth: 0
+ stream-depth: 0
#max-open-files: 1000
#force-hash: [sha1, md5]
xff:
diff --git a/salt/top.sls b/salt/top.sls
index 0b5322bcc..4f0792c0c 100644
--- a/salt/top.sls
+++ b/salt/top.sls
@@ -153,6 +153,7 @@ base:
{%- endif %}
- docker_clean
- pipeline.load
+ - learn
'*_manager and G@saltversion:{{saltversion}}':
- match: compound
@@ -216,6 +217,7 @@ base:
{%- endif %}
- docker_clean
- pipeline.load
+ - learn
'*_standalone and G@saltversion:{{saltversion}}':
- match: compound
@@ -288,6 +290,7 @@ base:
{%- endif %}
- docker_clean
- pipeline.load
+ - learn
'*_searchnode and G@saltversion:{{saltversion}}':
- match: compound
@@ -358,7 +361,6 @@ base:
{%- if FILEBEAT %}
- filebeat
{%- endif %}
-
- utility
- schedule
{%- if FLEETMANAGER or FLEETNODE %}
@@ -380,6 +382,7 @@ base:
{%- endif %}
- docker_clean
- pipeline.load
+ - learn
'*_heavynode and G@saltversion:{{saltversion}}':
- match: compound
@@ -468,3 +471,4 @@ base:
- schedule
- docker_clean
- pipeline.load
+ - learn
diff --git a/setup/automation/standalone-iso-logscan b/setup/automation/standalone-iso-logscan
new file mode 100644
index 000000000..d83ad73db
--- /dev/null
+++ b/setup/automation/standalone-iso-logscan
@@ -0,0 +1,78 @@
+#!/bin/bash
+
+# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+TESTING=true
+
+address_type=DHCP
+ADMINUSER=onionuser
+ADMINPASS1=onionuser
+ADMINPASS2=onionuser
+ALLOW_CIDR=0.0.0.0/0
+ALLOW_ROLE=a
+BASICZEEK=2
+BASICSURI=2
+# BLOGS=
+BNICS=eth1
+ZEEKVERSION=ZEEK
+# CURCLOSEDAYS=
+# EVALADVANCED=BASIC
+GRAFANA=1
+# HELIXAPIKEY=
+HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
+HNSENSOR=inherit
+HOSTNAME=standalone
+install_type=STANDALONE
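+# read by so-setup after install: any non-empty value runs 'so-learn enable logscan --apply'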
+LEARN_LOGSCAN_ENABLE=true
+# LSINPUTBATCHCOUNT=
+# LSINPUTTHREADS=
+# LSPIPELINEBATCH=
+# LSPIPELINEWORKERS=
+MANAGERADV=BASIC
+# MDNS=
+# MGATEWAY=
+# MIP=
+# MMASK=
+MNIC=eth0
+# MSEARCH=
+# MSRV=
+# MTU=
+NIDS=Suricata
+# NODE_ES_HEAP_SIZE=
+# NODE_LS_HEAP_SIZE=
+NODESETUP=NODEBASIC
+NSMSETUP=BASIC
+NODEUPDATES=MANAGER
+# OINKCODE=
+OSQUERY=1
+# PATCHSCHEDULEDAYS=
+# PATCHSCHEDULEHOURS=
+PATCHSCHEDULENAME=auto
+PLAYBOOK=1
+# REDIRECTHOST=
+REDIRECTINFO=IP
+RULESETUP=ETOPEN
+# SHARDCOUNT=
+# SKIP_REBOOT=
+SOREMOTEPASS1=onionuser
+SOREMOTEPASS2=onionuser
+STRELKA=1
+THEHIVE=1
+WAZUH=1
+WEBUSER=onionuser@somewhere.invalid
+WEBPASSWD1=0n10nus3r
+WEBPASSWD2=0n10nus3r
diff --git a/setup/so-setup b/setup/so-setup
index 68490657f..958d8aea1 100755
--- a/setup/so-setup
+++ b/setup/so-setup
@@ -962,6 +962,12 @@ else
set_progress_str 99 'Waiting for TheHive to start up'
check_hive_init >> $setup_log 2>&1
fi
+
+ if [[ -n $LEARN_LOGSCAN_ENABLE ]]; then
+ set_progress_str 99 'Enabling logscan'
+ so-learn enable logscan --apply >> $setup_log 2>&1
+ fi
+
} | whiptail_gauge_post_setup "Running post-installation steps..."
whiptail_setup_complete
diff --git a/setup/so-whiptail b/setup/so-whiptail
index 961924afa..780411841 100755
--- a/setup/so-whiptail
+++ b/setup/so-whiptail
@@ -959,33 +959,18 @@ whiptail_management_interface_gateway() {
whiptail_management_interface_ip_mask() {
[ -n "$TESTING" ] && return
- manager_ip_mask=$(whiptail --title "$whiptail_title" --inputbox \
- "Enter your IPv4 address with CIDR mask (e.g. 192.168.1.2/24):" 10 60 "$1" 3>&1 1>&2 2>&3)
+ local msg
+ read -r -d '' msg <<- EOM
+ What IPv4 address would you like to assign to this Security Onion installation?
+
+ Please enter the IPv4 address with CIDR mask
+ (e.g. 192.168.1.2/24):
+ EOM
+
+ manager_ip_mask=$(whiptail --title "$whiptail_title" --inputbox "$msg" 12 60 "$1" 3>&1 1>&2 2>&3)
local exitstatus=$?
- whiptail_check_exitstatus $exitstatus
-}
-
-whiptail_management_interface_ip() {
-
- [ -n "$TESTING" ] && return
-
- MIP=$(whiptail --title "$whiptail_title" --inputbox \
- "Enter your IP address:" 10 60 X.X.X.X 3>&1 1>&2 2>&3)
-
- local exitstatus=$?
- whiptail_check_exitstatus $exitstatus
-}
-
-whiptail_management_interface_mask() {
-
- [ -n "$TESTING" ] && return
-
- MMASK=$(whiptail --title "$whiptail_title" --inputbox \
- "Enter the bit mask for your subnet:" 10 60 24 3>&1 1>&2 2>&3)
-
- local exitstatus=$?
- whiptail_check_exitstatus $exitstatus
+ # whiptail_check_exitstatus $exitstatus
}
whiptail_management_nic() {
@@ -1734,7 +1719,7 @@ whiptail_so_allow_yesno() {
[ -n "$TESTING" ] && return
whiptail --title "$whiptail_title" \
- --yesno "Do you want to run so-allow to allow access to the web tools?" \
+ --yesno "Do you want to run so-allow to allow other machines to access this Security Onion installation via the web interface?" \
8 75
}