Mirror of https://github.com/Security-Onion-Solutions/securityonion.git, synced 2025-12-06 09:12:45 +01:00

Merge remote-tracking branch 'origin/2.4/dev' into idstools-refactor
.github/workflows/pythontest.yml (vendored): 2 lines changed

@@ -4,7 +4,7 @@ on:
   pull_request:
     paths:
       - "salt/sensoroni/files/analyzers/**"
-      - "salt/manager/tools/sbin"
+      - "salt/manager/tools/sbin/**"

 jobs:
   build:

@@ -1,4 +1,7 @@
 {% from 'vars/globals.map.jinja' import GLOBALS %}
+{% set PCAP_BPF_STATUS = 0 %}
+{% set STENO_BPF_COMPILED = "" %}
+
 {% if GLOBALS.pcap_engine == "TRANSITION" %}
 {% set PCAPBPF = ["ip and host 255.255.255.1 and port 1"] %}
 {% else %}

@@ -8,3 +11,11 @@
 {{ MACROS.remove_comments(BPFMERGED, 'pcap') }}
 {% set PCAPBPF = BPFMERGED.pcap %}
 {% endif %}
+
+{% if PCAPBPF %}
+{% set PCAP_BPF_CALC = salt['cmd.run_all']('/usr/sbin/so-bpf-compile ' ~ GLOBALS.sensor.interface ~ ' ' ~ PCAPBPF|join(" "), cwd='/root') %}
+{% if PCAP_BPF_CALC['retcode'] == 0 %}
+{% set PCAP_BPF_STATUS = 1 %}
+{% set STENO_BPF_COMPILED = ",\\\"--filter=" + PCAP_BPF_CALC['stdout'] + "\\\"" %}
+{% endif %}
+{% endif %}

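Note: salt['cmd.run_all'] returns a dict with 'retcode', 'stdout', and 'stderr' keys, which is why the map file can key success off retcode and build the Stenographer flag from stdout. A rough Python analogue of this compile step, for illustration only (the helper name compile_bpf is not part of the change):

    import subprocess

    def compile_bpf(interface: str, filters: list) -> tuple:
        # Mirrors PCAP_BPF_CALC: run so-bpf-compile against the capture
        # interface and the merged filter expression.
        result = subprocess.run(
            ["/usr/sbin/so-bpf-compile", interface, " ".join(filters)],
            capture_output=True, text=True, cwd="/root",
        )
        if result.returncode != 0:
            return 0, ""  # compilation failed; the BPF gets discarded
        # On success, wrap the compiled program into a Stenographer argument,
        # matching STENO_BPF_COMPILED above.
        return 1, ',\\"--filter=' + result.stdout + '\\"'
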
@@ -1,11 +1,11 @@
 bpf:
   pcap:
-    description: List of BPF filters to apply to Stenographer.
+    description: List of BPF filters to apply to the PCAP engine.
     multiline: True
     forcedType: "[]string"
     helpLink: bpf.html
   suricata:
-    description: List of BPF filters to apply to Suricata.
+    description: List of BPF filters to apply to Suricata. This will apply to alerts and, if enabled, to metadata and PCAP logs generated by Suricata.
     multiline: True
     forcedType: "[]string"
     helpLink: bpf.html

@@ -1,7 +1,16 @@
 {% from 'vars/globals.map.jinja' import GLOBALS %}
 {% import_yaml 'bpf/defaults.yaml' as BPFDEFAULTS %}
 {% set BPFMERGED = salt['pillar.get']('bpf', BPFDEFAULTS.bpf, merge=True) %}
+{% set SURICATA_BPF_STATUS = 0 %}
 {% import 'bpf/macros.jinja' as MACROS %}

 {{ MACROS.remove_comments(BPFMERGED, 'suricata') }}

 {% set SURICATABPF = BPFMERGED.suricata %}
+
+{% if SURICATABPF %}
+{% set SURICATA_BPF_CALC = salt['cmd.run_all']('/usr/sbin/so-bpf-compile ' ~ GLOBALS.sensor.interface ~ ' ' ~ SURICATABPF|join(" "), cwd='/root') %}
+{% if SURICATA_BPF_CALC['retcode'] == 0 %}
+{% set SURICATA_BPF_STATUS = 1 %}
+{% endif %}
+{% endif %}

@@ -1,7 +1,16 @@
 {% from 'vars/globals.map.jinja' import GLOBALS %}
 {% import_yaml 'bpf/defaults.yaml' as BPFDEFAULTS %}
 {% set BPFMERGED = salt['pillar.get']('bpf', BPFDEFAULTS.bpf, merge=True) %}
+{% set ZEEK_BPF_STATUS = 0 %}
 {% import 'bpf/macros.jinja' as MACROS %}

 {{ MACROS.remove_comments(BPFMERGED, 'zeek') }}

 {% set ZEEKBPF = BPFMERGED.zeek %}
+
+{% if ZEEKBPF %}
+{% set ZEEK_BPF_CALC = salt['cmd.run_all']('/usr/sbin/so-bpf-compile ' ~ GLOBALS.sensor.interface ~ ' ' ~ ZEEKBPF|join(" "), cwd='/root') %}
+{% if ZEEK_BPF_CALC['retcode'] == 0 %}
+{% set ZEEK_BPF_STATUS = 1 %}
+{% endif %}
+{% endif %}

@@ -29,9 +29,26 @@ fi

 interface="$1"
 shift
-tcpdump -i $interface -ddd $@ | tail -n+2 |
-while read line; do
+
+# Capture tcpdump output and exit code
+tcpdump_output=$(tcpdump -i "$interface" -ddd "$@" 2>&1)
+tcpdump_exit=$?
+
+if [ $tcpdump_exit -ne 0 ]; then
+  echo "$tcpdump_output" >&2
+  exit $tcpdump_exit
+fi
+
+# Process the output, skipping the first line
+echo "$tcpdump_output" | tail -n+2 | while read -r line; do
   cols=( $line )
-  printf "%04x%02x%02x%08x" ${cols[0]} ${cols[1]} ${cols[2]} ${cols[3]}
+  printf "%04x%02x%02x%08x" "${cols[0]}" "${cols[1]}" "${cols[2]}" "${cols[3]}"
 done
+
+# Check if the pipeline succeeded
+if [ "${PIPESTATUS[0]}" -ne 0 ]; then
+  exit 1
+fi
+
 echo ""
 exit 0

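Note: tcpdump's -ddd output is a line count followed by one classic-BPF instruction per line as four decimal columns (code, jt, jf, k), and the printf above packs each instruction into 16+8+8+32 bits of hex. A small illustrative Python equivalent of that loop:

    def pack_bpf(ddd_output):
        # Skip the leading count line, like tail -n+2 in the script.
        packed = []
        for line in ddd_output.strip().splitlines()[1:]:
            code, jt, jf, k = (int(col) for col in line.split())
            packed.append(f"{code:04x}{jt:02x}{jf:02x}{k:08x}")
        return "".join(packed)

    # 'ip' on a typical Ethernet interface compiles to four instructions:
    print(pack_bpf("4\n40 0 0 12\n21 0 1 2048\n6 0 0 262144\n6 0 0 0\n"))
    # -> 002800000000000c001500010000080000060000000400000006000000000000
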
@@ -395,7 +395,7 @@ is_manager_node() {
 }

 is_sensor_node() {
-  # Check to see if this is a sensor (forward) node
+  # Check to see if this is a sensor node
   is_single_node_grid && return 0
   grep "role: so-" /etc/salt/grains | grep -E "sensor|heavynode" &> /dev/null
 }

@@ -26,8 +26,8 @@ def showUsage(args):
     print(' Where:', file=sys.stderr)
     print('  YAML_FILE - Path to the file that will be modified. Ex: /opt/so/conf/service/conf.yaml', file=sys.stderr)
     print('  KEY       - YAML key, does not support \' or " characters at this time. Ex: level1.level2', file=sys.stderr)
-    print('  VALUE     - Value to set for a given key', file=sys.stderr)
-    print('  LISTITEM  - Item to append to a given key\'s list value', file=sys.stderr)
+    print('  VALUE     - Value to set for a given key. Can be a literal value or file:<path> to load from a YAML file.', file=sys.stderr)
+    print('  LISTITEM  - Item to append to a given key\'s list value. Can be a literal value or file:<path> to load from a YAML file.', file=sys.stderr)
     sys.exit(1)

@@ -58,7 +58,13 @@ def appendItem(content, key, listItem):


 def convertType(value):
-    if isinstance(value, str) and len(value) > 0 and (not value.startswith("0") or len(value) == 1):
+    if isinstance(value, str) and value.startswith("file:"):
+        path = value[5:]  # Remove "file:" prefix
+        if not os.path.exists(path):
+            print(f"File '{path}' does not exist.", file=sys.stderr)
+            sys.exit(1)
+        return loadYaml(path)
+    elif isinstance(value, str) and len(value) > 0 and (not value.startswith("0") or len(value) == 1):
         if "." in value:
             try:
                 value = float(value)

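Note: with this change convertType() has three behaviors worth keeping in mind; an illustrative session (the file path is hypothetical):

    soyaml.convertType("1.5")     # -> 1.5, via the float branch shown above
    soyaml.convertType("0755")    # -> "0755", the leading-zero guard keeps it a string
    soyaml.convertType("FALSE")   # -> False, per the existing tests
    soyaml.convertType("file:/tmp/extra.yaml")  # -> parsed YAML document, via loadYaml()
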
@@ -361,6 +361,29 @@ class TestRemove(unittest.TestCase):
         self.assertEqual(soyaml.convertType("FALSE"), False)
         self.assertEqual(soyaml.convertType(""), "")

+    def test_convert_file(self):
+        import tempfile
+        import os
+
+        # Create a temporary YAML file
+        with tempfile.NamedTemporaryFile(mode='w', suffix='.yaml', delete=False) as f:
+            f.write("test:\n  - name: hi\n    color: blue\n")
+            temp_file = f.name
+
+        try:
+            result = soyaml.convertType(f"file:{temp_file}")
+            expected = {"test": [{"name": "hi", "color": "blue"}]}
+            self.assertEqual(result, expected)
+        finally:
+            os.unlink(temp_file)
+
+    def test_convert_file_nonexistent(self):
+        with self.assertRaises(SystemExit) as cm:
+            with patch('sys.stderr', new=StringIO()) as mock_stderr:
+                soyaml.convertType("file:/nonexistent/file.yaml")
+        self.assertEqual(cm.exception.code, 1)
+        self.assertIn("File '/nonexistent/file.yaml' does not exist.", mock_stderr.getvalue())
+
     def test_get_int(self):
         with patch('sys.stdout', new=StringIO()) as mock_stdout:
             filename = "/tmp/so-yaml_test-get.yaml"

@@ -1807,7 +1807,7 @@ This appears to be a distributed deployment. Other nodes should update themselves

 Each minion is on a random 15 minute check-in period and things like network bandwidth can be a factor in how long the actual upgrade takes. If you have a heavy node on a slow link, it is going to take a while to get the containers to it. Depending on what changes happened between the versions, Elasticsearch might not be able to talk to said heavy node until the update is complete.

-If it looks like you’re missing data after the upgrade, please avoid restarting services and instead make sure at least one search node has completed its upgrade. The best way to do this is to run 'sudo salt-call state.highstate' from a search node and make sure there are no errors. Typically if it works on one node it will work on the rest. Forward nodes are less complex and will update as they check in so you can monitor those from the Grid section of SOC.
+If it looks like you’re missing data after the upgrade, please avoid restarting services and instead make sure at least one search node has completed its upgrade. The best way to do this is to run 'sudo salt-call state.highstate' from a search node and make sure there are no errors. Typically if it works on one node it will work on the rest. Sensor nodes are less complex and will update as they check in so you can monitor those from the Grid section of SOC.

 For more information, please see $DOC_BASE_URL/soup.html#distributed-deployments.

@@ -8,12 +8,9 @@

 {% from 'vars/globals.map.jinja' import GLOBALS %}
 {% from "pcap/config.map.jinja" import PCAPMERGED %}
-{% from 'bpf/pcap.map.jinja' import PCAPBPF %}
-
-{% set BPF_COMPILED = "" %}
+{% from 'bpf/pcap.map.jinja' import PCAPBPF, PCAP_BPF_STATUS, PCAP_BPF_CALC, STENO_BPF_COMPILED %}

 # PCAP Section

 stenographergroup:
   group.present:
     - name: stenographer

@@ -40,18 +37,12 @@ pcap_sbin:
     - group: 939
     - file_mode: 755

-{% if PCAPBPF %}
-{% set BPF_CALC = salt['cmd.script']('salt://common/tools/sbin/so-bpf-compile', GLOBALS.sensor.interface + ' ' + PCAPBPF|join(" "),cwd='/root') %}
-{% if BPF_CALC['stderr'] == "" %}
-{% set BPF_COMPILED = ",\\\"--filter=" + BPF_CALC['stdout'] + "\\\"" %}
-{% else %}
-
-bpfcompilationfailure:
+{% if PCAPBPF and not PCAP_BPF_STATUS %}
+stenoPCAPbpfcompilationfailure:
   test.configurable_test_state:
     - changes: False
     - result: False
-    - comment: "BPF Compilation Failed - Discarding Specified BPF"
-{% endif %}
+    - comment: "BPF Syntax Error - Discarding Specified BPF. Error: {{ PCAP_BPF_CALC['stderr'] }}"
 {% endif %}

 stenoconf:

@@ -64,7 +55,7 @@ stenoconf:
     - template: jinja
     - defaults:
         PCAPMERGED: {{ PCAPMERGED }}
-        BPF_COMPILED: "{{ BPF_COMPILED }}"
+        STENO_BPF_COMPILED: "{{ STENO_BPF_COMPILED }}"

 stenoca:
   file.directory:

@@ -6,6 +6,6 @@
 , "Interface": "{{ pillar.sensor.interface }}"
 , "Port": 1234
 , "Host": "127.0.0.1"
-, "Flags": ["-v", "--blocks={{ PCAPMERGED.config.blocks }}", "--preallocate_file_mb={{ PCAPMERGED.config.preallocate_file_mb }}", "--aiops={{ PCAPMERGED.config.aiops }}", "--uid=stenographer", "--gid=stenographer"{{ BPF_COMPILED }}]
+, "Flags": ["-v", "--blocks={{ PCAPMERGED.config.blocks }}", "--preallocate_file_mb={{ PCAPMERGED.config.preallocate_file_mb }}", "--aiops={{ PCAPMERGED.config.aiops }}", "--uid=stenographer", "--gid=stenographer"{{ STENO_BPF_COMPILED }}]
 , "CertPath": "/etc/stenographer/certs"
 }

@@ -7,7 +7,7 @@ pcap:
     description: By default, Stenographer limits the number of files in the pcap directory to 30000 to avoid limitations with the ext3 filesystem. However, if you're using the ext4 or xfs filesystems, then it is safe to increase this value. So if you have a large amount of storage and find that you only have 3 weeks worth of PCAP on disk while still having plenty of free space, then you may want to increase this default setting.
     helpLink: stenographer.html
   diskfreepercentage:
-    description: Stenographer will purge old PCAP on a regular basis to keep the disk free percentage at this level. If you have a distributed deployment with dedicated forward nodes, then the default value of 10 should be reasonable since Stenographer should be the main consumer of disk space in the /nsm partition. However, if you have systems that run both Stenographer and Elasticsearch at the same time (like eval and standalone installations), then you’ll want to make sure that this value is no lower than 21 so that you avoid Elasticsearch hitting its watermark setting at 80% disk usage. If you have an older standalone installation, then you may need to manually change this value to 21.
+    description: Stenographer will purge old PCAP on a regular basis to keep the disk free percentage at this level. If you have a distributed deployment with dedicated Sensor nodes, then the default value of 10 should be reasonable since Stenographer should be the main consumer of disk space in the /nsm partition. However, if you have systems that run both Stenographer and Elasticsearch at the same time (like eval and standalone installations), then you’ll want to make sure that this value is no lower than 21 so that you avoid Elasticsearch hitting its watermark setting at 80% disk usage. If you have an older standalone installation, then you may need to manually change this value to 21.
     helpLink: stenographer.html
   blocks:
     description: The number of 1MB packet blocks used by Stenographer and AF_PACKET to store packets in memory, per thread. You shouldn't need to change this.

@@ -15,7 +15,7 @@ sensoroni:
   sensoronikey:
   soc_host:
   suripcap:
-    pcapMaxCount: 999999
+    pcapMaxCount: 100000
   analyzers:
     echotrail:
       base_url: https://api.echotrail.io/insights/

@@ -2630,4 +2630,9 @@ soc:
       displayName: GPT-OSS 120B
       contextLimitSmall: 128000
       contextLimitLarge: 128000
       lowBalanceColorAlert: 500000
+    - id: qwen-235b
+      displayName: QWEN 235B
+      contextLimitSmall: 256000
+      contextLimitLarge: 256000
+      lowBalanceColorAlert: 500000

@@ -7,9 +7,47 @@
 {% if sls.split('.')[0] in allowed_states %}

 {% from 'vars/globals.map.jinja' import GLOBALS %}
-{% from 'bpf/suricata.map.jinja' import SURICATABPF %}
 {% from 'suricata/map.jinja' import SURICATAMERGED %}
-{% set BPF_STATUS = 0 %}
+{% from 'bpf/suricata.map.jinja' import SURICATABPF, SURICATA_BPF_STATUS, SURICATA_BPF_CALC %}

 suridir:
   file.directory:
     - name: /opt/so/conf/suricata
     - user: 940
     - group: 940

+{% if GLOBALS.pcap_engine in ["SURICATA", "TRANSITION"] %}
+{% from 'bpf/pcap.map.jinja' import PCAPBPF, PCAP_BPF_STATUS, PCAP_BPF_CALC %}
+# BPF compilation and configuration
+{% if PCAPBPF and not PCAP_BPF_STATUS %}
+suriPCAPbpfcompilationfailure:
+  test.configurable_test_state:
+    - changes: False
+    - result: False
+    - comment: "BPF Syntax Error - Discarding Specified BPF. Error: {{ PCAP_BPF_CALC['stderr'] }}"
+{% endif %}
+{% endif %}
+
+# BPF applied to all of Suricata - alerts/metadata/pcap
+suribpf:
+  file.managed:
+    - name: /opt/so/conf/suricata/bpf
+    - user: 940
+    - group: 940
+{% if SURICATA_BPF_STATUS %}
+    - contents: {{ SURICATABPF }}
+{% else %}
+    - contents:
+      - ""
+{% endif %}
+
+{% if SURICATABPF and not SURICATA_BPF_STATUS %}
+suribpfcompilationfailure:
+  test.configurable_test_state:
+    - changes: False
+    - result: False
+    - comment: "BPF Syntax Error - Discarding Specified BPF. Error: {{ SURICATA_BPF_CALC['stderr'] }}"
+{% endif %}
+
 # Add Suricata Group
 suricatagroup:

@@ -135,32 +173,6 @@ suriclassifications:
     - user: 940
     - group: 940

-# BPF compilation and configuration
-{% if SURICATABPF %}
-{% set BPF_CALC = salt['cmd.script']('salt://common/tools/sbin/so-bpf-compile', GLOBALS.sensor.interface + ' ' + SURICATABPF|join(" "),cwd='/root') %}
-{% if BPF_CALC['stderr'] == "" %}
-{% set BPF_STATUS = 1 %}
-{% else %}
-suribpfcompilationfailure:
-  test.configurable_test_state:
-    - changes: False
-    - result: False
-    - comment: "BPF Syntax Error - Discarding Specified BPF"
-{% endif %}
-{% endif %}
-
-suribpf:
-  file.managed:
-    - name: /opt/so/conf/suricata/bpf
-    - user: 940
-    - group: 940
-{% if BPF_STATUS %}
-    - contents: {{ SURICATABPF }}
-{% else %}
-    - contents:
-      - ""
-{% endif %}
-
 so-suricata-eve-clean:
   file.managed:
     - name: /usr/sbin/so-suricata-eve-clean

@@ -34,7 +34,7 @@ suricata:
       threads: 1
       tpacket-v3: "yes"
       ring-size: 5000
-      block-size: 32768
+      block-size: 69632
       block-timeout: 10
       use-emergency-flush: "yes"
       buffer-size: 32768

@@ -97,6 +97,11 @@ suricata:
         - 4789
       TEREDO_PORTS:
         - 3544
+      SIP_PORTS:
+        - 5060
+        - 5061
+      GENEVE_PORTS:
+        - 6081
     default-log-dir: /var/log/suricata/
     stats:
       enabled: "yes"

@@ -134,14 +139,6 @@ suricata:
             header: X-Forwarded-For
-          unified2-alert:
-            enabled: "no"
-          http-log:
-            enabled: "no"
-            filename: http.log
-            append: "yes"
-          tls-log:
-            enabled: "no"
-            filename: tls.log
-            append: "yes"
           tls-store:
             enabled: "no"
           pcap-log:

@@ -157,9 +154,6 @@ suricata:
           totals: "yes"
           threads: "no"
           null-values: "yes"
-        syslog:
-          enabled: "no"
-          facility: local5
         drop:
           enabled: "no"
         file-store:

@@ -206,6 +200,9 @@ suricata:
         enabled: "yes"
         detection-ports:
           dp: 443
+        ja3-fingerprints: auto
+        ja4-fingerprints: auto
+        encryption-handling: track-only
       dcerpc:
         enabled: "yes"
       ftp:

@@ -255,19 +252,21 @@ suricata:
       libhtp:
         default-config:
           personality: IDS
-          request-body-limit: 100kb
-          response-body-limit: 100kb
-          request-body-minimal-inspect-size: 32kb
-          request-body-inspect-window: 4kb
-          response-body-minimal-inspect-size: 40kb
-          response-body-inspect-window: 16kb
+          request-body-limit: 100 KiB
+          response-body-limit: 100 KiB
+          request-body-minimal-inspect-size: 32 KiB
+          request-body-inspect-window: 4 KiB
+          response-body-minimal-inspect-size: 40 KiB
+          response-body-inspect-window: 16 KiB
           response-body-decompress-layer-limit: 2
           http-body-inline: auto
           swf-decompression:
-            enabled: "yes"
+            enabled: "no"
             type: both
-            compress-depth: 0
-            decompress-depth: 0
+            compress-depth: 100 KiB
+            decompress-depth: 100 KiB
           randomize-inspection-sizes: "yes"
           randomize-inspection-range: 10
           double-decode-path: "no"
           double-decode-query: "no"
         server-config:

@@ -401,8 +400,12 @@ suricata:
       vxlan:
         enabled: true
         ports: $VXLAN_PORTS
       erspan:
+      geneve:
+        enabled: true
+        ports: $GENEVE_PORTS
+      max-layers: 16
       recursion-level:
         use-for-tracking: true
     detect:
       profile: medium
       custom-values:

@@ -422,7 +425,12 @@ suricata:
       spm-algo: auto
       luajit:
         states: 128
+
+    security:
+      lua:
+        allow-rules: false
+        max-bytes: 500000
+        max-instructions: 500000
+        allow-restricted-functions: false
     profiling:
       rules:
         enabled: "yes"

@@ -10,6 +10,12 @@

 {# before we change outputs back to list, enable pcap-log if suricata is the pcapengine #}
 {% if GLOBALS.pcap_engine in ["SURICATA", "TRANSITION"] %}
+
+{% from 'bpf/pcap.map.jinja' import PCAPBPF, PCAP_BPF_STATUS %}
+{% if PCAPBPF and PCAP_BPF_STATUS %}
+{% do SURICATAMERGED.config.outputs['pcap-log'].update({'bpf-filter': PCAPBPF|join(" ")}) %}
+{% endif %}
+
 {% do SURICATAMERGED.config.outputs['pcap-log'].update({'enabled': 'yes'}) %}
 {# move the items in suricata.pcap into suricata.config.outputs.pcap-log. these items were placed under suricata.config for ease of access in SOC #}
 {% do SURICATAMERGED.config.outputs['pcap-log'].update({'compression': SURICATAMERGED.pcap.compression}) %}

@@ -190,6 +190,8 @@ suricata:
       FTP_PORTS: *suriportgroup
       VXLAN_PORTS: *suriportgroup
       TEREDO_PORTS: *suriportgroup
+      SIP_PORTS: *suriportgroup
+      GENEVE_PORTS: *suriportgroup
     outputs:
       eve-log:
         types:

@@ -209,7 +211,7 @@ suricata:
         helpLink: suricata.html
       pcap-log:
         enabled:
-          description: This value is ignored by SO. pcapengine in globals takes precidence.
+          description: This value is ignored by SO. pcapengine in globals takes precedence.
           readonly: True
           helpLink: suricata.html
           advanced: True

@@ -297,3 +299,10 @@ suricata:
         ports:
           description: Ports to listen for. This should be a variable.
           helpLink: suricata.html
+      geneve:
+        enabled:
+          description: Enable GENEVE capabilities.
+          helpLink: suricata.html
+        ports:
+          description: Ports to listen for. This should be a variable.
+          helpLink: suricata.html

@@ -7,5 +7,5 @@

 . /usr/sbin/so-common

-retry 60 3 'docker exec so-suricata /opt/suricata/bin/suricatasc -c reload-rules /var/run/suricata/suricata-command.socket' '{"message": "done", "return": "OK"}' || fail "The Suricata container was not ready in time."
-retry 60 3 'docker exec so-suricata /opt/suricata/bin/suricatasc -c ruleset-reload-nonblocking /var/run/suricata/suricata-command.socket' '{"message": "done", "return": "OK"}' || fail "The Suricata container was not ready in time."
+retry 60 3 'docker exec so-suricata /opt/suricata/bin/suricatasc -c reload-rules /var/run/suricata/suricata-command.socket' '{"message":"done","return":"OK"}' || fail "The Suricata container was not ready in time."
+retry 60 3 'docker exec so-suricata /opt/suricata/bin/suricatasc -c ruleset-reload-nonblocking /var/run/suricata/suricata-command.socket' '{"message":"done","return":"OK"}' || fail "The Suricata container was not ready in time."

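Note: the expected-response strings were tightened to compact JSON (no spaces after : and ,), presumably to match what suricatasc actually prints. The difference in Python terms:

    import json
    msg = {"message": "done", "return": "OK"}
    json.dumps(msg)                         # '{"message": "done", "return": "OK"}'
    json.dumps(msg, separators=(",", ":")) # '{"message":"done","return":"OK"}'
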
@@ -8,8 +8,7 @@

 {% from 'vars/globals.map.jinja' import GLOBALS %}
 {% from "zeek/config.map.jinja" import ZEEKMERGED %}
-{% from 'bpf/zeek.map.jinja' import ZEEKBPF %}
-{% set BPF_STATUS = 0 %}
+{% from 'bpf/zeek.map.jinja' import ZEEKBPF, ZEEK_BPF_STATUS, ZEEK_BPF_CALC %}

 # Add Zeek group
 zeekgroup:

@@ -158,18 +157,13 @@ zeekja4cfg:
   - user: 937
   - group: 939

-# BPF compilation and configuration
-{% if ZEEKBPF %}
-{% set BPF_CALC = salt['cmd.script']('salt://common/tools/sbin/so-bpf-compile', GLOBALS.sensor.interface + ' ' + ZEEKBPF|join(" "),cwd='/root') %}
-{% if BPF_CALC['stderr'] == "" %}
-{% set BPF_STATUS = 1 %}
-{% else %}
+# BPF compilation failed
+{% if ZEEKBPF and not ZEEK_BPF_STATUS %}
 zeekbpfcompilationfailure:
   test.configurable_test_state:
     - changes: False
     - result: False
-    - comment: "BPF Syntax Error - Discarding Specified BPF"
-{% endif %}
+    - comment: "BPF Syntax Error - Discarding Specified BPF. Error: {{ ZEEK_BPF_CALC['stderr'] }}"
 {% endif %}

 zeekbpf:

@@ -177,7 +171,7 @@ zeekbpf:
   - name: /opt/so/conf/zeek/bpf
   - user: 940
   - group: 940
-{% if BPF_STATUS %}
+{% if ZEEK_BPF_STATUS %}
   - contents: {{ ZEEKBPF }}
 {% else %}
   - contents:

@@ -676,8 +676,8 @@ whiptail_install_type_dist_existing() {
 EOM

   install_type=$(whiptail --title "$whiptail_title" --menu "$node_msg" 19 75 7 \
-    "SENSOR" "Create a forward only sensor " \
-    "SEARCHNODE" "Add a search node with parsing " \
+    "SENSOR" "Add a Sensor Node for monitoring network traffic " \
+    "SEARCHNODE" "Add a Search Node with parsing " \
     "FLEET" "Dedicated Elastic Fleet Node " \
     "HEAVYNODE" "Sensor + Search Node " \
     "IDH" "Intrusion Detection Honeypot Node " \