Mirror of https://github.com/Security-Onion-Solutions/securityonion.git
Synced 2025-12-06 09:12:45 +01:00

Compare commits: cc8fb96047...delta

3 commits:
  2f6fb717c1
  ded520c2c1
  a77157391c
.github/workflows/pythontest.yml (vendored, 2 changed lines)

@@ -4,7 +4,7 @@ on:
   pull_request:
     paths:
       - "salt/sensoroni/files/analyzers/**"
-      - "salt/manager/tools/sbin/**"
+      - "salt/manager/tools/sbin"
 
 jobs:
   build:
@@ -43,8 +43,6 @@ base:
     - secrets
     - manager.soc_manager
     - manager.adv_manager
-    - idstools.soc_idstools
-    - idstools.adv_idstools
     - logstash.nodes
     - logstash.soc_logstash
     - logstash.adv_logstash

@@ -117,8 +115,6 @@ base:
     - elastalert.adv_elastalert
     - manager.soc_manager
     - manager.adv_manager
-    - idstools.soc_idstools
-    - idstools.adv_idstools
     - soc.soc_soc
     - soc.adv_soc
     - kibana.soc_kibana

@@ -158,8 +154,6 @@ base:
 {% endif %}
     - secrets
     - healthcheck.standalone
-    - idstools.soc_idstools
-    - idstools.adv_idstools
     - kratos.soc_kratos
     - kratos.adv_kratos
     - hydra.soc_hydra
@@ -38,7 +38,6 @@
     'hydra',
     'elasticfleet',
     'elastic-fleet-package-registry',
-    'idstools',
     'suricata.manager',
     'utility'
 ] %}
@@ -1,7 +1,4 @@
 {% from 'vars/globals.map.jinja' import GLOBALS %}
-{% set PCAP_BPF_STATUS = 0 %}
-{% set STENO_BPF_COMPILED = "" %}
-
 {% if GLOBALS.pcap_engine == "TRANSITION" %}
   {% set PCAPBPF = ["ip and host 255.255.255.1 and port 1"] %}
 {% else %}

@@ -11,11 +8,3 @@
 {{ MACROS.remove_comments(BPFMERGED, 'pcap') }}
 {% set PCAPBPF = BPFMERGED.pcap %}
 {% endif %}
-
-{% if PCAPBPF %}
-  {% set PCAP_BPF_CALC = salt['cmd.run_all']('/usr/sbin/so-bpf-compile ' ~ GLOBALS.sensor.interface ~ ' ' ~ PCAPBPF|join(" "), cwd='/root') %}
-  {% if PCAP_BPF_CALC['retcode'] == 0 %}
-    {% set PCAP_BPF_STATUS = 1 %}
-    {% set STENO_BPF_COMPILED = ",\\\"--filter=" + PCAP_BPF_CALC['stdout'] + "\\\"" %}
-  {% endif %}
-{% endif %}
@@ -1,11 +1,11 @@
 bpf:
   pcap:
-    description: List of BPF filters to apply to the PCAP engine.
+    description: List of BPF filters to apply to Stenographer.
     multiline: True
     forcedType: "[]string"
     helpLink: bpf.html
   suricata:
-    description: List of BPF filters to apply to Suricata. This will apply to alerts and, if enabled, to metadata and PCAP logs generated by Suricata.
+    description: List of BPF filters to apply to Suricata.
     multiline: True
     forcedType: "[]string"
     helpLink: bpf.html
@@ -1,16 +1,7 @@
-{% from 'vars/globals.map.jinja' import GLOBALS %}
 {% import_yaml 'bpf/defaults.yaml' as BPFDEFAULTS %}
 {% set BPFMERGED = salt['pillar.get']('bpf', BPFDEFAULTS.bpf, merge=True) %}
-{% set SURICATA_BPF_STATUS = 0 %}
 {% import 'bpf/macros.jinja' as MACROS %}
 
 {{ MACROS.remove_comments(BPFMERGED, 'suricata') }}
 
 {% set SURICATABPF = BPFMERGED.suricata %}
-
-{% if SURICATABPF %}
-  {% set SURICATA_BPF_CALC = salt['cmd.run_all']('/usr/sbin/so-bpf-compile ' ~ GLOBALS.sensor.interface ~ ' ' ~ SURICATABPF|join(" "), cwd='/root') %}
-  {% if SURICATA_BPF_CALC['retcode'] == 0 %}
-    {% set SURICATA_BPF_STATUS = 1 %}
-  {% endif %}
-{% endif %}
@@ -1,16 +1,7 @@
-{% from 'vars/globals.map.jinja' import GLOBALS %}
 {% import_yaml 'bpf/defaults.yaml' as BPFDEFAULTS %}
 {% set BPFMERGED = salt['pillar.get']('bpf', BPFDEFAULTS.bpf, merge=True) %}
-{% set ZEEK_BPF_STATUS = 0 %}
 {% import 'bpf/macros.jinja' as MACROS %}
 
 {{ MACROS.remove_comments(BPFMERGED, 'zeek') }}
 
 {% set ZEEKBPF = BPFMERGED.zeek %}
-
-{% if ZEEKBPF %}
-  {% set ZEEK_BPF_CALC = salt['cmd.run_all']('/usr/sbin/so-bpf-compile ' ~ GLOBALS.sensor.interface ~ ' ' ~ ZEEKBPF|join(" "), cwd='/root') %}
-  {% if ZEEK_BPF_CALC['retcode'] == 0 %}
-    {% set ZEEK_BPF_STATUS = 1 %}
-  {% endif %}
-{% endif %}
@@ -29,26 +29,9 @@ fi
 
 interface="$1"
 shift
-
-# Capture tcpdump output and exit code
-tcpdump_output=$(tcpdump -i "$interface" -ddd "$@" 2>&1)
-tcpdump_exit=$?
-
-if [ $tcpdump_exit -ne 0 ]; then
-    echo "$tcpdump_output" >&2
-    exit $tcpdump_exit
-fi
-
-# Process the output, skipping the first line
-echo "$tcpdump_output" | tail -n+2 | while read -r line; do
+tcpdump -i $interface -ddd $@ | tail -n+2 |
+while read line; do
     cols=( $line )
-    printf "%04x%02x%02x%08x" "${cols[0]}" "${cols[1]}" "${cols[2]}" "${cols[3]}"
+    printf "%04x%02x%02x%08x" ${cols[0]} ${cols[1]} ${cols[2]} ${cols[3]}
 done
-
-# Check if the pipeline succeeded
-if [ "${PIPESTATUS[0]}" -ne 0 ]; then
-    exit 1
-fi
-
 echo ""
-exit 0
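For orientation (not part of the diff): both versions of so-bpf-compile lean on `tcpdump -ddd`, which prints the compiled classic-BPF program as one "opcode jt jf k" row of decimal numbers per instruction, preceded by an instruction-count line that `tail -n+2` discards. A minimal sketch of the same packing step, with an assumed interface name and an illustrative filter:

    # Sketch: compile a BPF expression and pack it into the hex string
    # consumed downstream (interface and filter are made-up examples).
    tcpdump -i eth0 -ddd 'not host 10.0.0.5' | tail -n+2 | \
    while read -r line; do
        cols=( $line )                  # opcode, jt, jf, k (decimal)
        printf "%04x%02x%02x%08x" "${cols[0]}" "${cols[1]}" "${cols[2]}" "${cols[3]}"
    done
    echo ""                             # trailing newline, as in the script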
@@ -395,7 +395,7 @@ is_manager_node() {
 }
 
 is_sensor_node() {
-  # Check to see if this is a sensor node
+  # Check to see if this is a sensor (forward) node
   is_single_node_grid && return 0
   grep "role: so-" /etc/salt/grains | grep -E "sensor|heavynode" &> /dev/null
 }
@@ -25,7 +25,6 @@ container_list() {
   if [ $MANAGERCHECK == 'so-import' ]; then
     TRUSTED_CONTAINERS=(
       "so-elasticsearch"
-      "so-idstools"
       "so-influxdb"
       "so-kibana"
       "so-kratos"

@@ -49,7 +48,6 @@ container_list() {
       "so-elastic-fleet-package-registry"
       "so-elasticsearch"
       "so-idh"
-      "so-idstools"
       "so-influxdb"
       "so-kafka"
       "so-kibana"

@@ -69,7 +67,6 @@ container_list() {
     )
   else
     TRUSTED_CONTAINERS=(
-      "so-idstools"
       "so-elasticsearch"
       "so-logstash"
       "so-nginx"
@@ -24,11 +24,6 @@ docker:
       custom_bind_mounts: []
       extra_hosts: []
      extra_env: []
-    'so-idstools':
-      final_octet: 25
-      custom_bind_mounts: []
-      extra_hosts: []
-      extra_env: []
     'so-influxdb':
       final_octet: 26
       port_bindings:
@@ -41,7 +41,6 @@ docker:
         forcedType: "[]string"
     so-elastic-fleet: *dockerOptions
     so-elasticsearch: *dockerOptions
-    so-idstools: *dockerOptions
     so-influxdb: *dockerOptions
     so-kibana: *dockerOptions
     so-kratos: *dockerOptions

@@ -102,4 +101,4 @@ docker:
         multiline: True
         forcedType: "[]string"
     so-zeek: *dockerOptions
     so-kafka: *dockerOptions
@@ -1,34 +0,0 @@
-{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
-   or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
-   https://securityonion.net/license; you may not use this file except in compliance with the
-   Elastic License 2.0. #}
-
-{% from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED %}
-
-{# advanced config_yaml options for elasticfleet logstash output #}
-{% set ADV_OUTPUT_LOGSTASH_RAW = ELASTICFLEETMERGED.config.outputs.logstash %}
-{% set ADV_OUTPUT_LOGSTASH = {} %}
-{% for k, v in ADV_OUTPUT_LOGSTASH_RAW.items() %}
-  {% if v != "" and v is not none %}
-    {% if k == 'queue_mem_events' %}
-      {# rename queue_mem_events queue.mem.events #}
-      {% do ADV_OUTPUT_LOGSTASH.update({'queue.mem.events':v}) %}
-    {% elif k == 'loadbalance' %}
-      {% if v %}
-        {# only include loadbalance config when its True #}
-        {% do ADV_OUTPUT_LOGSTASH.update({k:v}) %}
-      {% endif %}
-    {% else %}
-      {% do ADV_OUTPUT_LOGSTASH.update({k:v}) %}
-    {% endif %}
-  {% endif %}
-{% endfor %}
-
-{% set LOGSTASH_CONFIG_YAML_RAW = [] %}
-{% if ADV_OUTPUT_LOGSTASH %}
-  {% for k, v in ADV_OUTPUT_LOGSTASH.items() %}
-    {% do LOGSTASH_CONFIG_YAML_RAW.append(k ~ ': ' ~ v) %}
-  {% endfor %}
-{% endif %}
-
-{% set LOGSTASH_CONFIG_YAML = LOGSTASH_CONFIG_YAML_RAW | join('\\n') if LOGSTASH_CONFIG_YAML_RAW else '' %}
@@ -10,19 +10,12 @@ elasticfleet:
   grid_enrollment: ''
   defend_filters:
     enable_auto_configuration: False
-  outputs:
-    logstash:
-      bulk_max_size: ''
-      worker: ''
-      queue_mem_events: ''
-      timeout: ''
-      loadbalance: False
-      compression_level: ''
   subscription_integrations: False
   auto_upgrade_integrations: False
   logging:
     zeek:
       excluded:
+        - analyzer
         - broker
         - capture_loss
         - cluster
@@ -121,9 +121,6 @@
       "phases": {
         "cold": {
           "actions": {
-            "allocate":{
-              "number_of_replicas": ""
-            },
             "set_priority": {"priority": 0}
           },
           "min_age": "60d"
@@ -140,31 +137,12 @@
             "max_age": "30d",
             "max_primary_shard_size": "50gb"
           },
-          "forcemerge":{
-            "max_num_segments": ""
-          },
-          "shrink":{
-            "max_primary_shard_size": "",
-            "method": "COUNT",
-            "number_of_shards": ""
-          },
           "set_priority": {"priority": 100}
         },
         "min_age": "0ms"
       },
       "warm": {
         "actions": {
-          "allocate": {
-            "number_of_replicas": ""
-          },
-          "forcemerge": {
-            "max_num_segments": ""
-          },
-          "shrink":{
-            "max_primary_shard_size": "",
-            "method": "COUNT",
-            "number_of_shards": ""
-          },
           "set_priority": {"priority": 50}
         },
         "min_age": "30d"
@@ -50,46 +50,6 @@ elasticfleet:
       global: True
       forcedType: bool
       helpLink: elastic-fleet.html
-  outputs:
-    logstash:
-      bulk_max_size:
-        description: The maximum number of events to bulk in a single Logstash request.
-        global: True
-        forcedType: int
-        advanced: True
-        helpLink: elastic-fleet.html
-      worker:
-        description: The number of workers per configured host publishing events.
-        global: True
-        forcedType: int
-        advanced: true
-        helpLink: elastic-fleet.html
-      queue_mem_events:
-        title: queued events
-        description: The number of events the queue can store. This value should be evenly divisible by the smaller of 'bulk_max_size' to avoid sending partial batches to the output.
-        global: True
-        forcedType: int
-        advanced: True
-        helpLink: elastic-fleet.html
-      timeout:
-        description: The number of seconds to wait for responses from the Logstash server before timing out. Eg 30s
-        regex: ^[0-9]+s$
-        advanced: True
-        global: True
-        helpLink: elastic-fleet.html
-      loadbalance:
-        description: If true and multiple Logstash hosts are configured, the output plugin load balances published events onto all Logstash hosts. If false, the output plugin sends all events to one host (determined at random) and switches to another host if the selected one becomes unresponsive.
-        forcedType: bool
-        advanced: True
-        global: True
-        helpLink: elastic-fleet.html
-      compression:
-        description: The gzip compression level. The compression level must be in the range of 1 (best speed) to 9 (best compression).
-        regex: ^[1-9]$
-        forcedType: int
-        advanced: True
-        global: True
-        helpLink: elastic-fleet.html
   server:
     custom_fqdn:
       description: Custom FQDN for Agents to connect to. One per line.
@@ -3,13 +3,11 @@
 # Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
 # or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
 # this file except in compliance with the Elastic License 2.0.
-{%- from 'vars/globals.map.jinja' import GLOBALS %}
-{%- from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED %}
-{%- from 'elasticfleet/config.map.jinja' import LOGSTASH_CONFIG_YAML %}
+{% from 'vars/globals.map.jinja' import GLOBALS %}
+{% from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED %}
 
 . /usr/sbin/so-common
 
-FORCE_UPDATE=false
 # Only run on Managers
 if ! is_manager_node; then
   printf "Not a Manager Node... Exiting"
@@ -24,7 +22,7 @@ function update_logstash_outputs() {
       --arg UPDATEDLIST "$NEW_LIST_JSON" \
       --argjson SECRETS "$SECRETS" \
       --argjson SSL_CONFIG "$SSL_CONFIG" \
-      '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":"{{ LOGSTASH_CONFIG_YAML }}","ssl": $SSL_CONFIG,"secrets": $SECRETS}')
+      '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":"","ssl": $SSL_CONFIG,"secrets": $SECRETS}')
   else
     JSON_STRING=$(jq -n \
       --arg UPDATEDLIST "$NEW_LIST_JSON" \
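For context on the jq idiom in this hunk: `--arg` injects a value as a JSON string, while `--argjson` splices pre-formed JSON, which is why $SECRETS and $SSL_CONFIG arrive unquoted inside the output object. A standalone sketch with made-up values:

    # Sketch (hypothetical values): build an output object the same way.
    SECRETS='{"ssl":{"key":"REDACTED"}}'
    jq -n \
      --arg NAME "grid-logstash" \
      --argjson SECRETS "$SECRETS" \
      '{"name": $NAME, "type": "logstash", "secrets": $SECRETS}'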
@@ -99,18 +97,9 @@ function update_kafka_outputs() {
     exit 1
   fi
 
-  CURRENT_LOGSTASH_ADV_CONFIG=$(jq -r '.item.config_yaml // ""' <<< "$RAW_JSON")
-  CURRENT_LOGSTASH_ADV_CONFIG_HASH=$(sha256sum <<< "$CURRENT_LOGSTASH_ADV_CONFIG" | awk '{print $1}')
-  NEW_LOGSTASH_ADV_CONFIG=$'{{ LOGSTASH_CONFIG_YAML }}'
-  NEW_LOGSTASH_ADV_CONFIG_HASH=$(sha256sum <<< "$NEW_LOGSTASH_ADV_CONFIG" | awk '{print $1}')
-
-  if [ "$CURRENT_LOGSTASH_ADV_CONFIG_HASH" != "$NEW_LOGSTASH_ADV_CONFIG_HASH" ]; then
-    FORCE_UPDATE=true
-  fi
-
   # Get the current list of Logstash outputs & hash them
   CURRENT_LIST=$(jq -c -r '.item.hosts' <<< "$RAW_JSON")
-  CURRENT_HASH=$(sha256sum <<< "$CURRENT_LIST" | awk '{print $1}')
+  CURRENT_HASH=$(sha1sum <<< "$CURRENT_LIST" | awk '{print $1}')
 
   declare -a NEW_LIST=()
 
@@ -159,10 +148,10 @@ function update_kafka_outputs() {
 
   # Sort & hash the new list of Logstash Outputs
   NEW_LIST_JSON=$(jq --compact-output --null-input '$ARGS.positional' --args -- "${NEW_LIST[@]}")
-  NEW_HASH=$(sha256sum <<< "$NEW_LIST_JSON" | awk '{print $1}')
+  NEW_HASH=$(sha1sum <<< "$NEW_LIST_JSON" | awk '{print $1}')
 
   # Compare the current & new list of outputs - if different, update the Logstash outputs
-  if [[ "$NEW_HASH" = "$CURRENT_HASH" ]] && [[ "$FORCE_UPDATE" != "true" ]]; then
+  if [ "$NEW_HASH" = "$CURRENT_HASH" ]; then
     printf "\nHashes match - no update needed.\n"
     printf "Current List: $CURRENT_LIST\nNew List: $NEW_LIST_JSON\n"
 
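The comparison above is a content-hash guard: hash the current host list and the candidate list, and skip the Fleet API call when they match. The same idiom in isolation, with invented lists (the digest algorithm is interchangeable here, since the hashes are only compared to each other):

    # Sketch of the hash-and-compare guard with made-up host lists.
    CURRENT='["logstash01:5055"]'
    NEW='["logstash01:5055","logstash02:5055"]'
    CURRENT_HASH=$(sha1sum <<< "$CURRENT" | awk '{print $1}')
    NEW_HASH=$(sha1sum <<< "$NEW" | awk '{print $1}')
    if [ "$NEW_HASH" = "$CURRENT_HASH" ]; then
        echo "Hashes match - no update needed."
    else
        echo "Lists differ - updating outputs."
    fi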
@@ -72,8 +72,6 @@ elasticsearch:
           actions:
             set_priority:
               priority: 0
-            allocate:
-              number_of_replicas: ""
           min_age: 60d
         delete:
           actions:
@@ -86,25 +84,11 @@ elasticsearch:
               max_primary_shard_size: 50gb
             set_priority:
               priority: 100
-            forcemerge:
-              max_num_segments: ""
-            shrink:
-              max_primary_shard_size: ""
-              method: COUNT
-              number_of_shards: ""
           min_age: 0ms
         warm:
           actions:
             set_priority:
               priority: 50
-            forcemerge:
-              max_num_segments: ""
-            shrink:
-              max_primary_shard_size: ""
-              method: COUNT
-              number_of_shards: ""
-            allocate:
-              number_of_replicas: ""
           min_age: 30d
       so-case:
         index_sorting: false
@@ -261,6 +245,7 @@ elasticsearch:
             set_priority:
               priority: 50
           min_age: 30d
+      warm: 7
       so-detection:
         index_sorting: false
         index_template:

@@ -599,6 +584,7 @@ elasticsearch:
             set_priority:
               priority: 50
           min_age: 30d
+      warm: 7
       so-import:
         index_sorting: false
         index_template:

@@ -946,6 +932,7 @@ elasticsearch:
             set_priority:
               priority: 50
           min_age: 30d
+      warm: 7
       so-hydra:
         close: 30
         delete: 365

@@ -1056,6 +1043,7 @@ elasticsearch:
             set_priority:
               priority: 50
           min_age: 30d
+      warm: 7
       so-lists:
         index_sorting: false
         index_template:
@@ -1139,8 +1127,6 @@ elasticsearch:
           actions:
             set_priority:
               priority: 0
-            allocate:
-              number_of_replicas: ""
           min_age: 60d
         delete:
           actions:
@@ -1153,25 +1139,11 @@ elasticsearch:
               max_primary_shard_size: 50gb
             set_priority:
               priority: 100
-            forcemerge:
-              max_num_segments: ""
-            shrink:
-              max_primary_shard_size: ""
-              method: COUNT
-              number_of_shards: ""
           min_age: 0ms
         warm:
           actions:
             set_priority:
               priority: 50
-            allocate:
-              number_of_replicas: ""
-            forcemerge:
-              max_num_segments: ""
-            shrink:
-              max_primary_shard_size: ""
-              method: COUNT
-              number_of_shards: ""
           min_age: 30d
       so-logs-detections_x_alerts:
         index_sorting: false
@@ -3151,6 +3123,7 @@ elasticsearch:
             set_priority:
               priority: 50
           min_age: 30d
+      warm: 7
       so-logs-system_x_application:
         index_sorting: false
         index_template:
@@ -1,79 +1,15 @@
 {
-  "description": "suricata.alert",
-  "processors": [
-    {
-      "set": {
-        "if": "ctx.event?.imported != true",
-        "field": "_index",
-        "value": "logs-suricata.alerts-so"
-      }
-    },
-    {
-      "set": {
-        "field": "tags",
-        "value": "alert"
-      }
-    },
-    {
-      "rename": {
-        "field": "message2.alert",
-        "target_field": "rule",
-        "ignore_missing": true,
-        "ignore_failure": true
-      }
-    },
-    {
-      "rename": {
-        "field": "rule.signature",
-        "target_field": "rule.name",
-        "ignore_missing": true,
-        "ignore_failure": true
-      }
-    },
-    {
-      "rename": {
-        "field": "rule.ref",
-        "target_field": "rule.version",
-        "ignore_missing": true,
-        "ignore_failure": true
-      }
-    },
-    {
-      "rename": {
-        "field": "rule.signature_id",
-        "target_field": "rule.uuid",
-        "ignore_missing": true,
-        "ignore_failure": true
-      }
-    },
-    {
-      "rename": {
-        "field": "rule.signature_id",
-        "target_field": "rule.signature",
-        "ignore_missing": true,
-        "ignore_failure": true
-      }
-    },
-    {
-      "rename": {
-        "field": "message2.payload_printable",
-        "target_field": "network.data.decoded",
-        "ignore_missing": true,
-        "ignore_failure": true
-      }
-    },
-    {
-      "dissect": {
-        "field": "rule.rule",
-        "pattern": "%{?prefix}content:\"%{dns.query_name}\"%{?remainder}",
-        "ignore_missing": true,
-        "ignore_failure": true
-      }
-    },
-    {
-      "pipeline": {
-        "name": "common.nids"
-      }
-    }
-  ]
+  "description" : "suricata.alert",
+  "processors" : [
+    { "set": { "if": "ctx.event?.imported != true", "field": "_index", "value": "logs-suricata.alerts-so" } },
+    { "set": { "field": "tags","value": "alert" }},
+    { "rename":{ "field": "message2.alert", "target_field": "rule", "ignore_failure": true } },
+    { "rename":{ "field": "rule.signature", "target_field": "rule.name", "ignore_failure": true } },
+    { "rename":{ "field": "rule.ref", "target_field": "rule.version", "ignore_failure": true } },
+    { "rename":{ "field": "rule.signature_id", "target_field": "rule.uuid", "ignore_failure": true } },
+    { "rename":{ "field": "rule.signature_id", "target_field": "rule.signature", "ignore_failure": true } },
+    { "rename":{ "field": "message2.payload_printable", "target_field": "network.data.decoded", "ignore_failure": true } },
+    { "dissect": { "field": "rule.rule", "pattern": "%{?prefix}content:\"%{dns.query_name}\"%{?remainder}", "ignore_missing": true, "ignore_failure": true } },
+    { "pipeline": { "name": "common.nids" } }
+  ]
 }
@@ -1,136 +1,21 @@
 {
-  "description": "suricata.dns",
-  "processors": [
-    {
-      "rename": {
-        "field": "message2.proto",
-        "target_field": "network.transport",
-        "ignore_missing": true
-      }
-    },
-    {
-      "rename": {
-        "field": "message2.app_proto",
-        "target_field": "network.protocol",
-        "ignore_missing": true
-      }
-    },
-    {
-      "rename": {
-        "field": "message2.dns.type",
-        "target_field": "dns.query.type",
-        "ignore_missing": true
-      }
-    },
-    {
-      "rename": {
-        "field": "message2.dns.tx_id",
-        "target_field": "dns.tx_id",
-        "ignore_missing": true
-      }
-    },
-    {
-      "rename": {
-        "field": "message2.dns.id",
-        "target_field": "dns.id",
-        "ignore_missing": true
-      }
-    },
-    {
-      "rename": {
-        "field": "message2.dns.version",
-        "target_field": "dns.version",
-        "ignore_missing": true
-      }
-    },
-    {
-      "pipeline": {
-        "name": "suricata.dnsv3",
-        "ignore_missing_pipeline": true,
-        "if": "ctx?.dns?.version != null && ctx?.dns?.version == 3",
-        "ignore_failure": true
-      }
-    },
-    {
-      "rename": {
-        "field": "message2.dns.rrname",
-        "target_field": "dns.query.name",
-        "ignore_missing": true
-      }
-    },
-    {
-      "rename": {
-        "field": "message2.dns.rrtype",
-        "target_field": "dns.query.type_name",
-        "ignore_missing": true
-      }
-    },
-    {
-      "rename": {
-        "field": "message2.dns.flags",
-        "target_field": "dns.flags",
-        "ignore_missing": true
-      }
-    },
-    {
-      "rename": {
-        "field": "message2.dns.qr",
-        "target_field": "dns.qr",
-        "ignore_missing": true
-      }
-    },
-    {
-      "rename": {
-        "field": "message2.dns.rd",
-        "target_field": "dns.recursion.desired",
-        "ignore_missing": true
-      }
-    },
-    {
-      "rename": {
-        "field": "message2.dns.ra",
-        "target_field": "dns.recursion.available",
-        "ignore_missing": true
-      }
-    },
-    {
-      "rename": {
-        "field": "message2.dns.opcode",
-        "target_field": "dns.opcode",
-        "ignore_missing": true
-      }
-    },
-    {
-      "rename": {
-        "field": "message2.dns.rcode",
-        "target_field": "dns.response.code_name",
-        "ignore_missing": true
-      }
-    },
-    {
-      "rename": {
-        "field": "message2.dns.grouped.A",
-        "target_field": "dns.answers.data",
-        "ignore_missing": true
-      }
-    },
-    {
-      "rename": {
-        "field": "message2.dns.grouped.CNAME",
-        "target_field": "dns.answers.name",
-        "ignore_missing": true
-      }
-    },
-    {
-      "pipeline": {
-        "if": "ctx.dns.query?.name != null && ctx.dns.query.name.contains('.')",
-        "name": "dns.tld"
-      }
-    },
-    {
-      "pipeline": {
-        "name": "common"
-      }
-    }
-  ]
-}
+  "description" : "suricata.dns",
+  "processors" : [
+    { "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_missing": true } },
+    { "rename": { "field": "message2.app_proto", "target_field": "network.protocol", "ignore_missing": true } },
+    { "rename": { "field": "message2.dns.type", "target_field": "dns.query.type", "ignore_missing": true } },
+    { "rename": { "field": "message2.dns.tx_id", "target_field": "dns.id", "ignore_missing": true } },
+    { "rename": { "field": "message2.dns.version", "target_field": "dns.version", "ignore_missing": true } },
+    { "rename": { "field": "message2.dns.rrname", "target_field": "dns.query.name", "ignore_missing": true } },
+    { "rename": { "field": "message2.dns.rrtype", "target_field": "dns.query.type_name", "ignore_missing": true } },
+    { "rename": { "field": "message2.dns.flags", "target_field": "dns.flags", "ignore_missing": true } },
+    { "rename": { "field": "message2.dns.qr", "target_field": "dns.qr", "ignore_missing": true } },
+    { "rename": { "field": "message2.dns.rd", "target_field": "dns.recursion.desired", "ignore_missing": true } },
+    { "rename": { "field": "message2.dns.ra", "target_field": "dns.recursion.available", "ignore_missing": true } },
+    { "rename": { "field": "message2.dns.rcode", "target_field": "dns.response.code_name", "ignore_missing": true } },
+    { "rename": { "field": "message2.dns.grouped.A", "target_field": "dns.answers.data", "ignore_missing": true } },
+    { "rename": { "field": "message2.dns.grouped.CNAME", "target_field": "dns.answers.name", "ignore_missing": true } },
+    { "pipeline": { "if": "ctx.dns.query?.name != null && ctx.dns.query.name.contains('.')", "name": "dns.tld" } },
+    { "pipeline": { "name": "common" } }
+  ]
+}
@@ -1,56 +0,0 @@
-{
-  "processors": [
-    {
-      "rename": {
-        "field": "message2.dns.queries",
-        "target_field": "dns.queries",
-        "ignore_missing": true,
-        "ignore_failure": true
-      }
-    },
-    {
-      "script": {
-        "source": "if (ctx?.dns?.queries != null && ctx?.dns?.queries.length > 0) {\n if (ctx.dns == null) {\n ctx.dns = new HashMap();\n }\n if (ctx.dns.query == null) {\n ctx.dns.query = new HashMap();\n }\n ctx.dns.query.name = ctx?.dns?.queries[0].rrname;\n}"
-      }
-    },
-    {
-      "script": {
-        "source": "if (ctx?.dns?.queries != null && ctx?.dns?.queries.length > 0) {\n if (ctx.dns == null) {\n ctx.dns = new HashMap();\n }\n if (ctx.dns.query == null) {\n ctx.dns.query = new HashMap();\n }\n ctx.dns.query.type_name = ctx?.dns?.queries[0].rrtype;\n}"
-      }
-    },
-    {
-      "foreach": {
-        "field": "dns.queries",
-        "processor": {
-          "rename": {
-            "field": "_ingest._value.rrname",
-            "target_field": "_ingest._value.name",
-            "ignore_missing": true
-          }
-        },
-        "ignore_failure": true
-      }
-    },
-    {
-      "foreach": {
-        "field": "dns.queries",
-        "processor": {
-          "rename": {
-            "field": "_ingest._value.rrtype",
-            "target_field": "_ingest._value.type_name",
-            "ignore_missing": true
-          }
-        },
-        "ignore_failure": true
-      }
-    },
-    {
-      "pipeline": {
-        "name": "suricata.tld",
-        "ignore_missing_pipeline": true,
-        "if": "ctx?.dns?.queries != null && ctx?.dns?.queries.length > 0",
-        "ignore_failure": true
-      }
-    }
-  ]
-}
@@ -1,52 +0,0 @@
-{
-  "processors": [
-    {
-      "script": {
-        "source": "if (ctx.dns != null && ctx.dns.queries != null) {\n for (def q : ctx.dns.queries) {\n if (q.name != null && q.name.contains('.')) {\n q.top_level_domain = q.name.substring(q.name.lastIndexOf('.') + 1);\n }\n }\n}",
-        "ignore_failure": true
-      }
-    },
-    {
-      "script": {
-        "source": "if (ctx.dns != null && ctx.dns.queries != null) {\n for (def q : ctx.dns.queries) {\n if (q.name != null && q.name.contains('.')) {\n q.query_without_tld = q.name.substring(0, q.name.lastIndexOf('.'));\n }\n }\n}",
-        "ignore_failure": true
-      }
-    },
-    {
-      "script": {
-        "source": "if (ctx.dns != null && ctx.dns.queries != null) {\n for (def q : ctx.dns.queries) {\n if (q.query_without_tld != null && q.query_without_tld.contains('.')) {\n q.parent_domain = q.query_without_tld.substring(q.query_without_tld.lastIndexOf('.') + 1);\n }\n }\n}",
-        "ignore_failure": true
-      }
-    },
-    {
-      "script": {
-        "source": "if (ctx.dns != null && ctx.dns.queries != null) {\n for (def q : ctx.dns.queries) {\n if (q.query_without_tld != null && q.query_without_tld.contains('.')) {\n q.subdomain = q.query_without_tld.substring(0, q.query_without_tld.lastIndexOf('.'));\n }\n }\n}",
-        "ignore_failure": true
-      }
-    },
-    {
-      "script": {
-        "source": "if (ctx.dns != null && ctx.dns.queries != null) {\n for (def q : ctx.dns.queries) {\n if (q.parent_domain != null && q.top_level_domain != null) {\n q.highest_registered_domain = q.parent_domain + \".\" + q.top_level_domain;\n }\n }\n}",
-        "ignore_failure": true
-      }
-    },
-    {
-      "script": {
-        "source": "if (ctx.dns != null && ctx.dns.queries != null) {\n for (def q : ctx.dns.queries) {\n if (q.subdomain != null) {\n q.subdomain_length = q.subdomain.length();\n }\n }\n}",
-        "ignore_failure": true
-      }
-    },
-    {
-      "script": {
-        "source": "if (ctx.dns != null && ctx.dns.queries != null) {\n for (def q : ctx.dns.queries) {\n if (q.parent_domain != null) {\n q.parent_domain_length = q.parent_domain.length();\n }\n }\n}",
-        "ignore_failure": true
-      }
-    },
-    {
-      "script": {
-        "source": "if (ctx.dns != null && ctx.dns.queries != null) {\n for (def q : ctx.dns.queries) {\n q.remove('query_without_tld');\n }\n}",
-        "ignore_failure": true
-      }
-    }
-  ]
-}
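The deleted pipeline above derives top_level_domain, parent_domain, subdomain, and highest_registered_domain from each query name with repeated lastIndexOf('.') substrings. The same string slicing, sketched in shell for a single hypothetical name (the Painless versions additionally re-check that the remainder contains a dot before deriving parent/subdomain):

    # Sketch: decompose one DNS name the way the removed scripts did.
    q="www.example.com"            # hypothetical query name
    if [[ "$q" == *.* ]]; then
        tld="${q##*.}"             # top_level_domain    -> com
        rest="${q%.*}"             # query_without_tld   -> www.example
        parent="${rest##*.}"       # parent_domain       -> example
        sub="${rest%.*}"           # subdomain           -> www
        echo "highest_registered_domain=${parent}.${tld}"
        echo "subdomain_length=${#sub} parent_domain_length=${#parent}"
    fi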
@@ -1,61 +0,0 @@
-{
-  "description": "zeek.analyzer",
-  "processors": [
-    {
-      "set": {
-        "field": "event.dataset",
-        "value": "analyzer"
-      }
-    },
-    {
-      "remove": {
-        "field": [
-          "host"
-        ],
-        "ignore_failure": true
-      }
-    },
-    {
-      "json": {
-        "field": "message",
-        "target_field": "message2",
-        "ignore_failure": true
-      }
-    },
-    {
-      "set": {
-        "field": "network.protocol",
-        "copy_from": "message2.analyzer_name",
-        "ignore_empty_value": true,
-        "if": "ctx?.message2?.analyzer_kind == 'protocol'"
-      }
-    },
-    {
-      "set": {
-        "field": "network.protocol",
-        "ignore_empty_value": true,
-        "if": "ctx?.message2?.analyzer_kind != 'protocol'",
-        "copy_from": "message2.proto"
-      }
-    },
-    {
-      "lowercase": {
-        "field": "network.protocol",
-        "ignore_missing": true,
-        "ignore_failure": true
-      }
-    },
-    {
-      "rename": {
-        "field": "message2.failure_reason",
-        "target_field": "error.reason",
-        "ignore_missing": true
-      }
-    },
-    {
-      "pipeline": {
-        "name": "zeek.common"
-      }
-    }
-  ]
-}
@@ -1,227 +1,35 @@
 {
-  "description": "zeek.dns",
-  "processors": [
-    {
-      "set": {
-        "field": "event.dataset",
-        "value": "dns"
-      }
-    },
-    {
-      "remove": {
-        "field": [
-          "host"
-        ],
-        "ignore_failure": true
-      }
-    },
-    {
-      "json": {
-        "field": "message",
-        "target_field": "message2",
-        "ignore_failure": true
-      }
-    },
-    {
-      "dot_expander": {
-        "field": "id.orig_h",
-        "path": "message2",
-        "ignore_failure": true
-      }
-    },
-    {
-      "rename": {
-        "field": "message2.proto",
-        "target_field": "network.transport",
-        "ignore_missing": true
-      }
-    },
-    {
-      "rename": {
-        "field": "message2.trans_id",
-        "target_field": "dns.id",
-        "ignore_missing": true
-      }
-    },
-    {
-      "rename": {
-        "field": "message2.rtt",
-        "target_field": "event.duration",
-        "ignore_missing": true
-      }
-    },
-    {
-      "rename": {
-        "field": "message2.query",
-        "target_field": "dns.query.name",
-        "ignore_missing": true
-      }
-    },
-    {
-      "rename": {
-        "field": "message2.qclass",
-        "target_field": "dns.query.class",
-        "ignore_missing": true
-      }
-    },
-    {
-      "rename": {
-        "field": "message2.qclass_name",
-        "target_field": "dns.query.class_name",
-        "ignore_missing": true
-      }
-    },
-    {
-      "rename": {
-        "field": "message2.qtype",
-        "target_field": "dns.query.type",
-        "ignore_missing": true
-      }
-    },
-    {
-      "rename": {
-        "field": "message2.qtype_name",
-        "target_field": "dns.query.type_name",
-        "ignore_missing": true
-      }
-    },
-    {
-      "rename": {
-        "field": "message2.rcode",
-        "target_field": "dns.response.code",
-        "ignore_missing": true
-      }
-    },
-    {
-      "rename": {
-        "field": "message2.rcode_name",
-        "target_field": "dns.response.code_name",
-        "ignore_missing": true
-      }
-    },
-    {
-      "rename": {
-        "field": "message2.AA",
-        "target_field": "dns.authoritative",
-        "ignore_missing": true
-      }
-    },
-    {
-      "rename": {
-        "field": "message2.TC",
-        "target_field": "dns.truncated",
-        "ignore_missing": true
-      }
-    },
-    {
-      "rename": {
-        "field": "message2.RD",
-        "target_field": "dns.recursion.desired",
-        "ignore_missing": true
-      }
-    },
-    {
-      "rename": {
-        "field": "message2.RA",
-        "target_field": "dns.recursion.available",
-        "ignore_missing": true
-      }
-    },
-    {
-      "rename": {
-        "field": "message2.Z",
-        "target_field": "dns.reserved",
-        "ignore_missing": true
-      }
-    },
-    {
-      "rename": {
-        "field": "message2.answers",
-        "target_field": "dns.answers.name",
-        "ignore_missing": true
-      }
-    },
-    {
-      "foreach": {
-        "field": "dns.answers.name",
-        "processor": {
-          "pipeline": {
-            "name": "common.ip_validation"
-          }
-        },
-        "if": "ctx.dns != null && ctx.dns.answers != null && ctx.dns.answers.name != null",
-        "ignore_failure": true
-      }
-    },
-    {
-      "foreach": {
-        "field": "temp._valid_ips",
-        "processor": {
-          "append": {
-            "field": "dns.resolved_ip",
-            "allow_duplicates": false,
-            "value": "{{{_ingest._value}}}",
-            "ignore_failure": true
-          }
-        },
-        "if": "ctx.dns != null && ctx.dns.answers != null && ctx.dns.answers.name != null",
-        "ignore_failure": true
-      }
-    },
-    {
-      "script": {
-        "source": "if (ctx.dns.resolved_ip != null && ctx.dns.resolved_ip instanceof List) {\n ctx.dns.resolved_ip.removeIf(item -> item == null || item.toString().trim().isEmpty());\n }",
-        "ignore_failure": true
-      }
-    },
-    {
-      "remove": {
-        "field": [
-          "temp"
-        ],
-        "ignore_missing": true,
-        "ignore_failure": true
-      }
-    },
-    {
-      "rename": {
-        "field": "message2.TTLs",
-        "target_field": "dns.ttls",
-        "ignore_missing": true
-      }
-    },
-    {
-      "rename": {
-        "field": "message2.rejected",
-        "target_field": "dns.query.rejected",
-        "ignore_missing": true
-      }
-    },
-    {
-      "script": {
-        "lang": "painless",
-        "source": "ctx.dns.query.length = ctx.dns.query.name.length()",
-        "ignore_failure": true
-      }
-    },
-    {
-      "set": {
-        "if": "ctx._index == 'so-zeek'",
-        "field": "_index",
-        "value": "so-zeek_dns",
-        "override": true
-      }
-    },
-    {
-      "pipeline": {
-        "if": "ctx.dns?.query?.name != null && ctx.dns.query.name.contains('.')",
-        "name": "dns.tld"
-      }
-    },
-    {
-      "pipeline": {
-        "name": "zeek.common"
-      }
-    }
-  ]
+  "description" : "zeek.dns",
+  "processors" : [
+    { "set": { "field": "event.dataset", "value": "dns" } },
+    { "remove": { "field": ["host"], "ignore_failure": true } },
+    { "json": { "field": "message", "target_field": "message2", "ignore_failure": true } },
+    { "dot_expander": { "field": "id.orig_h", "path": "message2", "ignore_failure": true } },
+    { "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_missing": true } },
+    { "rename": { "field": "message2.trans_id", "target_field": "dns.id", "ignore_missing": true } },
+    { "rename": { "field": "message2.rtt", "target_field": "event.duration", "ignore_missing": true } },
+    { "rename": { "field": "message2.query", "target_field": "dns.query.name", "ignore_missing": true } },
+    { "rename": { "field": "message2.qclass", "target_field": "dns.query.class", "ignore_missing": true } },
+    { "rename": { "field": "message2.qclass_name", "target_field": "dns.query.class_name", "ignore_missing": true } },
+    { "rename": { "field": "message2.qtype", "target_field": "dns.query.type", "ignore_missing": true } },
+    { "rename": { "field": "message2.qtype_name", "target_field": "dns.query.type_name", "ignore_missing": true } },
+    { "rename": { "field": "message2.rcode", "target_field": "dns.response.code", "ignore_missing": true } },
+    { "rename": { "field": "message2.rcode_name", "target_field": "dns.response.code_name", "ignore_missing": true } },
+    { "rename": { "field": "message2.AA", "target_field": "dns.authoritative", "ignore_missing": true } },
+    { "rename": { "field": "message2.TC", "target_field": "dns.truncated", "ignore_missing": true } },
+    { "rename": { "field": "message2.RD", "target_field": "dns.recursion.desired", "ignore_missing": true } },
+    { "rename": { "field": "message2.RA", "target_field": "dns.recursion.available", "ignore_missing": true } },
+    { "rename": { "field": "message2.Z", "target_field": "dns.reserved", "ignore_missing": true } },
+    { "rename": { "field": "message2.answers", "target_field": "dns.answers.name", "ignore_missing": true } },
+    { "foreach": {"field": "dns.answers.name","processor": {"pipeline": {"name": "common.ip_validation"}},"if": "ctx.dns != null && ctx.dns.answers != null && ctx.dns.answers.name != null","ignore_failure": true}},
+    { "foreach": {"field": "temp._valid_ips","processor": {"append": {"field": "dns.resolved_ip","allow_duplicates": false,"value": "{{{_ingest._value}}}","ignore_failure": true}},"ignore_failure": true}},
+    { "script": { "source": "if (ctx.dns.resolved_ip != null && ctx.dns.resolved_ip instanceof List) {\n ctx.dns.resolved_ip.removeIf(item -> item == null || item.toString().trim().isEmpty());\n }","ignore_failure": true }},
+    { "remove": {"field": ["temp"], "ignore_missing": true ,"ignore_failure": true } },
+    { "rename": { "field": "message2.TTLs", "target_field": "dns.ttls", "ignore_missing": true } },
+    { "rename": { "field": "message2.rejected", "target_field": "dns.query.rejected", "ignore_missing": true } },
+    { "script": { "lang": "painless", "source": "ctx.dns.query.length = ctx.dns.query.name.length()", "ignore_failure": true } },
+    { "set": { "if": "ctx._index == 'so-zeek'", "field": "_index", "value": "so-zeek_dns", "override": true } },
+    { "pipeline": { "if": "ctx.dns?.query?.name != null && ctx.dns.query.name.contains('.')", "name": "dns.tld" } },
+    { "pipeline": { "name": "zeek.common" } }
+  ]
 }
salt/elasticsearch/files/ingest/zeek.dpd (new file, 20 lines)

@@ -0,0 +1,20 @@
+{
+  "description" : "zeek.dpd",
+  "processors" : [
+    { "set": { "field": "event.dataset", "value": "dpd" } },
+    { "remove": { "field": ["host"], "ignore_failure": true } },
+    { "json": { "field": "message", "target_field": "message2", "ignore_failure": true } },
+    { "dot_expander": { "field": "id.orig_h", "path": "message2", "ignore_failure": true } },
+    { "rename": { "field": "message2.id.orig_h", "target_field": "source.ip", "ignore_missing": true } },
+    { "dot_expander": { "field": "id.orig_p", "path": "message2", "ignore_failure": true } },
+    { "rename": { "field": "message2.id.orig_p", "target_field": "source.port", "ignore_missing": true } },
+    { "dot_expander": { "field": "id.resp_h", "path": "message2", "ignore_failure": true } },
+    { "rename": { "field": "message2.id.resp_h", "target_field": "destination.ip", "ignore_missing": true } },
+    { "dot_expander": { "field": "id.resp_p", "path": "message2", "ignore_failure": true } },
+    { "rename": { "field": "message2.id.resp_p", "target_field": "destination.port", "ignore_missing": true } },
+    { "rename": { "field": "message2.proto", "target_field": "network.protocol", "ignore_missing": true } },
+    { "rename": { "field": "message2.analyzer", "target_field": "observer.analyzer", "ignore_missing": true } },
+    { "rename": { "field": "message2.failure_reason", "target_field": "error.reason", "ignore_missing": true } },
+    { "pipeline": { "name": "zeek.common" } }
+  ]
+}
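Once loaded, a pipeline like this can be exercised with Elasticsearch's ingest simulate API. The sketch below assumes the pipeline is registered under the name zeek.dpd and that Elasticsearch answers on localhost:9200 (both assumptions, not taken from the diff):

    # Sketch: run one fabricated dpd record through the pipeline.
    curl -s -XPOST 'http://localhost:9200/_ingest/pipeline/zeek.dpd/_simulate' \
      -H 'Content-Type: application/json' -d '{
        "docs": [
          { "_source": { "message": "{\"id.orig_h\":\"10.0.0.5\",\"id.resp_h\":\"10.0.0.9\",\"id.resp_p\":443,\"proto\":\"tcp\",\"analyzer\":\"SSL\",\"failure_reason\":\"Invalid version\"}" } }
        ]
      }'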
@@ -131,47 +131,6 @@ elasticsearch:
           description: Maximum primary shard size. Once an index reaches this limit, it will be rolled over into a new index.
           global: True
           helpLink: elasticsearch.html
-        shrink:
-          method:
-            description: Shrink the index to a new index with fewer primary shards. Shrink operation is by count or size.
-            options:
-              - COUNT
-              - SIZE
-            global: True
-            advanced: True
-            forcedType: string
-          number_of_shards:
-            title: shard count
-            description: Desired shard count. Note that this value is only used when the shrink method selected is 'COUNT'.
-            global: True
-            forcedType: int
-            advanced: True
-          max_primary_shard_size:
-            title: max shard size
-            description: Desired shard size in gb/tb/pb eg. 100gb. Note that this value is only used when the shrink method selected is 'SIZE'.
-            regex: ^[0-9]+(?:gb|tb|pb)$
-            global: True
-            forcedType: string
-            advanced: True
-          allow_write_after_shrink:
-            description: Allow writes after shrink.
-            global: True
-            forcedType: bool
-            default: False
-            advanced: True
-        forcemerge:
-          max_num_segments:
-            description: Reduce the number of segments in each index shard and clean up deleted documents.
-            global: True
-            forcedType: int
-            advanced: True
-        index_codec:
-          title: compression
-          description: Use higher compression for stored fields at the cost of slower performance.
-          forcedType: bool
-          global: True
-          default: False
-          advanced: True
         cold:
           min_age:
             description: Minimum age of index. ex. 60d - This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed. It’s important to note that this is calculated relative to the rollover date (NOT the original creation date of the index). For example, if you have an index that is set to rollover after 30 days and cold min_age set to 60 then there will be 30 days from index creation to rollover and then an additional 60 days before moving to cold tier.
@@ -185,12 +144,6 @@ elasticsearch:
           description: Used for index recovery after a node restart. Indices with higher priorities are recovered before indices with lower priorities.
           global: True
           helpLink: elasticsearch.html
-        allocate:
-          number_of_replicas:
-            description: Set the number of replicas. Remains the same as the previous phase by default.
-            forcedType: int
-            global: True
-            advanced: True
         warm:
           min_age:
             description: Minimum age of index. ex. 30d - This determines when the index should be moved to the warm tier. Nodes in the warm tier generally don’t need to be as fast as those in the hot tier. It’s important to note that this is calculated relative to the rollover date (NOT the original creation date of the index). For example, if you have an index that is set to rollover after 30 days and warm min_age set to 30 then there will be 30 days from index creation to rollover and then an additional 30 days before moving to warm tier.
@@ -205,52 +158,6 @@ elasticsearch:
           forcedType: int
           global: True
           helpLink: elasticsearch.html
-        shrink:
-          method:
-            description: Shrink the index to a new index with fewer primary shards. Shrink operation is by count or size.
-            options:
-              - COUNT
-              - SIZE
-            global: True
-            advanced: True
-          number_of_shards:
-            title: shard count
-            description: Desired shard count. Note that this value is only used when the shrink method selected is 'COUNT'.
-            global: True
-            forcedType: int
-            advanced: True
-          max_primary_shard_size:
-            title: max shard size
-            description: Desired shard size in gb/tb/pb eg. 100gb. Note that this value is only used when the shrink method selected is 'SIZE'.
-            regex: ^[0-9]+(?:gb|tb|pb)$
-            global: True
-            forcedType: string
-            advanced: True
-          allow_write_after_shrink:
-            description: Allow writes after shrink.
-            global: True
-            forcedType: bool
-            default: False
-            advanced: True
-        forcemerge:
-          max_num_segments:
-            description: Reduce the number of segments in each index shard and clean up deleted documents.
-            global: True
-            forcedType: int
-            advanced: True
-        index_codec:
-          title: compression
-          description: Use higher compression for stored fields at the cost of slower performance.
-          forcedType: bool
-          global: True
-          default: False
-          advanced: True
-        allocate:
-          number_of_replicas:
-            description: Set the number of replicas. Remains the same as the previous phase by default.
-            forcedType: int
-            global: True
-            advanced: True
         delete:
           min_age:
             description: Minimum age of index. ex. 90d - This determines when the index should be deleted. It’s important to note that this is calculated relative to the rollover date (NOT the original creation date of the index). For example, if you have an index that is set to rollover after 30 days and delete min_age set to 90 then there will be 30 days from index creation to rollover and then an additional 90 days before deletion.
@@ -380,47 +287,6 @@ elasticsearch:
|
|||||||
global: True
|
global: True
|
||||||
advanced: True
|
advanced: True
|
||||||
helpLink: elasticsearch.html
|
helpLink: elasticsearch.html
|
||||||
shrink:
|
|
||||||
method:
|
|
||||||
description: Shrink the index to a new index with fewer primary shards. Shrink operation is by count or size.
|
|
||||||
options:
|
|
||||||
- COUNT
|
|
||||||
- SIZE
|
|
||||||
global: True
|
|
||||||
advanced: True
|
|
||||||
forcedType: string
|
|
||||||
number_of_shards:
|
|
||||||
title: shard count
|
|
||||||
description: Desired shard count. Note that this value is only used when the shrink method selected is 'COUNT'.
|
|
||||||
global: True
|
|
||||||
forcedType: int
|
|
||||||
advanced: True
|
|
||||||
max_primary_shard_size:
|
|
||||||
title: max shard size
|
|
||||||
description: Desired shard size in gb/tb/pb eg. 100gb. Note that this value is only used when the shrink method selected is 'SIZE'.
|
|
||||||
regex: ^[0-9]+(?:gb|tb|pb)$
|
|
||||||
global: True
|
|
||||||
forcedType: string
|
|
||||||
advanced: True
|
|
||||||
allow_write_after_shrink:
|
|
||||||
description: Allow writes after shrink.
|
|
||||||
global: True
|
|
||||||
forcedType: bool
|
|
||||||
default: False
|
|
||||||
advanced: True
|
|
||||||
forcemerge:
|
|
||||||
max_num_segments:
|
|
||||||
description: Reduce the number of segments in each index shard and clean up deleted documents.
|
|
||||||
global: True
|
|
||||||
forcedType: int
|
|
||||||
advanced: True
|
|
||||||
index_codec:
|
|
||||||
title: compression
|
|
||||||
description: Use higher compression for stored fields at the cost of slower performance.
|
|
||||||
forcedType: bool
|
|
||||||
global: True
|
|
||||||
default: False
|
|
||||||
advanced: True
|
|
||||||
warm:
|
warm:
|
||||||
min_age:
|
min_age:
|
||||||
description: Minimum age of index. ex. 30d - This determines when the index should be moved to the warm tier. Nodes in the warm tier generally don’t need to be as fast as those in the hot tier. It’s important to note that this is calculated relative to the rollover date (NOT the original creation date of the index). For example, if you have an index that is set to rollover after 30 days and warm min_age set to 30 then there will be 30 days from index creation to rollover and then an additional 30 days before moving to warm tier.
|
description: Minimum age of index. ex. 30d - This determines when the index should be moved to the warm tier. Nodes in the warm tier generally don’t need to be as fast as those in the hot tier. It’s important to note that this is calculated relative to the rollover date (NOT the original creation date of the index). For example, if you have an index that is set to rollover after 30 days and warm min_age set to 30 then there will be 30 days from index creation to rollover and then an additional 30 days before moving to warm tier.
|
||||||
@@ -448,52 +314,6 @@ elasticsearch:
|
|||||||
global: True
|
global: True
|
||||||
advanced: True
|
advanced: True
|
||||||
helpLink: elasticsearch.html
|
helpLink: elasticsearch.html
|
||||||
shrink:
|
|
||||||
method:
|
|
||||||
description: Shrink the index to a new index with fewer primary shards. Shrink operation is by count or size.
|
|
||||||
options:
|
|
||||||
- COUNT
|
|
||||||
- SIZE
|
|
||||||
global: True
|
|
||||||
advanced: True
|
|
||||||
number_of_shards:
|
|
||||||
title: shard count
|
|
||||||
description: Desired shard count. Note that this value is only used when the shrink method selected is 'COUNT'.
|
|
||||||
global: True
|
|
||||||
forcedType: int
|
|
||||||
advanced: True
|
|
||||||
max_primary_shard_size:
|
|
||||||
title: max shard size
|
|
||||||
description: Desired shard size in gb/tb/pb eg. 100gb. Note that this value is only used when the shrink method selected is 'SIZE'.
|
|
||||||
regex: ^[0-9]+(?:gb|tb|pb)$
|
|
||||||
global: True
|
|
||||||
forcedType: string
|
|
||||||
advanced: True
|
|
||||||
allow_write_after_shrink:
|
|
||||||
description: Allow writes after shrink.
|
|
||||||
global: True
|
|
||||||
forcedType: bool
|
|
||||||
default: False
|
|
||||||
advanced: True
|
|
||||||
forcemerge:
|
|
||||||
max_num_segments:
|
|
||||||
description: Reduce the number of segments in each index shard and clean up deleted documents.
|
|
||||||
global: True
|
|
||||||
forcedType: int
|
|
||||||
advanced: True
|
|
||||||
index_codec:
|
|
||||||
title: compression
|
|
||||||
description: Use higher compression for stored fields at the cost of slower performance.
|
|
||||||
forcedType: bool
|
|
||||||
global: True
|
|
||||||
default: False
|
|
||||||
advanced: True
|
|
||||||
allocate:
|
|
||||||
number_of_replicas:
|
|
||||||
description: Set the number of replicas. Remains the same as the previous phase by default.
|
|
||||||
forcedType: int
|
|
||||||
global: True
|
|
||||||
advanced: True
|
|
||||||
cold:
|
cold:
|
||||||
min_age:
|
min_age:
|
||||||
description: Minimum age of index. ex. 60d - This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed. It’s important to note that this is calculated relative to the rollover date (NOT the original creation date of the index). For example, if you have an index that is set to rollover after 30 days and cold min_age set to 60 then there will be 30 days from index creation to rollover and then an additional 60 days before moving to cold tier.
|
description: Minimum age of index. ex. 60d - This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed. It’s important to note that this is calculated relative to the rollover date (NOT the original creation date of the index). For example, if you have an index that is set to rollover after 30 days and cold min_age set to 60 then there will be 30 days from index creation to rollover and then an additional 60 days before moving to cold tier.
|
||||||
@@ -510,12 +330,6 @@ elasticsearch:
|
|||||||
global: True
|
global: True
|
||||||
advanced: True
|
advanced: True
|
||||||
helpLink: elasticsearch.html
|
helpLink: elasticsearch.html
|
||||||
allocate:
|
|
||||||
number_of_replicas:
|
|
||||||
description: Set the number of replicas. Remains the same as the previous phase by default.
|
|
||||||
forcedType: int
|
|
||||||
global: True
|
|
||||||
advanced: True
|
|
||||||
delete:
|
delete:
|
||||||
min_age:
|
min_age:
|
||||||
description: Minimum age of index. ex. 90d - This determines when the index should be deleted. It’s important to note that this is calculated relative to the rollover date (NOT the original creation date of the index). For example, if you have an index that is set to rollover after 30 days and delete min_age set to 90 then there will be 30 days from index creation to rollover and then an additional 90 days before deletion.
|
description: Minimum age of index. ex. 90d - This determines when the index should be deleted. It’s important to note that this is calculated relative to the rollover date (NOT the original creation date of the index). For example, if you have an index that is set to rollover after 30 days and delete min_age set to 90 then there will be 30 days from index creation to rollover and then an additional 90 days before deletion.
|
||||||
|
|||||||
@@ -61,55 +61,5 @@
       {% do settings.index_template.template.settings.index.pop('sort') %}
     {% endif %}
   {% endif %}
-
-  {# advanced ilm actions #}
-  {% if settings.policy is defined and settings.policy.phases is defined %}
-    {% set PHASE_NAMES = ["hot", "warm", "cold"] %}
-    {% for P in PHASE_NAMES %}
-      {% if settings.policy.phases[P] is defined and settings.policy.phases[P].actions is defined %}
-        {% set PHASE = settings.policy.phases[P].actions %}
-        {# remove allocate action if number_of_replicas isn't configured #}
-        {% if PHASE.allocate is defined %}
-          {% if PHASE.allocate.number_of_replicas is not defined or PHASE.allocate.number_of_replicas == "" %}
-            {% do PHASE.pop('allocate', none) %}
-          {% endif %}
-        {% endif %}
-        {# start shrink action #}
-        {% if PHASE.shrink is defined %}
-          {% if PHASE.shrink.method is defined %}
-            {% if PHASE.shrink.method == 'COUNT' and PHASE.shrink.number_of_shards is defined and PHASE.shrink.number_of_shards %}
-              {# remove max_primary_shard_size value when doing shrink operation by count vs size #}
-              {% do PHASE.shrink.pop('max_primary_shard_size', none) %}
-            {% elif PHASE.shrink.method == 'SIZE' and PHASE.shrink.max_primary_shard_size is defined and PHASE.shrink.max_primary_shard_size %}
-              {# remove number_of_shards value when doing shrink operation by size vs count #}
-              {% do PHASE.shrink.pop('number_of_shards', none) %}
-            {% else %}
-              {# method isn't defined or missing a required config number_of_shards/max_primary_shard_size #}
-              {% do PHASE.pop('shrink', none) %}
-            {% endif %}
-          {% endif %}
-        {% endif %}
-        {# always remove shrink method since its only used for SOC config, not in the actual ilm policy #}
-        {% if PHASE.shrink is defined %}
-          {% do PHASE.shrink.pop('method', none) %}
-        {% endif %}
-        {# end shrink action #}
-        {# start force merge #}
-        {% if PHASE.forcemerge is defined %}
-          {% if PHASE.forcemerge.index_codec is defined and PHASE.forcemerge.index_codec %}
-            {% do PHASE.forcemerge.update({'index_codec': 'best_compression'}) %}
-          {% else %}
-            {% do PHASE.forcemerge.pop('index_codec', none) %}
-          {% endif %}
-          {% if PHASE.forcemerge.max_num_segments is not defined or not PHASE.forcemerge.max_num_segments %}
-            {# max_num_segments is empty, drop it #}
-            {% do PHASE.pop('forcemerge', none) %}
-          {% endif %}
-        {% endif %}
-        {# end force merge #}
-      {% endif %}
-    {% endfor %}
-  {% endif %}
-
   {% do ES_INDEX_SETTINGS.update({index | replace("_x_", "."): ES_INDEX_SETTINGS_GLOBAL_OVERRIDES[index]}) %}
 {% endfor %}
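The hunk above strips the advanced ILM actions (allocate, shrink, forcemerge) out of the rendered index-template logic. A quick way to confirm what a change like this actually produced on a running grid is to read the installed ILM policies back out of Elasticsearch. A minimal sketch, assuming a manager node where so-elasticsearch-query and jq are available (both appear elsewhere in this changeset); the policy name is illustrative:

    # List the installed ILM policy names.
    so-elasticsearch-query _ilm/policy | jq -r 'keys[]'

    # Dump the phase actions of one policy; "so-logs-syslog" is a hypothetical
    # name - substitute one returned by the command above.
    so-elasticsearch-query _ilm/policy/so-logs-syslog | jq '.[].policy.phases'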
@@ -206,7 +206,7 @@ fail() {
   exit 1
 }
 
-echo -e "\nDISCLAIMER: Script output is based on current data patterns, but are approximations solely intended to assist with getting a general ILM policy configured."
+echo -e "\nDISCLAIMER: Script output is based on current data patterns, but are approximations soley intended to assist with getting a general ILM policy configured."
 
 ORG_ID=$(lookup_org_id)
 [ -n "$ORG_ID" ] || fail "Unable to resolve InfluxDB org id"
@@ -756,7 +756,7 @@ if [ "$should_trigger_recommendations" = true ]; then
 
   ilm_output=$(so-elasticsearch-query "${index}/_ilm/explain" --fail 2>/dev/null) || true
   if [ -n "$ilm_output" ]; then
-    policy=$(echo "$ilm_output" | jq -r '.indices | to_entries | .[0].value.policy // empty' 2>/dev/null)
+    policy=$(echo "$ilm_output" | jq --arg idx "$index" -r ".indices[$idx].policy // empty" 2>/dev/null)
   fi
   if [ -n "$policy" ] && [ -n "${policy_ages[$policy]:-}" ]; then
     delete_min_age=${policy_ages[$policy]}
@@ -1024,12 +1024,8 @@ else
   if [ "$ilm_indices_immediate" -gt 0 ]; then
     echo -e "${BOLD}Deleting now:${NC} $ilm_indices_immediate indices (~${ilm_delete_immediate_gb} GB, $ilm_shards_immediate shards)"
   fi
-  if [ "$ilm_indices_30d" -gt 0 ]; then
-    if [ "$ilm_delete_scheduled_30d" -gt 0 ] && [ "$ilm_indices_scheduled_30d" -gt 0 ]; then
-      echo -e "${BOLD}Storage to be freed (30d):${NC} $ilm_indices_30d indices (~${ilm_delete_30d_gb} GB, $ilm_shards_30d shards)"
-    elif [ "$ilm_indices_7d" -gt 0 ]; then
-      echo -e "${BOLD}Storage to be freed (7d):${NC} $ilm_indices_7d indices (~${ilm_delete_7d_gb} GB, $ilm_shards_7d shards)"
-    fi
+  if [ "$ilm_indices_7d" -gt 0 ]; then
+    echo -e "${BOLD}Storage to be freed (7d):${NC} $ilm_indices_7d indices (~${ilm_delete_7d_gb} GB, $ilm_shards_7d shards)"
   fi
 
 log_title "LOG" "Retention Projection"
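The jq change in the middle hunk above is worth a second look: the old expression took the first entry of `.indices` from the `_ilm/explain` response regardless of which index it described, while the new one addresses the entry by name. A minimal sketch of both forms against the same output (the index name is hypothetical):

    index="so-logs-syslog-2024.01.01"   # hypothetical index name
    ilm_output=$(so-elasticsearch-query "${index}/_ilm/explain" --fail 2>/dev/null) || true

    # Old form: grabs whichever index entry happens to come back first.
    echo "$ilm_output" | jq -r '.indices | to_entries | .[0].value.policy // empty'

    # New form: selects the entry for this exact index. Note the single quotes:
    # $idx is a jq variable bound by --arg, not a shell expansion.
    echo "$ilm_output" | jq --arg idx "$index" -r '.indices[$idx].policy // empty'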
@@ -1,65 +0,0 @@
-# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
-# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
-# https://securityonion.net/license; you may not use this file except in compliance with the
-# Elastic License 2.0.
-
-{% from 'allowed_states.map.jinja' import allowed_states %}
-{% if sls.split('.')[0] in allowed_states %}
-
-include:
-  - idstools.sync_files
-
-idstoolslogdir:
-  file.directory:
-    - name: /opt/so/log/idstools
-    - user: 939
-    - group: 939
-    - makedirs: True
-
-idstools_sbin:
-  file.recurse:
-    - name: /usr/sbin
-    - source: salt://idstools/tools/sbin
-    - user: 939
-    - group: 939
-    - file_mode: 755
-
-# If this is used, exclude so-rule-update
-#idstools_sbin_jinja:
-#  file.recurse:
-#    - name: /usr/sbin
-#    - source: salt://idstools/tools/sbin_jinja
-#    - user: 939
-#    - group: 939
-#    - file_mode: 755
-#    - template: jinja
-
-idstools_so-rule-update:
-  file.managed:
-    - name: /usr/sbin/so-rule-update
-    - source: salt://idstools/tools/sbin_jinja/so-rule-update
-    - user: 939
-    - group: 939
-    - mode: 755
-    - template: jinja
-
-suricatacustomdirsfile:
-  file.directory:
-    - name: /nsm/rules/detect-suricata/custom_file
-    - user: 939
-    - group: 939
-    - makedirs: True
-
-suricatacustomdirsurl:
-  file.directory:
-    - name: /nsm/rules/detect-suricata/custom_temp
-    - user: 939
-    - group: 939
-
-{% else %}
-
-{{sls}}_state_not_allowed:
-  test.fail_without_changes:
-    - name: {{sls}}_state_not_allowed
-
-{% endif %}
@@ -1,10 +0,0 @@
-idstools:
-  enabled: False
-  config:
-    urls: []
-    ruleset: ETOPEN
-    oinkcode: ""
-  sids:
-    enabled: []
-    disabled: []
-    modify: []
@@ -1,31 +0,0 @@
-# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
-# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
-# https://securityonion.net/license; you may not use this file except in compliance with the
-# Elastic License 2.0.
-
-{% from 'allowed_states.map.jinja' import allowed_states %}
-{% if sls.split('.')[0] in allowed_states %}
-
-include:
-  - idstools.sostatus
-
-so-idstools:
-  docker_container.absent:
-    - force: True
-
-so-idstools_so-status.disabled:
-  file.comment:
-    - name: /opt/so/conf/so-status/so-status.conf
-    - regex: ^so-idstools$
-
-so-rule-update:
-  cron.absent:
-    - identifier: so-rule-update
-
-{% else %}
-
-{{sls}}_state_not_allowed:
-  test.fail_without_changes:
-    - name: {{sls}}_state_not_allowed
-
-{% endif %}
@@ -1,91 +0,0 @@
-# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
-# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
-# https://securityonion.net/license; you may not use this file except in compliance with the
-# Elastic License 2.0.
-
-{% from 'allowed_states.map.jinja' import allowed_states %}
-{% if sls.split('.')[0] in allowed_states %}
-{% from 'docker/docker.map.jinja' import DOCKER %}
-{% from 'vars/globals.map.jinja' import GLOBALS %}
-{% set proxy = salt['pillar.get']('manager:proxy') %}
-
-include:
-  - idstools.config
-  - idstools.sostatus
-
-so-idstools:
-  docker_container.running:
-    - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-idstools:{{ GLOBALS.so_version }}
-    - hostname: so-idstools
-    - user: socore
-    - networks:
-      - sobridge:
-        - ipv4_address: {{ DOCKER.containers['so-idstools'].ip }}
-    {% if proxy %}
-    - environment:
-      - http_proxy={{ proxy }}
-      - https_proxy={{ proxy }}
-      - no_proxy={{ salt['pillar.get']('manager:no_proxy') }}
-      {% if DOCKER.containers['so-idstools'].extra_env %}
-        {% for XTRAENV in DOCKER.containers['so-idstools'].extra_env %}
-      - {{ XTRAENV }}
-        {% endfor %}
-      {% endif %}
-    {% elif DOCKER.containers['so-idstools'].extra_env %}
-    - environment:
-      {% for XTRAENV in DOCKER.containers['so-idstools'].extra_env %}
-      - {{ XTRAENV }}
-      {% endfor %}
-    {% endif %}
-    - binds:
-      - /opt/so/conf/idstools/etc:/opt/so/idstools/etc:ro
-      - /opt/so/rules/nids/suri:/opt/so/rules/nids/suri:rw
-      - /nsm/rules/:/nsm/rules/:rw
-      {% if DOCKER.containers['so-idstools'].custom_bind_mounts %}
-        {% for BIND in DOCKER.containers['so-idstools'].custom_bind_mounts %}
-      - {{ BIND }}
-        {% endfor %}
-      {% endif %}
-    - extra_hosts:
-      - {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }}
-      {% if DOCKER.containers['so-idstools'].extra_hosts %}
-        {% for XTRAHOST in DOCKER.containers['so-idstools'].extra_hosts %}
-      - {{ XTRAHOST }}
-        {% endfor %}
-      {% endif %}
-    - watch:
-      - file: idstoolsetcsync
-      - file: idstools_so-rule-update
-
-delete_so-idstools_so-status.disabled:
-  file.uncomment:
-    - name: /opt/so/conf/so-status/so-status.conf
-    - regex: ^so-idstools$
-
-so-rule-update:
-  cron.present:
-    - name: /usr/sbin/so-rule-update > /opt/so/log/idstools/download_cron.log 2>&1
-    - identifier: so-rule-update
-    - user: root
-    - minute: '1'
-    - hour: '7'
-
-# order this last to give so-idstools container time to be ready
-run_so-rule-update:
-  cmd.run:
-    - name: '/usr/sbin/so-rule-update > /opt/so/log/idstools/download_idstools_state.log 2>&1'
-    - require:
-      - docker_container: so-idstools
-    - onchanges:
-      - file: idstools_so-rule-update
-      - file: idstoolsetcsync
-      - file: synclocalnidsrules
-    - order: last
-
-{% else %}
-
-{{sls}}_state_not_allowed:
-  test.fail_without_changes:
-    - name: {{sls}}_state_not_allowed
-
-{% endif %}
@@ -1,16 +0,0 @@
-{%- set disabled_sids = salt['pillar.get']('idstools:sids:disabled', {}) -%}
-# idstools - disable.conf
-
-# Example of disabling a rule by signature ID (gid is optional).
-# 1:2019401
-# 2019401
-
-# Example of disabling a rule by regular expression.
-# - All regular expression matches are case insensitive.
-# re:hearbleed
-# re:MS(0[7-9]|10)-\d+
-{%- if disabled_sids != None %}
-{%- for sid in disabled_sids %}
-{{ sid }}
-{%- endfor %}
-{%- endif %}
@@ -1,16 +0,0 @@
-{%- set enabled_sids = salt['pillar.get']('idstools:sids:enabled', {}) -%}
-# idstools-rulecat - enable.conf
-
-# Example of enabling a rule by signature ID (gid is optional).
-# 1:2019401
-# 2019401
-
-# Example of enabling a rule by regular expression.
-# - All regular expression matches are case insensitive.
-# re:hearbleed
-# re:MS(0[7-9]|10)-\d+
-{%- if enabled_sids != None %}
-{%- for sid in enabled_sids %}
-{{ sid }}
-{%- endfor %}
-{%- endif %}
@@ -1,12 +0,0 @@
-{%- set modify_sids = salt['pillar.get']('idstools:sids:modify', {}) -%}
-# idstools-rulecat - modify.conf
-
-# Format: <sid> "<from>" "<to>"
-
-# Example changing the seconds for rule 2019401 to 3600.
-#2019401 "seconds \d+" "seconds 3600"
-{%- if modify_sids != None %}
-{%- for sid in modify_sids %}
-{{ sid }}
-{%- endfor %}
-{%- endif %}
@@ -1,23 +0,0 @@
-{%- from 'vars/globals.map.jinja' import GLOBALS -%}
-{%- from 'soc/merged.map.jinja' import SOCMERGED -%}
---suricata-version=7.0.3
---merged=/opt/so/rules/nids/suri/all.rules
---output=/nsm/rules/detect-suricata/custom_temp
---local=/opt/so/rules/nids/suri/local.rules
-{%- if GLOBALS.md_engine == "SURICATA" %}
---local=/opt/so/rules/nids/suri/extraction.rules
---local=/opt/so/rules/nids/suri/filters.rules
-{%- endif %}
---url=http://{{ GLOBALS.manager }}:7788/suricata/emerging-all.rules
---disable=/opt/so/idstools/etc/disable.conf
---enable=/opt/so/idstools/etc/enable.conf
---modify=/opt/so/idstools/etc/modify.conf
-{%- if SOCMERGED.config.server.modules.suricataengine.customRulesets %}
-{%- for ruleset in SOCMERGED.config.server.modules.suricataengine.customRulesets %}
-{%- if 'url' in ruleset %}
---url={{ ruleset.url }}
-{%- elif 'file' in ruleset %}
---local={{ ruleset.file }}
-{%- endif %}
-{%- endfor %}
-{%- endif %}
@@ -1,13 +0,0 @@
-# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
-# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
-# https://securityonion.net/license; you may not use this file except in compliance with the
-# Elastic License 2.0.
-
-{% from 'idstools/map.jinja' import IDSTOOLSMERGED %}
-
-include:
-{% if IDSTOOLSMERGED.enabled %}
-  - idstools.enabled
-{% else %}
-  - idstools.disabled
-{% endif %}
@@ -1,7 +0,0 @@
-{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
-   or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
-   https://securityonion.net/license; you may not use this file except in compliance with the
-   Elastic License 2.0. #}
-
-{% import_yaml 'idstools/defaults.yaml' as IDSTOOLSDEFAULTS with context %}
-{% set IDSTOOLSMERGED = salt['pillar.get']('idstools', IDSTOOLSDEFAULTS.idstools, merge=True) %}
@@ -1,26 +0,0 @@
-# Extract all PDF mime type
-alert http any any -> any any (msg:"FILE pdf detected"; filemagic:"PDF document"; filestore; noalert; sid:1100000; rev:1;)
-alert smtp any any -> any any (msg:"FILE pdf detected"; filemagic:"PDF document"; filestore; noalert; sid:1100001; rev:1;)
-alert nfs any any -> any any (msg:"FILE pdf detected"; filemagic:"PDF document"; filestore; noalert; sid:1100002; rev:1;)
-alert smb any any -> any any (msg:"FILE pdf detected"; filemagic:"PDF document"; filestore; noalert; sid:1100003; rev:1;)
-# Extract EXE/DLL file types
-alert http any any -> any any (msg:"FILE EXE detected"; filemagic:"PE32 executable"; filestore; noalert; sid:1100004; rev:1;)
-alert smtp any any -> any any (msg:"FILE EXE detected"; filemagic:"PE32 executable"; filestore; noalert; sid:1100005; rev:1;)
-alert nfs any any -> any any (msg:"FILE EXE detected"; filemagic:"PE32 executable"; filestore; noalert; sid:1100006; rev:1;)
-alert smb any any -> any any (msg:"FILE EXE detected"; filemagic:"PE32 executable"; filestore; noalert; sid:1100007; rev:1;)
-alert http any any -> any any (msg:"FILE EXE detected"; filemagic:"MS-DOS executable"; filestore; noalert; sid:1100008; rev:1;)
-alert smtp any any -> any any (msg:"FILE EXE detected"; filemagic:"MS-DOS executable"; filestore; noalert; sid:1100009; rev:1;)
-alert nfs any any -> any any (msg:"FILE EXE detected"; filemagic:"MS-DOS executable"; filestore; noalert; sid:1100010; rev:1;)
-alert smb any any -> any any (msg:"FILE EXE detected"; filemagic:"MS-DOS executable"; filestore; noalert; sid:1100011; rev:1;)
-
-# Extract all Zip files
-alert http any any -> any any (msg:"FILE ZIP detected"; filemagic:"Zip"; filestore; noalert; sid:1100012; rev:1;)
-alert smtp any any -> any any (msg:"FILE ZIP detected"; filemagic:"Zip"; filestore; noalert; sid:1100013; rev:1;)
-alert nfs any any -> any any (msg:"FILE ZIP detected"; filemagic:"Zip"; filestore; noalert; sid:1100014; rev:1;)
-alert smb any any -> any any (msg:"FILE ZIP detected"; filemagic:"Zip"; filestore; noalert; sid:1100015; rev:1;)
-
-# Extract Word Docs
-alert http any any -> any any (msg:"FILE WORDDOC detected"; filemagic:"Composite Document File V2 Document"; filestore; noalert; sid:1100016; rev:1;)
-alert smtp any any -> any any (msg:"FILE WORDDOC detected"; filemagic:"Composite Document File V2 Document"; filestore; noalert; sid:1100017; rev:1;)
-alert nfs any any -> any any (msg:"FILE WORDDOC detected"; filemagic:"Composite Document File V2 Document"; filestore; noalert; sid:1100018; rev:1;)
-alert smb any any -> any any (msg:"FILE WORDDOC detected"; filemagic:"Composite Document File V2 Document"; filestore; noalert; sid:1100019; rev:1;)
@@ -1,11 +0,0 @@
-# Start the filters at sid 1200000
-# Example of filtering out *google.com from being in the dns log.
-#config dns any any -> any any (dns.query; content:"google.com"; config: logging disable, type tx, scope tx; sid:1200000;)
-# Example of filtering out *google.com from being in the http log.
-#config http any any -> any any (http.host; content:"google.com"; config: logging disable, type tx, scope tx; sid:1200001;)
-# Example of filtering out someuseragent from being in the http log.
-#config http any any -> any any (http.user_agent; content:"someuseragent"; config: logging disable, type tx, scope tx; sid:1200002;)
-# Example of filtering out Google's certificate from being in the ssl log.
-#config tls any any -> any any (tls.fingerprint; content:"4f:a4:5e:58:7e:d9:db:20:09:d7:b6:c7:ff:58:c4:7b:dc:3f:55:b4"; config: logging disable, type tx, scope tx; sid:1200003;)
-# Example of filtering out a md5 of a file from being in the files log.
-#config fileinfo any any -> any any (fileinfo.filemd5; content:"7a125dc69c82d5caf94d3913eecde4b5"; config: logging disable, type tx, scope tx; sid:1200004;)
@@ -1 +0,0 @@
-# Add your custom Suricata rules in this file.
@@ -1,72 +0,0 @@
-idstools:
-  enabled:
-    description: Enables or disables the IDStools process which is used by the Detection system.
-  config:
-    oinkcode:
-      description: Enter your registration code or oinkcode for paid NIDS rulesets.
-      title: Registration Code
-      global: True
-      forcedType: string
-      helpLink: rules.html
-    ruleset:
-      description: 'Defines the ruleset you want to run. Options are ETOPEN or ETPRO. Once you have changed the ruleset here, you will need to wait for the rule update to take place (every 24 hours), or you can force the update by nagivating to Detections --> Options dropdown menu --> Suricata --> Full Update. WARNING! Changing the ruleset will remove all existing non-overlapping Suricata rules of the previous ruleset and their associated overrides. This removal cannot be undone.'
-      global: True
-      regex: ETPRO\b|ETOPEN\b
-      helpLink: rules.html
-    urls:
-      description: This is a list of additional rule download locations. This feature is currently disabled.
-      global: True
-      multiline: True
-      forcedType: "[]string"
-      readonly: True
-      helpLink: rules.html
-  sids:
-    disabled:
-      description: Contains the list of NIDS rules (or regex patterns) disabled across the grid. This setting is readonly; Use the Detections screen to disable rules.
-      global: True
-      multiline: True
-      forcedType: "[]string"
-      regex: \d*|re:.*
-      helpLink: managing-alerts.html
-      readonlyUi: True
-      advanced: true
-    enabled:
-      description: Contains the list of NIDS rules (or regex patterns) enabled across the grid. This setting is readonly; Use the Detections screen to enable rules.
-      global: True
-      multiline: True
-      forcedType: "[]string"
-      regex: \d*|re:.*
-      helpLink: managing-alerts.html
-      readonlyUi: True
-      advanced: true
-    modify:
-      description: Contains the list of NIDS rules (SID "REGEX_SEARCH_TERM" "REGEX_REPLACE_TERM"). This setting is readonly; Use the Detections screen to modify rules.
-      global: True
-      multiline: True
-      forcedType: "[]string"
-      helpLink: managing-alerts.html
-      readonlyUi: True
-      advanced: true
-  rules:
-    local__rules:
-      description: Contains the list of custom NIDS rules applied to the grid. This setting is readonly; Use the Detections screen to adjust rules.
-      file: True
-      global: True
-      advanced: True
-      title: Local Rules
-      helpLink: local-rules.html
-      readonlyUi: True
-    filters__rules:
-      description: If you are using Suricata for metadata, then you can set custom filters for that metadata here.
-      file: True
-      global: True
-      advanced: True
-      title: Filter Rules
-      helpLink: suricata.html
-    extraction__rules:
-      description: If you are using Suricata for metadata, then you can set a list of MIME types for file extraction here.
-      file: True
-      global: True
-      advanced: True
-      title: Extraction Rules
-      helpLink: suricata.html
@@ -1,21 +0,0 @@
-# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
-# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
-# https://securityonion.net/license; you may not use this file except in compliance with the
-# Elastic License 2.0.
-
-{% from 'allowed_states.map.jinja' import allowed_states %}
-{% if sls.split('.')[0] in allowed_states %}
-
-append_so-idstools_so-status.conf:
-  file.append:
-    - name: /opt/so/conf/so-status/so-status.conf
-    - text: so-idstools
-    - unless: grep -q so-idstools /opt/so/conf/so-status/so-status.conf
-
-{% else %}
-
-{{sls}}_state_not_allowed:
-  test.fail_without_changes:
-    - name: {{sls}}_state_not_allowed
-
-{% endif %}
@@ -1,37 +0,0 @@
-# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
-# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
-# https://securityonion.net/license; you may not use this file except in compliance with the
-# Elastic License 2.0.
-
-
-idstoolsdir:
-  file.directory:
-    - name: /opt/so/conf/idstools/etc
-    - user: 939
-    - group: 939
-    - makedirs: True
-
-idstoolsetcsync:
-  file.recurse:
-    - name: /opt/so/conf/idstools/etc
-    - source: salt://idstools/etc
-    - user: 939
-    - group: 939
-    - template: jinja
-
-rulesdir:
-  file.directory:
-    - name: /opt/so/rules/nids/suri
-    - user: 939
-    - group: 939
-    - makedirs: True
-
-# Don't show changes because all.rules can be large
-synclocalnidsrules:
-  file.recurse:
-    - name: /opt/so/rules/nids/suri/
-    - source: salt://idstools/rules/
-    - user: 939
-    - group: 939
-    - show_changes: False
-    - include_pat: 'E@.rules'
@@ -1,12 +0,0 @@
-#!/bin/bash
-
-# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
-# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
-# https://securityonion.net/license; you may not use this file except in compliance with the
-# Elastic License 2.0.
-
-
-
-. /usr/sbin/so-common
-
-/usr/sbin/so-restart idstools $1
@@ -1,12 +0,0 @@
-#!/bin/bash
-
-# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
-# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
-# https://securityonion.net/license; you may not use this file except in compliance with the
-# Elastic License 2.0.
-
-
-
-. /usr/sbin/so-common
-
-/usr/sbin/so-start idstools $1
@@ -1,12 +0,0 @@
-#!/bin/bash
-
-# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
-# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
-# https://securityonion.net/license; you may not use this file except in compliance with the
-# Elastic License 2.0.
-
-
-
-. /usr/sbin/so-common
-
-/usr/sbin/so-stop idstools $1
@@ -1,40 +0,0 @@
-#!/bin/bash
-
-# if this script isn't already running
-if [[ ! "`pidof -x $(basename $0) -o %PPID`" ]]; then
-
-. /usr/sbin/so-common
-
-{%- from 'vars/globals.map.jinja' import GLOBALS %}
-{%- from 'idstools/map.jinja' import IDSTOOLSMERGED %}
-
-{%- set proxy = salt['pillar.get']('manager:proxy') %}
-{%- set noproxy = salt['pillar.get']('manager:no_proxy', '') %}
-
-{%- if proxy %}
-# Download the rules from the internet
-export http_proxy={{ proxy }}
-export https_proxy={{ proxy }}
-export no_proxy="{{ noproxy }}"
-{%- endif %}
-
-mkdir -p /nsm/rules/suricata
-chown -R socore:socore /nsm/rules/suricata
-{%- if not GLOBALS.airgap %}
-# Download the rules from the internet
-{%- if IDSTOOLSMERGED.config.ruleset == 'ETOPEN' %}
-docker exec so-idstools idstools-rulecat -v --suricata-version 7.0.3 -o /nsm/rules/suricata/ --merged=/nsm/rules/suricata/emerging-all.rules --force
-{%- elif IDSTOOLSMERGED.config.ruleset == 'ETPRO' %}
-docker exec so-idstools idstools-rulecat -v --suricata-version 7.0.3 -o /nsm/rules/suricata/ --merged=/nsm/rules/suricata/emerging-all.rules --force --etpro={{ IDSTOOLSMERGED.config.oinkcode }}
-{%- endif %}
-{%- endif %}
-
-
-argstr=""
-for arg in "$@"; do
-  argstr="${argstr} \"${arg}\""
-done
-
-docker exec so-idstools /bin/bash -c "cd /opt/so/idstools/etc && idstools-rulecat --force ${argstr}"
-
-fi
@@ -1,15 +1,5 @@
 logrotate:
   config:
-    /opt/so/log/idstools/*_x_log:
-      - daily
-      - rotate 14
-      - missingok
-      - copytruncate
-      - compress
-      - create
-      - extension .log
-      - dateext
-      - dateyesterday
     /opt/so/log/nginx/*_x_log:
       - daily
       - rotate 14
@@ -1,12 +1,5 @@
 logrotate:
   config:
-    "/opt/so/log/idstools/*_x_log":
-      description: List of logrotate options for this file.
-      title: /opt/so/log/idstools/*.log
-      advanced: True
-      multiline: True
-      global: True
-      forcedType: "[]string"
     "/opt/so/log/nginx/*_x_log":
       description: List of logrotate options for this file.
       title: /opt/so/log/nginx/*.log
@@ -25,11 +25,13 @@
 {% set index_settings = es.get('index_settings', {}) %}
 {% set input = index_settings.get('so-logs', {}) %}
 {% for k in matched_integration_names %}
-  {% do index_settings.update({k: input}) %}
+  {% if k not in index_settings %}
+    {% set _ = index_settings.update({k: input}) %}
+  {% endif %}
 {% endfor %}
 {% for k in addon_integration_keys %}
   {% if k not in matched_integration_names and k in index_settings %}
-    {% do index_settings.pop(k) %}
+    {% set _ = index_settings.pop(k) %}
   {% endif %}
 {% endfor %}
 {{ data }}
@@ -43,12 +45,14 @@
 {% set es = data.get('elasticsearch', {}) %}
 {% set index_settings = es.get('index_settings', {}) %}
 {% for k in matched_integration_names %}
-  {% set input = ADDON_INTEGRATION_DEFAULTS[k] %}
-  {% do index_settings.update({k: input})%}
+  {% if k not in index_settings %}
+    {% set input = ADDON_INTEGRATION_DEFAULTS[k] %}
+    {% set _ = index_settings.update({k: input})%}
+  {% endif %}
 {% endfor %}
 {% for k in addon_integration_keys %}
   {% if k not in matched_integration_names and k in index_settings %}
-    {% do index_settings.pop(k) %}
+    {% set _ = index_settings.pop(k) %}
   {% endif %}
 {% endfor %}
 {{ data }}
@@ -604,16 +604,6 @@ function add_kratos_to_minion() {
   fi
 }
 
-function add_idstools_to_minion() {
-  printf '%s\n'\
-    "idstools:"\
-    "  enabled: True"\
-    "  " >> $PILLARFILE
-  if [ $? -ne 0 ]; then
-    log "ERROR" "Failed to add idstools configuration to $PILLARFILE"
-    return 1
-  fi
-}
 
 function add_elastic_fleet_package_registry_to_minion() {
   printf '%s\n'\
@@ -741,7 +731,6 @@ function createEVAL() {
   add_soc_to_minion || return 1
   add_registry_to_minion || return 1
   add_kratos_to_minion || return 1
-  add_idstools_to_minion || return 1
   add_elastic_fleet_package_registry_to_minion || return 1
 }
 
@@ -762,7 +751,6 @@ function createSTANDALONE() {
   add_soc_to_minion || return 1
   add_registry_to_minion || return 1
   add_kratos_to_minion || return 1
-  add_idstools_to_minion || return 1
   add_elastic_fleet_package_registry_to_minion || return 1
 }
 
@@ -779,7 +767,6 @@ function createMANAGER() {
   add_soc_to_minion || return 1
   add_registry_to_minion || return 1
   add_kratos_to_minion || return 1
-  add_idstools_to_minion || return 1
   add_elastic_fleet_package_registry_to_minion || return 1
 }
 
@@ -796,7 +783,6 @@ function createMANAGERSEARCH() {
   add_soc_to_minion || return 1
   add_registry_to_minion || return 1
   add_kratos_to_minion || return 1
-  add_idstools_to_minion || return 1
   add_elastic_fleet_package_registry_to_minion || return 1
 }
 
@@ -811,7 +797,6 @@ function createIMPORT() {
   add_soc_to_minion || return 1
   add_registry_to_minion || return 1
   add_kratos_to_minion || return 1
-  add_idstools_to_minion || return 1
   add_elastic_fleet_package_registry_to_minion || return 1
 }
 
@@ -896,7 +881,6 @@ function createMANAGERHYPE() {
   add_soc_to_minion || return 1
   add_registry_to_minion || return 1
   add_kratos_to_minion || return 1
-  add_idstools_to_minion || return 1
   add_elastic_fleet_package_registry_to_minion || return 1
 }
 
@@ -26,8 +26,8 @@ def showUsage(args):
     print(' Where:', file=sys.stderr)
     print(' YAML_FILE - Path to the file that will be modified. Ex: /opt/so/conf/service/conf.yaml', file=sys.stderr)
     print(' KEY - YAML key, does not support \' or " characters at this time. Ex: level1.level2', file=sys.stderr)
-    print(' VALUE - Value to set for a given key. Can be a literal value or file:<path> to load from a YAML file.', file=sys.stderr)
-    print(' LISTITEM - Item to append to a given key\'s list value. Can be a literal value or file:<path> to load from a YAML file.', file=sys.stderr)
+    print(' VALUE - Value to set for a given key', file=sys.stderr)
+    print(' LISTITEM - Item to append to a given key\'s list value', file=sys.stderr)
     sys.exit(1)
 
 
@@ -58,13 +58,7 @@ def appendItem(content, key, listItem):
 
 
 def convertType(value):
-    if isinstance(value, str) and value.startswith("file:"):
-        path = value[5:]  # Remove "file:" prefix
-        if not os.path.exists(path):
-            print(f"File '{path}' does not exist.", file=sys.stderr)
-            sys.exit(1)
-        return loadYaml(path)
-    elif isinstance(value, str) and len(value) > 0 and (not value.startswith("0") or len(value) == 1):
+    if isinstance(value, str) and len(value) > 0 and (not value.startswith("0") or len(value) == 1):
         if "." in value:
             try:
                 value = float(value)
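The two hunks above back the `file:` convention out of so-yaml's convertType: with the feature present, a VALUE or LISTITEM of the form `file:<path>` was parsed as YAML and substituted in; without it, the literal string is stored. A rough sketch of the difference, assuming the tool is installed as so-yaml.py with the append behavior implied by appendItem above (the target file, key, and snippet are all illustrative):

    # Hypothetical YAML snippet to merge into a config file.
    cat > /tmp/snippet.yaml <<'EOF'
    name: hi
    color: blue
    EOF

    # With the "file:" feature, the appended list item is the parsed mapping
    # {name: hi, color: blue}; after this change, the literal string
    # "file:/tmp/snippet.yaml" is appended instead.
    so-yaml.py append /opt/so/conf/service/conf.yaml level1.level2 "file:/tmp/snippet.yaml"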
@@ -361,29 +361,6 @@ class TestRemove(unittest.TestCase):
         self.assertEqual(soyaml.convertType("FALSE"), False)
         self.assertEqual(soyaml.convertType(""), "")
 
-    def test_convert_file(self):
-        import tempfile
-        import os
-
-        # Create a temporary YAML file
-        with tempfile.NamedTemporaryFile(mode='w', suffix='.yaml', delete=False) as f:
-            f.write("test:\n  - name: hi\n    color: blue\n")
-            temp_file = f.name
-
-        try:
-            result = soyaml.convertType(f"file:{temp_file}")
-            expected = {"test": [{"name": "hi", "color": "blue"}]}
-            self.assertEqual(result, expected)
-        finally:
-            os.unlink(temp_file)
-
-    def test_convert_file_nonexistent(self):
-        with self.assertRaises(SystemExit) as cm:
-            with patch('sys.stderr', new=StringIO()) as mock_stderr:
-                soyaml.convertType("file:/nonexistent/file.yaml")
-        self.assertEqual(cm.exception.code, 1)
-        self.assertIn("File '/nonexistent/file.yaml' does not exist.", mock_stderr.getvalue())
-
     def test_get_int(self):
         with patch('sys.stdout', new=StringIO()) as mock_stdout:
             filename = "/tmp/so-yaml_test-get.yaml"
@@ -274,7 +274,7 @@ check_os_updates() {
   if [[ "$confirm" == [cC] ]]; then
     echo "Continuing without updating packages"
   elif [[ "$confirm" == [uU] ]]; then
-    echo "Applying Grid Updates. The following patch.os salt state may take a while depending on how many packages need to be updated."
+    echo "Applying Grid Updates"
     update_flag=true
   else
     echo "Exiting soup"
@@ -1318,8 +1318,6 @@ upgrade_salt() {
     fi
   # Else do Ubuntu things
   elif [[ $is_deb ]]; then
-    # ensure these files don't exist when upgrading from 3006.9 to 3006.16
-    rm -f /etc/apt/keyrings/salt-archive-keyring-2023.pgp /etc/apt/sources.list.d/salt.list
     echo "Removing apt hold for Salt."
     echo ""
     apt-mark unhold "salt-common"
@@ -1681,7 +1679,7 @@ This appears to be a distributed deployment. Other nodes should update themselves
 
 Each minion is on a random 15 minute check-in period and things like network bandwidth can be a factor in how long the actual upgrade takes. If you have a heavy node on a slow link, it is going to take a while to get the containers to it. Depending on what changes happened between the versions, Elasticsearch might not be able to talk to said heavy node until the update is complete.
 
-If it looks like you’re missing data after the upgrade, please avoid restarting services and instead make sure at least one search node has completed its upgrade. The best way to do this is to run 'sudo salt-call state.highstate' from a search node and make sure there are no errors. Typically if it works on one node it will work on the rest. Sensor nodes are less complex and will update as they check in so you can monitor those from the Grid section of SOC.
+If it looks like you’re missing data after the upgrade, please avoid restarting services and instead make sure at least one search node has completed its upgrade. The best way to do this is to run 'sudo salt-call state.highstate' from a search node and make sure there are no errors. Typically if it works on one node it will work on the rest. Forward nodes are less complex and will update as they check in so you can monitor those from the Grid section of SOC.
 
 For more information, please see $DOC_BASE_URL/soup.html#distributed-deployments.
 
@@ -8,9 +8,12 @@
 {% from 'vars/globals.map.jinja' import GLOBALS %}
 {% from "pcap/config.map.jinja" import PCAPMERGED %}
-{% from 'bpf/pcap.map.jinja' import PCAPBPF, PCAP_BPF_STATUS, PCAP_BPF_CALC, STENO_BPF_COMPILED %}
+{% from 'bpf/pcap.map.jinja' import PCAPBPF %}
 
+{% set BPF_COMPILED = "" %}
+
 # PCAP Section
 
 stenographergroup:
   group.present:
     - name: stenographer
@@ -37,12 +40,18 @@ pcap_sbin:
     - group: 939
     - file_mode: 755
 
-{% if PCAPBPF and not PCAP_BPF_STATUS %}
-stenoPCAPbpfcompilationfailure:
+{% if PCAPBPF %}
+  {% set BPF_CALC = salt['cmd.script']('salt://common/tools/sbin/so-bpf-compile', GLOBALS.sensor.interface + ' ' + PCAPBPF|join(" "),cwd='/root') %}
+  {% if BPF_CALC['stderr'] == "" %}
+    {% set BPF_COMPILED = ",\\\"--filter=" + BPF_CALC['stdout'] + "\\\"" %}
+  {% else %}
+
+bpfcompilationfailure:
   test.configurable_test_state:
     - changes: False
     - result: False
-    - comment: "BPF Syntax Error - Discarding Specified BPF. Error: {{ PCAP_BPF_CALC['stderr'] }}"
+    - comment: "BPF Compilation Failed - Discarding Specified BPF"
+  {% endif %}
 {% endif %}
 
 stenoconf:
@@ -55,7 +64,7 @@ stenoconf:
     - template: jinja
     - defaults:
        PCAPMERGED: {{ PCAPMERGED }}
-        STENO_BPF_COMPILED: "{{ STENO_BPF_COMPILED }}"
+        BPF_COMPILED: "{{ BPF_COMPILED }}"
 
 stenoca:
   file.directory:
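Both versions of the state above shell out to so-bpf-compile to validate the sensor's BPF before handing Stenographer a `--filter` flag; the new code keys off an empty stderr rather than a status flag computed in the map file. A rough command-line equivalent of that check (the interface name and filter are illustrative):

    iface="bond0"                                        # illustrative interface
    err=$(so-bpf-compile "$iface" "not host 192.0.2.10" 2>&1 >/dev/null)
    if [ -z "$err" ]; then
      echo "filter compiles; stenographer would get a --filter flag"
    else
      echo "BPF rejected, filter discarded: $err"
    fi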
@@ -6,6 +6,6 @@
 , "Interface": "{{ pillar.sensor.interface }}"
 , "Port": 1234
 , "Host": "127.0.0.1"
-, "Flags": ["-v", "--blocks={{ PCAPMERGED.config.blocks }}", "--preallocate_file_mb={{ PCAPMERGED.config.preallocate_file_mb }}", "--aiops={{ PCAPMERGED.config.aiops }}", "--uid=stenographer", "--gid=stenographer"{{ STENO_BPF_COMPILED }}]
+, "Flags": ["-v", "--blocks={{ PCAPMERGED.config.blocks }}", "--preallocate_file_mb={{ PCAPMERGED.config.preallocate_file_mb }}", "--aiops={{ PCAPMERGED.config.aiops }}", "--uid=stenographer", "--gid=stenographer"{{ BPF_COMPILED }}]
 , "CertPath": "/etc/stenographer/certs"
 }
@@ -7,7 +7,7 @@ pcap:
|
|||||||
description: By default, Stenographer limits the number of files in the pcap directory to 30000 to avoid limitations with the ext3 filesystem. However, if you're using the ext4 or xfs filesystems, then it is safe to increase this value. So if you have a large amount of storage and find that you only have 3 weeks worth of PCAP on disk while still having plenty of free space, then you may want to increase this default setting.
|
description: By default, Stenographer limits the number of files in the pcap directory to 30000 to avoid limitations with the ext3 filesystem. However, if you're using the ext4 or xfs filesystems, then it is safe to increase this value. So if you have a large amount of storage and find that you only have 3 weeks worth of PCAP on disk while still having plenty of free space, then you may want to increase this default setting.
|
||||||
helpLink: stenographer.html
|
helpLink: stenographer.html
|
||||||
diskfreepercentage:
|
diskfreepercentage:
|
||||||
description: Stenographer will purge old PCAP on a regular basis to keep the disk free percentage at this level. If you have a distributed deployment with dedicated Sensor nodes, then the default value of 10 should be reasonable since Stenographer should be the main consumer of disk space in the /nsm partition. However, if you have systems that run both Stenographer and Elasticsearch at the same time (like eval and standalone installations), then you’ll want to make sure that this value is no lower than 21 so that you avoid Elasticsearch hitting its watermark setting at 80% disk usage. If you have an older standalone installation, then you may need to manually change this value to 21.
|
description: Stenographer will purge old PCAP on a regular basis to keep the disk free percentage at this level. If you have a distributed deployment with dedicated forward nodes, then the default value of 10 should be reasonable since Stenographer should be the main consumer of disk space in the /nsm partition. However, if you have systems that run both Stenographer and Elasticsearch at the same time (like eval and standalone installations), then you’ll want to make sure that this value is no lower than 21 so that you avoid Elasticsearch hitting its watermark setting at 80% disk usage. If you have an older standalone installation, then you may need to manually change this value to 21.
|
||||||
helpLink: stenographer.html
|
helpLink: stenographer.html
|
||||||
blocks:
|
blocks:
|
||||||
description: The number of 1MB packet blocks used by Stenographer and AF_PACKET to store packets in memory, per thread. You shouldn't need to change this.
|
description: The number of 1MB packet blocks used by Stenographer and AF_PACKET to store packets in memory, per thread. You shouldn't need to change this.
|
||||||
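The diskfreepercentage behavior described above reduces to simple arithmetic on df output; a rough sketch of the same check (assuming /nsm is its own partition, as on a typical sensor):

    #!/bin/sh
    # Report how much of /nsm is free and whether it is below the purge target.
    TARGET=10   # dedicated-sensor default per the description; use >=21 when Elasticsearch shares the disk
    USED=$(df --output=pcent /nsm | tail -1 | tr -dc '0-9')
    FREE=$((100 - USED))
    echo "/nsm free: ${FREE}% (target: ${TARGET}%)"
    [ "$FREE" -lt "$TARGET" ] && echo "below target - old PCAP would be purged"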
@@ -5,7 +5,6 @@

 {% from 'allowed_states.map.jinja' import allowed_states %}
 {% if sls.split('.')[0] in allowed_states %}
-{% from 'vars/globals.map.jinja' import GLOBALS %}
 {% from 'docker/docker.map.jinja' import DOCKER %}

 include:
@@ -58,17 +57,6 @@ so-dockerregistry:
       - x509: registry_crt
       - x509: registry_key

-wait_for_so-dockerregistry:
-  http.wait_for_successful_query:
-    - name: 'https://{{ GLOBALS.registry_host }}:5000/v2/'
-    - ssl: True
-    - verify_ssl: False
-    - status: 200
-    - wait_for: 120
-    - request_interval: 5
-    - require:
-      - docker_container: so-dockerregistry
-
 delete_so-dockerregistry_so-status.disabled:
   file.uncomment:
     - name: /opt/so/conf/so-status/so-status.conf
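For reference, the wait_for_so-dockerregistry state removed above was just an HTTP poll of the registry's /v2/ endpoint (expect 200 within 120 seconds, retrying every 5); the equivalent manual check is a short curl loop (REGISTRY_HOST stands in for GLOBALS.registry_host):

    #!/bin/sh
    # Poll the registry API (up to 120s, every 5s) until it returns HTTP 200.
    REGISTRY_HOST="${1:-localhost}"
    for _ in $(seq 1 24); do
      CODE=$(curl -sk -o /dev/null -w '%{http_code}' "https://${REGISTRY_HOST}:5000/v2/")
      [ "$CODE" = "200" ] && echo "registry up" && exit 0
      sleep 5
    done
    echo "registry did not respond within 120s" >&2
    exit 1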
@@ -6,30 +6,6 @@ engines:
       interval: 60
   - pillarWatch:
       fpa:
-        - files:
-            - /opt/so/saltstack/local/pillar/idstools/soc_idstools.sls
-            - /opt/so/saltstack/local/pillar/idstools/adv_idstools.sls
-          pillar: idstools.config.ruleset
-          default: ETOPEN
-          actions:
-            from:
-              '*':
-                to:
-                  '*':
-                    - cmd.run:
-                        cmd: /usr/sbin/so-rule-update
-        - files:
-            - /opt/so/saltstack/local/pillar/idstools/soc_idstools.sls
-            - /opt/so/saltstack/local/pillar/idstools/adv_idstools.sls
-          pillar: idstools.config.oinkcode
-          default: ''
-          actions:
-            from:
-              '*':
-                to:
-                  '*':
-                    - cmd.run:
-                        cmd: /usr/sbin/so-rule-update
         - files:
             - /opt/so/saltstack/local/pillar/global/soc_global.sls
            - /opt/so/saltstack/local/pillar/global/adv_global.sls
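Each pillarWatch entry above pairs a set of watched pillar files and a tracked pillar key with an action to run when the value transitions (the '*' to '*' wildcards match any change). Stripped of the Salt engine machinery, the behavior is roughly this loop (a conceptual sketch, not the engine's actual code; the action message is a stand-in for whatever command the entry configures):

    #!/bin/sh
    # Conceptual sketch of a pillarWatch entry: watch a pillar file, act on change.
    WATCHED=/opt/so/saltstack/local/pillar/global/soc_global.sls
    LAST=$(md5sum "$WATCHED" | cut -d' ' -f1)
    while sleep 60; do                 # interval: 60, as configured above
      NOW=$(md5sum "$WATCHED" | cut -d' ' -f1)
      if [ "$NOW" != "$LAST" ]; then
        LAST=$NOW
        echo "watched pillar changed; engine would run the configured action"
      fi
    done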
@@ -26,7 +26,7 @@
 #======================================================================================================================
 set -o nounset                              # Treat unset variables as an error

-__ScriptVersion="2025.09.03"
+__ScriptVersion="2025.02.24"
 __ScriptName="bootstrap-salt.sh"

 __ScriptFullName="$0"
@@ -48,7 +48,6 @@ __ScriptArgs="$*"
 # * BS_GENTOO_USE_BINHOST: If 1 add `--getbinpkg` to gentoo's emerge
 # * BS_SALT_MASTER_ADDRESS: The IP or DNS name of the salt-master the minion should connect to
 # * BS_SALT_GIT_CHECKOUT_DIR: The directory where to clone Salt on git installations
-# * BS_TMP_DIR: The directory to use for executing the installation (defaults to /tmp)
 #======================================================================================================================


@@ -172,12 +171,12 @@ __check_config_dir() {

     case "$CC_DIR_NAME" in
         http://*|https://*)
-            __fetch_url "${_TMP_DIR}/${CC_DIR_BASE}" "${CC_DIR_NAME}"
-            CC_DIR_NAME="${_TMP_DIR}/${CC_DIR_BASE}"
+            __fetch_url "/tmp/${CC_DIR_BASE}" "${CC_DIR_NAME}"
+            CC_DIR_NAME="/tmp/${CC_DIR_BASE}"
            ;;
        ftp://*)
-            __fetch_url "${_TMP_DIR}/${CC_DIR_BASE}" "${CC_DIR_NAME}"
-            CC_DIR_NAME="${_TMP_DIR}/${CC_DIR_BASE}"
+            __fetch_url "/tmp/${CC_DIR_BASE}" "${CC_DIR_NAME}"
+            CC_DIR_NAME="/tmp/${CC_DIR_BASE}"
            ;;
        *://*)
            echoerror "Unsupported URI scheme for $CC_DIR_NAME"
@@ -195,22 +194,22 @@ __check_config_dir() {

     case "$CC_DIR_NAME" in
         *.tgz|*.tar.gz)
-            tar -zxf "${CC_DIR_NAME}" -C ${_TMP_DIR}
+            tar -zxf "${CC_DIR_NAME}" -C /tmp
             CC_DIR_BASE=$(basename "${CC_DIR_BASE}" ".tgz")
             CC_DIR_BASE=$(basename "${CC_DIR_BASE}" ".tar.gz")
-            CC_DIR_NAME="${_TMP_DIR}/${CC_DIR_BASE}"
+            CC_DIR_NAME="/tmp/${CC_DIR_BASE}"
            ;;
        *.tbz|*.tar.bz2)
-            tar -xjf "${CC_DIR_NAME}" -C ${_TMP_DIR}
+            tar -xjf "${CC_DIR_NAME}" -C /tmp
             CC_DIR_BASE=$(basename "${CC_DIR_BASE}" ".tbz")
             CC_DIR_BASE=$(basename "${CC_DIR_BASE}" ".tar.bz2")
-            CC_DIR_NAME="${_TMP_DIR}/${CC_DIR_BASE}"
+            CC_DIR_NAME="/tmp/${CC_DIR_BASE}"
            ;;
        *.txz|*.tar.xz)
-            tar -xJf "${CC_DIR_NAME}" -C ${_TMP_DIR}
+            tar -xJf "${CC_DIR_NAME}" -C /tmp
             CC_DIR_BASE=$(basename "${CC_DIR_BASE}" ".txz")
             CC_DIR_BASE=$(basename "${CC_DIR_BASE}" ".tar.xz")
-            CC_DIR_NAME="${_TMP_DIR}/${CC_DIR_BASE}"
+            CC_DIR_NAME="/tmp/${CC_DIR_BASE}"
            ;;
     esac

@@ -246,7 +245,6 @@ __check_unparsed_options() {
 #----------------------------------------------------------------------------------------------------------------------
 _KEEP_TEMP_FILES=${BS_KEEP_TEMP_FILES:-$BS_FALSE}
 _TEMP_CONFIG_DIR="null"
-_TMP_DIR=${BS_TMP_DIR:-"/tmp"}
 _SALTSTACK_REPO_URL="https://github.com/saltstack/salt.git"
 _SALT_REPO_URL=${_SALTSTACK_REPO_URL}
 _TEMP_KEYS_DIR="null"
@@ -283,7 +281,7 @@ _SIMPLIFY_VERSION=$BS_TRUE
 _LIBCLOUD_MIN_VERSION="0.14.0"
 _EXTRA_PACKAGES=""
 _HTTP_PROXY=""
-_SALT_GIT_CHECKOUT_DIR=${BS_SALT_GIT_CHECKOUT_DIR:-${_TMP_DIR}/git/salt}
+_SALT_GIT_CHECKOUT_DIR=${BS_SALT_GIT_CHECKOUT_DIR:-/tmp/git/salt}
 _NO_DEPS=$BS_FALSE
 _FORCE_SHALLOW_CLONE=$BS_FALSE
 _DISABLE_SSL=$BS_FALSE
@@ -369,7 +367,7 @@ __usage() {
     also be specified. Salt installation will be ommitted, but some of the
     dependencies could be installed to write configuration with -j or -J.
   -d  Disables checking if Salt services are enabled to start on system boot.
-      You can also do this by touching ${BS_TMP_DIR}/disable_salt_checks on the target
+      You can also do this by touching /tmp/disable_salt_checks on the target
       host. Default: \${BS_FALSE}
   -D  Show debug output
   -f  Force shallow cloning for git installations.
@@ -426,9 +424,6 @@ __usage() {
   -r  Disable all repository configuration performed by this script. This
       option assumes all necessary repository configuration is already present
       on the system.
-  -T  If set this overrides the use of /tmp for script execution. This is
-      to allow for systems in which noexec is applied to temp filesystem mounts
-      for security reasons
   -U  If set, fully upgrade the system prior to bootstrapping Salt
   -v  Display script version
   -V  Install Salt into virtualenv
@@ -441,7 +436,7 @@ __usage() {
 EOT
 }   # ----------  end of function __usage  ----------

-while getopts ':hvnDc:g:Gx:k:s:MSWNXCPFUKIA:i:Lp:dH:bflV:J:j:rR:T:aqQ' opt
+while getopts ':hvnDc:g:Gx:k:s:MSWNXCPFUKIA:i:Lp:dH:bflV:J:j:rR:aqQ' opt
 do
   case "${opt}" in

@@ -483,7 +478,6 @@ do
     a )  _PIP_ALL=$BS_TRUE ;;
     r )  _DISABLE_REPOS=$BS_TRUE ;;
     R )  _CUSTOM_REPO_URL=$OPTARG ;;
-    T )  _TMP_DIR="$OPTARG" ;;
     J )  _CUSTOM_MASTER_CONFIG=$OPTARG ;;
     j )  _CUSTOM_MINION_CONFIG=$OPTARG ;;
     q )  _QUIET_GIT_INSTALLATION=$BS_TRUE ;;
@@ -501,10 +495,10 @@ done
 shift $((OPTIND-1))

 # Define our logging file and pipe paths
-LOGFILE="${_TMP_DIR}/$( echo "$__ScriptName" | sed s/.sh/.log/g )"
-LOGPIPE="${_TMP_DIR}/$( echo "$__ScriptName" | sed s/.sh/.logpipe/g )"
+LOGFILE="/tmp/$( echo "$__ScriptName" | sed s/.sh/.log/g )"
+LOGPIPE="/tmp/$( echo "$__ScriptName" | sed s/.sh/.logpipe/g )"
 # Ensure no residual pipe exists
-rm -f "$LOGPIPE" 2>/dev/null
+rm "$LOGPIPE" 2>/dev/null

 # Create our logging pipe
 # On FreeBSD we have to use mkfifo instead of mknod
@@ -540,7 +534,7 @@ exec 2>"$LOGPIPE"
 # 14 SIGALRM
 # 15 SIGTERM
 #----------------------------------------------------------------------------------------------------------------------
-APT_ERR=$(mktemp ${_TMP_DIR}/apt_error.XXXXXX)
+APT_ERR=$(mktemp /tmp/apt_error.XXXXXX)
 __exit_cleanup() {
     EXIT_CODE=$?

@@ -933,11 +927,6 @@ if [ -d "${_VIRTUALENV_DIR}" ]; then
         exit 1
     fi

-# Make sure the designated temp directory exists
-if [ ! -d "${_TMP_DIR}" ]; then
-    mkdir -p "${_TMP_DIR}"
-fi
-
 #--- FUNCTION -------------------------------------------------------------------------------------------------------
 # NAME: __fetch_url
 # DESCRIPTION: Retrieves a URL and writes it to a given path
@@ -1952,6 +1941,11 @@ __wait_for_apt(){
     # Timeout set at 15 minutes
     WAIT_TIMEOUT=900

+    ## see if sync'ing the clocks helps
+    if [ -f /usr/sbin/hwclock ]; then
+        /usr/sbin/hwclock -s
+    fi
+
     # Run our passed in apt command
     "${@}" 2>"$APT_ERR"
     APT_RETURN=$?
@@ -2002,14 +1996,14 @@ __apt_get_upgrade_noinput() {
 #----------------------------------------------------------------------------------------------------------------------
 __temp_gpg_pub() {
     if __check_command_exists mktemp; then
-        tempfile="$(mktemp ${_TMP_DIR}/salt-gpg-XXXXXXXX.pub 2>/dev/null)"
+        tempfile="$(mktemp /tmp/salt-gpg-XXXXXXXX.pub 2>/dev/null)"

         if [ -z "$tempfile" ]; then
-            echoerror "Failed to create temporary file in ${_TMP_DIR}"
+            echoerror "Failed to create temporary file in /tmp"
             return 1
         fi
     else
-        tempfile="${_TMP_DIR}/salt-gpg-$$.pub"
+        tempfile="/tmp/salt-gpg-$$.pub"
     fi

     echo $tempfile
@@ -2049,7 +2043,7 @@ __rpm_import_gpg() {
     __fetch_url "$tempfile" "$url" || return 1

     # At least on CentOS 8, a missing newline at the end causes:
-    #   error: ${_TMP_DIR}/salt-gpg-n1gKUb1u.pub: key 1 not an armored public key.
+    #   error: /tmp/salt-gpg-n1gKUb1u.pub: key 1 not an armored public key.
     # shellcheck disable=SC1003,SC2086
     sed -i -e '$a\' $tempfile

@@ -2115,7 +2109,7 @@ __git_clone_and_checkout() {
     fi

     __SALT_GIT_CHECKOUT_PARENT_DIR=$(dirname "${_SALT_GIT_CHECKOUT_DIR}" 2>/dev/null)
-    __SALT_GIT_CHECKOUT_PARENT_DIR="${__SALT_GIT_CHECKOUT_PARENT_DIR:-${_TMP_DIR}/git}"
+    __SALT_GIT_CHECKOUT_PARENT_DIR="${__SALT_GIT_CHECKOUT_PARENT_DIR:-/tmp/git}"
     __SALT_CHECKOUT_REPONAME="$(basename "${_SALT_GIT_CHECKOUT_DIR}" 2>/dev/null)"
     __SALT_CHECKOUT_REPONAME="${__SALT_CHECKOUT_REPONAME:-salt}"
     [ -d "${__SALT_GIT_CHECKOUT_PARENT_DIR}" ] || mkdir "${__SALT_GIT_CHECKOUT_PARENT_DIR}"
@@ -2168,7 +2162,7 @@ __git_clone_and_checkout() {

     if [ "$__SHALLOW_CLONE" -eq $BS_TRUE ]; then
         # Let's try 'treeless' cloning to speed up. Treeless cloning omits trees and blobs ('files')
         # but includes metadata (commit history, tags, branches etc.
         # Test for "--filter" option introduced in git 2.19, the minimal version of git where the treeless
         # cloning we need actually works
         if [ "$(git clone 2>&1 | grep 'filter')" != "" ]; then
@@ -2396,14 +2390,14 @@ __overwriteconfig() {

     # Make a tempfile to dump any python errors into.
     if __check_command_exists mktemp; then
-        tempfile="$(mktemp ${_TMP_DIR}/salt-config-XXXXXXXX 2>/dev/null)"
+        tempfile="$(mktemp /tmp/salt-config-XXXXXXXX 2>/dev/null)"

         if [ -z "$tempfile" ]; then
-            echoerror "Failed to create temporary file in ${_TMP_DIR}"
+            echoerror "Failed to create temporary file in /tmp"
             return 1
         fi
     else
-        tempfile="${_TMP_DIR}/salt-config-$$"
+        tempfile="/tmp/salt-config-$$"
     fi

     if [ -n "$_PY_EXE" ]; then
@@ -2766,8 +2760,8 @@ __install_salt_from_repo() {
     echoinfo "Installing salt using ${_py_exe}, $(${_py_exe} --version)"
     cd "${_SALT_GIT_CHECKOUT_DIR}" || return 1

-    mkdir -p ${_TMP_DIR}/git/deps
-    echodebug "Created directory ${_TMP_DIR}/git/deps"
+    mkdir -p /tmp/git/deps
+    echodebug "Created directory /tmp/git/deps"

     if [ ${DISTRO_NAME_L} = "ubuntu" ] && [ "$DISTRO_MAJOR_VERSION" -eq 22 ]; then
         echodebug "Ubuntu 22.04 has problem with base.txt requirements file, not parsing sys_platform == 'win32', upgrading from default pip works"
@@ -2780,7 +2774,7 @@ __install_salt_from_repo() {
         fi
     fi

-    rm -f ${_TMP_DIR}/git/deps/*
+    rm -f /tmp/git/deps/*

     echodebug "Installing Salt requirements from PyPi, ${_pip_cmd} install ${_USE_BREAK_SYSTEM_PACKAGES} --ignore-installed ${_PIP_INSTALL_ARGS} -r requirements/static/ci/py${_py_version}/linux.txt"
     ${_pip_cmd} install ${_USE_BREAK_SYSTEM_PACKAGES} --ignore-installed ${_PIP_INSTALL_ARGS} -r "requirements/static/ci/py${_py_version}/linux.txt"
@@ -2805,7 +2799,7 @@ __install_salt_from_repo() {

     echodebug "Running '${_py_exe} setup.py --salt-config-dir=$_SALT_ETC_DIR --salt-cache-dir=${_SALT_CACHE_DIR} ${SETUP_PY_INSTALL_ARGS} bdist_wheel'"
     ${_py_exe} setup.py --salt-config-dir="$_SALT_ETC_DIR" --salt-cache-dir="${_SALT_CACHE_DIR} ${SETUP_PY_INSTALL_ARGS}" bdist_wheel || return 1
-    mv dist/salt*.whl ${_TMP_DIR}/git/deps/ || return 1
+    mv dist/salt*.whl /tmp/git/deps/ || return 1

     cd "${__SALT_GIT_CHECKOUT_PARENT_DIR}" || return 1

@@ -2819,14 +2813,14 @@ __install_salt_from_repo() {
         ${_pip_cmd} install --force-reinstall --break-system-packages "${_arch_dep}"
     fi

-    echodebug "Running '${_pip_cmd} install ${_USE_BREAK_SYSTEM_PACKAGES} --no-deps --force-reinstall ${_PIP_INSTALL_ARGS} ${_TMP_DIR}/git/deps/salt*.whl'"
+    echodebug "Running '${_pip_cmd} install ${_USE_BREAK_SYSTEM_PACKAGES} --no-deps --force-reinstall ${_PIP_INSTALL_ARGS} /tmp/git/deps/salt*.whl'"

-    echodebug "Running ${_pip_cmd} install ${_USE_BREAK_SYSTEM_PACKAGES} --no-deps --force-reinstall ${_PIP_INSTALL_ARGS} --global-option=--salt-config-dir=$_SALT_ETC_DIR --salt-cache-dir=${_SALT_CACHE_DIR} ${SETUP_PY_INSTALL_ARGS} ${_TMP_DIR}/git/deps/salt*.whl"
+    echodebug "Running ${_pip_cmd} install ${_USE_BREAK_SYSTEM_PACKAGES} --no-deps --force-reinstall ${_PIP_INSTALL_ARGS} --global-option=--salt-config-dir=$_SALT_ETC_DIR --salt-cache-dir=${_SALT_CACHE_DIR} ${SETUP_PY_INSTALL_ARGS} /tmp/git/deps/salt*.whl"

     ${_pip_cmd} install ${_USE_BREAK_SYSTEM_PACKAGES} --no-deps --force-reinstall \
         ${_PIP_INSTALL_ARGS} \
         --global-option="--salt-config-dir=$_SALT_ETC_DIR --salt-cache-dir=${_SALT_CACHE_DIR} ${SETUP_PY_INSTALL_ARGS}" \
-        ${_TMP_DIR}/git/deps/salt*.whl || return 1
+        /tmp/git/deps/salt*.whl || return 1

     echoinfo "Checking if Salt can be imported using ${_py_exe}"
     CHECK_SALT_SCRIPT=$(cat << EOM
@@ -6301,8 +6295,8 @@ __get_packagesite_onedir_latest() {
 }


-__install_saltstack_vmware_photon_os_onedir_repository() {
-    echodebug "__install_saltstack_vmware_photon_os_onedir_repository() entry"
+__install_saltstack_photon_onedir_repository() {
+    echodebug "__install_saltstack_photon_onedir_repository() entry"

     if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -ne 3 ]; then
         echoerror "Python version is no longer supported, only Python 3"
@@ -6382,8 +6376,8 @@ __install_saltstack_vmware_photon_os_onedir_repository() {
     return 0
 }

-install_vmware_photon_os_deps() {
-    echodebug "install_vmware_photon_os_deps() entry"
+install_photon_deps() {
+    echodebug "install_photon_deps() entry"

     if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -ne 3 ]; then
         echoerror "Python version is no longer supported, only Python 3"
@@ -6412,8 +6406,8 @@ install_vmware_photon_os_deps() {
     return 0
 }

-install_vmware_photon_os_stable_post() {
-    echodebug "install_vmware_photon_os_stable_post() entry"
+install_photon_stable_post() {
+    echodebug "install_photon_stable_post() entry"

     for fname in api master minion syndic; do
         # Skip salt-api since the service should be opt-in and not necessarily started on boot
@@ -6430,8 +6424,8 @@ install_vmware_photon_os_stable_post() {
     done
 }

-install_vmware_photon_os_git_deps() {
-    echodebug "install_vmware_photon_os_git_deps() entry"
+install_photon_git_deps() {
+    echodebug "install_photon_git_deps() entry"

     if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -ne 3 ]; then
         echoerror "Python version is no longer supported, only Python 3"
@@ -6469,7 +6463,7 @@ install_vmware_photon_os_git_deps() {

     __PACKAGES="python${PY_PKG_VER}-devel python${PY_PKG_VER}-pip python${PY_PKG_VER}-setuptools gcc glibc-devel linux-devel.x86_64 cython${PY_PKG_VER}"

-    echodebug "install_vmware_photon_os_git_deps() distro major version, ${DISTRO_MAJOR_VERSION}"
+    echodebug "install_photon_git_deps() distro major version, ${DISTRO_MAJOR_VERSION}"

     ## Photon 5 container is missing systemd on default installation
     if [ "${DISTRO_MAJOR_VERSION}" -lt 5 ]; then
@@ -6495,8 +6489,8 @@ install_vmware_photon_os_git_deps() {
     return 0
 }

-install_vmware_photon_os_git() {
-    echodebug "install_vmware_photon_os_git() entry"
+install_photon_git() {
+    echodebug "install_photon_git() entry"

     if [ "${_PY_EXE}" != "" ]; then
         _PYEXE=${_PY_EXE}
@@ -6506,7 +6500,7 @@ install_vmware_photon_os_git() {
         return 1
     fi

-    install_vmware_photon_os_git_deps
+    install_photon_git_deps

     if [ -f "${_SALT_GIT_CHECKOUT_DIR}/salt/syspaths.py" ]; then
         ${_PYEXE} setup.py --salt-config-dir="$_SALT_ETC_DIR" --salt-cache-dir="${_SALT_CACHE_DIR}" ${SETUP_PY_INSTALL_ARGS} install --prefix=/usr || return 1
@@ -6516,8 +6510,8 @@ install_vmware_photon_os_git() {
     return 0
 }

-install_vmware_photon_os_git_post() {
-    echodebug "install_vmware_photon_os_git_post() entry"
+install_photon_git_post() {
+    echodebug "install_photon_git_post() entry"

     for fname in api master minion syndic; do
         # Skip if not meant to be installed
@@ -6549,9 +6543,9 @@ install_vmware_photon_os_git_post() {
     done
 }

-install_vmware_photon_os_restart_daemons() {
+install_photon_restart_daemons() {
     [ "$_START_DAEMONS" -eq $BS_FALSE ] && return
-    echodebug "install_vmware_photon_os_restart_daemons() entry"
+    echodebug "install_photon_restart_daemons() entry"


     for fname in api master minion syndic; do
@@ -6573,8 +6567,8 @@ install_vmware_photon_os_restart_daemons() {
     done
 }

-install_vmware_photon_os_check_services() {
-    echodebug "install_vmware_photon_os_check_services() entry"
+install_photon_check_services() {
+    echodebug "install_photon_check_services() entry"

     for fname in api master minion syndic; do
         # Skip salt-api since the service should be opt-in and not necessarily started on boot
@@ -6591,8 +6585,8 @@ install_vmware_photon_os_check_services() {
     return 0
 }

-install_vmware_photon_os_onedir_deps() {
-    echodebug "install_vmware_photon_os_onedir_deps() entry"
+install_photon_onedir_deps() {
+    echodebug "install_photon_onedir_deps() entry"


     if [ "$_UPGRADE_SYS" -eq $BS_TRUE ]; then
@@ -6606,17 +6600,17 @@ install_vmware_photon_os_onedir_deps() {
     fi

     if [ "$_DISABLE_REPOS" -eq "$BS_FALSE" ]; then
-        __install_saltstack_vmware_photon_os_onedir_repository || return 1
+        __install_saltstack_photon_onedir_repository || return 1
     fi

     # If -R was passed, we need to configure custom repo url with rsync-ed packages
     # Which was handled in __install_saltstack_rhel_repository buu that hanlded old-stable which is for
     # releases which are End-Of-Life. This call has its own check in case -r was passed without -R.
     if [ "$_CUSTOM_REPO_URL" != "null" ]; then
-        __install_saltstack_vmware_photon_os_onedir_repository || return 1
+        __install_saltstack_photon_onedir_repository || return 1
     fi

-    __PACKAGES="procps-ng sudo shadow wget"
+    __PACKAGES="procps-ng sudo shadow"

     # shellcheck disable=SC2086
     __tdnf_install_noinput ${__PACKAGES} || return 1
@@ -6632,9 +6626,9 @@ install_vmware_photon_os_onedir_deps() {
 }


-install_vmware_photon_os_onedir() {
+install_photon_onedir() {

-    echodebug "install_vmware_photon_os_onedir() entry"
+    echodebug "install_photon_onedir() entry"

     STABLE_REV=$ONEDIR_REV
     _GENERIC_PKG_VERSION=""
@@ -6678,9 +6672,9 @@ install_vmware_photon_os_onedir() {
     return 0
 }

-install_vmware_photon_os_onedir_post() {
+install_photon_onedir_post() {
     STABLE_REV=$ONEDIR_REV
-    install_vmware_photon_os_stable_post || return 1
+    install_photon_stable_post || return 1

     return 0
 }
@@ -7803,7 +7797,7 @@ install_macosx_git_deps() {
         export PATH=/usr/local/bin:$PATH
     fi

-    __fetch_url "${_TMP_DIR}/get-pip.py" "https://bootstrap.pypa.io/get-pip.py" || return 1
+    __fetch_url "/tmp/get-pip.py" "https://bootstrap.pypa.io/get-pip.py" || return 1

     if [ -n "$_PY_EXE" ]; then
         _PYEXE="${_PY_EXE}"
@@ -7813,7 +7807,7 @@ install_macosx_git_deps() {
     fi

     # Install PIP
-    $_PYEXE ${_TMP_DIR}/get-pip.py || return 1
+    $_PYEXE /tmp/get-pip.py || return 1

     # shellcheck disable=SC2119
     __git_clone_and_checkout || return 1
@@ -7825,9 +7819,9 @@ install_macosx_stable() {

     install_macosx_stable_deps || return 1

-    __fetch_url "${_TMP_DIR}/${PKG}" "${SALTPKGCONFURL}" || return 1
+    __fetch_url "/tmp/${PKG}" "${SALTPKGCONFURL}" || return 1

-    /usr/sbin/installer -pkg "${_TMP_DIR}/${PKG}" -target / || return 1
+    /usr/sbin/installer -pkg "/tmp/${PKG}" -target / || return 1

     return 0
 }
@@ -7836,9 +7830,9 @@ install_macosx_onedir() {

     install_macosx_onedir_deps || return 1

-    __fetch_url "${_TMP_DIR}/${PKG}" "${SALTPKGCONFURL}" || return 1
+    __fetch_url "/tmp/${PKG}" "${SALTPKGCONFURL}" || return 1

-    /usr/sbin/installer -pkg "${_TMP_DIR}/${PKG}" -target / || return 1
+    /usr/sbin/installer -pkg "/tmp/${PKG}" -target / || return 1

     return 0
 }
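Context for the -T/_TMP_DIR feature being backed out above: on the 2025.09.03 side of the diff, the override exists so the bootstrap can stage its work area somewhere other than /tmp, for example on hosts where /tmp is mounted noexec. Usage on that side looks like this (the directory path is a placeholder):

    # /tmp is noexec on this host, so point the script at another work area:
    sh bootstrap-salt.sh -T /var/tmp/salt-bootstrap
    # or, equivalently, via the environment variable the script consults:
    BS_TMP_DIR=/var/tmp/salt-bootstrap sh bootstrap-salt.sh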
@@ -15,7 +15,7 @@ sensoroni:
   sensoronikey:
   soc_host:
   suripcap:
-    pcapMaxCount: 100000
+    pcapMaxCount: 999999
   analyzers:
     echotrail:
       base_url: https://api.echotrail.io/insights/
@@ -1364,8 +1364,6 @@ soc:
       cases: soc
       filedatastore:
         jobDir: jobs
-        retryFailureIntervalMs: 600000
-        retryFailureMaxAttempts: 5
       kratos:
         hostUrl:
       hydra:
@@ -1563,12 +1561,72 @@ soc:
       disableRegex: []
       enableRegex: []
       failAfterConsecutiveErrorCount: 10
-      communityRulesFile: /nsm/rules/suricata/emerging-all.rules
       rulesFingerprintFile: /opt/sensoroni/fingerprints/emerging-all.fingerprint
       stateFilePath: /opt/sensoroni/fingerprints/suricataengine.state
       integrityCheckFrequencySeconds: 1200
       ignoredSidRanges:
         - '1100000-1101000'
+      rulesetSources:
+        default:
+          - name: Emerging-Threats
+            description: "Emerging Threats ruleset - To enable ET Pro, enter your license key below. Leave empty for ET Open (free) rules."
+            licenseKey: ""
+            enabled: true
+            sourceType: url
+            sourcePath: 'https://rules.emergingthreats.net/open/suricata/emerging.rules.tar.gz'
+            urlHash: "https://rules.emergingthreats.net/open/suricata/emerging.rules.tar.gz.md5"
+            license: "BSD"
+            excludeFiles:
+              - "*deleted*"
+              - "*retired*"
+            proxyURL: ""
+            proxyUsername: ""
+            proxyPassword: ""
+            proxyCACert: ""
+            insecureSkipVerify: false
+            readOnly: true
+            deleteUnreferenced: true
+          - name: local-rules
+            id: local-rules
+            description: "Local custom rules from files (*.rules) in a directory on the filesystem"
+            license: "custom"
+            sourceType: directory
+            sourcePath: /nsm/rules/local/
+            readOnly: false
+            deleteUnreferenced: false
+            enabled: false
+            excludeFiles:
+              - "*backup*"
+        airgap:
+          - name: Emerging-Threats
+            description: "Emerging Threats ruleset - To enable ET Pro, enter your license key below. Leave empty for ET Open (free) rules."
+            licenseKey: ""
+            enabled: true
+            sourceType: url
+            sourcePath: 'https://rules.emergingthreats.net/open/suricata/emerging.rules.tar.gz'
+            urlHash: "https://rules.emergingthreats.net/open/suricata/emerging.rules.tar.gz.md5"
+            license: "BSD"
+            excludeFiles:
+              - "*deleted*"
+              - "*retired*"
+            proxyURL: ""
+            proxyUsername: ""
+            proxyPassword: ""
+            proxyCACert: ""
+            insecureSkipVerify: false
+            readOnly: true
+            deleteUnreferenced: true
+          - name: local-rules
+            id: local-rules
+            description: "Local custom rules from files (*.rules) in a directory on the filesystem"
+            license: "custom"
+            sourceType: directory
+            sourcePath: /nsm/rules/local/
+            readOnly: false
+            deleteUnreferenced: false
+            enabled: false
+            excludeFiles:
+              - "*backup*"
       navigator:
         intervalMinutes: 30
         outputPath: /opt/sensoroni/navigator
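To exercise the local-rules source defined above once it is enabled, drop *.rules files under its sourcePath; a quick sketch (the SID and rule text are invented for illustration):

    # Stage a custom Suricata rule where the 'local-rules' directory source looks for it:
    mkdir -p /nsm/rules/local
    cat > /nsm/rules/local/my.rules <<'EOF'
    alert tcp any any -> $HOME_NET 8000 (msg:"LOCAL example rule"; sid:7000001; rev:1;)
    EOF
    # Files matching the excludeFiles patterns (e.g. *backup*) would be skipped,
    # and the source ships with enabled: false, so it must be switched on first.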
@@ -1746,7 +1804,7 @@ soc:
             showSubtitle: true
           - name: DPD
             description: Dynamic Protocol Detection errors
-            query: '(tags:dpd OR tags:analyzer) | groupby error.reason'
+            query: 'tags:dpd | groupby error.reason'
             showSubtitle: true
           - name: Files
             description: Files grouped by mimetype
@@ -2012,7 +2070,7 @@
             query: 'tags:dns | groupby dns.query.name | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby dns.highest_registered_domain | groupby dns.parent_domain | groupby dns.query.type_name | groupby dns.response.code_name | groupby dns.answers.name | groupby destination.as.organization.name'
           - name: DPD
             description: DPD (Dynamic Protocol Detection) errors
-            query: '(tags:dpd OR tags:analyzer) | groupby error.reason | groupby -sankey error.reason source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby network.protocol | groupby destination.as.organization.name'
+            query: 'tags:dpd | groupby error.reason | groupby -sankey error.reason source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby network.protocol | groupby destination.as.organization.name'
           - name: Files
             description: Files seen in network traffic
             query: 'tags:file | groupby file.mime_type | groupby -sankey file.mime_type file.source | groupby file.source | groupby file.bytes.total | groupby source.ip | groupby destination.ip | groupby destination.as.organization.name'
@@ -2554,7 +2612,6 @@ soc:
       assistant:
         enabled: false
         investigationPrompt: Investigate Alert ID {socId}
-        compressContextPrompt: Summarize the conversation for context compaction
         thresholdColorRatioLow: 0.5
         thresholdColorRatioMed: 0.75
         thresholdColorRatioMax: 1
@@ -2564,22 +2621,13 @@ soc:
             contextLimitSmall: 200000
             contextLimitLarge: 1000000
             lowBalanceColorAlert: 500000
-            enabled: true
           - id: sonnet-4.5
             displayName: Claude Sonnet 4.5
             contextLimitSmall: 200000
             contextLimitLarge: 1000000
             lowBalanceColorAlert: 500000
-            enabled: true
           - id: gptoss-120b
             displayName: GPT-OSS 120B
             contextLimitSmall: 128000
             contextLimitLarge: 128000
             lowBalanceColorAlert: 500000
-            enabled: true
-          - id: qwen-235b
-            displayName: QWEN 235B
-            contextLimitSmall: 256000
-            contextLimitLarge: 256000
-            lowBalanceColorAlert: 500000
-            enabled: true
@@ -27,7 +27,7 @@ so-soc:
       - /opt/so/conf/strelka:/opt/sensoroni/yara:rw
       - /opt/so/conf/sigma:/opt/sensoroni/sigma:rw
       - /opt/so/rules/elastalert/rules:/opt/sensoroni/elastalert:rw
-      - /opt/so/rules/nids/suri:/opt/sensoroni/nids:ro
+      - /opt/so/rules/nids/suri:/opt/sensoroni/nids:rw
       - /opt/so/conf/soc/fingerprints:/opt/sensoroni/fingerprints:rw
       - /nsm/soc/jobs:/opt/sensoroni/jobs:rw
       - /nsm/soc/uploads:/nsm/soc/uploads:rw
@@ -50,17 +50,74 @@
 {% do SOCMERGED.config.server.modules.elastalertengine.update({'enabledSigmaRules': SOCMERGED.config.server.modules.elastalertengine.enabledSigmaRules.default}) %}
 {% endif %}

-{# set elastalertengine.rulesRepos and strelkaengine.rulesRepos based on airgap or not #}
+{# set elastalertengine.rulesRepos, strelkaengine.rulesRepos, and suricataengine.rulesetSources based on airgap or not #}
 {% if GLOBALS.airgap %}
 {% do SOCMERGED.config.server.modules.elastalertengine.update({'rulesRepos': SOCMERGED.config.server.modules.elastalertengine.rulesRepos.airgap}) %}
 {% do SOCMERGED.config.server.modules.strelkaengine.update({'rulesRepos': SOCMERGED.config.server.modules.strelkaengine.rulesRepos.airgap}) %}
+{#% if SOCMERGED.config.server.modules.suricataengine.rulesetSources is mapping %#}
+{% do SOCMERGED.config.server.modules.suricataengine.update({'rulesetSources': SOCMERGED.config.server.modules.suricataengine.rulesetSources.airgap}) %}
+{#% endif %#}
 {% do SOCMERGED.config.server.update({'airgapEnabled': true}) %}
 {% else %}
 {% do SOCMERGED.config.server.modules.elastalertengine.update({'rulesRepos': SOCMERGED.config.server.modules.elastalertengine.rulesRepos.default}) %}
 {% do SOCMERGED.config.server.modules.strelkaengine.update({'rulesRepos': SOCMERGED.config.server.modules.strelkaengine.rulesRepos.default}) %}
+{#% if SOCMERGED.config.server.modules.suricataengine.rulesetSources is mapping %#}
+{% do SOCMERGED.config.server.modules.suricataengine.update({'rulesetSources': SOCMERGED.config.server.modules.suricataengine.rulesetSources.default}) %}
+{#% endif %#}
 {% do SOCMERGED.config.server.update({'airgapEnabled': false}) %}
 {% endif %}

+
+{# Define the Detections custom ruleset that should always be present #}
+{% set CUSTOM_RULESET = {
+  'name': 'custom',
+  'description': 'User-created custom rules created via the Detections module in the SOC UI',
+  'sourceType': 'elasticsearch',
+  'sourcePath': 'so_detection.ruleset:__custom__',
+  'readOnly': false,
+  'deleteUnreferenced': false,
+  'license': 'Custom',
+  'enabled': true
+} %}
+
+{# Always append the custom ruleset to suricataengine.rulesetSources if not already present #}
+{% if SOCMERGED.config.server.modules.suricataengine is defined and SOCMERGED.config.server.modules.suricataengine.rulesetSources is defined %}
+{% if SOCMERGED.config.server.modules.suricataengine.rulesetSources is not mapping %}
+{% set custom_names = SOCMERGED.config.server.modules.suricataengine.rulesetSources | selectattr('name', 'equalto', 'custom') | list %}
+{% if custom_names | length == 0 %}
+{% do SOCMERGED.config.server.modules.suricataengine.rulesetSources.append(CUSTOM_RULESET) %}
+{% endif %}
+{% endif %}
+{% endif %}
+
+{# Transform Emerging-Threats ruleset based on license key #}
+{% if SOCMERGED.config.server.modules.suricataengine is defined and SOCMERGED.config.server.modules.suricataengine.rulesetSources is defined %}
+{% if SOCMERGED.config.server.modules.suricataengine.rulesetSources is not mapping %}
+{% for ruleset in SOCMERGED.config.server.modules.suricataengine.rulesetSources %}
+{% if ruleset.name == 'Emerging-Threats' %}
+{% if ruleset.licenseKey and ruleset.licenseKey != '' %}
+{# License key is defined - transform to ETPRO #}
+{% do ruleset.update({
+  'name': 'ETPRO',
+  'sourcePath': 'https://rules.emergingthreatspro.com/' ~ ruleset.licenseKey ~ '/suricata-7.0.3/etpro.rules.tar.gz',
+  'urlHash': 'https://rules.emergingthreatspro.com/' ~ ruleset.licenseKey ~ '/suricata-7.0.3/etpro.rules.tar.gz.md5',
+  'license': 'Commercial'
+}) %}
+{% else %}
+{# No license key - explicitly set to ETOPEN #}
+{% do ruleset.update({
+  'name': 'ETOPEN',
+  'sourcePath': 'https://rules.emergingthreats.net/open/suricata-7.0.3/emerging.rules.tar.gz',
+  'urlHash': 'https://rules.emergingthreats.net/open/suricata-7.0.3/emerging.rules.tar.gz.md5',
+  'license': 'BSD'
+}) %}
+{% endif %}
+{% endif %}
+{% endfor %}
+{% endif %}
+{% endif %}
+

 {# set playbookRepos based on airgap or not #}
 {% if GLOBALS.airgap %}
 {% do SOCMERGED.config.server.modules.playbook.update({'playbookRepos': SOCMERGED.config.server.modules.playbook.playbookRepos.airgap}) %}
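The Emerging-Threats transform above only swaps names and URLs, so the resulting download locations are easy to reproduce for a quick check (a sketch mirroring the Jinja logic; the key value is a placeholder):

    #!/bin/sh
    # Mirror the Jinja transform: empty key -> ETOPEN, non-empty key -> ETPRO.
    LICENSE_KEY="${1:-}"
    if [ -n "$LICENSE_KEY" ]; then
      URL="https://rules.emergingthreatspro.com/${LICENSE_KEY}/suricata-7.0.3/etpro.rules.tar.gz"
    else
      URL="https://rules.emergingthreats.net/open/suricata-7.0.3/emerging.rules.tar.gz"
    fi
    echo "ruleset tarball: $URL"
    echo "md5 sidecar:     ${URL}.md5"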
@@ -424,17 +424,6 @@ soc:
         description: The maximum number of documents to request in a single Elasticsearch scroll request.
       bulkIndexWorkerCount:
         description: The number of worker threads to use when bulk indexing data into Elasticsearch. A value below 1 will default to the number of CPUs available.
-    filedatastore:
-      jobDir:
-        description: The location where local job files are stored on the manager.
-        global: True
-        advanced: True
-      retryFailureIntervalMs:
-        description: The interval, in milliseconds, to wait before attempting to reprocess a failed job.
-        global: True
-      retryFailureMaxAttempts:
-        description: The max number of attempts to process a job, in the event the job fails to complete.
-        global: True
     sostatus:
       refreshIntervalMs:
         description: Duration (in milliseconds) between refreshes of the grid status. Shortening this duration may not have expected results, as the backend systems feeding this sostatus data will continue their updates as scheduled.
@@ -563,6 +552,52 @@ soc:
         advanced: True
         forcedType: "[]string"
         helpLink: detections.html#rule-engine-status
+      rulesetSources:
+        default: &serulesetSources
+          description: "Ruleset sources for Suricata rules. Supports URL downloads and local directories. Refer to the linked documentation for details on how to configure this setting."
+          global: True
+          advanced: False
+          forcedType: "[]{}"
+          helpLink: suricata.html
+          syntax: json
+          uiElements:
+            - field: name
+              label: Ruleset Name (This will be the name of the ruleset in the UI)
+              required: True
+              readonly: True
+            - field: description
+              label: Description
+            - field: enabled
+              label: Enabled (If false, existing rules & overrides will be removed)
+              forcedType: bool
+              required: True
+            - field: licenseKey
+              label: License Key
+              required: False
+            - field: sourceType
+              label: Source Type
+              required: True
+              options:
+                - url
+                - directory
+            - field: sourcePath
+              label: Source Path (full url or directory path)
+              required: True
+            - field: excludeFiles
+              label: Exclude Files (list of file names to exclude, separated by commas)
+              required: False
+            - field: license
+              label: Ruleset License
+              required: True
+            - field: readOnly
+              label: Read Only
+              forcedType: bool
+              required: False
+            - field: deleteUnreferenced
+              label: Delete Unreferenced
+              forcedType: bool
+              required: False
+        airgap: *serulesetSources
       navigator:
         intervalMinutes:
           description: How often to generate the Navigator Layers. (minutes)
@@ -617,9 +652,6 @@ soc:
       investigationPrompt:
         description: Prompt given to Onion AI when beginning an investigation.
         global: True
-      compressContextPrompt:
-        description: Prompt given to Onion AI when summarizing a conversation in order to compress context.
-        global: True
       thresholdColorRatioLow:
         description: Lower visual context color change threshold.
         global: True
@@ -662,9 +694,6 @@ soc:
              label: Low Balance Color Alert
              forcedType: int
              required: True
-            - field: enabled
-              label: Enabled
-              forcedType: bool
      apiTimeoutMs:
        description: Duration (in milliseconds) to wait for a response from the SOC server API before giving up and showing an error on the SOC UI.
        global: True
|
|||||||
@@ -7,47 +7,9 @@
 {% if sls.split('.')[0] in allowed_states %}
 
 {% from 'vars/globals.map.jinja' import GLOBALS %}
+{% from 'bpf/suricata.map.jinja' import SURICATABPF %}
 {% from 'suricata/map.jinja' import SURICATAMERGED %}
-{% from 'bpf/suricata.map.jinja' import SURICATABPF, SURICATA_BPF_STATUS, SURICATA_BPF_CALC %}
+{% set BPF_STATUS = 0 %}
 
-suridir:
-file.directory:
-- name: /opt/so/conf/suricata
-- user: 940
-- group: 940
-
-{% if GLOBALS.pcap_engine in ["SURICATA", "TRANSITION"] %}
-{% from 'bpf/pcap.map.jinja' import PCAPBPF, PCAP_BPF_STATUS, PCAP_BPF_CALC %}
-# BPF compilation and configuration
-{% if PCAPBPF and not PCAP_BPF_STATUS %}
-suriPCAPbpfcompilationfailure:
-test.configurable_test_state:
-- changes: False
-- result: False
-- comment: "BPF Syntax Error - Discarding Specified BPF. Error: {{ PCAP_BPF_CALC['stderr'] }}"
-{% endif %}
-{% endif %}
-
-# BPF applied to all of Suricata - alerts/metadata/pcap
-suribpf:
-file.managed:
-- name: /opt/so/conf/suricata/bpf
-- user: 940
-- group: 940
-{% if SURICATA_BPF_STATUS %}
-- contents: {{ SURICATABPF }}
-{% else %}
-- contents:
-- ""
-{% endif %}
-
-{% if SURICATABPF and not SURICATA_BPF_STATUS %}
-suribpfcompilationfailure:
-test.configurable_test_state:
-- changes: False
-- result: False
-- comment: "BPF Syntax Error - Discarding Specified BPF. Error: {{ SURICATA_BPF_CALC['stderr'] }}"
-{% endif %}
-
 # Add Suricata Group
 suricatagroup:

@@ -87,11 +49,18 @@ suricata_sbin_jinja:
 - file_mode: 755
 - template: jinja
 
+suridir:
+file.directory:
+- name: /opt/so/conf/suricata
+- user: 940
+- group: 940
+
 suriruledir:
 file.directory:
 - name: /opt/so/conf/suricata/rules
 - user: 940
 - group: 940
+- makedirs: True
 
 surilogdir:
 file.directory:

@@ -167,6 +136,32 @@ suriclassifications:
 - user: 940
 - group: 940
 
+# BPF compilation and configuration
+{% if SURICATABPF %}
+{% set BPF_CALC = salt['cmd.script']('salt://common/tools/sbin/so-bpf-compile', GLOBALS.sensor.interface + ' ' + SURICATABPF|join(" "),cwd='/root') %}
+{% if BPF_CALC['stderr'] == "" %}
+{% set BPF_STATUS = 1 %}
+{% else %}
+suribpfcompilationfailure:
+test.configurable_test_state:
+- changes: False
+- result: False
+- comment: "BPF Syntax Error - Discarding Specified BPF"
+{% endif %}
+{% endif %}
+
+suribpf:
+file.managed:
+- name: /opt/so/conf/suricata/bpf
+- user: 940
+- group: 940
+{% if BPF_STATUS %}
+- contents: {{ SURICATABPF }}
+{% else %}
+- contents:
+- ""
+{% endif %}
 
 so-suricata-eve-clean:
 file.managed:
 - name: /usr/sbin/so-suricata-eve-clean
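Note on the inline BPF handling above: the state treats an empty stderr from so-bpf-compile as a successful compile and only then writes the filter to /opt/so/conf/suricata/bpf. A minimal shell sketch of that same check, assuming so-bpf-compile takes the capture interface followed by the filter expression and reports syntax errors on stderr, as the BPF_CALC['stderr'] test implies; the interface and filter below are illustrative:

# Illustrative values; substitute the monitored interface and desired filter.
IFACE=bond0
FILTER='not (host 10.1.2.3 and port 514)'

# Capture stderr while discarding stdout; an empty stderr means the
# filter compiled cleanly, mirroring the BPF_CALC['stderr'] == "" test.
err=$(so-bpf-compile "$IFACE" $FILTER 2>&1 >/dev/null)
if [ -z "$err" ]; then
    echo "BPF accepted"
else
    echo "BPF Syntax Error - Discarding Specified BPF: $err"
fi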
@@ -34,7 +34,7 @@ suricata:
 threads: 1
 tpacket-v3: "yes"
 ring-size: 5000
-block-size: 69632
+block-size: 32768
 block-timeout: 10
 use-emergency-flush: "yes"
 buffer-size: 32768

@@ -97,11 +97,6 @@ suricata:
 - 4789
 TEREDO_PORTS:
 - 3544
-SIP_PORTS:
-- 5060
-- 5061
-GENEVE_PORTS:
-- 6081
 default-log-dir: /var/log/suricata/
 stats:
 enabled: "yes"

@@ -139,6 +134,14 @@ suricata:
 header: X-Forwarded-For
 unified2-alert:
 enabled: "no"
+http-log:
+enabled: "no"
+filename: http.log
+append: "yes"
+tls-log:
+enabled: "no"
+filename: tls.log
+append: "yes"
 tls-store:
 enabled: "no"
 pcap-log:

@@ -154,6 +157,9 @@ suricata:
 totals: "yes"
 threads: "no"
 null-values: "yes"
+syslog:
+enabled: "no"
+facility: local5
 drop:
 enabled: "no"
 file-store:

@@ -200,9 +206,6 @@ suricata:
 enabled: "yes"
 detection-ports:
 dp: 443
-ja3-fingerprints: auto
-ja4-fingerprints: auto
-encryption-handling: track-only
 dcerpc:
 enabled: "yes"
 ftp:

@@ -252,21 +255,19 @@ suricata:
 libhtp:
 default-config:
 personality: IDS
-request-body-limit: 100 KiB
-response-body-limit: 100 KiB
-request-body-minimal-inspect-size: 32 KiB
-request-body-inspect-window: 4 KiB
-response-body-minimal-inspect-size: 40 KiB
-response-body-inspect-window: 16 KiB
+request-body-limit: 100kb
+response-body-limit: 100kb
+request-body-minimal-inspect-size: 32kb
+request-body-inspect-window: 4kb
+response-body-minimal-inspect-size: 40kb
+response-body-inspect-window: 16kb
 response-body-decompress-layer-limit: 2
 http-body-inline: auto
 swf-decompression:
-enabled: "no"
+enabled: "yes"
 type: both
-compress-depth: 100 KiB
-decompress-depth: 100 KiB
-randomize-inspection-sizes: "yes"
-randomize-inspection-range: 10
+compress-depth: 0
+decompress-depth: 0
 double-decode-path: "no"
 double-decode-query: "no"
 server-config:

@@ -400,12 +401,8 @@ suricata:
 vxlan:
 enabled: true
 ports: $VXLAN_PORTS
-geneve:
+erspan:
 enabled: true
-ports: $GENEVE_PORTS
-max-layers: 16
-recursion-level:
-use-for-tracking: true
 detect:
 profile: medium
 custom-values:

@@ -425,12 +422,7 @@ suricata:
 spm-algo: auto
 luajit:
 states: 128
-security:
-lua:
-allow-rules: false
-max-bytes: 500000
-max-instructions: 500000
-allow-restricted-functions: false
 profiling:
 rules:
 enabled: "yes"

@@ -467,7 +459,7 @@ suricata:
 append: "yes"
 default-rule-path: /etc/suricata/rules
 rule-files:
-- all.rules
+- all-rulesets.rules
 classification-file: /etc/suricata/classification.config
 reference-config-file: /etc/suricata/reference.config
 threshold-file: /etc/suricata/threshold.conf
@@ -10,12 +10,6 @@
 
 {# before we change outputs back to list, enable pcap-log if suricata is the pcapengine #}
 {% if GLOBALS.pcap_engine in ["SURICATA", "TRANSITION"] %}
 
-{% from 'bpf/pcap.map.jinja' import PCAPBPF, PCAP_BPF_STATUS %}
-{% if PCAPBPF and PCAP_BPF_STATUS %}
-{% do SURICATAMERGED.config.outputs['pcap-log'].update({'bpf-filter': PCAPBPF|join(" ")}) %}
-{% endif %}
-
 {% do SURICATAMERGED.config.outputs['pcap-log'].update({'enabled': 'yes'}) %}
 {# move the items in suricata.pcap into suricata.config.outputs.pcap-log. these items were placed under suricata.config for ease of access in SOC #}
 {% do SURICATAMERGED.config.outputs['pcap-log'].update({'compression': SURICATAMERGED.pcap.compression}) %}

@@ -190,8 +190,6 @@ suricata:
 FTP_PORTS: *suriportgroup
 VXLAN_PORTS: *suriportgroup
 TEREDO_PORTS: *suriportgroup
-SIP_PORTS: *suriportgroup
-GENEVE_PORTS: *suriportgroup
 outputs:
 eve-log:
 types:

@@ -211,7 +209,7 @@ suricata:
 helpLink: suricata.html
 pcap-log:
 enabled:
-description: This value is ignored by SO. pcapengine in globals takes precedence.
+description: This value is ignored by SO. pcapengine in globals takes precidence.
 readonly: True
 helpLink: suricata.html
 advanced: True

@@ -299,10 +297,3 @@ suricata:
 ports:
 description: Ports to listen for. This should be a variable.
 helpLink: suricata.html
-geneve:
-enabled:
-description: Enable VXLAN capabilities.
-helpLink: suricata.html
-ports:
-description: Ports to listen for. This should be a variable.
-helpLink: suricata.html
@@ -29,7 +29,7 @@ suricata:
 #custom: [Accept-Encoding, Accept-Language, Authorization]
 # dump-all-headers: none
 - dns:
-version: 3
+version: 2
 enabled: "yes"
 #requests: "no"
 #responses: "no"

@@ -7,5 +7,5 @@
 
 . /usr/sbin/so-common
 
-retry 60 3 'docker exec so-suricata /opt/suricata/bin/suricatasc -c reload-rules /var/run/suricata/suricata-command.socket' '{"message":"done","return":"OK"}' || fail "The Suricata container was not ready in time."
-retry 60 3 'docker exec so-suricata /opt/suricata/bin/suricatasc -c ruleset-reload-nonblocking /var/run/suricata/suricata-command.socket' '{"message":"done","return":"OK"}' || fail "The Suricata container was not ready in time."
+retry 60 3 'docker exec so-suricata /opt/suricata/bin/suricatasc -c reload-rules /var/run/suricata/suricata-command.socket' '{"message": "done", "return": "OK"}' || fail "The Suricata container was not ready in time."
+retry 60 3 'docker exec so-suricata /opt/suricata/bin/suricatasc -c ruleset-reload-nonblocking /var/run/suricata/suricata-command.socket' '{"message": "done", "return": "OK"}' || fail "The Suricata container was not ready in time."
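The retry wrapper used in these two lines is not shown in this diff; from the call sites it appears to take an attempt count, a sleep interval, the command, and a string the output must contain. A minimal sketch under that assumption (the real helper sourced from so-common may differ):

# Sketch only: retry <attempts> <delay_seconds> <command> [expected_substring]
retry() {
    local attempts=$1 delay=$2 cmd=$3 expected=${4:-}
    local i out rc
    for ((i = 1; i <= attempts; i++)); do
        out=$(eval "$cmd" 2>&1); rc=$?
        # Succeed when the command exits 0 and, if an expected string was
        # given, that string appears in the command output.
        if [ $rc -eq 0 ] && { [ -z "$expected" ] || [[ $out == *"$expected"* ]]; }; then
            return 0
        fi
        sleep "$delay"
    done
    return 1
}

Under this reading, retry 60 3 '...' '{"message": "done", "return": "OK"}' polls suricatasc for up to 60 attempts, three seconds apart, until the reload reports done.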
@@ -74,7 +74,6 @@ base:
 - sensoroni
 - telegraf
 - firewall
-- idstools
 - suricata.manager
 - healthcheck
 - elasticsearch

@@ -106,7 +105,6 @@ base:
 - firewall
 - sensoroni
 - telegraf
-- idstools
 - suricata.manager
 - healthcheck
 - elasticsearch

@@ -142,7 +140,6 @@ base:
 - sensoroni
 - telegraf
 - backup.config_backup
-- idstools
 - suricata.manager
 - elasticsearch
 - logstash

@@ -177,7 +174,6 @@ base:
 - sensoroni
 - telegraf
 - backup.config_backup
-- idstools
 - suricata.manager
 - elasticsearch
 - logstash

@@ -208,7 +204,6 @@ base:
 - sensoroni
 - telegraf
 - firewall
-- idstools
 - suricata.manager
 - pcap
 - elasticsearch
@@ -8,7 +8,8 @@
 
 {% from 'vars/globals.map.jinja' import GLOBALS %}
 {% from "zeek/config.map.jinja" import ZEEKMERGED %}
-{% from 'bpf/zeek.map.jinja' import ZEEKBPF, ZEEK_BPF_STATUS, ZEEK_BPF_CALC %}
+{% from 'bpf/zeek.map.jinja' import ZEEKBPF %}
+{% set BPF_STATUS = 0 %}
 
 # Add Zeek group
 zeekgroup:

@@ -157,13 +158,18 @@ zeekja4cfg:
 - user: 937
 - group: 939
 
-# BPF compilation failed
-{% if ZEEKBPF and not ZEEK_BPF_STATUS %}
+# BPF compilation and configuration
+{% if ZEEKBPF %}
+{% set BPF_CALC = salt['cmd.script']('salt://common/tools/sbin/so-bpf-compile', GLOBALS.sensor.interface + ' ' + ZEEKBPF|join(" "),cwd='/root') %}
+{% if BPF_CALC['stderr'] == "" %}
+{% set BPF_STATUS = 1 %}
+{% else %}
 zeekbpfcompilationfailure:
 test.configurable_test_state:
 - changes: False
 - result: False
-- comment: "BPF Syntax Error - Discarding Specified BPF. Error: {{ ZEEK_BPF_CALC['stderr'] }}"
+- comment: "BPF Syntax Error - Discarding Specified BPF"
+{% endif %}
 {% endif %}
 
 zeekbpf:

@@ -171,7 +177,7 @@ zeekbpf:
 - name: /opt/so/conf/zeek/bpf
 - user: 940
 - group: 940
-{% if ZEEK_BPF_STATUS %}
+{% if BPF_STATUS %}
 - contents: {{ ZEEKBPF }}
 {% else %}
 - contents:
@@ -45,7 +45,7 @@ zeek:
 - protocols/ssh/geo-data
 - protocols/ssh/detect-bruteforcing
 - protocols/ssh/interesting-hostnames
-- protocols/http/detect-sql-injection
+- protocols/http/detect-sqli
 - frameworks/files/hash-all-files
 - frameworks/files/detect-MHR
 - policy/frameworks/notice/extend-email/hostnames

@@ -502,7 +502,6 @@ configure_minion() {
 minion_type=desktop
 fi
 info "Configuring minion type as $minion_type"
-logCmd "mkdir -p /etc/salt/minion.d"
 echo "role: so-$minion_type" > /etc/salt/grains
 
 local minion_config=/etc/salt/minion

@@ -542,6 +541,20 @@ configure_minion() {
 "log_file: /opt/so/log/salt/minion"\
 "#startup_states: highstate" >> "$minion_config"
 
+# At the time the so-managerhype node does not yet have the bridge configured.
+# The so-hypervisor node doesn't either, but it doesn't cause issues here.
+local usebr0=false
+if [ "$minion_type" == 'hypervisor' ]; then
+usebr0=true
+fi
+local pillar_json="{\"host\": {\"mainint\": \"$MNIC\"}, \"usebr0\": $usebr0}"
+info "Running: salt-call state.apply salt.mine_functions --local --file-root=../salt/ -l info pillar='$pillar_json'"
+salt-call state.apply salt.mine_functions --local --file-root=../salt/ -l info pillar="$pillar_json"
+
+{
+logCmd "systemctl enable salt-minion";
+logCmd "systemctl restart salt-minion";
+} >> "$setup_log" 2>&1
 }
 
 checkin_at_boot() {
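The salt-call invocation added above relies on standard Salt behavior: a pillar='<JSON>' keyword argument to state.apply overrides pillar data for that single run, and --local together with --file-root executes masterless against the checked-out state tree. An illustrative standalone equivalent, with a placeholder interface name:

# Placeholder value; configure_minion takes the real interface from $MNIC.
MNIC=eth0
usebr0=false

# Masterless run against the local state tree with ad-hoc pillar data.
salt-call state.apply salt.mine_functions \
    --local --file-root=../salt/ -l info \
    pillar="{\"host\": {\"mainint\": \"$MNIC\"}, \"usebr0\": $usebr0}"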
@@ -716,7 +729,7 @@ configure_network_sensor() {
 fi
 
 # Create the bond interface only if it doesn't already exist
-nmcli -f name,uuid -p con | grep -q "$INTERFACE"
+nmcli -f name,uuid -p con | grep -q '$INTERFACE'
 local found_int=$?
 
 if [[ $found_int != 0 ]]; then
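One flag on this hunk: the new side switches to single quotes, and in shell single quotes suppress variable expansion, so grep -q '$INTERFACE' matches the literal text $INTERFACE rather than the interface name; the double-quoted form being removed is the one that expands. A quick demonstration:

INTERFACE=bond0
printf 'bond0\n' | grep -q "$INTERFACE" && echo "double quotes: match"    # pattern is bond0
printf 'bond0\n' | grep -q '$INTERFACE' || echo "single quotes: no match" # pattern is the literal $INTERFACE

As a result, found_int will likely be non-zero even when the bond connection already exists, and the creation branch below would run again.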
@@ -785,18 +798,25 @@ configure_hyper_bridge() {
 }
 
 copy_salt_master_config() {
-logCmd "mkdir /etc/salt"
 title "Copy the Salt master config template to the proper directory"
 if [ "$setup_type" = 'iso' ]; then
 logCmd "cp /root/SecurityOnion/files/salt/master/master /etc/salt/master"
+#logCmd "cp /root/SecurityOnion/files/salt/master/salt-master.service /usr/lib/systemd/system/salt-master.service"
 else
 logCmd "cp ../files/salt/master/master /etc/salt/master"
+#logCmd "cp ../files/salt/master/salt-master.service /usr/lib/systemd/system/salt-master.service"
 fi
 info "Copying pillar and salt files in $temp_install_dir to $local_salt_dir"
 logCmd "cp -R $temp_install_dir/pillar/ $local_salt_dir/"
 if [ -d "$temp_install_dir"/salt ] ; then
 logCmd "cp -R $temp_install_dir/salt/ $local_salt_dir/"
 fi
 
+# Restart the service so it picks up the changes
+logCmd "systemctl daemon-reload"
+logCmd "systemctl enable salt-master"
+logCmd "systemctl restart salt-master"
 }
 
 create_local_nids_rules() {

@@ -816,7 +836,6 @@ create_manager_pillars() {
 backup_pillar
 docker_pillar
 redis_pillar
-idstools_pillar
 kratos_pillar
 hydra_pillar
 soc_pillar

@@ -1282,11 +1301,6 @@ ls_heapsize() {
 
 }
 
-idstools_pillar() {
-title "Ading IDSTOOLS pillar options"
-touch $adv_idstools_pillar_file
-}
-
 nginx_pillar() {
 title "Creating the NGINX pillar"
 [[ -z "$TESTING" ]] && return

@@ -1462,7 +1476,7 @@ make_some_dirs() {
 mkdir -p $local_salt_dir/salt/firewall/portgroups
 mkdir -p $local_salt_dir/salt/firewall/ports
 
-for THEDIR in bpf pcap elasticsearch ntp firewall redis backup influxdb strelka sensoroni soc docker zeek suricata nginx telegraf logstash soc manager kratos hydra idstools idh elastalert stig global kafka versionlock hypervisor vm; do
+for THEDIR in bpf pcap elasticsearch ntp firewall redis backup influxdb strelka sensoroni soc docker zeek suricata nginx telegraf logstash soc manager kratos hydra idh elastalert stig global kafka versionlock hypervisor vm; do
 mkdir -p $local_salt_dir/pillar/$THEDIR
 touch $local_salt_dir/pillar/$THEDIR/adv_$THEDIR.sls
 touch $local_salt_dir/pillar/$THEDIR/soc_$THEDIR.sls

@@ -1921,12 +1935,11 @@ repo_sync_local() {
 }
 
 saltify() {
+info "Installing Salt"
 SALTVERSION=$(grep "version:" ../salt/salt/master.defaults.yaml | grep -o "[0-9]\+\.[0-9]\+")
-info "Installing Salt $SALTVERSION"
-chmod u+x ../salt/salt/scripts/bootstrap-salt.sh
 if [[ $is_deb ]]; then
 
-DEBIAN_FRONTEND=noninteractive retry 30 10 "apt-get -y -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" upgrade" >> "$setup_log" 2>&1 || fail_setup
+DEBIAN_FRONTEND=noninteractive retry 150 20 "apt-get -y -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" upgrade" >> "$setup_log" 2>&1 || fail_setup
 if [ $OSVER == "focal" ]; then update-alternatives --install /usr/bin/python python /usr/bin/python3.10 10; fi
 local pkg_arr=(
 'apache2-utils'
@@ -1939,11 +1952,16 @@ saltify() {
 'jq'
 'gnupg'
 )
-retry 30 10 "apt-get -y install ${pkg_arr[*]}" || fail_setup
+retry 150 20 "apt-get -y install ${pkg_arr[*]}" || fail_setup
 
 logCmd "mkdir -vp /etc/apt/keyrings"
 logCmd "wget -q --inet4-only -O /etc/apt/keyrings/docker.pub https://download.docker.com/linux/ubuntu/gpg"
 
+# Download public key
+logCmd "curl -fsSL -o /etc/apt/keyrings/salt-archive-keyring-2023.pgp https://packages.broadcom.com/artifactory/api/security/keypair/SaltProjectKey/public"
+# Create apt repo target configuration
+echo "deb [signed-by=/etc/apt/keyrings/salt-archive-keyring-2023.pgp arch=amd64] https://packages.broadcom.com/artifactory/saltproject-deb/ stable main" | sudo tee /etc/apt/sources.list.d/salt.list
+
 if [[ $is_ubuntu ]]; then
 # Add Docker Repo
 add-apt-repository -y "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
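The keyring steps added here fetch the Salt signing key into /etc/apt/keyrings and reference it per-repository via the signed-by option, which is the current apt convention; the apt-key additions in the next hunk use the older, deprecated global trust store and are redundant once signed-by is in place. The general pattern, with placeholder names (the diff itself uses the Broadcom-hosted Salt Project key and repository):

# Placeholder key URL and repo line for illustration only.
curl -fsSL -o /etc/apt/keyrings/example-archive-keyring.pgp https://example.com/repo-signing-key
echo "deb [signed-by=/etc/apt/keyrings/example-archive-keyring.pgp arch=amd64] https://example.com/apt stable main" \
    > /etc/apt/sources.list.d/example.list
apt-get update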
@@ -1954,50 +1972,45 @@ saltify() {
 echo "deb [arch="$(dpkg --print-architecture)" signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/debian $OSVER stable" > /etc/apt/sources.list.d/docker.list
 fi
 
+logCmd "apt-key add /etc/apt/keyrings/salt-archive-keyring-2023.pgp"
+
+#logCmd "apt-key add /opt/so/gpg/SALTSTACK-GPG-KEY.pub"
 logCmd "apt-key add /etc/apt/keyrings/docker.pub"
 
-retry 30 10 "apt-get update" "" "Err:" || fail_setup
+# Add SO Saltstack Repo
+#echo "deb https://repo.securityonion.net/file/securityonion-repo/ubuntu/20.04/amd64/salt3004.2/ focal main" > /etc/apt/sources.list.d/saltstack.list
+
+# Ain't nothing but a GPG
+
+retry 150 20 "apt-get update" "" "Err:" || fail_setup
 if [[ $waitforstate ]]; then
-retry 30 10 "bash ../salt/salt/scripts/bootstrap-salt.sh -M -X stable $SALTVERSION" || fail_setup
-retry 30 10 "apt-mark hold salt-minion salt-common salt-master" || fail_setup
-retry 30 10 "apt-get -y install python3-pip python3-dateutil python3-m2crypto python3-packaging python3-influxdb python3-lxml" || exit 1
+retry 150 20 "apt-get -y install salt-common=$SALTVERSION salt-minion=$SALTVERSION salt-master=$SALTVERSION" || fail_setup
+retry 150 20 "apt-mark hold salt-minion salt-common salt-master" || fail_setup
+retry 150 20 "apt-get -y install python3-pip python3-dateutil python3-m2crypto python3-packaging python3-influxdb python3-lxml" || exit 1
 else
-retry 30 10 "bash ../salt/salt/scripts/bootstrap-salt.sh -X stable $SALTVERSION" || fail_setup
-retry 30 10 "apt-mark hold salt-minion salt-common" || fail_setup
+retry 150 20 "apt-get -y install salt-common=$SALTVERSION salt-minion=$SALTVERSION" || fail_setup
+retry 150 20 "apt-mark hold salt-minion salt-common" || fail_setup
 fi
 fi
 
 if [[ $is_rpm ]]; then
 if [[ $waitforstate ]]; then
 # install all for a manager
-retry 30 10 "bash ../salt/salt/scripts/bootstrap-salt.sh -r -M -X stable $SALTVERSION" || fail_setup
+logCmd "dnf -y install salt-$SALTVERSION salt-master-$SALTVERSION salt-minion-$SALTVERSION"
 else
-# just a minion
-retry 30 10 "bash ../salt/salt/scripts/bootstrap-salt.sh -r -X stable $SALTVERSION" || fail_setup
+# We just need the minion
+if [[ $is_airgap ]]; then
+logCmd "dnf -y install salt salt-minion"
+else
+logCmd "dnf -y install salt-$SALTVERSION salt-minion-$SALTVERSION"
+fi
 fi
 fi
 
+logCmd "mkdir -p /etc/salt/minion.d"
 salt_install_module_deps
 salt_patch_x509_v2
 
-# At the time the so-managerhype node does not yet have the bridge configured.
-# The so-hypervisor node doesn't either, but it doesn't cause issues here.
-local usebr0=false
-if [ "$minion_type" == 'hypervisor' ]; then
-usebr0=true
-fi
-local pillar_json="{\"host\": {\"mainint\": \"$MNIC\"}, \"usebr0\": $usebr0}"
-info "Running: salt-call state.apply salt.mine_functions --local --file-root=../salt/ -l info pillar='$pillar_json'"
-salt-call state.apply salt.mine_functions --local --file-root=../salt/ -l info pillar="$pillar_json"
-
-if [[ $waitforstate ]]; then
-logCmd "systemctl enable salt-master";
-logCmd "systemctl start salt-master";
-fi
-
-logCmd "systemctl enable salt-minion";
-logCmd "systemctl restart salt-minion";
-
 }
 
 salt_install_module_deps() {
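The Debian branch now installs pinned Salt packages directly instead of running bootstrap-salt.sh, then holds them so routine upgrades cannot move Salt to another release. The pin-and-hold pattern in isolation, with an illustrative version number (apt requires the string to match an available package version exactly):

# Illustrative version; saltify parses the real value out of master.defaults.yaml.
SALTVERSION=3006.9

apt-get -y install "salt-common=$SALTVERSION" "salt-minion=$SALTVERSION"
# Holding prevents 'apt-get upgrade' from replacing the pinned packages.
apt-mark hold salt-common salt-minion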
@@ -745,12 +745,13 @@ if ! [[ -f $install_opt_file ]]; then
 securityonion_repo
 # Update existing packages
 update_packages
-# Put salt-master config in place
-copy_salt_master_config
-configure_minion "$minion_type"
 # Install salt
 saltify
+# Start the master service
+copy_salt_master_config
+configure_minion "$minion_type"
 check_sos_appliance
 
 logCmd "salt-key -yd $MINION_ID"
 sleep 2 # Debug RSA Key format errors
 logCmd "salt-call state.show_top"

@@ -851,8 +852,8 @@ if ! [[ -f $install_opt_file ]]; then
 gpg_rpm_import
 securityonion_repo
 update_packages
-configure_minion "$minion_type"
 saltify
+configure_minion "$minion_type"
 check_sos_appliance
 drop_install_options
 hypervisor_local_states

@@ -166,12 +166,6 @@ export hydra_pillar_file
 adv_hydra_pillar_file="$local_salt_dir/pillar/hydra/adv_hydra.sls"
 export adv_hydra_pillar_file
 
-idstools_pillar_file="$local_salt_dir/pillar/idstools/soc_idstools.sls"
-export idstools_pillar_file
-
-adv_idstools_pillar_file="$local_salt_dir/pillar/idstools/adv_idstools.sls"
-export adv_idstools_pillar_file
-
 nginx_pillar_file="$local_salt_dir/pillar/nginx/soc_nginx.sls"
 export nginx_pillar_file

@@ -676,8 +676,8 @@ whiptail_install_type_dist_existing() {
 EOM
 
 install_type=$(whiptail --title "$whiptail_title" --menu "$node_msg" 19 75 7 \
-"SENSOR" "Add a Sensor Node for monitoring network traffic " \
-"SEARCHNODE" "Add a Search Node with parsing " \
+"SENSOR" "Create a forward only sensor " \
+"SEARCHNODE" "Add a search node with parsing " \
 "FLEET" "Dedicated Elastic Fleet Node " \
 "HEAVYNODE" "Sensor + Search Node " \
 "IDH" "Intrusion Detection Honeypot Node " \