Mirror of https://github.com/Security-Onion-Solutions/securityonion.git (synced 2025-12-20 16:03:06 +01:00)

Merge remote-tracking branch 'origin/2.4/dev' into desktopyummv
@@ -27,6 +27,8 @@ Imports one or more evtx files into Security Onion. The evtx files will be analy

Options:
  --json    Outputs summary in JSON format. Implies --quiet.
  --quiet   Silences progress information to stdout.
  --shift   Adds a time shift. Accepts a single argument that is intended to be the date of the last record; the dates of the earlier records are shifted accordingly.

            Ex. sudo so-import-evtx --shift "2023-08-01 01:01:01" example.evtx
EOF
}

@@ -44,6 +46,10 @@ while [[ $# -gt 0 ]]; do
    --quiet)
      quiet=1
      ;;
    --shift)
      SHIFTDATE=$1
      shift
      ;;
    -*)
      echo "Encountered unexpected parameter: $param"
      usage
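The new --shift arm follows the usual bash pattern for flags that consume a value: read the next positional argument, then shift it away. A standalone sketch of the pattern, not the actual so-import-evtx source (loop variables assumed from the hunk above):

    #!/bin/bash
    # minimal value-consuming option parser, same shape as the hunk above
    SHIFTDATE=""
    INPUT_FILES=""
    while [[ $# -gt 0 ]]; do
      param=$1          # capture before shifting so error messages can echo it
      shift
      case "$param" in
        --shift)
          SHIFTDATE=$1  # the flag's value is the next argument
          shift
          ;;
        -*)
          echo "Encountered unexpected parameter: $param"
          exit 1
          ;;
        *)
          INPUT_FILES="$INPUT_FILES $param"
          ;;
      esac
    done
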
@@ -68,8 +74,10 @@ function status {

function evtx2es() {
  EVTX=$1
  HASH=$2
  SHIFTDATE=$3

  docker run --rm \
    -e "SHIFTTS=$SHIFTDATE" \
    -v "$EVTX:/tmp/data.evtx" \
    -v "/nsm/import/$HASH/evtx/:/tmp/evtx/" \
    -v "/nsm/import/evtx-end_newest:/tmp/newest" \
@@ -113,7 +121,9 @@ echo $END_NEWEST > /nsm/import/evtx-end_newest
for EVTX in $INPUT_FILES; do
  EVTX=$(/usr/bin/realpath "$EVTX")
  status "Processing Import: ${EVTX}"

  if ! [ -z "$SHIFTDATE" ]; then
    status "- timeshifting logs to end date of $SHIFTDATE"
  fi

  # generate a unique hash to assist with dedupe checks
  HASH=$(md5sum "${EVTX}" | awk '{ print $1 }')
  HASH_DIR=/nsm/import/${HASH}
@@ -136,7 +146,7 @@ for EVTX in $INPUT_FILES; do

  # import evtx and write them to import ingest pipeline
  status "- importing logs to Elasticsearch..."
  evtx2es "${EVTX}" $HASH
  evtx2es "${EVTX}" $HASH "$SHIFTDATE"
  if [[ $? -ne 0 ]]; then
    INVALID_EVTXS_COUNT=$((INVALID_EVTXS_COUNT + 1))
    status "- WARNING: This evtx file may not have fully imported successfully"

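Passed through to the container as SHIFTTS, the shift is conceptually a constant offset: the difference between the requested end date and the newest record timestamp, added to every record so relative spacing is preserved. A rough illustration using GNU date (timestamps hypothetical):

    TARGET_END="2023-08-01 01:01:01"   # the value given to --shift
    ACTUAL_END="2023-05-14 09:30:00"   # newest timestamp found in the evtx
    OFFSET=$(( $(date -d "$TARGET_END" +%s) - $(date -d "$ACTUAL_END" +%s) ))
    RECORD="2023-05-14 08:00:00"       # some earlier record
    date -d "@$(( $(date -d "$RECORD" +%s) + OFFSET ))" '+%F %T'   # -> 2023-07-31 23:31:01 (one timezone assumed throughout)
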
@@ -9,25 +9,26 @@

. /usr/sbin/so-common

appliance_check() {
{%- if salt['grains.get']('sosmodel', '') %}
    APPLIANCE=1
{%- if grains['sosmodel'] in ['SO2AMI01', 'SO2GCI01', 'SO2AZI01'] %}
    exit 0
{%- endif %}
    DUDEYOUGOTADELL=$(dmidecode |grep Dell)
    if [[ -n $DUDEYOUGOTADELL ]]; then
        APPTYPE=dell
    else
        APPTYPE=sm
    fi
    mkdir -p /opt/so/log/raid
{%- else %}
    echo "This is not an appliance"
    exit 0
{%- endif %}
}
{%- if salt['grains.get']('sosmodel', '') %}
{%- set model = salt['grains.get']('sosmodel') %}
model={{ model }}
# Don't need cloud images to use this
if [[ $model =~ ^(SO2AMI01|SO2AZI01|SO2GCI01)$ ]]; then
    exit 0
fi
{%- else %}
echo "This is not an appliance"
exit 0
{%- endif %}
if [[ $model =~ ^(SOS10K|SOS500|SOS1000|SOS1000F|SOS4000|SOSSN7200|SOSSNNV|SOSMN)$ ]]; then
    is_bossraid=true
fi
if [[ $model =~ ^(SOSSNNV|SOSMN)$ ]]; then
    is_swraid=true
fi
if [[ $model =~ ^(SOS10K|SOS500|SOS1000|SOS1000F|SOS4000|SOSSN7200)$ ]]; then
    is_hwraid=true
fi

check_nsm_raid() {
    PERCCLI=$(/opt/raidtools/perccli/perccli64 /c0/v0 show|grep RAID|grep Optl)
@@ -49,61 +50,44 @@ check_nsm_raid() {

check_boss_raid() {
    MVCLI=$(/usr/local/bin/mvcli info -o vd |grep status |grep functional)

    if [[ -n $DUDEYOUGOTADELL ]]; then
        if [[ -n $MVCLI ]]; then
            BOSSRAID=0
        else
            BOSSRAID=1
        fi
    if [[ -n $MVCLI ]]; then
        BOSSRAID=0
    else
        BOSSRAID=1
    fi
}

check_software_raid() {
    if [[ -n $DUDEYOUGOTADELL ]]; then
        SWRC=$(grep "_" /proc/mdstat)

        if [[ -n $SWRC ]]; then
            # RAID is failed in some way
            SWRAID=1
        else
            SWRAID=0
        fi
    SWRC=$(grep "_" /proc/mdstat)
    if [[ -n $SWRC ]]; then
        # RAID is failed in some way
        SWRAID=1
    else
        SWRAID=0
    fi
}

# This script checks raid status if you use SO appliances
# Set everything to 0
SWRAID=0
BOSSRAID=0
HWRAID=0

# See if this is an appliance
if [[ $is_hwraid ]]; then
    check_nsm_raid
fi
if [[ $is_bossraid ]]; then
    check_boss_raid
fi
if [[ $is_swraid ]]; then
    check_software_raid
fi

appliance_check
check_nsm_raid
check_boss_raid
{%- if salt['grains.get']('sosmodel', '') %}
{%- if grains['sosmodel'] in ['SOSMN', 'SOSSNNV'] %}
check_software_raid
{%- endif %}
{%- endif %}
sum=$(($SWRAID + $BOSSRAID + $HWRAID))

if [[ -n $SWRAID ]]; then
    if [[ $SWRAID == '0' && $BOSSRAID == '0' ]]; then
        RAIDSTATUS=0
    else
        RAIDSTATUS=1
    fi
elif [[ -n $DUDEYOUGOTADELL ]]; then
    if [[ $BOSSRAID == '0' && $HWRAID == '0' ]]; then
        RAIDSTATUS=0
    else
        RAIDSTATUS=1
    fi
elif [[ "$APPTYPE" == 'sm' ]]; then
    if [[ -n "$HWRAID" ]]; then
        RAIDSTATUS=0
    else
        RAIDSTATUS=1
    fi
if [[ $sum == "0" ]]; then
    RAIDSTATUS=0
else
    RAIDSTATUS=1
fi

echo "nsmraid=$RAIDSTATUS" > /opt/so/log/raid/status.log

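The rewritten status logic folds the three per-layer flags into a single result by summing them, so any one fault flips the reported status. Reduced to its core (flag values hypothetical):

    SWRAID=0; BOSSRAID=1; HWRAID=0        # 1 = that RAID layer reported a fault
    sum=$((SWRAID + BOSSRAID + HWRAID))
    if [[ $sum == "0" ]]; then RAIDSTATUS=0; else RAIDSTATUS=1; fi
    echo "nsmraid=$RAIDSTATUS"            # -> nsmraid=1
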
@@ -2,7 +2,7 @@ elasticfleet:
  enabled: False
  config:
    server:
      custom_fqdn: ''
      custom_fqdn: []
      enable_auto_configuration: True
      endpoints_enrollment: ''
      es_token: ''

@@ -15,6 +15,7 @@
include:
  - elasticfleet.config
  - elasticfleet.sostatus
  - ssl

# If enabled, automatically update Fleet Logstash Outputs
{% if ELASTICFLEETMERGED.config.server.enable_auto_configuration and grains.role not in ['so-import', 'so-eval', 'so-fleet'] %}
@@ -61,11 +62,14 @@ so-elastic-fleet:
          - {{ BINDING }}
        {% endfor %}
    - binds:
      - /etc/pki:/etc/pki:ro
      - /etc/pki/elasticfleet-server.crt:/etc/pki/elasticfleet-server.crt:ro
      - /etc/pki/elasticfleet-server.key:/etc/pki/elasticfleet-server.key:ro
      - /etc/pki/tls/certs/intca.crt:/etc/pki/tls/certs/intca.crt:ro
      {% if GLOBALS.os_family == 'Debian' %}
      - /etc/ssl:/etc/ssl:ro
      - /etc/ssl/elasticfleet-server.crt:/etc/ssl/elasticfleet-server.crt:ro
      - /etc/ssl/elasticfleet-server.key:/etc/ssl/elasticfleet-server.key:ro
      - /etc/ssl/tls/certs/intca.crt:/etc/ssl/tls/certs/intca.crt:ro
      {% endif %}
      #- /opt/so/conf/elastic-fleet/state:/usr/share/elastic-agent/state:rw
      - /opt/so/log/elasticfleet:/usr/share/elastic-agent/logs
      {% if DOCKER.containers['so-elastic-fleet'].custom_bind_mounts %}
      {% for BIND in DOCKER.containers['so-elastic-fleet'].custom_bind_mounts %}
@@ -93,6 +97,9 @@ so-elastic-fleet:
      - {{ XTRAENV }}
      {% endfor %}
      {% endif %}
    - watch:
      - x509: etc_elasticfleet_key
      - x509: etc_elasticfleet_crt
{% endif %}

{% if GLOBALS.role != "so-fleet" %}

@@ -9,13 +9,12 @@
  },
  "enabled": true,
  "policy_id": "endpoints-initial",
  "vars": {},
  "inputs": [{
    "type": "endpoint",
    "type": "ENDPOINT_INTEGRATION_CONFIG",
    "enabled": true,
    "streams": [],
    "config": {
      "integration_config": {
        "_config": {
          "value": {
            "type": "endpoint",
            "endpointConfig": {

@@ -12,10 +12,11 @@ elasticfleet:
  config:
    server:
      custom_fqdn:
        description: Custom FQDN for Agents to connect to.
        description: Custom FQDN for Agents to connect to. One per line.
        global: True
        helpLink: elastic-fleet.html
        advanced: True
        forcedType: "[]string"
      enable_auto_configuration:
        description: Enable auto-configuration of Logstash Outputs & Fleet Host URLs.
        global: True

@@ -11,6 +11,12 @@
. /usr/sbin/so-common
. /usr/sbin/so-elastic-fleet-common

LOG="/opt/so/log/elasticfleet/so-elastic-agent-gen-installers.log"

# Check to see if we are already running
NUM_RUNNING=$(pgrep -cf "/bin/bash /sbin/so-elastic-agent-gen-installers")
[ "$NUM_RUNNING" -gt 1 ] && echo "$(date) - $NUM_RUNNING gen installers script processes running...exiting." >>$LOG && exit 0

for i in {1..30}
do
    ENROLLMENTOKEN=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq .list | jq -r -c '.[] | select(.policy_id | contains("endpoints-initial")) | .api_key')
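The pgrep -cf guard keeps overlapping runs of the script from piling up: it counts processes whose full command line matches and bails out when another instance already exists. The pattern in isolation (script path hypothetical):

    NUM_RUNNING=$(pgrep -cf "/bin/bash /sbin/my-long-task")
    # the count includes the current process itself, hence the threshold of 1
    [ "$NUM_RUNNING" -gt 1 ] && echo "$(date) - already running, exiting" && exit 0
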
@@ -2,7 +2,7 @@
# or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
# this file except in compliance with the Elastic License 2.0.
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% set CUSTOMFQDN = salt['pillar.get']('elasticfleet:config:server:custom_fqdn') %}
{% from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED %}

. /usr/sbin/so-common

@@ -41,9 +41,16 @@ else
    NEW_LIST=("{{ GLOBALS.url_base }}:5055" "{{ GLOBALS.hostname }}:5055")
fi

{% if CUSTOMFQDN != "" %}
# Add Custom Hostname to list
NEW_LIST+=("{{ CUSTOMFQDN }}:5055")
# Query for FQDN entries & add them to the list
{% if ELASTICFLEETMERGED.config.server.custom_fqdn | length > 0 %}
CUSTOMFQDNLIST=({{ ELASTICFLEETMERGED.config.server.custom_fqdn | join(' ') }})
if [ -n "$CUSTOMFQDNLIST" ]; then
    readarray -t CUSTOMFQDN <<< $CUSTOMFQDNLIST
    for CUSTOMNAME in "${CUSTOMFQDN[@]}"
    do
        NEW_LIST+=("https://$CUSTOMNAME:8220")
    done
fi
{% endif %}

# Query for the current Grid Nodes that are running Logstash

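Once Jinja renders the pillar list, this block reduces to a plain bash array walk; for two configured FQDNs it is roughly equivalent to the sketch below (hostnames hypothetical). One caution: an unquoted $CUSTOMFQDNLIST expands to only the array's first element, so the readarray round-trip appears to pick up just the first FQDN, while iterating the array directly avoids that:

    CUSTOMFQDNLIST=(fleet-a.example.com fleet-b.example.com)   # as rendered from join(' ')
    for CUSTOMNAME in "${CUSTOMFQDNLIST[@]}"; do
      NEW_LIST+=("https://$CUSTOMNAME:8220")
    done
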
@@ -2,7 +2,7 @@
# or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
# this file except in compliance with the Elastic License 2.0.
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% set CUSTOMFQDN = salt['pillar.get']('elasticfleet:config:server:custom_fqdn') %}
{% from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED %}

. /usr/sbin/so-common

@@ -41,9 +41,16 @@ else
    NEW_LIST=("https://{{ GLOBALS.url_base }}:8220" "https://{{ GLOBALS.hostname }}:8220")
fi

{% if CUSTOMFQDN != "" %}
# Add Custom Hostname to list
NEW_LIST+=("https://{{ CUSTOMFQDN }}:8220")
# Query for FQDN entries & add them to the list
{% if ELASTICFLEETMERGED.config.server.custom_fqdn | length > 0 %}
CUSTOMFQDNLIST=({{ ELASTICFLEETMERGED.config.server.custom_fqdn | join(' ') }})
if [ -n "$CUSTOMFQDNLIST" ]; then
    readarray -t CUSTOMFQDN <<< $CUSTOMFQDNLIST
    for CUSTOMNAME in "${CUSTOMFQDN[@]}"
    do
        NEW_LIST+=("https://$CUSTOMNAME:8220")
    done
fi
{% endif %}

# Query for the current Grid Nodes that are running Logstash (which includes Fleet Nodes)
@@ -62,7 +69,7 @@ fi
NEW_LIST_JSON=$(jq --compact-output --null-input '$ARGS.positional' --args -- "${NEW_LIST[@]}")
NEW_HASH=$(sha1sum <<< "$NEW_LIST_JSON" | awk '{print $1}')

# Compare the current & new list of URLs - if different, update the Fleet Server URLs
# Compare the current & new list of URLs - if different, update the Fleet Server URLs & regenerate the agent installer
if [ "$NEW_HASH" = "$CURRENT_HASH" ]; then
    printf "\nHashes match - no update needed.\n"
    printf "Current List: $CURRENT_LIST\nNew List: $NEW_LIST_JSON\n"
@@ -71,4 +78,5 @@ else
    printf "\nHashes don't match - update needed.\n"
    printf "Current List: $CURRENT_LIST\nNew List: $NEW_LIST_JSON\n"
    update_fleet_urls
    /sbin/so-elastic-agent-gen-installers >> /opt/so/log/elasticfleet/so-elastic-agent-gen-installers.log &
fi

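The update check serializes the bash array to compact JSON with jq and compares SHA-1 digests rather than the lists themselves. The pattern in isolation (URLs hypothetical, CURRENT_HASH assumed computed the same way on the previous run):

    NEW_LIST=("https://fleet-a.example.com:8220" "https://fleet-b.example.com:8220")
    NEW_LIST_JSON=$(jq --compact-output --null-input '$ARGS.positional' --args -- "${NEW_LIST[@]}")
    NEW_HASH=$(sha1sum <<< "$NEW_LIST_JSON" | awk '{print $1}')
    [ "$NEW_HASH" = "$CURRENT_HASH" ] && echo "no update needed"
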
@@ -78,6 +78,7 @@
    { "set": { "if": "ctx.network?.direction == 'ingress'", "override": true, "field": "network.initiated", "value": "false" } },
    { "set": { "if": "ctx.network?.type == 'ipv4'", "override": true, "field": "destination.ipv6", "value": "false" } },
    { "set": { "if": "ctx.network?.type == 'ipv6'", "override": true, "field": "destination.ipv6", "value": "true" } },
    { "community_id": { "if": "ctx.event?.dataset == 'endpoint.events.network'", "ignore_failure": true } },
    { "remove": { "field": [ "message2", "type", "fields", "category", "module", "dataset", "event.dataset_temp", "dataset_tag_temp", "module_temp" ], "ignore_missing": true, "ignore_failure": true } }
  ],
  "on_failure": [

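The added community_id processor fires only for endpoint.events.network documents and derives its hash from the standard flow tuple. A quick way to sanity-check a processor like this is Elasticsearch's _simulate API; the call below is an ad-hoc test sketch, not part of the commit (local endpoint and credentials assumed):

    curl -s -XPOST 'localhost:9200/_ingest/pipeline/_simulate' \
      -H 'Content-Type: application/json' -d '
    {
      "pipeline": { "processors": [ { "community_id": { "ignore_failure": true } } ] },
      "docs": [ { "_source": {
        "source":      { "ip": "10.0.0.1", "port": 1234 },
        "destination": { "ip": "10.0.0.2", "port": 80 },
        "network":     { "transport": "tcp" }
      } } ]
    }'
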
@@ -26,6 +26,7 @@ firewall:
    standalone: []
    strelka_frontend: []
    syslog: []
    workstation: []
    customhostgroup0: []
    customhostgroup1: []
    customhostgroup2: []
@@ -370,6 +371,7 @@ firewall:
          - elastic_agent_data
          - elastic_agent_update
          - localrules
          - sensoroni
      fleet:
        portgroups:
          - elasticsearch_rest
@@ -383,6 +385,17 @@ firewall:
          - elastic_agent_control
          - elastic_agent_data
          - elastic_agent_update
      idh:
        portgroups:
          - docker_registry
          - influxdb
          - sensoroni
          - yum
          - beats_5044
          - beats_5644
          - elastic_agent_control
          - elastic_agent_data
          - elastic_agent_update
      sensor:
        portgroups:
          - beats_5044
@@ -393,6 +406,7 @@ firewall:
          - yum
          - docker_registry
          - influxdb
          - sensoroni
      searchnode:
        portgroups:
          - redis
@@ -405,6 +419,7 @@ firewall:
          - elastic_agent_control
          - elastic_agent_data
          - elastic_agent_update
          - sensoroni
      heavynode:
        portgroups:
          - redis
@@ -417,6 +432,7 @@ firewall:
          - elastic_agent_control
          - elastic_agent_data
          - elastic_agent_update
          - sensoroni
      receiver:
        portgroups:
          - yum
@@ -425,6 +441,10 @@ firewall:
          - elastic_agent_control
          - elastic_agent_data
          - elastic_agent_update
          - sensoroni
      analyst:
        portgroups:
          - nginx
      beats_endpoint:
        portgroups:
          - beats_5044
@@ -442,9 +462,9 @@ firewall:
      endgame:
        portgroups:
          - endgame
      analyst:
      workstation:
        portgroups:
          - nginx
          - yum
      customhostgroup0:
        portgroups: []
      customhostgroup1:
@@ -476,6 +496,9 @@ firewall:
      fleet:
        portgroups:
          - salt_manager
      idh:
        portgroups:
          - salt_manager
      localhost:
        portgroups:
          - all
@@ -491,6 +514,9 @@ firewall:
      receiver:
        portgroups:
          - salt_manager
      workstation:
        portgroups:
          - salt_manager
      self:
        portgroups:
          - syslog
@@ -535,6 +561,7 @@ firewall:
          - elastic_agent_data
          - elastic_agent_update
          - localrules
          - sensoroni
      fleet:
        portgroups:
          - elasticsearch_rest
@@ -548,6 +575,17 @@ firewall:
          - elastic_agent_control
          - elastic_agent_data
          - elastic_agent_update
      idh:
        portgroups:
          - docker_registry
          - influxdb
          - sensoroni
          - yum
          - beats_5044
          - beats_5644
          - elastic_agent_control
          - elastic_agent_data
          - elastic_agent_update
      sensor:
        portgroups:
          - beats_5044
@@ -558,6 +596,7 @@ firewall:
          - yum
          - docker_registry
          - influxdb
          - sensoroni
      searchnode:
        portgroups:
          - redis
@@ -569,6 +608,7 @@ firewall:
          - elastic_agent_control
          - elastic_agent_data
          - elastic_agent_update
          - sensoroni
      heavynode:
        portgroups:
          - redis
@@ -580,6 +620,7 @@ firewall:
          - elastic_agent_control
          - elastic_agent_data
          - elastic_agent_update
          - sensoroni
      receiver:
        portgroups:
          - yum
@@ -588,6 +629,10 @@ firewall:
          - elastic_agent_control
          - elastic_agent_data
          - elastic_agent_update
          - sensoroni
      analyst:
        portgroups:
          - nginx
      beats_endpoint:
        portgroups:
          - beats_5044
@@ -605,9 +650,9 @@ firewall:
      endgame:
        portgroups:
          - endgame
      analyst:
      workstation:
        portgroups:
          - nginx
          - yum
      customhostgroup0:
        portgroups: []
      customhostgroup1:
@@ -639,6 +684,9 @@ firewall:
      fleet:
        portgroups:
          - salt_manager
      idh:
        portgroups:
          - salt_manager
      localhost:
        portgroups:
          - all
@@ -654,6 +702,9 @@ firewall:
      receiver:
        portgroups:
          - salt_manager
      workstation:
        portgroups:
          - salt_manager
      self:
        portgroups:
          - syslog
@@ -723,6 +774,17 @@ firewall:
          - elastic_agent_control
          - elastic_agent_data
          - elastic_agent_update
      idh:
        portgroups:
          - docker_registry
          - influxdb
          - sensoroni
          - yum
          - beats_5044
          - beats_5644
          - elastic_agent_control
          - elastic_agent_data
          - elastic_agent_update
      sensor:
        portgroups:
          - docker_registry
@@ -760,6 +822,10 @@ firewall:
          - elastic_agent_control
          - elastic_agent_data
          - elastic_agent_update
          - sensoroni
      analyst:
        portgroups:
          - nginx
      beats_endpoint:
        portgroups:
          - beats_5044
@@ -780,9 +846,9 @@ firewall:
      strelka_frontend:
        portgroups:
          - strelka_frontend
      analyst:
      workstation:
        portgroups:
          - nginx
          - yum
      customhostgroup0:
        portgroups: []
      customhostgroup1:
@@ -814,6 +880,9 @@ firewall:
      fleet:
        portgroups:
          - salt_manager
      idh:
        portgroups:
          - salt_manager
      localhost:
        portgroups:
          - all
@@ -832,6 +901,9 @@ firewall:
      receiver:
        portgroups:
          - salt_manager
      workstation:
        portgroups:
          - salt_manager
      self:
        portgroups:
          - syslog
@@ -1128,6 +1200,9 @@ firewall:
      analyst:
        portgroups:
          - nginx
      workstation:
        portgroups:
          - yum
      customhostgroup0:
        portgroups: []
      customhostgroup1:

@@ -45,6 +45,7 @@ firewall:
    standalone: *hostgroupsettings
    strelka_frontend: *hostgroupsettings
    syslog: *hostgroupsettings
    workstation: *hostgroupsettings
    customhostgroup0: &customhostgroupsettings
      description: List of IP or CIDR blocks to allow to this hostgroup.
      forcedType: "[]string"
@@ -215,6 +216,8 @@ firewall:
        portgroups: *portgroupsdocker
      analyst:
        portgroups: *portgroupsdocker
      workstation:
        portgroups: *portgroupsdocker
      customhostgroup0:
        portgroups: *portgroupsdocker
      customhostgroup1:
@@ -339,6 +342,8 @@ firewall:
    hostgroups:
      manager:
        portgroups: *portgroupsdocker
      idh:
        portgroups: *portgroupsdocker
      sensor:
        portgroups: *portgroupsdocker
      searchnode:
@@ -361,6 +366,8 @@ firewall:
        portgroups: *portgroupsdocker
      analyst:
        portgroups: *portgroupsdocker
      workstation:
        portgroups: *portgroupsdocker
      customhostgroup0:
        portgroups: *portgroupsdocker
      customhostgroup1:
@@ -389,12 +396,16 @@ firewall:
        portgroups: *portgroupshost
      localhost:
        portgroups: *portgroupshost
      idh:
        portgroups: *portgroupshost
      sensor:
        portgroups: *portgroupshost
      searchnode:
        portgroups: *portgroupshost
      heavynode:
        portgroups: *portgroupshost
      workstation:
        portgroups: *portgroupshost
      customhostgroup0:
        portgroups: *portgroupshost
      customhostgroup1:
@@ -422,6 +433,8 @@ firewall:
    hostgroups:
      managersearch:
        portgroups: *portgroupsdocker
      idh:
        portgroups: *portgroupsdocker
      sensor:
        portgroups: *portgroupsdocker
      searchnode:
@@ -444,6 +457,8 @@ firewall:
        portgroups: *portgroupsdocker
      analyst:
        portgroups: *portgroupsdocker
      workstation:
        portgroups: *portgroupsdocker
      customhostgroup0:
        portgroups: *portgroupsdocker
      customhostgroup1:
@@ -472,12 +487,16 @@ firewall:
        portgroups: *portgroupshost
      localhost:
        portgroups: *portgroupshost
      idh:
        portgroups: *portgroupshost
      sensor:
        portgroups: *portgroupshost
      searchnode:
        portgroups: *portgroupshost
      heavynode:
        portgroups: *portgroupshost
      workstation:
        portgroups: *portgroupshost
      customhostgroup0:
        portgroups: *portgroupshost
      customhostgroup1:
@@ -509,6 +528,8 @@ firewall:
        portgroups: *portgroupsdocker
      fleet:
        portgroups: *portgroupsdocker
      idh:
        portgroups: *portgroupsdocker
      sensor:
        portgroups: *portgroupsdocker
      searchnode:
@@ -533,6 +554,8 @@ firewall:
        portgroups: *portgroupsdocker
      analyst:
        portgroups: *portgroupsdocker
      workstation:
        portgroups: *portgroupsdocker
      customhostgroup0:
        portgroups: *portgroupsdocker
      customhostgroup1:
@@ -565,12 +588,16 @@ firewall:
        portgroups: *portgroupshost
      standalone:
        portgroups: *portgroupshost
      idh:
        portgroups: *portgroupshost
      sensor:
        portgroups: *portgroupshost
      searchnode:
        portgroups: *portgroupshost
      heavynode:
        portgroups: *portgroupshost
      workstation:
        portgroups: *portgroupshost
      customhostgroup0:
        portgroups: *portgroupshost
      customhostgroup1:
@@ -795,6 +822,8 @@ firewall:
        portgroups: *portgroupsdocker
      analyst:
        portgroups: *portgroupsdocker
      workstation:
        portgroups: *portgroupsdocker
      customhostgroup0:
        portgroups: *portgroupsdocker
      customhostgroup1:

@@ -3,17 +3,21 @@

{%- from 'vars/globals.map.jinja' import GLOBALS %}
{%- from 'idstools/map.jinja' import IDSTOOLSMERGED %}
{%- set proxy = salt['pillar.get']('manager:proxy') %}

{%- set proxy = salt['pillar.get']('manager:proxy') %}
{%- set noproxy = salt['pillar.get']('manager:no_proxy', '') %}

# Download the rules from the internet
{%- if proxy %}
export http_proxy={{ proxy }}
export https_proxy={{ proxy }}
export no_proxy="{{ noproxy }}"
{%- endif %}

mkdir -p /nsm/rules/suricata
chown -R socore:socore /nsm/rules/suricata
# Download the rules from the internet
{%- if GLOBALS.airgap != 'True' %}
{%- if proxy %}
export http_proxy={{ proxy }}
export https_proxy={{ proxy }}
export no_proxy=salt['pillar.get']('manager:no_proxy')
{%- endif %}
{%- if IDSTOOLSMERGED.config.ruleset == 'ETOPEN' %}
docker exec so-idstools idstools-rulecat -v --suricata-version 6.0 -o /nsm/rules/suricata/ --merged=/nsm/rules/suricata/emerging-all.rules --force
{%- elif IDSTOOLSMERGED.config.ruleset == 'ETPRO' %}

@@ -22,6 +22,7 @@ include:
{% endif %}
  - logstash.config
  - logstash.sostatus
  - ssl

so-logstash:
  docker_container.running:
@@ -90,6 +91,10 @@ so-logstash:
      {% endfor %}
      {% endif %}
    - watch:
      {% if grains['role'] in ['so-manager', 'so-eval', 'so-managersearch', 'so-standalone', 'so-import', 'so-fleet', 'so-receiver'] %}
      - x509: etc_elasticfleet_logstash_key
      - x509: etc_elasticfleet_logstash_crt
      {% endif %}
      - file: lsetcsync
      {% for assigned_pipeline in LOGSTASH_MERGED.assigned_pipelines.roles[GLOBALS.role.split('-')[1]] %}
      - file: ls_pipeline_{{assigned_pipeline}}

@@ -74,9 +74,12 @@ fi
    so-firewall includehost heavynode "$IP" --apply
    ;;
  'IDH')
    so-firewall includehost sensor "$IP" --apply
    so-firewall includehost idh "$IP" --apply
    ;;
  'RECEIVER')
    so-firewall includehost receiver "$IP" --apply
    ;;
  'WORKSTATION')
    so-firewall includehost workstation "$IP" --apply
    ;;
esac
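Each case arm maps a minion role onto one or more firewall hostgroups. The same command can also be run by hand, for example to admit a workstation the manager does not yet know about (IP hypothetical):

    sudo so-firewall includehost workstation 192.0.2.50 --apply
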
@@ -184,7 +184,7 @@ check_airgap() {
    is_airgap=0
    UPDATE_DIR=/tmp/soagupdate/SecurityOnion
    AGDOCKER=/tmp/soagupdate/docker
    AGREPO=/tmp/soagupdate/Packages
    AGREPO=/tmp/soagupdate/minimal/Packages
  else
    is_airgap=1
  fi
@@ -403,8 +403,6 @@ postupgrade_changes() {
  [[ "$POSTVERSION" == 2.4.2 ]] && post_to_2.4.3
  [[ "$POSTVERSION" == 2.4.3 ]] && post_to_2.4.4
  [[ "$POSTVERSION" == 2.4.4 ]] && post_to_2.4.5


  true
}

@@ -479,11 +477,23 @@ up_to_2.4.4() {
}

up_to_2.4.5() {
  update_elastic_agent
  determine_elastic_agent_upgrade

  INSTALLEDVERSION=2.4.5
}

determine_elastic_agent_upgrade() {
  if [[ $is_airgap -eq 0 ]]; then
    update_elastic_agent_airgap
  else
    update_elastic_agent
  fi
}

update_elastic_agent_airgap() {
  rsync -av /tmp/soagupdate/fleet/* /nsm/elastic-fleet/artifacts/
}

verify_upgradespace() {
  CURRENTSPACE=$(df -BG / | grep -v Avail | awk '{print $4}' | sed 's/.$//')
  if [ "$CURRENTSPACE" -lt "10" ]; then
@@ -521,6 +531,7 @@ update_centos_repo() {
  echo "Syncing new updates to /nsm/repo"
  rsync -av $AGREPO/* /nsm/repo/
  echo "Creating repo"
  dnf -y install yum-utils createrepo
  createrepo /nsm/repo
}

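determine_elastic_agent_upgrade branches on the is_airgap flag set by check_airgap, where 0 means airgap update media was found. Reduced to its skeleton (paths as in the diff, flag value hypothetical):

    is_airgap=0   # set by check_airgap: 0 = airgap update media detected
    if [[ $is_airgap -eq 0 ]]; then
      rsync -av /tmp/soagupdate/fleet/* /nsm/elastic-fleet/artifacts/   # stage offline agent artifacts
    else
      update_elastic_agent   # online download path
    fi
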
@@ -3,12 +3,13 @@ NOROOT=1
. /usr/sbin/so-common

{%- set proxy = salt['pillar.get']('manager:proxy') %}
{%- set noproxy = salt['pillar.get']('manager:no_proxy', '') %}

# Download the rules from the internet
{%- if proxy %}
export http_proxy={{ proxy }}
export https_proxy={{ proxy }}
export no_proxy=salt['pillar.get']('manager:no_proxy')
export no_proxy="{{ noproxy }}"
{%- endif %}

repos="/opt/so/conf/strelka/repos.txt"

@@ -7,7 +7,7 @@
{% if sls in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}

{% set CUSTOMFQDN = salt['pillar.get']('elasticfleet:config:server:custom_fqdn') %}
{% from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED %}

{% set global_ca_text = [] %}
{% set global_ca_server = [] %}
@@ -154,7 +154,7 @@ etc_elasticfleet_crt:
    - signing_policy: elasticfleet
    - private_key: /etc/pki/elasticfleet-server.key
    - CN: {{ GLOBALS.url_base }}
    - subjectAltName: DNS:{{ GLOBALS.hostname }},IP:{{ GLOBALS.node_ip }} {% if CUSTOMFQDN != "" %},DNS:{{ CUSTOMFQDN }}{% endif %}
    - subjectAltName: DNS:{{ GLOBALS.hostname }},IP:{{ GLOBALS.node_ip }}{% if ELASTICFLEETMERGED.config.server.custom_fqdn | length > 0 %},DNS:{{ ELASTICFLEETMERGED.config.server.custom_fqdn | join(',DNS:') }}{% endif %}
    - days_remaining: 0
    - days_valid: 820
    - backup: True
@@ -211,7 +211,7 @@ etc_elasticfleet_logstash_crt:
    - signing_policy: elasticfleet
    - private_key: /etc/pki/elasticfleet-logstash.key
    - CN: {{ GLOBALS.url_base }}
    - subjectAltName: DNS:{{ GLOBALS.hostname }},IP:{{ GLOBALS.node_ip }} {% if CUSTOMFQDN != "" %},DNS:{{ CUSTOMFQDN }}{% endif %}
    - subjectAltName: DNS:{{ GLOBALS.hostname }},IP:{{ GLOBALS.node_ip }}{% if ELASTICFLEETMERGED.config.server.custom_fqdn | length > 0 %},DNS:{{ ELASTICFLEETMERGED.config.server.custom_fqdn | join(',DNS:') }}{% endif %}
    - days_remaining: 0
    - days_valid: 820
    - backup: True
||||
@@ -1012,9 +1012,9 @@ whiptail_manager_unreachable() {
|
||||
|
||||
local msg
|
||||
read -r -d '' msg <<- EOM
|
||||
Setup is unable to access the manager at this time.
|
||||
Setup is unable to access the manager. This most likely means that you need to allow this machine to connect through the manager's firewall.
|
||||
|
||||
Run the following on the manager:
|
||||
You can either go to SOC --> Administration --> Configuration and choose the correct firewall option from the list OR you can run the following command on the manager:
|
||||
|
||||
sudo so-firewall-minion --role=$install_type --ip=$MAINIP
|
||||
|
||||
|
||||