Mirror of https://github.com/Security-Onion-Solutions/securityonion.git (synced 2025-12-06 17:22:49 +01:00)
Merge branch 'dev' into kilo
salt/common/tools/sbin/so-import-evtx (new file, 172 lines)
@@ -0,0 +1,172 @@
#!/bin/bash
#
# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

{%- set MANAGER = salt['grains.get']('master') %}
{%- set VERSION = salt['pillar.get']('global:soversion') %}
{%- set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{%- set MANAGERIP = salt['pillar.get']('global:managerip') -%}
{%- set URLBASE = salt['pillar.get']('global:url_base') %}
{% set ES_USER = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:user', '') %}
{% set ES_PW = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:pass', '') %}

INDEX_DATE=$(date +'%Y.%m.%d')
RUNID=$(tr -dc 'a-z0-9' < /dev/urandom | fold -w 8 | head -n 1)

. /usr/sbin/so-common

function usage {
cat << EOF
Usage: $0 <evtx-file-1> [evtx-file-2] [evtx-file-*]

Imports one or more evtx files into Security Onion. The evtx files will be analyzed and made available for review in the Security Onion toolset.
EOF
}

function evtx2es() {
  EVTX=$1
  HASH=$2

  docker run --rm \
    -v "$EVTX:/tmp/$RUNID.evtx" \
    --entrypoint evtx2es \
    {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-pcaptools:{{ VERSION }} \
    --host {{ MANAGERIP }} --scheme https \
    --index so-beats-$INDEX_DATE --pipeline import.wel \
    --login {{ES_USER}} --pwd {{ES_PW}} \
    "/tmp/$RUNID.evtx" 1>/dev/null 2>/dev/null

  docker run --rm \
    -v "$EVTX:/tmp/import.evtx" \
    -v "/nsm/import/evtx-end_newest:/tmp/newest" \
    -v "/nsm/import/evtx-start_oldest:/tmp/oldest" \
    --entrypoint '/evtx_calc_timestamps.sh' \
    {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-pcaptools:{{ VERSION }}
}
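
# Note: the first docker run above indexes events into Elasticsearch through the
# import.wel ingest pipeline under today's so-beats-* index, mounting the file as
# /tmp/$RUNID.evtx so the run id ends up in log.file.name. The second run mounts
# /nsm/import/evtx-{start_oldest,end_newest} as /tmp/{oldest,newest} so that
# /evtx_calc_timestamps.sh can record the oldest/newest event times, which the
# comparison logic later in this script reads back.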

# if no parameters supplied, display usage
if [ $# -eq 0 ]; then
  usage
  exit 1
fi

# ensure this is a Manager node
require_manager

# verify that all parameters are files
for i in "$@"; do
  if ! [ -f "$i" ]; then
    usage
    echo "\"$i\" is not a valid file!"
    exit 2
  fi
done

# track if we have any valid or invalid evtx
INVALID_EVTXS="no"
VALID_EVTXS="no"

# track oldest start and newest end so that we can generate the Kibana search hyperlink at the end
START_OLDEST="2050-12-31"
END_NEWEST="1971-01-01"

touch /nsm/import/evtx-start_oldest
touch /nsm/import/evtx-end_newest

echo $START_OLDEST > /nsm/import/evtx-start_oldest
echo $END_NEWEST > /nsm/import/evtx-end_newest

# paths must be quoted in case they include spaces
for EVTX in "$@"; do
  EVTX=$(/usr/bin/realpath "$EVTX")
  echo "Processing Import: ${EVTX}"

  # generate a unique hash to assist with dedupe checks
  HASH=$(md5sum "${EVTX}" | awk '{ print $1 }')
  HASH_DIR=/nsm/import/${HASH}
  echo "- assigning unique identifier to import: $HASH"

  if [ -d "$HASH_DIR" ]; then
    echo "- this EVTX has already been imported; skipping"
    INVALID_EVTXS="yes"
  else
    VALID_EVTXS="yes"

    EVTX_DIR=$HASH_DIR/evtx
    mkdir -p "$EVTX_DIR"

    # import evtx and write them to import ingest pipeline
    echo "- importing logs to Elasticsearch..."
    evtx2es "${EVTX}" $HASH

    # compare $START to $START_OLDEST
    START=$(cat /nsm/import/evtx-start_oldest)
    START_COMPARE=$(date -d "$START" +%s)
    START_OLDEST_COMPARE=$(date -d "$START_OLDEST" +%s)
    if [ $START_COMPARE -lt $START_OLDEST_COMPARE ]; then
      START_OLDEST=$START
    fi

    # compare $ENDNEXT to $END_NEWEST
    END=$(cat /nsm/import/evtx-end_newest)
    ENDNEXT=$(date +%Y-%m-%d --date="$END 1 day")
    ENDNEXT_COMPARE=$(date -d "$ENDNEXT" +%s)
    END_NEWEST_COMPARE=$(date -d "$END_NEWEST" +%s)
    if [ $ENDNEXT_COMPARE -gt $END_NEWEST_COMPARE ]; then
      END_NEWEST=$ENDNEXT
    fi

    cp -f "${EVTX}" "${EVTX_DIR}"/data.evtx
    chmod 644 "${EVTX_DIR}"/data.evtx

  fi # end of valid evtx

  echo

done # end of for-loop processing evtx files

# remove temp files
echo "Cleaning up:"
for TEMP_EVTX in "${TEMP_EVTXS[@]}"; do
  echo "- removing temporary evtx $TEMP_EVTX"
  rm -f "$TEMP_EVTX"
done

# output final messages
if [ "$INVALID_EVTXS" = "yes" ]; then
  echo
  echo "Please note! One or more evtx files were invalid! You can scroll up to see which ones were invalid."
fi

START_OLDEST_FORMATTED=$(date +%Y-%m-%d --date="$START_OLDEST")
START_OLDEST_SLASH=$(echo $START_OLDEST_FORMATTED | sed -e 's/-/%2F/g')
END_NEWEST_SLASH=$(echo $END_NEWEST | sed -e 's/-/%2F/g')

if [ "$VALID_EVTXS" = "yes" ]; then
cat << EOF

Import complete!

You can use the following hyperlink to view data in the time range of your import. You can triple-click to quickly highlight the entire hyperlink and then copy it into your browser:
https://{{ URLBASE }}/#/hunt?q=import.id:${RUNID}%20%7C%20groupby%20event.module%20event.dataset&t=${START_OLDEST_SLASH}%2000%3A00%3A00%20AM%20-%20${END_NEWEST_SLASH}%2000%3A00%3A00%20AM&z=UTC

or you can manually set your Time Range to be (in UTC):
From: $START_OLDEST_FORMATTED To: $END_NEWEST

Please note that it may take 30 seconds or more for events to appear in Hunt.
EOF
fi
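
For a sense of how this gets used, a minimal invocation sketch (hypothetical file paths; the script must run on the manager, per the require_manager check above):

  sudo so-import-evtx /cases/acme/Security.evtx /cases/acme/Sysmon.evtx

Each file is fingerprinted with md5sum, so re-running the command on an already-imported file is skipped rather than duplicated, and a successful run ends with a Hunt hyperlink pre-filtered to this run's import.id and time range.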
@@ -20,6 +20,9 @@
appliance_check() {
{%- if salt['grains.get']('sosmodel', '') %}
APPLIANCE=1
+{%- if grains['sosmodel'] in ['SO2AMI01', 'SO2GCI01', 'SO2AZI01'] %}
+exit 0
+{%- endif %}
DUDEYOUGOTADELL=$(dmidecode | grep Dell)
if [[ -n $DUDEYOUGOTADELL ]]; then
APPTYPE=dell
@@ -1,13 +1,10 @@
#!/bin/bash
got_root() {

# Make sure you are root
if [ "$(id -u)" -ne 0 ]; then
echo "This script must be run using sudo!"
exit 1
fi
. /usr/sbin/so-common

}
argstr=""
for arg in "$@"; do
argstr="${argstr} \"${arg}\""
done

got_root
-docker exec so-idstools /bin/bash -c "cd /opt/so/idstools/etc && idstools-rulecat $1"
+docker exec so-idstools /bin/bash -c "cd /opt/so/idstools/etc && idstools-rulecat --force ${argstr}"
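
A brief usage sketch (assumed invocation; any extra arguments are re-quoted into argstr and passed straight through to idstools-rulecat inside the so-idstools container):

  sudo so-rule-update

got_root enforces sudo before the container call, and --force tells idstools-rulecat to fetch and process the ruleset even when the remote file appears unchanged.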
@@ -385,6 +385,7 @@ preupgrade_changes() {
[[ "$INSTALLEDVERSION" == 2.3.0 || "$INSTALLEDVERSION" == 2.3.1 || "$INSTALLEDVERSION" == 2.3.2 || "$INSTALLEDVERSION" == 2.3.10 ]] && up_2.3.0_to_2.3.20
[[ "$INSTALLEDVERSION" == 2.3.20 || "$INSTALLEDVERSION" == 2.3.21 ]] && up_2.3.2X_to_2.3.30
[[ "$INSTALLEDVERSION" == 2.3.30 || "$INSTALLEDVERSION" == 2.3.40 ]] && up_2.3.3X_to_2.3.50
+[[ "$INSTALLEDVERSION" == 2.3.50 || "$INSTALLEDVERSION" == 2.3.51 || "$INSTALLEDVERSION" == 2.3.52 || "$INSTALLEDVERSION" == 2.3.60 || "$INSTALLEDVERSION" == 2.3.61 || "$INSTALLEDVERSION" == 2.3.70 ]] && up_2.3.5X_to_2.3.80
true
}
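
These checks chain deliberately: each up_* migration ends by advancing INSTALLEDVERSION (see up_2.3.5X_to_2.3.80 below), so a deployment several releases behind falls through every intermediate migration in a single run, and the trailing true keeps a non-matching final test from failing the function. A minimal sketch of the pattern, with made-up version numbers:

  INSTALLEDVERSION=1.0.0
  up_1.0_to_1.1() { echo "1.0 -> 1.1 migrations"; INSTALLEDVERSION=1.1.0; }
  up_1.1_to_1.2() { echo "1.1 -> 1.2 migrations"; INSTALLEDVERSION=1.2.0; }
  [[ "$INSTALLEDVERSION" == 1.0.0 ]] && up_1.0_to_1.1
  [[ "$INSTALLEDVERSION" == 1.1.0 ]] && up_1.1_to_1.2  # fires because the previous step advanced the version
  true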
@@ -622,6 +623,20 @@ EOF
INSTALLEDVERSION=2.3.50
}

up_2.3.5X_to_2.3.80() {

# Remove watermark settings from global.sls
sed -i '/ cluster_routing_allocation_disk/d' /opt/so/saltstack/local/pillar/global.sls

# Add new indices to the global
sed -i '/ index_settings:/a \\ so-elasticsearch: \n shards: 1 \n warm: 7 \n close: 30 \n delete: 365' /opt/so/saltstack/local/pillar/global.sls
sed -i '/ index_settings:/a \\ so-logstash: \n shards: 1 \n warm: 7 \n close: 30 \n delete: 365' /opt/so/saltstack/local/pillar/global.sls
sed -i '/ index_settings:/a \\ so-kibana: \n shards: 1 \n warm: 7 \n close: 30 \n delete: 365' /opt/so/saltstack/local/pillar/global.sls
sed -i '/ index_settings:/a \\ so-redis: \n shards: 1 \n warm: 7 \n close: 30 \n delete: 365' /opt/so/saltstack/local/pillar/global.sls

INSTALLEDVERSION=2.3.80
}
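
Assuming global.sls nests these entries under index_settings, the four sed inserts above should leave the pillar looking roughly like the sketch below (a hypothetical result, not verified output). Note the ordering: each sed appends immediately after the index_settings: line, so the last insert (so-redis) ends up listed first:

  index_settings:
    so-redis:
      shards: 1
      warm: 7
      close: 30
      delete: 365
    so-kibana:
      shards: 1
      warm: 7
      close: 30
      delete: 365
    # so-logstash and so-elasticsearch follow with identical settings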

verify_upgradespace() {
CURRENTSPACE=$(df -BG / | grep -v Avail | awk '{print $4}' | sed 's/.$//')
if [ "$CURRENTSPACE" -lt "10" ]; then
@@ -4,6 +4,7 @@
{ "remove": { "field": ["event.created", "timestamp", "winlog.event_data.UtcTime", "event_record_id"], "ignore_failure": true } },
{ "pipeline": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational'", "name": "sysmon" } },
{ "pipeline": { "if": "ctx.winlog?.channel != 'Microsoft-Windows-Sysmon/Operational'", "name": "win.eventlogs" } },
+{ "dissect": { "field": "log.file.name", "pattern": "/tmp/%{import.id}.evtx" } },
{ "pipeline": { "name": "common" } }
]
}
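
The dissect processor added here is what ties imported events back to a run of so-import-evtx: that script mounts each file into the container as /tmp/$RUNID.evtx, so parsing log.file.name against /tmp/%{import.id}.evtx recovers the run id. Sketching the mapping:

  log.file.name: "/tmp/a1b2c3d4.evtx"  =>  import.id: "a1b2c3d4"

which is exactly the field the generated Hunt hyperlink filters on (q=import.id:${RUNID}).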
@@ -1,3 +1,4 @@
+{%- set INDEX_SORTING = salt['pillar.get']('elasticsearch:index_sorting', True) %}
{
"index_patterns": ["so-*"],
"version":50001,
@@ -8,8 +9,10 @@
"index.refresh_interval":"30s",
"index.routing.allocation.require.box_type":"hot",
"index.mapping.total_fields.limit": "1500",
+{%- if INDEX_SORTING is sameas true %}
"index.sort.field": "@timestamp",
"index.sort.order": "desc",
+{%- endif %}
"analysis": {
"analyzer": {
"es_security_analyzer": {
@@ -3,9 +3,11 @@
{%- set ES = salt['pillar.get']('manager:mainip', '') -%}
{%- set ES_USER = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:user', '') %}
{%- set ES_PASS = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:pass', '') %}
+{%- set URLBASE = salt['pillar.get']('global:url_base') %}
server.name: kibana
server.host: "0"
server.basePath: /kibana
+server.publicBaseUrl: https://{{ URLBASE }}/kibana
elasticsearch.hosts: [ "https://{{ ES }}:9200" ]
elasticsearch.ssl.verificationMode: none
#kibana.index: ".kibana"
@@ -30,7 +30,7 @@
"::software": ["soc_timestamp", "source.ip", "software.name", "software.type" ],
"::ssh": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "ssh.version", "ssh.hassh_version", "ssh.direction", "ssh.client", "ssh.server", "log.id.uid" ],
"::ssl": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "ssl.server_name", "ssl.certificate.subject", "ssl.validation_status", "ssl.version", "log.id.uid" ],
-"::syslog": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "syslog.facility", "network.protocol", "syslog.severity", "log.id.uid" ],
+":zeek:syslog": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "syslog.facility", "network.protocol", "syslog.severity", "log.id.uid" ],
"::tunnels": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "tunnel_type", "action", "log.id.uid" ],
"::weird": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "weird.name", "log.id.uid" ],
"::x509": ["soc_timestamp", "x509.certificate.subject", "x509.certificate.key.type", "x509.certificate.key.length", "x509.certificate.issuer", "log.id.fuid" ],
@@ -44,5 +44,9 @@
":elasticsearch:": ["soc_timestamp", "agent.name", "message", "log.level", "metadata.version", "metadata.pipeline", "event.dataset" ],
":kibana:": ["soc_timestamp", "host.name", "message", "kibana.log.meta.req.headers.x-real-ip", "event.dataset" ],
"::rootcheck": ["soc_timestamp", "host.name", "metadata.ip_address", "log.full", "event.dataset", "event.module" ],
-"::syscollector": ["soc_timestamp", "host.name", "metadata.ip_address", "wazuh.data.type", "event.dataset", "event.module" ]
+"::ossec": ["soc_timestamp", "host.name", "metadata.ip_address", "log.full", "event.dataset", "event.module" ],
+"::syscollector": ["soc_timestamp", "host.name", "metadata.ip_address", "wazuh.data.type", "log.full", "event.dataset", "event.module" ],
+":syslog:syslog": ["soc_timestamp", "host.name", "metadata.ip_address", "real_message", "syslog.priority", "syslog.application" ],
+":aws:": ["soc_timestamp", "aws.cloudtrail.event_category", "aws.cloudtrail.event_type", "event.provider", "event.action", "event.outcome", "cloud.region", "user.name", "source.ip", "source.geo.region_iso_code" ],
+":squid:": ["soc_timestamp", "url.original", "destination.ip", "destination.geo.country_iso_code", "user.name", "source.ip" ]
}
@@ -3,7 +3,8 @@
{%- set THEHIVEKEY = salt['pillar.get']('global:hivekey', '') %}
{%- set PLAYBOOK = salt['pillar.get']('manager:playbook', '0') %}
{%- set THEHIVE = salt['pillar.get']('manager:thehive', '0') %}
-{%- set OSQUERY = salt['pillar.get']('manager:osquery', '0') %}
+{% set FLEETMANAGER = salt['pillar.get']('global:fleet_manager', False) %}
+{% set FLEETNODE = salt['pillar.get']('global:fleet_node', False) %}
{%- set GRAFANA = salt['pillar.get']('manager:grafana', '0') %}
{%- set ISAIRGAP = salt['pillar.get']('global:airgap', 'False') %}
{%- set API_TIMEOUT = salt['pillar.get']('sensoroni:api_timeout_ms', 0) %}
@@ -116,7 +117,7 @@
{%- if THEHIVE == 0 %}
"toolTheHive",
{%- endif %}
-{%- if OSQUERY == 0 %}
+{%- if not FLEETMANAGER and not FLEETNODE %}
"toolFleet",
{%- endif %}
{%- if GRAFANA == 0 %}
@@ -1867,8 +1867,9 @@ whiptail_you_sure() {
read -r -d '' you_sure_text <<- EOM
Welcome to Security Onion Setup!

-You can use Setup for lots of different use cases from a small standalone installation to a large distributed deployment for your enterprise.
+You can use Setup for lots of different use cases from a small standalone installation to a large distributed deployment for your enterprise. Don't forget to review the documentation at:
+https://docs.securityonion.net

Setup uses keyboard navigation and you can use arrow keys to move around. Certain screens may provide a list and ask you to select one or more items from that list. You can use [SPACE] to select items and [ENTER] to proceed to the next screen.

Would you like to continue?