Fix some files
@@ -1,91 +0,0 @@
#!/bin/bash

# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.


{# we only want the script to install the workstation if it is Rocky -#}
{% if grains.os == 'Rocky' -%}
{# if this is a manager -#}
{% if grains.master == grains.id.split('_')|first -%}

source /usr/sbin/so-common
doc_workstation_url="$DOC_BASE_URL/analyst-vm.html"
pillar_file="/opt/so/saltstack/local/pillar/minions/{{grains.id}}.sls"

if [ -f "$pillar_file" ]; then
  if ! grep -q "^workstation:$" "$pillar_file"; then

    FIRSTPASS=yes
    while [[ $INSTALL != "yes" ]] && [[ $INSTALL != "no" ]]; do
      if [[ "$FIRSTPASS" == "yes" ]]; then
        echo "###########################################"
        echo "##          ** W A R N I N G **          ##"
        echo "##    _______________________________    ##"
        echo "##                                       ##"
        echo "##     Installing the Security Onion     ##"
        echo "##   analyst node on this device will    ##"
        echo "##       make permanent changes to       ##"
        echo "##              the system.              ##"
        echo "##   A system reboot will be required    ##"
        echo "##       to complete the install.        ##"
        echo "##                                       ##"
        echo "###########################################"
        echo "Do you wish to continue? (Type the entire word 'yes' to proceed or 'no' to exit)"
        FIRSTPASS=no
      else
        echo "Please type 'yes' to continue or 'no' to exit."
      fi
      read -r INSTALL
    done

    if [[ $INSTALL == "no" ]]; then
      echo "Exiting analyst node installation."
      exit 0
    fi

    # Add workstation pillar to the minion's pillar file
    printf '%s\n' \
      "workstation:" \
      "  gui:" \
      "    enabled: true" \
      "" >> "$pillar_file"
    echo "Applying the workstation state. This could take some time since there are many packages that need to be installed."
    if salt-call state.apply workstation -linfo queue=True; then # make sure the state ran successfully
      echo ""
      echo "Analyst workstation has been installed!"
      echo "Press ENTER to reboot or Ctrl-C to cancel."
      read -r pause

      reboot
    else
      echo "There was an issue applying the workstation state. Please review the log above or at /opt/so/log/salt/minion."
    fi
  else # workstation is already added
    echo "The workstation pillar already exists in $pillar_file."
    echo "To enable/disable the gui, set 'workstation:gui:enabled' to true or false in $pillar_file."
    echo "Additional documentation can be found at $doc_workstation_url."
  fi
else # if the pillar file doesn't exist
  echo "Could not find $pillar_file, so the workstation pillar could not be added."
fi

{#- if this is not a manager #}
{% else -%}

echo "Since this is not a manager, the pillar values to enable the analyst workstation must be set manually. Please view the documentation at $doc_workstation_url."

{#- endif if this is a manager #}
{% endif -%}

{#- if not Rocky #}
{%- else %}

echo "The Analyst Workstation can only be installed on Rocky. Please view the documentation at $doc_workstation_url."

{#- endif grains.os == Rocky #}
{% endif -%}

exit 0
@@ -1,155 +0,0 @@
#!/bin/bash
#
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{%- set MANAGER = salt['grains.get']('master') %}
{%- set VERSION = salt['pillar.get']('global:soversion') %}
{%- set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{%- set MANAGERIP = salt['pillar.get']('global:managerip') %}
{%- set URLBASE = salt['pillar.get']('global:url_base') %}
{%- set ES_USER = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:user', '') %}
{%- set ES_PASS = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:pass', '') %}

INDEX_DATE=$(date +'%Y.%m.%d')
RUNID=$(tr -dc 'a-z0-9' < /dev/urandom | fold -w 8 | head -n 1)
LOG_FILE=/nsm/import/evtx-import.log

. /usr/sbin/so-common

function usage {
cat << EOF
Usage: $0 <evtx-file-1> [evtx-file-2] [evtx-file-*]

Imports one or more evtx files into Security Onion. The evtx files will be analyzed and made available for review in the Security Onion toolset.
EOF
}
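
# Example invocation (hypothetical path):
#   so-import-evtx /tmp/Security.evtx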

function evtx2es() {
  EVTX=$1
  HASH=$2

  docker run --rm \
    -v "$EVTX:/tmp/data.evtx" \
    -v "/nsm/import/$HASH/evtx/:/tmp/evtx/" \
    -v "/nsm/import/evtx-end_newest:/tmp/newest" \
    -v "/nsm/import/evtx-start_oldest:/tmp/oldest" \
    --entrypoint "/evtx_calc_timestamps.sh" \
    {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-pcaptools:{{ VERSION }} >> $LOG_FILE 2>&1
}
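
# Note: based on the mounts above, the evtx_calc_timestamps.sh entrypoint in the
# so-pcaptools image is expected to ingest /tmp/data.evtx and update /tmp/oldest
# and /tmp/newest with the oldest/newest event timestamps it sees (assumed contract).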

# if no parameters supplied, display usage
if [ $# -eq 0 ]; then
  usage
  exit 1
fi

# ensure this is a Manager node
require_manager

# verify that all parameters are files
for i in "$@"; do
  if ! [ -f "$i" ]; then
    usage
    echo "\"$i\" is not a valid file!"
    exit 2
  fi
done

# track if we have any valid or invalid evtx files
INVALID_EVTXS="no"
VALID_EVTXS="no"

# track oldest start and newest end so that we can generate the Kibana search hyperlink at the end
START_OLDEST="2050-12-31"
END_NEWEST="1971-01-01"

touch /nsm/import/evtx-start_oldest
touch /nsm/import/evtx-end_newest

echo $START_OLDEST > /nsm/import/evtx-start_oldest
echo $END_NEWEST > /nsm/import/evtx-end_newest
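
# The sentinels are deliberately inverted: START_OLDEST begins in the far future
# and END_NEWEST in the far past, so the first real timestamps written back by
# the import container always replace them.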

# paths must be quoted in case they include spaces
for EVTX in "$@"; do
  EVTX=$(/usr/bin/realpath "$EVTX")
  echo "Processing Import: ${EVTX}"

  # generate a unique hash to assist with dedupe checks
  HASH=$(md5sum "${EVTX}" | awk '{ print $1 }')
  HASH_DIR=/nsm/import/${HASH}
  echo "- assigning unique identifier to import: $HASH"

  if [ -d $HASH_DIR ]; then
    echo "- this EVTX has already been imported; skipping"
    INVALID_EVTXS="yes"
  else
    VALID_EVTXS="yes"

    EVTX_DIR=$HASH_DIR/evtx
    mkdir -p $EVTX_DIR

    # import evtx and write them to import ingest pipeline
    echo "- importing logs to Elasticsearch..."
    evtx2es "${EVTX}" $HASH

    # compare $START to $START_OLDEST
    START=$(cat /nsm/import/evtx-start_oldest)
    START_COMPARE=$(date -d $START +%s)
    START_OLDEST_COMPARE=$(date -d $START_OLDEST +%s)
    if [ $START_COMPARE -lt $START_OLDEST_COMPARE ]; then
      START_OLDEST=$START
    fi

    # compare $ENDNEXT to $END_NEWEST
    END=$(cat /nsm/import/evtx-end_newest)
    ENDNEXT=$(date +%Y-%m-%d --date="$END 1 day")
    ENDNEXT_COMPARE=$(date -d $ENDNEXT +%s)
    END_NEWEST_COMPARE=$(date -d $END_NEWEST +%s)
    if [ $ENDNEXT_COMPARE -gt $END_NEWEST_COMPARE ]; then
      END_NEWEST=$ENDNEXT
    fi
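    # Dates are compared as epoch seconds; the end date is padded forward by one
    # day so the final (partial) day of events falls inside the search range.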

    cp -f "${EVTX}" "${EVTX_DIR}"/data.evtx
    chmod 644 "${EVTX_DIR}"/data.evtx

  fi # end of valid evtx

  echo

done # end of for-loop processing evtx files

# remove temp files
# (TEMP_EVTXS is never populated in this script, so this loop is a no-op; it
# mirrors the temp-file cleanup in so-import-pcap)
echo "Cleaning up:"
for TEMP_EVTX in "${TEMP_EVTXS[@]}"; do
  echo "- removing temporary evtx $TEMP_EVTX"
  rm -f "$TEMP_EVTX"
done

# output final messages
if [ "$INVALID_EVTXS" = "yes" ]; then
  echo
  echo "Please note! One or more evtx files were invalid! You can scroll up to see which ones were invalid."
fi

START_OLDEST_FORMATTED=$(date +%Y-%m-%d --date="$START_OLDEST")
START_OLDEST_SLASH=$(echo $START_OLDEST_FORMATTED | sed -e 's/-/%2F/g')
END_NEWEST_SLASH=$(echo $END_NEWEST | sed -e 's/-/%2F/g')
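
# The sed substitution URL-encodes the date separators for the dashboard time
# picker: for example, 2024-01-31 becomes 2024%2F01%2F31 (i.e. 2024/01/31).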

if [ "$VALID_EVTXS" = "yes" ]; then
  cat << EOF

Import complete!

You can use the following hyperlink to view data in the time range of your import. You can triple-click to quickly highlight the entire hyperlink and then copy it into your browser:
https://{{ URLBASE }}/#/dashboards?q=import.id:${RUNID}%20%7C%20groupby%20-sankey%20event.dataset%20event.category%2a%20%7C%20groupby%20-pie%20event.category%20%7C%20groupby%20-bar%20event.module%20%7C%20groupby%20event.dataset%20%7C%20groupby%20event.module%20%7C%20groupby%20event.category%20%7C%20groupby%20observer.name%20%7C%20groupby%20source.ip%20%7C%20groupby%20destination.ip%20%7C%20groupby%20destination.port&t=${START_OLDEST_SLASH}%2000%3A00%3A00%20AM%20-%20${END_NEWEST_SLASH}%2000%3A00%3A00%20AM&z=UTC

or you can manually set your Time Range to be (in UTC):
From: $START_OLDEST_FORMATTED To: $END_NEWEST

Please note that it may take 30 seconds or more for events to appear in Security Onion Console.
EOF
fi
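
# For reference, the dashboard query in the hyperlink above URL-decodes to:
#   import.id:<RUNID> | groupby -sankey event.dataset event.category* | groupby -pie event.category
#   | groupby -bar event.module | groupby event.dataset | groupby event.module | groupby event.category
#   | groupby observer.name | groupby source.ip | groupby destination.ip | groupby destination.port
# with the &t= parameter supplying the UTC time range.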
@@ -1,215 +0,0 @@
#!/bin/bash
#
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{%- set MANAGER = salt['grains.get']('master') %}
{%- set VERSION = salt['pillar.get']('global:soversion') %}
{%- set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{%- set MANAGERIP = salt['pillar.get']('global:managerip') %}
{%- set URLBASE = salt['pillar.get']('global:url_base') %}

. /usr/sbin/so-common

function usage {
cat << EOF
Usage: $0 <pcap-file-1> [pcap-file-2] [pcap-file-N]

Imports one or more PCAP files onto a sensor node. The PCAP traffic will be analyzed and made available for review in the Security Onion toolset.
EOF
}
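
# Example invocation (hypothetical paths; quote any path containing spaces):
#   so-import-pcap /tmp/capture1.pcap "/tmp/my capture.pcap"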

function pcapinfo() {
  PCAP=$1
  ARGS=$2
  docker run --rm -v "$PCAP:/input.pcap" --entrypoint capinfos {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-pcaptools:{{ VERSION }} /input.pcap $ARGS
}
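
# capinfos (from Wireshark, bundled in the so-pcaptools image) summarizes a
# capture file; a nonzero exit status is treated below as a sign of corruption.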

function pcapfix() {
  PCAP=$1
  PCAP_OUT=$2
  docker run --rm -v "$PCAP:/input.pcap" -v $PCAP_OUT:$PCAP_OUT --entrypoint pcapfix {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-pcaptools:{{ VERSION }} /input.pcap -o $PCAP_OUT > /dev/null 2>&1
}

function suricata() {
  PCAP=$1
  HASH=$2

  NSM_PATH=/nsm/import/${HASH}/suricata
  mkdir -p $NSM_PATH
  chown suricata:socore $NSM_PATH
  LOG_PATH=/opt/so/log/suricata/import/${HASH}
  mkdir -p $LOG_PATH
  chown suricata:suricata $LOG_PATH
  docker run --rm \
    -v /opt/so/conf/suricata/suricata.yaml:/etc/suricata/suricata.yaml:ro \
    -v /opt/so/conf/suricata/threshold.conf:/etc/suricata/threshold.conf:ro \
    -v /opt/so/conf/suricata/rules:/etc/suricata/rules:ro \
    -v ${LOG_PATH}:/var/log/suricata/:rw \
    -v ${NSM_PATH}/:/nsm/:rw \
    -v "$PCAP:/input.pcap:ro" \
    -v /opt/so/conf/suricata/bpf:/etc/suricata/bpf:ro \
    {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-suricata:{{ VERSION }} \
    --runmode single -k none -r /input.pcap > $LOG_PATH/console.log 2>&1
}
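
# Suricata flags: --runmode single processes the pcap in a single thread, -k none
# disables checksum validation (replayed captures often carry bad checksums), and
# -r reads packets from the mounted file instead of a live interface.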

function zeek() {
  PCAP=$1
  HASH=$2

  NSM_PATH=/nsm/import/${HASH}/zeek
  mkdir -p $NSM_PATH/logs
  mkdir -p $NSM_PATH/extracted
  mkdir -p $NSM_PATH/spool
  chown -R zeek:socore $NSM_PATH
  docker run --rm \
    -v $NSM_PATH/logs:/nsm/zeek/logs:rw \
    -v $NSM_PATH/spool:/nsm/zeek/spool:rw \
    -v $NSM_PATH/extracted:/nsm/zeek/extracted:rw \
    -v "$PCAP:/input.pcap:ro" \
    -v /opt/so/conf/zeek/local.zeek:/opt/zeek/share/zeek/site/local.zeek:ro \
    -v /opt/so/conf/zeek/node.cfg:/opt/zeek/etc/node.cfg:ro \
    -v /opt/so/conf/zeek/zeekctl.cfg:/opt/zeek/etc/zeekctl.cfg:ro \
    -v /opt/so/conf/zeek/policy/securityonion:/opt/zeek/share/zeek/policy/securityonion:ro \
    -v /opt/so/conf/zeek/policy/custom:/opt/zeek/share/zeek/policy/custom:ro \
    -v /opt/so/conf/zeek/policy/cve-2020-0601:/opt/zeek/share/zeek/policy/cve-2020-0601:ro \
    -v /opt/so/conf/zeek/policy/intel:/opt/zeek/share/zeek/policy/intel:rw \
    -v /opt/so/conf/zeek/bpf:/opt/zeek/etc/bpf:ro \
    --entrypoint /opt/zeek/bin/zeek \
    -w /nsm/zeek/logs \
    {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-zeek:{{ VERSION }} \
    -C -r /input.pcap local > $NSM_PATH/logs/console.log 2>&1
}
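
# Zeek flags: -C ignores invalid TCP checksums, -r replays the mounted pcap, and
# the trailing "local" argument loads the local.zeek site policy mounted above.
# docker's -w sets the working directory so Zeek writes its logs to /nsm/zeek/logs.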

# if no parameters supplied, display usage
if [ $# -eq 0 ]; then
  usage
  exit 1
fi

# ensure this is a sensor node
if [ ! -d /opt/so/conf/suricata ]; then
  echo "This command must be run on a sensor node."
  exit 3
fi

# verify that all parameters are files
for i in "$@"; do
  if ! [ -f "$i" ]; then
    usage
    echo "\"$i\" is not a valid file!"
    exit 2
  fi
done

# track if we have any valid or invalid pcaps
INVALID_PCAPS="no"
VALID_PCAPS="no"

# track oldest start and newest end so that we can generate the Kibana search hyperlink at the end
START_OLDEST="2050-12-31"
END_NEWEST="1971-01-01"

# paths must be quoted in case they include spaces
for PCAP in "$@"; do
  PCAP=$(/usr/bin/realpath "$PCAP")
  echo "Processing Import: ${PCAP}"
  echo "- verifying file"
  if ! pcapinfo "${PCAP}" > /dev/null 2>&1; then
    # try to fix pcap and then process the fixed pcap directly
    PCAP_FIXED=$(mktemp /tmp/so-import-pcap-XXXXXXXXXX.pcap)
    echo "- attempting to recover corrupted PCAP file"
    pcapfix "${PCAP}" "${PCAP_FIXED}"
    # make the fixed file world-readable since the Suricata docker container runs as a non-root user
    chmod a+r "${PCAP_FIXED}"
    PCAP="${PCAP_FIXED}"
    TEMP_PCAPS+=("${PCAP_FIXED}")
  fi
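
  # Repaired copies are tracked in TEMP_PCAPS so they can be deleted after the
  # import loop finishes; the original on-disk file is never modified.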

  # generate a unique hash to assist with dedupe checks
  HASH=$(md5sum "${PCAP}" | awk '{ print $1 }')
  HASH_DIR=/nsm/import/${HASH}
  echo "- assigning unique identifier to import: $HASH"

  if [ -d $HASH_DIR ]; then
    echo "- this PCAP has already been imported; skipping"
    INVALID_PCAPS="yes"
  elif pcapinfo "${PCAP}" | grep -E -q "Last packet time: 1970-01-01|Last packet time: n/a"; then
    echo "- this PCAP file is invalid; skipping"
    INVALID_PCAPS="yes"
  else
    VALID_PCAPS="yes"

    PCAP_DIR=$HASH_DIR/pcap
    mkdir -p $PCAP_DIR

    # generate IDS alerts and write them to standard pipeline
    echo "- analyzing traffic with Suricata"
    suricata "${PCAP}" $HASH
{% if salt['pillar.get']('global:mdengine') == 'ZEEK' %}
    # generate Zeek logs and write them to a unique subdirectory in /nsm/import/zeek/
    # since each run writes to a unique subdirectory, there is no need for a lock file
    echo "- analyzing traffic with Zeek"
    zeek "${PCAP}" $HASH
{% endif %}

    START=$(pcapinfo "${PCAP}" -a | grep "First packet time:" | awk '{print $4}')
    END=$(pcapinfo "${PCAP}" -e | grep "Last packet time:" | awk '{print $4}')
    echo "- saving PCAP data spanning dates $START through $END"

    # compare $START to $START_OLDEST
    START_COMPARE=$(date -d $START +%s)
    START_OLDEST_COMPARE=$(date -d $START_OLDEST +%s)
    if [ $START_COMPARE -lt $START_OLDEST_COMPARE ]; then
      START_OLDEST=$START
    fi

    # compare $ENDNEXT to $END_NEWEST
    ENDNEXT=$(date +%Y-%m-%d --date="$END 1 day")
    ENDNEXT_COMPARE=$(date -d $ENDNEXT +%s)
    END_NEWEST_COMPARE=$(date -d $END_NEWEST +%s)
    if [ $ENDNEXT_COMPARE -gt $END_NEWEST_COMPARE ]; then
      END_NEWEST=$ENDNEXT
    fi

    cp -f "${PCAP}" "${PCAP_DIR}"/data.pcap
    chmod 644 "${PCAP_DIR}"/data.pcap

  fi # end of valid pcap

  echo

done # end of for-loop processing pcap files

# remove temp files
echo "Cleaning up:"
for TEMP_PCAP in "${TEMP_PCAPS[@]}"; do
  echo "- removing temporary pcap $TEMP_PCAP"
  rm -f "$TEMP_PCAP"
done

# output final messages
if [ "$INVALID_PCAPS" = "yes" ]; then
  echo
  echo "Please note! One or more pcap files were invalid! You can scroll up to see which ones were invalid."
fi

START_OLDEST_SLASH=$(echo $START_OLDEST | sed -e 's/-/%2F/g')
END_NEWEST_SLASH=$(echo $END_NEWEST | sed -e 's/-/%2F/g')

if [ "$VALID_PCAPS" = "yes" ]; then
  cat << EOF

Import complete!

You can use the following hyperlink to view data in the time range of your import. You can triple-click to quickly highlight the entire hyperlink and then copy it into your browser:
https://{{ URLBASE }}/#/dashboards?q=import.id:${HASH}%20%7C%20groupby%20-sankey%20event.dataset%20event.category%2a%20%7C%20groupby%20-pie%20event.category%20%7C%20groupby%20-bar%20event.module%20%7C%20groupby%20event.dataset%20%7C%20groupby%20event.module%20%7C%20groupby%20event.category%20%7C%20groupby%20observer.name%20%7C%20groupby%20source.ip%20%7C%20groupby%20destination.ip%20%7C%20groupby%20destination.port&t=${START_OLDEST_SLASH}%2000%3A00%3A00%20AM%20-%20${END_NEWEST_SLASH}%2000%3A00%3A00%20AM&z=UTC

or you can manually set your Time Range to be (in UTC):
From: $START_OLDEST To: $END_NEWEST

Please note that it may take 30 seconds or more for events to appear in Security Onion Console.
EOF
fi
@@ -1,109 +0,0 @@
#!/bin/bash

# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.


. /usr/sbin/so-common

appliance_check() {
{%- if salt['grains.get']('sosmodel', '') %}
  APPLIANCE=1
{%- if grains['sosmodel'] in ['SO2AMI01', 'SO2GCI01', 'SO2AZI01'] %}
  exit 0
{%- endif %}
  DUDEYOUGOTADELL=$(dmidecode | grep Dell)
  if [[ -n $DUDEYOUGOTADELL ]]; then
    APPTYPE=dell
  else
    APPTYPE=sm
  fi
  mkdir -p /opt/so/log/raid

{%- else %}
  echo "This is not an appliance"
  exit 0
{%- endif %}
}
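
# appliance_check sorts appliances into types: the cloud images (SO2AMI01, SO2GCI01,
# SO2AZI01) exit immediately since they have no RAID to monitor, and dmidecode output
# separates Dell hardware from the remaining appliances (presumably SuperMicro, given
# APPTYPE=sm).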

check_nsm_raid() {
  PERCCLI=$(/opt/raidtools/perccli/perccli64 /c0/v0 show | grep RAID | grep Optl)
  MEGACTL=$(/opt/raidtools/megasasctl | grep optimal)

  if [[ $APPLIANCE == '1' ]]; then
    if [[ -n $PERCCLI ]]; then
      HWRAID=0
    elif [[ -n $MEGACTL ]]; then
      HWRAID=0
    else
      HWRAID=1
    fi
  fi
}
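
# HWRAID=0 means the storage array reports healthy: perccli shows the virtual disk
# as "Optl" (optimal) on PERC controllers, and megasasctl reports "optimal" on the
# MegaRAID-based appliances. Anything else marks the array degraded (HWRAID=1).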

check_boss_raid() {
  MVCLI=$(/usr/local/bin/mvcli info -o vd | grep status | grep functional)

  if [[ -n $DUDEYOUGOTADELL ]]; then
    if [[ -n $MVCLI ]]; then
      BOSSRAID=0
    else
      BOSSRAID=1
    fi
  fi
}

check_software_raid() {
  if [[ -n $DUDEYOUGOTADELL ]]; then
    SWRC=$(grep "_" /proc/mdstat)

    if [[ -n $SWRC ]]; then
      # RAID is failed in some way
      SWRAID=1
    else
      SWRAID=0
    fi
  fi
}
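
# /proc/mdstat shows md member status as, e.g., [UU] for a healthy two-disk mirror;
# a failed member appears as "_" (e.g. [U_]), so any underscore indicates a degraded
# software RAID array.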

# This script checks RAID status on SO appliances

# See if this is an appliance
appliance_check
check_nsm_raid
check_boss_raid
{%- if salt['grains.get']('sosmodel', '') %}
{%- if grains['sosmodel'] in ['SOSMN', 'SOSSNNV'] %}
check_software_raid
{%- endif %}
{%- endif %}
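
# Software RAID is only checked on the SOSMN and SOSSNNV models, which appear to
# use Linux md (software) RAID rather than relying solely on a hardware controller.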

if [[ -n $SWRAID ]]; then
  if [[ $SWRAID == '0' && $BOSSRAID == '0' ]]; then
    RAIDSTATUS=0
  else
    RAIDSTATUS=1
  fi
elif [[ -n $DUDEYOUGOTADELL ]]; then
  if [[ $BOSSRAID == '0' && $HWRAID == '0' ]]; then
    RAIDSTATUS=0
  else
    RAIDSTATUS=1
  fi
elif [[ "$APPTYPE" == 'sm' ]]; then
  if [[ "$HWRAID" == '0' ]]; then
    RAIDSTATUS=0
  else
    RAIDSTATUS=1
  fi
fi

echo "nsmraid=$RAIDSTATUS" > /opt/so/log/raid/status.log