Merge branch 'dev' into feature/setup

This commit is contained in:
William Wernert
2021-01-14 13:06:32 -05:00
35 changed files with 1759 additions and 112 deletions

View File

@@ -157,9 +157,7 @@ set_version() {
}
require_manager() {
# Check to see if this is a manager
MANAGERCHECK=$(cat /etc/salt/grains | grep role | awk '{print $2}')
if [ $MANAGERCHECK == 'so-eval' ] || [ $MANAGERCHECK == 'so-manager' ] || [ $MANAGERCHECK == 'so-managersearch' ] || [ $MANAGERCHECK == 'so-standalone' ] || [ $MANAGERCHECK == 'so-helix' ] || [ $MANAGERCHECK == 'so-import' ]; then
if is_manager; then
echo "This is a manager, We can proceed."
else
echo "Please run this command on the manager; the manager controls the grid."
@@ -167,12 +165,32 @@ require_manager() {
fi
}
is_manager() {
# Check to see if this is a manager node
role=$(lookup_role)
is_single_node_grid && return 0
[ $role == 'manager' ] && return 0
[ $role == 'managersearch' ] && return 0
[ $role == 'helix' ] && return 0
return 1
}
is_sensor() {
# Check to see if this is a sensor (forward) node
role=$(lookup_role)
is_single_node_grid && return 0
[ $role == 'sensor' ] && return 0
[ $role == 'heavynode' ] && return 0
[ $role == 'helix' ] && return 0
return 1
}
is_single_node_grid() {
role=$(lookup_role)
if [ "$role" != "eval" ] && [ "$role" != "standalone" ] && [ "$role" != "import" ]; then
return 1
fi
return 0
[ $role == 'eval' ] && return 0
[ $role == 'standalone' ] && return 0
[ $role == 'import' ] && return 0
return 1
}
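# A minimal usage sketch of the helpers above (hypothetical caller, not part of
# this change): scripts can branch on node type instead of grepping grains directly.
#   if is_manager; then
#     echo "running manager-only step"
#   elif is_sensor; then
#     echo "running sensor-only step"
#   fi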
fail() {
@@ -187,6 +205,34 @@ get_random_value() {
head -c 5000 /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w $length | head -n 1
}
retry() {
maxAttempts=$1
sleepDelay=$2
cmd=$3
expectedOutput=$4
attempt=0
while [[ $attempt -lt $maxAttempts ]]; do
attempt=$((attempt+1))
info "Executing command with retry support: $cmd"
output=$($cmd)
# capture the command's exit code immediately, before info() overwrites $?
exitcode=$?
info "Results: $output"
if [ -n "$expectedOutput" ]; then
if [[ "$output" =~ "$expectedOutput" ]]; then
return $exitcode
else
info "Expected '$expectedOutput' but got '$output'"
fi
elif [[ $exitcode -eq 0 ]]; then
return $exitcode
fi
info "Command failed with exit code $exitcode; will retry in $sleepDelay seconds ($attempt / $maxAttempts)..."
sleep $sleepDelay
done
error "Command continues to fail; giving up."
return 1
}
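# Example invocation (hypothetical command and expected output): retry "docker ps"
# up to 3 times, 5 seconds apart, until the output contains "so-nginx".
#   retry 3 5 "docker ps" "so-nginx" || fail "container never appeared"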
wait_for_apt() {
local progress_callback=$1

View File

@@ -20,10 +20,25 @@ if ! [ "$(id -u)" = 0 ]; then
exit 1
fi
display_help() {
cat <<HELP_USAGE
$0 [-h] [-q|--quiet]
-h Show this message.
-q|--quiet Suppress output and return only a single
status code representing overall status:
0:Ok, 1:Error, 2:Starting/Pending, 99:Installing SO
HELP_USAGE
}
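# Example (hypothetical wrapper; script name assumed from the so-status.conf path
# used below): quiet mode is intended for scripted checks that branch on the exit
# codes documented above.
#   so-status -q
#   case $? in 0) echo "healthy";; 1) echo "error";; 2) echo "starting";; 99) echo "installing";; esac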
# Constants
QUIET=false
EXITCODE=0
SYSTEM_START_TIME=$(date -d "$(</proc/uptime awk '{print $1}') seconds ago" +%s)
# file populated by salt.lasthighstate state at end of successful highstate run
LAST_HIGHSTATE_END=$([ -e "/opt/so/log/salt/lasthighstate" ] && date -r /opt/so/log/salt/lasthighstate +%s || echo 0)
LAST_SOSETUP_LOG=$([ -e "/root/sosetup.log" ] && date -r /root/sosetup.log +%s || echo 0)
HIGHSTATE_RUNNING=$(salt-call --local saltutil.running --out=json | jq -r '.local[].fun' | grep -q 'state.highstate' && echo $?)
ERROR_STRING="ERROR"
SUCCESS_STRING="OK"
@@ -81,7 +96,7 @@ compare_lists() {
create_expected_container_list() {
mapfile -t expected_container_list < <(sort -u /opt/so/conf/so-status/so-status.conf | tr -d "#")
mapfile -t expected_container_list < <(sort -u /opt/so/conf/so-status/so-status.conf | tr -d "#")
}
@@ -111,43 +126,43 @@ populate_container_lists() {
}
parse_status() {
local container_state=${1}
local service_name=${2}
local service_name=${1}
local container_state=${2}
for state in "${GOOD_STATUSES[@]}"; do
[[ $container_state = "$state" ]] && printf $SUCCESS_STRING && return 0
[[ $container_state = "$state" ]] && [[ $QUIET = "false" ]] && printf $SUCCESS_STRING && return 0 || [[ $container_state = "$state" ]] && return 0
done
for state in "${BAD_STATUSES[@]}"; do
[[ " ${DISABLED_CONTAINERS[@]} " =~ " ${service_name} " ]] && printf $DISABLED_STRING && return 0
[[ " ${DISABLED_CONTAINERS[@]} " =~ " ${service_name} " ]] && [[ $QUIET = "false" ]] && printf $DISABLED_STRING && return 0 || [[ " ${DISABLED_CONTAINERS[@]} " =~ " ${service_name} " ]] && return 0
done
# if a highstate has finished running since the system has started
# then the containers should be running so let's check the status
if [ $LAST_HIGHSTATE_END -ge $SYSTEM_START_TIME ]; then
[[ $container_state = "missing" ]] && printf $MISSING_STRING && return 1
[[ $container_state = "missing" ]] && [[ $QUIET = "false" ]] && printf $MISSING_STRING && return 1 || [[ $container_state = "missing" ]] && [[ "$EXITCODE" -lt 2 ]] && EXITCODE=1 && return 1
for state in "${PENDING_STATUSES[@]}"; do
[[ $container_state = "$state" ]] && printf $PENDING_STRING && return 0
[[ $container_state = "$state" ]] && [[ $QUIET = "false" ]] && printf $PENDING_STRING && return 0
done
# This is technically not needed since the default is error state
for state in "${BAD_STATUSES[@]}"; do
[[ $container_state = "$state" ]] && printf $ERROR_STRING && return 1
[[ $container_state = "$state" ]] && [[ $QUIET = "false" ]] && printf $ERROR_STRING && return 1 || [[ $container_state = "$state" ]] && [[ "$EXITCODE" -lt 2 ]] && EXITCODE=1 && return 1
done
printf $ERROR_STRING && return 1
[[ $QUIET = "false" ]] && printf $ERROR_STRING && return 1 || [[ "$EXITCODE" -lt 2 ]] && EXITCODE=1 && return 1
# if a highstate has not run since system start time, but a highstate is currently running
# then show that the containers are STARTING
elif [[ "$HIGHSTATE_RUNNING" == 0 ]]; then
printf $STARTING_STRING && return 0
[[ $QUIET = "false" ]] && printf $STARTING_STRING && return 2 || EXITCODE=2 && return 2
# if a highstate has not finished running since system startup and isn't currently running
# then just show that the containers are WAIT_START; waiting to be started
else
printf $WAIT_START_STRING && return 1
[[ $QUIET = "false" ]] && printf $WAIT_START_STRING && return 2 || EXITCODE=2 && return 2
fi
}
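# Sketch of how parse_status is consumed (container name and state are illustrative,
# and "running" is assumed to be among GOOD_STATUSES, which is defined outside this
# hunk): it prints a status string when QUIET=false and otherwise only sets EXITCODE.
#   state_string=$(parse_status "so-nginx" "running")   # prints OK when not quiet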
@@ -156,18 +171,22 @@ parse_status() {
print_line() {
local service_name=${1}
local service_state="$( parse_status ${2} ${1} )"
local service_state="$( parse_status ${1} ${2} )"
local columns=$(tput cols)
local state_color="\e[0m"
local PADDING_CONSTANT=15
if [[ $service_state = "$ERROR_STRING" ]] || [[ $service_state = "$MISSING_STRING" ]] || [[ $service_state = "$WAIT_START_STRING" ]]; then
if [[ $service_state = "$ERROR_STRING" ]] || [[ $service_state = "$MISSING_STRING" ]]; then
state_color="\e[1;31m"
if [[ "$EXITCODE" -eq 0 ]]; then
EXITCODE=1
fi
elif [[ $service_state = "$SUCCESS_STRING" ]]; then
state_color="\e[1;32m"
elif [[ $service_state = "$PENDING_STRING" ]] || [[ $service_state = "$DISABLED_STRING" ]] || [[ $service_state = "$STARTING_STRING" ]]; then
elif [[ $service_state = "$PENDING_STRING" ]] || [[ $service_state = "$DISABLED_STRING" ]] || [[ $service_state = "$STARTING_STRING" ]] || [[ $service_state = "$WAIT_START_STRING" ]]; then
state_color="\e[1;33m"
EXITCODE=2
fi
printf " $service_name "
@@ -181,7 +200,15 @@ print_line() {
non_term_print_line() {
local service_name=${1}
local service_state="$( parse_status ${2} ${1} )"
local service_state="$( parse_status ${1} ${2} )"
if [[ $service_state = "$ERROR_STRING" ]] || [[ $service_state = "$MISSING_STRING" ]]; then
if [[ "$EXITCODE" -eq 0 ]]; then
EXITCODE=1
fi
elif [[ $service_state = "$PENDING_STRING" ]] || [[ $service_state = "$DISABLED_STRING" ]] || [[ $service_state = "$STARTING_STRING" ]] || [[ $service_state = "$WAIT_START_STRING" ]]; then
EXITCODE=2
fi
printf " $service_name "
for i in $(seq 0 $(( 35 - ${#service_name} - ${#service_state} ))); do
@@ -218,37 +245,67 @@ main() {
done
printf "\n"
# else if running from a terminal
else
local focus_color="\e[1;34m"
printf "\n"
printf "${focus_color}%b\e[0m" "Checking Docker status\n\n"
if [ "$QUIET" = true ]; then
if [ $SYSTEM_START_TIME -lt $LAST_SOSETUP_LOG ]; then
exit 99
fi
print_or_parse="parse_status"
else
print_or_parse="print_line"
local focus_color="\e[1;34m"
printf "\n"
printf "${focus_color}%b\e[0m" "Checking Docker status\n\n"
fi
systemctl is-active --quiet docker
if [[ $? = 0 ]]; then
print_line "Docker" "running"
${print_or_parse} "Docker" "running"
else
print_line "Docker" "exited"
${print_or_parse} "Docker" "exited"
fi
populate_container_lists
printf "\n"
printf "${focus_color}%b\e[0m" "Checking container statuses\n\n"
if [ "$QUIET" = false ]; then
printf "\n"
printf "${focus_color}%b\e[0m" "Checking container statuses\n\n"
fi
local num_containers=${#container_name_list[@]}
for i in $(seq 0 $(($num_containers - 1 ))); do
print_line ${container_name_list[$i]} ${container_state_list[$i]}
${print_or_parse} ${container_name_list[$i]} ${container_state_list[$i]}
done
printf "\n"
if [ "$QUIET" = false ]; then
printf "\n"
fi
fi
}
# {% endraw %}
while getopts ':hq' OPTION; do
case "$OPTION" in
h)
display_help
exit 0
;;
q)
QUIET=true
;;
\?)
display_help
exit 0
;;
esac
done
main
main
exit $EXITCODE

View File

@@ -47,13 +47,26 @@ if ! docker ps | grep -q so-tcpreplay; then
echo "Replay functionality not enabled; attempting to enable now (may require Internet access)..."
echo
TRUSTED_CONTAINERS=("so-tcpreplay")
mkdir -p /opt/so/log/tcpreplay
update_docker_containers "tcpreplay" "" "" "/opt/so/log/tcpreplay/init.log"
so-tcpreplay-start || fail "Unable to initialize tcpreplay"
if is_manager; then
TRUSTED_CONTAINERS=("so-tcpreplay")
mkdir -p /opt/so/log/tcpreplay
update_docker_containers "tcpreplay" "" "" "/opt/so/log/tcpreplay/init.log"
fi
if is_sensor; then
if ! is_manager; then
echo "Attempting to start replay container. If this fails then you may need to run this command on the manager first."
fi
so-tcpreplay-start || fail "Unable to initialize tcpreplay"
fi
fi
echo "Replaying PCAP(s) at ${REPLAYSPEED} Mbps on interface ${REPLAYIFACE}..."
docker exec so-tcpreplay /usr/bin/bash -c "/usr/local/bin/tcpreplay -i ${REPLAYIFACE} -M${REPLAYSPEED} $@"
if is_sensor; then
echo "Replaying PCAP(s) at ${REPLAYSPEED} Mbps on interface ${REPLAYIFACE}..."
docker exec so-tcpreplay /usr/bin/bash -c "/usr/local/bin/tcpreplay -i ${REPLAYIFACE} -M${REPLAYSPEED} $@"
echo "Replay completed. Warnings shown above are typically expected."
echo "Replay completed. Warnings shown above are typically expected."
elif is_manager; then
echo "The sensor nodes in this grid can now replay traffic."
else
echo "Unable to replay traffic since this node is not a sensor node."
fi
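# For reference, the replay command above uses standard tcpreplay flags
# (-i interface, -M throughput in Mbps); a worked example with assumed values:
#   tcpreplay -i bond0 -M10 /opt/samples/example.pcap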

View File

@@ -1,10 +1,12 @@
{%- set interface = salt['pillar.get']('sensor:interface', 'bond0') %}
{%- set diskfreepercentage = salt['pillar.get']('steno:diskfreepercentage', 10) %}
{
"Threads": [
{ "PacketsDirectory": "/nsm/pcap"
, "IndexDirectory": "/nsm/pcapindex"
, "MaxDirectoryFiles": 30000
, "DiskFreePercentage": 10
, "DiskFreePercentage": {{ diskfreepercentage }}
}
]
, "StenotypePath": "/usr/bin/stenotype"
@@ -13,4 +15,4 @@
, "Host": "127.0.0.1"
, "Flags": ["-v", "--uid=stenographer", "--gid=stenographer"{{ BPF_COMPILED }}]
, "CertPath": "/etc/stenographer/certs"
}
}
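The DiskFreePercentage value now comes from the steno:diskfreepercentage pillar key (defaulting to 10). One way to confirm what a minion will render, assuming a standard salt-call setup:

    salt-call pillar.get steno:diskfreepercentage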

View File

@@ -1,4 +1,4 @@
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -17,8 +17,8 @@
{% if 'strelka' in top_states %}
{%- set MANAGER = salt['grains.get']('master') %}
{%- set MANAGERIP = salt['pillar.get']('global:managerip', '') %}
{% set MANAGER = salt['grains.get']('master') %}
{% set MANAGERIP = salt['pillar.get']('global:managerip', '') %}
{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{% set STRELKA_RULES = salt['pillar.get']('strelka:rules', '1') %}
@@ -47,7 +47,7 @@ strelkasync:
- group: 939
- template: jinja
{%- if STRELKA_RULES == 1 %}
{% if STRELKA_RULES == 1 %}
strelkarules:
file.recurse:
@@ -56,13 +56,15 @@ strelkarules:
- user: 939
- group: 939
{% if grains['role'] in ['so-eval','so-managersearch', 'so-manager', 'so-standalone', 'so-import'] %}
strelkarepos:
file.managed:
- name: /opt/so/saltstack/default/salt/strelka/rules/repos.txt
- source: salt://strelka/rules/repos.txt.jinja
- template: jinja
{%- endif %}
{% endif %}
{% endif %}
strelkadatadir:
file.directory:

View File

@@ -33,20 +33,20 @@ if [ $CHECKIT == 2 ]; then
CURRENTDROP=${RESULT[4]}
PASTDROP=${RESULT[14]}
DROPPED=$(($CURRENTDROP - $PASTDROP))
DROPPED=$((CURRENTDROP - PASTDROP))
if [ $DROPPED == 0 ]; then
LOSS=0
echo "suridrop drop=0"
else
CURRENTPACKETS=${RESULT[9]}
PASTPACKETS=${RESULT[19]}
TOTALCURRENT=$(($CURRENTPACKETS + $CURRENTDROP))
TOTALPAST=$(($PASTPACKETS + $PASTDROP))
TOTAL=$(($TOTALCURRENT - $TOTALPAST))
TOTALCURRENT=$((CURRENTPACKETS + CURRENTDROP))
TOTALPAST=$((PASTPACKETS + PASTDROP))
TOTAL=$((TOTALCURRENT - TOTALPAST))
LOSS=$(echo 4 k $DROPPED $TOTAL / p | dc)
echo "suridrop drop=$LOSS"
fi
else
echo "suridrop drop=0"
fi
fi

View File

@@ -29,15 +29,22 @@ echo $$ > $lf
ZEEKLOG=$(tac /host/nsm/zeek/logs/packetloss.log | head -2)
declare RESULT=($ZEEKLOG)
CURRENTDROP=${RESULT[3]}
PASTDROP=${RESULT[9]}
DROPPED=$((CURRENTDROP - PASTDROP))
if [ $DROPPED == 0 ]; then
# zeek likely not running if this is true
if [[ $CURRENTDROP == "rcvd:" ]]; then
CURRENTDROP=0
PASTDROP=0
DROPPED=0
else
PASTDROP=${RESULT[9]}
DROPPED=$((CURRENTDROP - PASTDROP))
fi
if [[ "$DROPPED" -le 0 ]]; then
LOSS=0
echo "zeekdrop drop=0"
else
CURRENTPACKETS=${RESULT[5]}
PASTPACKETS=${RESULT[11]}
TOTAL=$((CURRENTPACKETS - PASTPACKETS))
LOSS=$(echo $DROPPED $TOTAL / p | dc)
LOSS=$(echo 4 k $DROPPED $TOTAL / p | dc)
echo "zeekdrop drop=$LOSS"
fi
fi
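# Why the "4 k" was added to the dc pipeline above: dc defaults to zero decimal
# places, so a fractional drop ratio prints as 0. Worked example with assumed
# counts (150 dropped of 100000 processed):
#   echo 150 100000 / p | dc       # prints 0
#   echo 4 k 150 100000 / p | dc   # prints .0015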