m0duspwnens
2021-01-07 15:12:36 -05:00
12 changed files with 156 additions and 55 deletions

View File

@@ -20,10 +20,25 @@ if ! [ "$(id -u)" = 0 ]; then
exit 1
fi
display_help() {
cat <<HELP_USAGE
$0 [-h] [-q|--quiet]
-h Show this message.
-q|--quiet Suppress the output and only return a
single status code for overall status
0:Ok, 1:Error, 2:Starting/Pending, 99:Installing SO
HELP_USAGE
}
# Constants
QUIET=false
EXITCODE=0
SYSTEM_START_TIME=$(date -d "$(</proc/uptime awk '{print $1}') seconds ago" +%s)
# file populated by salt.lasthighstate state at end of successful highstate run
LAST_HIGHSTATE_END=$([ -e "/opt/so/log/salt/lasthighstate" ] && date -r /opt/so/log/salt/lasthighstate +%s || echo 0)
LAST_SOSETUP_LOG=$([ -e "/root/sosetup.log" ] && date -r /root/sosetup.log +%s || echo 0)
HIGHSTATE_RUNNING=$(salt-call --local saltutil.running --out=json | jq -r '.local[].fun' | grep -q 'state.highstate' && echo $?)
ERROR_STRING="ERROR"
SUCCESS_STRING="OK"
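The new constants boil down to two epoch timestamps: when the box booted and when the last successful highstate finished. Comparing them tells the script whether containers should already be running. A quick interactive sketch of the same calls (GNU date assumed, as in the script itself):
awk '{print $1}' /proc/uptime                                      # seconds since boot
date -d "$(awk '{print $1}' /proc/uptime) seconds ago" +%s         # boot time as epoch seconds
date -r /opt/so/log/salt/lasthighstate +%s 2>/dev/null || echo 0   # mtime of the highstate marker, 0 if it has never run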
@@ -81,7 +96,7 @@ compare_lists() {
create_expected_container_list() {
mapfile -t expected_container_list < <(sort -u /opt/so/conf/so-status/so-status.conf | tr -d "#")
mapfile -t expected_container_list < <(sort -u /opt/so/conf/so-status/so-status.conf | tr -d "#")
}
@@ -111,43 +126,43 @@ populate_container_lists() {
}
parse_status() {
local container_state=${1}
local service_name=${2}
local service_name=${1}
local container_state=${2}
for state in "${GOOD_STATUSES[@]}"; do
[[ $container_state = "$state" ]] && printf $SUCCESS_STRING && return 0
[[ $container_state = "$state" ]] && [[ $QUIET = "false" ]] && printf $SUCCESS_STRING && return 0 || [[ $container_state = "$state" ]] && return 0
done
for state in "${BAD_STATUSES[@]}"; do
[[ " ${DISABLED_CONTAINERS[@]} " =~ " ${service_name} " ]] && printf $DISABLED_STRING && return 0
[[ " ${DISABLED_CONTAINERS[@]} " =~ " ${service_name} " ]] && [[ $QUIET = "false" ]] && printf $DISABLED_STRING && return 0 || [[ " ${DISABLED_CONTAINERS[@]} " =~ " ${service_name} " ]] && return 0
done
# if a highstate has finished running since the system has started
# then the containers should be running so let's check the status
if [ $LAST_HIGHSTATE_END -ge $SYSTEM_START_TIME ]; then
[[ $container_state = "missing" ]] && printf $MISSING_STRING && return 1
[[ $container_state = "missing" ]] && [[ $QUIET = "false" ]] && printf $MISSING_STRING && return 1 || [[ $container_state = "missing" ]] && [[ "$EXITCODE" -lt 2 ]] && EXITCODE=1 && return 1
for state in "${PENDING_STATUSES[@]}"; do
[[ $container_state = "$state" ]] && printf $PENDING_STRING && return 0
[[ $container_state = "$state" ]] && [[ $QUIET = "false" ]] && printf $PENDING_STRING && return 0
done
# This is technically not needed since the default is error state
for state in "${BAD_STATUSES[@]}"; do
[[ $container_state = "$state" ]] && printf $ERROR_STRING && return 1
[[ $container_state = "$state" ]] && [[ $QUIET = "false" ]] && printf $ERROR_STRING && return 1 || [[ $container_state = "$state" ]] && [[ "$EXITCODE" -lt 2 ]] && EXITCODE=1 && return 1
done
printf $ERROR_STRING && return 1
[[ $QUIET = "false" ]] && printf $ERROR_STRING && return 1 || [[ "$EXITCODE" -lt 2 ]] && EXITCODE=1 && return 1
# if a highstate has not run since system start time, but a highstate is currently running
# then show that the containers are STARTING
elif [[ "$HIGHSTATE_RUNNING" == 0 ]]; then
printf $STARTING_STRING && return 0
[[ $QUIET = "false" ]] && printf $STARTING_STRING && return 2 || EXITCODE=2 && return 2
# if a highstate has not finished running since system startup and isn't currently running
# then just show that the containers are WAIT_START; waiting to be started
else
printf $WAIT_START_STRING && return 1
[[ $QUIET = "false" ]] && printf $WAIT_START_STRING && return 2 || EXITCODE=2 && return 2
fi
}
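The quiet-mode branches rely on bash's left-to-right && / || chaining instead of an if/else block: when QUIET is false the status string is printed, otherwise only EXITCODE is raised. A minimal, self-contained illustration of that pattern (the values below are made up for the example, and the grouping braces are only for readability):
QUIET=true; EXITCODE=0; container_state="missing"
[[ $container_state = "missing" ]] && [[ $QUIET = "false" ]] && echo "MISSING" \
  || { [[ $container_state = "missing" ]] && [[ "$EXITCODE" -lt 2 ]] && EXITCODE=1; }
echo "EXITCODE=$EXITCODE"   # prints EXITCODE=1: the string is suppressed but the error still bubbles up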
@@ -156,18 +171,22 @@ parse_status() {
print_line() {
local service_name=${1}
local service_state="$( parse_status ${2} ${1} )"
local service_state="$( parse_status ${1} ${2} )"
local columns=$(tput cols)
local state_color="\e[0m"
local PADDING_CONSTANT=15
if [[ $service_state = "$ERROR_STRING" ]] || [[ $service_state = "$MISSING_STRING" ]] || [[ $service_state = "$WAIT_START_STRING" ]]; then
if [[ $service_state = "$ERROR_STRING" ]] || [[ $service_state = "$MISSING_STRING" ]]; then
state_color="\e[1;31m"
if [[ "$EXITCODE" -eq 0 ]]; then
EXITCODE=1
fi
elif [[ $service_state = "$SUCCESS_STRING" ]]; then
state_color="\e[1;32m"
elif [[ $service_state = "$PENDING_STRING" ]] || [[ $service_state = "$DISABLED_STRING" ]] || [[ $service_state = "$STARTING_STRING" ]]; then
elif [[ $service_state = "$PENDING_STRING" ]] || [[ $service_state = "$DISABLED_STRING" ]] || [[ $service_state = "$STARTING_STRING" ]] || [[ $service_state = "$WAIT_START_STRING" ]]; then
state_color="\e[1;33m"
EXITCODE=2
fi
printf " $service_name "
@@ -181,7 +200,15 @@ print_line() {
non_term_print_line() {
local service_name=${1}
local service_state="$( parse_status ${2} ${1} )"
local service_state="$( parse_status ${1} ${2} )"
if [[ $service_state = "$ERROR_STRING" ]] || [[ $service_state = "$MISSING_STRING" ]]; then
if [[ "$EXITCODE" -eq 0 ]]; then
EXITCODE=1
fi
elif [[ $service_state = "$PENDING_STRING" ]] || [[ $service_state = "$DISABLED_STRING" ]] || [[ $service_state = "$STARTING_STRING" ]] || [[ $service_state = "$WAIT_START_STRING" ]]; then
EXITCODE=2
fi
printf " $service_name "
for i in $(seq 0 $(( 35 - ${#service_name} - ${#service_state} ))); do
@@ -218,37 +245,67 @@ main() {
done
printf "\n"
# else if running from a terminal
else
local focus_color="\e[1;34m"
printf "\n"
printf "${focus_color}%b\e[0m" "Checking Docker status\n\n"
if [ "$QUIET" = true ]; then
if [ $SYSTEM_START_TIME -lt $LAST_SOSETUP_LOG ]; then
exit 99
fi
print_or_parse="parse_status"
else
print_or_parse="print_line"
local focus_color="\e[1;34m"
printf "\n"
printf "${focus_color}%b\e[0m" "Checking Docker status\n\n"
fi
systemctl is-active --quiet docker
if [[ $? = 0 ]]; then
print_line "Docker" "running"
${print_or_parse} "Docker" "running"
else
print_line "Docker" "exited"
${print_or_parse} "Docker" "exited"
fi
populate_container_lists
printf "\n"
printf "${focus_color}%b\e[0m" "Checking container statuses\n\n"
if [ "$QUIET" = false ]; then
printf "\n"
printf "${focus_color}%b\e[0m" "Checking container statuses\n\n"
fi
local num_containers=${#container_name_list[@]}
for i in $(seq 0 $(($num_containers - 1 ))); do
print_line ${container_name_list[$i]} ${container_state_list[$i]}
${print_or_parse} ${container_name_list[$i]} ${container_state_list[$i]}
done
printf "\n"
if [ "$QUIET" = false ]; then
printf "\n"
fi
fi
}
# {% endraw %}
while getopts ':hq' OPTION; do
case "$OPTION" in
h)
display_help
exit 0
;;
q)
QUIET=true
;;
\?)
display_help
exit 0
;;
esac
done
main
main
exit $EXITCODE
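With the new -q flag and the exit codes documented in display_help, the script becomes easy to poll from other tooling. A hedged sketch of a caller, assuming the script is installed as so-status (the wrapper itself is not part of this commit):
so-status -q
case $? in
  0)  echo "all services OK" ;;
  1)  echo "one or more services in an error state" ;;
  2)  echo "services starting or pending" ;;
  99) echo "Security Onion setup is still installing" ;;
  *)  echo "unexpected status" ;;
esac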

View File

@@ -4,6 +4,8 @@
{ "set": { "if": "ctx.winlog?.channel != null", "field": "event.module", "value": "windows_eventlog", "override": false, "ignore_failure": true } },
{ "set": { "if": "ctx.winlog?.channel != null", "field": "event.dataset", "value": "{{winlog.channel}}", "override": true } },
{ "set": { "if": "ctx.winlog?.computer_name != null", "field": "observer.name", "value": "{{winlog.computer_name}}", "override": true } },
{ "rename": { "if": "ctx.winlog?.systemTime != null", "field": "@timestamp", "target_field": "ingest.timestamp", "ignore_missing": true } },
{ "set": { "if": "ctx.winlog?.systemTime != null", "field": "@timestamp", "value": "{{winlog.systemTime}}", "override": true } },
{ "set": { "field": "event.code", "value": "{{winlog.event_id}}", "override": true } },
{ "set": { "field": "event.category", "value": "host", "override": true } },
{ "rename": { "field": "winlog.event_data.SubjectUserName", "target_field": "user.name", "ignore_failure": true, "ignore_missing": true } },

View File

@@ -96,6 +96,16 @@ wazuhmgrwhitelist:
- mode: 755
- template: jinja
# Reserve OS port for Wazuh API
wazuhreserveport:
cmd.run:
- name: grep -q 55000 /proc/sys/net/ipv4/ip_local_reserved_ports || sysctl -w net.ipv4.ip_local_reserved_ports="55000" > /dev/null && echo "55000" >> /proc/sys/net/ipv4/ip_local_reserved_ports
# Check to see if Wazuh API port is available
wazuhportavailable:
cmd.run:
- name: netstat -anp | grep 55000 | grep -qv docker && PROCESS=$(netstat -anp | grep 55000 | awk '{print $NF}' | uniq) && echo "Another process ($PROCESS) appears to be using port 55000. Please terminate this process, or reboot to ensure a clean state so that the Wazuh API can start properly." && exit 1 || exit 0
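Both checks can be run by hand if the Wazuh API refuses to start; a rough equivalent of what the two states above test (not part of the state file itself):
# Is 55000 reserved so the kernel will not hand it out as an ephemeral port?
grep -q 55000 /proc/sys/net/ipv4/ip_local_reserved_ports && echo "reserved" || echo "not reserved"
# Is anything other than a docker-proxy already bound to 55000?
netstat -anp | grep 55000 | grep -v docker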
so-wazuh:
docker_container.running:
- image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-wazuh:{{ VERSION }}
@@ -158,4 +168,4 @@ wazuh_state_not_allowed:
test.fail_without_changes:
- name: wazuh_state_not_allowed
{% endif %}
{% endif %}

View File

@@ -97,8 +97,6 @@ airgap_rules() {
# Don't leave Strelka out
cp -Rv /root/SecurityOnion/agrules/strelka /nsm/repo/rules/
}
analyze_system() {
@@ -116,13 +114,11 @@ accept_salt_key_remote() {
echo "Accept the key remotely on the manager" >> "$setup_log" 2>&1
# Delete the key just in case.
ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo salt-key -d "$MINION_ID" -y
$sshcmd -i /root/.ssh/so.key soremote@"$MSRV" sudo salt-key -d "$MINION_ID" -y
salt-call state.apply ca >> /dev/null 2>&1
ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo salt-key -a "$MINION_ID" -y
$sshcmd -i /root/.ssh/so.key soremote@"$MSRV" sudo salt-key -a "$MINION_ID" -y
}
add_admin_user() {
# Add an admin user with full sudo rights if this is an ISO install.
{
@@ -558,7 +554,7 @@ check_requirements() {
}
compare_versions() {
manager_ver=$(ssh -i /root/.ssh/so.key soremote@"$MSRV" cat /etc/soversion)
manager_ver=$("$sshcmd" -i /root/.ssh/so.key soremote@"$MSRV" cat /etc/soversion)
if [[ $manager_ver == "" ]]; then
rm /root/install_opt
@@ -671,6 +667,7 @@ copy_salt_master_config() {
}
copy_minion_tmp_files() {
case "$install_type" in
'MANAGER' | 'EVAL' | 'HELIXSENSOR' | 'MANAGERSEARCH' | 'STANDALONE' | 'IMPORT')
echo "Copying pillar and salt files in $temp_install_dir to $local_salt_dir"
@@ -682,15 +679,15 @@ copy_minion_tmp_files() {
*)
{
echo "scp pillar and salt files in $temp_install_dir to manager $local_salt_dir";
ssh -i /root/.ssh/so.key soremote@"$MSRV" mkdir -p /tmp/"$MINION_ID"/pillar;
ssh -i /root/.ssh/so.key soremote@"$MSRV" mkdir -p /tmp/"$MINION_ID"/schedules;
scp -prv -i /root/.ssh/so.key "$temp_install_dir"/pillar/minions/* soremote@"$MSRV":/tmp/"$MINION_ID"/pillar/;
$sshcmd -i /root/.ssh/so.key soremote@"$MSRV" mkdir -p /tmp/"$MINION_ID"/pillar;
$sshcmd -i /root/.ssh/so.key soremote@"$MSRV" mkdir -p /tmp/"$MINION_ID"/schedules;
$scpcmd -prv -i /root/.ssh/so.key "$temp_install_dir"/pillar/minions/* soremote@"$MSRV":/tmp/"$MINION_ID"/pillar/;
if [ -d $temp_install_dir/salt/patch/os/schedules/ ]; then
if [ "$(ls -A $temp_install_dir/salt/patch/os/schedules/)" ]; then
scp -prv -i /root/.ssh/so.key $temp_install_dir/salt/patch/os/schedules/* soremote@$MSRV:/tmp/$MINION_ID/schedules;
$scpcmd -prv -i /root/.ssh/so.key $temp_install_dir/salt/patch/os/schedules/* soremote@$MSRV:/tmp/$MINION_ID/schedules;
fi
fi
ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/salt/manager/files/add_minion.sh "$MINION_ID";
$sshcmd -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/salt/manager/files/add_minion.sh "$MINION_ID";
} >> "$setup_log" 2>&1
;;
esac
@@ -709,7 +706,7 @@ copy_ssh_key() {
echo "Copying the SSH key to the manager"
#Copy the key over to the manager
ssh-copy-id -f -i /root/.ssh/so.key soremote@"$MSRV"
$sshcopyidcmd -f -i /root/.ssh/so.key soremote@"$MSRV"
}
create_local_directories() {
@@ -974,11 +971,12 @@ docker_seed_registry() {
}
download_repo_tarball() {
mkdir -p /root/manager_setup/securityonion
{
local manager_ver
manager_ver=$(ssh -i /root/.ssh/so.key soremote@"$MSRV" cat /etc/soversion)
scp -i /root/.ssh/so.key soremote@"$MSRV":/opt/so/repo/"$manager_ver".tar.gz /root/manager_setup
manager_ver=$("$sshcmd" -i /root/.ssh/so.key soremote@"$MSRV" cat /etc/soversion)
$scpcmd -i /root/.ssh/so.key soremote@"$MSRV":/opt/so/repo/"$manager_ver".tar.gz /root/manager_setup
} >> "$setup_log" 2>&1
# Fail if the file doesn't download
@@ -1774,7 +1772,7 @@ saltify() {
# Copy down the gpg keys and install them from the manager
mkdir "$temp_install_dir"/gpg >> "$setup_log" 2>&1
echo "scp the gpg keys and install them from the manager" >> "$setup_log" 2>&1
scp -v -i /root/.ssh/so.key soremote@"$MSRV":/opt/so/gpg/* "$temp_install_dir"/gpg >> "$setup_log" 2>&1
$scpcmd -v -i /root/.ssh/so.key soremote@"$MSRV":/opt/so/gpg/* "$temp_install_dir"/gpg >> "$setup_log" 2>&1
echo "Using apt-key add to add SALTSTACK-GPG-KEY.pub and GPG-KEY-WAZUH" >> "$setup_log" 2>&1
apt-key add "$temp_install_dir"/gpg/SALTSTACK-GPG-KEY.pub >> "$setup_log" 2>&1
apt-key add "$temp_install_dir"/gpg/GPG-KEY-WAZUH >> "$setup_log" 2>&1
@@ -1976,6 +1974,20 @@ set_progress_str() {
"----" >> "$setup_log" 2>&1
}
set_ssh_cmds() {
local automated=$1
if [ $automated == yes ]; then
sshcmd='sshpass -p "automation" ssh -o StrictHostKeyChecking=no'
sshcopyidcmd='sshpass -p "automation" ssh-copy-id -o StrictHostKeyChecking=no'
scpcmd='sshpass -p "automation" scp -o StrictHostKeyChecking=no'
else
sshcmd='ssh'
sshcopyidcmd='ssh-copy-id'
scpcmd='scp'
fi
}
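Every remote call in this file now goes through these variables, so an automated test install swaps plain ssh/scp for sshpass-wrapped equivalents. Roughly how the same call expands in each mode (illustrative only; the password literal comes from the function above):
# automated=yes
sshpass -p "automation" ssh -o StrictHostKeyChecking=no -i /root/.ssh/so.key soremote@"$MSRV" cat /etc/soversion
# automated=no
ssh -i /root/.ssh/so.key soremote@"$MSRV" cat /etc/soversion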
sensor_pillar() {
local pillar_file=$temp_install_dir/pillar/minions/$MINION_ID.sls
@@ -2087,24 +2099,24 @@ set_initial_firewall_policy() {
$default_salt_dir/salt/common/tools/sbin/so-firewall --apply includehost sensor "$MAINIP"
;;
'SENSOR' | 'SEARCHNODE' | 'HEAVYNODE' | 'FLEET')
ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/salt/common/tools/sbin/so-firewall includehost minion "$MAINIP"
$sshcmd -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/salt/common/tools/sbin/so-firewall includehost minion "$MAINIP"
case "$install_type" in
'SENSOR')
ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/salt/common/tools/sbin/so-firewall --apply includehost sensor "$MAINIP"
ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/pillar/data/addtotab.sh sensorstab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm" "$INTERFACE"
$sshcmd -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/salt/common/tools/sbin/so-firewall --apply includehost sensor "$MAINIP"
$sshcmd -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/pillar/data/addtotab.sh sensorstab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm" "$INTERFACE"
;;
'SEARCHNODE')
ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/salt/common/tools/sbin/so-firewall --apply includehost search_node "$MAINIP"
ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/pillar/data/addtotab.sh nodestab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm"
$sshcmd -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/salt/common/tools/sbin/so-firewall --apply includehost search_node "$MAINIP"
$sshcmd -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/pillar/data/addtotab.sh nodestab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm"
;;
'HEAVYNODE')
ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/salt/common/tools/sbin/so-firewall includehost sensor "$MAINIP"
ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/salt/common/tools/sbin/so-firewall --apply includehost heavy_node "$MAINIP"
ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/pillar/data/addtotab.sh sensorstab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm" "$INTERFACE"
ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/pillar/data/addtotab.sh nodestab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm"
$sshcmd -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/salt/common/tools/sbin/so-firewall includehost sensor "$MAINIP"
$sshcmd -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/salt/common/tools/sbin/so-firewall --apply includehost heavy_node "$MAINIP"
$sshcmd -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/pillar/data/addtotab.sh sensorstab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm" "$INTERFACE"
$sshcmd -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/pillar/data/addtotab.sh nodestab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm"
;;
'FLEET')
ssh -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/salt/common/tools/sbin/so-firewall --apply includehost beats_endpoint_ssl "$MAINIP"
$sshcmd -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/salt/common/tools/sbin/so-firewall --apply includehost beats_endpoint_ssl "$MAINIP"
;;
esac
;;
@@ -2200,6 +2212,13 @@ mark_version() {
echo "$SOVERSION" > /etc/soversion
}
update_sudoers_for_testing() {
if [ -n "$TESTING" ]; then
info "Ensuring $INSTALLUSERNAME has password-less sudo access for automated testing purposes."
sed -i "s/^$INSTALLUSERNAME ALL=(ALL) ALL/$INSTALLUSERNAME ALL=(ALL) NOPASSWD: ALL/" /etc/sudoers
fi
}
update_sudoers() {
if ! grep -qE '^soremote\ ALL=\(ALL\)\ NOPASSWD:(\/usr\/bin\/salt\-key|\/opt\/so\/saltstack)' /etc/sudoers; then

View File

@@ -124,6 +124,15 @@ if [[ -f automation/$automation && $(basename $automation) == $automation ]]; th
ip a | grep "$MNIC:" | grep "state UP" >> $setup_log 2>&1
done
echo "Network is up on $MNIC" >> $setup_log 2>&1
if [[ ! $is_iso ]]; then
echo "Installing sshpass for automated testing." >> $setup_log 2>&1
if [ "$OS" == ubuntu ]; then
apt-get -y install sshpass >> $setup_log 2>&1
else
yum -y install sshpass >> $setup_log 2>&1
fi
fi
fi
case "$setup_type" in
@@ -136,6 +145,9 @@ case "$setup_type" in
;;
esac
#set ssh commands that will be used based on if this is an automated test install or not
set_ssh_cmds $automated
# Allow execution of SO tools during setup
local_sbin="$(pwd)/../salt/common/tools/sbin"
export PATH=$PATH:$local_sbin
@@ -285,7 +297,7 @@ if ! [[ -f $install_opt_file ]]; then
fi
if [[ $is_minion ]]; then
[ "$automated" == no ] && copy_ssh_key >> $setup_log 2>&1
copy_ssh_key >> $setup_log 2>&1
fi
if [[ $is_minion ]] && ! (compare_versions); then
@@ -594,6 +606,7 @@ set_redirect >> $setup_log 2>&1
set_progress_str 10 'Updating sudoers file for soremote user'
update_sudoers >> $setup_log 2>&1
update_sudoers_for_testing >> $setup_log 2>&1
set_progress_str 11 'Generating manager global pillar'
#minio_generate_keys