Mirror of https://github.com/Security-Onion-Solutions/securityonion.git
Merge remote-tracking branch 'remotes/origin/dev' into salt3003.1
@@ -303,6 +303,25 @@ commonlogrotateconf:
     - month: '*'
     - dayweek: '*'
+
+# Create the status directory
+sostatusdir:
+  file.directory:
+    - name: /opt/so/log/sostatus
+    - user: 0
+    - group: 0
+    - makedirs: True
+
+# Install sostatus check cron
+/usr/sbin/so-status -q && echo $? > /opt/so/log/sostatus/status.log 2>&1:
+  cron.present:
+    - user: root
+    - minute: '*/5'
+    - hour: '*'
+    - daymonth: '*'
+    - month: '*'
+    - dayweek: '*'
+
 
 {% if role in ['eval', 'manager', 'managersearch', 'standalone'] %}
 # Lock permissions on the backup directory
 backupdir:
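A note on the cron command above (illustration only, not part of the commit): because of the && operator, the exit status is written to status.log only when so-status -q succeeds, so the file holds a 0 whenever it is refreshed and is not updated when so-status fails. A hypothetical variant that also records failing exit codes would separate the two commands:

# As shipped: "echo $?" only runs when so-status -q exits 0.
/usr/sbin/so-status -q && echo $? > /opt/so/log/sostatus/status.log 2>&1

# Hypothetical variant (not in this commit): always record the exit code.
/usr/sbin/so-status -q; echo $? > /opt/so/log/sostatus/status.log 2>&1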
@@ -47,6 +47,13 @@ enabled=1
 gpgcheck=1
 gpgkey=file:///etc/pki/rpm-gpg/SALTSTACK-GPG-KEY.pub
+
+[saltstack3003]
+name=SaltStack repo for RHEL/CentOS $releasever PY3
+baseurl=https://repo.securityonion.net/file/securityonion-repo/saltstack3003/
+enabled=1
+gpgcheck=1
+gpgkey=https://repo.securityonion.net/file/securityonion-repo/keys/SALTSTACK-GPG-KEY.pub
 
 [wazuh_repo]
 gpgcheck=1
 gpgkey=file:///etc/pki/rpm-gpg/GPG-KEY-WAZUH
@@ -47,6 +47,13 @@ enabled=1
 gpgcheck=1
 gpgkey=https://repo.securityonion.net/file/securityonion-repo/keys/SALTSTACK-GPG-KEY.pub
+
+[saltstack3003]
+name=SaltStack repo for RHEL/CentOS $releasever PY3
+baseurl=http://repocache.securityonion.net/file/securityonion-repo/saltstack3003/
+enabled=1
+gpgcheck=1
+gpgkey=https://repo.securityonion.net/file/securityonion-repo/keys/SALTSTACK-GPG-KEY.pub
 
 [wazuh_repo]
 gpgcheck=1
 gpgkey=https://repo.securityonion.net/file/securityonion-repo/keys/GPG-KEY-WAZUH
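To sanity-check that the new [saltstack3003] repo resolves once one of these files is in place, a quick illustrative check (not part of the commit) is:

# Clear cached metadata, then list salt packages from only the new repo.
yum clean all
yum --disablerepo='*' --enablerepo=saltstack3003 list available 'salt*'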
@@ -663,6 +663,15 @@
 # # Read metrics from one or more commands that can output to stdout
 
+[[inputs.exec]]
+  commands = [
+    "/scripts/sostatus.sh"
+  ]
+  data_format = "influx"
+  timeout = "15s"
+  interval = "180s"
+
+
 # ## Commands array
 {% if grains['role'] in ['so-manager', 'so-managersearch'] %}
 [[inputs.exec]]
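The exec input expects the script's stdout in InfluxDB line protocol (data_format = "influx"). Given the sostatus.sh script added below, a successful run emits a single measurement along these lines (illustrative sample; Telegraf adds the timestamp):

sostatus status=0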
@@ -72,6 +72,8 @@ so-telegraf:
       - /opt/so/conf/telegraf/scripts:/scripts:ro
       - /opt/so/log/stenographer:/var/log/stenographer:ro
       - /opt/so/log/suricata:/var/log/suricata:ro
+      - /opt/so/log/raid:/var/log/raid:ro
+      - /opt/so/log/sostatus:/var/log/sostatus:ro
     - watch:
       - file: tgrafconf
       - file: tgrafsyncscripts
salt/telegraf/scripts/sostatus.sh (new file, 33 lines)
@@ -0,0 +1,33 @@
+#!/bin/bash
+#
+# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+APP=sostatus
+lf=/tmp/$APP-pidLockFile
+# create empty lock file if none exists
+cat /dev/null >> $lf
+read lastPID < $lf
+# if lastPID is not null and a process with that pid exists, exit
+[ ! -z "$lastPID" -a -d /proc/$lastPID ] && exit
+echo $$ > $lf
+SOSTATUSLOG=/var/log/sostatus/status.log
+SOSTATUSSTATUS=$(cat /var/log/sostatus/status.log)
+
+if [ -f "$SOSTATUSLOG" ]; then
+  echo "sostatus status=$SOSTATUSSTATUS"
+else
+  exit 0
+fi
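One behavioral note on the script above: status.log is read with cat before the -f existence check, so on a host where the cron job has not yet written the file, cat prints an error to stderr before the script exits. A more defensive ordering (a sketch, not what this commit ships) guards the read:

SOSTATUSLOG=/var/log/sostatus/status.log
if [ -f "$SOSTATUSLOG" ]; then
  # Read the status only once the file is known to exist.
  echo "sostatus status=$(cat "$SOSTATUSLOG")"
fi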
@@ -563,7 +563,7 @@ collect_patch_schedule_name_import() {
 
 collect_proxy() {
   [[ -n $TESTING ]] && return
-  collect_proxy_details
+  collect_proxy_details || return
   while ! proxy_validate; do
     if whiptail_invalid_proxy; then
       collect_proxy_details no_ask
@@ -608,6 +608,8 @@ collect_proxy_details() {
       so_proxy="$proxy_addr"
     fi
     export so_proxy
+  else
+    return 1
   fi
 }
 
@@ -1012,7 +1014,7 @@ create_repo() {
 
 detect_cloud() {
   echo "Testing if setup is running on a cloud instance..." | tee -a "$setup_log"
-  if ( curl --fail -s -m 5 http://169.254.169.254/latest/meta-data/instance-id > /dev/null ) || ( dmidecode -s bios-vendor | grep -q Google > /dev/null); then export is_cloud="true"; fi
+  if ( curl --fail -s -m 5 http://169.254.169.254/latest/meta-data/instance-id > /dev/null ) || ( dmidecode -s bios-vendor | grep -q Google > /dev/null) || [ -f /var/log/waagent.log ]; then export is_cloud="true"; fi
 }
 
 detect_os() {
@@ -1870,12 +1872,13 @@ print_salt_state_apply() {
 }
 
 proxy_validate() {
   echo "Testing proxy..."
   local test_url="https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/master/KEYS"
-  proxy_test_err=$(curl -sS "$test_url" --proxy "$so_proxy" 2>&1)
+  proxy_test_err=$(curl -sS "$test_url" --proxy "$so_proxy" --connect-timeout 5 2>&1) # set short connection timeout so user doesn't sit waiting for proxy test to timeout
   local ret=$?
 
   if [[ $ret != 0 ]]; then
-    error "Could not reach $test_url using proxy $so_proxy"
+    error "Could not reach $test_url using proxy provided"
     error "Received error: $proxy_test_err"
     if [[ -n $TESTING ]]; then
       error "Exiting setup"
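For reference on the new flag (illustrative, not from the commit): --connect-timeout bounds only the connection phase, while -m/--max-time bounds the entire transfer, so a proxy that never answers now fails after roughly 5 seconds instead of waiting out curl's much longer default:

# Fail fast if the proxy never completes the connection.
curl -sS "$test_url" --proxy "$so_proxy" --connect-timeout 5
# Hypothetical stricter variant: also cap the total transfer time.
curl -sS "$test_url" --proxy "$so_proxy" --connect-timeout 5 --max-time 30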
@@ -2286,13 +2289,21 @@ secrets_pillar(){
 securityonion_repo() {
   # Remove all the current repos
   if [[ "$OS" == "centos" ]]; then
-    mkdir -p /root/oldrepos
-    mv /etc/yum.repos.d/* /root/oldrepos/
-    rm -f /etc/yum.repos.d/*
-    if [[ ! $is_manager && "$MANAGERUPDATES" == "1" ]]; then
-      cp -f ../salt/common/yum_repos/securityonioncache.repo /etc/yum.repos.d/
+    if [[ "$INTERWEBS" == "AIRGAP" ]]; then
+      echo "This is airgap I don't need to add this repo"
     else
-      cp -f ../salt/common/yum_repos/securityonion.repo /etc/yum.repos.d/
+      mkdir -p /root/oldrepos
+      mv -v /etc/yum.repos.d/* /root/oldrepos/
+      ls -la /etc/yum.repos.d/
+      rm -rf /etc/yum.repos.d
+      yum clean all
+      yum repolist all
+      mkdir -p /etc/yum.repos.d
+      if [[ ! $is_manager && "$MANAGERUPDATES" == "1" ]]; then
+        cp -f ../salt/common/yum_repos/securityonioncache.repo /etc/yum.repos.d/
+      else
+        cp -f ../salt/common/yum_repos/securityonion.repo /etc/yum.repos.d/
+      fi
     fi
   else
     echo "This is Ubuntu"
@@ -2707,7 +2718,7 @@ update_sudoers() {
 update_packages() {
   if [ "$OS" = 'centos' ]; then
     yum repolist >> /dev/null
-    yum -y update >> "$setup_log"
+    yum -y update --exclude=salt* >> "$setup_log"
   else
     retry 50 10 "apt-get -y update" >> "$setup_log" 2>&1 || exit 1
     retry 50 10 "apt-get -y upgrade" >> "$setup_log" 2>&1 || exit 1
@@ -558,7 +558,6 @@ if [[ $is_node && ! $is_eval ]]; then
     LSPIPELINEWORKERS=$num_cpu_cores
     LSPIPELINEBATCH=125
     LSINPUTTHREADS=1
-    LSPIPELINEBATCH=125
   fi
 fi
 
@@ -572,14 +571,14 @@ fi
 
 if [[ $is_manager || $is_import ]]; then collect_so_allow; fi
 
 whiptail_make_changes
+# This block sets REDIRECTIT which is used by a function outside the below subshell
+set_redirect >> $setup_log 2>&1
+
+whiptail_end_settings
 
 # From here on changes will be made.
 echo "1" > /root/accept_changes
 
-# This block sets REDIRECTIT which is used by a function outside the below subshell
-set_redirect >> $setup_log 2>&1
-
-
 # Begin install
 {
@@ -962,6 +961,7 @@ else
   } | whiptail_gauge_post_setup "Running post-installation steps..."
 
   whiptail_setup_complete
+  [[ $setup_type != 'iso' ]] && whitpail_ssh_warning
   echo "Post-installation steps have completed." >> $setup_log 2>&1
 fi
 
@@ -391,6 +391,7 @@ whiptail_dockernet_net() {
   whiptail_check_exitstatus $exitstatus
 
 }
+
 whiptail_enable_components() {
 
   [ -n "$TESTING" ] && return
@@ -423,6 +424,211 @@ whiptail_enable_components() {
   done
 }
 
+whiptail_end_settings() {
+  [ -n "$TESTING" ] && return
+
+  # BASIC INFO (NETWORK, HOSTNAME, DESCRIPTION, ETC)
+
+  read -r -d '' end_msg <<- EOM
+    Node Type: $install_type
+    Hostname: $HOSTNAME
+  EOM
+
+  [[ -n $NODE_DESCRIPTION ]] && __append_end_msg "Description: $NODE_DESCRIPTION"
+
+  [[ $is_airgap ]] && __append_end_msg "Airgap: True"
+
+  if [[ $is_minion ]]; then
+    __append_end_msg "Manager Hostname: $MSRV"
+    __append_end_msg "Manager IP: $MSRVIP"
+  fi
+
+
+  [[ $is_iso ]] && __append_end_msg "Network: $address_type"
+
+  __append_end_msg "Management NIC: $MNIC"
+  __append_end_msg "Management IP: $MAINIP"
+
+  if [[ $address_type == 'STATIC' ]]; then
+    __append_end_msg "Gateway: $MGATEWAY"
+    __append_end_msg "DNS: $MDNS"
+    __append_end_msg "DNS Domain: $MSEARCH"
+  fi
+
+  if [[ -n $so_proxy ]]; then
+    __append_end_msg "Proxy:"
+    __append_end_msg " Server URL: $proxy_addr"
+    [[ -n $proxy_user ]] && __append_end_msg " User: $proxy_user"
+  else
+    __append_end_msg "Proxy: N/A"
+  fi
+
+  if [[ $is_sensor ]]; then
+    __append_end_msg "Bond NIC(s):"
+    for nic in "${BNICS[@]}"; do
+      __append_end_msg " - $nic"
+    done
+    [[ -n $MTU ]] && __append_end_msg "MTU: $MTU"
+  fi
+
+  local homenet_arr
+  if [[ -n $HNMANAGER ]]; then
+    __append_end_msg "Home Network(s):"
+    IFS="," read -r -a homenet_arr <<< "$HNMANAGER"
+    for net in "${homenet_arr[@]}"; do
+      __append_end_msg " - $net"
+    done
+  elif [[ -n $HNSENSOR ]]; then
+    __append_end_msg "Home Network(s):"
+    IFS="," read -r -a homenet_arr <<< "$HNSENSOR"
+    for net in "${homenet_arr[@]}"; do
+      __append_end_msg " - $net"
+    done
+  fi
+
+  [[ -n $REDIRECTIT ]] && __append_end_msg "Access URL: https://${REDIRECTIT}"
+
+  [[ -n $ALLOW_CIDR ]] && __append_end_msg "Allowed IP or Subnet: $ALLOW_CIDR"
+
+  [[ -n $WEBUSER ]] && __append_end_msg "Web User: $WEBUSER"
+
+  [[ -n $FLEETNODEUSER ]] && __append_end_msg "Fleet User: $FLEETNODEUSER"
+
+  if [[ $is_manager ]]; then
+    __append_end_msg "Enabled Optional Components:"
+    for component in "${COMPONENTS[@]}"; do
+      __append_end_msg " - $component"
+    done
+  fi
+
+  # METADATA / IDS
+
+  if [[ -n $ZEEKVERSION ]]; then
+    local md_tool_string=${ZEEKVERSION,,}
+    md_tool_string=${md_tool_string^}
+
+    __append_end_msg "Metadata Tool: $md_tool_string"
+  fi
+
+  [[ -n $RULESETUP ]] && __append_end_msg "IDS Ruleset: $RULESETUP"
+  [[ -n $OINKCODE ]] && __append_end_msg "Oinkcode: $OINKCODE"
+
+  # PATCH SCHEDULE
+
+  if [[ -n $PATCHSCHEDULENAME ]]; then
+    __append_end_msg "Patch Schedule:"
+    if [[ $PATCHSCHEDULENAME == 'auto' || $PATCHSCHEDULENAME == 'manual' ]]; then
+      __append_end_msg " Type: $PATCHSCHEDULENAME"
+    else
+      __append_end_msg " Name: $PATCHSCHEDULENAME"
+    fi
+    if [[ ${#PATCHSCHEDULEDAYS[@]} -gt 0 ]]; then
+      __append_end_msg " Day(s):"
+      for day in "${PATCHSCHEDULEDAYS[@]}"; do
+        __append_end_msg " - $day"
+      done
+    fi
+    if [[ ${#PATCHSCHEDULEHOURS[@]} -gt 0 ]]; then
+      __append_end_msg " Hour(s):"
+      for hour in "${PATCHSCHEDULEHOURS[@]}"; do
+        __append_end_msg " - $hour"
+      done
+    fi
+  fi
+
+  # MISC
+
+  [[ $is_helix ]] && __append_end_msg "Helix API key: $HELIXAPIKEY"
+  [[ -n $DOCKERNET ]] && __append_end_msg "Docker network: $DOCKERNET"
+  if [[ -n $MANAGERUPDATES ]]; then
+    __append_end_msg "OS Package Updates: Manager"
+  else
+    __append_end_msg "OS Package Updates: Open"
+  fi
+  if [[ ${#ntp_servers[@]} -gt 0 ]]; then
+    __append_end_msg "NTP Servers:"
+    for server in "${ntp_servers[@]}"; do
+      __append_end_msg " - $server"
+    done
+  fi
+
+  if [[ $NSMSETUP != 'ADVANCED' ]]; then
+    [[ -n $BASICZEEK ]] && __append_end_msg "Zeek Processes: $BASICZEEK"
+    [[ -n $BASICSURI ]] && __append_end_msg "Suricata Processes: $BASICSURI"
+  fi
+
+  # ADVANCED OR REGULAR
+
+  if [[ $NODESETUP == 'NODEADVANCED' ]]; then
+    __append_end_msg "Advanced Node Settings:"
+    __append_end_msg " Elasticsearch Heap Size: $NODE_ES_HEAP_SIZE"
+    __append_end_msg " Logstash Heap Size: $NODE_LS_HEAP_SIZE"
+    __append_end_msg " Logstash Worker Count: $LSPIPELINEWORKERS"
+    __append_end_msg " Logstash Batch Size: $LSPIPELINEBATCH"
+    __append_end_msg " Logstash Input Threads: $LSINPUTTHREADS"
+    __append_end_msg " Curator Day Cutoff: $CURCLOSEDAYS days"
+    __append_end_msg " Elasticsearch Storage Space: ${log_size_limit}GB"
+  else
+    __append_end_msg "Elasticsearch Heap Size: $NODE_ES_HEAP_SIZE"
+    __append_end_msg "Logstash Heap Size: $NODE_LS_HEAP_SIZE"
+    __append_end_msg "Logstash Worker Count: $LSPIPELINEWORKERS"
+    __append_end_msg "Logstash Batch Size: $LSPIPELINEBATCH"
+    __append_end_msg "Logstash Input Threads: $LSINPUTTHREADS"
+    __append_end_msg "Curator Close After: $CURCLOSEDAYS days"
+    __append_end_msg "Elasticsearch Storage Space: ${log_size_limit}GB"
+  fi
+
+
+  # ADVANCED
+  if [[ $MANAGERADV == 'ADVANCED' ]]; then
+    __append_end_msg "Advanced Manager Settings:"
+    [[ -n $ESCLUSTERNAME ]] && __append_end_msg " ES Cluster Name: $ESCLUSTERNAME"
+    if [[ ${#BLOGS[@]} -gt 0 ]]; then
+      __append_end_msg " Zeek Logs Enabled:"
+      for log in "${BLOGS[@]}"; do
+        __append_end_msg " - $log"
+      done
+    fi
+  fi
+
+  if [[ $NSMSETUP == 'ADVANCED' ]]; then
+    __append_end_msg "Advanced NSM Settings:"
+    if [[ ${#ZEEKPINS[@]} -gt 0 ]]; then
+      local zeek_pin_str
+      for core in "${ZEEKPINS[@]}"; do
+        zeek_pin_str="${zeek_pin_str}${core},"
+      done
+      zeek_pin_str=${zeek_pin_str%,}
+      __append_end_msg " Zeek Pinned Cores: ${zeek_pin_str}"
+    fi
+    if [[ ${#SURIPINS[@]} -gt 0 ]]; then
+      local suri_pin_str
+      for core in "${SURIPINS[@]}"; do
+        suri_pin_str="${suri_pin_str}${core},"
+      done
+      suri_pin_str=${suri_pin_str%,}
+      __append_end_msg " Suricata Pinned Cores: ${suri_pin_str}"
+    fi
+  fi
+
+  whiptail --title "The following options have been set, would you like to proceed?" --yesno "$end_msg" 24 75 --scrolltext
+
+  local exitstatus=$?
+  whiptail_check_exitstatus $exitstatus
+
+  echo "$end_msg" > /root/install_summary
+  printf '%s\n' 'Install summary:' "$end_msg" >> "$setup_log"
+}
+
+__append_end_msg() {
+  local newline=$1
+
+  read -r -d '' end_msg <<- EOM
+    $end_msg
+    $newline
+  EOM
+}
 
 whiptail_eval_adv() {
 
   [ -n "$TESTING" ] && return
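whiptail_end_settings builds its summary by repeatedly re-reading a heredoc that contains the current end_msg plus one new line, which is what __append_end_msg does. A minimal standalone sketch of that append pattern (the variable and function names here are illustrative, not from the source):

end_msg="Node Type: STANDALONE"

append_line() {
  # Re-read the accumulated message plus the new line back into end_msg.
  # (read -d '' returns non-zero at end of input, but end_msg is still populated.)
  read -r -d '' end_msg << EOM
$end_msg
$1
EOM
}

append_line "Hostname: onion"
append_line "Airgap: True"
printf '%s\n' "$end_msg"   # prints the three lines in order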
@@ -1491,6 +1697,22 @@ whiptail_so_allow() {
   whiptail_check_exitstatus $exitstatus
 }
 
+whitpail_ssh_warning() {
+  [ -n "$TESTING" ] && return
+
+  local msg
+
+  read -r -d '' msg <<- EOM
+    NOTE: You will receive a warning upon SSH reconnect that the host key has changed.
+
+    This is expected due to hardening of the OpenSSH server config.
+
+    The host key algorithm will now be ED25519, follow the instructions given by your SSH client to remove the old key fingerprint then retry the connection.
+  EOM
+
+  whiptail --msgbox "$msg" 14 75
+}
+
 whiptail_storage_requirements() {
   local mount=$1
   local current_val=$2