#!/bin/bash # Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one # or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at # https://securityonion.net/license; you may not use this file except in compliance with the # Elastic License 2.0. # README - DO NOT DEFINE GLOBAL VARIABLES IN THIS FILE. Instead use so-variables. ### Begin Logging Section ### log() { msg=$1 level=${2:-I} now=$(TZ=GMT date +"%Y-%m-%dT%H:%M:%SZ") echo -e "$now | $level | $msg" 2>&1 | tee -a "$setup_log" } error() { log "$1" "E" } info() { log "$1" "I" } title() { echo -e "\n-----------------------------\n $1\n-----------------------------\n" >> "$setup_log" 2>&1 } logCmd() { cmd=$1 info "Executing command: $cmd" $cmd 2>&1 | tee -a $setup_log } ### End Logging Section ### airgap_rules() { # Copy the rules for suricata if using Airgap mkdir -p /nsm/repo/rules cp -v /root/SecurityOnion/agrules/emerging-all.rules /nsm/repo/rules/ # Copy over sigma rules cp -Rv /root/SecurityOnion/agrules/sigma /nsm/repo/rules/ # Don't leave Strelka out cp -Rv /root/SecurityOnion/agrules/strelka /nsm/repo/rules/ } add_admin_user() { title "Adding $ADMINUSER to the system with sudo rights" logCmd "useradd '$ADMINUSER'" echo "$ADMINUSER":"$ADMINPASS1" | chpasswd --crypt-method=SHA512 logCmd "usermod -aG wheel '$ADMINUSER'" } add_mngr_ip_to_hosts() { info "Adding $MSRV to /etc/hosts with IP: $MSRVIP" echo "$MSRVIP $MSRV" >> /etc/hosts } add_socore_user_manager() { info "Adding socore user" logCmd "so_add_user 'socore' '939' '939' '/opt/so'" } add_web_user() { wait_for_file /nsm/kratos/db/db.sqlite 30 5 { info "Attempting to add administrator user for web interface..."; export SKIP_STATE_APPLY=true echo "$WEBPASSWD1" | /usr/sbin/so-user add --email "$WEBUSER" --role "superuser"; unset SKIP_STATE_APPLY info "Add user result: $?"; } >> "/root/so-user-add.log" 2>&1 } analyze_system() { title "System Characteristics" logCmd "uptime" logCmd 
"uname -a" logCmd "free -h" logCmd "lscpu" logCmd "df -h" logCmd "ip a" } analyst_salt_local() { # Install everything using local salt # Set the repo securityonion_repo gpg_rpm_import # Install salt logCmd "yum -y install salt-minion-3004.1 httpd-tools python3 python36-docker python36-dateutil python36-m2crypto python36-mysql python36-packaging python36-lxml yum-utils device-mapper-persistent-data lvm2 openssl jq" logCmd "yum -y update --exclude=salt*" logCmd "salt-call state.apply workstation --local --file-root=../salt/ -l info" read -r -d '' message <<- EOM Finished Analyst workstation installation. Press the Enter key to reboot. EOM whiptail --title "$whiptail_title" --msgbox "$message" 12 75 reboot exit 0 } analyst_workstation_pillar() { local pillar_file=$local_salt_dir/pillar/minions/$MINION_ID.sls # Create the analyst workstation pillar printf '%s\n'\ "host:"\ " mainint: '$MNIC'"\ "workstation:"\ " gui:"\ " enabled: true" >> "$pillar_file"\ "sensoroni:"\ " node_description: '${NODE_DESCRIPTION//\'/''}'" > $pillar_file } calculate_useable_cores() { # Calculate reasonable core usage local cores_for_zeek=$(( (num_cpu_cores/2) - 1 )) local lb_procs_round lb_procs_round=$(printf "%.0f\n" $cores_for_zeek) if [ "$lb_procs_round" -lt 1 ]; then lb_procs=1; else lb_procs=$lb_procs_round; fi export lb_procs } check_admin_pass() { check_pass_match "$ADMINPASS1" "$ADMINPASS2" "APMATCH" } check_manager_connection() { # See if you can curl the manager. If not you can either try again or continue info "Checking manager connectivity" man_test_err=$(curl -k -L -sS https://$MSRVIP/repo --connect-timeout 5 2>&1) local ret=$? if [[ $ret != 0 ]]; then error "Could not reach $MSRV" whiptail_manager_unreachable fi } check_network_manager_conf() { local gmdconf="/usr/lib/NetworkManager/conf.d/10-globally-managed-devices.conf" local nmconf="/etc/NetworkManager/NetworkManager.conf" local preupdir="/etc/NetworkManager/dispatcher.d/pre-up.d" if test -f "$gmdconf" && ! 
test -f "${gmdconf}.bak"; then { mv "$gmdconf" "${gmdconf}.bak" touch "$gmdconf" systemctl restart NetworkManager } >> "$setup_log" 2>&1 fi if [[ ! -d "$preupdir" ]]; then mkdir "$preupdir" >> "$setup_log" 2>&1 fi } check_pass_match() { info "Making sure passwords match" local pass=$1 local confirm_pass=$2 local var=$3 if [ "$pass" = "$confirm_pass" ]; then export "$var=yes" else whiptail_passwords_dont_match fi } # False if stopped, true if running check_service_status() { local service_name=$1 info "Checking service $service_name status" systemctl status $service_name > /dev/null 2>&1 local status=$? if [ $status -gt 0 ]; then info " $service_name is not running" return 1; else info " $service_name is running" return 0; fi } check_web_pass() { info Making sure web credential passwords match check_pass_match "$WEBPASSWD1" "$WEBPASSWD2" "WPMATCH" } clear_manager() { # Clear out the old manager public key in case this is a re-install. # This only happens if you re-install the manager. if [ -f /etc/salt/pki/minion/minion_master.pub ]; then info "Clearing old Salt master key" logCmd "rm -f /etc/salt/pki/minion/minion_master.pub" info "Restarting Salt Minion" logCmd "systemctl -q restart salt-minion" fi } collect_adminuser_inputs() { whiptail_create_admin_user while ! valid_username "$ADMINUSER"; do whiptail_invalid_input whiptail_create_admin_user "$ADMINUSER" done APMATCH=no while [[ $APMATCH != yes ]]; do whiptail_create_admin_user_password1 whiptail_create_admin_user_password2 check_admin_pass done } collect_dns() { whiptail_management_interface_dns "8.8.8.8,8.8.4.4" while ! valid_dns_list "$MDNS"; do whiptail_invalid_input whiptail_management_interface_dns "$MDNS" done MDNS=$(echo "$MDNS" | tr -s "," " ") # MDNS needs to be space separated, we prompt for comma separated for consistency } collect_dns_domain() { whiptail_management_interface_dns_search "searchdomain.local" while ! 
valid_fqdn "$MSEARCH"; do whiptail_invalid_input whiptail_management_interface_dns_search "$MSEARCH" done } collect_dockernet() { if ! whiptail_dockernet_check; then whiptail_dockernet_sosnet "172.17.1.0" whiptail_dockernet_nososnet "172.17.0.0" while ! valid_ip4 "$DOCKERNET"; do whiptail_invalid_input whiptail_dockernet_nonsosnet "$DOCKERNET" done while ! valid_ip4 "$DOCKERNET2"; do whiptail_invalid_input whiptail_dockernet_sosnet "$DOCKERNET2" done fi } collect_es_space_limit() { whiptail_log_size_limit "$log_size_limit" while ! valid_int "$log_size_limit"; do # Upper/lower bounds? whiptail_invalid_input whiptail_log_size_limit "$log_size_limit" done } collect_gateway() { whiptail_management_interface_gateway while ! valid_ip4 "$MGATEWAY"; do whiptail_invalid_input whiptail_management_interface_gateway "$MGATEWAY" done } collect_homenet_mngr() { whiptail_homenet_manager "10.0.0.0/8,192.168.0.0/16,172.16.0.0/12" while ! valid_cidr_list "$HNMANAGER"; do whiptail_invalid_input whiptail_homenet_manager "$HNMANAGER" done } collect_homenet_snsr() { if whiptail_homenet_sensor_inherit; then export HNSENSOR=inherit else whiptail_homenet_sensor "10.0.0.0/8,192.168.0.0/16,172.16.0.0/12" while ! valid_cidr_list "$HNSENSOR"; do whiptail_invalid_input whiptail_homenet_sensor "$HNSENSOR" done fi } collect_hostname() { collect_hostname_validate while has_uppercase "$HOSTNAME"; do if ! (whiptail_uppercase_warning); then collect_hostname_validate else no_use_hostname=true break fi done } collect_hostname_validate() { if [[ $automated == no ]] && [[ "$HOSTNAME" == *'localhost'* ]]; then HOSTNAME=securityonion; fi whiptail_set_hostname "$HOSTNAME" if [[ -z $default_hostname_flag ]] && [[ $HOSTNAME == 'securityonion' ]]; then # Will only check HOSTNAME=securityonion once if ! (whiptail_avoid_default_hostname); then whiptail_set_hostname "$HOSTNAME" fi default_hostname_flag=true fi while ! 
valid_hostname "$HOSTNAME"; do whiptail_invalid_hostname whiptail_set_hostname "$HOSTNAME" done } collect_int_ip_mask() { whiptail_management_interface_ip_mask while ! valid_ip4_cidr_mask "$manager_ip_mask"; do whiptail_invalid_input whiptail_management_interface_ip_mask "$manager_ip_mask" done MIP=$(echo "$manager_ip_mask" | sed 's/\/.*//' ) MMASK=$(echo "$manager_ip_mask" | sed 's/.*\///') } collect_mngr_hostname() { whiptail_management_server while ! valid_hostname "$MSRV"; do whiptail_invalid_hostname whiptail_management_server "$MSRV" done while [[ $MSRV == "$HOSTNAME" ]]; do whiptail_invalid_hostname 0 whiptail_management_server "$MSRV" done # Remove the manager from /etc/hosts incase a user entered the wrong IP when prompted # and they are going through the installer again if [[ "$HOSTNAME" != "$MSRV" ]]; then info "Removing $MSRV from /etc/hosts if present." sed -i "/$MSRV/d" /etc/hosts fi if ! getent hosts "$MSRV"; then whiptail_manager_ip while ! valid_ip4 "$MSRVIP" || [[ $MSRVIP == "$MAINIP" || $MSRVIP == "127.0.0.1" ]]; do whiptail_invalid_input whiptail_manager_ip "$MSRVIP" done else MSRVIP=$(getent hosts "$MSRV" | awk 'NR==1{print $1}') fi } collect_net_method() { whiptail_net_method if [[ "$network_traffic" == *"_MANAGER" ]]; then whiptail_manager_updates_warning MANAGERUPDATES=1 fi if [[ "$network_traffic" == "PROXY"* ]]; then collect_proxy no_ask fi } collect_ntp_servers() { if whiptail_ntp_ask; then [[ $is_airgap ]] && ntp_string="" whiptail_ntp_servers "$ntp_string" while ! valid_ntp_list "$ntp_string"; do whiptail_invalid_input whiptail_ntp_servers "$ntp_string" done IFS="," read -r -a ntp_servers <<< "$ntp_string" # Split string on commas into array else ntp_servers=() fi } collect_oinkcode() { whiptail_oinkcode while ! 
valid_string "$OINKCODE" "" "128"; do whiptail_invalid_input whiptail_oinkcode "$OINKCODE" done } collect_patch_schedule() { whiptail_patch_schedule case "$patch_schedule" in 'New Schedule') whiptail_patch_schedule_select_days whiptail_patch_schedule_select_hours collect_patch_schedule_name_new patch_schedule_os_new ;; 'Import Schedule') collect_patch_schedule_name_import ;; 'Automatic') PATCHSCHEDULENAME='auto' ;; 'Manual') PATCHSCHEDULENAME='manual' ;; esac } collect_patch_schedule_name_new() { whiptail_patch_name_new_schedule while ! valid_string "$PATCHSCHEDULENAME"; do whiptail_invalid_string "schedule name" whiptail_patch_name_new_schedule "$PATCHSCHEDULENAME" done } collect_patch_schedule_name_import() { whiptail_patch_schedule_import while ! valid_string "$PATCHSCHEDULENAME"; do whiptail_invalid_string "schedule name" whiptail_patch_schedule_import "$PATCHSCHEDULENAME" done } collect_proxy() { [[ -n $TESTING ]] && return local ask=${1:-true} collect_proxy_details "$ask" || return while ! proxy_validate; do if whiptail_invalid_proxy; then collect_proxy_details no_ask else so_proxy="" break fi done } collect_proxy_details() { local ask=${1:-true} local use_proxy if [[ $ask != true ]]; then use_proxy=0 else whiptail_proxy_ask use_proxy=$? fi if [[ $use_proxy == 0 ]]; then whiptail_proxy_addr "$proxy_addr" while ! valid_proxy "$proxy_addr"; do whiptail_invalid_input whiptail_proxy_addr "$proxy_addr" done if whiptail_proxy_auth_ask; then whiptail_proxy_auth_user "$proxy_user" whiptail_proxy_auth_pass "$proxy_pass" local url_prefixes=( 'http://' 'https://' ) for prefix in "${url_prefixes[@]}"; do if echo "$proxy_addr" | grep -q "$prefix"; then local proxy=${proxy_addr#"$prefix"} so_proxy="${prefix}${proxy_user}:${proxy_pass}@${proxy}" break fi done else so_proxy="$proxy_addr" fi export so_proxy else return 1 fi } collect_redirect_host() { collect_redirect_host_validate while has_uppercase "$REDIRECTHOST"; do local text ! 
valid_hostname "$REDIRECTHOST" && text="domain name" || text="hostname" if ! (whiptail_uppercase_warning "$text"); then collect_redirect_host_validate "$REDIRECTHOST" else break fi done } collect_redirect_host_validate() { local prefill=${1:-$HOSTNAME} whiptail_set_redirect_host "$prefill" while ! valid_ip4 "$REDIRECTHOST" && ! valid_hostname "$REDIRECTHOST" && ! valid_fqdn "$REDIRECTHOST"; do whiptail_invalid_input whiptail_set_redirect_host "$REDIRECTHOST" done } collect_so_allow() { if whiptail_so_allow_yesno; then whiptail_so_allow while ! valid_cidr "$ALLOW_CIDR" && ! valid_ip4 "$ALLOW_CIDR"; do whiptail_invalid_input whiptail_so_allow "$ALLOW_CIDR" done fi } # Get an email & password for the web admin user collect_webuser_inputs() { whiptail_create_web_user while ! so-user valemail --email "$WEBUSER" >> "$setup_log" 2>&1; do whiptail_invalid_user_warning whiptail_create_web_user "$WEBUSER" done WPMATCH=no while [[ $WPMATCH != yes ]]; do whiptail_create_web_user_password1 while ! check_password "$WEBPASSWD1"; do whiptail_invalid_pass_characters_warning whiptail_create_web_user_password1 done if echo "$WEBPASSWD1" | so-user valpass >> "$setup_log" 2>&1; then whiptail_create_web_user_password2 check_web_pass else whiptail_invalid_pass_warning fi done } configure_minion() { local minion_type=$1 if [[ $is_analyst ]]; then minion_type=workstation fi info "Configuring minion type as $minion_type" echo "role: so-$minion_type" > /etc/salt/grains local minion_config=/etc/salt/minion echo "id: '$MINION_ID'" > "$minion_config" case "$minion_type" in 'workstation') echo "master: '$MSRV'" >> "$minion_config" ;; 'manager' | 'eval' | 'managersearch' | 'standalone' | 'import') cp -f ../salt/ca/files/signing_policies.conf /etc/salt/minion.d/signing_policies.conf printf '%s\n'\ "master: '$HOSTNAME'"\ "mysql.host: '$MAINIP'"\ "mysql.port: '3306'"\ "mysql.user: 'root'" >> "$minion_config" if [ ! 
-f $local_salt_dir/pillar/secrets.sls ]; then echo "mysql.pass: '$MYSQLPASS'" >> "$minion_config" else OLDPASS=$(grep "mysql" $local_salt_dir/pillar/secrets.sls | awk '{print $2}') echo "mysql.pass: '$OLDPASS'" >> "$minion_config" fi ;; *) echo "master: '$MSRV'" >> "$minion_config" ;; esac printf '%s\n'\ "use_superseded:"\ " - module.run"\ "log_level: info"\ "log_level_logfile: info"\ "log_file: /opt/so/log/salt/minion" >> "$minion_config" cp -f ../salt/salt/etc/minion.d/mine_functions.conf /etc/salt/minion.d/mine_functions.conf sed -i "s/{{ GLOBALS.main_interface }}/$MAININT/" /etc/salt/minion.d/mine_functions.conf { systemctl restart salt-minion; } >> "$setup_log" 2>&1 } configure_ntp() { local chrony_conf=/etc/chrony.conf # Install chrony if it isn't already installed if ! command -v chronyc &> /dev/null; then logCmd "yum -y install chrony" fi [[ -f $chrony_conf ]] && mv $chrony_conf "$chrony_conf.bak" printf '%s\n' "# NTP server list" > $chrony_conf # Build list of servers for addr in "${ntp_servers[@]}"; do echo "server $addr iburst" >> $chrony_conf done printf '\n%s\n' "# Config options" >> $chrony_conf printf '%s\n' \ 'driftfile /var/lib/chrony/drift' \ 'makestep 1.0 3' \ 'rtcsync' \ 'logdir /var/log/chrony' >> $chrony_conf systemctl enable chronyd systemctl restart chronyd # Tell the chrony daemon to sync time & update the system time # Since these commands only make a call to chronyd, wait after each command to make sure the changes are made printf "Syncing chrony time to server: " chronyc -a 'burst 4/4' && sleep 30 printf "Forcing chrony to update the time: " chronyc -a makestep && sleep 30 } checkin_at_boot() { local minion_config=/etc/salt/minion info "Enabling checkin at boot" echo "startup_states: highstate" >> "$minion_config" } check_requirements() { local standalone_or_dist=$1 local node_type=$2 # optional local req_mem local req_cores local req_storage local nic_list readarray -t nic_list <<< "$(ip link| awk -F: '$0 !~ 
"lo|vir|veth|br|docker|wl|^[^0-9]"{print $2}' | grep -vwe "bond0" | sed 's/ //g' | sed -r 's/(.*)(\.[0-9]+)@\1/\1\2/g')" local num_nics=${#nic_list[@]} if [[ "$standalone_or_dist" == 'standalone' ]]; then req_mem=12 req_cores=4 req_nics=2 elif [[ "$standalone_or_dist" == 'dist' ]]; then req_mem=8 req_cores=4 if [[ "$node_type" == 'sensor' ]]; then req_nics=2; else req_nics=1; fi if [[ "$node_type" == 'fleet' ]]; then req_mem=4; fi if [[ "$node_type" == 'idh' ]]; then req_mem=1 req_cores=2; fi elif [[ "$standalone_or_dist" == 'import' ]]; then req_mem=4 req_cores=2 req_nics=1 fi if [[ $setup_type == 'network' ]] ; then if [[ -n $nsm_mount ]]; then if [[ "$standalone_or_dist" == 'import' ]]; then req_storage=50 elif [[ "$node_type" == 'idh' ]]; then req_storage=12 else req_storage=100 fi if [[ $free_space_root -lt $req_storage ]]; then whiptail_storage_requirements "/" "${free_space_root} GB" "${req_storage} GB" fi if [[ $free_space_nsm -lt $req_storage ]]; then whiptail_storage_requirements "/nsm" "${free_space_nsm} GB" "${req_storage} GB" fi else if [[ "$standalone_or_dist" == 'import' ]]; then req_storage=50 elif [[ "$node_type" == 'idh' ]]; then req_storage=12 else req_storage=200 fi if [[ $free_space_root -lt $req_storage ]]; then whiptail_storage_requirements "/" "${free_space_root} GB" "${req_storage} GB" fi fi fi if [[ $num_nics -lt $req_nics ]]; then if [[ $num_nics -eq 1 ]]; then whiptail_requirements_error "NIC" "$num_nics" "$req_nics" else whiptail_requirements_error "NICs" "$num_nics" "$req_nics" fi fi if [[ $num_cpu_cores -lt $req_cores ]]; then if [[ $num_cpu_cores -eq 1 ]]; then whiptail_requirements_error "core" "$num_cpu_cores" "$req_cores" else whiptail_requirements_error "cores" "$num_cpu_cores" "$req_cores" fi fi if [[ $total_mem_hr -lt $req_mem ]]; then whiptail_requirements_error "memory" "${total_mem_hr} GB" "${req_mem} GB" fi } check_sos_appliance() { title "Is this is an SOS Appliance?" 
if [ -f "/etc/SOSMODEL" ]; then local MODEL=$(cat /etc/SOSMODEL) info "Found SOS Model $MODEL" echo "sosmodel: $MODEL" >> /etc/salt/grains fi } compare_main_nic_ip() { if ! [[ $MNIC =~ ^(tun|wg|vpn).*$ ]]; then if [[ "$MAINIP" != "$MNIC_IP" ]]; then error "[ERROR] Main gateway ($MAINIP) does not match ip address of management NIC ($MNIC_IP)." read -r -d '' message <<- EOM The IP being routed by Linux is not the IP address assigned to the management interface ($MNIC). This is not a supported configuration, please remediate and rerun setup. EOM [[ -n $TESTING ]] || whiptail --title "$whiptail_title" --msgbox "$message" 11 75 kill -SIGINT "$(ps --pid $$ -oppid=)"; exit 1 fi else # Setup uses MAINIP, but since we ignore the equality condition when using a VPN # just set the variable to the IP of the VPN interface MAINIP=$MNIC_IP fi } configure_network_sensor() { info "Setting up sensor interface" if [[ $is_cloud ]]; then local nmcli_con_args=( "type" "ethernet" ) else local nmcli_con_args=( "type" "bond" "mode" "0" ) fi # Create the bond interface only if it doesn't already exist nmcli -f name,uuid -p con | grep -q '$INTERFACE' local found_int=$? if [[ $found_int != 0 ]]; then nmcli con add ifname "$INTERFACE" con-name "$INTERFACE" "${nmcli_con_args[@]}" -- \ ipv4.method disabled \ ipv6.method ignore \ ethernet.mtu "$MTU" \ connection.autoconnect "yes" >> "$setup_log" 2>&1 else local int_uuid int_uuid=$(nmcli -f name,uuid -p con | sed -n "s/$INTERFACE //p" | tr -d ' ') nmcli con mod "$int_uuid" \ ipv4.method disabled \ ipv6.method ignore \ ethernet.mtu "$MTU" \ connection.autoconnect "yes" >> "$setup_log" 2>&1 fi local err=0 for BNIC in "${BNICS[@]}"; do add_interface_bond0 "$BNIC" --verbose >> "$setup_log" 2>&1 local ret=$? 
[[ $ret -eq 0 ]] || err=$ret done return $err } copy_salt_master_config() { title "Copy the Salt master config template to the proper directory" if [ "$setup_type" = 'iso' ]; then logCmd "cp /root/SecurityOnion/files/salt/master/master /etc/salt/master" logCmd "cp /root/SecurityOnion/files/salt/master/salt-master.service /usr/lib/systemd/system/salt-master.service" else logCmd "cp ../files/salt/master/master /etc/salt/master" logCmd "cp ../files/salt/master/salt-master.service /usr/lib/systemd/system/salt-master.service" fi info "Copying pillar and salt files in $temp_install_dir to $local_salt_dir" logCmd "cp -Rv $temp_install_dir/pillar/ $local_salt_dir/" if [ -d "$temp_install_dir"/salt ] ; then logCmd "cp -Rv $temp_install_dir/salt/ $local_salt_dir/" fi # Restart the service so it picks up the changes logCmd "systemctl daemon-reload" logCmd "systemctl restart salt-master" } create_local_directories() { info "Creating local pillar and salt directories" PILLARSALTDIR=${SCRIPTDIR::-5} for i in "pillar" "salt"; do for d in $(find $PILLARSALTDIR/$i -type d); do suffixdir=${d//$PILLARSALTDIR/} if [ ! -d "$local_salt_dir/$suffixdir" ]; then logCmd "mkdir -pv $local_salt_dir$suffixdir" fi done logCmd "chown -R socore:socore $local_salt_dir/$i" done } create_local_nids_rules() { title "Create a local.rules file so it doesn't get removed on updates" logCmd "mkdir -p /opt/so/saltstack/local/salt/idstools" echo "# Custom Suricata rules go in this file" > /opt/so/saltstack/local/salt/idstools/local.rules logCmd "salt-run fileserver.clear_file_list_cache" } create_manager_pillars() { elasticsearch_pillar logstash_pillar manager_pillar create_global create_sensoroni_pillar #create_strelka_pillar backup_pillar soctopus_pillar docker_pillar redis_pillar idstools_pillar kratos_pillar soc_pillar idh_pillar } create_repo() { title "Create the repo directory" logCmd "createrepo /nsm/repo" } detect_cloud() { info "Testing if setup is running on a cloud instance..." 
if ( curl --fail -s -m 5 http://169.254.169.254/latest/meta-data/instance-id > /dev/null ) || ( dmidecode -s bios-vendor | grep -q Google > /dev/null) || [ -f /var/log/waagent.log ]; then export is_cloud="true"; fi } detect_os() { title "Detecting Base OS" if [ -f /etc/redhat-release ]; then if grep -q "CentOS Linux release 7" /etc/redhat-release; then OS=centos OSVER=7 is_centos=true pkgman="yum" elif grep -q "Rocky Linux release 8" /etc/redhat-release; then OS=rocky OSVER=8 is_rocky=true pkgman="dnf" info "We currently do not support Rocky Linux $OSVER but we are working on it!" else info "We do not support the version of CentOS you are trying to use." exit 1 fi elif [ -f /etc/os-release ]; then OS=ubuntu if grep -q "UBUNTU_CODENAME=bionic" /etc/os-release; then OSVER=bionic elif grep -q "UBUNTU_CODENAME=focal" /etc/os-release; then OSVER=focal else info "We do not support your current version of Ubuntu." exit 1 fi else info "We were unable to determine if you are using a supported OS." exit 1 fi info "Found OS: $OS $OSVER" } installer_progress_loop() { local i=0 local msg="${1:-Performing background actions...}" while true; do [[ $i -lt 98 ]] && ((i++)) set_progress_str "$i" "$msg" nolog [[ $i -gt 0 ]] && sleep 5s done } installer_prereq_packages() { if [ "$OS" == centos ]; then if [[ ! $is_iso ]]; then if ! yum versionlock > /dev/null 2>&1; then logCmd "yum -y install yum-plugin-versionlock" fi if ! command -v nmcli > /dev/null 2>&1; then logCmd "yum -y install NetworkManager" fi fi logCmd "systemctl enable NetworkManager" logCmd "systemctl start NetworkManager" elif [ "$OS" == ubuntu ]; then # Print message to stdout so the user knows setup is doing something retry 150 10 "apt-get update" "" "Err:" >> "$setup_log" 2>&1 || exit 1 # Install network manager so we can do interface stuff if ! 
command -v nmcli > /dev/null 2>&1; then retry 150 10 "apt-get -y install network-manager" >> "$setup_log" 2>&1 || exit 1 { systemctl enable NetworkManager systemctl start NetworkManager } >> "$setup_log" 2<&1 fi if ! command -v curl > /dev/null 2>&1; then retry 150 10 "apt-get -y install curl" >> "$setup_log" 2>&1 || exit 1 fi fi } disable_auto_start() { if crontab -l -u $INSTALLUSERNAME 2>&1 | grep -q so-setup; then # Remove the automated setup script from crontab, if it exists logCmd "crontab -u $INSTALLUSERNAME -r" fi if grep -s -q so-setup /home/$INSTALLUSERNAME/.bash_profile; then # Truncate last line of the bash profile info "Removing auto-run of setup from bash profile" sed -i '$ d' /home/$INSTALLUSERNAME/.bash_profile >> "$setup_log" 2>&1 fi } disable_ipv6() { { info "Disabling ipv6" logCmd "sysctl -w net.ipv6.conf.all.disable_ipv6=1" logCmd "sysctl -w net.ipv6.conf.default.disable_ipv6=1" } >> "$setup_log" 2>&1 { echo "net.ipv6.conf.all.disable_ipv6 = 1" echo "net.ipv6.conf.default.disable_ipv6 = 1" echo "net.ipv6.conf.lo.disable_ipv6 = 1" } >> /etc/sysctl.conf } docker_registry() { title "Setting up Docker Registry" logCmd "mkdir -p /etc/docker" # This will get applied so docker can attempt to start if [ -z "$DOCKERNET" ]; then DOCKERNET=172.17.0.0 fi if [ -z "$DOCKERNET2" ]; then DOCKERNET2=172.17.1.0 fi # Make the host use the manager docker registry DNETBIP=$(echo $DOCKERNET | awk -F'.' 
'{print $1,$2,$3,1}' OFS='.')/24 if [ -n "$TURBO" ]; then local proxy="$TURBO"; else local proxy="https://$MSRV"; fi printf '%s\n'\ "{"\ " \"registry-mirrors\": [ \"$proxy:5000\" ],"\ " \"bip\": \"$DNETBIP\","\ " \"default-address-pools\": ["\ " {"\ " \"base\" : \"$DOCKERNET/24\","\ " \"size\" : 24"\ " }"\ " ]"\ "}" > /etc/docker/daemon.json info "Docker Registry Setup - Complete" } docker_seed_update() { local name=$1 local percent_delta=1 ((docker_seed_update_percent+=percent_delta)) set_progress_str "$docker_seed_update_percent" "Downloading $name" } docker_seed_registry() { local VERSION="$SOVERSION" if ! [ -f /nsm/docker-registry/docker/registry.tar ]; then if [ "$install_type" == 'IMPORT' ]; then container_list 'so-import' elif [ "$install_type" == 'HELIXSENSOR' ]; then container_list 'so-helix' else container_list fi docker_seed_update_percent=25 update_docker_containers 'netinstall' '' 'docker_seed_update' "$setup_log" else logCmd "tar xvf /nsm/docker-registry/docker/registry.tar -C /nsm/docker-registry/docker" logCmd "rm /nsm/docker-registry/docker/registry.tar" fi } elasticsearch_pillar() { title "Create Advanced File" logCmd "touch $adv_elasticsearch_pillar_file" # Create the Elasticsearch pillar printf '%s\n'\ "elasticsearch:"\ " config:"\ " cluster:"\ " name: securityonion"\ " routing:"\ " allocation:"\ " disk:"\ " threshold_enabled: true"\ " watermark:"\ " low: 80%"\ " high: 85%"\ " flood_stage: 90%"\ " script:"\ " max_compilations_rate: 20000/1m"\ " indices:"\ " query:"\ " bool:"\ " max_clause_count: 3500"\ " index_settings:"\ > $elasticsearch_pillar_file for INDEX in aws azure barracuda beats bluecoat cef checkpoint cisco cyberark cylance elasticsearch endgame f5 firewall fortinet gcp google_workspace ids imperva infoblox juniper kibana logstash microsoft misp netflow netscout o365 okta osquery proofpoint radware redis snort snyk sonicwall sophos strelka syslog tomcat zeek zscaler do printf '%s\n'\ " so-$INDEX:"\ " warm: 7"\ " close: 30"\ " delete: 
365"\ " index_sorting: False"\ " index_template:"\ " template:"\ " settings:"\ " index:"\ " mapping:"\ " total_fields:"\ " limit: 5000"\ " refresh_interval: 30s"\ " number_of_shards: 1"\ " number_of_replicas: 0" >> $elasticsearch_pillar_file done for INDEX in import do printf '%s\n'\ " so-$INDEX:"\ " warm: 7"\ " close: 73000"\ " delete: 73001"\ " index_sorting: False"\ " index_template:"\ " template:"\ " settings:"\ " index:"\ " mapping:"\ " total_fields:"\ " limit: 5000"\ " refresh_interval: 30s"\ " number_of_shards: 1"\ " number_of_replicas: 0" >> $elasticsearch_pillar_file done } es_heapsize() { title "Determine ES Heap Size" if [ "$total_mem" -lt 8000 ] ; then ES_HEAP_SIZE="600m" elif [ "$total_mem" -ge 100000 ]; then # Set a max of 25GB for heap size # https://www.elastic.co/guide/en/elasticsearch/guide/current/heap-sizing.html ES_HEAP_SIZE="25000m" else # Set heap size to 33% of available memory ES_HEAP_SIZE=$(( total_mem / 3 )) if [ "$ES_HEAP_SIZE" -ge 25001 ] ; then ES_HEAP_SIZE="25000m" else ES_HEAP_SIZE=$ES_HEAP_SIZE"m" fi fi export ES_HEAP_SIZE if [[ "$install_type" =~ ^(EVAL|MANAGERSEARCH|STANDALONE|IMPORT)$ ]]; then NODE_ES_HEAP_SIZE=$ES_HEAP_SIZE export NODE_ES_HEAP_SIZE fi } filter_unused_nics() { if [[ $MNIC ]]; then local grep_string="$MNIC\|bond0"; else local grep_string="bond0"; fi # If we call this function and NICs have already been assigned to the bond interface then add them to the grep search string if [[ $BNICS ]]; then grep_string="$grep_string" for BONDNIC in "${BNICS[@]}"; do grep_string="$grep_string\|$BONDNIC" done fi # Finally, set filtered_nics to any NICs we aren't using (and ignore interfaces that aren't of use) filtered_nics=$(ip link | awk -F: '$0 !~ "lo|vir|veth|br|docker|wl|^[^0-9]"{print $2}' | grep -vwe "$grep_string" | sed 's/ //g' | sed -r 's/(.*)(\.[0-9]+)@\1/\1\2/g') readarray -t filtered_nics <<< "$filtered_nics" nic_list=() for nic in "${filtered_nics[@]}"; do local nic_mac=$(cat "/sys/class/net/${nic}/address" 
2>/dev/null) case $(cat "/sys/class/net/${nic}/carrier" 2>/dev/null) in 1) nic_list+=("$nic" "$nic_mac Link UP " "OFF") ;; 0) nic_list+=("$nic" "$nic_mac Link DOWN " "OFF") ;; *) nic_list+=("$nic" "$nic_mac Link UNKNOWN " "OFF") ;; esac done export nic_list } # Generate Firewall Templates firewall_generate_templates() { title "Generate Firewall Template" local firewall_pillar_path=$local_salt_dir/salt/firewall logCmd "mkdir -p $firewall_pillar_path" logCmd "cp ../files/firewall/* /opt/so/saltstack/local/salt/firewall/" for i in analyst beats_endpoint endgame sensor manager minion elastic_agent_endpoint search_node; do $default_salt_dir/salt/common/tools/sbin/so-firewall includehost "$i" 127.0.0.1 done } generate_ca() { title "Generating the certificate authority" logCmd "salt-call state.apply ca -l info" info "Confirming existence of the CA certificate" logCmd "openssl x509 -in /etc/pki/ca.crt -noout -subject -issuer -dates" } generate_ssl() { # if the install type is a manager then we need to wait for the minion to be ready before trying # to run the ssl state since we need the minion to sign the certs if [[ "$install_type" =~ ^(EVAL|MANAGER|MANAGERSEARCH|STANDALONE|IMPORT|HELIXSENSOR)$ ]]; then wait_for_salt_minion fi info "Applying SSL state" logCmd "salt-call state.apply ssl -l info" } generate_passwords(){ title "Generate Random Passwords" MYSQLPASS=$(get_random_value) PLAYBOOKDBPASS=$(get_random_value) PLAYBOOKADMINPASS=$(get_random_value) PLAYBOOKAUTOMATIONPASS=$(get_random_value) GRAFANAPASS=$(get_random_value) SENSORONIKEY=$(get_random_value) KRATOSKEY=$(get_random_value) } generate_interface_vars() { title "Setting the MTU to 9000 on all monitor NICS" MTU=9000 export MTU # Set interface variable if [[ $is_cloud ]]; then INTERFACE=${BNICS[0]} else INTERFACE='bond0' fi export INTERFACE } get_redirect() { whiptail_set_redirect if [ "$REDIRECTINFO" = "OTHER" ]; then collect_redirect_host fi } get_minion_type() { local minion_type case "$install_type" in 
'EVAL' | 'MANAGERSEARCH' | 'MANAGER' | 'SENSOR' | 'HEAVYNODE' | 'SEARCHNODE' | 'FLEET' | 'IDH' | 'STANDALONE' | 'IMPORT' | 'RECEIVER') minion_type=$(echo "$install_type" | tr '[:upper:]' '[:lower:]') ;; esac echo "$minion_type" } install_cleanup() { if [ -f "$temp_install_dir" ]; then info "Installer removing the following files:" logCmd "ls -lR $temp_install_dir" # Clean up after ourselves logCmd "rm -rf $temp_install_dir" fi # All cleanup prior to this statement must be compatible with automated testing. Cleanup # that will disrupt automated tests should be placed beneath this statement. [ -n "$TESTING" ] && return # If Mysql is running stop it if docker ps --format "{{.Names}}" 2>&1 | grep -q "so-mysql"; then logVmd "/usr/sbin/so-mysql-stop" fi if [[ $setup_type == 'iso' ]]; then info "Removing so-setup permission entry from sudoers file" logCmd "sed -i '/so-setup/d' /etc/sudoers" fi if [[ -z $SO_ERROR ]]; then info "Setup completed at $(date)" fi } import_registry_docker() { if [ -f /nsm/docker-registry/docker/registry_image.tar ]; then logCmd "service docker start" logCmd "docker load -i /nsm/docker-registry/docker/registry_image.tar" else info "Need to download registry" fi } idh_pillar() { touch $adv_idh_pillar_file title "Create the IDH Pillar" printf '%s\n'\ "idh:"\ " listen_on_mgnt_int: True"\ " services:"\ " - HTTP"\ " - FTP"\ " - MYSQL"\ " - MSSQL"\ " - VNC"\ " - SSH" > "$idh_pillar_file" } logstash_pillar() { # Create the logstash advanced pillar touch $adv_logstash_pillar_file title "Create the logstash pillar" printf '%s\n'\ "logstash_settings:"\ " ls_host: '$HOSTNAME'"\ " ls_pipeline_batch_size: 125"\ " ls_input_threads: 1"\ " lsheap: $NODE_LS_HEAP_SIZE"\ " ls_pipeline_workers: $num_cpu_cores"\ "" > "$logstash_pillar_file" } # Set Logstash heap size based on total memory ls_heapsize() { title "Setting Logstash heap size" if [ "$total_mem" -ge 32000 ]; then LS_HEAP_SIZE='1000m' return fi case "$install_type" in 'MANAGERSEARCH' | 'HEAVYNODE' | 
# Write the idstools pillar file with the chosen ruleset and oinkcode.
# Globals read: RULESETUP, OINKCODE, adv_idstools_pillar_file,
# idstools_pillar_file
idstools_pillar() {
  # Fixed log message typo: "Ading" -> "Adding"
  title "Adding IDSTOOLS pillar options"
  touch $adv_idstools_pillar_file
  printf '%s\n'\
  "idstools:"\
  " config:"\
  " ruleset: '$RULESETUP'"\
  " oinkcode: '$OINKCODE'"\
  " urls: []"\
  " sids:"\
  " enabled: []"\
  " disabled: []"\
  " modify: []"\
  "" > "$idstools_pillar_file"
}
# Build the Strelka pillar file. The YARA rule repo depends on airgap
# status: airgapped installs pull rules from the local manager repo,
# connected installs pull from the public signature-base repo.
create_strelka_pillar() {
  local repo_url
  title "Create the Strelka pillar file"
  touch $adv_strelka_pillar_file
  if [[ $is_airgap ]]; then
    repo_url="https://$HOSTNAME/repo/rules/strelka"
  else
    repo_url="https://github.com/Neo23x0/signature-base"
  fi
  printf '%s\n'\
  "strelka:"\
  " enabled: $STRELKA"\
  " rules: 1"\
  " repos:"\
  " - '$repo_url'" > "$strelka_pillar_file"
}
# Finalize network setup: install the checksum-offload-disable
# NetworkManager dispatcher script and point it at the monitor interface.
# Globals read: INTERFACE (e.g. bond0)
network_setup() {
  info "Finishing up network setup"
  info "... Copying 99-so-checksum-offload-disable"
  logCmd "cp ./install_scripts/99-so-checksum-offload-disable /etc/NetworkManager/dispatcher.d/pre-up.d/99-so-checksum-offload-disable"
  info "... Modifying 99-so-checksum-offload-disable"
  # BUG FIX: the previous sed expression was missing the 's' (substitute)
  # command and embedded literal single quotes, which logCmd's unquoted
  # expansion passed straight through to sed, so the edit always failed.
  # This replaces the literal $MNIC placeholder with the real interface.
  logCmd "sed -i s/\$MNIC/${INTERFACE}/g /etc/NetworkManager/dispatcher.d/pre-up.d/99-so-checksum-offload-disable"
}
# Pre-create groups with fixed GIDs so CentOS packages cannot claim the
# IDs that Security Onion components rely on.
reserve_group_ids() {
  local entry
  for entry in \
    'kratos:928' 'elasticsearch:930' 'logstash:931' 'kibana:932' \
    'elastalert:933' 'curator:934' 'zeek:937' 'suricata:940' \
    'stenographer:941' 'ossec:945' 'cyberchef:946'; do
    logCmd "groupadd -g ${entry#*:} ${entry%%:*}"
  done
}
sysctl net.ipv4.ip_local_reserved_ports | grep 55000 | grep 57314; then info "Reserving ephemeral ports used by Security Onion components to avoid collisions" sysctl -w net.ipv4.ip_local_reserved_ports="55000,57314" else info "Ephemeral ports already reserved" fi } reinstall_init() { info "Putting system in state to run setup again" if [[ $install_type =~ ^(MANAGER|EVAL|HELIXSENSOR|MANAGERSEARCH|STANDALONE|FLEET|IMPORT)$ ]]; then local salt_services=( "salt-master" "salt-minion" ) else local salt_services=( "salt-minion" ) fi local service_retry_count=20 { # remove all of root's cronjobs logCmd "crontab -r -u root" if command -v salt-call &> /dev/null && grep -q "master:" /etc/salt/minion 2> /dev/null; then # Disable schedule so highstate doesn't start running during the install salt-call -l info schedule.disable --local # Kill any currently running salt jobs, also to prevent issues with highstate. salt-call -l info saltutil.kill_all_jobs --local fi # Kill any salt processes (safely) for service in "${salt_services[@]}"; do # Stop the service in the background so we can exit after a certain amount of time systemctl stop "$service" & local pid=$! local count=0 while check_service_status "$service"; do if [[ $count -gt $service_retry_count ]]; then info "Could not stop $service after 1 minute, exiting setup." 
# Stop the systemctl process trying to kill the service, show user a message, then exit setup kill -9 $pid exit 1 fi sleep 5 ((count++)) done done logCmd "salt-call state.apply ca.remove -linfo --local --file-root=../salt" logCmd "salt-call state.apply ssl.remove -linfo --local --file-root=../salt" # Remove all salt configs rm -rf /etc/salt/engines/* /etc/salt/grains /etc/salt/master /etc/salt/master.d/* /etc/salt/minion /etc/salt/minion.d/* /etc/salt/pki/* /etc/salt/proxy /etc/salt/proxy.d/* /var/cache/salt/ if command -v docker &> /dev/null; then # Stop and remove all so-* containers so files can be changed with more safety if [[ $(docker ps -a -q --filter "name=so-" | wc -l) -gt 0 ]]; then docker stop $(docker ps -a -q --filter "name=so-") docker rm -f $(docker ps -a -q --filter "name=so-") fi fi local date_string date_string=$(date +%s) # Backup /opt/so since we'll be rebuilding this directory during setup backup_dir /opt/so "$date_string" # We need to restore these files during a reinstall so python3-influxdb state doesn't try to patch again restore_file "/opt/so_old_$date_string/state/influxdb_continuous_query.py.patched" "/opt/so/state/" restore_file "/opt/so_old_$date_string/state/influxdb_retention_policy.py.patched" "/opt/so/state/" restore_file "/opt/so_old_$date_string/state/influxdbmod.py.patched" "/opt/so/state/" # If the elastic license has been accepted restore the state file restore_file "/opt/so_old_$date_string/state/yeselastic.txt" "/opt/so/state/" # Backup (and erase) directories in /nsm to prevent app errors backup_dir /nsm/mysql "$date_string" backup_dir /nsm/kratos "$date_string" # Remove the old launcher package in case the config changes remove_package launcher-final if [[ $OS == 'ubuntu' ]]; then info "Unholding previously held packages." apt-mark unhold $(apt-mark showhold) fi } >> "$setup_log" 2>&1 info "System reinstall init has been completed." 
# Move directory $1 aside to "$1_old_$2" if it exists; no-op otherwise.
# Note: dir/backup_suffix intentionally remain global, matching the
# file's convention for these small helpers.
backup_dir() {
  dir=$1
  backup_suffix=$2
  [[ -d $dir ]] || return 0
  mv "$dir" "${dir}_old_${backup_suffix}"
}
SALT VERSION UPDATES - READ BELOW
# Check for reposync info "Backing up old repos" mkdir -p /nsm/repo mkdir -p /root/reposync_cache echo "[main]" > /root/repodownload.conf echo "cachedir=/root/reposync_cache" >> /root/repodownload.conf echo "keepcache=0" >> /root/repodownload.conf echo "debuglevel=2" >> /root/repodownload.conf echo "logfile=/var/log/yum.log" >> /root/repodownload.conf echo "exactarch=1" >> /root/repodownload.conf echo "obsoletes=1" >> /root/repodownload.conf echo "gpgcheck=1" >> /root/repodownload.conf echo "plugins=1" >> /root/repodownload.conf echo "installonly_limit=2" >> /root/repodownload.conf echo "bugtracker_url=http://bugs.centos.org/set_project.php?project_id=23&ref=http://bugs.centos.org/bug_report_page.php?category=yum" >> /root/repodownload.conf echo "distroverpkg=centos-release" >> /root/repodownload.conf echo "clean_requirements_on_remove=1" >> /root/repodownload.conf echo "[securityonionsync]" >> /root/repodownload.conf echo "name=Security Onion Repo repo" >> /root/repodownload.conf echo "baseurl=https://repo.securityonion.net/file/securityonion-repo/c7so/" >> /root/repodownload.conf echo "enabled=1" >> /root/repodownload.conf echo "gpgcheck=1" >> /root/repodownload.conf echo "gpgkey=https://repo.securityonion.net/file/securityonion-repo/keys/securityonion.pub" >> /root/repodownload.conf REPOSYNC=$(rpm -qa | grep createrepo | wc -l) if [[ ! "$REPOSYNC" -gt 0 ]]; then # Install reposync info "Installing createrepo" logCmd "yum -y install -c /root/repodownload.conf yum-utils createrepo" else info "We have what we need to sync" fi logCmd "reposync --norepopath -n -g -l -d -m -c /root/repodownload.conf -r securityonionsync --download-metadata -p /nsm/repo/" # After the download is complete run createrepo create_repo } saltify() { if [[ $is_centos ]]; then RUSALTY=$(rpm -qa | grep salt-minion | wc -l) if [[ "$RUSALTY" -gt 0 ]]; then # Salt is already installed. 
# Persist the generated service passwords so they survive a re-install.
# Writes $local_salt_dir/pillar/secrets.sls only when it does not already
# exist; an existing secrets pillar is never overwritten.
secrets_pillar(){
  local secrets_file=$local_salt_dir/pillar/secrets.sls
  [ -f "$secrets_file" ] && return 0
  info "Creating Secrets Pillar"
  mkdir -p $local_salt_dir/pillar
  printf '%s\n'\
  "secrets:"\
  " mysql: $MYSQLPASS"\
  " playbook_db: $PLAYBOOKDBPASS"\
  " playbook_admin: $PLAYBOOKADMINPASS"\
  " playbook_automation: $PLAYBOOKAUTOMATIONPASS"\
  " playbook_automation_api_key: "\
  " grafana_admin: $GRAFANAPASS" > "$secrets_file"
}
# Derive and export MINION_ID from the short hostname plus a role suffix
# (lowercased): "<host>_workstation" for analyst installs, otherwise
# "<host>_<install_type>". Also records minion_type via get_minion_type.
set_minion_info() {
  local suffix
  short_name=${HOSTNAME%%.*}
  if [[ $is_analyst ]]; then
    suffix='workstation'
  else
    suffix=$install_type
  fi
  MINION_ID=$(echo "${short_name}_${suffix}" | tr '[:upper:]' '[:lower:]')
  export MINION_ID
  info "MINION_ID = $MINION_ID"
  minion_type=$(get_minion_type)
}
# Emit a whiptail gauge update (XXX / percent / text / XXX) and advance
# the global $percentage; the percentage never moves backward.
# $1 - target percentage
# $2 - progress bar text (exported as progress_bar_text)
# $3 - optional; if non-empty, skip writing a progress line to the log
set_progress_str() {
  local percentage_input=$1
  progress_bar_text=$2
  export progress_bar_text
  # BUG FIX: nolog was previously assigned from $2 (the text), which
  # suppressed the "Progressing" log line on every call; the no-log flag
  # is the optional third argument.
  local nolog=$3
  if (( "$percentage_input" >= "$percentage" )); then
    percentage="$percentage_input"
  fi
  percentage_str="XXX\n${percentage}\n${progress_bar_text}\nXXX"
  echo -e "$percentage_str"
  if [[ -z $nolog ]]; then
    info "Progressing ($percentage%): $progress_bar_text"
  fi
}
# Set the static system hostname and rebuild /etc/hosts and /etc/hostname
# so the host resolves its own name locally.
# Globals read: HOSTNAME
set_hostname() {
  logCmd "hostnamectl set-hostname --static $HOSTNAME"
  # BUG FIX: the IPv4 line previously ended with "localhost4.localdomain";
  # the conventional alias is "localhost4.localdomain4" (matching the
  # "localhost6.localdomain6" alias on the IPv6 line below).
  echo "127.0.0.1 $HOSTNAME $HOSTNAME.localdomain localhost localhost.localdomain localhost4 localhost4.localdomain4" > /etc/hosts
  echo "::1 $HOSTNAME $HOSTNAME.localdomain localhost localhost.localdomain localhost6 localhost6.localdomain6" >> /etc/hosts
  echo "$HOSTNAME" > /etc/hostname
  logCmd "hostname -F /etc/hostname"
}
# Create a system user and matching group with fixed uid/gid.
# $1 - username, $2 - uid, $3 - gid, $4 - home dir,
# $5 - optional password; if supplied, it is set via chpasswd.
so_add_user() {
  local username=$1
  local uid=$2
  local gid=$3
  local home_dir=$4
  # BUG FIX: always declare pass as a local. Previously it was only made
  # local when $5 was given, so with no password argument a caller's
  # global $pass could leak in and set an unintended password below.
  local pass=${5:-}
  info "Add $username user"
  logCmd "groupadd --gid '$gid' '$username'"
  logCmd "useradd -m --uid '$uid' --gid '$gid' --home-dir '$home_dir' '$username'"
  # If a password has been passed in, set the password
  if [ "$pass" ]; then
    echo "$username":"$pass" | chpasswd --crypt-method=SHA512
  fi
}
# Wait for a file to appear, polling every $3 seconds up to $2 attempts.
# $1 - path to wait for
# $2 - max attempts (multiplied by the wait interval for total wait time,
#      so make sure it isn't too large)
# $3 - seconds between checks
# Returns 0 when the file shows up, 1 on timeout.
wait_for_file() {
  local filename=$1
  local max_attempts=$2
  local cur_attempts=0
  local wait_interval=$3
  local total_time=$(( max_attempts * wait_interval ))
  while [[ $cur_attempts -lt $max_attempts ]]; do
    if [ -f "$filename" ]; then
      # BUG FIX: report the time the file was actually found; previously a
      # timestamp captured once before the loop was logged, so "found at"
      # showed the wait's start time instead of the discovery time.
      info "File $filename found at $(date)"
      return 0
    fi
    ((cur_attempts++))
    info "File $filename does not exist; waiting ${wait_interval}s then checking again ($cur_attempts/$max_attempts)..."
    sleep "$wait_interval"
  done
  info "Could not find $filename after waiting ${total_time}s"
  return 1
}