Commit by m0duspwnens on 2021-01-04 14:10:41 -05:00
21 changed files with 504 additions and 277 deletions

View File

@@ -17,8 +17,8 @@
# Check for prerequisites
if [ "$(id -u)" -ne 0 ]; then
  echo "This script must be run using sudo!"
  exit 1
fi
# Define a banner to separate sections
@@ -26,46 +26,46 @@ banner="========================================================================
header() {
  echo
  printf '%s\n' "$banner" "$*" "$banner"
}

lookup_salt_value() {
  key=$1
  group=$2
  kind=$3
  if [ -z "$kind" ]; then
    kind=pillar
  fi
  if [ -n "$group" ]; then
    group=${group}:
  fi
  salt-call --no-color ${kind}.get ${group}${key} --out=newline_values_only
}

lookup_pillar() {
  key=$1
  pillar=$2
  if [ -z "$pillar" ]; then
    pillar=global
  fi
  lookup_salt_value "$key" "$pillar" "pillar"
}

lookup_pillar_secret() {
  lookup_pillar "$1" "secrets"
}

lookup_grain() {
  lookup_salt_value "$1" "" "grains"
}

lookup_role() {
  id=$(lookup_grain id)
  pieces=($(echo $id | tr '_' ' '))
  echo ${pieces[1]}
}

check_container() {
@@ -74,98 +74,143 @@ check_container() {
}

check_password() {
  local password=$1
  echo "$password" | egrep -v "'|\"|\\$|\\\\" > /dev/null 2>&1
  return $?
}

set_os() {
  if [ -f /etc/redhat-release ]; then
    OS=centos
  else
    OS=ubuntu
  fi
}

set_minionid() {
  MINIONID=$(lookup_grain id)
}

set_version() {
  CURRENTVERSION=0.0.0
  if [ -f /etc/soversion ]; then
    CURRENTVERSION=$(cat /etc/soversion)
  fi
  if [ -z "$VERSION" ]; then
    if [ -z "$NEWVERSION" ]; then
      if [ "$CURRENTVERSION" == "0.0.0" ]; then
        echo "ERROR: Unable to detect Security Onion version; terminating script."
        exit 1
      else
        VERSION=$CURRENTVERSION
      fi
    else
      VERSION="$NEWVERSION"
    fi
  fi
}

require_manager() {
  # Check to see if this is a manager
  MANAGERCHECK=$(cat /etc/salt/grains | grep role | awk '{print $2}')
  if [ $MANAGERCHECK == 'so-eval' ] || [ $MANAGERCHECK == 'so-manager' ] || [ $MANAGERCHECK == 'so-managersearch' ] || [ $MANAGERCHECK == 'so-standalone' ] || [ $MANAGERCHECK == 'so-helix' ] || [ $MANAGERCHECK == 'so-import' ]; then
    echo "This is a manager, We can proceed."
  else
    echo "Please run this command on the manager; the manager controls the grid."
    exit 1
  fi
}

is_single_node_grid() {
  role=$(lookup_role)
  if [ "$role" != "eval" ] && [ "$role" != "standalone" ] && [ "$role" != "import" ]; then
    return 1
  fi
  return 0
}

fail() {
  msg=$1
  echo "ERROR: $msg"
  echo "Exiting."
  exit 1
}

get_random_value() {
  length=${1:-20}
  head -c 5000 /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w $length | head -n 1
+}
+
+wait_for_apt() {
+  local progress_callback=$1
+  local retry_count=30
+  local retry_timeout='10s'
+  local lock_msg="Could not acquire dpkg lock, waiting $retry_timeout for lock to release."
+  if [[ -z $progress_callback ]]; then
+    if [[ -z $progress_bar_text ]]; then
+      local old_text="Installing..."
+    else
+      local old_text="$progress_bar_text"
+    fi
+  fi
+  local count=0
+  while [[ "$count" -lt "$retry_count" ]]; do
+    ((count++))
+    [[ -z $progress_callback ]] && echo "Attempting to acquire dpkg lock to run apt command... (Attempt $count/$retry_count)"
+    if __check_apt_lock; then
+      if [[ -z $progress_callback ]]; then
+        echo " $lock_msg" | tee -a "$setup_log"
+      else
+        $progress_callback "Could not acquire dpkg lock, waiting $retry_timeout ($count/$retry_count)"
+      fi
+    else
+      [[ -z $progress_callback ]] || $progress_callback "$old_text"
+      return 0
+    fi
+    sleep "$retry_timeout"
+  done
+  if __check_apt_lock; then
+    [[ -z $progress_callback ]] && echo "Could not acquire lock after $retry_count attempts, aborting."
+    return 1
+  else
+    return 0
+  fi
+}
+
+__check_apt_lock() {
+  lsof /var/lib/dpkg/lock &> /dev/null
+  local lock=$?
+  return $lock
}
wait_for_web_response() {
  url=$1
  expected=$2
  maxAttempts=${3:-300}
  logfile=/root/wait_for_web_response.log
  attempt=0
  while [[ $attempt -lt $maxAttempts ]]; do
    attempt=$((attempt+1))
    echo "Waiting for value '$expected' at '$url' ($attempt/$maxAttempts)"
    result=$(curl -ks -L $url)
    exitcode=$?
    echo "--------------------------------------------------" >> $logfile
    echo "$(date) - Checking web URL: $url ($attempt/$maxAttempts)" >> $logfile
    echo "$result" >> $logfile
    echo "exit code=$exitcode" >> $logfile
    echo "" >> $logfile
    if [[ $exitcode -eq 0 && "$result" =~ $expected ]]; then
      echo "Received expected response; proceeding."
      return 0
    fi
    echo "Server is not ready"
    sleep 1
  done
  echo "Server still not ready after $maxAttempts attempts; giving up."
  return 1
}
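Note: wait_for_apt is the new guard used throughout setup before any apt/dpkg operation; it retries up to 30 times at 10s intervals while lsof reports /var/lib/dpkg/lock as held. A minimal usage sketch, assuming so-common is sourced and $setup_log is set as in the setup scripts later in this commit:

    # Retry until the dpkg lock is free, then install; bail out if it never frees.
    if wait_for_apt; then
        apt-get install -y curl >> "$setup_log" 2>&1
    else
        exit 1
    fi

When a callback such as whiptail_prog_new_message is passed as the first argument, the lock-wait status is reported through the installer's progress bar instead of being echoed to stdout.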

View File

@@ -16,7 +16,7 @@ if [ ! "$(docker ps -q -f name=so-fleet)" ]; then
fi

docker exec so-fleet fleetctl config set --address https://127.0.0.1:8080 --tls-skip-verify --url-prefix /fleet
-docker exec -it so-fleet bash -c 'while [[ "$(curl -s -o /dev/null --insecure -w ''%{http_code}'' https://127.0.0.1:8080/fleet)" != "301" ]]; do sleep 5; done'
+docker exec so-fleet bash -c 'while [[ "$(curl -s -o /dev/null --insecure -w ''%{http_code}'' https://127.0.0.1:8080/fleet)" != "301" ]]; do sleep 5; done'
docker exec so-fleet fleetctl setup --email $1 --password $2
docker exec so-fleet fleetctl apply -f /packs/palantir/Fleet/Endpoints/MacOS/osquery.yaml

View File

@@ -17,14 +17,43 @@
# Usage: so-tcpreplay "/opt/samples/*"

-REPLAY_ENABLED=$(docker images | grep so-tcpreplay)
-REPLAY_RUNNING=$(docker ps | grep so-tcpreplay)
-if [ "$REPLAY_ENABLED" != "" ] && [ "$REPLAY_RUNNING" != "" ]; then
-  docker cp so-tcpreplay:/opt/samples /opt/samples
-  docker exec -it so-tcpreplay /usr/local/bin/tcpreplay -i bond0 -M10 $1
-else
-  echo "Replay functionality not enabled! To enable, run `so-tcpreplay-start`"
-  echo
-  echo "Note that you will need internet access to download the appropriate components"
+. /usr/sbin/so-common
+. /usr/sbin/so-image-common
+
+REPLAYIFACE=${REPLAYIFACE:-$(lookup_pillar interface sensor)}
+REPLAYSPEED=${REPLAYSPEED:-10}
+
+mkdir -p /opt/so/samples
+
+if [[ $# -lt 1 ]]; then
+  echo "Replays one or more PCAP sample files to the Security Onion monitoring interface."
+  echo
+  echo "Usage: $0 <pcap-sample(s)>"
+  echo
+  echo "All PCAPs must be placed in the /opt/so/samples directory unless replaying"
+  echo "a sample pcap that is included in the so-tcpreplay image. Those PCAP sampes"
+  echo "are located in the /opt/samples directory inside of the image."
+  echo
+  echo "Customer provided PCAP example:"
+  echo " $0 /opt/so/samples/some_event.pcap"
+  echo
+  echo "Security Onion-provided PCAP example:"
+  echo " $0 /opt/samples/4in6.pcap"
+  exit 1
fi
+
+if ! docker ps | grep -q so-tcpreplay; then
+  echo "Replay functionality not enabled; attempting to enable now (may require Internet access)..."
+  echo
+  TRUSTED_CONTAINERS=("so-tcpreplay")
+  mkdir -p /opt/so/log/tcpreplay
+  update_docker_containers "tcpreplay" "" "" "/opt/so/log/tcpreplay/init.log"
+  so-tcpreplay-start || fail "Unable to initialize tcpreplay"
+fi
+
+echo "Replaying PCAP(s) at ${REPLAYSPEED} Mbps on interface ${REPLAYIFACE}..."
+docker exec so-tcpreplay /usr/bin/bash -c "/usr/local/bin/tcpreplay -i ${REPLAYIFACE} -M${REPLAYSPEED} $@"
+echo "Replay completed. Warnings shown above are typically expected."

View File

@@ -15,31 +15,4 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

-# Usage: so-test
+so-tcpreplay /opt/samples/*

-. /usr/sbin/so-common
-REPLAY_ENABLED=$(docker images | grep so-tcpreplay)
-REPLAY_RUNNING=$(docker ps | grep so-tcpreplay)
-if [ "$REPLAY_ENABLED" != "" ] && [ "$REPLAY_RUNNING" != "" ]; then
-  echo
-  echo "Preparing to replay PCAPs..."
-  docker cp so-tcpreplay:/opt/samples /opt/samples
-  docker exec -it so-tcpreplay /usr/local/bin/tcpreplay -i bond0 -M10 /opt/samples/*
-  echo
-  echo "PCAP's have been replayed - it is normal to see some warnings."
-  echo
-else
-  echo "Replay functionality not enabled! Enabling Now...."
-  echo
-  echo "Note that you will need internet access to download the appropriate components"
-  /usr/sbin/so-start tcpreplay
-  echo "Replay functionality enabled. Replaying PCAPs Now...."
-  docker cp so-tcpreplay:/opt/samples /opt/samples
-  docker exec -it so-tcpreplay /usr/local/bin/tcpreplay -i bond0 -M10 /opt/samples/*
-  echo
-  echo "PCAP's have been replayed - it is normal to see some warnings."
-  echo
-fi

View File

@@ -88,6 +88,13 @@ append_so-aptcacherng_so-status.conf:
{% endif %}

+strelka_yara_update_old:
+  cron.absent:
+    - user: root
+    - name: '/usr/sbin/so-yara-update > /dev/null 2>&1'
+    - hour: '7'
+    - minute: '1'

strelka_yara_update:
  cron.present:
    - user: root
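Note: the cron.absent state appears intended to clear the old so-yara-update entry scheduled at 07:01 before strelka_yara_update re-adds the job with the current settings, so stale schedules from earlier releases do not linger. A quick way to check the result on a running manager (a sketch; the exact crontab contents depend on the rest of the state):

    # root's crontab should list only the entry managed by strelka_yara_update
    crontab -l -u root | grep so-yara-update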

View File

@@ -1,4 +1,4 @@
-{%- set MANAGER = salt['pillar.get']('global:url_base', '') %}
+{%- set MANAGER = salt['pillar.get']('manager:mainip', '') %}
{%- set URLBASE = salt['pillar.get']('global:url_base', '') %}
{%- set HIVEKEY = salt['pillar.get']('global:hivekey', '') %}
{%- set CORTEXKEY = salt['pillar.get']('global:cortexorguserkey', '') %}

View File

@@ -3,18 +3,6 @@
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{% set MANAGER = salt['grains.get']('master') %}

-so-tcpreplayimage:
-  cmd.run:
-    - name: docker pull --disable-content-trust=false docker.io/{{ IMAGEREPO }}/so-tcpreplay:{{ VERSION }}
-
-so-tcpreplaytag:
-  cmd.run:
-    - name: docker tag {{ IMAGEREPO }}/so-tcpreplay:{{ VERSION }} {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-tcpreplay:{{ VERSION }}
-
-so-tcpreplaypush:
-  cmd.run:
-    - name: docker push {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-tcpreplay:{{ VERSION }}
-
so-tcpreplay:
  docker_container.running:
    - network_mode: "host"
@@ -23,6 +11,9 @@ so-tcpreplay:
    - user: root
    - interactive: True
    - tty: True
+    - binds:
+      - /opt/so/samples:/opt/so/samples:ro
{% else %}

View File

@@ -123,7 +123,7 @@ cortexscript:
    - source: salt://thehive/scripts/cortex_init
    - cwd: /opt/so
    - template: jinja
-    - hide_output: True
+    - hide_output: False

so-thehive:
  docker_container.running:
@@ -148,7 +148,7 @@ thehivescript:
    - source: salt://thehive/scripts/hive_init
    - cwd: /opt/so
    - template: jinja
-    - hide_output: True
+    - hide_output: False
{% else %}

View File

@@ -1,6 +1,5 @@
#!/bin/bash
# {%- set MANAGERIP = salt['pillar.get']('global:managerip', '') %}
-# {%- set URLBASE = salt['pillar.get']('global:url_base', '') %}
# {%- set CORTEXUSER = salt['pillar.get']('global:cortexuser', 'cortexadmin') %}
# {%- set CORTEXPASSWORD = salt['pillar.get']('global:cortexpassword', 'cortexchangeme') %}
# {%- set CORTEXKEY = salt['pillar.get']('global:cortexkey', '') %}
@@ -19,8 +18,8 @@ cortex_clean(){
}

cortex_init(){
-  CORTEX_URL="{{URLBASE}}/cortex"
-  CORTEX_API_URL="{{CORTEX_URL}}/api"
+  CORTEX_URL="http://{{MANAGERIP}}:9001/cortex/"
+  CORTEX_API_URL="${CORTEX_URL}api"
  CORTEX_USER="{{CORTEXUSER}}"
  CORTEX_PASSWORD="{{CORTEXPASSWORD}}"
  CORTEX_KEY="{{CORTEXKEY}}"
@@ -30,27 +29,27 @@ cortex_init(){
  CORTEX_ORG_USER_KEY="{{CORTEXORGUSERKEY}}"
  SOCTOPUS_CONFIG="$default_salt_dir/salt/soctopus/files/SOCtopus.conf"

-  if wait_for_web_response https://$CORTEX_URL "Cortex"; then
+  if wait_for_web_response $CORTEX_URL "Cortex"; then
    # Migrate DB
-    curl -v -k -XPOST -L "https://$CORTEX_API_URL/maintenance/migrate"
+    curl -sk -XPOST -L "$CORTEX_API_URL/maintenance/migrate"
    # Create intial Cortex superadmin
-    curl -v -k -L "https://$CORTEX_API_URL/user" -H "Content-Type: application/json" -d "{\"login\" : \"$CORTEX_USER\",\"name\" : \"$CORTEX_USER\",\"roles\" : [\"superadmin\"],\"preferences\" : \"{}\",\"password\" : \"$CORTEX_PASSWORD\", \"key\": \"$CORTEX_KEY\"}"
+    curl -sk -L "$CORTEX_API_URL/user" -H "Content-Type: application/json" -d "{\"login\" : \"$CORTEX_USER\",\"name\" : \"$CORTEX_USER\",\"roles\" : [\"superadmin\"],\"preferences\" : \"{}\",\"password\" : \"$CORTEX_PASSWORD\", \"key\": \"$CORTEX_KEY\"}"
    # Create user-supplied org
-    curl -k -XPOST -H "Authorization: Bearer $CORTEX_KEY" -H "Content-Type: application/json" -L "https://$CORTEX_API_URL/organization" -d "{ \"name\": \"$CORTEX_ORG_NAME\",\"description\": \"$CORTEX_ORG_DESC\",\"status\": \"Active\"}"
+    curl -sk -XPOST -H "Authorization: Bearer $CORTEX_KEY" -H "Content-Type: application/json" -L "$CORTEX_API_URL/organization" -d "{ \"name\": \"$CORTEX_ORG_NAME\",\"description\": \"$CORTEX_ORG_DESC\",\"status\": \"Active\"}"
    # Create user-supplied org user
-    curl -k -XPOST -H "Authorization: Bearer $CORTEX_KEY" -H "Content-Type: application/json" -L "https://$CORTEX_API_URL/user" -d "{\"name\": \"$CORTEX_ORG_USER\",\"roles\": [\"read\",\"analyze\",\"orgadmin\"],\"organization\": \"$CORTEX_ORG_NAME\",\"login\": \"$CORTEX_ORG_USER\",\"key\": \"$CORTEX_ORG_USER_KEY\" }"
+    curl -sk -XPOST -H "Authorization: Bearer $CORTEX_KEY" -H "Content-Type: application/json" -L "$CORTEX_API_URL/user" -d "{\"name\": \"$CORTEX_ORG_USER\",\"roles\": [\"read\",\"analyze\",\"orgadmin\"],\"organization\": \"$CORTEX_ORG_NAME\",\"login\": \"$CORTEX_ORG_USER\",\"key\": \"$CORTEX_ORG_USER_KEY\" }"
    # Enable URLScan.io Analyzer
-    curl -v -k -XPOST -H "Authorization: Bearer $CORTEX_ORG_USER_KEY" -H "Content-Type: application/json" -L "https://$CORTEX_API_URL/organization/analyzer/Urlscan_io_Search_0_1_0" -d '{"name":"Urlscan_io_Search_0_1_0","configuration":{"auto_extract_artifacts":false,"check_tlp":true,"max_tlp":2}}'
+    curl -sv -k -XPOST -H "Authorization: Bearer $CORTEX_ORG_USER_KEY" -H "Content-Type: application/json" -L "$CORTEX_API_URL/organization/analyzer/Urlscan_io_Search_0_1_0" -d '{"name":"Urlscan_io_Search_0_1_0","configuration":{"auto_extract_artifacts":false,"check_tlp":true,"max_tlp":2}}'
    # Enable Cert PassiveDNS Analyzer
-    curl -v -k -XPOST -H "Authorization: Bearer $CORTEX_ORG_USER_KEY" -H "Content-Type: application/json" -L "https://$CORTEX_API_URL/organization/analyzer/CERTatPassiveDNS_2_0" -d '{"name":"CERTatPassiveDNS_2_0","configuration":{"auto_extract_artifacts":false,"check_tlp":true,"max_tlp":2, "limit": 100}}'
+    curl -sv -k -XPOST -H "Authorization: Bearer $CORTEX_ORG_USER_KEY" -H "Content-Type: application/json" -L "$CORTEX_API_URL/organization/analyzer/CERTatPassiveDNS_2_0" -d '{"name":"CERTatPassiveDNS_2_0","configuration":{"auto_extract_artifacts":false,"check_tlp":true,"max_tlp":2, "limit": 100}}'
    # Revoke $CORTEX_USER key
-    curl -k -XDELETE -H "Authorization: Bearer $CORTEX_KEY" -L "https://$CORTEX_API_URL/user/$CORTEX_USER/key"
+    curl -sk -XDELETE -H "Authorization: Bearer $CORTEX_KEY" -L "$CORTEX_API_URL/user/$CORTEX_USER/key"
    # Update SOCtopus config with apikey value
    #sed -i "s/cortex_key = .*/cortex_key = $CORTEX_KEY/" $SOCTOPUS_CONFIG
@@ -58,6 +57,7 @@ cortex_init(){
    touch /opt/so/state/cortex.txt
  else
    echo "We experienced an issue connecting to Cortex!"
+    exit 1
  fi
}
@@ -65,10 +65,11 @@ if [ -f /opt/so/state/cortex.txt ]; then
  cortex_clean
  exit 0
else
-  if wait_for_web_response http://{{MANAGERIP}}:9400 '"status":"green"'; then
+  if wait_for_web_response http://{{MANAGERIP}}:9400/_cluster/health '"status":"green"'; then
    cortex_init
    cortex_clean
  else
-    echo "TheHive Elasticsearch server is not ready; unable to proceed with cortex init."
+    echo "TheHive Elasticsearch server is not ready; unable to proceed with Cortex init."
+    exit 1
  fi
fi

View File

@@ -1,6 +1,5 @@
#!/bin/bash
# {%- set MANAGERIP = salt['pillar.get']('global:managerip', '') %}
-# {%- set URLBASE = salt['pillar.get']('global:url_base', '') %}
# {%- set THEHIVEUSER = salt['pillar.get']('global:hiveuser', 'hiveadmin') %}
# {%- set THEHIVEPASSWORD = salt['pillar.get']('global:hivepassword', 'hivechangeme') %}
# {%- set THEHIVEKEY = salt['pillar.get']('global:hivekey', '') %}
@@ -13,29 +12,30 @@ thehive_clean(){
}

thehive_init(){
-  THEHIVE_URL="{{URLBASE}}/thehive"
-  THEHIVE_API_URL="$THEHIVE_URL/api"
+  THEHIVE_URL="http://{{MANAGERIP}}:9000/thehive/"
+  THEHIVE_API_URL="${THEHIVE_URL}api"
  THEHIVE_USER="{{THEHIVEUSER}}"
  THEHIVE_PASSWORD="{{THEHIVEPASSWORD}}"
  THEHIVE_KEY="{{THEHIVEKEY}}"
  SOCTOPUS_CONFIG="/opt/so/saltstack/salt/soctopus/files/SOCtopus.conf"

  echo -n "Waiting for TheHive..."
-  if wait_for_web_response https://$THEHIVE_URL "TheHive"; then
+  if wait_for_web_response $THEHIVE_URL "TheHive"; then
    # Migrate DB
-    curl -v -k -XPOST -L "https://$THEHIVE_API_URL/maintenance/migrate"
+    curl -sk -XPOST -L "$THEHIVE_API_URL/maintenance/migrate"
    # Create intial TheHive user
-    curl -v -k -L "https://$THEHIVE_API_URL/user" -H "Content-Type: application/json" -d "{\"login\" : \"$THEHIVE_USER\",\"name\" : \"$THEHIVE_USER\",\"roles\" : [\"read\",\"alert\",\"write\",\"admin\"],\"preferences\" : \"{}\",\"password\" : \"$THEHIVE_PASSWORD\", \"key\": \"$THEHIVE_KEY\"}"
+    curl -sk -L "$THEHIVE_API_URL/user" -H "Content-Type: application/json" -d "{\"login\" : \"$THEHIVE_USER\",\"name\" : \"$THEHIVE_USER\",\"roles\" : [\"read\",\"alert\",\"write\",\"admin\"],\"preferences\" : \"{}\",\"password\" : \"$THEHIVE_PASSWORD\", \"key\": \"$THEHIVE_KEY\"}"
    # Pre-load custom fields
    #
    # reputation
-    curl -v -k -L "https://$THEHIVE_API_URL/list/custom_fields" -H "Authorization: Bearer $THEHIVE_KEY" -H "Content-Type: application/json" -d "{\"value\":{\"name\": \"reputation\", \"reference\": \"reputation\", \"description\": \"This field provides an overall reputation status for an address/domain.\", \"type\": \"string\", \"options\": []}}"
+    curl -sk -L "$THEHIVE_API_URL/list/custom_fields" -H "Authorization: Bearer $THEHIVE_KEY" -H "Content-Type: application/json" -d "{\"value\":{\"name\": \"reputation\", \"reference\": \"reputation\", \"description\": \"This field provides an overall reputation status for an address/domain.\", \"type\": \"string\", \"options\": []}}"
    touch /opt/so/state/thehive.txt
  else
    echo "We experienced an issue connecting to TheHive!"
+    exit 1
  fi
}
@@ -43,10 +43,11 @@ if [ -f /opt/so/state/thehive.txt ]; then
  thehive_clean
  exit 0
else
-  if wait_for_web_response http://{{MANAGERIP}}:9400 '"status":"green"'; then
+  if wait_for_web_response http://{{MANAGERIP}}:9400/_cluster/health '"status":"green"'; then
    thehive_init
    thehive_clean
  else
-    echo "TheHive Elasticsearch server is not ready; unable to proceed with hive init."
+    echo "TheHive Elasticsearch server is not ready; unable to proceed with TheHive init."
+    exit 1
  fi
fi
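Note: in both init scripts the readiness probe now hits the Elasticsearch cluster-health endpoint instead of the bare port, so the '"status":"green"' match is made against a response that actually contains that field, and both scripts now exit non-zero on failure so the Salt states can see the error. The same condition can be checked manually (a sketch; substitute the manager IP):

    # Returns JSON such as {"cluster_name":"...","status":"green",...} once TheHive's ES instance is ready
    curl -s http://<manager-ip>:9400/_cluster/health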

View File

@@ -66,7 +66,7 @@ PLAYBOOK=1
REDIRECTINFO=HOSTNAME
RULESETUP=ETOPEN
# SHARDCOUNT=
-SKIP_REBOOT=0
+# SKIP_REBOOT=0
SOREMOTEPASS1=onionuser
SOREMOTEPASS2=onionuser
STRELKA=1

setup/automation/eval_iso (new file, 77 lines)

@@ -0,0 +1,77 @@
#!/bin/bash
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
TESTING=true
address_type=DHCP
ADMINUSER=onionuser
ADMINPASS1=onionuser
ADMINPASS2=onionuser
ALLOW_CIDR=0.0.0.0/0
ALLOW_ROLE=a
BASICZEEK=7
BASICSURI=7
# BLOGS=
BNICS=eth1
ZEEKVERSION=ZEEK
# CURCLOSEDAYS=
# EVALADVANCED=BASIC
GRAFANA=1
# HELIXAPIKEY=
HNMANAGER=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12
HNSENSOR=inherit
HOSTNAME=eval
install_type=EVAL
# LSINPUTBATCHCOUNT=
# LSINPUTTHREADS=
# LSPIPELINEBATCH=
# LSPIPELINEWORKERS=
MANAGERADV=BASIC
MANAGERUPDATES=1
# MDNS=
# MGATEWAY=
# MIP=
# MMASK=
MNIC=eth0
# MSEARCH=
# MSRV=
# MTU=
NIDS=Suricata
# NODE_ES_HEAP_SIZE=
# NODE_LS_HEAP_SIZE=
NODESETUP=NODEBASIC
NSMSETUP=BASIC
NODEUPDATES=MANAGER
# OINKCODE=
OSQUERY=1
# PATCHSCHEDULEDAYS=
# PATCHSCHEDULEHOURS=
PATCHSCHEDULENAME=auto
PLAYBOOK=1
# REDIRECTHOST=
REDIRECTINFO=IP
RULESETUP=ETOPEN
# SHARDCOUNT=
# SKIP_REBOOT=
SOREMOTEPASS1=onionuser
SOREMOTEPASS2=onionuser
STRELKA=1
THEHIVE=1
WAZUH=1
WEBUSER=onionuser@somewhere.invalid
WEBPASSWD1=0n10nus3r
WEBPASSWD2=0n10nus3r
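Note: the new eval_iso automation preset mirrors the other automated-install files but targets an ISO-based eval install and sets TESTING=true. The whiptail helpers later in this commit short-circuit when TESTING is set, and so-setup now runs so-test after a successful install under the same flag. The guard used in those dialogs follows this pattern (the function name below is only illustrative):

    whiptail_example_dialog() {        # hypothetical dialog following the existing pattern
      [ -n "$TESTING" ] && return      # skip interactive prompts during automated test installs
      whiptail --title "Security Onion Setup" --msgbox "..." 7 75
    }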

View File

@@ -62,11 +62,11 @@ OSQUERY=1
# PATCHSCHEDULEHOURS=
PATCHSCHEDULENAME=auto
PLAYBOOK=1
-REDIRECTHOST=securityonion
+REDIRECTHOST=$(curl http://169.254.169.254/latest/meta-data/public-ipv4)
REDIRECTINFO=OTHER
RULESETUP=ETOPEN
# SHARDCOUNT=
-SKIP_REBOOT=0
+# SKIP_REBOOT=
SOREMOTEPASS1=onionuser
SOREMOTEPASS2=onionuser
STRELKA=1

View File

@@ -66,7 +66,7 @@ PLAYBOOK=1
REDIRECTINFO=IP
RULESETUP=ETOPEN
# SHARDCOUNT=
-SKIP_REBOOT=1
+# SKIP_REBOOT=
SOREMOTEPASS1=onionuser
SOREMOTEPASS2=onionuser
STRELKA=1

View File

@@ -84,7 +84,7 @@ calculate_useable_cores() {
  lb_procs_round=$(printf "%.0f\n" $cores_for_zeek)
  if [ "$lb_procs_round" -lt 1 ]; then lb_procs=1; else lb_procs=$lb_procs_round; fi
  export lb_procs
}

airgap_rules() {
@@ -799,13 +799,13 @@ detect_os() {
    echo "Installing required packages to run installer..."
    # Install network manager so we can do interface stuff
    if ! command -v nmcli > /dev/null 2>&1; then
+      if wait_for_apt; then apt-get install -y network-manager >> "$setup_log" 2<&1; else exit 1; fi
      {
-        apt-get install -y network-manager;
-        systemctl enable NetworkManager;
-        systemctl start NetworkManager;
+        systemctl enable NetworkManager
+        systemctl start NetworkManager
      } >> "$setup_log" 2<&1
    fi
-    apt-get install -y bc curl >> "$setup_log" 2>&1
+    if wait_for_apt; then apt-get install -y bc curl >> "$setup_log" 2>&1; else exit 1; fi
  else
    echo "We were unable to determine if you are using a supported OS."
@@ -882,21 +882,28 @@ docker_install() {
  else
    case "$install_type" in
      'MANAGER' | 'EVAL' | 'STANDALONE' | 'MANAGERSEARCH' | 'IMPORT')
-        apt-get update >> "$setup_log" 2>&1
+        if wait_for_apt 'whiptail_prog_new_message'; then apt-get update >> "$setup_log" 2>&1; else kill -SIGUSR1 "$(ps --pid $$ -oppid=)"; exit 1; fi
        ;;
      *)
-        {
-          apt-key add "$temp_install_dir"/gpg/docker.pub;
-          add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable";
-          apt-get update;
-        } >> "$setup_log" 2>&1
+        if wait_for_apt 'whiptail_prog_new_message'; then
+          {
+            apt-key add "$temp_install_dir"/gpg/docker.pub;
+            add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable";
+            apt-get update;
+          } >> "$setup_log" 2>&1
+        else
+          kill -SIGUSR1 "$(ps --pid $$ -oppid=)"; exit 1
+        fi
        ;;
    esac
+    if wait_for_apt 'whiptail_prog_new_message'; then
      if [ $OSVER != "xenial" ]; then
        apt-get -y install docker-ce python3-docker >> "$setup_log" 2>&1
+      else
+        apt-get -y install docker-ce python-docker >> "$setup_log" 2>&1
+      fi
    else
-      apt-get -y install docker-ce python-docker >> "$setup_log" 2>&1
+      kill -SIGUSR1 "$(ps --pid $$ -oppid=)"; exit 1
    fi
  fi
  docker_registry
@@ -939,7 +946,7 @@ docker_seed_update() {
    if [ "$install_type" == 'HELIXSENSOR' ]; then
      percent_delta=6
    fi
-    ((docker_seed_update_percent=docker_seed_update_percent+percent_delta))
+    ((docker_seed_update_percent+=percent_delta))
    set_progress_str "$docker_seed_update_percent" "Downloading $name"
}
@@ -1370,6 +1377,38 @@ minio_generate_keys() {
}

+network_init() {
+  disable_ipv6
+  set_hostname
+  if [[ "$setup_type" == 'iso' ]]; then
+    set_management_interface
+  fi
+}
+
+network_init_whiptail() {
+  case "$setup_type" in
+    'iso')
+      whiptail_set_hostname
+      whiptail_management_nic
+      whiptail_dhcp_or_static
+      if [ "$address_type" != 'DHCP' ]; then
+        whiptail_management_interface_ip
+        whiptail_management_interface_mask
+        whiptail_management_interface_gateway
+        whiptail_management_interface_dns
+        whiptail_management_interface_dns_search
+      fi
+      ;;
+    'network')
+      whiptail_network_notice
+      whiptail_dhcp_warn
+      whiptail_set_hostname
+      whiptail_management_nic
+      ;;
+  esac
+}
+
network_setup() {
  {
    echo "Finishing up network setup";
@@ -1572,7 +1611,11 @@ remove_package() {
    fi
  else
    if dpkg -l | grep -q "$package_name"; then
-      apt purge -y "$package_name"
+      if wait_for_apt 'whiptail_prog_new_message'; then
+        apt purge -y "$package_name"
+      else
+        exit 1
+      fi
    fi
  fi
}
@@ -1644,36 +1687,43 @@ saltify() {
      yum -y install epel-release
      yum -y install salt-minion-3002.2\
        python3\
        python36-docker\
        python36-dateutil\
        python36-m2crypto\
        python36-mysql\
        yum-utils\
        device-mapper-persistent-data\
        lvm2\
        openssl\
        jq;
      yum -y update --exclude=salt*;
    fi
    systemctl enable salt-minion;
    } >> "$setup_log" 2>&1
    yum versionlock salt*
  else
-    DEBIAN_FRONTEND=noninteractive apt-get -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" upgrade >> "$setup_log" 2>&1
+    if wait_for_apt 'whiptail_prog_new_message'; then
+      DEBIAN_FRONTEND=noninteractive apt-get -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" upgrade >> "$setup_log" 2>&1
+    else
+      kill -SIGUSR1 "$(ps --pid $$ -oppid=)"; exit 1
+    fi
    if [ $OSVER != "xenial" ]; then
      # Switch to Python 3 as default if this is not xenial
      update-alternatives --install /usr/bin/python python /usr/bin/python3.6 10 >> "$setup_log" 2>&1
    fi
-    # Add the pre-requisites for installing docker-ce
-    apt-get -y install ca-certificates\
-      curl\
-      software-properties-common\
-      apt-transport-https\
-      openssl\
-      netcat\
-      jq >> "$setup_log" 2>&1
+    if wait_for_apt 'whiptail_prog_new_message'; then
+      # Add the pre-requisites for installing docker-ce
+      apt-get -y install ca-certificates\
+        curl\
+        software-properties-common\
+        apt-transport-https\
+        openssl\
+        netcat\
+        jq >> "$setup_log" 2>&1
+    else
+      kill -SIGUSR1 "$(ps --pid $$ -oppid=)"; exit 1
+    fi
    # Grab the version from the os-release file
    local ubuntu_version
    ubuntu_version=$(grep VERSION_ID /etc/os-release | awk -F '[ "]' '{print $2}')
@@ -1681,7 +1731,11 @@ saltify() {
    case "$install_type" in
      'FLEET')
-        if [ "$OSVER" != 'xenial' ]; then apt-get -y install python3-mysqldb >> "$setup_log" 2>&1; else apt-get -y install python-mysqldb >> "$setup_log" 2>&1; fi
+        if wait_for_apt 'whiptail_prog_new_message'; then
+          if [ "$OSVER" != 'xenial' ]; then apt-get -y install python3-mysqldb >> "$setup_log" 2>&1; else apt-get -y install python-mysqldb >> "$setup_log" 2>&1; fi
+        else
+          kill -SIGUSR1 "$(ps --pid $$ -oppid=)"; exit 1
+        fi
        ;;
      'MANAGER' | 'EVAL' | 'MANAGERSEARCH' | 'STANDALONE' | 'IMPORT' | 'HELIXSENSOR')
@@ -1703,13 +1757,18 @@ saltify() {
        curl -s https://packages.wazuh.com/key/GPG-KEY-WAZUH | apt-key add - >> "$setup_log" 2>&1
        # Add repo
        echo "deb https://packages.wazuh.com/3.x/apt/ stable main" > /etc/apt/sources.list.d/wazuh.list 2>> "$setup_log"
-        # Initialize the new repos
-        apt-get update >> "$setup_log" 2>&1
-        set_progress_str 6 'Installing various dependencies'
-        apt-get -y install sqlite3 argon2 libssl-dev >> "$setup_log" 2>&1
-        set_progress_str 7 'Installing salt-master'
-        apt-get -y install salt-master=3002.2+ds-1 >> "$setup_log" 2>&1
-        apt-mark hold salt-master >> "$setup_log" 2>&1
+        if wait_for_apt 'whiptail_prog_new_message'; then
+          # Initialize the new repos
+          apt-get update >> "$setup_log" 2>&1
+          set_progress_str 6 'Installing various dependencies'
+          apt-get -y install sqlite3 argon2 libssl-dev >> "$setup_log" 2>&1
+          set_progress_str 7 'Installing salt-master'
+          apt-get -y install salt-master=3002.2+ds-1 >> "$setup_log" 2>&1
+          apt-mark hold salt-master >> "$setup_log" 2>&1
+        else
+          kill -SIGUSR1 "$(ps --pid $$ -oppid=)"; exit 1
+        fi
        ;;
      *)
        # Copy down the gpg keys and install them from the manager
@@ -1723,18 +1782,21 @@ saltify() {
        echo "deb https://packages.wazuh.com/3.x/apt/ stable main" > /etc/apt/sources.list.d/wazuh.list 2>> "$setup_log"
        ;;
    esac
-    apt-get update >> "$setup_log" 2>&1
-    set_progress_str 8 'Installing salt-minion & python modules'
-    apt-get -y install salt-minion=3002.2+ds-1\
-      salt-common=3002.2+ds-1 >> "$setup_log" 2>&1
-    apt-mark hold salt-minion salt-common >> "$setup_log" 2>&1
-    if [ "$OSVER" != 'xenial' ]; then
-      apt-get -y install python3-pip python3-dateutil python3-m2crypto python3-mysqldb >> "$setup_log" 2>&1
-    else
-      apt-get -y install python-pip python-dateutil python-m2crypto python-mysqldb >> "$setup_log" 2>&1
+    if wait_for_apt 'whiptail_prog_new_message'; then
+      apt-get update >> "$setup_log" 2>&1
+      set_progress_str 8 'Installing salt-minion & python modules'
+      apt-get -y install salt-minion=3002.2+ds-1\
+        salt-common=3002.2+ds-1 >> "$setup_log" 2>&1
+      apt-mark hold salt-minion salt-common >> "$setup_log" 2>&1
+      if [ "$OSVER" != 'xenial' ]; then
+        apt-get -y install python3-pip python3-dateutil python3-m2crypto python3-mysqldb >> "$setup_log" 2>&1
+      else
+        apt-get -y install python-pip python-dateutil python-m2crypto python-mysqldb >> "$setup_log" 2>&1
+      fi
+    else
+      kill -SIGUSR1 "$(ps --pid $$ -oppid=)"; exit 1
    fi
  fi
}

salt_checkin() {
@@ -1897,7 +1959,8 @@ setup_salt_master_dirs() {
set_progress_str() {
  local percentage_input=$1
-  local progress_bar_text=$2
+  progress_bar_text=$2
+  export progress_bar_text

  if (( "$percentage_input" >= "$percentage" )); then
    percentage="$percentage_input"
@@ -2154,8 +2217,12 @@ update_packages() {
  if [ "$OS" = 'centos' ]; then
    yum -y update >> "$setup_log"
  else
-    apt-get -y update >> "$setup_log"
-    apt-get -y upgrade >> "$setup_log"
+    if wait_for_apt 'whiptail_prog_new_message'; then
+      apt-get -y update >> "$setup_log"
+      apt-get -y upgrade >> "$setup_log"
+    else
+      kill -SIGUSR1 "$(ps --pid $$ -oppid=)"; exit 1
+    fi
  fi
}
@@ -2226,6 +2293,12 @@ es_heapsize() {
  fi
}

+whiptail_prog_new_message() {
+  local message=$1
+  set_progress_str "$percentage" "$message"
+}
+
# Enable Zeek Logs
zeek_logs_enabled() {
  echo "Enabling Zeek Logs" >> "$setup_log" 2>&1

View File

@@ -23,7 +23,7 @@ if [ "$uid" -ne 0 ]; then
fi

# Save the original argument array since we modify it
-readarray -t original_args <<< "$@"
+original_args=("$@")

cd "$(dirname "$0")" || exit 255
@@ -67,7 +67,11 @@ while [[ $# -gt 0 ]]; do
  esac
done

-if ! [ -f /root/install_opt ] && [ -d /root/manager_setup/securityonion ] && [[ $(pwd) != /root/manager_setup/securityonion/setup ]]; then
+if [[ "$setup_type" == 'iso' ]]; then
+  is_iso=true
+fi
+
+if ! [ -f $install_opt_file ] && [ -d /root/manager_setup/securityonion ] && [[ $(pwd) != /root/manager_setup/securityonion/setup ]]; then
  exec bash /root/manager_setup/securityonion/setup/so-setup "${original_args[@]}"
fi
@@ -81,7 +85,7 @@ fi
parse_install_username

-if ! [ -f /root/install_opt ]; then
+if ! [ -f $install_opt_file ]; then
  # Begin Installation pre-processing
  title "Initializing Setup"
  info "Installing as the $INSTALLUSERNAME user"
@@ -164,17 +168,33 @@ if [ "$automated" == no ]; then
  fi
fi

-if ! [ -f /root/install_opt ]; then
+if ! [[ -f $install_opt_file ]]; then
  if (whiptail_you_sure); then
    true
  else
    echo "User cancelled setup." | tee -a "$setup_log"
    whiptail_cancel
  fi
-  whiptail_install_type
+  if [[ $setup_type == 'iso' ]]; then
+    whiptail_first_menu_iso
+    if [[ $option == "Configure Network" ]]; then
+      network_init_whiptail
+      whiptail_management_interface_setup
+      network_init
+      printf '%s\n' \
+        "MNIC=$MNIC" \
+        "HOSTNAME=$HOSTNAME" > "$net_init_file"
+      whiptail --title "Security Onion Setup" \
+        --msgbox "Successfully set up networking, setup will now exit." 7 75
+      exit 0
+    else
+      whiptail_install_type
+    fi
+  else
+    whiptail_install_type
+  fi
else
-  source /root/install_opt
+  source $install_opt_file
fi

if [ "$install_type" = 'EVAL' ]; then
@@ -217,11 +237,6 @@ elif [ "$install_type" = 'ANALYST' ]; then
  is_analyst=true
fi

-# Say yes to the dress if its an ISO install
-if [[ "$setup_type" == 'iso' ]]; then
-  is_iso=true
-fi
-
# Check if this is an airgap install
if [[ ( $is_manager || $is_import ) && $is_iso ]]; then
  whiptail_airgap
@@ -230,7 +245,7 @@ if [[ ( $is_manager || $is_import ) && $is_iso ]]; then
  fi
fi

-if ! [ -f /root/install_opt ]; then
+if ! [[ -f $install_opt_file ]]; then
  if [[ $is_manager && $is_sensor ]]; then
    check_requirements "standalone"
  elif [[ $is_fleet_standalone ]]; then
@@ -243,43 +258,26 @@ if ! [ -f /root/install_opt ]; then
    check_requirements "import"
  fi

-  case "$setup_type" in
-    'iso')
-      whiptail_set_hostname
-      whiptail_management_nic
-      whiptail_dhcp_or_static
-      if [ "$address_type" != 'DHCP' ]; then
-        whiptail_management_interface_ip
-        whiptail_management_interface_mask
-        whiptail_management_interface_gateway
-        whiptail_management_interface_dns
-        whiptail_management_interface_dns_search
-      fi
-      ;;
-    'network')
-      whiptail_network_notice
-      whiptail_dhcp_warn
-      whiptail_set_hostname
-      whiptail_management_nic
-      ;;
-  esac
+  [[ -f $net_init_file ]] && whiptail_net_reinit && reinit_networking=true
+
+  if [[ $reinit_networking ]] || ! [[ -f $net_init_file ]]; then
+    network_init_whiptail
+  else
+    source "$net_init_file"
+  fi

  if [[ $is_minion ]]; then
    whiptail_management_server
  fi

-  if [[ $is_minion || $is_iso ]]; then
+  if [[ $is_minion ]] || [[ $reinit_networking ]] || ! [[ -f $net_init_file ]]; then
    whiptail_management_interface_setup
  fi

-  # Init networking so rest of install works
-  disable_ipv6
-  set_hostname
-  if [[ "$setup_type" == 'iso' ]]; then
-    set_management_interface
+  if [[ $reinit_networking ]] || ! [[ -f $net_init_file ]]; then
+    network_init
  fi

  if [[ -n "$TURBO" ]]; then
    use_turbo_proxy
  fi
@@ -298,8 +296,8 @@ if ! [ -f /root/install_opt ]; then
    "install_type=$install_type" \
    "MNIC=$MNIC" \
    "HOSTNAME=$HOSTNAME" \
-    "MSRV=$MSRV"\
-    "MSRVIP=$MSRVIP" > /root/install_opt
+    "MSRV=$MSRV" \
+    "MSRVIP=$MSRVIP" > "$install_opt_file"
  download_repo_tarball
  exec bash /root/manager_setup/securityonion/setup/so-setup "${original_args[@]}"
fi
@@ -396,7 +394,10 @@ if [[ $is_manager && ! $is_eval ]]; then
      whiptail_manager_adv_escluster
    fi
  fi
-  whiptail_zeek_version
+  whiptail_metadata_tool
+  if [ "$MANAGERADV" = 'ADVANCED' ] && [ "$ZEEKVERSION" != 'SURICATA' ]; then
+    whiptail_manager_adv_service_zeeklogs
+  fi
  # Don't run this function for now since Snort is not yet supported
  # whiptail_nids
  NIDS=Suricata
@@ -406,9 +407,6 @@ if [[ $is_manager && ! $is_eval ]]; then
    whiptail_oinkcode
  fi
-  if [ "$MANAGERADV" = 'ADVANCED' ] && [ "$ZEEKVERSION" != 'SURICATA' ]; then
-    whiptail_manager_adv_service_zeeklogs
-  fi
fi

if [[ $is_manager ]]; then
@@ -842,8 +840,13 @@ if [[ -n $SO_ERROR ]]; then
else
  echo "Successfully completed setup! Continuing with post-installation steps" >> $setup_log 2>&1
  {
+    [ -n "$TESTING" ] && logCmd so-test
    export percentage=95 # set to last percentage used in previous subshell
    if [[ -n $ALLOW_ROLE && -n $ALLOW_CIDR ]]; then
+      set_progress_str 96 "Stopping SOC prior to adjusting firewall rules"
+      so-soc-stop # Stop SOC so it doesn't accept external requests prior to the reboot
      set_progress_str 97 "Running so-allow -${ALLOW_ROLE} for ${ALLOW_CIDR}"
      IP=$ALLOW_CIDR so-allow -$ALLOW_ROLE >> $setup_log 2>&1
    fi
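Note: the new "Configure Network" path writes a small state file ($net_init_file, /root/net_init per the variables file below) that a later full install can source instead of prompting for networking again. Its format, as written by the printf above, is plain shell variable assignments, for example:

    # /root/net_init, written by the network-only path and sourced on a later run
    MNIC=eth0
    HOSTNAME=eval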

View File

@@ -66,3 +66,9 @@ mkdir -p "$local_salt_dir"
SCRIPTDIR=$(pwd)
export SCRIPTDIR

+install_opt_file=/root/install_opt
+export install_opt_file
+
+net_init_file=/root/net_init
+export net_init_file

View File

@@ -658,6 +658,17 @@ whiptail_log_size_limit() {
}

+whiptail_first_menu_iso() {
+  [ -n "$TESTING" ] && return
+  option=$(whiptail --title "Security Onion Setup" --menu "Select an option" 10 75 2 \
+    "Configure Network" "Configure networking only " \
+    "Security Onion Installer" "Run the standard Security Onion installation " \
+    3>&1 1>&2 2>&3
+  )
+  local exitstatus=$?
+  whiptail_check_exitstatus $exitstatus
+}
+
whiptail_make_changes() {
  [ -n "$TESTING" ] && return
@@ -760,7 +771,11 @@ whiptail_management_interface_setup() {
  if [[ $is_iso ]]; then
    if [[ $minion_msg != "" ]]; then
-      msg="initialize networking and $minion_msg"
+      if [[ -f $net_init_file ]]; then
+        msg=$minion_msg
+      else
+        msg="initialize networking and $minion_msg"
+      fi
    else
      msg="initialize networking"
    fi
@@ -967,6 +982,12 @@ whiptail_network_notice() {
}

+whiptail_net_reinit() {
+  [ -n "$TESTING" ] && return
+  whiptail --title "Security Onion Setup" --yesno "The management interface has already been configured. Do you want to reconfigure it?" 8 75
+}
+
whiptail_node_advanced() {
  [ -n "$TESTING" ] && return